Diffstat (limited to 'drivers')
-rw-r--r-- drivers/Makefile | 2
-rw-r--r-- drivers/acpi/Kconfig | 37
-rw-r--r-- drivers/acpi/Makefile | 5
-rw-r--r-- drivers/acpi/asus_acpi.c | 9
-rw-r--r-- drivers/acpi/battery.c | 4
-rw-r--r-- drivers/acpi/bay.c | 490
-rw-r--r-- drivers/acpi/blacklist.c | 29
-rw-r--r-- drivers/acpi/bus.c | 44
-rw-r--r-- drivers/acpi/button.c | 2
-rw-r--r-- drivers/acpi/container.c | 6
-rw-r--r-- drivers/acpi/debug.c | 62
-rw-r--r-- drivers/acpi/dispatcher/dsfield.c | 32
-rw-r--r-- drivers/acpi/dispatcher/dsinit.c | 25
-rw-r--r-- drivers/acpi/dispatcher/dsmethod.c | 55
-rw-r--r-- drivers/acpi/dispatcher/dsmthdat.c | 2
-rw-r--r-- drivers/acpi/dispatcher/dsobject.c | 78
-rw-r--r-- drivers/acpi/dispatcher/dsopcode.c | 6
-rw-r--r-- drivers/acpi/dispatcher/dsutils.c | 2
-rw-r--r-- drivers/acpi/dispatcher/dswexec.c | 12
-rw-r--r-- drivers/acpi/dispatcher/dswload.c | 19
-rw-r--r-- drivers/acpi/dispatcher/dswscope.c | 2
-rw-r--r-- drivers/acpi/dispatcher/dswstate.c | 2
-rw-r--r-- drivers/acpi/dock.c | 16
-rw-r--r-- drivers/acpi/ec.c | 13
-rw-r--r-- drivers/acpi/events/evevent.c | 17
-rw-r--r-- drivers/acpi/events/evgpe.c | 91
-rw-r--r-- drivers/acpi/events/evgpeblk.c | 64
-rw-r--r-- drivers/acpi/events/evmisc.c | 201
-rw-r--r-- drivers/acpi/events/evregion.c | 17
-rw-r--r-- drivers/acpi/events/evrgnini.c | 168
-rw-r--r-- drivers/acpi/events/evsci.c | 14
-rw-r--r-- drivers/acpi/events/evxface.c | 8
-rw-r--r-- drivers/acpi/events/evxfevnt.c | 27
-rw-r--r-- drivers/acpi/events/evxfregn.c | 2
-rw-r--r-- drivers/acpi/executer/exconfig.c | 235
-rw-r--r-- drivers/acpi/executer/exconvrt.c | 2
-rw-r--r-- drivers/acpi/executer/excreate.c | 21
-rw-r--r-- drivers/acpi/executer/exdump.c | 29
-rw-r--r-- drivers/acpi/executer/exfield.c | 2
-rw-r--r-- drivers/acpi/executer/exfldio.c | 7
-rw-r--r-- drivers/acpi/executer/exmisc.c | 2
-rw-r--r-- drivers/acpi/executer/exmutex.c | 86
-rw-r--r-- drivers/acpi/executer/exnames.c | 2
-rw-r--r-- drivers/acpi/executer/exoparg1.c | 4
-rw-r--r-- drivers/acpi/executer/exoparg2.c | 2
-rw-r--r-- drivers/acpi/executer/exoparg3.c | 2
-rw-r--r-- drivers/acpi/executer/exoparg6.c | 2
-rw-r--r-- drivers/acpi/executer/exprep.c | 2
-rw-r--r-- drivers/acpi/executer/exregion.c | 16
-rw-r--r-- drivers/acpi/executer/exresnte.c | 2
-rw-r--r-- drivers/acpi/executer/exresolv.c | 10
-rw-r--r-- drivers/acpi/executer/exresop.c | 12
-rw-r--r-- drivers/acpi/executer/exstore.c | 2
-rw-r--r-- drivers/acpi/executer/exstoren.c | 2
-rw-r--r-- drivers/acpi/executer/exstorob.c | 2
-rw-r--r-- drivers/acpi/executer/exsystem.c | 110
-rw-r--r-- drivers/acpi/executer/exutils.c | 106
-rw-r--r-- drivers/acpi/fan.c | 8
-rw-r--r-- drivers/acpi/glue.c | 123
-rw-r--r-- drivers/acpi/hardware/hwacpi.c | 56
-rw-r--r-- drivers/acpi/hardware/hwgpe.c | 15
-rw-r--r-- drivers/acpi/hardware/hwregs.c | 98
-rw-r--r-- drivers/acpi/hardware/hwsleep.c | 81
-rw-r--r-- drivers/acpi/hardware/hwtimer.c | 9
-rw-r--r-- drivers/acpi/motherboard.c | 191
-rw-r--r-- drivers/acpi/namespace/nsaccess.c | 36
-rw-r--r-- drivers/acpi/namespace/nsalloc.c | 14
-rw-r--r-- drivers/acpi/namespace/nsdump.c | 13
-rw-r--r-- drivers/acpi/namespace/nsdumpdv.c | 2
-rw-r--r-- drivers/acpi/namespace/nseval.c | 13
-rw-r--r-- drivers/acpi/namespace/nsinit.c | 9
-rw-r--r-- drivers/acpi/namespace/nsload.c | 160
-rw-r--r-- drivers/acpi/namespace/nsnames.c | 2
-rw-r--r-- drivers/acpi/namespace/nsobject.c | 2
-rw-r--r-- drivers/acpi/namespace/nsparse.c | 52
-rw-r--r-- drivers/acpi/namespace/nssearch.c | 9
-rw-r--r-- drivers/acpi/namespace/nsutils.c | 9
-rw-r--r-- drivers/acpi/namespace/nswalk.c | 65
-rw-r--r-- drivers/acpi/namespace/nsxfeval.c | 13
-rw-r--r-- drivers/acpi/namespace/nsxfname.c | 47
-rw-r--r-- drivers/acpi/namespace/nsxfobj.c | 2
-rw-r--r-- drivers/acpi/numa.c | 77
-rw-r--r-- drivers/acpi/osl.c | 97
-rw-r--r-- drivers/acpi/parser/psargs.c | 2
-rw-r--r-- drivers/acpi/parser/psloop.c | 1408
-rw-r--r-- drivers/acpi/parser/psopcode.c | 2
-rw-r--r-- drivers/acpi/parser/psparse.c | 7
-rw-r--r-- drivers/acpi/parser/psscope.c | 2
-rw-r--r-- drivers/acpi/parser/pstree.c | 2
-rw-r--r-- drivers/acpi/parser/psutils.c | 2
-rw-r--r-- drivers/acpi/parser/pswalk.c | 2
-rw-r--r-- drivers/acpi/parser/psxface.c | 116
-rw-r--r-- drivers/acpi/pci_link.c | 4
-rw-r--r-- drivers/acpi/pci_root.c | 38
-rw-r--r-- drivers/acpi/processor_core.c | 189
-rw-r--r-- drivers/acpi/processor_idle.c | 52
-rw-r--r-- drivers/acpi/processor_perflib.c | 27
-rw-r--r-- drivers/acpi/processor_throttling.c | 4
-rw-r--r-- drivers/acpi/resources/rsaddr.c | 2
-rw-r--r-- drivers/acpi/resources/rscalc.c | 2
-rw-r--r-- drivers/acpi/resources/rscreate.c | 2
-rw-r--r-- drivers/acpi/resources/rsdump.c | 2
-rw-r--r-- drivers/acpi/resources/rsinfo.c | 2
-rw-r--r-- drivers/acpi/resources/rsio.c | 2
-rw-r--r-- drivers/acpi/resources/rsirq.c | 2
-rw-r--r-- drivers/acpi/resources/rslist.c | 2
-rw-r--r-- drivers/acpi/resources/rsmemory.c | 2
-rw-r--r-- drivers/acpi/resources/rsmisc.c | 2
-rw-r--r-- drivers/acpi/resources/rsutils.c | 2
-rw-r--r-- drivers/acpi/resources/rsxface.c | 2
-rw-r--r-- drivers/acpi/scan.c | 1265
-rw-r--r-- drivers/acpi/sleep/proc.c | 36
-rw-r--r-- drivers/acpi/system.c | 39
-rw-r--r-- drivers/acpi/tables.c | 508
-rw-r--r-- drivers/acpi/tables/Makefile | 3
-rw-r--r-- drivers/acpi/tables/tbconvrt.c | 622
-rw-r--r-- drivers/acpi/tables/tbfadt.c | 434
-rw-r--r-- drivers/acpi/tables/tbfind.c | 126
-rw-r--r-- drivers/acpi/tables/tbget.c | 471
-rw-r--r-- drivers/acpi/tables/tbgetall.c | 311
-rw-r--r-- drivers/acpi/tables/tbinstal.c | 664
-rw-r--r-- drivers/acpi/tables/tbrsdt.c | 307
-rw-r--r-- drivers/acpi/tables/tbutils.c | 513
-rw-r--r-- drivers/acpi/tables/tbxface.c | 671
-rw-r--r-- drivers/acpi/tables/tbxfroot.c | 552
-rw-r--r-- drivers/acpi/thermal.c | 4
-rw-r--r-- drivers/acpi/utilities/utalloc.c | 11
-rw-r--r-- drivers/acpi/utilities/utcache.c | 10
-rw-r--r-- drivers/acpi/utilities/utcopy.c | 11
-rw-r--r-- drivers/acpi/utilities/utdebug.c | 7
-rw-r--r-- drivers/acpi/utilities/utdelete.c | 16
-rw-r--r-- drivers/acpi/utilities/uteval.c | 2
-rw-r--r-- drivers/acpi/utilities/utglobal.c | 199
-rw-r--r-- drivers/acpi/utilities/utinit.c | 114
-rw-r--r-- drivers/acpi/utilities/utmath.c | 2
-rw-r--r-- drivers/acpi/utilities/utmisc.c | 102
-rw-r--r-- drivers/acpi/utilities/utmutex.c | 2
-rw-r--r-- drivers/acpi/utilities/utobject.c | 2
-rw-r--r-- drivers/acpi/utilities/utresrc.c | 2
-rw-r--r-- drivers/acpi/utilities/utstate.c | 2
-rw-r--r-- drivers/acpi/utilities/utxface.c | 29
-rw-r--r-- drivers/acpi/video.c | 166
-rw-r--r-- drivers/ata/Kconfig | 41
-rw-r--r-- drivers/ata/Makefile | 3
-rw-r--r-- drivers/ata/ahci.c | 238
-rw-r--r-- drivers/ata/ata_generic.c | 8
-rw-r--r-- drivers/ata/ata_piix.c | 56
-rw-r--r-- drivers/ata/libata-core.c | 592
-rw-r--r-- drivers/ata/libata-eh.c | 7
-rw-r--r-- drivers/ata/libata-scsi.c | 98
-rw-r--r-- drivers/ata/libata-sff.c | 641
-rw-r--r-- drivers/ata/libata.h | 4
-rw-r--r-- drivers/ata/pata_ali.c | 32
-rw-r--r-- drivers/ata/pata_amd.c | 36
-rw-r--r-- drivers/ata/pata_artop.c | 12
-rw-r--r-- drivers/ata/pata_atiixp.c | 17
-rw-r--r-- drivers/ata/pata_cmd64x.c | 18
-rw-r--r-- drivers/ata/pata_cs5520.c | 41
-rw-r--r-- drivers/ata/pata_cs5530.c | 41
-rw-r--r-- drivers/ata/pata_cs5535.c | 6
-rw-r--r-- drivers/ata/pata_cypress.c | 6
-rw-r--r-- drivers/ata/pata_efar.c | 6
-rw-r--r-- drivers/ata/pata_hpt366.c | 26
-rw-r--r-- drivers/ata/pata_hpt37x.c | 61
-rw-r--r-- drivers/ata/pata_hpt3x2n.c | 26
-rw-r--r-- drivers/ata/pata_hpt3x3.c | 8
-rw-r--r-- drivers/ata/pata_isapnp.c | 21
-rw-r--r-- drivers/ata/pata_it8213.c | 354
-rw-r--r-- drivers/ata/pata_it821x.c | 58
-rw-r--r-- drivers/ata/pata_ixp4xx_cf.c | 50
-rw-r--r-- drivers/ata/pata_jmicron.c | 26
-rw-r--r-- drivers/ata/pata_legacy.c | 166
-rw-r--r-- drivers/ata/pata_marvell.c | 12
-rw-r--r-- drivers/ata/pata_mpc52xx.c | 538
-rw-r--r-- drivers/ata/pata_mpiix.c | 113
-rw-r--r-- drivers/ata/pata_netcell.c | 6
-rw-r--r-- drivers/ata/pata_ns87410.c | 6
-rw-r--r-- drivers/ata/pata_oldpiix.c | 24
-rw-r--r-- drivers/ata/pata_opti.c | 24
-rw-r--r-- drivers/ata/pata_optidma.c | 40
-rw-r--r-- drivers/ata/pata_pcmcia.c | 27
-rw-r--r-- drivers/ata/pata_pdc2027x.c | 122
-rw-r--r-- drivers/ata/pata_pdc202xx_old.c | 41
-rw-r--r-- drivers/ata/pata_platform.c | 67
-rw-r--r-- drivers/ata/pata_qdi.c | 50
-rw-r--r-- drivers/ata/pata_radisys.c | 6
-rw-r--r-- drivers/ata/pata_rz1000.c | 6
-rw-r--r-- drivers/ata/pata_sc1200.c | 6
-rw-r--r-- drivers/ata/pata_serverworks.c | 31
-rw-r--r-- drivers/ata/pata_sil680.c | 8
-rw-r--r-- drivers/ata/pata_sis.c | 70
-rw-r--r-- drivers/ata/pata_sl82c105.c | 10
-rw-r--r-- drivers/ata/pata_triflex.c | 6
-rw-r--r-- drivers/ata/pata_via.c | 25
-rw-r--r-- drivers/ata/pata_winbond.c | 49
-rw-r--r-- drivers/ata/pdc_adma.c | 120
-rw-r--r-- drivers/ata/sata_inic162x.c | 781
-rw-r--r-- drivers/ata/sata_mv.c | 200
-rw-r--r-- drivers/ata/sata_nv.c | 629
-rw-r--r-- drivers/ata/sata_promise.c | 379
-rw-r--r-- drivers/ata/sata_qstor.c | 138
-rw-r--r-- drivers/ata/sata_sil.c | 99
-rw-r--r-- drivers/ata/sata_sil24.c | 178
-rw-r--r-- drivers/ata/sata_sis.c | 161
-rw-r--r-- drivers/ata/sata_svw.c | 133
-rw-r--r-- drivers/ata/sata_sx4.c | 207
-rw-r--r-- drivers/ata/sata_uli.c | 66
-rw-r--r-- drivers/ata/sata_via.c | 193
-rw-r--r-- drivers/ata/sata_vsc.c | 134
-rw-r--r-- drivers/base/Kconfig | 12
-rw-r--r-- drivers/base/Makefile | 1
-rw-r--r-- drivers/base/base.h | 1
-rw-r--r-- drivers/base/class.c | 21
-rw-r--r-- drivers/base/core.c | 205
-rw-r--r-- drivers/base/dd.c | 24
-rw-r--r-- drivers/base/devres.c | 644
-rw-r--r-- drivers/base/dma-mapping.c | 218
-rw-r--r-- drivers/base/dmapool.c | 59
-rw-r--r-- drivers/base/firmware_class.c | 2
-rw-r--r-- drivers/base/platform.c | 11
-rw-r--r-- drivers/char/Kconfig | 7
-rw-r--r-- drivers/char/Makefile | 3
-rw-r--r-- drivers/char/apm-emulation.c | 672
-rw-r--r-- drivers/char/drm/drmP.h | 36
-rw-r--r-- drivers/char/drm/drm_bufs.c | 19
-rw-r--r-- drivers/char/drm/drm_memory.c | 94
-rw-r--r-- drivers/char/drm/drm_memory.h | 20
-rw-r--r-- drivers/char/drm/drm_memory_debug.h | 70
-rw-r--r-- drivers/char/drm/drm_mm.c | 183
-rw-r--r-- drivers/char/drm/drm_pciids.h | 4
-rw-r--r-- drivers/char/drm/drm_proc.c | 4
-rw-r--r-- drivers/char/drm/drm_sman.c | 3
-rw-r--r-- drivers/char/drm/drm_vm.c | 16
-rw-r--r-- drivers/char/drm/i810_dma.c | 34
-rw-r--r-- drivers/char/drm/i810_drv.h | 2
-rw-r--r-- drivers/char/drm/i830_dma.c | 32
-rw-r--r-- drivers/char/drm/i830_drv.h | 2
-rw-r--r-- drivers/char/drm/via_dma.c | 9
-rw-r--r-- drivers/char/drm/via_dmablit.c | 2
-rw-r--r-- drivers/char/drm/via_drv.h | 11
-rw-r--r-- drivers/char/drm/via_irq.c | 16
-rw-r--r-- drivers/char/drm/via_map.c | 3
-rw-r--r-- drivers/char/drm/via_verifier.c | 50
-rw-r--r-- drivers/char/drm/via_verifier.h | 1
-rw-r--r-- drivers/char/hvc_beat.c | 134
-rw-r--r-- drivers/char/ipmi/ipmi_si_intf.c | 18
-rw-r--r-- drivers/char/sysrq.c | 20
-rw-r--r-- drivers/char/tpm/tpm_bios.c | 8
-rw-r--r-- drivers/char/watchdog/booke_wdt.c | 20
-rw-r--r-- drivers/char/watchdog/machzwd.c | 2
-rw-r--r-- drivers/crypto/Kconfig | 2
-rw-r--r-- drivers/crypto/geode-aes.c | 2
-rw-r--r-- drivers/firmware/pcdp.c | 2
-rw-r--r-- drivers/hid/Kconfig | 14
-rw-r--r-- drivers/hid/Makefile | 11
-rw-r--r-- drivers/hid/hid-core.c | 8
-rw-r--r-- drivers/hid/hid-debug.c | 764
-rw-r--r-- drivers/hid/hid-input.c | 35
-rw-r--r-- drivers/hwmon/ams/ams-input.c | 2
-rw-r--r-- drivers/i2c/chips/isp1301_omap.c | 2
-rw-r--r-- drivers/ide/Kconfig | 24
-rw-r--r-- drivers/ide/Makefile | 1
-rw-r--r-- drivers/ide/ide-acpi.c | 697
-rw-r--r-- drivers/ide/ide-probe.c | 3
-rw-r--r-- drivers/ide/ide.c | 44
-rw-r--r-- drivers/ide/pci/Makefile | 4
-rw-r--r-- drivers/ide/pci/delkin_cb.c | 140
-rw-r--r-- drivers/ide/pci/hpt366.c | 1583
-rw-r--r-- drivers/ide/pci/it8213.c | 362
-rw-r--r-- drivers/ide/pci/pdc202xx_new.c | 56
-rw-r--r-- drivers/ide/pci/pdc202xx_old.c | 27
-rw-r--r-- drivers/ide/pci/piix.c | 31
-rw-r--r-- drivers/ide/pci/slc90e66.c | 55
-rw-r--r-- drivers/ide/pci/tc86c001.c | 309
-rw-r--r-- drivers/ide/pci/via82cxxx.c | 2
-rw-r--r-- drivers/ide/setup-pci.c | 7
-rw-r--r-- drivers/ieee1394/.gitignore | 1
-rw-r--r-- drivers/ieee1394/Kconfig | 21
-rw-r--r-- drivers/ieee1394/Makefile | 10
-rw-r--r-- drivers/ieee1394/csr1212.c | 15
-rw-r--r-- drivers/ieee1394/dv1394.c | 46
-rw-r--r-- drivers/ieee1394/hosts.c | 13
-rw-r--r-- drivers/ieee1394/hosts.h | 7
-rw-r--r-- drivers/ieee1394/ieee1394_core.c | 23
-rw-r--r-- drivers/ieee1394/nodemgr.c | 63
-rw-r--r-- drivers/ieee1394/nodemgr.h | 3
-rw-r--r-- drivers/ieee1394/ohci1394.c | 17
-rw-r--r-- drivers/ieee1394/oui.db | 7048
-rw-r--r-- drivers/ieee1394/oui2c.sh | 22
-rw-r--r-- drivers/ieee1394/raw1394.c | 48
-rw-r--r-- drivers/ieee1394/sbp2.c | 23
-rw-r--r-- drivers/ieee1394/video1394.c | 8
-rw-r--r-- drivers/infiniband/core/addr.c | 3
-rw-r--r-- drivers/infiniband/core/mad.c | 11
-rw-r--r-- drivers/infiniband/core/uverbs_cmd.c | 2
-rw-r--r-- drivers/infiniband/hw/amso1100/c2_cq.c | 2
-rw-r--r-- drivers/infiniband/hw/ehca/ehca_classes.h | 29
-rw-r--r-- drivers/infiniband/hw/ehca/ehca_cq.c | 65
-rw-r--r-- drivers/infiniband/hw/ehca/ehca_iverbs.h | 8
-rw-r--r-- drivers/infiniband/hw/ehca/ehca_main.c | 6
-rw-r--r-- drivers/infiniband/hw/ehca/ehca_qp.c | 78
-rw-r--r-- drivers/infiniband/hw/ehca/ehca_reqs.c | 2
-rw-r--r-- drivers/infiniband/hw/ehca/ehca_uverbs.c | 395
-rw-r--r-- drivers/infiniband/hw/ipath/ipath_qp.c | 2
-rw-r--r-- drivers/infiniband/hw/ipath/ipath_rc.c | 8
-rw-r--r-- drivers/infiniband/hw/ipath/ipath_ruc.c | 8
-rw-r--r-- drivers/infiniband/hw/ipath/ipath_uc.c | 4
-rw-r--r-- drivers/infiniband/hw/ipath/ipath_ud.c | 8
-rw-r--r-- drivers/infiniband/hw/mthca/mthca_cmd.c | 2
-rw-r--r-- drivers/infiniband/hw/mthca/mthca_cq.c | 2
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_main.c | 33
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_vlan.c | 11
-rw-r--r-- drivers/infiniband/ulp/iser/iser_initiator.c | 4
-rw-r--r-- drivers/infiniband/ulp/srp/ib_srp.c | 7
-rw-r--r-- drivers/infiniband/ulp/srp/ib_srp.h | 1
-rw-r--r-- drivers/input/serio/serio.c | 6
-rw-r--r-- drivers/input/touchscreen/ucb1400_ts.c | 2
-rw-r--r-- drivers/kvm/kvm_main.c | 63
-rw-r--r-- drivers/kvm/mmu.c | 2
-rw-r--r-- drivers/kvm/svm.c | 8
-rw-r--r-- drivers/kvm/vmx.c | 4
-rw-r--r-- drivers/macintosh/Kconfig | 2
-rw-r--r-- drivers/macintosh/rack-meter.c | 6
-rw-r--r-- drivers/macintosh/windfarm_core.c | 6
-rw-r--r-- drivers/md/bitmap.c | 22
-rw-r--r-- drivers/md/raid5.c | 42
-rw-r--r-- drivers/media/common/ir-keymaps.c | 1
-rw-r--r-- drivers/media/video/usbvision/usbvision-video.c | 2
-rw-r--r-- drivers/media/video/zc0301/zc0301_sensor.h | 1
-rw-r--r-- drivers/misc/Kconfig | 21
-rw-r--r-- drivers/misc/Makefile | 1
-rw-r--r-- drivers/misc/asus-laptop.c | 1165
-rw-r--r-- drivers/misc/lkdtm.c | 4
-rw-r--r-- drivers/misc/tifm_7xx1.c | 402
-rw-r--r-- drivers/misc/tifm_core.c | 65
-rw-r--r-- drivers/mmc/Kconfig | 2
-rw-r--r-- drivers/mmc/at91_mci.c | 3
-rw-r--r-- drivers/mmc/au1xmmc.c | 13
-rw-r--r-- drivers/mmc/imxmmc.c | 4
-rw-r--r-- drivers/mmc/mmc.c | 182
-rw-r--r-- drivers/mmc/mmc_block.c | 15
-rw-r--r-- drivers/mmc/mmc_queue.c | 2
-rw-r--r-- drivers/mmc/mmc_sysfs.c | 2
-rw-r--r-- drivers/mmc/mmci.c | 15
-rw-r--r-- drivers/mmc/omap.c | 6
-rw-r--r-- drivers/mmc/pxamci.c | 10
-rw-r--r-- drivers/mmc/sdhci.c | 91
-rw-r--r-- drivers/mmc/sdhci.h | 2
-rw-r--r-- drivers/mmc/tifm_sd.c | 487
-rw-r--r-- drivers/mmc/wbsd.c | 102
-rw-r--r-- drivers/mmc/wbsd.h | 1
-rw-r--r-- drivers/net/3c503.c | 3
-rw-r--r-- drivers/net/3c59x.c | 3
-rw-r--r-- drivers/net/Kconfig | 108
-rw-r--r-- drivers/net/Makefile | 7
-rw-r--r-- drivers/net/Space.c | 4
-rw-r--r-- drivers/net/ac3200.c | 3
-rw-r--r-- drivers/net/amd8111e.c | 3
-rw-r--r-- drivers/net/arm/at91_ether.c | 2
-rw-r--r-- drivers/net/arm/etherh.c | 2
-rw-r--r-- drivers/net/atl1/Makefile | 2
-rw-r--r-- drivers/net/atl1/atl1.h | 283
-rw-r--r-- drivers/net/atl1/atl1_ethtool.c | 508
-rw-r--r-- drivers/net/atl1/atl1_hw.c | 718
-rw-r--r-- drivers/net/atl1/atl1_hw.h | 951
-rw-r--r-- drivers/net/atl1/atl1_main.c | 2468
-rw-r--r-- drivers/net/atl1/atl1_param.c | 206
-rw-r--r-- drivers/net/b44.c | 8
-rw-r--r-- drivers/net/b44.h | 10
-rw-r--r-- drivers/net/bmac.c | 20
-rw-r--r-- drivers/net/bnx2.c | 30
-rw-r--r-- drivers/net/bnx2.h | 6
-rw-r--r-- drivers/net/bonding/bond_alb.c | 4
-rw-r--r-- drivers/net/bonding/bond_main.c | 27
-rw-r--r-- drivers/net/bonding/bond_sysfs.c | 302
-rw-r--r-- drivers/net/bonding/bonding.h | 9
-rw-r--r-- drivers/net/chelsio/common.h | 2
-rw-r--r-- drivers/net/chelsio/cpl5_cmd.h | 18
-rw-r--r-- drivers/net/chelsio/cxgb2.c | 149
-rw-r--r-- drivers/net/chelsio/elmer0.h | 40
-rw-r--r-- drivers/net/chelsio/espi.c | 44
-rw-r--r-- drivers/net/chelsio/fpga_defs.h | 6
-rw-r--r-- drivers/net/chelsio/gmac.h | 11
-rw-r--r-- drivers/net/chelsio/ixf1010.c | 100
-rw-r--r-- drivers/net/chelsio/mv88e1xxx.c | 27
-rw-r--r-- drivers/net/chelsio/my3126.c | 16
-rw-r--r-- drivers/net/chelsio/pm3393.c | 91
-rw-r--r-- drivers/net/chelsio/sge.c | 328
-rw-r--r-- drivers/net/chelsio/subr.c | 89
-rw-r--r-- drivers/net/chelsio/tp.c | 62
-rw-r--r-- drivers/net/chelsio/vsc7326.c | 139
-rw-r--r-- drivers/net/chelsio/vsc7326_reg.h | 139
-rw-r--r-- drivers/net/chelsio/vsc8244.c | 41
-rw-r--r-- drivers/net/cxgb3/Makefile | 8
-rw-r--r-- drivers/net/cxgb3/adapter.h | 279
-rw-r--r-- drivers/net/cxgb3/ael1002.c | 251
-rw-r--r-- drivers/net/cxgb3/common.h | 729
-rw-r--r-- drivers/net/cxgb3/cxgb3_ctl_defs.h | 164
-rw-r--r-- drivers/net/cxgb3/cxgb3_defs.h | 99
-rw-r--r-- drivers/net/cxgb3/cxgb3_ioctl.h | 185
-rw-r--r-- drivers/net/cxgb3/cxgb3_main.c | 2519
-rw-r--r-- drivers/net/cxgb3/cxgb3_offload.c | 1222
-rw-r--r-- drivers/net/cxgb3/cxgb3_offload.h | 193
-rw-r--r-- drivers/net/cxgb3/firmware_exports.h | 177
-rw-r--r-- drivers/net/cxgb3/l2t.c | 450
-rw-r--r-- drivers/net/cxgb3/l2t.h | 143
-rw-r--r-- drivers/net/cxgb3/mc5.c | 473
-rw-r--r-- drivers/net/cxgb3/regs.h | 2195
-rw-r--r-- drivers/net/cxgb3/sge.c | 2681
-rw-r--r-- drivers/net/cxgb3/sge_defs.h | 251
-rw-r--r-- drivers/net/cxgb3/t3_cpl.h | 1444
-rw-r--r-- drivers/net/cxgb3/t3_hw.c | 3375
-rw-r--r-- drivers/net/cxgb3/t3cdev.h | 73
-rw-r--r-- drivers/net/cxgb3/version.h | 39
-rw-r--r-- drivers/net/cxgb3/vsc8211.c | 228
-rw-r--r-- drivers/net/cxgb3/xgmac.c | 409
-rw-r--r-- drivers/net/declance.c | 164
-rw-r--r-- drivers/net/defxx.c | 928
-rw-r--r-- drivers/net/defxx.h | 58
-rw-r--r-- drivers/net/e100.c | 7
-rw-r--r-- drivers/net/e1000/e1000.h | 7
-rw-r--r-- drivers/net/e1000/e1000_ethtool.c | 6
-rw-r--r-- drivers/net/e1000/e1000_main.c | 130
-rw-r--r-- drivers/net/e1000/e1000_osdep.h | 4
-rw-r--r-- drivers/net/e1000/e1000_param.c | 15
-rw-r--r-- drivers/net/e2100.c | 3
-rw-r--r-- drivers/net/es3210.c | 2
-rw-r--r-- drivers/net/forcedeth.c | 1342
-rw-r--r-- drivers/net/fs_enet/fs_enet.h | 1
-rw-r--r-- drivers/net/gianfar_ethtool.c | 2
-rw-r--r-- drivers/net/hamradio/Kconfig | 6
-rw-r--r-- drivers/net/hp100.c | 2
-rw-r--r-- drivers/net/iseries_veth.c | 2
-rw-r--r-- drivers/net/ixgb/ixgb.h | 2
-rw-r--r-- drivers/net/ixgb/ixgb_ethtool.c | 6
-rw-r--r-- drivers/net/ixgb/ixgb_main.c | 6
-rw-r--r-- drivers/net/macb.c | 61
-rw-r--r-- drivers/net/macb.h | 8
-rw-r--r-- drivers/net/mace.c | 16
-rw-r--r-- drivers/net/macmace.c | 18
-rw-r--r-- drivers/net/macsonic.c | 7
-rw-r--r-- drivers/net/mv643xx_eth.c | 1
-rw-r--r-- drivers/net/myri10ge/myri10ge.c | 10
-rw-r--r-- drivers/net/netxen/netxen_nic.h | 153
-rw-r--r-- drivers/net/netxen/netxen_nic_ethtool.c | 118
-rw-r--r-- drivers/net/netxen/netxen_nic_hw.c | 43
-rw-r--r-- drivers/net/netxen/netxen_nic_hw.h | 74
-rw-r--r-- drivers/net/netxen/netxen_nic_init.c | 294
-rw-r--r-- drivers/net/netxen/netxen_nic_isr.c | 4
-rw-r--r-- drivers/net/netxen/netxen_nic_main.c | 8
-rw-r--r-- drivers/net/netxen/netxen_nic_niu.c | 106
-rw-r--r-- drivers/net/oaknet.c | 666
-rw-r--r-- drivers/net/pasemi_mac.c | 1019
-rw-r--r-- drivers/net/pasemi_mac.h | 460
-rwxr-xr-x [-rw-r--r--] drivers/net/qla3xxx.c | 363
-rwxr-xr-x [-rw-r--r--] drivers/net/qla3xxx.h | 88
-rw-r--r-- drivers/net/r8169.c | 4
-rw-r--r-- drivers/net/s2io-regs.h | 7
-rw-r--r-- drivers/net/s2io.c | 1180
-rw-r--r-- drivers/net/s2io.h | 227
-rw-r--r-- drivers/net/sc92031.c | 1620
-rw-r--r-- drivers/net/sk_mca.c | 1216
-rw-r--r-- drivers/net/sk_mca.h | 170
-rw-r--r-- drivers/net/skfp/can.c | 83
-rw-r--r-- drivers/net/skfp/drvfbi.c | 24
-rw-r--r-- drivers/net/skfp/fplustm.c | 4
-rw-r--r-- drivers/net/skfp/smt.c | 10
-rw-r--r-- drivers/net/skge.c | 235
-rw-r--r-- drivers/net/skge.h | 2
-rw-r--r-- drivers/net/sky2.c | 543
-rw-r--r-- drivers/net/sky2.h | 85
-rw-r--r-- drivers/net/slip.c | 5
-rw-r--r-- drivers/net/smc-mca.c | 3
-rw-r--r-- drivers/net/smc-ultra.c | 3
-rw-r--r-- drivers/net/smc-ultra32.c | 3
-rw-r--r-- drivers/net/smc911x.c | 7
-rw-r--r-- drivers/net/smc91x.c | 2
-rw-r--r-- drivers/net/spider_net.c | 317
-rw-r--r-- drivers/net/spider_net.h | 20
-rw-r--r-- drivers/net/spider_net_ethtool.c | 4
-rw-r--r-- drivers/net/tg3.c | 34
-rw-r--r-- drivers/net/ucc_geth.c | 102
-rw-r--r-- drivers/net/ucc_geth_phy.c | 2
-rw-r--r-- drivers/net/wan/Kconfig | 24
-rw-r--r-- drivers/net/wan/Makefile | 1
-rw-r--r-- drivers/net/wan/hdlc.c | 3
-rw-r--r-- drivers/net/wan/pc300too.c | 565
-rw-r--r-- drivers/net/wan/z85230.c | 14
-rw-r--r-- drivers/net/wd.c | 2
-rw-r--r-- drivers/net/wireless/bcm43xx/bcm43xx.h | 7
-rw-r--r-- drivers/net/wireless/bcm43xx/bcm43xx_leds.c | 11
-rw-r--r-- drivers/net/wireless/bcm43xx/bcm43xx_main.c | 36
-rw-r--r-- drivers/net/wireless/bcm43xx/bcm43xx_radio.c | 2
-rw-r--r-- drivers/net/wireless/bcm43xx/bcm43xx_radio.h | 16
-rw-r--r-- drivers/net/wireless/hostap/hostap_main.c | 2
-rw-r--r-- drivers/net/wireless/ipw2200.c | 4
-rw-r--r-- drivers/net/wireless/orinoco.c | 6
-rw-r--r-- drivers/net/wireless/orinoco_cs.c | 2
-rw-r--r-- drivers/net/wireless/prism54/islpci_dev.c | 13
-rw-r--r-- drivers/net/wireless/prism54/islpci_dev.h | 4
-rw-r--r-- drivers/net/wireless/prism54/islpci_hotplug.c | 3
-rw-r--r-- drivers/net/wireless/spectrum_cs.c | 2
-rw-r--r-- drivers/net/wireless/zd1211rw/zd_chip.c | 126
-rw-r--r-- drivers/net/wireless/zd1211rw/zd_chip.h | 158
-rw-r--r-- drivers/net/wireless/zd1211rw/zd_def.h | 2
-rw-r--r-- drivers/net/wireless/zd1211rw/zd_ieee80211.h | 1
-rw-r--r-- drivers/net/wireless/zd1211rw/zd_rf.h | 2
-rw-r--r-- drivers/net/wireless/zd1211rw/zd_types.h | 71
-rw-r--r-- drivers/net/wireless/zd1211rw/zd_usb.c | 128
-rw-r--r-- drivers/net/wireless/zd1211rw/zd_usb.h | 6
-rw-r--r-- drivers/pci/hotplug/Kconfig | 9
-rw-r--r-- drivers/pci/hotplug/acpiphp_glue.c | 10
-rw-r--r-- drivers/pci/hotplug/pciehp.h | 194
-rw-r--r-- drivers/pci/hotplug/pciehp_core.c | 292
-rw-r--r-- drivers/pci/hotplug/pciehp_ctrl.c | 223
-rw-r--r-- drivers/pci/hotplug/pciehp_hpc.c | 827
-rw-r--r-- drivers/pci/hotplug/sgi_hotplug.c | 155
-rw-r--r-- drivers/pci/hotplug/shpchp.h | 4
-rw-r--r-- drivers/pci/hotplug/shpchp_core.c | 4
-rw-r--r-- drivers/pci/hotplug/shpchp_ctrl.c | 20
-rw-r--r-- drivers/pci/hotplug/shpchp_hpc.c | 185
-rw-r--r-- drivers/pci/msi.c | 325
-rw-r--r-- drivers/pci/pci-driver.c | 7
-rw-r--r-- drivers/pci/pci.c | 298
-rw-r--r-- drivers/pci/pci.h | 14
-rw-r--r-- drivers/pci/probe.c | 70
-rw-r--r-- drivers/pci/quirks.c | 145
-rw-r--r-- drivers/pci/search.c | 48
-rw-r--r-- drivers/pcmcia/cs.c | 34
-rw-r--r-- drivers/pcmcia/cs_internal.h | 4
-rw-r--r-- drivers/pcmcia/ds.c | 14
-rw-r--r-- drivers/pcmcia/i82092.c | 2
-rw-r--r-- drivers/pcmcia/i82365.c | 2
-rw-r--r-- drivers/pcmcia/m32r_pcc.c | 2
-rw-r--r-- drivers/pcmcia/pcmcia_ioctl.c | 1
-rw-r--r-- drivers/pcmcia/pcmcia_resource.c | 1
-rw-r--r-- drivers/pcmcia/pd6729.c | 2
-rw-r--r-- drivers/pcmcia/rsrc_nonstatic.c | 56
-rw-r--r-- drivers/pcmcia/soc_common.c | 6
-rw-r--r-- drivers/pcmcia/socket_sysfs.c | 104
-rw-r--r-- drivers/pcmcia/tcic.c | 2
-rw-r--r-- drivers/pcmcia/yenta_socket.c | 2
-rw-r--r-- drivers/pnp/pnpacpi/Kconfig | 4
-rw-r--r-- drivers/pnp/system.c | 52
-rw-r--r-- drivers/ps3/Makefile | 1
-rw-r--r-- drivers/ps3/system-bus.c | 362
-rw-r--r-- drivers/ps3/vuart.c | 4
-rw-r--r-- drivers/ps3/vuart.h | 38
-rw-r--r-- drivers/rtc/rtc-dev.c | 2
-rw-r--r-- drivers/rtc/rtc-pcf8563.c | 40
-rw-r--r-- drivers/s390/Kconfig | 8
-rw-r--r-- drivers/s390/Makefile | 2
-rw-r--r-- drivers/s390/block/dasd.c | 33
-rw-r--r-- drivers/s390/block/dasd_3990_erp.c | 5
-rw-r--r-- drivers/s390/block/dasd_devmap.c | 6
-rw-r--r-- drivers/s390/block/dasd_diag.c | 8
-rw-r--r-- drivers/s390/block/dasd_eckd.c | 95
-rw-r--r-- drivers/s390/block/dasd_eer.c | 24
-rw-r--r-- drivers/s390/block/dasd_erp.c | 80
-rw-r--r-- drivers/s390/block/dasd_fba.c | 4
-rw-r--r-- drivers/s390/block/dasd_genhd.c | 2
-rw-r--r-- drivers/s390/block/dasd_int.h | 1
-rw-r--r-- drivers/s390/block/dasd_proc.c | 8
-rw-r--r-- drivers/s390/block/dcssblk.c | 6
-rw-r--r-- drivers/s390/char/Makefile | 4
-rw-r--r-- drivers/s390/char/con3215.c | 2
-rw-r--r-- drivers/s390/char/con3270.c | 3
-rw-r--r-- drivers/s390/char/defkeymap.c | 2
-rw-r--r-- drivers/s390/char/fs3270.c | 4
-rw-r--r-- drivers/s390/char/keyboard.c | 2
-rw-r--r-- drivers/s390/char/monreader.c | 218
-rw-r--r-- drivers/s390/char/monwriter.c | 4
-rw-r--r-- drivers/s390/char/raw3270.c | 4
-rw-r--r-- drivers/s390/char/sclp.c | 93
-rw-r--r-- drivers/s390/char/sclp.h | 18
-rw-r--r-- drivers/s390/char/sclp_con.c | 2
-rw-r--r-- drivers/s390/char/sclp_cpi.c | 2
-rw-r--r-- drivers/s390/char/sclp_info.c | 57
-rw-r--r-- drivers/s390/char/sclp_rw.c | 2
-rw-r--r-- drivers/s390/char/sclp_tty.c | 2
-rw-r--r-- drivers/s390/char/sclp_vt220.c | 4
-rw-r--r-- drivers/s390/char/tape.h | 22
-rw-r--r-- drivers/s390/char/tape_3590.c | 479
-rw-r--r-- drivers/s390/char/tape_3590.h | 53
-rw-r--r-- drivers/s390/char/tape_block.c | 4
-rw-r--r-- drivers/s390/char/tape_char.c | 27
-rw-r--r-- drivers/s390/char/tape_core.c | 69
-rw-r--r-- drivers/s390/char/tty3270.c | 13
-rw-r--r-- drivers/s390/char/vmlogrdr.c | 284
-rw-r--r-- drivers/s390/cio/blacklist.c | 10
-rw-r--r-- drivers/s390/cio/ccwgroup.c | 6
-rw-r--r-- drivers/s390/cio/chsc.c | 270
-rw-r--r-- drivers/s390/cio/chsc.h | 11
-rw-r--r-- drivers/s390/cio/cio.c | 37
-rw-r--r-- drivers/s390/cio/cmf.c | 4
-rw-r--r-- drivers/s390/cio/css.c | 13
-rw-r--r-- drivers/s390/cio/css.h | 2
-rw-r--r-- drivers/s390/cio/device.c | 12
-rw-r--r-- drivers/s390/cio/device.h | 2
-rw-r--r-- drivers/s390/cio/device_fsm.c | 8
-rw-r--r-- drivers/s390/cio/device_ops.c | 2
-rw-r--r-- drivers/s390/cio/device_status.c | 8
-rw-r--r-- drivers/s390/cio/qdio.c | 77
-rw-r--r-- drivers/s390/crypto/ap_bus.c | 8
-rw-r--r-- drivers/s390/crypto/zcrypt_api.c | 20
-rw-r--r-- drivers/s390/crypto/zcrypt_pcica.c | 8
-rw-r--r-- drivers/s390/crypto/zcrypt_pcixcc.c | 3
-rw-r--r-- drivers/s390/net/Kconfig | 7
-rw-r--r-- drivers/s390/net/Makefile | 1
-rw-r--r-- drivers/s390/net/claw.c | 16
-rw-r--r-- drivers/s390/net/ctcmain.c | 8
-rw-r--r-- drivers/s390/net/cu3088.c | 2
-rw-r--r-- drivers/s390/net/iucv.c | 2540
-rw-r--r-- drivers/s390/net/iucv.h | 849
-rw-r--r-- drivers/s390/net/lcs.c | 6
-rw-r--r-- drivers/s390/net/netiucv.c | 1318
-rw-r--r-- drivers/s390/net/qeth_eddp.c | 28
-rw-r--r-- drivers/s390/net/qeth_main.c | 92
-rw-r--r-- drivers/s390/net/qeth_sys.c | 30
-rw-r--r-- drivers/s390/net/smsgiucv.c | 147
-rw-r--r-- drivers/s390/s390mach.c | 37
-rw-r--r-- drivers/s390/s390mach.h | 3
-rw-r--r-- drivers/s390/scsi/zfcp_aux.c | 25
-rw-r--r-- drivers/s390/scsi/zfcp_dbf.c | 44
-rw-r--r-- drivers/s390/scsi/zfcp_erp.c | 7
-rw-r--r-- drivers/s390/scsi/zfcp_ext.h | 4
-rw-r--r-- drivers/s390/scsi/zfcp_fsf.c | 2
-rw-r--r-- drivers/s390/scsi/zfcp_qdio.c | 38
-rw-r--r-- drivers/s390/scsi/zfcp_scsi.c | 18
-rw-r--r-- drivers/s390/sysinfo.c | 63
-rw-r--r-- drivers/scsi/NCR53C9x.c | 8
-rw-r--r-- drivers/scsi/NCR53C9x.h | 2
-rw-r--r-- drivers/scsi/blz1230.c | 3
-rw-r--r-- drivers/scsi/blz2060.c | 2
-rw-r--r-- drivers/scsi/cyberstorm.c | 2
-rw-r--r-- drivers/scsi/cyberstormII.c | 2
-rw-r--r-- drivers/scsi/dec_esp.c | 355
-rw-r--r-- drivers/scsi/fastlane.c | 2
-rw-r--r-- drivers/scsi/iscsi_tcp.c | 2
-rw-r--r-- drivers/scsi/jazz_esp.c | 2
-rw-r--r-- drivers/scsi/libiscsi.c | 40
-rw-r--r-- drivers/scsi/mac_esp.c | 2
-rw-r--r-- drivers/scsi/mca_53c9x.c | 2
-rw-r--r-- drivers/scsi/oktagon_esp.c | 2
-rw-r--r-- drivers/scsi/osst.c | 8
-rw-r--r-- drivers/scsi/osst.h | 68
-rw-r--r-- drivers/scsi/qla4xxx/ql4_def.h | 1
-rw-r--r-- drivers/scsi/qla4xxx/ql4_glbl.h | 1
-rw-r--r-- drivers/scsi/qla4xxx/ql4_init.c | 18
-rw-r--r-- drivers/scsi/qla4xxx/ql4_isr.c | 4
-rw-r--r-- drivers/scsi/qla4xxx/ql4_mbx.c | 35
-rw-r--r-- drivers/scsi/qla4xxx/ql4_os.c | 64
-rw-r--r-- drivers/scsi/qla4xxx/ql4_version.h | 2
-rw-r--r-- drivers/scsi/scsi_scan.c | 6
-rw-r--r-- drivers/scsi/sd.c | 20
-rw-r--r-- drivers/scsi/st.c | 19
-rw-r--r-- drivers/scsi/sun3x_esp.c | 2
-rw-r--r-- drivers/serial/cpm_uart/cpm_uart_cpm1.c | 15
-rw-r--r-- drivers/serial/cpm_uart/cpm_uart_cpm1.h | 3
-rw-r--r-- drivers/serial/cpm_uart/cpm_uart_cpm2.h | 3
-rw-r--r-- drivers/serial/uartlite.c | 6
-rw-r--r-- drivers/tc/Makefile | 2
-rw-r--r-- drivers/tc/tc-driver.c | 110
-rw-r--r-- drivers/tc/tc.c | 339
-rw-r--r-- drivers/usb/atm/speedtch.c | 2
-rw-r--r-- drivers/usb/class/usblp.c | 16
-rw-r--r-- drivers/usb/core/Kconfig | 13
-rw-r--r-- drivers/usb/core/buffer.c | 36
-rw-r--r-- drivers/usb/core/devices.c | 22
-rw-r--r-- drivers/usb/core/devio.c | 25
-rw-r--r-- drivers/usb/core/driver.c | 39
-rw-r--r-- drivers/usb/core/file.c | 13
-rw-r--r-- drivers/usb/core/generic.c | 28
-rw-r--r-- drivers/usb/core/hcd.c | 137
-rw-r--r-- drivers/usb/core/hcd.h | 6
-rw-r--r-- drivers/usb/core/hub.c | 64
-rw-r--r-- drivers/usb/core/message.c | 6
-rw-r--r-- drivers/usb/core/sysfs.c | 98
-rw-r--r-- drivers/usb/core/urb.c | 21
-rw-r--r-- drivers/usb/core/usb.c | 96
-rw-r--r-- drivers/usb/gadget/at91_udc.c | 21
-rw-r--r-- drivers/usb/gadget/at91_udc.h | 1
-rw-r--r-- drivers/usb/gadget/config.c | 2
-rw-r--r-- drivers/usb/gadget/epautoconf.c | 2
-rw-r--r-- drivers/usb/gadget/ether.c | 148
-rw-r--r-- drivers/usb/gadget/file_storage.c | 33
-rw-r--r-- drivers/usb/gadget/gadget_chips.h | 8
-rw-r--r-- drivers/usb/gadget/gmidi.c | 2
-rw-r--r-- drivers/usb/gadget/goku_udc.c | 2
-rw-r--r-- drivers/usb/gadget/inode.c | 240
-rw-r--r-- drivers/usb/gadget/lh7a40x_udc.h | 2
-rw-r--r-- drivers/usb/gadget/net2280.c | 2
-rw-r--r-- drivers/usb/gadget/omap_udc.c | 2
-rw-r--r-- drivers/usb/gadget/pxa2xx_udc.c | 2
-rw-r--r-- drivers/usb/gadget/serial.c | 2
-rw-r--r-- drivers/usb/gadget/usbstring.c | 2
-rw-r--r-- drivers/usb/gadget/zero.c | 2
-rw-r--r-- drivers/usb/host/Kconfig | 38
-rw-r--r-- drivers/usb/host/ehci-dbg.c | 24
-rw-r--r-- drivers/usb/host/ehci-fsl.c | 8
-rw-r--r-- drivers/usb/host/ehci-hcd.c | 127
-rw-r--r-- drivers/usb/host/ehci-hub.c | 324
-rw-r--r-- drivers/usb/host/ehci-pci.c | 38
-rw-r--r-- drivers/usb/host/ehci-ps3.c | 193
-rw-r--r-- drivers/usb/host/ehci-q.c | 16
-rw-r--r-- drivers/usb/host/ehci-sched.c | 22
-rw-r--r-- drivers/usb/host/ehci.h | 46
-rw-r--r-- drivers/usb/host/ohci-at91.c | 23
-rw-r--r-- drivers/usb/host/ohci-au1xxx.c | 16
-rw-r--r-- drivers/usb/host/ohci-ep93xx.c | 12
-rw-r--r-- drivers/usb/host/ohci-hcd.c | 128
-rw-r--r-- drivers/usb/host/ohci-lh7a404.c | 16
-rw-r--r-- drivers/usb/host/ohci-omap.c | 19
-rw-r--r-- drivers/usb/host/ohci-pci.c | 219
-rw-r--r-- drivers/usb/host/ohci-pnx4008.c | 12
-rw-r--r-- drivers/usb/host/ohci-pnx8550.c | 16
-rw-r--r-- drivers/usb/host/ohci-ppc-of.c | 232
-rw-r--r-- drivers/usb/host/ohci-ppc-soc.c | 18
-rw-r--r-- drivers/usb/host/ohci-ps3.c | 196
-rw-r--r-- drivers/usb/host/ohci-pxa27x.c | 16
-rw-r--r-- drivers/usb/host/ohci-s3c2410.c | 12
-rw-r--r-- drivers/usb/host/ohci-sa1111.c | 16
-rw-r--r-- drivers/usb/host/ohci.h | 155
-rw-r--r-- drivers/usb/host/uhci-debug.c | 71
-rw-r--r-- drivers/usb/host/uhci-hcd.c | 51
-rw-r--r-- drivers/usb/host/uhci-hcd.h | 8
-rw-r--r-- drivers/usb/host/uhci-q.c | 258
-rw-r--r-- drivers/usb/image/mdc800.c | 4
-rw-r--r-- drivers/usb/input/Kconfig | 20
-rw-r--r-- drivers/usb/input/Makefile | 4
-rw-r--r-- drivers/usb/input/gtco.c | 1104
-rw-r--r-- drivers/usb/input/hid-core.c | 116
-rw-r--r-- drivers/usb/input/hid-ff.c | 3
-rw-r--r-- drivers/usb/input/hid-lgff.c | 4
-rw-r--r-- drivers/usb/input/hid-plff.c | 129
-rw-r--r-- drivers/usb/misc/idmouse.c | 10
-rw-r--r-- drivers/usb/misc/rio500.c | 54
-rw-r--r-- drivers/usb/mon/Makefile | 2
-rw-r--r-- drivers/usb/mon/mon_bin.c | 1172
-rw-r--r-- drivers/usb/mon/mon_dma.c | 39
-rw-r--r-- drivers/usb/mon/mon_main.c | 97
-rw-r--r-- drivers/usb/mon/mon_text.c | 67
-rw-r--r-- drivers/usb/mon/usb_mon.h | 30
-rw-r--r-- drivers/usb/net/Kconfig | 6
-rw-r--r-- drivers/usb/net/asix.c | 4
-rw-r--r-- drivers/usb/net/cdc_ether.c | 60
-rw-r--r-- drivers/usb/net/gl620a.c | 26
-rw-r--r-- drivers/usb/net/kaweth.c | 37
-rw-r--r-- drivers/usb/net/pegasus.h | 4
-rw-r--r-- drivers/usb/net/rndis_host.c | 81
-rw-r--r-- drivers/usb/net/rtl8150.c | 3
-rw-r--r-- drivers/usb/serial/aircable.c | 21
-rw-r--r-- drivers/usb/serial/airprime.c | 1
-rw-r--r-- drivers/usb/serial/ark3116.c | 2
-rw-r--r-- drivers/usb/serial/belkin_sa.c | 1
-rw-r--r-- drivers/usb/serial/bus.c | 45
-rw-r--r-- drivers/usb/serial/cp2101.c | 9
-rw-r--r-- drivers/usb/serial/cyberjack.c | 3
-rw-r--r-- drivers/usb/serial/cypress_m8.c | 3
-rw-r--r-- drivers/usb/serial/digi_acceleport.c | 2
-rw-r--r-- drivers/usb/serial/empeg.c | 1
-rw-r--r-- drivers/usb/serial/ftdi_sio.c | 2
-rw-r--r-- drivers/usb/serial/ftdi_sio.h | 1
-rw-r--r-- drivers/usb/serial/funsoft.c | 1
-rw-r--r-- drivers/usb/serial/garmin_gps.c | 1
-rw-r--r-- drivers/usb/serial/generic.c | 35
-rw-r--r-- drivers/usb/serial/hp4x.c | 1
-rw-r--r-- drivers/usb/serial/io_edgeport.c | 417
-rw-r--r-- drivers/usb/serial/io_edgeport.h | 6
-rw-r--r-- drivers/usb/serial/io_tables.h | 61
-rw-r--r-- drivers/usb/serial/io_ti.c | 2
-rw-r--r-- drivers/usb/serial/io_usbvend.h | 5
-rw-r--r-- drivers/usb/serial/ipaq.c | 1
-rw-r--r-- drivers/usb/serial/ipw.c | 1
-rw-r--r-- drivers/usb/serial/ir-usb.c | 1
-rw-r--r-- drivers/usb/serial/keyspan.c | 49
-rw-r--r-- drivers/usb/serial/keyspan.h | 7
-rw-r--r-- drivers/usb/serial/keyspan_pda.c | 3
-rw-r--r-- drivers/usb/serial/kl5kusb105.c | 1
-rw-r--r-- drivers/usb/serial/kobil_sct.c | 1
-rw-r--r-- drivers/usb/serial/mct_u232.c | 1
-rw-r--r-- drivers/usb/serial/mos7720.c | 16
-rw-r--r-- drivers/usb/serial/mos7840.c | 16
-rw-r--r-- drivers/usb/serial/navman.c | 1
-rw-r--r-- drivers/usb/serial/omninet.c | 1
-rw-r--r-- drivers/usb/serial/option.c | 1
-rw-r--r-- drivers/usb/serial/pl2303.c | 1
-rw-r--r-- drivers/usb/serial/safe_serial.c | 1
-rw-r--r-- drivers/usb/serial/sierra.c | 29
-rw-r--r-- drivers/usb/serial/ti_usb_3410_5052.c | 2
-rw-r--r-- drivers/usb/serial/usb-serial.c | 102
-rw-r--r-- drivers/usb/serial/visor.c | 6
-rw-r--r-- drivers/usb/serial/visor.h | 1
-rw-r--r-- drivers/usb/serial/whiteheat.c | 2
-rw-r--r-- drivers/usb/storage/onetouch.c | 1
-rw-r--r-- drivers/usb/storage/scsiglue.c | 31
-rw-r--r-- drivers/usb/storage/unusual_devs.h | 9
-rw-r--r-- drivers/usb/storage/usb.c | 23
-rw-r--r-- drivers/video/Kconfig | 8
-rw-r--r-- drivers/video/output.c | 129
-rw-r--r-- drivers/video/pmag-ba-fb.c | 95
-rw-r--r-- drivers/video/pmagb-b-fb.c | 98
802 files changed, 60241 insertions, 35201 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index 0dd96d1afd39..f28dcb4ec8b3 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -30,7 +30,7 @@ obj-$(CONFIG_PARPORT) += parport/
 obj-y += base/ block/ misc/ mfd/ net/ media/
 obj-$(CONFIG_NUBUS) += nubus/
 obj-$(CONFIG_ATM) += atm/
-obj-$(CONFIG_PPC_PMAC) += macintosh/
+obj-y += macintosh/
 obj-$(CONFIG_IDE) += ide/
 obj-$(CONFIG_FC4) += fc4/
 obj-$(CONFIG_SCSI) += scsi/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index f4f000abc4e9..20eacc2c9e0e 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -3,6 +3,7 @@
 #
 
 menu "ACPI (Advanced Configuration and Power Interface) Support"
+	depends on !X86_NUMAQ
 	depends on !X86_VISWS
 	depends on !IA64_HP_SIM
 	depends on IA64 || X86
@@ -77,6 +78,20 @@ config ACPI_SLEEP_PROC_SLEEP
 	  Create /proc/acpi/sleep
 	  Deprecated by /sys/power/state
 
+config ACPI_PROCFS
+	bool "Procfs interface (deprecated)"
+	depends on ACPI
+	default y
+	---help---
+	  Procfs interface for ACPI is made optional for back-compatible.
+	  As the same functions are duplicated in sysfs interface
+	  and this proc interface will be removed some time later,
+	  it's marked as deprecated.
+	  ( /proc/acpi/debug_layer && debug_level are deprecated by
+	    /sys/module/acpi/parameters/debug_layer && debug_level.
+	    /proc/acpi/info is deprecated by
+	    /sys/module/acpi/parameters/acpica_version )
+
 config ACPI_AC
 	tristate "AC Adapter"
 	depends on X86
@@ -107,7 +122,7 @@ config ACPI_BUTTON
 
 config ACPI_VIDEO
 	tristate "Video"
-	depends on X86
+	depends on X86 && BACKLIGHT_CLASS_DEVICE
 	help
 	  This driver implement the ACPI Extensions For Display Adapters
 	  for integrated graphics devices on motherboard, as specified in
@@ -139,6 +154,13 @@ config ACPI_DOCK
 	help
 	  This driver adds support for ACPI controlled docking stations
 
+config ACPI_BAY
+	tristate "Removable Drive Bay (EXPERIMENTAL)"
+	depends on EXPERIMENTAL
+	help
+	  This driver adds support for ACPI controlled removable drive
+	  bays such as the IBM ultrabay or the Dell Module Bay.
+
 config ACPI_PROCESSOR
 	tristate "Processor"
 	default y
@@ -186,19 +208,22 @@ config ACPI_ASUS
 
 	  Note: display switching code is currently considered EXPERIMENTAL,
 	  toying with these values may even lock your machine.
 
 	  All settings are changed via /proc/acpi/asus directory entries. Owner
 	  and group for these entries can be set with asus_uid and asus_gid
 	  parameters.
 
 	  More information and a userspace daemon for handling the extra buttons
 	  at <http://sourceforge.net/projects/acpi4asus/>.
 
 	  If you have an ACPI-compatible ASUS laptop, say Y or M here. This
 	  driver is still under development, so if your laptop is unsupported or
 	  something works not quite as expected, please use the mailing list
-	  available on the above page (acpi4asus-user@lists.sourceforge.net)
+	  available on the above page (acpi4asus-user@lists.sourceforge.net).
 
+	  NOTE: This driver is deprecated and will probably be removed soon,
+	  use asus-laptop instead.
+
 config ACPI_IBM
 	tristate "IBM ThinkPad Laptop Extras"
 	depends on X86
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index bce7ca27b429..856c32bccacb 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -37,13 +37,15 @@ endif
 
 obj-y += sleep/
 obj-y += bus.o glue.o
+obj-y += scan.o
 obj-$(CONFIG_ACPI_AC) += ac.o
 obj-$(CONFIG_ACPI_BATTERY) += battery.o
 obj-$(CONFIG_ACPI_BUTTON) += button.o
 obj-$(CONFIG_ACPI_EC) += ec.o
 obj-$(CONFIG_ACPI_FAN) += fan.o
 obj-$(CONFIG_ACPI_DOCK) += dock.o
-obj-$(CONFIG_ACPI_VIDEO) += video.o
+obj-$(CONFIG_ACPI_BAY) += bay.o
+obj-$(CONFIG_ACPI_VIDEO) += video.o
 obj-$(CONFIG_ACPI_HOTKEY) += hotkey.o
 obj-y += pci_root.o pci_link.o pci_irq.o pci_bind.o
 obj-$(CONFIG_ACPI_POWER) += power.o
@@ -56,7 +58,6 @@ obj-$(CONFIG_ACPI_NUMA) += numa.o
 obj-$(CONFIG_ACPI_ASUS) += asus_acpi.o
 obj-$(CONFIG_ACPI_IBM) += ibm_acpi.o
 obj-$(CONFIG_ACPI_TOSHIBA) += toshiba_acpi.o
-obj-y += scan.o motherboard.o
 obj-$(CONFIG_ACPI_HOTPLUG_MEMORY) += acpi_memhotplug.o
 obj-y += cm_sbs.o
 obj-$(CONFIG_ACPI_SBS) += i2c_ec.o sbs.o
diff --git a/drivers/acpi/asus_acpi.c b/drivers/acpi/asus_acpi.c
index 396140bbbe57..31ad70a6e22e 100644
--- a/drivers/acpi/asus_acpi.c
+++ b/drivers/acpi/asus_acpi.c
@@ -26,7 +26,7 @@
  * Pontus Fuchs   - Helper functions, cleanup
  * Johann Wiesner - Small compile fixes
  * John Belmonte  - ACPI code for Toshiba laptop was a good starting point.
- * Éric Burghard  - LED display support for W1N
+ * �ic Burghard  - LED display support for W1N
  *
  */
 
@@ -1128,7 +1128,6 @@ static int asus_model_match(char *model)
 static int asus_hotk_get_info(void)
 {
 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
-	struct acpi_buffer dsdt = { ACPI_ALLOCATE_BUFFER, NULL };
 	union acpi_object *model = NULL;
 	int bsts_result;
 	char *string = NULL;
@@ -1142,11 +1141,9 @@ static int asus_hotk_get_info(void)
 	 * HID), this bit will be moved. A global variable asus_info contains
 	 * the DSDT header.
 	 */
-	status = acpi_get_table(ACPI_TABLE_ID_DSDT, 1, &dsdt);
+	status = acpi_get_table(ACPI_SIG_DSDT, 1, &asus_info);
 	if (ACPI_FAILURE(status))
 		printk(KERN_WARNING "  Couldn't get the DSDT table header\n");
-	else
-		asus_info = dsdt.pointer;
 
 	/* We have to write 0 on init this far for all ASUS models */
 	if (!write_acpi_int(hotk->handle, "INIT", 0, &buffer)) {
@@ -1358,8 +1355,6 @@ static void __exit asus_acpi_exit(void)
 	acpi_bus_unregister_driver(&asus_hotk_driver);
 	remove_proc_entry(PROC_ASUS, acpi_root_dir);
 
-	kfree(asus_info);
-
 	return;
 }
 
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 5f43e0d14899..2f4521a48fe7 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -64,7 +64,7 @@ extern void *acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir);
 
 static int acpi_battery_add(struct acpi_device *device);
 static int acpi_battery_remove(struct acpi_device *device, int type);
-static int acpi_battery_resume(struct acpi_device *device, int status);
+static int acpi_battery_resume(struct acpi_device *device);
 
 static struct acpi_driver acpi_battery_driver = {
 	.name = ACPI_BATTERY_DRIVER_NAME,
@@ -753,7 +753,7 @@ static int acpi_battery_remove(struct acpi_device *device, int type)
 }
 
 /* this is needed to learn about changes made in suspended state */
-static int acpi_battery_resume(struct acpi_device *device, int state)
+static int acpi_battery_resume(struct acpi_device *device)
 {
 	struct acpi_battery *battery;
 
diff --git a/drivers/acpi/bay.c b/drivers/acpi/bay.c
new file mode 100644
index 000000000000..91082ce6f5d1
--- /dev/null
+++ b/drivers/acpi/bay.c
@@ -0,0 +1,490 @@
+/*
+ * bay.c - ACPI removable drive bay driver
+ *
+ * Copyright (C) 2006 Kristen Carlson Accardi <kristen.c.accardi@intel.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/notifier.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+#include <linux/seq_file.h>
+#include <asm/uaccess.h>
+#include <linux/platform_device.h>
+
+#define ACPI_BAY_DRIVER_NAME "ACPI Removable Drive Bay Driver"
+
+ACPI_MODULE_NAME("bay")
+MODULE_AUTHOR("Kristen Carlson Accardi");
+MODULE_DESCRIPTION(ACPI_BAY_DRIVER_NAME);
+MODULE_LICENSE("GPL");
+#define ACPI_BAY_CLASS "bay"
+#define ACPI_BAY_COMPONENT	0x10000000
+#define _COMPONENT ACPI_BAY_COMPONENT
+#define bay_dprintk(h,s) {\
+	char prefix[80] = {'\0'};\
+	struct acpi_buffer buffer = {sizeof(prefix), prefix};\
+	acpi_get_name(h, ACPI_FULL_PATHNAME, &buffer);\
+	printk(KERN_DEBUG PREFIX "%s: %s\n", prefix, s); }
+static void bay_notify(acpi_handle handle, u32 event, void *data);
+static int acpi_bay_add(struct acpi_device *device);
+static int acpi_bay_remove(struct acpi_device *device, int type);
+
+static struct acpi_driver acpi_bay_driver = {
+	.name = ACPI_BAY_DRIVER_NAME,
+	.class = ACPI_BAY_CLASS,
+	.ids = ACPI_BAY_HID,
+	.ops = {
+		.add = acpi_bay_add,
+		.remove = acpi_bay_remove,
+	},
+};
+
+struct bay {
+	acpi_handle handle;
+	char *name;
+	struct list_head list;
+	struct platform_device *pdev;
+};
+
+static LIST_HEAD(drive_bays);
+
+
+/*****************************************************************************
+ *                          Drive Bay functions                              *
+ *****************************************************************************/
+/**
+ * is_ejectable - see if a device is ejectable
+ * @handle: acpi handle of the device
+ *
+ * If an acpi object has a _EJ0 method, then it is ejectable
+ */
+static int is_ejectable(acpi_handle handle)
+{
+	acpi_status status;
+	acpi_handle tmp;
+
+	status = acpi_get_handle(handle, "_EJ0", &tmp);
+	if (ACPI_FAILURE(status))
+		return 0;
+	return 1;
+}
+
+/**
+ * bay_present - see if the bay device is present
+ * @bay: the drive bay
+ *
+ * execute the _STA method.
+ */
+static int bay_present(struct bay *bay)
+{
+	unsigned long sta;
+	acpi_status status;
+
+	if (bay) {
+		status = acpi_evaluate_integer(bay->handle, "_STA", NULL, &sta);
+		if (ACPI_SUCCESS(status) && sta)
+			return 1;
+	}
+	return 0;
+}
+
+/**
+ * eject_device - respond to an eject request
+ * @handle - the device to eject
+ *
+ * Call this devices _EJ0 method.
+ */
+static void eject_device(acpi_handle handle)
+{
+	struct acpi_object_list arg_list;
+	union acpi_object arg;
+
+	bay_dprintk(handle, "Ejecting device");
+
+	arg_list.count = 1;
+	arg_list.pointer = &arg;
+	arg.type = ACPI_TYPE_INTEGER;
+	arg.integer.value = 1;
+
+	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_EJ0",
+					      &arg_list, NULL)))
+		pr_debug("Failed to evaluate _EJ0!\n");
+}
+
+/*
+ * show_present - read method for "present" file in sysfs
+ */
+static ssize_t show_present(struct device *dev,
+			    struct device_attribute *attr, char *buf)
+{
+	struct bay *bay = dev_get_drvdata(dev);
+	return snprintf(buf, PAGE_SIZE, "%d\n", bay_present(bay));
+
+}
+DEVICE_ATTR(present, S_IRUGO, show_present, NULL);
+
+/*
+ * write_eject - write method for "eject" file in sysfs
+ */
+static ssize_t write_eject(struct device *dev, struct device_attribute *attr,
+			   const char *buf, size_t count)
+{
+	struct bay *bay = dev_get_drvdata(dev);
+
+	if (!count)
+		return -EINVAL;
+
+	eject_device(bay->handle);
+	return count;
+}
+DEVICE_ATTR(eject, S_IWUSR, NULL, write_eject);
+
+/**
+ * is_ata - see if a device is an ata device
+ * @handle: acpi handle of the device
+ *
+ * If an acpi object has one of 4 ATA ACPI methods defined,
+ * then it is an ATA device
+ */
+static int is_ata(acpi_handle handle)
+{
+	acpi_handle tmp;
+
+	if ((ACPI_SUCCESS(acpi_get_handle(handle, "_GTF", &tmp))) ||
+	    (ACPI_SUCCESS(acpi_get_handle(handle, "_GTM", &tmp))) ||
+	    (ACPI_SUCCESS(acpi_get_handle(handle, "_STM", &tmp))) ||
+	    (ACPI_SUCCESS(acpi_get_handle(handle, "_SDD", &tmp))))
+		return 1;
+
+	return 0;
+}
+
+/**
+ * parent_is_ata(acpi_handle handle)
+ *
+ */
+static int parent_is_ata(acpi_handle handle)
+{
+	acpi_handle phandle;
+
+	if (acpi_get_parent(handle, &phandle))
+		return 0;
+
+	return is_ata(phandle);
+}
+
+/**
+ * is_ejectable_bay - see if a device is an ejectable drive bay
+ * @handle: acpi handle of the device
+ *
+ * If an acpi object is ejectable and has one of the ACPI ATA
+ * methods defined, then we can safely call it an ejectable
+ * drive bay
+ */
+static int is_ejectable_bay(acpi_handle handle)
+{
+	if ((is_ata(handle) || parent_is_ata(handle)) && is_ejectable(handle))
+		return 1;
+	return 0;
+}
+
+/**
+ * eject_removable_drive - try to eject this drive
+ * @dev : the device structure of the drive
+ *
+ * If a device is a removable drive that requires an _EJ0 method
+ * to be executed in order to safely remove from the system, do
+ * it.  ATM - always returns success
+ */
+int eject_removable_drive(struct device *dev)
+{
+	acpi_handle handle = DEVICE_ACPI_HANDLE(dev);
+
+	if (handle) {
+		bay_dprintk(handle, "Got device handle");
+		if (is_ejectable_bay(handle))
+			eject_device(handle);
+	} else {
+		printk("No acpi handle for device\n");
+	}
+
+	/* should I return an error code? */
+	return 0;
+}
+EXPORT_SYMBOL_GPL(eject_removable_drive);
+
+static int acpi_bay_add(struct acpi_device *device)
+{
+	bay_dprintk(device->handle, "adding bay device");
+	strcpy(acpi_device_name(device), "Dockable Bay");
+	strcpy(acpi_device_class(device), "bay");
+	return 0;
+}
+
+static int acpi_bay_add_fs(struct bay *bay)
+{
+	int ret;
+	struct device *dev = &bay->pdev->dev;
+
+	ret = device_create_file(dev, &dev_attr_present);
+	if (ret)
+		goto add_fs_err;
+	ret = device_create_file(dev, &dev_attr_eject);
+	if (ret) {
+		device_remove_file(dev, &dev_attr_present);
+		goto add_fs_err;
+	}
+	return 0;
+
+ add_fs_err:
+	bay_dprintk(bay->handle, "Error adding sysfs files\n");
+	return ret;
+}
+
+static void acpi_bay_remove_fs(struct bay *bay)
+{
+	struct device *dev = &bay->pdev->dev;
+
+	/* cleanup sysfs */
+	device_remove_file(dev, &dev_attr_present);
+	device_remove_file(dev, &dev_attr_eject);
+}
+
+static int bay_is_dock_device(acpi_handle handle)
+{
+	acpi_handle parent;
+
+	acpi_get_parent(handle, &parent);
+
+	/* if the device or it's parent is dependent on the
+	 * dock, then we are a dock device
+	 */
+	return (is_dock_device(handle) || is_dock_device(parent));
+}
+
+static int bay_add(acpi_handle handle, int id)
+{
+	acpi_status status;
+	struct bay *new_bay;
+	struct platform_device *pdev;
+	struct acpi_buffer nbuffer = {ACPI_ALLOCATE_BUFFER, NULL};
+	acpi_get_name(handle, ACPI_FULL_PATHNAME, &nbuffer);
+
+	bay_dprintk(handle, "Adding notify handler");
+
+	/*
+	 * Initialize bay device structure
+	 */
+	new_bay = kzalloc(sizeof(*new_bay), GFP_ATOMIC);
+	INIT_LIST_HEAD(&new_bay->list);
+	new_bay->handle = handle;
+	new_bay->name = (char *)nbuffer.pointer;
+
+	/* initialize platform device stuff */
+	pdev = platform_device_register_simple(ACPI_BAY_CLASS, id, NULL, 0);
+	if (pdev == NULL) {
+		printk(KERN_ERR PREFIX "Error registering bay device\n");
+		goto bay_add_err;
+	}
+	new_bay->pdev = pdev;
+	platform_set_drvdata(pdev, new_bay);
+
+	if (acpi_bay_add_fs(new_bay)) {
+		platform_device_unregister(new_bay->pdev);
+		goto bay_add_err;
+	}
+
+	/* register for events on this device */
+	status = acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
+					     bay_notify, new_bay);
+	if (ACPI_FAILURE(status)) {
+		printk(KERN_ERR PREFIX "Error installing bay notify handler\n");
+	}
+
+	/* if we are on a dock station, we should register for dock
+	 * notifications.
+	 */
+	if (bay_is_dock_device(handle)) {
+		bay_dprintk(handle, "Is dependent on dock\n");
+		register_hotplug_dock_device(handle, bay_notify, new_bay);
+	}
+	list_add(&new_bay->list, &drive_bays);
+	printk(KERN_INFO PREFIX "Bay [%s] Added\n", new_bay->name);
+	return 0;
+
+bay_add_err:
+	kfree(new_bay->name);
+	kfree(new_bay);
+	return -ENODEV;
+}
+
+static int acpi_bay_remove(struct acpi_device *device, int type)
+{
+	/*** FIXME: do something here */
+	return 0;
+}
+
+/**
+ * bay_create_acpi_device - add new devices to acpi
+ * @handle - handle of the device to add
+ *
+ * This function will create a new acpi_device for the given
+ * handle if one does not exist already.  This should cause
+ * acpi to scan for drivers for the given devices, and call
+ * matching driver's add routine.
+ *
+ * Returns a pointer to the acpi_device corresponding to the handle.
+ */
+static struct acpi_device * bay_create_acpi_device(acpi_handle handle)
+{
+	struct acpi_device *device = NULL;
+	struct acpi_device *parent_device;
+	acpi_handle parent;
+	int ret;
+
+	bay_dprintk(handle, "Trying to get device");
+	if (acpi_bus_get_device(handle, &device)) {
+		/*
+		 * no device created for this object,
+		 * so we should create one.
+		 */
+		bay_dprintk(handle, "No device for handle");
+		acpi_get_parent(handle, &parent);
+		if (acpi_bus_get_device(parent, &parent_device))
+			parent_device = NULL;
+
+		ret = acpi_bus_add(&device, parent_device, handle,
+				   ACPI_BUS_TYPE_DEVICE);
+		if (ret) {
+			pr_debug("error adding bus, %x\n",
+				 -ret);
+			return NULL;
+		}
+	}
+	return device;
+}
+
+/**
+ * bay_notify - act upon an acpi bay notification
+ * @handle: the bay handle
+ * @event: the acpi event
+ * @data: our driver data struct
+ *
+ */
+static void bay_notify(acpi_handle handle, u32 event, void *data)
+{
+	struct acpi_device *dev;
+
+	bay_dprintk(handle, "Bay event");
+
+	switch(event) {
+	case ACPI_NOTIFY_BUS_CHECK:
+		printk("Bus Check\n");
+	case ACPI_NOTIFY_DEVICE_CHECK:
+		printk("Device Check\n");
+		dev = bay_create_acpi_device(handle);
+		if (dev)
+			acpi_bus_generate_event(dev, event, 0);
+		else
+			printk("No device for generating event\n");
+		/* wouldn't it be a good idea to just rescan SATA
+		 * right here?
+		 */
+		break;
+	case ACPI_NOTIFY_EJECT_REQUEST:
+		printk("Eject request\n");
+		dev = bay_create_acpi_device(handle);
+		if (dev)
+			acpi_bus_generate_event(dev, event, 0);
+		else
+			printk("No device for generating eventn");
+
+		/* wouldn't it be a good idea to just call the
+		 * eject_device here if we were a SATA device?
+		 */
+		break;
+	default:
+		printk("unknown event %d\n", event);
+	}
+}
+
+static acpi_status
+find_bay(acpi_handle handle, u32 lvl, void *context, void **rv)
+{
+	int *count = (int *)context;
+
+	/*
+	 * there could be more than one ejectable bay.
+	 * so, just return AE_OK always so that every object
+	 * will be checked.
+	 */
+	if (is_ejectable_bay(handle)) {
+		bay_dprintk(handle, "found ejectable bay");
+		if (!bay_add(handle, *count))
+			(*count)++;
+	}
+	return AE_OK;
+}
+
+static int __init bay_init(void)
+{
+	int bays = 0;
+
+	INIT_LIST_HEAD(&drive_bays);
+
+	/* look for dockable drive bays */
+	acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+			    ACPI_UINT32_MAX, find_bay, &bays, NULL);
+
+	if (bays)
+		if ((acpi_bus_register_driver(&acpi_bay_driver) < 0))
+			printk(KERN_ERR "Unable to register bay driver\n");
+
+	if (!bays)
+		return -ENODEV;
+
+	return 0;
+}
+
+static void __exit bay_exit(void)
+{
+	struct bay *bay, *tmp;
+
+	list_for_each_entry_safe(bay, tmp, &drive_bays, list) {
+		if (is_dock_device(bay->handle))
+			unregister_hotplug_dock_device(bay->handle);
+		acpi_bay_remove_fs(bay);
+		acpi_remove_notify_handler(bay->handle, ACPI_SYSTEM_NOTIFY,
+					   bay_notify);
+		platform_device_unregister(bay->pdev);
+		kfree(bay->name);
+		kfree(bay);
+	}
+
+	acpi_bus_unregister_driver(&acpi_bay_driver);
+}
+
+postcore_initcall(bay_init);
+module_exit(bay_exit);
+
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index f9c972b26f4f..f289fd41e77d 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -44,7 +44,7 @@ struct acpi_blacklist_item {
 	char oem_id[7];
 	char oem_table_id[9];
 	u32 oem_revision;
-	acpi_table_type table;
+	char *table;
 	enum acpi_blacklist_predicates oem_revision_predicate;
 	char *reason;
 	u32 is_critical_error;
@@ -56,18 +56,18 @@ struct acpi_blacklist_item {
  */
 static struct acpi_blacklist_item acpi_blacklist[] __initdata = {
 	/* Compaq Presario 1700 */
-	{"PTLTD ", "  DSDT  ", 0x06040000, ACPI_DSDT, less_than_or_equal,
+	{"PTLTD ", "  DSDT  ", 0x06040000, ACPI_SIG_DSDT, less_than_or_equal,
 	 "Multiple problems", 1},
 	/* Sony FX120, FX140, FX150? */
-	{"SONY  ", "U0      ", 0x20010313, ACPI_DSDT, less_than_or_equal,
+	{"SONY  ", "U0      ", 0x20010313, ACPI_SIG_DSDT, less_than_or_equal,
 	 "ACPI driver problem", 1},
 	/* Compaq Presario 800, Insyde BIOS */
-	{"INT440", "SYSFexxx", 0x00001001, ACPI_DSDT, less_than_or_equal,
+	{"INT440", "SYSFexxx", 0x00001001, ACPI_SIG_DSDT, less_than_or_equal,
 	 "Does not use _REG to protect EC OpRegions", 1},
 	/* IBM 600E - _ADR should return 7, but it returns 1 */
-	{"IBM   ", "TP600E  ", 0x00000105, ACPI_DSDT, less_than_or_equal,
+	{"IBM   ", "TP600E  ", 0x00000105, ACPI_SIG_DSDT, less_than_or_equal,
 	 "Incorrect _ADR", 1},
-	{"ASUS\0\0", "P2B-S   ", 0, ACPI_DSDT, all_versions,
+	{"ASUS\0\0", "P2B-S   ", 0, ACPI_SIG_DSDT, all_versions,
 	 "Bogus PCI routing", 1},
 
 	{""}
@@ -79,7 +79,7 @@ static int __init blacklist_by_year(void)
79{ 79{
80 int year = dmi_get_year(DMI_BIOS_DATE); 80 int year = dmi_get_year(DMI_BIOS_DATE);
81 /* Doesn't exist? Likely an old system */ 81 /* Doesn't exist? Likely an old system */
82 if (year == -1) 82 if (year == -1)
83 return 1; 83 return 1;
84 /* 0? Likely a buggy new BIOS */ 84 /* 0? Likely a buggy new BIOS */
85 if (year == 0) 85 if (year == 0)
@@ -103,22 +103,21 @@ int __init acpi_blacklisted(void)
103{ 103{
104 int i = 0; 104 int i = 0;
105 int blacklisted = 0; 105 int blacklisted = 0;
106 struct acpi_table_header *table_header; 106 struct acpi_table_header table_header;
107 107
108 while (acpi_blacklist[i].oem_id[0] != '\0') { 108 while (acpi_blacklist[i].oem_id[0] != '\0') {
109 if (acpi_get_table_header_early 109 if (acpi_get_table_header(acpi_blacklist[i].table, 0, &table_header)) {
110 (acpi_blacklist[i].table, &table_header)) {
111 i++; 110 i++;
112 continue; 111 continue;
113 } 112 }
114 113
115 if (strncmp(acpi_blacklist[i].oem_id, table_header->oem_id, 6)) { 114 if (strncmp(acpi_blacklist[i].oem_id, table_header.oem_id, 6)) {
116 i++; 115 i++;
117 continue; 116 continue;
118 } 117 }
119 118
120 if (strncmp 119 if (strncmp
121 (acpi_blacklist[i].oem_table_id, table_header->oem_table_id, 120 (acpi_blacklist[i].oem_table_id, table_header.oem_table_id,
122 8)) { 121 8)) {
123 i++; 122 i++;
124 continue; 123 continue;
@@ -127,14 +126,14 @@ int __init acpi_blacklisted(void)
127 if ((acpi_blacklist[i].oem_revision_predicate == all_versions) 126 if ((acpi_blacklist[i].oem_revision_predicate == all_versions)
128 || (acpi_blacklist[i].oem_revision_predicate == 127 || (acpi_blacklist[i].oem_revision_predicate ==
129 less_than_or_equal 128 less_than_or_equal
130 && table_header->oem_revision <= 129 && table_header.oem_revision <=
131 acpi_blacklist[i].oem_revision) 130 acpi_blacklist[i].oem_revision)
132 || (acpi_blacklist[i].oem_revision_predicate == 131 || (acpi_blacklist[i].oem_revision_predicate ==
133 greater_than_or_equal 132 greater_than_or_equal
134 && table_header->oem_revision >= 133 && table_header.oem_revision >=
135 acpi_blacklist[i].oem_revision) 134 acpi_blacklist[i].oem_revision)
136 || (acpi_blacklist[i].oem_revision_predicate == equal 135 || (acpi_blacklist[i].oem_revision_predicate == equal
137 && table_header->oem_revision == 136 && table_header.oem_revision ==
138 acpi_blacklist[i].oem_revision)) { 137 acpi_blacklist[i].oem_revision)) {
139 138
140 printk(KERN_ERR PREFIX 139 printk(KERN_ERR PREFIX
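The blacklist hunk above replaces acpi_get_table_header_early(), which handed back a pointer into the mapped table, with the new acpi_get_table_header(), which copies the header into a caller-owned struct. A minimal sketch of the new call pattern, assuming only the ACPICA API shown in the hunk (the DSDT signature and OEM string are example values):

	static int __init example_check_dsdt_oem(void)
	{
		struct acpi_table_header header;	/* filled by value; no mapping kept */
		acpi_status status;

		status = acpi_get_table_header(ACPI_SIG_DSDT, 0, &header);
		if (ACPI_FAILURE(status))
			return -ENODEV;

		/* oem_id is fixed-width and not NUL-terminated, hence strncmp */
		return strncmp(header.oem_id, "PTLTD ", 6) == 0;
	}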
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 766332e45592..c26468da4295 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -44,9 +44,6 @@ ACPI_MODULE_NAME("acpi_bus")
44extern void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger); 44extern void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger);
45#endif 45#endif
46 46
47struct fadt_descriptor acpi_fadt;
48EXPORT_SYMBOL(acpi_fadt);
49
50struct acpi_device *acpi_root; 47struct acpi_device *acpi_root;
51struct proc_dir_entry *acpi_root_dir; 48struct proc_dir_entry *acpi_root_dir;
52EXPORT_SYMBOL(acpi_root_dir); 49EXPORT_SYMBOL(acpi_root_dir);
@@ -195,7 +192,7 @@ int acpi_bus_set_power(acpi_handle handle, int state)
195 192
196 if (!device->flags.power_manageable) { 193 if (!device->flags.power_manageable) {
197 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device `[%s]' is not power manageable\n", 194 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device `[%s]' is not power manageable\n",
198 device->kobj.name)); 195 device->dev.kobj.name));
199 return -ENODEV; 196 return -ENODEV;
200 } 197 }
201 /* 198 /*
@@ -582,11 +579,12 @@ static int __init acpi_bus_init_irq(void)
582 return 0; 579 return 0;
583} 580}
584 581
582acpi_native_uint acpi_gbl_permanent_mmap;
583
584
585void __init acpi_early_init(void) 585void __init acpi_early_init(void)
586{ 586{
587 acpi_status status = AE_OK; 587 acpi_status status = AE_OK;
588 struct acpi_buffer buffer = { sizeof(acpi_fadt), &acpi_fadt };
589
590 588
591 if (acpi_disabled) 589 if (acpi_disabled)
592 return; 590 return;
@@ -597,6 +595,15 @@ void __init acpi_early_init(void)
597 if (!acpi_strict) 595 if (!acpi_strict)
598 acpi_gbl_enable_interpreter_slack = TRUE; 596 acpi_gbl_enable_interpreter_slack = TRUE;
599 597
598 acpi_gbl_permanent_mmap = 1;
599
600 status = acpi_reallocate_root_table();
601 if (ACPI_FAILURE(status)) {
602 printk(KERN_ERR PREFIX
603 "Unable to reallocate ACPI tables\n");
604 goto error0;
605 }
606
600 status = acpi_initialize_subsystem(); 607 status = acpi_initialize_subsystem();
601 if (ACPI_FAILURE(status)) { 608 if (ACPI_FAILURE(status)) {
602 printk(KERN_ERR PREFIX 609 printk(KERN_ERR PREFIX
@@ -611,32 +618,25 @@ void __init acpi_early_init(void)
611 goto error0; 618 goto error0;
612 } 619 }
613 620
614 /*
615 * Get a separate copy of the FADT for use by other drivers.
616 */
617 status = acpi_get_table(ACPI_TABLE_ID_FADT, 1, &buffer);
618 if (ACPI_FAILURE(status)) {
619 printk(KERN_ERR PREFIX "Unable to get the FADT\n");
620 goto error0;
621 }
622#ifdef CONFIG_X86 621#ifdef CONFIG_X86
623 if (!acpi_ioapic) { 622 if (!acpi_ioapic) {
624 extern acpi_interrupt_flags acpi_sci_flags; 623 extern u8 acpi_sci_flags;
625 624
626 /* compatible (0) means level (3) */ 625 /* compatible (0) means level (3) */
627 if (acpi_sci_flags.trigger == 0) 626 if (!(acpi_sci_flags & ACPI_MADT_TRIGGER_MASK)) {
628 acpi_sci_flags.trigger = 3; 627 acpi_sci_flags &= ~ACPI_MADT_TRIGGER_MASK;
629 628 acpi_sci_flags |= ACPI_MADT_TRIGGER_LEVEL;
629 }
630 /* Set PIC-mode SCI trigger type */ 630 /* Set PIC-mode SCI trigger type */
631 acpi_pic_sci_set_trigger(acpi_fadt.sci_int, 631 acpi_pic_sci_set_trigger(acpi_gbl_FADT.sci_interrupt,
632 acpi_sci_flags.trigger); 632 (acpi_sci_flags & ACPI_MADT_TRIGGER_MASK) >> 2);
633 } else { 633 } else {
634 extern int acpi_sci_override_gsi; 634 extern int acpi_sci_override_gsi;
635 /* 635 /*
636 * now that acpi_fadt is initialized, 636 * now that acpi_gbl_FADT is initialized,
637 * update it with result from INT_SRC_OVR parsing 637 * update it with result from INT_SRC_OVR parsing
638 */ 638 */
639 acpi_fadt.sci_int = acpi_sci_override_gsi; 639 acpi_gbl_FADT.sci_interrupt = acpi_sci_override_gsi;
640 } 640 }
641#endif 641#endif
642 642
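In the bus.c hunk above, acpi_sci_flags changes from a structure with a .trigger member to a raw byte of MADT INTI flags, where the trigger mode occupies bits 2-3. A sketch of the decode, assuming the ACPI_MADT_TRIGGER_* definitions used in the hunk (the helper name is hypothetical):

	/* Sketch: extract the SCI trigger mode from the raw flags byte;
	 * "conforms" (0) is treated as level (3), per the comment above. */
	static u8 example_sci_trigger(u8 sci_flags)
	{
		u8 trigger = (sci_flags & ACPI_MADT_TRIGGER_MASK) >> 2;

		return trigger ? trigger : 3;	/* 3 == level */
	}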
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index ac860583c203..c726612fafb6 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -75,7 +75,7 @@ static int acpi_button_state_open_fs(struct inode *inode, struct file *file);
75static struct acpi_driver acpi_button_driver = { 75static struct acpi_driver acpi_button_driver = {
76 .name = ACPI_BUTTON_DRIVER_NAME, 76 .name = ACPI_BUTTON_DRIVER_NAME,
77 .class = ACPI_BUTTON_CLASS, 77 .class = ACPI_BUTTON_CLASS,
78 .ids = "ACPI_FPB,ACPI_FSB,PNP0C0D,PNP0C0C,PNP0C0E", 78 .ids = "button_power,button_sleep,PNP0C0D,PNP0C0C,PNP0C0E",
79 .ops = { 79 .ops = {
80 .add = acpi_button_add, 80 .add = acpi_button_add,
81 .remove = acpi_button_remove, 81 .remove = acpi_button_remove,
diff --git a/drivers/acpi/container.c b/drivers/acpi/container.c
index 0a1863ec91f3..69a68fd394cf 100644
--- a/drivers/acpi/container.c
+++ b/drivers/acpi/container.c
@@ -167,7 +167,7 @@ static void container_notify_cb(acpi_handle handle, u32 type, void *context)
167 if (ACPI_FAILURE(status) || !device) { 167 if (ACPI_FAILURE(status) || !device) {
168 result = container_device_add(&device, handle); 168 result = container_device_add(&device, handle);
169 if (!result) 169 if (!result)
170 kobject_uevent(&device->kobj, 170 kobject_uevent(&device->dev.kobj,
171 KOBJ_ONLINE); 171 KOBJ_ONLINE);
172 else 172 else
173 printk("Failed to add container\n"); 173 printk("Failed to add container\n");
@@ -175,13 +175,13 @@ static void container_notify_cb(acpi_handle handle, u32 type, void *context)
175 } else { 175 } else {
176 if (ACPI_SUCCESS(status)) { 176 if (ACPI_SUCCESS(status)) {
177 /* device exist and this is a remove request */ 177 /* device exist and this is a remove request */
178 kobject_uevent(&device->kobj, KOBJ_OFFLINE); 178 kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
179 } 179 }
180 } 180 }
181 break; 181 break;
182 case ACPI_NOTIFY_EJECT_REQUEST: 182 case ACPI_NOTIFY_EJECT_REQUEST:
183 if (!acpi_bus_get_device(handle, &device) && device) { 183 if (!acpi_bus_get_device(handle, &device) && device) {
184 kobject_uevent(&device->kobj, KOBJ_OFFLINE); 184 kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
185 } 185 }
186 break; 186 break;
187 default: 187 default:
diff --git a/drivers/acpi/debug.c b/drivers/acpi/debug.c
index 35c6af8a83cd..d48f65a8f658 100644
--- a/drivers/acpi/debug.c
+++ b/drivers/acpi/debug.c
@@ -13,14 +13,11 @@
13 13
14#define _COMPONENT ACPI_SYSTEM_COMPONENT 14#define _COMPONENT ACPI_SYSTEM_COMPONENT
15ACPI_MODULE_NAME("debug") 15ACPI_MODULE_NAME("debug")
16#define ACPI_SYSTEM_FILE_DEBUG_LAYER "debug_layer" 16
17#define ACPI_SYSTEM_FILE_DEBUG_LEVEL "debug_level"
18#ifdef MODULE_PARAM_PREFIX 17#ifdef MODULE_PARAM_PREFIX
19#undef MODULE_PARAM_PREFIX 18#undef MODULE_PARAM_PREFIX
20#endif 19#endif
21#define MODULE_PARAM_PREFIX 20#define MODULE_PARAM_PREFIX "acpi."
22 module_param(acpi_dbg_layer, uint, 0400);
23module_param(acpi_dbg_level, uint, 0400);
24 21
25struct acpi_dlayer { 22struct acpi_dlayer {
26 const char *name; 23 const char *name;
@@ -86,6 +83,60 @@ static const struct acpi_dlevel acpi_debug_levels[] = {
86 ACPI_DEBUG_INIT(ACPI_LV_EVENTS), 83 ACPI_DEBUG_INIT(ACPI_LV_EVENTS),
87}; 84};
88 85
86/* --------------------------------------------------------------------------
87 FS Interface (/sys)
88 -------------------------------------------------------------------------- */
89static int param_get_debug_layer(char *buffer, struct kernel_param *kp) {
90 int result = 0;
91 int i;
92
93 result = sprintf(buffer, "%-25s\tHex SET\n", "Description");
94
 95 for (i = 0; i < ARRAY_SIZE(acpi_debug_layers); i++) {
96 result += sprintf(buffer+result, "%-25s\t0x%08lX [%c]\n",
97 acpi_debug_layers[i].name,
98 acpi_debug_layers[i].value,
99 (acpi_dbg_layer & acpi_debug_layers[i].value) ? '*' : ' ');
100 }
101 result += sprintf(buffer+result, "%-25s\t0x%08X [%c]\n", "ACPI_ALL_DRIVERS",
102 ACPI_ALL_DRIVERS,
103 (acpi_dbg_layer & ACPI_ALL_DRIVERS) ==
104 ACPI_ALL_DRIVERS ? '*' : (acpi_dbg_layer &
105 ACPI_ALL_DRIVERS) == 0 ? ' ' : '-');
106 result += sprintf(buffer+result, "--\ndebug_layer = 0x%08X ( * = enabled)\n", acpi_dbg_layer);
107
108 return result;
109}
110
111static int param_get_debug_level(char *buffer, struct kernel_param *kp) {
112 int result = 0;
113 int i;
114
115 result = sprintf(buffer, "%-25s\tHex SET\n", "Description");
116
117 for (i = 0; i < ARRAY_SIZE(acpi_debug_levels); i++) {
118 result += sprintf(buffer+result, "%-25s\t0x%08lX [%c]\n",
119 acpi_debug_levels[i].name,
120 acpi_debug_levels[i].value,
121 (acpi_dbg_level & acpi_debug_levels[i].
122 value) ? '*' : ' ');
123 }
124 result += sprintf(buffer+result, "--\ndebug_level = 0x%08X (* = enabled)\n",
125 acpi_dbg_level);
126
127 return result;
128}
129
130module_param_call(debug_layer, param_set_uint, param_get_debug_layer, &acpi_dbg_layer, 0644);
131module_param_call(debug_level, param_set_uint, param_get_debug_level, &acpi_dbg_level, 0644);
132
133/* --------------------------------------------------------------------------
134 FS Interface (/proc)
135 -------------------------------------------------------------------------- */
136#ifdef CONFIG_ACPI_PROCFS
137#define ACPI_SYSTEM_FILE_DEBUG_LAYER "debug_layer"
138#define ACPI_SYSTEM_FILE_DEBUG_LEVEL "debug_level"
139
89static int 140static int
90acpi_system_read_debug(char *page, 141acpi_system_read_debug(char *page,
91 char **start, off_t off, int count, int *eof, void *data) 142 char **start, off_t off, int count, int *eof, void *data)
@@ -221,3 +272,4 @@ static int __init acpi_debug_init(void)
221} 272}
222 273
223subsys_initcall(acpi_debug_init); 274subsys_initcall(acpi_debug_init);
275#endif
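With MODULE_PARAM_PREFIX set to "acpi." and the module_param_call() getters above, both masks become readable and writable at runtime through /sys/module/acpi/parameters/debug_layer and /sys/module/acpi/parameters/debug_level, and settable at boot (e.g. acpi.debug_level=0x00000004). A sketch of how driver code is gated by the two masks (component and message are illustrative):

	#define _COMPONENT	ACPI_BUS_COMPONENT
	ACPI_MODULE_NAME("example")

	static void example_debug(void)
	{
		/* Emitted only if ACPI_BUS_COMPONENT is set in debug_layer
		 * and the ACPI_LV_INFO bit is set in debug_level. */
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "would print if enabled\n"));
	}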
diff --git a/drivers/acpi/dispatcher/dsfield.c b/drivers/acpi/dispatcher/dsfield.c
index a6d77efb41a0..f049639bac35 100644
--- a/drivers/acpi/dispatcher/dsfield.c
+++ b/drivers/acpi/dispatcher/dsfield.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -133,7 +133,8 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op,
133 } 133 }
134 } 134 }
135 135
136 /* We could put the returned object (Node) on the object stack for later, 136 /*
137 * We could put the returned object (Node) on the object stack for later,
137 * but for now, we will put it in the "op" object that the parser uses, 138 * but for now, we will put it in the "op" object that the parser uses,
138 * so we can get it again at the end of this scope 139 * so we can get it again at the end of this scope
139 */ 140 */
@@ -514,8 +515,33 @@ acpi_ds_create_bank_field(union acpi_parse_object *op,
514 515
515 /* Third arg is the bank_value */ 516 /* Third arg is the bank_value */
516 517
518 /* TBD: This arg is a term_arg, not a constant, and must be evaluated */
519
517 arg = arg->common.next; 520 arg = arg->common.next;
518 info.bank_value = (u32) arg->common.value.integer; 521
522 /* Currently, only the following constants are supported */
523
524 switch (arg->common.aml_opcode) {
525 case AML_ZERO_OP:
526 info.bank_value = 0;
527 break;
528
529 case AML_ONE_OP:
530 info.bank_value = 1;
531 break;
532
533 case AML_BYTE_OP:
534 case AML_WORD_OP:
535 case AML_DWORD_OP:
536 case AML_QWORD_OP:
537 info.bank_value = (u32) arg->common.value.integer;
538 break;
539
540 default:
541 info.bank_value = 0;
542 ACPI_ERROR((AE_INFO,
543 "Non-constant BankValue for BankField is not implemented"));
544 }
519 545
520 /* Fourth arg is the field flags */ 546 /* Fourth arg is the field flags */
521 547
diff --git a/drivers/acpi/dispatcher/dsinit.c b/drivers/acpi/dispatcher/dsinit.c
index 1888c055d10f..af923c388520 100644
--- a/drivers/acpi/dispatcher/dsinit.c
+++ b/drivers/acpi/dispatcher/dsinit.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -44,6 +44,7 @@
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acdispat.h> 45#include <acpi/acdispat.h>
46#include <acpi/acnamesp.h> 46#include <acpi/acnamesp.h>
47#include <acpi/actables.h>
47 48
48#define _COMPONENT ACPI_DISPATCHER 49#define _COMPONENT ACPI_DISPATCHER
49ACPI_MODULE_NAME("dsinit") 50ACPI_MODULE_NAME("dsinit")
@@ -90,7 +91,7 @@ acpi_ds_init_one_object(acpi_handle obj_handle,
90 * We are only interested in NS nodes owned by the table that 91 * We are only interested in NS nodes owned by the table that
91 * was just loaded 92 * was just loaded
92 */ 93 */
93 if (node->owner_id != info->table_desc->owner_id) { 94 if (node->owner_id != info->owner_id) {
94 return (AE_OK); 95 return (AE_OK);
95 } 96 }
96 97
@@ -150,14 +151,21 @@ acpi_ds_init_one_object(acpi_handle obj_handle,
150 ******************************************************************************/ 151 ******************************************************************************/
151 152
152acpi_status 153acpi_status
153acpi_ds_initialize_objects(struct acpi_table_desc * table_desc, 154acpi_ds_initialize_objects(acpi_native_uint table_index,
154 struct acpi_namespace_node * start_node) 155 struct acpi_namespace_node * start_node)
155{ 156{
156 acpi_status status; 157 acpi_status status;
157 struct acpi_init_walk_info info; 158 struct acpi_init_walk_info info;
159 struct acpi_table_header *table;
160 acpi_owner_id owner_id;
158 161
159 ACPI_FUNCTION_TRACE(ds_initialize_objects); 162 ACPI_FUNCTION_TRACE(ds_initialize_objects);
160 163
164 status = acpi_tb_get_owner_id(table_index, &owner_id);
165 if (ACPI_FAILURE(status)) {
166 return_ACPI_STATUS(status);
167 }
168
161 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, 169 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
162 "**** Starting initialization of namespace objects ****\n")); 170 "**** Starting initialization of namespace objects ****\n"));
163 ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "Parsing all Control Methods:")); 171 ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "Parsing all Control Methods:"));
@@ -166,7 +174,8 @@ acpi_ds_initialize_objects(struct acpi_table_desc * table_desc,
166 info.op_region_count = 0; 174 info.op_region_count = 0;
167 info.object_count = 0; 175 info.object_count = 0;
168 info.device_count = 0; 176 info.device_count = 0;
169 info.table_desc = table_desc; 177 info.table_index = table_index;
178 info.owner_id = owner_id;
170 179
171 /* Walk entire namespace from the supplied root */ 180 /* Walk entire namespace from the supplied root */
172 181
@@ -176,10 +185,14 @@ acpi_ds_initialize_objects(struct acpi_table_desc * table_desc,
176 ACPI_EXCEPTION((AE_INFO, status, "During WalkNamespace")); 185 ACPI_EXCEPTION((AE_INFO, status, "During WalkNamespace"));
177 } 186 }
178 187
188 status = acpi_get_table_by_index(table_index, &table);
189 if (ACPI_FAILURE(status)) {
190 return_ACPI_STATUS(status);
191 }
192
179 ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, 193 ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
180 "\nTable [%4.4s](id %4.4X) - %hd Objects with %hd Devices %hd Methods %hd Regions\n", 194 "\nTable [%4.4s](id %4.4X) - %hd Objects with %hd Devices %hd Methods %hd Regions\n",
181 table_desc->pointer->signature, 195 table->signature, owner_id, info.object_count,
182 table_desc->owner_id, info.object_count,
183 info.device_count, info.method_count, 196 info.device_count, info.method_count,
184 info.op_region_count)); 197 info.op_region_count));
185 198
diff --git a/drivers/acpi/dispatcher/dsmethod.c b/drivers/acpi/dispatcher/dsmethod.c
index cf888add3191..1cbe61905824 100644
--- a/drivers/acpi/dispatcher/dsmethod.c
+++ b/drivers/acpi/dispatcher/dsmethod.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -327,7 +327,7 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
327 ACPI_FUNCTION_TRACE_PTR(ds_call_control_method, this_walk_state); 327 ACPI_FUNCTION_TRACE_PTR(ds_call_control_method, this_walk_state);
328 328
329 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, 329 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
330 "Execute method %p, currentstate=%p\n", 330 "Calling method %p, currentstate=%p\n",
331 this_walk_state->prev_op, this_walk_state)); 331 this_walk_state->prev_op, this_walk_state));
332 332
333 /* 333 /*
@@ -351,49 +351,7 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
351 return_ACPI_STATUS(status); 351 return_ACPI_STATUS(status);
352 } 352 }
353 353
354 /* 354 /* Begin method parse/execution. Create a new walk state */
355 * 1) Parse the method. All "normal" methods are parsed for each execution.
356 * Internal methods (_OSI, etc.) do not require parsing.
357 */
358 if (!(obj_desc->method.method_flags & AML_METHOD_INTERNAL_ONLY)) {
359
360 /* Create a new walk state for the parse */
361
362 next_walk_state =
363 acpi_ds_create_walk_state(obj_desc->method.owner_id, op,
364 obj_desc, NULL);
365 if (!next_walk_state) {
366 status = AE_NO_MEMORY;
367 goto cleanup;
368 }
369
370 /* Create and init a parse tree root */
371
372 op = acpi_ps_create_scope_op();
373 if (!op) {
374 status = AE_NO_MEMORY;
375 goto cleanup;
376 }
377
378 status = acpi_ds_init_aml_walk(next_walk_state, op, method_node,
379 obj_desc->method.aml_start,
380 obj_desc->method.aml_length,
381 NULL, 1);
382 if (ACPI_FAILURE(status)) {
383 acpi_ps_delete_parse_tree(op);
384 goto cleanup;
385 }
386
387 /* Begin AML parse (deletes next_walk_state) */
388
389 status = acpi_ps_parse_aml(next_walk_state);
390 acpi_ps_delete_parse_tree(op);
391 if (ACPI_FAILURE(status)) {
392 goto cleanup;
393 }
394 }
395
396 /* 2) Begin method execution. Create a new walk state */
397 355
398 next_walk_state = acpi_ds_create_walk_state(obj_desc->method.owner_id, 356 next_walk_state = acpi_ds_create_walk_state(obj_desc->method.owner_id,
399 NULL, obj_desc, thread); 357 NULL, obj_desc, thread);
@@ -424,7 +382,8 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
424 382
425 status = acpi_ds_init_aml_walk(next_walk_state, NULL, method_node, 383 status = acpi_ds_init_aml_walk(next_walk_state, NULL, method_node,
426 obj_desc->method.aml_start, 384 obj_desc->method.aml_start,
427 obj_desc->method.aml_length, info, 3); 385 obj_desc->method.aml_length, info,
386 ACPI_IMODE_EXECUTE);
428 387
429 ACPI_FREE(info); 388 ACPI_FREE(info);
430 if (ACPI_FAILURE(status)) { 389 if (ACPI_FAILURE(status)) {
@@ -445,8 +404,8 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
445 this_walk_state->num_operands = 0; 404 this_walk_state->num_operands = 0;
446 405
447 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, 406 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
448 "Starting nested execution, newstate=%p\n", 407 "**** Begin nested execution of [%4.4s] **** WalkState=%p\n",
449 next_walk_state)); 408 method_node->name.ascii, next_walk_state));
450 409
451 /* Invoke an internal method if necessary */ 410 /* Invoke an internal method if necessary */
452 411
diff --git a/drivers/acpi/dispatcher/dsmthdat.c b/drivers/acpi/dispatcher/dsmthdat.c
index 459160ff9058..ba4626e06a5e 100644
--- a/drivers/acpi/dispatcher/dsmthdat.c
+++ b/drivers/acpi/dispatcher/dsmthdat.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/dispatcher/dsobject.c b/drivers/acpi/dispatcher/dsobject.c
index 72190abb1d59..a474ca2334d5 100644
--- a/drivers/acpi/dispatcher/dsobject.c
+++ b/drivers/acpi/dispatcher/dsobject.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -260,7 +260,7 @@ acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
260 } 260 }
261 261
262 obj_desc->buffer.flags |= AOPOBJ_DATA_VALID; 262 obj_desc->buffer.flags |= AOPOBJ_DATA_VALID;
263 op->common.node = (struct acpi_namespace_node *)obj_desc; 263 op->common.node = ACPI_CAST_PTR(struct acpi_namespace_node, obj_desc);
264 return_ACPI_STATUS(AE_OK); 264 return_ACPI_STATUS(AE_OK);
265} 265}
266 266
@@ -270,7 +270,8 @@ acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
270 * 270 *
271 * PARAMETERS: walk_state - Current walk state 271 * PARAMETERS: walk_state - Current walk state
272 * Op - Parser object to be translated 272 * Op - Parser object to be translated
273 * package_length - Number of elements in the package 273 * element_count - Number of elements in the package - this is
274 * the num_elements argument to Package()
274 * obj_desc_ptr - Where the ACPI internal object is returned 275 * obj_desc_ptr - Where the ACPI internal object is returned
275 * 276 *
276 * RETURN: Status 277 * RETURN: Status
@@ -278,18 +279,29 @@ acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
278 * DESCRIPTION: Translate a parser Op package object to the equivalent 279 * DESCRIPTION: Translate a parser Op package object to the equivalent
279 * namespace object 280 * namespace object
280 * 281 *
 282 * NOTE: The number of elements in the package will always be the num_elements
283 * count, regardless of the number of elements in the package list. If
284 * num_elements is smaller, only that many package list elements are used.
 285 * If num_elements is larger, the Package object is padded out with
286 * objects of type Uninitialized (as per ACPI spec.)
287 *
288 * Even though the ASL compilers do not allow num_elements to be smaller
289 * than the Package list length (for the fixed length package opcode), some
290 * BIOS code modifies the AML on the fly to adjust the num_elements, and
291 * this code compensates for that. This also provides compatibility with
292 * other AML interpreters.
293 *
281 ******************************************************************************/ 294 ******************************************************************************/
282 295
283acpi_status 296acpi_status
284acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state, 297acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
285 union acpi_parse_object *op, 298 union acpi_parse_object *op,
286 u32 package_length, 299 u32 element_count,
287 union acpi_operand_object **obj_desc_ptr) 300 union acpi_operand_object **obj_desc_ptr)
288{ 301{
289 union acpi_parse_object *arg; 302 union acpi_parse_object *arg;
290 union acpi_parse_object *parent; 303 union acpi_parse_object *parent;
291 union acpi_operand_object *obj_desc = NULL; 304 union acpi_operand_object *obj_desc = NULL;
292 u32 package_list_length;
293 acpi_status status = AE_OK; 305 acpi_status status = AE_OK;
294 acpi_native_uint i; 306 acpi_native_uint i;
295 307
@@ -318,32 +330,13 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
318 obj_desc->package.node = parent->common.node; 330 obj_desc->package.node = parent->common.node;
319 } 331 }
320 332
321 obj_desc->package.count = package_length;
322
323 /* Count the number of items in the package list */
324
325 arg = op->common.value.arg;
326 arg = arg->common.next;
327 for (package_list_length = 0; arg; package_list_length++) {
328 arg = arg->common.next;
329 }
330
331 /*
332 * The package length (number of elements) will be the greater
333 * of the specified length and the length of the initializer list
334 */
335 if (package_list_length > package_length) {
336 obj_desc->package.count = package_list_length;
337 }
338
339 /* 333 /*
340 * Allocate the pointer array (array of pointers to the 334 * Allocate the element array (array of pointers to the individual
341 * individual objects). Add an extra pointer slot so 335 * objects) based on the num_elements parameter. Add an extra pointer slot
342 * that the list is always null terminated. 336 * so that the list is always null terminated.
343 */ 337 */
344 obj_desc->package.elements = ACPI_ALLOCATE_ZEROED(((acpi_size) 338 obj_desc->package.elements = ACPI_ALLOCATE_ZEROED(((acpi_size)
345 obj_desc->package. 339 element_count +
346 count +
347 1) * sizeof(void *)); 340 1) * sizeof(void *));
348 341
349 if (!obj_desc->package.elements) { 342 if (!obj_desc->package.elements) {
@@ -351,15 +344,20 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
351 return_ACPI_STATUS(AE_NO_MEMORY); 344 return_ACPI_STATUS(AE_NO_MEMORY);
352 } 345 }
353 346
347 obj_desc->package.count = element_count;
348
354 /* 349 /*
355 * Initialize all elements of the package 350 * Initialize the elements of the package, up to the num_elements count.
351 * Package is automatically padded with uninitialized (NULL) elements
352 * if num_elements is greater than the package list length. Likewise,
353 * Package is truncated if num_elements is less than the list length.
356 */ 354 */
357 arg = op->common.value.arg; 355 arg = op->common.value.arg;
358 arg = arg->common.next; 356 arg = arg->common.next;
359 for (i = 0; arg; i++) { 357 for (i = 0; arg && (i < element_count); i++) {
360 if (arg->common.aml_opcode == AML_INT_RETURN_VALUE_OP) { 358 if (arg->common.aml_opcode == AML_INT_RETURN_VALUE_OP) {
361 359
362 /* Object (package or buffer) is already built */ 360 /* This package element is already built, just get it */
363 361
364 obj_desc->package.elements[i] = 362 obj_desc->package.elements[i] =
365 ACPI_CAST_PTR(union acpi_operand_object, 363 ACPI_CAST_PTR(union acpi_operand_object,
@@ -373,8 +371,14 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
373 arg = arg->common.next; 371 arg = arg->common.next;
374 } 372 }
375 373
374 if (!arg) {
375 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
376 "Package List length larger than NumElements count (%X), truncated\n",
377 element_count));
378 }
379
376 obj_desc->package.flags |= AOPOBJ_DATA_VALID; 380 obj_desc->package.flags |= AOPOBJ_DATA_VALID;
377 op->common.node = (struct acpi_namespace_node *)obj_desc; 381 op->common.node = ACPI_CAST_PTR(struct acpi_namespace_node, obj_desc);
378 return_ACPI_STATUS(status); 382 return_ACPI_STATUS(status);
379} 383}
380 384
@@ -488,8 +492,9 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
488 /* 492 /*
489 * Defer evaluation of Buffer term_arg operand 493 * Defer evaluation of Buffer term_arg operand
490 */ 494 */
491 obj_desc->buffer.node = (struct acpi_namespace_node *) 495 obj_desc->buffer.node =
492 walk_state->operands[0]; 496 ACPI_CAST_PTR(struct acpi_namespace_node,
497 walk_state->operands[0]);
493 obj_desc->buffer.aml_start = op->named.data; 498 obj_desc->buffer.aml_start = op->named.data;
494 obj_desc->buffer.aml_length = op->named.length; 499 obj_desc->buffer.aml_length = op->named.length;
495 break; 500 break;
@@ -499,8 +504,9 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
499 /* 504 /*
500 * Defer evaluation of Package term_arg operand 505 * Defer evaluation of Package term_arg operand
501 */ 506 */
502 obj_desc->package.node = (struct acpi_namespace_node *) 507 obj_desc->package.node =
503 walk_state->operands[0]; 508 ACPI_CAST_PTR(struct acpi_namespace_node,
509 walk_state->operands[0]);
504 obj_desc->package.aml_start = op->named.data; 510 obj_desc->package.aml_start = op->named.data;
505 obj_desc->package.aml_length = op->named.length; 511 obj_desc->package.aml_length = op->named.length;
506 break; 512 break;
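The NOTE in the dsobject.c hunk above pins down the sizing rule: the package length is always num_elements, with the initializer list truncated past that count or the tail padded with Uninitialized (NULL) elements. A sketch of that rule in isolation (hypothetical helper; the real code operates on union acpi_operand_object pointer arrays):

	/* Fill at most element_count slots; the array is allocated with
	 * element_count + 1 entries so the list stays NULL-terminated. */
	static int example_package_fill(void **elements, u32 element_count,
					void **init_list, u32 list_len)
	{
		u32 i;

		for (i = 0; i < element_count && i < list_len; i++)
			elements[i] = init_list[i];

		/* remaining slots stay NULL: padding and terminator */
		return list_len > element_count;	/* nonzero == truncated */
	}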
diff --git a/drivers/acpi/dispatcher/dsopcode.c b/drivers/acpi/dispatcher/dsopcode.c
index 5b974a8fe614..6c6104a7a247 100644
--- a/drivers/acpi/dispatcher/dsopcode.c
+++ b/drivers/acpi/dispatcher/dsopcode.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -114,7 +114,7 @@ acpi_ds_execute_arguments(struct acpi_namespace_node *node,
114 } 114 }
115 115
116 status = acpi_ds_init_aml_walk(walk_state, op, NULL, aml_start, 116 status = acpi_ds_init_aml_walk(walk_state, op, NULL, aml_start,
117 aml_length, NULL, 1); 117 aml_length, NULL, ACPI_IMODE_LOAD_PASS1);
118 if (ACPI_FAILURE(status)) { 118 if (ACPI_FAILURE(status)) {
119 acpi_ds_delete_walk_state(walk_state); 119 acpi_ds_delete_walk_state(walk_state);
120 goto cleanup; 120 goto cleanup;
@@ -157,7 +157,7 @@ acpi_ds_execute_arguments(struct acpi_namespace_node *node,
157 /* Execute the opcode and arguments */ 157 /* Execute the opcode and arguments */
158 158
159 status = acpi_ds_init_aml_walk(walk_state, op, NULL, aml_start, 159 status = acpi_ds_init_aml_walk(walk_state, op, NULL, aml_start,
160 aml_length, NULL, 3); 160 aml_length, NULL, ACPI_IMODE_EXECUTE);
161 if (ACPI_FAILURE(status)) { 161 if (ACPI_FAILURE(status)) {
162 acpi_ds_delete_walk_state(walk_state); 162 acpi_ds_delete_walk_state(walk_state);
163 goto cleanup; 163 goto cleanup;
diff --git a/drivers/acpi/dispatcher/dsutils.c b/drivers/acpi/dispatcher/dsutils.c
index 05230baf5de8..e4073e05a75c 100644
--- a/drivers/acpi/dispatcher/dsutils.c
+++ b/drivers/acpi/dispatcher/dsutils.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/dispatcher/dswexec.c b/drivers/acpi/dispatcher/dswexec.c
index d7a616c3104e..69693fa07224 100644
--- a/drivers/acpi/dispatcher/dswexec.c
+++ b/drivers/acpi/dispatcher/dswexec.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -219,7 +219,7 @@ acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state,
219 if (!op) { 219 if (!op) {
220 status = acpi_ds_load2_begin_op(walk_state, out_op); 220 status = acpi_ds_load2_begin_op(walk_state, out_op);
221 if (ACPI_FAILURE(status)) { 221 if (ACPI_FAILURE(status)) {
222 return_ACPI_STATUS(status); 222 goto error_exit;
223 } 223 }
224 224
225 op = *out_op; 225 op = *out_op;
@@ -238,7 +238,7 @@ acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state,
238 238
239 status = acpi_ds_scope_stack_pop(walk_state); 239 status = acpi_ds_scope_stack_pop(walk_state);
240 if (ACPI_FAILURE(status)) { 240 if (ACPI_FAILURE(status)) {
241 return_ACPI_STATUS(status); 241 goto error_exit;
242 } 242 }
243 } 243 }
244 } 244 }
@@ -287,7 +287,7 @@ acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state,
287 287
288 status = acpi_ds_result_stack_push(walk_state); 288 status = acpi_ds_result_stack_push(walk_state);
289 if (ACPI_FAILURE(status)) { 289 if (ACPI_FAILURE(status)) {
290 return_ACPI_STATUS(status); 290 goto error_exit;
291 } 291 }
292 292
293 status = acpi_ds_exec_begin_control_op(walk_state, op); 293 status = acpi_ds_exec_begin_control_op(walk_state, op);
@@ -328,6 +328,10 @@ acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state,
328 /* Nothing to do here during method execution */ 328 /* Nothing to do here during method execution */
329 329
330 return_ACPI_STATUS(status); 330 return_ACPI_STATUS(status);
331
332 error_exit:
333 status = acpi_ds_method_error(status, walk_state);
334 return_ACPI_STATUS(status);
331} 335}
332 336
333/***************************************************************************** 337/*****************************************************************************
diff --git a/drivers/acpi/dispatcher/dswload.c b/drivers/acpi/dispatcher/dswload.c
index e3ca7f6539c1..8ab9d1b29a4c 100644
--- a/drivers/acpi/dispatcher/dswload.c
+++ b/drivers/acpi/dispatcher/dswload.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -196,6 +196,7 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
196 * one of the opcodes that actually opens a scope 196 * one of the opcodes that actually opens a scope
197 */ 197 */
198 switch (node->type) { 198 switch (node->type) {
199 case ACPI_TYPE_ANY:
199 case ACPI_TYPE_LOCAL_SCOPE: /* Scope */ 200 case ACPI_TYPE_LOCAL_SCOPE: /* Scope */
200 case ACPI_TYPE_DEVICE: 201 case ACPI_TYPE_DEVICE:
201 case ACPI_TYPE_POWER: 202 case ACPI_TYPE_POWER:
@@ -546,6 +547,7 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
546 acpi_status status; 547 acpi_status status;
547 acpi_object_type object_type; 548 acpi_object_type object_type;
548 char *buffer_ptr; 549 char *buffer_ptr;
550 u32 flags;
549 551
550 ACPI_FUNCTION_TRACE(ds_load2_begin_op); 552 ACPI_FUNCTION_TRACE(ds_load2_begin_op);
551 553
@@ -669,6 +671,7 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
669 * one of the opcodes that actually opens a scope 671 * one of the opcodes that actually opens a scope
670 */ 672 */
671 switch (node->type) { 673 switch (node->type) {
674 case ACPI_TYPE_ANY:
672 case ACPI_TYPE_LOCAL_SCOPE: /* Scope */ 675 case ACPI_TYPE_LOCAL_SCOPE: /* Scope */
673 case ACPI_TYPE_DEVICE: 676 case ACPI_TYPE_DEVICE:
674 case ACPI_TYPE_POWER: 677 case ACPI_TYPE_POWER:
@@ -750,12 +753,20 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
750 break; 753 break;
751 } 754 }
752 755
753 /* Add new entry into namespace */ 756 flags = ACPI_NS_NO_UPSEARCH;
757 if (walk_state->pass_number == ACPI_IMODE_EXECUTE) {
758
759 /* Execution mode, node cannot already exist, node is temporary */
760
761 flags |= (ACPI_NS_ERROR_IF_FOUND | ACPI_NS_TEMPORARY);
762 }
763
764 /* Add new entry or lookup existing entry */
754 765
755 status = 766 status =
756 acpi_ns_lookup(walk_state->scope_info, buffer_ptr, 767 acpi_ns_lookup(walk_state->scope_info, buffer_ptr,
757 object_type, ACPI_IMODE_LOAD_PASS2, 768 object_type, ACPI_IMODE_LOAD_PASS2, flags,
758 ACPI_NS_NO_UPSEARCH, walk_state, &(node)); 769 walk_state, &node);
759 break; 770 break;
760 } 771 }
761 772
diff --git a/drivers/acpi/dispatcher/dswscope.c b/drivers/acpi/dispatcher/dswscope.c
index c9228972f5f6..3927c495e4bf 100644
--- a/drivers/acpi/dispatcher/dswscope.c
+++ b/drivers/acpi/dispatcher/dswscope.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/dispatcher/dswstate.c b/drivers/acpi/dispatcher/dswstate.c
index 7817e5522679..16c8e38b51ef 100644
--- a/drivers/acpi/dispatcher/dswstate.c
+++ b/drivers/acpi/dispatcher/dswstate.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index 90990a4b6526..688e83a16906 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -615,20 +615,28 @@ static acpi_status
615find_dock_devices(acpi_handle handle, u32 lvl, void *context, void **rv) 615find_dock_devices(acpi_handle handle, u32 lvl, void *context, void **rv)
616{ 616{
617 acpi_status status; 617 acpi_status status;
618 acpi_handle tmp; 618 acpi_handle tmp, parent;
619 struct dock_station *ds = context; 619 struct dock_station *ds = context;
620 struct dock_dependent_device *dd; 620 struct dock_dependent_device *dd;
621 621
622 status = acpi_bus_get_ejd(handle, &tmp); 622 status = acpi_bus_get_ejd(handle, &tmp);
623 if (ACPI_FAILURE(status)) 623 if (ACPI_FAILURE(status)) {
624 return AE_OK; 624 /* try the parent device as well */
625 status = acpi_get_parent(handle, &parent);
626 if (ACPI_FAILURE(status))
627 goto fdd_out;
628 /* see if parent is dependent on dock */
629 status = acpi_bus_get_ejd(parent, &tmp);
630 if (ACPI_FAILURE(status))
631 goto fdd_out;
632 }
625 633
626 if (tmp == ds->handle) { 634 if (tmp == ds->handle) {
627 dd = alloc_dock_dependent_device(handle); 635 dd = alloc_dock_dependent_device(handle);
628 if (dd) 636 if (dd)
629 add_dock_dependent_device(ds, dd); 637 add_dock_dependent_device(ds, dd);
630 } 638 }
631 639fdd_out:
632 return AE_OK; 640 return AE_OK;
633} 641}
634 642
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index cbdf031f3c09..743ce27fa0bb 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -872,9 +872,8 @@ static int __init acpi_ec_get_real_ecdt(void)
872 acpi_status status; 872 acpi_status status;
873 struct acpi_table_ecdt *ecdt_ptr; 873 struct acpi_table_ecdt *ecdt_ptr;
874 874
875 status = acpi_get_firmware_table("ECDT", 1, ACPI_LOGICAL_ADDRESSING, 875 status = acpi_get_table(ACPI_SIG_ECDT, 1,
876 (struct acpi_table_header **) 876 (struct acpi_table_header **)&ecdt_ptr);
877 &ecdt_ptr);
878 if (ACPI_FAILURE(status)) 877 if (ACPI_FAILURE(status))
879 return -ENODEV; 878 return -ENODEV;
880 879
@@ -891,14 +890,14 @@ static int __init acpi_ec_get_real_ecdt(void)
891 if (acpi_ec_mode == EC_INTR) { 890 if (acpi_ec_mode == EC_INTR) {
892 init_waitqueue_head(&ec_ecdt->wait); 891 init_waitqueue_head(&ec_ecdt->wait);
893 } 892 }
894 ec_ecdt->command_addr = ecdt_ptr->ec_control.address; 893 ec_ecdt->command_addr = ecdt_ptr->control.address;
895 ec_ecdt->data_addr = ecdt_ptr->ec_data.address; 894 ec_ecdt->data_addr = ecdt_ptr->data.address;
896 ec_ecdt->gpe = ecdt_ptr->gpe_bit; 895 ec_ecdt->gpe = ecdt_ptr->gpe;
897 /* use the GL just to be safe */ 896 /* use the GL just to be safe */
898 ec_ecdt->global_lock = TRUE; 897 ec_ecdt->global_lock = TRUE;
899 ec_ecdt->uid = ecdt_ptr->uid; 898 ec_ecdt->uid = ecdt_ptr->uid;
900 899
901 status = acpi_get_handle(NULL, ecdt_ptr->ec_id, &ec_ecdt->handle); 900 status = acpi_get_handle(NULL, ecdt_ptr->id, &ec_ecdt->handle);
902 if (ACPI_FAILURE(status)) { 901 if (ACPI_FAILURE(status)) {
903 goto error; 902 goto error;
904 } 903 }
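The ec.c hunk above moves from acpi_get_firmware_table() to the new acpi_get_table(), and the ECDT members lose their ec_ prefix (control, data, gpe, id). A sketch of the new lookup, assuming the reworked struct acpi_table_ecdt used in the hunk (the helper name is hypothetical):

	static int __init example_find_ecdt(void)
	{
		struct acpi_table_ecdt *ecdt;
		acpi_status status;

		status = acpi_get_table(ACPI_SIG_ECDT, 1,
					(struct acpi_table_header **)&ecdt);
		if (ACPI_FAILURE(status))
			return -ENODEV;

		printk(KERN_DEBUG "ECDT: cmd %llx data %llx gpe %u\n",
		       (unsigned long long)ecdt->control.address,
		       (unsigned long long)ecdt->data.address,
		       (unsigned int)ecdt->gpe);
		return 0;
	}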
diff --git a/drivers/acpi/events/evevent.c b/drivers/acpi/events/evevent.c
index 919037d6acff..a1f87b5def2a 100644
--- a/drivers/acpi/events/evevent.c
+++ b/drivers/acpi/events/evevent.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -70,13 +70,6 @@ acpi_status acpi_ev_initialize_events(void)
70 70
71 ACPI_FUNCTION_TRACE(ev_initialize_events); 71 ACPI_FUNCTION_TRACE(ev_initialize_events);
72 72
73 /* Make sure we have ACPI tables */
74
75 if (!acpi_gbl_DSDT) {
76 ACPI_WARNING((AE_INFO, "No ACPI tables present!"));
77 return_ACPI_STATUS(AE_NO_ACPI_TABLES);
78 }
79
80 /* 73 /*
81 * Initialize the Fixed and General Purpose Events. This is done prior to 74 * Initialize the Fixed and General Purpose Events. This is done prior to
82 * enabling SCIs to prevent interrupts from occurring before the handlers are 75 * enabling SCIs to prevent interrupts from occurring before the handlers are
@@ -211,8 +204,7 @@ static acpi_status acpi_ev_fixed_event_initialize(void)
211 if (acpi_gbl_fixed_event_info[i].enable_register_id != 0xFF) { 204 if (acpi_gbl_fixed_event_info[i].enable_register_id != 0xFF) {
212 status = 205 status =
213 acpi_set_register(acpi_gbl_fixed_event_info[i]. 206 acpi_set_register(acpi_gbl_fixed_event_info[i].
214 enable_register_id, 0, 207 enable_register_id, 0);
215 ACPI_MTX_LOCK);
216 if (ACPI_FAILURE(status)) { 208 if (ACPI_FAILURE(status)) {
217 return (status); 209 return (status);
218 } 210 }
@@ -298,7 +290,7 @@ static u32 acpi_ev_fixed_event_dispatch(u32 event)
298 /* Clear the status bit */ 290 /* Clear the status bit */
299 291
300 (void)acpi_set_register(acpi_gbl_fixed_event_info[event]. 292 (void)acpi_set_register(acpi_gbl_fixed_event_info[event].
301 status_register_id, 1, ACPI_MTX_DO_NOT_LOCK); 293 status_register_id, 1);
302 294
303 /* 295 /*
304 * Make sure we've got a handler. If not, report an error. 296 * Make sure we've got a handler. If not, report an error.
@@ -306,8 +298,7 @@ static u32 acpi_ev_fixed_event_dispatch(u32 event)
306 */ 298 */
307 if (NULL == acpi_gbl_fixed_event_handlers[event].handler) { 299 if (NULL == acpi_gbl_fixed_event_handlers[event].handler) {
308 (void)acpi_set_register(acpi_gbl_fixed_event_info[event]. 300 (void)acpi_set_register(acpi_gbl_fixed_event_info[event].
309 enable_register_id, 0, 301 enable_register_id, 0);
310 ACPI_MTX_DO_NOT_LOCK);
311 302
312 ACPI_ERROR((AE_INFO, 303 ACPI_ERROR((AE_INFO,
313 "No installed handler for fixed event [%08X]", 304 "No installed handler for fixed event [%08X]",
diff --git a/drivers/acpi/events/evgpe.c b/drivers/acpi/events/evgpe.c
index c76c0583ca6a..dfac3ecc596e 100644
--- a/drivers/acpi/events/evgpe.c
+++ b/drivers/acpi/events/evgpe.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -121,7 +121,9 @@ acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info,
121 if (!gpe_register_info) { 121 if (!gpe_register_info) {
122 return_ACPI_STATUS(AE_NOT_EXIST); 122 return_ACPI_STATUS(AE_NOT_EXIST);
123 } 123 }
124 register_bit = gpe_event_info->register_bit; 124 register_bit = (u8)
125 (1 <<
126 (gpe_event_info->gpe_number - gpe_register_info->base_gpe_number));
125 127
126 /* 1) Disable case. Simply clear all enable bits */ 128 /* 1) Disable case. Simply clear all enable bits */
127 129
@@ -458,8 +460,7 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
458 460
459 /* Examine one GPE bit */ 461 /* Examine one GPE bit */
460 462
461 if (enabled_status_byte & 463 if (enabled_status_byte & (1 << j)) {
462 acpi_gbl_decode_to8bit[j]) {
463 /* 464 /*
464 * Found an active GPE. Dispatch the event to a handler 465 * Found an active GPE. Dispatch the event to a handler
465 * or method. 466 * or method.
@@ -570,7 +571,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
570 571
571 if (ACPI_FAILURE(status)) { 572 if (ACPI_FAILURE(status)) {
572 ACPI_EXCEPTION((AE_INFO, status, 573 ACPI_EXCEPTION((AE_INFO, status,
573 "While evaluating GPE method [%4.4s]", 574 "while evaluating GPE method [%4.4s]",
574 acpi_ut_get_node_name 575 acpi_ut_get_node_name
575 (local_gpe_event_info.dispatch. 576 (local_gpe_event_info.dispatch.
576 method_node))); 577 method_node)));
@@ -618,6 +619,8 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
618 619
619 ACPI_FUNCTION_TRACE(ev_gpe_dispatch); 620 ACPI_FUNCTION_TRACE(ev_gpe_dispatch);
620 621
622 acpi_gpe_count++;
623
621 /* 624 /*
622 * If edge-triggered, clear the GPE status bit now. Note that 625 * If edge-triggered, clear the GPE status bit now. Note that
623 * level-triggered events are cleared after the GPE is serviced. 626 * level-triggered events are cleared after the GPE is serviced.
@@ -633,20 +636,23 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
633 } 636 }
634 } 637 }
635 638
636 /* Save current system state */ 639 if (!acpi_gbl_system_awake_and_running) {
637 640 /*
638 if (acpi_gbl_system_awake_and_running) { 641 * We just woke up because of a wake GPE. Disable any further GPEs
639 ACPI_SET_BIT(gpe_event_info->flags, ACPI_GPE_SYSTEM_RUNNING); 642 * until we are fully up and running (Only wake GPEs should be enabled
640 } else { 643 * at this time, but we just brute-force disable them all.)
641 ACPI_CLEAR_BIT(gpe_event_info->flags, ACPI_GPE_SYSTEM_RUNNING); 644 * 1) We must disable this particular wake GPE so it won't fire again
645 * 2) We want to disable all wake GPEs, since we are now awake
646 */
647 (void)acpi_hw_disable_all_gpes();
642 } 648 }
643 649
644 /* 650 /*
645 * Dispatch the GPE to either an installed handler, or the control 651 * Dispatch the GPE to either an installed handler, or the control method
646 * method associated with this GPE (_Lxx or _Exx). 652 * associated with this GPE (_Lxx or _Exx). If a handler exists, we invoke
647 * If a handler exists, we invoke it and do not attempt to run the method. 653 * it and do not attempt to run the method. If there is neither a handler
648 * If there is neither a handler nor a method, we disable the level to 654 * nor a method, we disable this GPE to prevent further such pointless
649 * prevent further events from coming in here. 655 * events from firing.
650 */ 656 */
651 switch (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) { 657 switch (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) {
652 case ACPI_GPE_DISPATCH_HANDLER: 658 case ACPI_GPE_DISPATCH_HANDLER:
@@ -677,8 +683,8 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
677 case ACPI_GPE_DISPATCH_METHOD: 683 case ACPI_GPE_DISPATCH_METHOD:
678 684
679 /* 685 /*
680 * Disable GPE, so it doesn't keep firing before the method has a 686 * Disable the GPE, so it doesn't keep firing before the method has a
681 * chance to run. 687 * chance to run (it runs asynchronously with interrupts enabled).
682 */ 688 */
683 status = acpi_ev_disable_gpe(gpe_event_info); 689 status = acpi_ev_disable_gpe(gpe_event_info);
684 if (ACPI_FAILURE(status)) { 690 if (ACPI_FAILURE(status)) {
@@ -711,7 +717,7 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
711 gpe_number)); 717 gpe_number));
712 718
713 /* 719 /*
714 * Disable the GPE. The GPE will remain disabled until the ACPI 720 * Disable the GPE. The GPE will remain disabled until the ACPI
715 * Core Subsystem is restarted, or a handler is installed. 721 * Core Subsystem is restarted, or a handler is installed.
716 */ 722 */
717 status = acpi_ev_disable_gpe(gpe_event_info); 723 status = acpi_ev_disable_gpe(gpe_event_info);
@@ -726,50 +732,3 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
726 732
727 return_UINT32(ACPI_INTERRUPT_HANDLED); 733 return_UINT32(ACPI_INTERRUPT_HANDLED);
728} 734}
729
730#ifdef ACPI_GPE_NOTIFY_CHECK
731/*******************************************************************************
732 * TBD: NOT USED, PROTOTYPE ONLY AND WILL PROBABLY BE REMOVED
733 *
734 * FUNCTION: acpi_ev_check_for_wake_only_gpe
735 *
736 * PARAMETERS: gpe_event_info - info for this GPE
737 *
738 * RETURN: Status
739 *
 740 * DESCRIPTION: Determine if a GPE is "wake-only".
741 *
742 * Called from Notify() code in interpreter when a "DeviceWake"
743 * Notify comes in.
744 *
745 ******************************************************************************/
746
747acpi_status
748acpi_ev_check_for_wake_only_gpe(struct acpi_gpe_event_info *gpe_event_info)
749{
750 acpi_status status;
751
752 ACPI_FUNCTION_TRACE(ev_check_for_wake_only_gpe);
753
754 if ((gpe_event_info) && /* Only >0 for _Lxx/_Exx */
755 ((gpe_event_info->flags & ACPI_GPE_SYSTEM_MASK) == ACPI_GPE_SYSTEM_RUNNING)) { /* System state at GPE time */
756 /* This must be a wake-only GPE, disable it */
757
758 status = acpi_ev_disable_gpe(gpe_event_info);
759
760 /* Set GPE to wake-only. Do not change wake disabled/enabled status */
761
762 acpi_ev_set_gpe_type(gpe_event_info, ACPI_GPE_TYPE_WAKE);
763
764 ACPI_INFO((AE_INFO,
765 "GPE %p was updated from wake/run to wake-only",
766 gpe_event_info));
767
768 /* This was a wake-only GPE */
769
770 return_ACPI_STATUS(AE_WAKE_ONLY_GPE);
771 }
772
773 return_ACPI_STATUS(AE_OK);
774}
775#endif
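The evgpe.c hunks above drop the acpi_gbl_decode_to8bit[] lookup table; the register bit for a GPE is now derived directly from its number, as in the new register_bit computation. The computation in isolation (hypothetical helper):

	/* Sketch: GPE number -> bit within its 8-bit status/enable register */
	static u8 example_gpe_bit(u8 gpe_number, u8 base_gpe_number)
	{
		return (u8)(1 << (gpe_number - base_gpe_number));
	}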
diff --git a/drivers/acpi/events/evgpeblk.c b/drivers/acpi/events/evgpeblk.c
index 95ddeb48bc0f..ad5bc76edf46 100644
--- a/drivers/acpi/events/evgpeblk.c
+++ b/drivers/acpi/events/evgpeblk.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -529,7 +529,7 @@ static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32
529 529
530 /* Install new interrupt handler if not SCI_INT */ 530 /* Install new interrupt handler if not SCI_INT */
531 531
532 if (interrupt_number != acpi_gbl_FADT->sci_int) { 532 if (interrupt_number != acpi_gbl_FADT.sci_interrupt) {
533 status = acpi_os_install_interrupt_handler(interrupt_number, 533 status = acpi_os_install_interrupt_handler(interrupt_number,
534 acpi_ev_gpe_xrupt_handler, 534 acpi_ev_gpe_xrupt_handler,
535 gpe_xrupt); 535 gpe_xrupt);
@@ -567,7 +567,7 @@ acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt)
567 567
568 /* We never want to remove the SCI interrupt handler */ 568 /* We never want to remove the SCI interrupt handler */
569 569
570 if (gpe_xrupt->interrupt_number == acpi_gbl_FADT->sci_int) { 570 if (gpe_xrupt->interrupt_number == acpi_gbl_FADT.sci_interrupt) {
571 gpe_xrupt->gpe_block_list_head = NULL; 571 gpe_xrupt->gpe_block_list_head = NULL;
572 return_ACPI_STATUS(AE_OK); 572 return_ACPI_STATUS(AE_OK);
573 } 573 }
@@ -796,30 +796,31 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
796 (u8) (gpe_block->block_base_number + 796 (u8) (gpe_block->block_base_number +
797 (i * ACPI_GPE_REGISTER_WIDTH)); 797 (i * ACPI_GPE_REGISTER_WIDTH));
798 798
799 ACPI_STORE_ADDRESS(this_register->status_address.address, 799 this_register->status_address.address =
800 (gpe_block->block_address.address + i)); 800 gpe_block->block_address.address + i;
801 801
802 ACPI_STORE_ADDRESS(this_register->enable_address.address, 802 this_register->enable_address.address =
803 (gpe_block->block_address.address 803 gpe_block->block_address.address + i +
804 + i + gpe_block->register_count)); 804 gpe_block->register_count;
805 805
806 this_register->status_address.address_space_id = 806 this_register->status_address.space_id =
807 gpe_block->block_address.address_space_id; 807 gpe_block->block_address.space_id;
808 this_register->enable_address.address_space_id = 808 this_register->enable_address.space_id =
809 gpe_block->block_address.address_space_id; 809 gpe_block->block_address.space_id;
810 this_register->status_address.register_bit_width = 810 this_register->status_address.bit_width =
811 ACPI_GPE_REGISTER_WIDTH; 811 ACPI_GPE_REGISTER_WIDTH;
812 this_register->enable_address.register_bit_width = 812 this_register->enable_address.bit_width =
813 ACPI_GPE_REGISTER_WIDTH; 813 ACPI_GPE_REGISTER_WIDTH;
814 this_register->status_address.register_bit_offset = 814 this_register->status_address.bit_offset =
815 ACPI_GPE_REGISTER_WIDTH; 815 ACPI_GPE_REGISTER_WIDTH;
816 this_register->enable_address.register_bit_offset = 816 this_register->enable_address.bit_offset =
817 ACPI_GPE_REGISTER_WIDTH; 817 ACPI_GPE_REGISTER_WIDTH;
818 818
819 /* Init the event_info for each GPE within this register */ 819 /* Init the event_info for each GPE within this register */
820 820
821 for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) { 821 for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
822 this_event->register_bit = acpi_gbl_decode_to8bit[j]; 822 this_event->gpe_number =
823 (u8) (this_register->base_gpe_number + j);
823 this_event->register_info = this_register; 824 this_event->register_info = this_register;
824 this_event++; 825 this_event++;
825 } 826 }
@@ -1109,11 +1110,12 @@ acpi_status acpi_ev_gpe_initialize(void)
1109 * If EITHER the register length OR the block address are zero, then that 1110 * If EITHER the register length OR the block address are zero, then that
1110 * particular block is not supported. 1111 * particular block is not supported.
1111 */ 1112 */
1112 if (acpi_gbl_FADT->gpe0_blk_len && acpi_gbl_FADT->xgpe0_blk.address) { 1113 if (acpi_gbl_FADT.gpe0_block_length &&
1114 acpi_gbl_FADT.xgpe0_block.address) {
1113 1115
1114 /* GPE block 0 exists (has both length and address > 0) */ 1116 /* GPE block 0 exists (has both length and address > 0) */
1115 1117
1116 register_count0 = (u16) (acpi_gbl_FADT->gpe0_blk_len / 2); 1118 register_count0 = (u16) (acpi_gbl_FADT.gpe0_block_length / 2);
1117 1119
1118 gpe_number_max = 1120 gpe_number_max =
1119 (register_count0 * ACPI_GPE_REGISTER_WIDTH) - 1; 1121 (register_count0 * ACPI_GPE_REGISTER_WIDTH) - 1;
@@ -1121,9 +1123,9 @@ acpi_status acpi_ev_gpe_initialize(void)
1121 /* Install GPE Block 0 */ 1123 /* Install GPE Block 0 */
1122 1124
1123 status = acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device, 1125 status = acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
1124 &acpi_gbl_FADT->xgpe0_blk, 1126 &acpi_gbl_FADT.xgpe0_block,
1125 register_count0, 0, 1127 register_count0, 0,
1126 acpi_gbl_FADT->sci_int, 1128 acpi_gbl_FADT.sci_interrupt,
1127 &acpi_gbl_gpe_fadt_blocks[0]); 1129 &acpi_gbl_gpe_fadt_blocks[0]);
1128 1130
1129 if (ACPI_FAILURE(status)) { 1131 if (ACPI_FAILURE(status)) {
@@ -1132,20 +1134,21 @@ acpi_status acpi_ev_gpe_initialize(void)
1132 } 1134 }
1133 } 1135 }
1134 1136
1135 if (acpi_gbl_FADT->gpe1_blk_len && acpi_gbl_FADT->xgpe1_blk.address) { 1137 if (acpi_gbl_FADT.gpe1_block_length &&
1138 acpi_gbl_FADT.xgpe1_block.address) {
1136 1139
1137 /* GPE block 1 exists (has both length and address > 0) */ 1140 /* GPE block 1 exists (has both length and address > 0) */
1138 1141
1139 register_count1 = (u16) (acpi_gbl_FADT->gpe1_blk_len / 2); 1142 register_count1 = (u16) (acpi_gbl_FADT.gpe1_block_length / 2);
1140 1143
1141 /* Check for GPE0/GPE1 overlap (if both banks exist) */ 1144 /* Check for GPE0/GPE1 overlap (if both banks exist) */
1142 1145
1143 if ((register_count0) && 1146 if ((register_count0) &&
1144 (gpe_number_max >= acpi_gbl_FADT->gpe1_base)) { 1147 (gpe_number_max >= acpi_gbl_FADT.gpe1_base)) {
1145 ACPI_ERROR((AE_INFO, 1148 ACPI_ERROR((AE_INFO,
1146 "GPE0 block (GPE 0 to %d) overlaps the GPE1 block (GPE %d to %d) - Ignoring GPE1", 1149 "GPE0 block (GPE 0 to %d) overlaps the GPE1 block (GPE %d to %d) - Ignoring GPE1",
1147 gpe_number_max, acpi_gbl_FADT->gpe1_base, 1150 gpe_number_max, acpi_gbl_FADT.gpe1_base,
1148 acpi_gbl_FADT->gpe1_base + 1151 acpi_gbl_FADT.gpe1_base +
1149 ((register_count1 * 1152 ((register_count1 *
1150 ACPI_GPE_REGISTER_WIDTH) - 1))); 1153 ACPI_GPE_REGISTER_WIDTH) - 1)));
1151 1154
@@ -1157,10 +1160,11 @@ acpi_status acpi_ev_gpe_initialize(void)
1157 1160
1158 status = 1161 status =
1159 acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device, 1162 acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
1160 &acpi_gbl_FADT->xgpe1_blk, 1163 &acpi_gbl_FADT.xgpe1_block,
1161 register_count1, 1164 register_count1,
1162 acpi_gbl_FADT->gpe1_base, 1165 acpi_gbl_FADT.gpe1_base,
1163 acpi_gbl_FADT->sci_int, 1166 acpi_gbl_FADT.
1167 sci_interrupt,
1164 &acpi_gbl_gpe_fadt_blocks 1168 &acpi_gbl_gpe_fadt_blocks
1165 [1]); 1169 [1]);
1166 1170
@@ -1173,7 +1177,7 @@ acpi_status acpi_ev_gpe_initialize(void)
1173 * GPE0 and GPE1 do not have to be contiguous in the GPE number 1177 * GPE0 and GPE1 do not have to be contiguous in the GPE number
1174 * space. However, GPE0 always starts at GPE number zero. 1178 * space. However, GPE0 always starts at GPE number zero.
1175 */ 1179 */
1176 gpe_number_max = acpi_gbl_FADT->gpe1_base + 1180 gpe_number_max = acpi_gbl_FADT.gpe1_base +
1177 ((register_count1 * ACPI_GPE_REGISTER_WIDTH) - 1); 1181 ((register_count1 * ACPI_GPE_REGISTER_WIDTH) - 1);
1178 } 1182 }
1179 } 1183 }
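
For reference, the register layout that the evgpeblk.c hunks above encode: each GPE register covers eight GPEs, the status registers occupy the first half of the block, and each enable register sits register_count bytes after its matching status register. A minimal stand-alone sketch of that addressing scheme (simplified struct and names, not the ACPICA types):

#include <stdint.h>
#include <stdio.h>

#define ACPI_GPE_REGISTER_WIDTH 8   /* eight GPEs per status/enable register */

/* Simplified stand-in for the ACPICA register info struct */
struct gpe_register_model {
    uint64_t status_address;
    uint64_t enable_address;
    uint8_t  base_gpe_number;
};

/*
 * Status register i lives at block_address + i; the matching enable
 * register sits register_count bytes later, exactly as the hunk computes.
 */
static void init_gpe_registers(struct gpe_register_model *regs,
                               uint64_t block_address,
                               unsigned int register_count,
                               uint8_t block_base_number)
{
    for (unsigned int i = 0; i < register_count; i++) {
        regs[i].base_gpe_number =
            (uint8_t)(block_base_number + i * ACPI_GPE_REGISTER_WIDTH);
        regs[i].status_address = block_address + i;
        regs[i].enable_address = block_address + i + register_count;
    }
}

int main(void)
{
    struct gpe_register_model regs[2];

    init_gpe_registers(regs, 0x1100, 2, 0);    /* GPE0 block, 16 GPEs */
    printf("reg1: status=0x%llx enable=0x%llx base=GPE%02X\n",
           (unsigned long long)regs[1].status_address,
           (unsigned long long)regs[1].enable_address,
           regs[1].base_gpe_number);
    return 0;
}
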
diff --git a/drivers/acpi/events/evmisc.c b/drivers/acpi/events/evmisc.c
index bf63edc6608d..1b784ffe54c3 100644
--- a/drivers/acpi/events/evmisc.c
+++ b/drivers/acpi/events/evmisc.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -63,14 +63,18 @@ static const char *acpi_notify_value_names[] = {
63}; 63};
64#endif 64#endif
65 65
66/* Pointer to FACS needed for the Global Lock */
67
68static struct acpi_table_facs *facs = NULL;
69
66/* Local prototypes */ 70/* Local prototypes */
67 71
68static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context); 72static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context);
69 73
70static void ACPI_SYSTEM_XFACE acpi_ev_global_lock_thread(void *context);
71
72static u32 acpi_ev_global_lock_handler(void *context); 74static u32 acpi_ev_global_lock_handler(void *context);
73 75
76static acpi_status acpi_ev_remove_global_lock_handler(void);
77
74/******************************************************************************* 78/*******************************************************************************
75 * 79 *
76 * FUNCTION: acpi_ev_is_notify_object 80 * FUNCTION: acpi_ev_is_notify_object
@@ -282,49 +286,19 @@ static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context)
282 286
283/******************************************************************************* 287/*******************************************************************************
284 * 288 *
285 * FUNCTION: acpi_ev_global_lock_thread
286 *
287 * PARAMETERS: Context - From thread interface, not used
288 *
289 * RETURN: None
290 *
291 * DESCRIPTION: Invoked by SCI interrupt handler upon acquisition of the
292 * Global Lock. Simply signal all threads that are waiting
293 * for the lock.
294 *
295 ******************************************************************************/
296
297static void ACPI_SYSTEM_XFACE acpi_ev_global_lock_thread(void *context)
298{
299 acpi_status status;
300
301 /* Signal threads that are waiting for the lock */
302
303 if (acpi_gbl_global_lock_thread_count) {
304
305 /* Send sufficient units to the semaphore */
306
307 status =
308 acpi_os_signal_semaphore(acpi_gbl_global_lock_semaphore,
309 acpi_gbl_global_lock_thread_count);
310 if (ACPI_FAILURE(status)) {
311 ACPI_ERROR((AE_INFO,
312 "Could not signal Global Lock semaphore"));
313 }
314 }
315}
316
317/*******************************************************************************
318 *
319 * FUNCTION: acpi_ev_global_lock_handler 289 * FUNCTION: acpi_ev_global_lock_handler
320 * 290 *
321 * PARAMETERS: Context - From thread interface, not used 291 * PARAMETERS: Context - From thread interface, not used
322 * 292 *
323 * RETURN: ACPI_INTERRUPT_HANDLED or ACPI_INTERRUPT_NOT_HANDLED 293 * RETURN: ACPI_INTERRUPT_HANDLED
324 * 294 *
325 * DESCRIPTION: Invoked directly from the SCI handler when a global lock 295 * DESCRIPTION: Invoked directly from the SCI handler when a global lock
326 * release interrupt occurs. Grab the global lock and queue 296 * release interrupt occurs. Attempt to acquire the global lock,
327 * the global lock thread for execution 297 * if successful, signal the thread waiting for the lock.
298 *
299 * NOTE: Assumes that the semaphore can be signaled from interrupt level. If
300 * this is not possible for some reason, a separate thread will have to be
301 * scheduled to do this.
328 * 302 *
329 ******************************************************************************/ 303 ******************************************************************************/
330 304
@@ -333,16 +307,24 @@ static u32 acpi_ev_global_lock_handler(void *context)
333 u8 acquired = FALSE; 307 u8 acquired = FALSE;
334 308
335 /* 309 /*
336 * Attempt to get the lock 310 * Attempt to get the lock.
311 *
337 * If we don't get it now, it will be marked pending and we will 312 * If we don't get it now, it will be marked pending and we will
338 * take another interrupt when it becomes free. 313 * take another interrupt when it becomes free.
339 */ 314 */
340 ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_common_fACS.global_lock, acquired); 315 ACPI_ACQUIRE_GLOBAL_LOCK(facs, acquired);
341 if (acquired) { 316 if (acquired) {
342 317
343 /* Got the lock, now wake all threads waiting for it */ 318 /* Got the lock, now wake all threads waiting for it */
319
344 acpi_gbl_global_lock_acquired = TRUE; 320 acpi_gbl_global_lock_acquired = TRUE;
345 acpi_ev_global_lock_thread(context); 321 /* Send a unit to the semaphore */
322
323 if (ACPI_FAILURE(acpi_os_signal_semaphore(
324 acpi_gbl_global_lock_semaphore, 1))) {
325 ACPI_ERROR((AE_INFO,
326 "Could not signal Global Lock semaphore"));
327 }
346 } 328 }
347 329
348 return (ACPI_INTERRUPT_HANDLED); 330 return (ACPI_INTERRUPT_HANDLED);
@@ -366,6 +348,13 @@ acpi_status acpi_ev_init_global_lock_handler(void)
366 348
367 ACPI_FUNCTION_TRACE(ev_init_global_lock_handler); 349 ACPI_FUNCTION_TRACE(ev_init_global_lock_handler);
368 350
351 status =
352 acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
353 (struct acpi_table_header **)&facs);
354 if (ACPI_FAILURE(status)) {
355 return_ACPI_STATUS(status);
356 }
357
369 acpi_gbl_global_lock_present = TRUE; 358 acpi_gbl_global_lock_present = TRUE;
370 status = acpi_install_fixed_event_handler(ACPI_EVENT_GLOBAL, 359 status = acpi_install_fixed_event_handler(ACPI_EVENT_GLOBAL,
371 acpi_ev_global_lock_handler, 360 acpi_ev_global_lock_handler,
@@ -389,6 +378,31 @@ acpi_status acpi_ev_init_global_lock_handler(void)
389 return_ACPI_STATUS(status); 378 return_ACPI_STATUS(status);
390} 379}
391 380
381/*******************************************************************************
382 *
383 * FUNCTION: acpi_ev_remove_global_lock_handler
384 *
385 * PARAMETERS: None
386 *
387 * RETURN: Status
388 *
389 * DESCRIPTION: Remove the handler for the Global Lock
390 *
391 ******************************************************************************/
392
393static acpi_status acpi_ev_remove_global_lock_handler(void)
394{
395 acpi_status status;
396
397 ACPI_FUNCTION_TRACE(ev_remove_global_lock_handler);
398
399 acpi_gbl_global_lock_present = FALSE;
400 status = acpi_remove_fixed_event_handler(ACPI_EVENT_GLOBAL,
401 acpi_ev_global_lock_handler);
402
403 return_ACPI_STATUS(status);
404}
405
392/****************************************************************************** 406/******************************************************************************
393 * 407 *
394 * FUNCTION: acpi_ev_acquire_global_lock 408 * FUNCTION: acpi_ev_acquire_global_lock
@@ -399,6 +413,16 @@ acpi_status acpi_ev_init_global_lock_handler(void)
399 * 413 *
400 * DESCRIPTION: Attempt to gain ownership of the Global Lock. 414 * DESCRIPTION: Attempt to gain ownership of the Global Lock.
401 * 415 *
416 * MUTEX: Interpreter must be locked
417 *
418 * Note: The original implementation allowed multiple threads to "acquire" the
419 * Global Lock, and the OS would hold the lock until the last thread had
420 * released it. However, this could potentially starve the BIOS out of the
421 * lock, especially in the case where there is a tight handshake between the
422 * Embedded Controller driver and the BIOS. Therefore, this implementation
423 * allows only one thread to acquire the HW Global Lock at a time, and makes
424 * the global lock appear as a standard mutex on the OS side.
425 *
402 *****************************************************************************/ 426 *****************************************************************************/
403 427
404acpi_status acpi_ev_acquire_global_lock(u16 timeout) 428acpi_status acpi_ev_acquire_global_lock(u16 timeout)
@@ -408,53 +432,51 @@ acpi_status acpi_ev_acquire_global_lock(u16 timeout)
408 432
409 ACPI_FUNCTION_TRACE(ev_acquire_global_lock); 433 ACPI_FUNCTION_TRACE(ev_acquire_global_lock);
410 434
411#ifndef ACPI_APPLICATION 435 /*
412 /* Make sure that we actually have a global lock */ 436 * Only one thread can acquire the GL at a time, the global_lock_mutex
413 437 * enforces this. This interface releases the interpreter if we must wait.
414 if (!acpi_gbl_global_lock_present) { 438 */
415 return_ACPI_STATUS(AE_NO_GLOBAL_LOCK); 439 status = acpi_ex_system_wait_mutex(acpi_gbl_global_lock_mutex, timeout);
440 if (ACPI_FAILURE(status)) {
441 return_ACPI_STATUS(status);
416 } 442 }
417#endif
418
419 /* One more thread wants the global lock */
420
421 acpi_gbl_global_lock_thread_count++;
422 443
423 /* 444 /*
424 * If we (OS side vs. BIOS side) have the hardware lock already, 445 * Make sure that a global lock actually exists. If not, just treat
425 * we are done 446 * the lock as a standard mutex.
426 */ 447 */
427 if (acpi_gbl_global_lock_acquired) { 448 if (!acpi_gbl_global_lock_present) {
449 acpi_gbl_global_lock_acquired = TRUE;
428 return_ACPI_STATUS(AE_OK); 450 return_ACPI_STATUS(AE_OK);
429 } 451 }
430 452
431 /* We must acquire the actual hardware lock */ 453 /* Attempt to acquire the actual hardware lock */
432 454
433 ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_common_fACS.global_lock, acquired); 455 ACPI_ACQUIRE_GLOBAL_LOCK(facs, acquired);
434 if (acquired) { 456 if (acquired) {
435 457
436 /* We got the lock */ 458 /* We got the lock */
437 459
438 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, 460 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
439 "Acquired the HW Global Lock\n")); 461 "Acquired hardware Global Lock\n"));
440 462
441 acpi_gbl_global_lock_acquired = TRUE; 463 acpi_gbl_global_lock_acquired = TRUE;
442 return_ACPI_STATUS(AE_OK); 464 return_ACPI_STATUS(AE_OK);
443 } 465 }
444 466
445 /* 467 /*
446 * Did not get the lock. The pending bit was set above, and we must now 468 * Did not get the lock. The pending bit was set above, and we must now
447 * wait until we get the global lock released interrupt. 469 * wait until we get the global lock released interrupt.
448 */ 470 */
449 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Waiting for the HW Global Lock\n")); 471 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Waiting for hardware Global Lock\n"));
450 472
451 /* 473 /*
452 * Acquire the global lock semaphore first. 474 * Wait for handshake with the global lock interrupt handler.
453 * Since this wait will block, we must release the interpreter 475 * This interface releases the interpreter if we must wait.
454 */ 476 */
455 status = 477 status = acpi_ex_system_wait_semaphore(acpi_gbl_global_lock_semaphore,
456 acpi_ex_system_wait_semaphore(acpi_gbl_global_lock_semaphore, 478 ACPI_WAIT_FOREVER);
457 timeout); 479
458 return_ACPI_STATUS(status); 480 return_ACPI_STATUS(status);
459} 481}
460 482
@@ -477,38 +499,39 @@ acpi_status acpi_ev_release_global_lock(void)
477 499
478 ACPI_FUNCTION_TRACE(ev_release_global_lock); 500 ACPI_FUNCTION_TRACE(ev_release_global_lock);
479 501
480 if (!acpi_gbl_global_lock_thread_count) { 502 /* Lock must already be acquired */
503
504 if (!acpi_gbl_global_lock_acquired) {
481 ACPI_WARNING((AE_INFO, 505 ACPI_WARNING((AE_INFO,
482 "Cannot release HW Global Lock, it has not been acquired")); 506 "Cannot release the ACPI Global Lock, it has not been acquired"));
483 return_ACPI_STATUS(AE_NOT_ACQUIRED); 507 return_ACPI_STATUS(AE_NOT_ACQUIRED);
484 } 508 }
485 509
486 /* One fewer thread has the global lock */ 510 if (acpi_gbl_global_lock_present) {
487 511
488 acpi_gbl_global_lock_thread_count--; 512 /* Allow any thread to release the lock */
489 if (acpi_gbl_global_lock_thread_count) {
490 513
491 /* There are still some threads holding the lock, cannot release */ 514 ACPI_RELEASE_GLOBAL_LOCK(facs, pending);
492 515
493 return_ACPI_STATUS(AE_OK); 516 /*
517 * If the pending bit was set, we must write GBL_RLS to the control
518 * register
519 */
520 if (pending) {
521 status =
522 acpi_set_register(ACPI_BITREG_GLOBAL_LOCK_RELEASE,
523 1);
524 }
525
526 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
527 "Released hardware Global Lock\n"));
494 } 528 }
495 529
496 /*
497 * No more threads holding lock, we can do the actual hardware
498 * release
499 */
500 ACPI_RELEASE_GLOBAL_LOCK(acpi_gbl_common_fACS.global_lock, pending);
501 acpi_gbl_global_lock_acquired = FALSE; 530 acpi_gbl_global_lock_acquired = FALSE;
502 531
503 /* 532 /* Release the local GL mutex */
504 * If the pending bit was set, we must write GBL_RLS to the control
505 * register
506 */
507 if (pending) {
508 status = acpi_set_register(ACPI_BITREG_GLOBAL_LOCK_RELEASE,
509 1, ACPI_MTX_LOCK);
510 }
511 533
534 acpi_os_release_mutex(acpi_gbl_global_lock_mutex);
512 return_ACPI_STATUS(status); 535 return_ACPI_STATUS(status);
513} 536}
514 537
@@ -558,6 +581,12 @@ void acpi_ev_terminate(void)
558 if (ACPI_FAILURE(status)) { 581 if (ACPI_FAILURE(status)) {
559 ACPI_ERROR((AE_INFO, "Could not remove SCI handler")); 582 ACPI_ERROR((AE_INFO, "Could not remove SCI handler"));
560 } 583 }
584
585 status = acpi_ev_remove_global_lock_handler();
586 if (ACPI_FAILURE(status)) {
587 ACPI_ERROR((AE_INFO,
588 "Could not remove Global Lock handler"));
589 }
561 } 590 }
562 591
563 /* Deallocate all handler objects installed within GPE info structs */ 592 /* Deallocate all handler objects installed within GPE info structs */
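
Taken together, the evmisc.c hunks replace the thread-count scheme with a three-layer protocol: an OS mutex serializes would-be owners, the FACS hardware lock is then attempted, and a semaphore signaled from the SCI-level handler completes the handshake when the lock was left pending. A stand-alone model of the acquire path, with the plumbing stubbed out (all names are illustrative stand-ins, not the ACPICA globals; in the real patch the owned flag is set by the interrupt handler before the semaphore is signaled):

#include <stdbool.h>
#include <stdio.h>

static bool global_lock_present  = true;
static bool global_lock_acquired = false;

static bool wait_mutex(int timeout_ms)   { (void)timeout_ms; return true; }
static bool try_hw_lock(void)            { return false; /* pending bit set */ }
static bool wait_release_interrupt(void) { return true;  /* SCI handler signaled */ }

static bool acquire_global_lock(int timeout_ms)
{
    /* Layer 1: only one OS thread may own the Global Lock at a time */
    if (!wait_mutex(timeout_ms))
        return false;

    /* No hardware lock in the FACS: behave as a plain mutex */
    if (!global_lock_present) {
        global_lock_acquired = true;
        return true;
    }

    /* Layer 2: try the FACS hardware lock; failure sets the pending bit */
    if (try_hw_lock()) {
        global_lock_acquired = true;
        return true;
    }

    /* Layer 3: block until the release interrupt wakes us; the handler
     * re-acquires the hardware lock and signals the semaphore */
    return wait_release_interrupt();
}

int main(void)
{
    printf("GL acquired: %d\n", acquire_global_lock(1000));
    return 0;
}
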
diff --git a/drivers/acpi/events/evregion.c b/drivers/acpi/events/evregion.c
index 21caae04fe85..e99f0c435a47 100644
--- a/drivers/acpi/events/evregion.c
+++ b/drivers/acpi/events/evregion.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -291,7 +291,6 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
291 u32 bit_width, acpi_integer * value) 291 u32 bit_width, acpi_integer * value)
292{ 292{
293 acpi_status status; 293 acpi_status status;
294 acpi_status status2;
295 acpi_adr_space_handler handler; 294 acpi_adr_space_handler handler;
296 acpi_adr_space_setup region_setup; 295 acpi_adr_space_setup region_setup;
297 union acpi_operand_object *handler_desc; 296 union acpi_operand_object *handler_desc;
@@ -345,7 +344,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
345 * setup will potentially execute control methods 344 * setup will potentially execute control methods
346 * (e.g., _REG method for this region) 345 * (e.g., _REG method for this region)
347 */ 346 */
348 acpi_ex_exit_interpreter(); 347 acpi_ex_relinquish_interpreter();
349 348
350 status = region_setup(region_obj, ACPI_REGION_ACTIVATE, 349 status = region_setup(region_obj, ACPI_REGION_ACTIVATE,
351 handler_desc->address_space.context, 350 handler_desc->address_space.context,
@@ -353,10 +352,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
353 352
354 /* Re-enter the interpreter */ 353 /* Re-enter the interpreter */
355 354
356 status2 = acpi_ex_enter_interpreter(); 355 acpi_ex_reacquire_interpreter();
357 if (ACPI_FAILURE(status2)) {
358 return_ACPI_STATUS(status2);
359 }
360 356
361 /* Check for failure of the Region Setup */ 357 /* Check for failure of the Region Setup */
362 358
@@ -409,7 +405,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
409 * exit the interpreter because the handler *might* block -- we don't 405 * exit the interpreter because the handler *might* block -- we don't
410 * know what it will do, so we can't hold the lock on the interpreter. 406 * know what it will do, so we can't hold the lock on the interpreter.
411 */ 407 */
412 acpi_ex_exit_interpreter(); 408 acpi_ex_relinquish_interpreter();
413 } 409 }
414 410
415 /* Call the handler */ 411 /* Call the handler */
@@ -430,10 +426,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
430 * We just returned from a non-default handler, we must re-enter the 426 * We just returned from a non-default handler, we must re-enter the
431 * interpreter 427 * interpreter
432 */ 428 */
433 status2 = acpi_ex_enter_interpreter(); 429 acpi_ex_reacquire_interpreter();
434 if (ACPI_FAILURE(status2)) {
435 return_ACPI_STATUS(status2);
436 }
437 } 430 }
438 431
439 return_ACPI_STATUS(status); 432 return_ACPI_STATUS(status);
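
The evregion.c change swaps the fallible enter/exit pair for void relinquish/reacquire calls, so the dispatch path no longer threads a second status value through. A stand-alone sketch of the pattern (local stand-in names, not the ACPICA API):

#include <stdbool.h>
#include <stdio.h>

static bool interpreter_locked = true;

static void relinquish_interpreter(void) { interpreter_locked = false; }
static void reacquire_interpreter(void)  { interpreter_locked = true;  }

static int dispatch_to_handler(void)
{
    /* The handler may block or run _REG methods, so drop the lock first */
    relinquish_interpreter();
    int status = 0;                  /* stand-in for the handler call */
    reacquire_interpreter();         /* void: no second status to check */
    return status;
}

int main(void)
{
    printf("status=%d locked=%d\n", dispatch_to_handler(), interpreter_locked);
    return 0;
}
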
diff --git a/drivers/acpi/events/evrgnini.c b/drivers/acpi/events/evrgnini.c
index 203d1359190a..a4fa7e6822a3 100644
--- a/drivers/acpi/events/evrgnini.c
+++ b/drivers/acpi/events/evrgnini.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -48,6 +48,11 @@
48#define _COMPONENT ACPI_EVENTS 48#define _COMPONENT ACPI_EVENTS
49ACPI_MODULE_NAME("evrgnini") 49ACPI_MODULE_NAME("evrgnini")
50 50
51/* Local prototypes */
52static u8 acpi_ev_match_pci_root_bridge(char *id);
53
54static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node);
55
51/******************************************************************************* 56/*******************************************************************************
52 * 57 *
53 * FUNCTION: acpi_ev_system_memory_region_setup 58 * FUNCTION: acpi_ev_system_memory_region_setup
@@ -62,6 +67,7 @@ ACPI_MODULE_NAME("evrgnini")
62 * DESCRIPTION: Setup a system_memory operation region 67 * DESCRIPTION: Setup a system_memory operation region
63 * 68 *
64 ******************************************************************************/ 69 ******************************************************************************/
70
65acpi_status 71acpi_status
66acpi_ev_system_memory_region_setup(acpi_handle handle, 72acpi_ev_system_memory_region_setup(acpi_handle handle,
67 u32 function, 73 u32 function,
@@ -168,9 +174,9 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
168 union acpi_operand_object *handler_obj; 174 union acpi_operand_object *handler_obj;
169 struct acpi_namespace_node *parent_node; 175 struct acpi_namespace_node *parent_node;
170 struct acpi_namespace_node *pci_root_node; 176 struct acpi_namespace_node *pci_root_node;
177 struct acpi_namespace_node *pci_device_node;
171 union acpi_operand_object *region_obj = 178 union acpi_operand_object *region_obj =
172 (union acpi_operand_object *)handle; 179 (union acpi_operand_object *)handle;
173 struct acpi_device_id object_hID;
174 180
175 ACPI_FUNCTION_TRACE(ev_pci_config_region_setup); 181 ACPI_FUNCTION_TRACE(ev_pci_config_region_setup);
176 182
@@ -215,45 +221,30 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
215 221
216 pci_root_node = parent_node; 222 pci_root_node = parent_node;
217 while (pci_root_node != acpi_gbl_root_node) { 223 while (pci_root_node != acpi_gbl_root_node) {
218 status =
219 acpi_ut_execute_HID(pci_root_node, &object_hID);
220 if (ACPI_SUCCESS(status)) {
221 /*
222 * Got a valid _HID string, check if this is a PCI root.
223 * New for ACPI 3.0: check for a PCI Express root also.
224 */
225 if (!
226 (ACPI_STRNCMP
227 (object_hID.value, PCI_ROOT_HID_STRING,
228 sizeof(PCI_ROOT_HID_STRING)))
229 ||
230 !(ACPI_STRNCMP
231 (object_hID.value,
232 PCI_EXPRESS_ROOT_HID_STRING,
233 sizeof(PCI_EXPRESS_ROOT_HID_STRING)))) {
234
235 /* Install a handler for this PCI root bridge */
236 224
237 status = 225 /* Get the _HID/_CID in order to detect a root_bridge */
238 acpi_install_address_space_handler((acpi_handle) pci_root_node, ACPI_ADR_SPACE_PCI_CONFIG, ACPI_DEFAULT_HANDLER, NULL, NULL); 226
239 if (ACPI_FAILURE(status)) { 227 if (acpi_ev_is_pci_root_bridge(pci_root_node)) {
240 if (status == AE_SAME_HANDLER) { 228
241 /* 229 /* Install a handler for this PCI root bridge */
242 * It is OK if the handler is already installed on the root 230
243 * bridge. Still need to return a context object for the 231 status = acpi_install_address_space_handler((acpi_handle) pci_root_node, ACPI_ADR_SPACE_PCI_CONFIG, ACPI_DEFAULT_HANDLER, NULL, NULL);
244 * new PCI_Config operation region, however. 232 if (ACPI_FAILURE(status)) {
245 */ 233 if (status == AE_SAME_HANDLER) {
246 status = AE_OK; 234 /*
247 } else { 235 * It is OK if the handler is already installed on the root
248 ACPI_EXCEPTION((AE_INFO, 236 * bridge. Still need to return a context object for the
249 status, 237 * new PCI_Config operation region, however.
250 "Could not install PciConfig handler for Root Bridge %4.4s", 238 */
251 acpi_ut_get_node_name 239 status = AE_OK;
252 (pci_root_node))); 240 } else {
253 } 241 ACPI_EXCEPTION((AE_INFO, status,
242 "Could not install PciConfig handler for Root Bridge %4.4s",
243 acpi_ut_get_node_name
244 (pci_root_node)));
254 } 245 }
255 break;
256 } 246 }
247 break;
257 } 248 }
258 249
259 pci_root_node = acpi_ns_get_parent_node(pci_root_node); 250 pci_root_node = acpi_ns_get_parent_node(pci_root_node);
@@ -282,14 +273,25 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
282 /* 273 /*
283 * For PCI_Config space access, we need the segment, bus, 274 * For PCI_Config space access, we need the segment, bus,
284 * device and function numbers. Acquire them here. 275 * device and function numbers. Acquire them here.
276 *
277 * Find the parent device object. (This allows the operation region to be
278 * within a subscope under the device, such as a control method.)
285 */ 279 */
280 pci_device_node = region_obj->region.node;
281 while (pci_device_node && (pci_device_node->type != ACPI_TYPE_DEVICE)) {
282 pci_device_node = acpi_ns_get_parent_node(pci_device_node);
283 }
284
285 if (!pci_device_node) {
286 return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
287 }
286 288
287 /* 289 /*
288 * Get the PCI device and function numbers from the _ADR object 290 * Get the PCI device and function numbers from the _ADR object
289 * contained in the parent's scope. 291 * contained in the parent's scope.
290 */ 292 */
291 status = 293 status =
292 acpi_ut_evaluate_numeric_object(METHOD_NAME__ADR, parent_node, 294 acpi_ut_evaluate_numeric_object(METHOD_NAME__ADR, pci_device_node,
293 &pci_value); 295 &pci_value);
294 296
295 /* 297 /*
@@ -329,6 +331,91 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
329 331
330/******************************************************************************* 332/*******************************************************************************
331 * 333 *
334 * FUNCTION: acpi_ev_match_pci_root_bridge
335 *
336 * PARAMETERS: Id - The HID/CID in string format
337 *
338 * RETURN: TRUE if the Id is a match for a PCI/PCI-Express Root Bridge
339 *
340 * DESCRIPTION: Determine if the input ID is a PCI Root Bridge ID.
341 *
342 ******************************************************************************/
343
344static u8 acpi_ev_match_pci_root_bridge(char *id)
345{
346
347 /*
348 * Check if this is a PCI root.
349 * ACPI 3.0+: check for a PCI Express root also.
350 */
351 if (!(ACPI_STRNCMP(id,
352 PCI_ROOT_HID_STRING,
353 sizeof(PCI_ROOT_HID_STRING))) ||
354 !(ACPI_STRNCMP(id,
355 PCI_EXPRESS_ROOT_HID_STRING,
356 sizeof(PCI_EXPRESS_ROOT_HID_STRING)))) {
357 return (TRUE);
358 }
359
360 return (FALSE);
361}
362
363/*******************************************************************************
364 *
365 * FUNCTION: acpi_ev_is_pci_root_bridge
366 *
367 * PARAMETERS: Node - Device node being examined
368 *
369 * RETURN: TRUE if device is a PCI/PCI-Express Root Bridge
370 *
371 * DESCRIPTION: Determine if the input device represents a PCI Root Bridge by
372 * examining the _HID and _CID for the device.
373 *
374 ******************************************************************************/
375
376static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node)
377{
378 acpi_status status;
379 struct acpi_device_id hid;
380 struct acpi_compatible_id_list *cid;
381 acpi_native_uint i;
382
383 /*
384 * Get the _HID and check for a PCI Root Bridge
385 */
386 status = acpi_ut_execute_HID(node, &hid);
387 if (ACPI_FAILURE(status)) {
388 return (FALSE);
389 }
390
391 if (acpi_ev_match_pci_root_bridge(hid.value)) {
392 return (TRUE);
393 }
394
395 /*
396 * The _HID did not match.
397 * Get the _CID and check for a PCI Root Bridge
398 */
399 status = acpi_ut_execute_CID(node, &cid);
400 if (ACPI_FAILURE(status)) {
401 return (FALSE);
402 }
403
404 /* Check all _CIDs in the returned list */
405
406 for (i = 0; i < cid->count; i++) {
407 if (acpi_ev_match_pci_root_bridge(cid->id[i].value)) {
408 ACPI_FREE(cid);
409 return (TRUE);
410 }
411 }
412
413 ACPI_FREE(cid);
414 return (FALSE);
415}
416
417/*******************************************************************************
418 *
332 * FUNCTION: acpi_ev_pci_bar_region_setup 419 * FUNCTION: acpi_ev_pci_bar_region_setup
333 * 420 *
334 * PARAMETERS: Handle - Region we are interested in 421 * PARAMETERS: Handle - Region we are interested in
@@ -432,6 +519,9 @@ acpi_ev_default_region_setup(acpi_handle handle,
432 * a PCI address in the scope of the definition. This address is 519 * a PCI address in the scope of the definition. This address is
433 * required to perform an access to PCI config space. 520 * required to perform an access to PCI config space.
434 * 521 *
522 * MUTEX: Interpreter should be unlocked, because we may run the _REG
523 * method for this region.
524 *
435 ******************************************************************************/ 525 ******************************************************************************/
436 526
437acpi_status 527acpi_status
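
The new root-bridge detection in evrgnini.c reduces to a pair of string compares against the PCI and PCI Express root HIDs, applied first to _HID and then to every _CID in the device's list. A stand-alone version of the match helper (the two HID strings are the real ACPI IDs; everything else is simplified):

#include <string.h>
#include <stdbool.h>
#include <stdio.h>

#define PCI_ROOT_HID_STRING         "PNP0A03"
#define PCI_EXPRESS_ROOT_HID_STRING "PNP0A08"

static bool match_pci_root_bridge(const char *id)
{
    /* A PCI root, or (ACPI 3.0+) a PCI Express root */
    return strncmp(id, PCI_ROOT_HID_STRING,
                   sizeof(PCI_ROOT_HID_STRING)) == 0 ||
           strncmp(id, PCI_EXPRESS_ROOT_HID_STRING,
                   sizeof(PCI_EXPRESS_ROOT_HID_STRING)) == 0;
}

int main(void)
{
    printf("%d %d %d\n",
           match_pci_root_bridge("PNP0A03"),   /* PCI root:  1 */
           match_pci_root_bridge("PNP0A08"),   /* PCIe root: 1 */
           match_pci_root_bridge("PNP0C0A"));  /* battery:   0 */
    return 0;
}
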
diff --git a/drivers/acpi/events/evsci.c b/drivers/acpi/events/evsci.c
index 8106215ad554..7e5d15ce2395 100644
--- a/drivers/acpi/events/evsci.c
+++ b/drivers/acpi/events/evsci.c
@@ -6,7 +6,7 @@
6 ******************************************************************************/ 6 ******************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -142,9 +142,10 @@ u32 acpi_ev_install_sci_handler(void)
142 142
143 ACPI_FUNCTION_TRACE(ev_install_sci_handler); 143 ACPI_FUNCTION_TRACE(ev_install_sci_handler);
144 144
145 status = acpi_os_install_interrupt_handler((u32) acpi_gbl_FADT->sci_int, 145 status =
146 acpi_ev_sci_xrupt_handler, 146 acpi_os_install_interrupt_handler((u32) acpi_gbl_FADT.sci_interrupt,
147 acpi_gbl_gpe_xrupt_list_head); 147 acpi_ev_sci_xrupt_handler,
148 acpi_gbl_gpe_xrupt_list_head);
148 return_ACPI_STATUS(status); 149 return_ACPI_STATUS(status);
149} 150}
150 151
@@ -175,8 +176,9 @@ acpi_status acpi_ev_remove_sci_handler(void)
175 176
176 /* Just let the OS remove the handler and disable the level */ 177 /* Just let the OS remove the handler and disable the level */
177 178
178 status = acpi_os_remove_interrupt_handler((u32) acpi_gbl_FADT->sci_int, 179 status =
179 acpi_ev_sci_xrupt_handler); 180 acpi_os_remove_interrupt_handler((u32) acpi_gbl_FADT.sci_interrupt,
181 acpi_ev_sci_xrupt_handler);
180 182
181 return_ACPI_STATUS(status); 183 return_ACPI_STATUS(status);
182} 184}
diff --git a/drivers/acpi/events/evxface.c b/drivers/acpi/events/evxface.c
index 923fd2b46955..685a103a3587 100644
--- a/drivers/acpi/events/evxface.c
+++ b/drivers/acpi/events/evxface.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -768,11 +768,9 @@ acpi_status acpi_acquire_global_lock(u16 timeout, u32 * handle)
768 return (AE_BAD_PARAMETER); 768 return (AE_BAD_PARAMETER);
769 } 769 }
770 770
771 status = acpi_ex_enter_interpreter(); 771 /* Must lock interpreter to prevent race conditions */
772 if (ACPI_FAILURE(status)) {
773 return (status);
774 }
775 772
773 acpi_ex_enter_interpreter();
776 status = acpi_ev_acquire_global_lock(timeout); 774 status = acpi_ev_acquire_global_lock(timeout);
777 acpi_ex_exit_interpreter(); 775 acpi_ex_exit_interpreter();
778 776
diff --git a/drivers/acpi/events/evxfevnt.c b/drivers/acpi/events/evxfevnt.c
index 7ebc2efac936..17065e98807c 100644
--- a/drivers/acpi/events/evxfevnt.c
+++ b/drivers/acpi/events/evxfevnt.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -44,6 +44,7 @@
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acevents.h> 45#include <acpi/acevents.h>
46#include <acpi/acnamesp.h> 46#include <acpi/acnamesp.h>
47#include <acpi/actables.h>
47 48
48#define _COMPONENT ACPI_EVENTS 49#define _COMPONENT ACPI_EVENTS
49ACPI_MODULE_NAME("evxfevnt") 50ACPI_MODULE_NAME("evxfevnt")
@@ -65,13 +66,14 @@ acpi_status acpi_enable(void)
65 66
66 ACPI_FUNCTION_TRACE(acpi_enable); 67 ACPI_FUNCTION_TRACE(acpi_enable);
67 68
68 /* Make sure we have the FADT */ 69 /* ACPI tables must be present */
69 70
70 if (!acpi_gbl_FADT) { 71 if (!acpi_tb_tables_loaded()) {
71 ACPI_WARNING((AE_INFO, "No FADT information present!"));
72 return_ACPI_STATUS(AE_NO_ACPI_TABLES); 72 return_ACPI_STATUS(AE_NO_ACPI_TABLES);
73 } 73 }
74 74
75 /* Check current mode */
76
75 if (acpi_hw_get_mode() == ACPI_SYS_MODE_ACPI) { 77 if (acpi_hw_get_mode() == ACPI_SYS_MODE_ACPI) {
76 ACPI_DEBUG_PRINT((ACPI_DB_INIT, 78 ACPI_DEBUG_PRINT((ACPI_DB_INIT,
77 "System is already in ACPI mode\n")); 79 "System is already in ACPI mode\n"));
@@ -111,11 +113,6 @@ acpi_status acpi_disable(void)
111 113
112 ACPI_FUNCTION_TRACE(acpi_disable); 114 ACPI_FUNCTION_TRACE(acpi_disable);
113 115
114 if (!acpi_gbl_FADT) {
115 ACPI_WARNING((AE_INFO, "No FADT information present!"));
116 return_ACPI_STATUS(AE_NO_ACPI_TABLES);
117 }
118
119 if (acpi_hw_get_mode() == ACPI_SYS_MODE_LEGACY) { 116 if (acpi_hw_get_mode() == ACPI_SYS_MODE_LEGACY) {
120 ACPI_DEBUG_PRINT((ACPI_DB_INIT, 117 ACPI_DEBUG_PRINT((ACPI_DB_INIT,
121 "System is already in legacy (non-ACPI) mode\n")); 118 "System is already in legacy (non-ACPI) mode\n"));
@@ -169,7 +166,7 @@ acpi_status acpi_enable_event(u32 event, u32 flags)
169 */ 166 */
170 status = 167 status =
171 acpi_set_register(acpi_gbl_fixed_event_info[event]. 168 acpi_set_register(acpi_gbl_fixed_event_info[event].
172 enable_register_id, 1, ACPI_MTX_LOCK); 169 enable_register_id, 1);
173 if (ACPI_FAILURE(status)) { 170 if (ACPI_FAILURE(status)) {
174 return_ACPI_STATUS(status); 171 return_ACPI_STATUS(status);
175 } 172 }
@@ -178,7 +175,7 @@ acpi_status acpi_enable_event(u32 event, u32 flags)
178 175
179 status = 176 status =
180 acpi_get_register(acpi_gbl_fixed_event_info[event]. 177 acpi_get_register(acpi_gbl_fixed_event_info[event].
181 enable_register_id, &value, ACPI_MTX_LOCK); 178 enable_register_id, &value);
182 if (ACPI_FAILURE(status)) { 179 if (ACPI_FAILURE(status)) {
183 return_ACPI_STATUS(status); 180 return_ACPI_STATUS(status);
184 } 181 }
@@ -368,14 +365,14 @@ acpi_status acpi_disable_event(u32 event, u32 flags)
368 */ 365 */
369 status = 366 status =
370 acpi_set_register(acpi_gbl_fixed_event_info[event]. 367 acpi_set_register(acpi_gbl_fixed_event_info[event].
371 enable_register_id, 0, ACPI_MTX_LOCK); 368 enable_register_id, 0);
372 if (ACPI_FAILURE(status)) { 369 if (ACPI_FAILURE(status)) {
373 return_ACPI_STATUS(status); 370 return_ACPI_STATUS(status);
374 } 371 }
375 372
376 status = 373 status =
377 acpi_get_register(acpi_gbl_fixed_event_info[event]. 374 acpi_get_register(acpi_gbl_fixed_event_info[event].
378 enable_register_id, &value, ACPI_MTX_LOCK); 375 enable_register_id, &value);
379 if (ACPI_FAILURE(status)) { 376 if (ACPI_FAILURE(status)) {
380 return_ACPI_STATUS(status); 377 return_ACPI_STATUS(status);
381 } 378 }
@@ -421,7 +418,7 @@ acpi_status acpi_clear_event(u32 event)
421 */ 418 */
422 status = 419 status =
423 acpi_set_register(acpi_gbl_fixed_event_info[event]. 420 acpi_set_register(acpi_gbl_fixed_event_info[event].
424 status_register_id, 1, ACPI_MTX_LOCK); 421 status_register_id, 1);
425 422
426 return_ACPI_STATUS(status); 423 return_ACPI_STATUS(status);
427} 424}
@@ -510,7 +507,7 @@ acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status)
510 507
511 status = 508 status =
512 acpi_get_register(acpi_gbl_fixed_event_info[event]. 509 acpi_get_register(acpi_gbl_fixed_event_info[event].
513 status_register_id, event_status, ACPI_MTX_LOCK); 510 status_register_id, event_status);
514 511
515 return_ACPI_STATUS(status); 512 return_ACPI_STATUS(status);
516} 513}
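
The evxfevnt.c call sites track a register-API change: acpi_get_register/acpi_set_register now take their lock internally, so the ACPI_MTX_LOCK argument disappears everywhere. A stand-alone sketch of that design (illustrative names, not the ACPICA API):

#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t hw_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int fake_register;

static int set_register(unsigned int value)
{
    pthread_mutex_lock(&hw_lock);     /* locking moved inside the accessor */
    fake_register = value;
    pthread_mutex_unlock(&hw_lock);
    return 0;
}

int main(void)
{
    set_register(1);
    printf("register=%u\n", fake_register);
    return 0;
}
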
diff --git a/drivers/acpi/events/evxfregn.c b/drivers/acpi/events/evxfregn.c
index 83b12a9afa32..7bf09c5fb242 100644
--- a/drivers/acpi/events/evxfregn.c
+++ b/drivers/acpi/events/evxfregn.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/executer/exconfig.c b/drivers/acpi/executer/exconfig.c
index c8341fa5fe01..25802f302ffe 100644
--- a/drivers/acpi/executer/exconfig.c
+++ b/drivers/acpi/executer/exconfig.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -54,7 +54,7 @@ ACPI_MODULE_NAME("exconfig")
54 54
55/* Local prototypes */ 55/* Local prototypes */
56static acpi_status 56static acpi_status
57acpi_ex_add_table(struct acpi_table_header *table, 57acpi_ex_add_table(acpi_native_uint table_index,
58 struct acpi_namespace_node *parent_node, 58 struct acpi_namespace_node *parent_node,
59 union acpi_operand_object **ddb_handle); 59 union acpi_operand_object **ddb_handle);
60 60
@@ -74,12 +74,11 @@ acpi_ex_add_table(struct acpi_table_header *table,
74 ******************************************************************************/ 74 ******************************************************************************/
75 75
76static acpi_status 76static acpi_status
77acpi_ex_add_table(struct acpi_table_header *table, 77acpi_ex_add_table(acpi_native_uint table_index,
78 struct acpi_namespace_node *parent_node, 78 struct acpi_namespace_node *parent_node,
79 union acpi_operand_object **ddb_handle) 79 union acpi_operand_object **ddb_handle)
80{ 80{
81 acpi_status status; 81 acpi_status status;
82 struct acpi_table_desc table_info;
83 union acpi_operand_object *obj_desc; 82 union acpi_operand_object *obj_desc;
84 83
85 ACPI_FUNCTION_TRACE(ex_add_table); 84 ACPI_FUNCTION_TRACE(ex_add_table);
@@ -98,42 +97,16 @@ acpi_ex_add_table(struct acpi_table_header *table,
98 97
99 /* Install the new table into the local data structures */ 98 /* Install the new table into the local data structures */
100 99
101 ACPI_MEMSET(&table_info, 0, sizeof(struct acpi_table_desc)); 100 obj_desc->reference.object = ACPI_CAST_PTR(void, table_index);
102
103 table_info.type = ACPI_TABLE_ID_SSDT;
104 table_info.pointer = table;
105 table_info.length = (acpi_size) table->length;
106 table_info.allocation = ACPI_MEM_ALLOCATED;
107
108 status = acpi_tb_install_table(&table_info);
109 obj_desc->reference.object = table_info.installed_desc;
110
111 if (ACPI_FAILURE(status)) {
112 if (status == AE_ALREADY_EXISTS) {
113
114 /* Table already exists, just return the handle */
115
116 return_ACPI_STATUS(AE_OK);
117 }
118 goto cleanup;
119 }
120 101
121 /* Add the table to the namespace */ 102 /* Add the table to the namespace */
122 103
123 status = acpi_ns_load_table(table_info.installed_desc, parent_node); 104 status = acpi_ns_load_table(table_index, parent_node);
124 if (ACPI_FAILURE(status)) { 105 if (ACPI_FAILURE(status)) {
125 106 acpi_ut_remove_reference(obj_desc);
126 /* Uninstall table on error */ 107 *ddb_handle = NULL;
127
128 (void)acpi_tb_uninstall_table(table_info.installed_desc);
129 goto cleanup;
130 } 108 }
131 109
132 return_ACPI_STATUS(AE_OK);
133
134 cleanup:
135 acpi_ut_remove_reference(obj_desc);
136 *ddb_handle = NULL;
137 return_ACPI_STATUS(status); 110 return_ACPI_STATUS(status);
138} 111}
139 112
@@ -146,7 +119,7 @@ acpi_ex_add_table(struct acpi_table_header *table,
146 * 119 *
147 * RETURN: Status 120 * RETURN: Status
148 * 121 *
149 * DESCRIPTION: Load an ACPI table 122 * DESCRIPTION: Load an ACPI table from the RSDT/XSDT
150 * 123 *
151 ******************************************************************************/ 124 ******************************************************************************/
152 125
@@ -156,33 +129,20 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
156{ 129{
157 acpi_status status; 130 acpi_status status;
158 union acpi_operand_object **operand = &walk_state->operands[0]; 131 union acpi_operand_object **operand = &walk_state->operands[0];
159 struct acpi_table_header *table; 132 acpi_native_uint table_index;
160 struct acpi_namespace_node *parent_node; 133 struct acpi_namespace_node *parent_node;
161 struct acpi_namespace_node *start_node; 134 struct acpi_namespace_node *start_node;
162 struct acpi_namespace_node *parameter_node = NULL; 135 struct acpi_namespace_node *parameter_node = NULL;
163 union acpi_operand_object *ddb_handle; 136 union acpi_operand_object *ddb_handle;
137 struct acpi_table_header *table;
164 138
165 ACPI_FUNCTION_TRACE(ex_load_table_op); 139 ACPI_FUNCTION_TRACE(ex_load_table_op);
166 140
167#if 0 141 /* Find the ACPI table in the RSDT/XSDT */
168 /*
169 * Make sure that the signature does not match one of the tables that
170 * is already loaded.
171 */
172 status = acpi_tb_match_signature(operand[0]->string.pointer, NULL);
173 if (status == AE_OK) {
174
175 /* Signature matched -- don't allow override */
176
177 return_ACPI_STATUS(AE_ALREADY_EXISTS);
178 }
179#endif
180
181 /* Find the ACPI table */
182 142
183 status = acpi_tb_find_table(operand[0]->string.pointer, 143 status = acpi_tb_find_table(operand[0]->string.pointer,
184 operand[1]->string.pointer, 144 operand[1]->string.pointer,
185 operand[2]->string.pointer, &table); 145 operand[2]->string.pointer, &table_index);
186 if (ACPI_FAILURE(status)) { 146 if (ACPI_FAILURE(status)) {
187 if (status != AE_NOT_FOUND) { 147 if (status != AE_NOT_FOUND) {
188 return_ACPI_STATUS(status); 148 return_ACPI_STATUS(status);
@@ -245,7 +205,7 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
245 205
246 /* Load the table into the namespace */ 206 /* Load the table into the namespace */
247 207
248 status = acpi_ex_add_table(table, parent_node, &ddb_handle); 208 status = acpi_ex_add_table(table_index, parent_node, &ddb_handle);
249 if (ACPI_FAILURE(status)) { 209 if (ACPI_FAILURE(status)) {
250 return_ACPI_STATUS(status); 210 return_ACPI_STATUS(status);
251 } 211 }
@@ -266,9 +226,13 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
266 } 226 }
267 } 227 }
268 228
269 ACPI_INFO((AE_INFO, 229 status = acpi_get_table_by_index(table_index, &table);
270 "Dynamic OEM Table Load - [%4.4s] OemId [%6.6s] OemTableId [%8.8s]", 230 if (ACPI_SUCCESS(status)) {
271 table->signature, table->oem_id, table->oem_table_id)); 231 ACPI_INFO((AE_INFO,
232 "Dynamic OEM Table Load - [%4.4s] OemId [%6.6s] OemTableId [%8.8s]",
233 table->signature, table->oem_id,
234 table->oem_table_id));
235 }
272 236
273 *return_desc = ddb_handle; 237 *return_desc = ddb_handle;
274 return_ACPI_STATUS(status); 238 return_ACPI_STATUS(status);
@@ -278,7 +242,7 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
278 * 242 *
279 * FUNCTION: acpi_ex_load_op 243 * FUNCTION: acpi_ex_load_op
280 * 244 *
281 * PARAMETERS: obj_desc - Region or Field where the table will be 245 * PARAMETERS: obj_desc - Region or Buffer/Field where the table will be
282 * obtained 246 * obtained
283 * Target - Where a handle to the table will be stored 247 * Target - Where a handle to the table will be stored
284 * walk_state - Current state 248 * walk_state - Current state
@@ -287,6 +251,12 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
287 * 251 *
288 * DESCRIPTION: Load an ACPI table from a field or operation region 252 * DESCRIPTION: Load an ACPI table from a field or operation region
289 * 253 *
254 * NOTE: Region Fields (Field, bank_field, index_fields) are resolved to buffer
255 * objects before this code is reached.
256 *
257 * If source is an operation region, it must refer to system_memory, as
258 * per the ACPI specification.
259 *
290 ******************************************************************************/ 260 ******************************************************************************/
291 261
292acpi_status 262acpi_status
@@ -294,22 +264,26 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
294 union acpi_operand_object *target, 264 union acpi_operand_object *target,
295 struct acpi_walk_state *walk_state) 265 struct acpi_walk_state *walk_state)
296{ 266{
297 acpi_status status;
298 union acpi_operand_object *ddb_handle; 267 union acpi_operand_object *ddb_handle;
299 union acpi_operand_object *buffer_desc = NULL; 268 struct acpi_table_desc table_desc;
300 struct acpi_table_header *table_ptr = NULL; 269 acpi_native_uint table_index;
301 acpi_physical_address address; 270 acpi_status status;
302 struct acpi_table_header table_header;
303 acpi_integer temp;
304 u32 i;
305 271
306 ACPI_FUNCTION_TRACE(ex_load_op); 272 ACPI_FUNCTION_TRACE(ex_load_op);
307 273
308 /* Object can be either an op_region or a Field */ 274 ACPI_MEMSET(&table_desc, 0, sizeof(struct acpi_table_desc));
275
276 /* Source Object can be either an op_region or a Buffer/Field */
309 277
310 switch (ACPI_GET_OBJECT_TYPE(obj_desc)) { 278 switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
311 case ACPI_TYPE_REGION: 279 case ACPI_TYPE_REGION:
312 280
281 /* Region must be system_memory (from ACPI spec) */
282
283 if (obj_desc->region.space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) {
284 return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
285 }
286
313 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Load from Region %p %s\n", 287 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Load from Region %p %s\n",
314 obj_desc, 288 obj_desc,
315 acpi_ut_get_object_type_name(obj_desc))); 289 acpi_ut_get_object_type_name(obj_desc)));
@@ -325,113 +299,41 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
325 } 299 }
326 } 300 }
327 301
328 /* Get the base physical address of the region */ 302 table_desc.address = obj_desc->region.address;
329 303 table_desc.length = obj_desc->region.length;
330 address = obj_desc->region.address; 304 table_desc.flags = ACPI_TABLE_ORIGIN_MAPPED;
331
332 /* Get part of the table header to get the table length */
333
334 table_header.length = 0;
335 for (i = 0; i < 8; i++) {
336 status =
337 acpi_ev_address_space_dispatch(obj_desc, ACPI_READ,
338 (acpi_physical_address)
339 (i + address), 8,
340 &temp);
341 if (ACPI_FAILURE(status)) {
342 return_ACPI_STATUS(status);
343 }
344
345 /* Get the one valid byte of the returned 64-bit value */
346
347 ACPI_CAST_PTR(u8, &table_header)[i] = (u8) temp;
348 }
349
350 /* Sanity check the table length */
351
352 if (table_header.length < sizeof(struct acpi_table_header)) {
353 return_ACPI_STATUS(AE_BAD_HEADER);
354 }
355
356 /* Allocate a buffer for the entire table */
357
358 table_ptr = ACPI_ALLOCATE(table_header.length);
359 if (!table_ptr) {
360 return_ACPI_STATUS(AE_NO_MEMORY);
361 }
362
363 /* Get the entire table from the op region */
364
365 for (i = 0; i < table_header.length; i++) {
366 status =
367 acpi_ev_address_space_dispatch(obj_desc, ACPI_READ,
368 (acpi_physical_address)
369 (i + address), 8,
370 &temp);
371 if (ACPI_FAILURE(status)) {
372 goto cleanup;
373 }
374
375 /* Get the one valid byte of the returned 64-bit value */
376
377 ACPI_CAST_PTR(u8, table_ptr)[i] = (u8) temp;
378 }
379 break; 305 break;
380 306
381 case ACPI_TYPE_LOCAL_REGION_FIELD: 307 case ACPI_TYPE_BUFFER: /* Buffer or resolved region_field */
382 case ACPI_TYPE_LOCAL_BANK_FIELD:
383 case ACPI_TYPE_LOCAL_INDEX_FIELD:
384 308
385 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Load from Field %p %s\n", 309 /* Simply extract the buffer from the buffer object */
386 obj_desc,
387 acpi_ut_get_object_type_name(obj_desc)));
388 310
389 /* 311 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
390 * The length of the field must be at least as large as the table. 312 "Load from Buffer or Field %p %s\n", obj_desc,
391 * Read the entire field and thus the entire table. Buffer is 313 acpi_ut_get_object_type_name(obj_desc)));
392 * allocated during the read.
393 */
394 status =
395 acpi_ex_read_data_from_field(walk_state, obj_desc,
396 &buffer_desc);
397 if (ACPI_FAILURE(status)) {
398 return_ACPI_STATUS(status);
399 }
400
401 table_ptr = ACPI_CAST_PTR(struct acpi_table_header,
402 buffer_desc->buffer.pointer);
403
404 /* All done with the buffer_desc, delete it */
405
406 buffer_desc->buffer.pointer = NULL;
407 acpi_ut_remove_reference(buffer_desc);
408 314
409 /* Sanity check the table length */ 315 table_desc.pointer = ACPI_CAST_PTR(struct acpi_table_header,
316 obj_desc->buffer.pointer);
317 table_desc.length = table_desc.pointer->length;
318 table_desc.flags = ACPI_TABLE_ORIGIN_ALLOCATED;
410 319
411 if (table_ptr->length < sizeof(struct acpi_table_header)) { 320 obj_desc->buffer.pointer = NULL;
412 status = AE_BAD_HEADER;
413 goto cleanup;
414 }
415 break; 321 break;
416 322
417 default: 323 default:
418 return_ACPI_STATUS(AE_AML_OPERAND_TYPE); 324 return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
419 } 325 }
420 326
421 /* The table must be either an SSDT or a PSDT */ 327 /*
422 328 * Install the new table into the local data structures
423 if ((!ACPI_COMPARE_NAME(table_ptr->signature, PSDT_SIG)) && 329 */
424 (!ACPI_COMPARE_NAME(table_ptr->signature, SSDT_SIG))) { 330 status = acpi_tb_add_table(&table_desc, &table_index);
425 ACPI_ERROR((AE_INFO, 331 if (ACPI_FAILURE(status)) {
426 "Table has invalid signature [%4.4s], must be SSDT or PSDT",
427 table_ptr->signature));
428 status = AE_BAD_SIGNATURE;
429 goto cleanup; 332 goto cleanup;
430 } 333 }
431 334
432 /* Install the new table into the local data structures */ 335 status =
433 336 acpi_ex_add_table(table_index, acpi_gbl_root_node, &ddb_handle);
434 status = acpi_ex_add_table(table_ptr, acpi_gbl_root_node, &ddb_handle);
435 if (ACPI_FAILURE(status)) { 337 if (ACPI_FAILURE(status)) {
436 338
437 /* On error, table_ptr was deallocated above */ 339 /* On error, table_ptr was deallocated above */
@@ -450,13 +352,9 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
450 return_ACPI_STATUS(status); 352 return_ACPI_STATUS(status);
451 } 353 }
452 354
453 ACPI_INFO((AE_INFO,
454 "Dynamic SSDT Load - OemId [%6.6s] OemTableId [%8.8s]",
455 table_ptr->oem_id, table_ptr->oem_table_id));
456
457 cleanup: 355 cleanup:
458 if (ACPI_FAILURE(status)) { 356 if (ACPI_FAILURE(status)) {
459 ACPI_FREE(table_ptr); 357 acpi_tb_delete_table(&table_desc);
460 } 358 }
461 return_ACPI_STATUS(status); 359 return_ACPI_STATUS(status);
462} 360}
@@ -477,7 +375,7 @@ acpi_status acpi_ex_unload_table(union acpi_operand_object *ddb_handle)
477{ 375{
478 acpi_status status = AE_OK; 376 acpi_status status = AE_OK;
479 union acpi_operand_object *table_desc = ddb_handle; 377 union acpi_operand_object *table_desc = ddb_handle;
480 struct acpi_table_desc *table_info; 378 acpi_native_uint table_index;
481 379
482 ACPI_FUNCTION_TRACE(ex_unload_table); 380 ACPI_FUNCTION_TRACE(ex_unload_table);
483 381
@@ -493,19 +391,18 @@ acpi_status acpi_ex_unload_table(union acpi_operand_object *ddb_handle)
493 return_ACPI_STATUS(AE_BAD_PARAMETER); 391 return_ACPI_STATUS(AE_BAD_PARAMETER);
494 } 392 }
495 393
496 /* Get the actual table descriptor from the ddb_handle */ 394 /* Get the table index from the ddb_handle */
497 395
498 table_info = (struct acpi_table_desc *)table_desc->reference.object; 396 table_index = (acpi_native_uint) table_desc->reference.object;
499 397
500 /* 398 /*
501 * Delete the entire namespace under this table Node 399 * Delete the entire namespace under this table Node
502 * (Offset contains the table_id) 400 * (Offset contains the table_id)
503 */ 401 */
504 acpi_ns_delete_namespace_by_owner(table_info->owner_id); 402 acpi_tb_delete_namespace_by_owner(table_index);
505 403 acpi_tb_release_owner_id(table_index);
506 /* Delete the table itself */
507 404
508 (void)acpi_tb_uninstall_table(table_info->installed_desc); 405 acpi_tb_set_table_loaded_flag(table_index, FALSE);
509 406
510 /* Delete the table descriptor (ddb_handle) */ 407 /* Delete the table descriptor (ddb_handle) */
511 408
diff --git a/drivers/acpi/executer/exconvrt.c b/drivers/acpi/executer/exconvrt.c
index 544e81a6a438..d470e8b1f4ea 100644
--- a/drivers/acpi/executer/exconvrt.c
+++ b/drivers/acpi/executer/exconvrt.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/executer/excreate.c b/drivers/acpi/executer/excreate.c
index 34eec82c1b1e..7c38528a7e83 100644
--- a/drivers/acpi/executer/excreate.c
+++ b/drivers/acpi/executer/excreate.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -359,8 +359,9 @@ acpi_status acpi_ex_create_table_region(struct acpi_walk_state *walk_state)
359 union acpi_operand_object **operand = &walk_state->operands[0]; 359 union acpi_operand_object **operand = &walk_state->operands[0];
360 union acpi_operand_object *obj_desc; 360 union acpi_operand_object *obj_desc;
361 struct acpi_namespace_node *node; 361 struct acpi_namespace_node *node;
362 struct acpi_table_header *table;
363 union acpi_operand_object *region_obj2; 362 union acpi_operand_object *region_obj2;
363 acpi_native_uint table_index;
364 struct acpi_table_header *table;
364 365
365 ACPI_FUNCTION_TRACE(ex_create_table_region); 366 ACPI_FUNCTION_TRACE(ex_create_table_region);
366 367
@@ -380,7 +381,7 @@ acpi_status acpi_ex_create_table_region(struct acpi_walk_state *walk_state)
380 381
381 status = acpi_tb_find_table(operand[1]->string.pointer, 382 status = acpi_tb_find_table(operand[1]->string.pointer,
382 operand[2]->string.pointer, 383 operand[2]->string.pointer,
383 operand[3]->string.pointer, &table); 384 operand[3]->string.pointer, &table_index);
384 if (ACPI_FAILURE(status)) { 385 if (ACPI_FAILURE(status)) {
385 return_ACPI_STATUS(status); 386 return_ACPI_STATUS(status);
386 } 387 }
@@ -395,6 +396,11 @@ acpi_status acpi_ex_create_table_region(struct acpi_walk_state *walk_state)
395 region_obj2 = obj_desc->common.next_object; 396 region_obj2 = obj_desc->common.next_object;
396 region_obj2->extra.region_context = NULL; 397 region_obj2->extra.region_context = NULL;
397 398
399 status = acpi_get_table_by_index(table_index, &table);
400 if (ACPI_FAILURE(status)) {
401 return_ACPI_STATUS(status);
402 }
403
398 /* Init the region from the operands */ 404 /* Init the region from the operands */
399 405
400 obj_desc->region.space_id = REGION_DATA_TABLE; 406 obj_desc->region.space_id = REGION_DATA_TABLE;
@@ -553,7 +559,8 @@ acpi_ex_create_method(u8 * aml_start,
553 559
554 obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_METHOD); 560 obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_METHOD);
555 if (!obj_desc) { 561 if (!obj_desc) {
556 return_ACPI_STATUS(AE_NO_MEMORY); 562 status = AE_NO_MEMORY;
563 goto exit;
557 } 564 }
558 565
559 /* Save the method's AML pointer and length */ 566 /* Save the method's AML pointer and length */
@@ -576,10 +583,7 @@ acpi_ex_create_method(u8 * aml_start,
576 * Get the sync_level. If method is serialized, a mutex will be 583 * Get the sync_level. If method is serialized, a mutex will be
577 * created for this method when it is parsed. 584 * created for this method when it is parsed.
578 */ 585 */
579 if (acpi_gbl_all_methods_serialized) { 586 if (method_flags & AML_METHOD_SERIALIZED) {
580 obj_desc->method.sync_level = 0;
581 obj_desc->method.method_flags |= AML_METHOD_SERIALIZED;
582 } else if (method_flags & AML_METHOD_SERIALIZED) {
583 /* 587 /*
584 * ACPI 1.0: sync_level = 0 588 * ACPI 1.0: sync_level = 0
585 * ACPI 2.0: sync_level = sync_level in method declaration 589 * ACPI 2.0: sync_level = sync_level in method declaration
@@ -597,6 +601,7 @@ acpi_ex_create_method(u8 * aml_start,
597 601
598 acpi_ut_remove_reference(obj_desc); 602 acpi_ut_remove_reference(obj_desc);
599 603
604 exit:
600 /* Remove a reference to the operand */ 605 /* Remove a reference to the operand */
601 606
602 acpi_ut_remove_reference(operand[1]); 607 acpi_ut_remove_reference(operand[1]);
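
Two things change in excreate.c: acpi_ex_create_table_region now gets back a table index from acpi_tb_find_table and must fetch the header separately via acpi_get_table_by_index, and acpi_ex_create_method routes its allocation failure through a common exit label so the operand reference is dropped on every path. A small standalone model of that goto-exit cleanup idiom:

    #include <stdio.h>
    #include <stdlib.h>

    /* Model of the create_method fix: on allocation failure the function
     * now jumps to a common exit label so the reference held on the operand
     * is dropped on every path, instead of leaking on an early return. */
    struct ref { int count; };

    static void ref_put(struct ref *r) { r->count--; }

    static int create_method(struct ref *operand)
    {
        int status = 0;
        void *obj_desc = malloc(64);

        if (!obj_desc) {
            status = -1;          /* AE_NO_MEMORY in the real code */
            goto exit;
        }
        /* ... fill in AML start/length, flags, sync level ... */
        free(obj_desc);
    exit:
        ref_put(operand);         /* runs on success and failure alike */
        return status;
    }

    int main(void)
    {
        struct ref op = { .count = 1 };
        int status = create_method(&op);

        printf("status=%d refs=%d\n", status, op.count);
        return 0;
    }
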
diff --git a/drivers/acpi/executer/exdump.c b/drivers/acpi/executer/exdump.c
index 2450943add33..68d283fd60e7 100644
--- a/drivers/acpi/executer/exdump.c
+++ b/drivers/acpi/executer/exdump.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -59,8 +59,6 @@ static void acpi_ex_out_string(char *title, char *value);
59 59
60static void acpi_ex_out_pointer(char *title, void *value); 60static void acpi_ex_out_pointer(char *title, void *value);
61 61
62static void acpi_ex_out_address(char *title, acpi_physical_address value);
63
64static void 62static void
65acpi_ex_dump_object(union acpi_operand_object *obj_desc, 63acpi_ex_dump_object(union acpi_operand_object *obj_desc,
66 struct acpi_exdump_info *info); 64 struct acpi_exdump_info *info);
@@ -92,10 +90,11 @@ static struct acpi_exdump_info acpi_ex_dump_string[4] = {
92 {ACPI_EXD_STRING, 0, NULL} 90 {ACPI_EXD_STRING, 0, NULL}
93}; 91};
94 92
95static struct acpi_exdump_info acpi_ex_dump_buffer[4] = { 93static struct acpi_exdump_info acpi_ex_dump_buffer[5] = {
96 {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_buffer), NULL}, 94 {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_buffer), NULL},
97 {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(buffer.length), "Length"}, 95 {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(buffer.length), "Length"},
98 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(buffer.pointer), "Pointer"}, 96 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(buffer.pointer), "Pointer"},
97 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(buffer.node), "Parent Node"},
99 {ACPI_EXD_BUFFER, 0, NULL} 98 {ACPI_EXD_BUFFER, 0, NULL}
100}; 99};
101 100
@@ -165,8 +164,8 @@ static struct acpi_exdump_info acpi_ex_dump_power[5] = {
165 164
166static struct acpi_exdump_info acpi_ex_dump_processor[7] = { 165static struct acpi_exdump_info acpi_ex_dump_processor[7] = {
167 {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_processor), NULL}, 166 {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_processor), NULL},
168 {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(processor.proc_id), "Processor ID"}, 167 {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(processor.proc_id), "Processor ID"},
169 {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(processor.length), "Length"}, 168 {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(processor.length), "Length"},
170 {ACPI_EXD_ADDRESS, ACPI_EXD_OFFSET(processor.address), "Address"}, 169 {ACPI_EXD_ADDRESS, ACPI_EXD_OFFSET(processor.address), "Address"},
171 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(processor.system_notify), 170 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(processor.system_notify),
172 "System Notify"}, 171 "System Notify"},
@@ -379,18 +378,12 @@ acpi_ex_dump_object(union acpi_operand_object *obj_desc,
379 break; 378 break;
380 379
381 case ACPI_EXD_POINTER: 380 case ACPI_EXD_POINTER:
381 case ACPI_EXD_ADDRESS:
382 382
383 acpi_ex_out_pointer(name, 383 acpi_ex_out_pointer(name,
384 *ACPI_CAST_PTR(void *, target)); 384 *ACPI_CAST_PTR(void *, target));
385 break; 385 break;
386 386
387 case ACPI_EXD_ADDRESS:
388
389 acpi_ex_out_address(name,
390 *ACPI_CAST_PTR
391 (acpi_physical_address, target));
392 break;
393
394 case ACPI_EXD_STRING: 387 case ACPI_EXD_STRING:
395 388
396 acpi_ut_print_string(obj_desc->string.pointer, 389 acpi_ut_print_string(obj_desc->string.pointer,
@@ -834,16 +827,6 @@ static void acpi_ex_out_pointer(char *title, void *value)
834 acpi_os_printf("%20s : %p\n", title, value); 827 acpi_os_printf("%20s : %p\n", title, value);
835} 828}
836 829
837static void acpi_ex_out_address(char *title, acpi_physical_address value)
838{
839
840#if ACPI_MACHINE_WIDTH == 16
841 acpi_os_printf("%20s : %p\n", title, value);
842#else
843 acpi_os_printf("%20s : %8.8X%8.8X\n", title, ACPI_FORMAT_UINT64(value));
844#endif
845}
846
847/******************************************************************************* 830/*******************************************************************************
848 * 831 *
849 * FUNCTION: acpi_ex_dump_namespace_node 832 * FUNCTION: acpi_ex_dump_namespace_node
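
The exdump.c edits all follow from dropping 16-bit builds: an acpi_physical_address now prints exactly like a pointer, so ACPI_EXD_ADDRESS can fall through to the ACPI_EXD_POINTER case and acpi_ex_out_address disappears; the processor ID and length cells are dumped at their true u8 width; and the buffer dump gains a Parent Node cell. A toy table-driven dumper showing the shared pointer/address case, with illustrative types rather than ACPICA's:

    #include <stdio.h>
    #include <stddef.h>

    enum cell_kind { CELL_POINTER, CELL_ADDRESS, CELL_UINT8 };

    struct dump_info {
        enum cell_kind kind;
        size_t offset;
        const char *title;
    };

    struct obj { void *pointer; unsigned char proc_id; };

    static void dump(const struct obj *o, const struct dump_info *info, size_t n)
    {
        for (size_t i = 0; i < n; i++) {
            const char *at = (const char *)o + info[i].offset;

            switch (info[i].kind) {
            case CELL_POINTER:
            case CELL_ADDRESS:      /* shares the pointer formatting */
                printf("%20s : %p\n", info[i].title, *(void *const *)at);
                break;
            case CELL_UINT8:
                printf("%20s : %u\n", info[i].title,
                       *(const unsigned char *)at);
                break;
            }
        }
    }

    int main(void)
    {
        struct obj o = { .pointer = &o, .proc_id = 7 };
        const struct dump_info table[] = {
            { CELL_POINTER, offsetof(struct obj, pointer), "Pointer" },
            { CELL_UINT8,   offsetof(struct obj, proc_id), "Processor ID" },
        };

        dump(&o, table, 2);
        return 0;
    }
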
diff --git a/drivers/acpi/executer/exfield.c b/drivers/acpi/executer/exfield.c
index 9ea9c3a67ca9..2d88a3d8d1ad 100644
--- a/drivers/acpi/executer/exfield.c
+++ b/drivers/acpi/executer/exfield.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/executer/exfldio.c b/drivers/acpi/executer/exfldio.c
index 40f0bee6faa5..65a48b6170ee 100644
--- a/drivers/acpi/executer/exfldio.c
+++ b/drivers/acpi/executer/exfldio.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -257,14 +257,13 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
257 } 257 }
258 258
259 ACPI_DEBUG_PRINT_RAW((ACPI_DB_BFIELD, 259 ACPI_DEBUG_PRINT_RAW((ACPI_DB_BFIELD,
260 " Region [%s:%X], Width %X, ByteBase %X, Offset %X at %8.8X%8.8X\n", 260 " Region [%s:%X], Width %X, ByteBase %X, Offset %X at %p\n",
261 acpi_ut_get_region_name(rgn_desc->region. 261 acpi_ut_get_region_name(rgn_desc->region.
262 space_id), 262 space_id),
263 rgn_desc->region.space_id, 263 rgn_desc->region.space_id,
264 obj_desc->common_field.access_byte_width, 264 obj_desc->common_field.access_byte_width,
265 obj_desc->common_field.base_byte_offset, 265 obj_desc->common_field.base_byte_offset,
266 field_datum_byte_offset, 266 field_datum_byte_offset, (void *)address));
267 ACPI_FORMAT_UINT64(address)));
268 267
269 /* Invoke the appropriate address_space/op_region handler */ 268 /* Invoke the appropriate address_space/op_region handler */
270 269
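
The debug print now emits the region address with %p instead of splitting it into two 8-digit halves, which was only needed when a physical address could exceed the native pointer width. Both styles side by side, as a runnable comparison:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        void *address = (void *)&address;
        uint64_t v = (uint64_t)(uintptr_t)address;

        /* old style: hi/lo 32-bit halves */
        printf("at %8.8X%8.8X\n",
               (unsigned)(v >> 32), (unsigned)(v & 0xffffffffu));
        /* new style: plain pointer */
        printf("at %p\n", address);
        return 0;
    }
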
diff --git a/drivers/acpi/executer/exmisc.c b/drivers/acpi/executer/exmisc.c
index bd98aab017cf..f13d1cec2d6d 100644
--- a/drivers/acpi/executer/exmisc.c
+++ b/drivers/acpi/executer/exmisc.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/executer/exmutex.c b/drivers/acpi/executer/exmutex.c
index bf90f04f2c60..5101bad5baf8 100644
--- a/drivers/acpi/executer/exmutex.c
+++ b/drivers/acpi/executer/exmutex.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -44,6 +44,7 @@
44 44
45#include <acpi/acpi.h> 45#include <acpi/acpi.h>
46#include <acpi/acinterp.h> 46#include <acpi/acinterp.h>
47#include <acpi/acevents.h>
47 48
48#define _COMPONENT ACPI_EXECUTER 49#define _COMPONENT ACPI_EXECUTER
49ACPI_MODULE_NAME("exmutex") 50ACPI_MODULE_NAME("exmutex")
@@ -150,7 +151,7 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
150 return_ACPI_STATUS(AE_BAD_PARAMETER); 151 return_ACPI_STATUS(AE_BAD_PARAMETER);
151 } 152 }
152 153
153 /* Sanity check -- we must have a valid thread ID */ 154 /* Sanity check: we must have a valid thread ID */
154 155
155 if (!walk_state->thread) { 156 if (!walk_state->thread) {
156 ACPI_ERROR((AE_INFO, 157 ACPI_ERROR((AE_INFO,
@@ -174,24 +175,28 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
174 /* Support for multiple acquires by the owning thread */ 175 /* Support for multiple acquires by the owning thread */
175 176
176 if (obj_desc->mutex.owner_thread) { 177 if (obj_desc->mutex.owner_thread) {
177 178 if (obj_desc->mutex.owner_thread->thread_id ==
178 /* Special case for Global Lock, allow all threads */ 179 walk_state->thread->thread_id) {
179
180 if ((obj_desc->mutex.owner_thread->thread_id ==
181 walk_state->thread->thread_id) ||
182 (obj_desc->mutex.os_mutex == ACPI_GLOBAL_LOCK)) {
183 /* 180 /*
184 * The mutex is already owned by this thread, 181 * The mutex is already owned by this thread, just increment the
185 * just increment the acquisition depth 182 * acquisition depth
186 */ 183 */
187 obj_desc->mutex.acquisition_depth++; 184 obj_desc->mutex.acquisition_depth++;
188 return_ACPI_STATUS(AE_OK); 185 return_ACPI_STATUS(AE_OK);
189 } 186 }
190 } 187 }
191 188
192 /* Acquire the mutex, wait if necessary */ 189 /* Acquire the mutex, wait if necessary. Special case for Global Lock */
190
191 if (obj_desc->mutex.os_mutex == acpi_gbl_global_lock_mutex) {
192 status =
193 acpi_ev_acquire_global_lock((u16) time_desc->integer.value);
194 } else {
195 status = acpi_ex_system_wait_mutex(obj_desc->mutex.os_mutex,
196 (u16) time_desc->integer.
197 value);
198 }
193 199
194 status = acpi_ex_system_acquire_mutex(time_desc, obj_desc);
195 if (ACPI_FAILURE(status)) { 200 if (ACPI_FAILURE(status)) {
196 201
197 /* Includes failure from a timeout on time_desc */ 202 /* Includes failure from a timeout on time_desc */
@@ -211,7 +216,6 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
211 /* Link the mutex to the current thread for force-unlock at method exit */ 216 /* Link the mutex to the current thread for force-unlock at method exit */
212 217
213 acpi_ex_link_mutex(obj_desc, walk_state->thread); 218 acpi_ex_link_mutex(obj_desc, walk_state->thread);
214
215 return_ACPI_STATUS(AE_OK); 219 return_ACPI_STATUS(AE_OK);
216} 220}
217 221
@@ -232,7 +236,7 @@ acpi_status
232acpi_ex_release_mutex(union acpi_operand_object *obj_desc, 236acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
233 struct acpi_walk_state *walk_state) 237 struct acpi_walk_state *walk_state)
234{ 238{
235 acpi_status status; 239 acpi_status status = AE_OK;
236 240
237 ACPI_FUNCTION_TRACE(ex_release_mutex); 241 ACPI_FUNCTION_TRACE(ex_release_mutex);
238 242
@@ -249,7 +253,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
249 return_ACPI_STATUS(AE_AML_MUTEX_NOT_ACQUIRED); 253 return_ACPI_STATUS(AE_AML_MUTEX_NOT_ACQUIRED);
250 } 254 }
251 255
252 /* Sanity check -- we must have a valid thread ID */ 256 /* Sanity check: we must have a valid thread ID */
253 257
254 if (!walk_state->thread) { 258 if (!walk_state->thread) {
255 ACPI_ERROR((AE_INFO, 259 ACPI_ERROR((AE_INFO,
@@ -264,7 +268,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
264 */ 268 */
265 if ((obj_desc->mutex.owner_thread->thread_id != 269 if ((obj_desc->mutex.owner_thread->thread_id !=
266 walk_state->thread->thread_id) 270 walk_state->thread->thread_id)
267 && (obj_desc->mutex.os_mutex != ACPI_GLOBAL_LOCK)) { 271 && (obj_desc->mutex.os_mutex != acpi_gbl_global_lock_mutex)) {
268 ACPI_ERROR((AE_INFO, 272 ACPI_ERROR((AE_INFO,
269 "Thread %lX cannot release Mutex [%4.4s] acquired by thread %lX", 273 "Thread %lX cannot release Mutex [%4.4s] acquired by thread %lX",
270 (unsigned long)walk_state->thread->thread_id, 274 (unsigned long)walk_state->thread->thread_id,
@@ -274,8 +278,8 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
274 } 278 }
275 279
276 /* 280 /*
277 * The sync level of the mutex must be less than or 281 * The sync level of the mutex must be less than or equal to the current
278 * equal to the current sync level 282 * sync level
279 */ 283 */
280 if (obj_desc->mutex.sync_level > walk_state->thread->current_sync_level) { 284 if (obj_desc->mutex.sync_level > walk_state->thread->current_sync_level) {
281 ACPI_ERROR((AE_INFO, 285 ACPI_ERROR((AE_INFO,
@@ -298,11 +302,15 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
298 302
299 acpi_ex_unlink_mutex(obj_desc); 303 acpi_ex_unlink_mutex(obj_desc);
300 304
301 /* Release the mutex */ 305 /* Release the mutex, special case for Global Lock */
302 306
303 status = acpi_ex_system_release_mutex(obj_desc); 307 if (obj_desc->mutex.os_mutex == acpi_gbl_global_lock_mutex) {
308 status = acpi_ev_release_global_lock();
309 } else {
310 acpi_os_release_mutex(obj_desc->mutex.os_mutex);
311 }
304 312
305 /* Update the mutex and walk state, restore sync_level before acquire */ 313 /* Update the mutex and restore sync_level */
306 314
307 obj_desc->mutex.owner_thread = NULL; 315 obj_desc->mutex.owner_thread = NULL;
308 walk_state->thread->current_sync_level = 316 walk_state->thread->current_sync_level =
@@ -321,39 +329,49 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
321 * 329 *
322 * DESCRIPTION: Release all mutexes held by this thread 330 * DESCRIPTION: Release all mutexes held by this thread
323 * 331 *
332 * NOTE: This function is called as the thread is exiting the interpreter.
333 * Mutexes are not released when an individual control method is exited, but
334 * only when the parent thread actually exits the interpreter. This allows one
335 * method to acquire a mutex, and a different method to release it, as long as
336 * this is performed underneath a single parent control method.
337 *
324 ******************************************************************************/ 338 ******************************************************************************/
325 339
326void acpi_ex_release_all_mutexes(struct acpi_thread_state *thread) 340void acpi_ex_release_all_mutexes(struct acpi_thread_state *thread)
327{ 341{
328 union acpi_operand_object *next = thread->acquired_mutex_list; 342 union acpi_operand_object *next = thread->acquired_mutex_list;
329 union acpi_operand_object *this; 343 union acpi_operand_object *obj_desc;
330 acpi_status status;
331 344
332 ACPI_FUNCTION_ENTRY(); 345 ACPI_FUNCTION_ENTRY();
333 346
334 /* Traverse the list of owned mutexes, releasing each one */ 347 /* Traverse the list of owned mutexes, releasing each one */
335 348
336 while (next) { 349 while (next) {
337 this = next; 350 obj_desc = next;
338 next = this->mutex.next; 351 next = obj_desc->mutex.next;
352
353 obj_desc->mutex.prev = NULL;
354 obj_desc->mutex.next = NULL;
355 obj_desc->mutex.acquisition_depth = 0;
356
357 /* Release the mutex, special case for Global Lock */
339 358
340 this->mutex.acquisition_depth = 1; 359 if (obj_desc->mutex.os_mutex == acpi_gbl_global_lock_mutex) {
341 this->mutex.prev = NULL;
342 this->mutex.next = NULL;
343 360
344 /* Release the mutex */ 361 /* Ignore errors */
345 362
346 status = acpi_ex_system_release_mutex(this); 363 (void)acpi_ev_release_global_lock();
347 if (ACPI_FAILURE(status)) { 364 } else {
348 continue; 365 acpi_os_release_mutex(obj_desc->mutex.os_mutex);
349 } 366 }
350 367
351 /* Mark mutex unowned */ 368 /* Mark mutex unowned */
352 369
353 this->mutex.owner_thread = NULL; 370 obj_desc->mutex.owner_thread = NULL;
354 371
355 /* Update Thread sync_level (Last mutex is the important one) */ 372 /* Update Thread sync_level (Last mutex is the important one) */
356 373
357 thread->current_sync_level = this->mutex.original_sync_level; 374 thread->current_sync_level =
375 obj_desc->mutex.original_sync_level;
358 } 376 }
359} 377}
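
After this rewrite the Global Lock is special-cased exactly where the thread would block: acquire goes through acpi_ev_acquire_global_lock, release through acpi_ev_release_global_lock, and ordinary mutexes call the OS layer directly, which is why the acpi_ex_system_acquire/release_mutex wrappers can be deleted (see the exsystem.c diff below). Nested acquisition by the owning thread still only bumps the depth counter, but that shortcut no longer applies to the Global Lock. A runnable model of the acquire logic, with a flag standing in for the Global Lock check:

    #include <stdio.h>
    #include <pthread.h>

    struct aml_mutex {
        pthread_mutex_t os_mutex;
        pthread_t owner;
        int owned;
        int depth;
        int is_global_lock;
    };

    static void acquire(struct aml_mutex *m)
    {
        if (m->owned && pthread_equal(m->owner, pthread_self())) {
            m->depth++;                       /* owner re-entry: just nest */
            return;
        }
        if (m->is_global_lock) {
            /* real code: acpi_ev_acquire_global_lock(timeout) */
            puts("would acquire Global Lock via event code");
        } else {
            pthread_mutex_lock(&m->os_mutex); /* plain OS mutex wait */
        }
        m->owner = pthread_self();
        m->owned = 1;
        m->depth = 1;
    }

    int main(void)
    {
        struct aml_mutex m = { .os_mutex = PTHREAD_MUTEX_INITIALIZER };

        acquire(&m);
        acquire(&m);                          /* nests: depth becomes 2 */
        printf("depth=%d\n", m.depth);
        return 0;
    }
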
diff --git a/drivers/acpi/executer/exnames.c b/drivers/acpi/executer/exnames.c
index d3d70364626c..1ee4fb1175c6 100644
--- a/drivers/acpi/executer/exnames.c
+++ b/drivers/acpi/executer/exnames.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/executer/exoparg1.c b/drivers/acpi/executer/exoparg1.c
index 6374d8be88e0..252f10acbbcc 100644
--- a/drivers/acpi/executer/exoparg1.c
+++ b/drivers/acpi/executer/exoparg1.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -104,9 +104,7 @@ acpi_status acpi_ex_opcode_0A_0T_1R(struct acpi_walk_state *walk_state)
104 status = AE_NO_MEMORY; 104 status = AE_NO_MEMORY;
105 goto cleanup; 105 goto cleanup;
106 } 106 }
107#if ACPI_MACHINE_WIDTH != 16
108 return_desc->integer.value = acpi_os_get_timer(); 107 return_desc->integer.value = acpi_os_get_timer();
109#endif
110 break; 108 break;
111 109
112 default: /* Unknown opcode */ 110 default: /* Unknown opcode */
diff --git a/drivers/acpi/executer/exoparg2.c b/drivers/acpi/executer/exoparg2.c
index 7d2cbc113160..17e652e65379 100644
--- a/drivers/acpi/executer/exoparg2.c
+++ b/drivers/acpi/executer/exoparg2.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/executer/exoparg3.c b/drivers/acpi/executer/exoparg3.c
index e2d945dfd509..7fe67cf82cee 100644
--- a/drivers/acpi/executer/exoparg3.c
+++ b/drivers/acpi/executer/exoparg3.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/executer/exoparg6.c b/drivers/acpi/executer/exoparg6.c
index f0c0ba6eb408..bd80a9cb3d65 100644
--- a/drivers/acpi/executer/exoparg6.c
+++ b/drivers/acpi/executer/exoparg6.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/executer/exprep.c b/drivers/acpi/executer/exprep.c
index 44d064f427b9..a6696621ff1b 100644
--- a/drivers/acpi/executer/exprep.c
+++ b/drivers/acpi/executer/exprep.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/executer/exregion.c b/drivers/acpi/executer/exregion.c
index 3cc97ba48b36..2e9ce94798c7 100644
--- a/drivers/acpi/executer/exregion.c
+++ b/drivers/acpi/executer/exregion.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -155,16 +155,15 @@ acpi_ex_system_memory_space_handler(u32 function,
155 155
156 /* Create a new mapping starting at the address given */ 156 /* Create a new mapping starting at the address given */
157 157
158 status = acpi_os_map_memory(address, window_size, 158 mem_info->mapped_logical_address =
159 (void **)&mem_info-> 159 acpi_os_map_memory((acpi_native_uint) address, window_size);
160 mapped_logical_address); 160 if (!mem_info->mapped_logical_address) {
161 if (ACPI_FAILURE(status)) {
162 ACPI_ERROR((AE_INFO, 161 ACPI_ERROR((AE_INFO,
163 "Could not map memory at %8.8X%8.8X, size %X", 162 "Could not map memory at %8.8X%8.8X, size %X",
164 ACPI_FORMAT_UINT64(address), 163 ACPI_FORMAT_UINT64(address),
165 (u32) window_size)); 164 (u32) window_size));
166 mem_info->mapped_length = 0; 165 mem_info->mapped_length = 0;
167 return_ACPI_STATUS(status); 166 return_ACPI_STATUS(AE_NO_MEMORY);
168 } 167 }
169 168
170 /* Save the physical address and mapping size */ 169 /* Save the physical address and mapping size */
@@ -210,11 +209,10 @@ acpi_ex_system_memory_space_handler(u32 function,
210 *value = (acpi_integer) ACPI_GET32(logical_addr_ptr); 209 *value = (acpi_integer) ACPI_GET32(logical_addr_ptr);
211 break; 210 break;
212 211
213#if ACPI_MACHINE_WIDTH != 16
214 case 64: 212 case 64:
215 *value = (acpi_integer) ACPI_GET64(logical_addr_ptr); 213 *value = (acpi_integer) ACPI_GET64(logical_addr_ptr);
216 break; 214 break;
217#endif 215
218 default: 216 default:
219 /* bit_width was already validated */ 217 /* bit_width was already validated */
220 break; 218 break;
@@ -236,11 +234,9 @@ acpi_ex_system_memory_space_handler(u32 function,
236 ACPI_SET32(logical_addr_ptr) = (u32) * value; 234 ACPI_SET32(logical_addr_ptr) = (u32) * value;
237 break; 235 break;
238 236
239#if ACPI_MACHINE_WIDTH != 16
240 case 64: 237 case 64:
241 ACPI_SET64(logical_addr_ptr) = (u64) * value; 238 ACPI_SET64(logical_addr_ptr) = (u64) * value;
242 break; 239 break;
243#endif
244 240
245 default: 241 default:
246 /* bit_width was already validated */ 242 /* bit_width was already validated */
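
The interface change here is acpi_os_map_memory: instead of returning a status with the mapping through an out parameter, it now returns the mapping itself, with NULL meaning failure (reported as AE_NO_MEMORY). The 64-bit read and write cases also become unconditional now that 16-bit builds are gone. A sketch of the new contract, with malloc standing in for a real MMIO mapping:

    #include <stdio.h>
    #include <stdlib.h>

    /* Returns the logical mapping directly; NULL means failure. */
    static void *map_memory(unsigned long phys, size_t size)
    {
        (void)phys;
        return malloc(size);        /* model only; not a real MMIO map */
    }

    int main(void)
    {
        void *logical = map_memory(0xfed40000ul, 4096);

        if (!logical) {
            fprintf(stderr, "could not map memory\n");
            return 1;               /* AE_NO_MEMORY in the hunk */
        }
        free(logical);
        return 0;
    }
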
diff --git a/drivers/acpi/executer/exresnte.c b/drivers/acpi/executer/exresnte.c
index 3089b05a1368..2b3a01cc4929 100644
--- a/drivers/acpi/executer/exresnte.c
+++ b/drivers/acpi/executer/exresnte.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/executer/exresolv.c b/drivers/acpi/executer/exresolv.c
index 6499de878017..6c64e55dab0e 100644
--- a/drivers/acpi/executer/exresolv.c
+++ b/drivers/acpi/executer/exresolv.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -141,7 +141,7 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
141 acpi_status status = AE_OK; 141 acpi_status status = AE_OK;
142 union acpi_operand_object *stack_desc; 142 union acpi_operand_object *stack_desc;
143 void *temp_node; 143 void *temp_node;
144 union acpi_operand_object *obj_desc; 144 union acpi_operand_object *obj_desc = NULL;
145 u16 opcode; 145 u16 opcode;
146 146
147 ACPI_FUNCTION_TRACE(ex_resolve_object_to_value); 147 ACPI_FUNCTION_TRACE(ex_resolve_object_to_value);
@@ -299,8 +299,6 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
299 status = acpi_ds_get_package_arguments(stack_desc); 299 status = acpi_ds_get_package_arguments(stack_desc);
300 break; 300 break;
301 301
302 /* These cases may never happen here, but just in case.. */
303
304 case ACPI_TYPE_BUFFER_FIELD: 302 case ACPI_TYPE_BUFFER_FIELD:
305 case ACPI_TYPE_LOCAL_REGION_FIELD: 303 case ACPI_TYPE_LOCAL_REGION_FIELD:
306 case ACPI_TYPE_LOCAL_BANK_FIELD: 304 case ACPI_TYPE_LOCAL_BANK_FIELD:
@@ -314,6 +312,10 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
314 status = 312 status =
315 acpi_ex_read_data_from_field(walk_state, stack_desc, 313 acpi_ex_read_data_from_field(walk_state, stack_desc,
316 &obj_desc); 314 &obj_desc);
315
316 /* Remove a reference to the original operand, then override */
317
318 acpi_ut_remove_reference(*stack_ptr);
317 *stack_ptr = (void *)obj_desc; 319 *stack_ptr = (void *)obj_desc;
318 break; 320 break;
319 321
diff --git a/drivers/acpi/executer/exresop.c b/drivers/acpi/executer/exresop.c
index 4c93d0972333..ba761862a599 100644
--- a/drivers/acpi/executer/exresop.c
+++ b/drivers/acpi/executer/exresop.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -611,22 +611,20 @@ acpi_ex_resolve_operands(u16 opcode,
611 } 611 }
612 goto next_operand; 612 goto next_operand;
613 613
614 case ARGI_REGION_OR_FIELD: 614 case ARGI_REGION_OR_BUFFER: /* Used by Load() only */
615 615
616 /* Need an operand of type REGION or a FIELD in a region */ 616 /* Need an operand of type REGION or a BUFFER (which could be a resolved region field) */
617 617
618 switch (ACPI_GET_OBJECT_TYPE(obj_desc)) { 618 switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
619 case ACPI_TYPE_BUFFER:
619 case ACPI_TYPE_REGION: 620 case ACPI_TYPE_REGION:
620 case ACPI_TYPE_LOCAL_REGION_FIELD:
621 case ACPI_TYPE_LOCAL_BANK_FIELD:
622 case ACPI_TYPE_LOCAL_INDEX_FIELD:
623 621
624 /* Valid operand */ 622 /* Valid operand */
625 break; 623 break;
626 624
627 default: 625 default:
628 ACPI_ERROR((AE_INFO, 626 ACPI_ERROR((AE_INFO,
629 "Needed [Region/RegionField], found [%s] %p", 627 "Needed [Region/Buffer], found [%s] %p",
630 acpi_ut_get_object_type_name 628 acpi_ut_get_object_type_name
631 (obj_desc), obj_desc)); 629 (obj_desc), obj_desc));
632 630
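
ARGI_REGION_OR_FIELD becomes ARGI_REGION_OR_BUFFER because Load() is the only user of this operand class and, by the time the operand reaches resolution, any region field has already been read into a buffer; the legal types are therefore exactly Region and Buffer. A toy validator with the same shape:

    #include <stdio.h>

    enum obj_type { TYPE_REGION, TYPE_BUFFER, TYPE_STRING };

    /* Mirrors the new switch: Region and Buffer pass, anything else is
     * rejected with the "Needed [Region/Buffer]" error in the real code. */
    static int valid_load_operand(enum obj_type t)
    {
        switch (t) {
        case TYPE_REGION:
        case TYPE_BUFFER:
            return 1;
        default:
            return 0;
        }
    }

    int main(void)
    {
        printf("buffer ok: %d\n", valid_load_operand(TYPE_BUFFER));
        printf("string ok: %d\n", valid_load_operand(TYPE_STRING));
        return 0;
    }
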
diff --git a/drivers/acpi/executer/exstore.c b/drivers/acpi/executer/exstore.c
index 0456405ba019..f4b69a637820 100644
--- a/drivers/acpi/executer/exstore.c
+++ b/drivers/acpi/executer/exstore.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/executer/exstoren.c b/drivers/acpi/executer/exstoren.c
index 591aaf0e18b3..1d622c625c64 100644
--- a/drivers/acpi/executer/exstoren.c
+++ b/drivers/acpi/executer/exstoren.c
@@ -7,7 +7,7 @@
7 *****************************************************************************/ 7 *****************************************************************************/
8 8
9/* 9/*
10 * Copyright (C) 2000 - 2006, R. Byron Moore 10 * Copyright (C) 2000 - 2007, R. Byron Moore
11 * All rights reserved. 11 * All rights reserved.
12 * 12 *
13 * Redistribution and use in source and binary forms, with or without 13 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/executer/exstorob.c b/drivers/acpi/executer/exstorob.c
index 99ebe5adfcda..8233d40178ee 100644
--- a/drivers/acpi/executer/exstorob.c
+++ b/drivers/acpi/executer/exstorob.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/executer/exsystem.c b/drivers/acpi/executer/exsystem.c
index 28aef3e69ecc..9460baff3032 100644
--- a/drivers/acpi/executer/exsystem.c
+++ b/drivers/acpi/executer/exsystem.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -66,7 +66,6 @@ ACPI_MODULE_NAME("exsystem")
66acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout) 66acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout)
67{ 67{
68 acpi_status status; 68 acpi_status status;
69 acpi_status status2;
70 69
71 ACPI_FUNCTION_TRACE(ex_system_wait_semaphore); 70 ACPI_FUNCTION_TRACE(ex_system_wait_semaphore);
72 71
@@ -79,7 +78,7 @@ acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout)
79 78
80 /* We must wait, so unlock the interpreter */ 79 /* We must wait, so unlock the interpreter */
81 80
82 acpi_ex_exit_interpreter(); 81 acpi_ex_relinquish_interpreter();
83 82
84 status = acpi_os_wait_semaphore(semaphore, 1, timeout); 83 status = acpi_os_wait_semaphore(semaphore, 1, timeout);
85 84
@@ -89,13 +88,7 @@ acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout)
89 88
90 /* Reacquire the interpreter */ 89 /* Reacquire the interpreter */
91 90
92 status2 = acpi_ex_enter_interpreter(); 91 acpi_ex_reacquire_interpreter();
93 if (ACPI_FAILURE(status2)) {
94
95 /* Report fatal error, could not acquire interpreter */
96
97 return_ACPI_STATUS(status2);
98 }
99 } 92 }
100 93
101 return_ACPI_STATUS(status); 94 return_ACPI_STATUS(status);
@@ -119,7 +112,6 @@ acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout)
119acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout) 112acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout)
120{ 113{
121 acpi_status status; 114 acpi_status status;
122 acpi_status status2;
123 115
124 ACPI_FUNCTION_TRACE(ex_system_wait_mutex); 116 ACPI_FUNCTION_TRACE(ex_system_wait_mutex);
125 117
@@ -132,7 +124,7 @@ acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout)
132 124
133 /* We must wait, so unlock the interpreter */ 125 /* We must wait, so unlock the interpreter */
134 126
135 acpi_ex_exit_interpreter(); 127 acpi_ex_relinquish_interpreter();
136 128
137 status = acpi_os_acquire_mutex(mutex, timeout); 129 status = acpi_os_acquire_mutex(mutex, timeout);
138 130
@@ -142,13 +134,7 @@ acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout)
142 134
143 /* Reacquire the interpreter */ 135 /* Reacquire the interpreter */
144 136
145 status2 = acpi_ex_enter_interpreter(); 137 acpi_ex_reacquire_interpreter();
146 if (ACPI_FAILURE(status2)) {
147
148 /* Report fatal error, could not acquire interpreter */
149
150 return_ACPI_STATUS(status2);
151 }
152 } 138 }
153 139
154 return_ACPI_STATUS(status); 140 return_ACPI_STATUS(status);
@@ -209,96 +195,18 @@ acpi_status acpi_ex_system_do_stall(u32 how_long)
209 195
210acpi_status acpi_ex_system_do_suspend(acpi_integer how_long) 196acpi_status acpi_ex_system_do_suspend(acpi_integer how_long)
211{ 197{
212 acpi_status status;
213
214 ACPI_FUNCTION_ENTRY(); 198 ACPI_FUNCTION_ENTRY();
215 199
216 /* Since this thread will sleep, we must release the interpreter */ 200 /* Since this thread will sleep, we must release the interpreter */
217 201
218 acpi_ex_exit_interpreter(); 202 acpi_ex_relinquish_interpreter();
219 203
220 acpi_os_sleep(how_long); 204 acpi_os_sleep(how_long);
221 205
222 /* And now we must get the interpreter again */ 206 /* And now we must get the interpreter again */
223 207
224 status = acpi_ex_enter_interpreter(); 208 acpi_ex_reacquire_interpreter();
225 return (status); 209 return (AE_OK);
226}
227
228/*******************************************************************************
229 *
230 * FUNCTION: acpi_ex_system_acquire_mutex
231 *
232 * PARAMETERS: time_desc - Maximum time to wait for the mutex
233 * obj_desc - The object descriptor for this op
234 *
235 * RETURN: Status
236 *
237 * DESCRIPTION: Provides an access point to perform synchronization operations
238 * within the AML. This function will cause a lock to be generated
239 * for the Mutex pointed to by obj_desc.
240 *
241 ******************************************************************************/
242
243acpi_status
244acpi_ex_system_acquire_mutex(union acpi_operand_object * time_desc,
245 union acpi_operand_object * obj_desc)
246{
247 acpi_status status = AE_OK;
248
249 ACPI_FUNCTION_TRACE_PTR(ex_system_acquire_mutex, obj_desc);
250
251 if (!obj_desc) {
252 return_ACPI_STATUS(AE_BAD_PARAMETER);
253 }
254
255 /* Support for the _GL_ Mutex object -- go get the global lock */
256
257 if (obj_desc->mutex.os_mutex == ACPI_GLOBAL_LOCK) {
258 status =
259 acpi_ev_acquire_global_lock((u16) time_desc->integer.value);
260 return_ACPI_STATUS(status);
261 }
262
263 status = acpi_ex_system_wait_mutex(obj_desc->mutex.os_mutex,
264 (u16) time_desc->integer.value);
265 return_ACPI_STATUS(status);
266}
267
268/*******************************************************************************
269 *
270 * FUNCTION: acpi_ex_system_release_mutex
271 *
272 * PARAMETERS: obj_desc - The object descriptor for this op
273 *
274 * RETURN: Status
275 *
276 * DESCRIPTION: Provides an access point to perform synchronization operations
277 * within the AML. This operation is a request to release a
278 * previously acquired Mutex. If the Mutex variable is set then
279 * it will be decremented.
280 *
281 ******************************************************************************/
282
283acpi_status acpi_ex_system_release_mutex(union acpi_operand_object *obj_desc)
284{
285 acpi_status status = AE_OK;
286
287 ACPI_FUNCTION_TRACE(ex_system_release_mutex);
288
289 if (!obj_desc) {
290 return_ACPI_STATUS(AE_BAD_PARAMETER);
291 }
292
293 /* Support for the _GL_ Mutex object -- release the global lock */
294
295 if (obj_desc->mutex.os_mutex == ACPI_GLOBAL_LOCK) {
296 status = acpi_ev_release_global_lock();
297 return_ACPI_STATUS(status);
298 }
299
300 acpi_os_release_mutex(obj_desc->mutex.os_mutex);
301 return_ACPI_STATUS(AE_OK);
302} 210}
303 211
304/******************************************************************************* 212/*******************************************************************************
@@ -314,7 +222,7 @@ acpi_status acpi_ex_system_release_mutex(union acpi_operand_object *obj_desc)
314 * 222 *
315 ******************************************************************************/ 223 ******************************************************************************/
316 224
317acpi_status acpi_ex_system_signal_event(union acpi_operand_object *obj_desc) 225acpi_status acpi_ex_system_signal_event(union acpi_operand_object * obj_desc)
318{ 226{
319 acpi_status status = AE_OK; 227 acpi_status status = AE_OK;
320 228
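
exsystem.c shrinks because blocking is now bracketed by acpi_ex_relinquish_interpreter and acpi_ex_reacquire_interpreter, which return void, so the status2 plumbing disappears; and the mutex acquire/release wrappers are deleted outright now that exmutex.c handles the Global Lock itself. A minimal model of the relinquish/block/reacquire bracket, with a plain mutex standing in for the interpreter lock:

    #include <stdio.h>
    #include <pthread.h>
    #include <unistd.h>

    static pthread_mutex_t interpreter = PTHREAD_MUTEX_INITIALIZER;

    static void relinquish(void) { pthread_mutex_unlock(&interpreter); }
    static void reacquire(void)  { pthread_mutex_lock(&interpreter); }

    static void do_suspend(unsigned int ms)
    {
        relinquish();               /* let other threads interpret AML */
        usleep(ms * 1000);          /* the blocking operation */
        reacquire();                /* always succeeds; nothing to check */
    }

    int main(void)
    {
        reacquire();                /* enter the interpreter */
        do_suspend(10);
        relinquish();
        puts("done");
        return 0;
    }
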
diff --git a/drivers/acpi/executer/exutils.c b/drivers/acpi/executer/exutils.c
index 982c8b65876f..6b0aeccbb69b 100644
--- a/drivers/acpi/executer/exutils.c
+++ b/drivers/acpi/executer/exutils.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -76,14 +76,15 @@ static u32 acpi_ex_digits_needed(acpi_integer value, u32 base);
76 * 76 *
77 * PARAMETERS: None 77 * PARAMETERS: None
78 * 78 *
79 * RETURN: Status 79 * RETURN: None
80 * 80 *
81 * DESCRIPTION: Enter the interpreter execution region. Failure to enter 81 * DESCRIPTION: Enter the interpreter execution region. Failure to enter
82 * the interpreter region is a fatal system error 82 * the interpreter region is a fatal system error. Used in
83 * conjunction with exit_interpreter.
83 * 84 *
84 ******************************************************************************/ 85 ******************************************************************************/
85 86
86acpi_status acpi_ex_enter_interpreter(void) 87void acpi_ex_enter_interpreter(void)
87{ 88{
88 acpi_status status; 89 acpi_status status;
89 90
@@ -91,31 +92,55 @@ acpi_status acpi_ex_enter_interpreter(void)
91 92
92 status = acpi_ut_acquire_mutex(ACPI_MTX_INTERPRETER); 93 status = acpi_ut_acquire_mutex(ACPI_MTX_INTERPRETER);
93 if (ACPI_FAILURE(status)) { 94 if (ACPI_FAILURE(status)) {
94 ACPI_ERROR((AE_INFO, "Could not acquire interpreter mutex")); 95 ACPI_ERROR((AE_INFO,
96 "Could not acquire AML Interpreter mutex"));
95 } 97 }
96 98
97 return_ACPI_STATUS(status); 99 return_VOID;
98} 100}
99 101
100/******************************************************************************* 102/*******************************************************************************
101 * 103 *
102 * FUNCTION: acpi_ex_exit_interpreter 104 * FUNCTION: acpi_ex_reacquire_interpreter
103 * 105 *
104 * PARAMETERS: None 106 * PARAMETERS: None
105 * 107 *
106 * RETURN: None 108 * RETURN: None
107 * 109 *
108 * DESCRIPTION: Exit the interpreter execution region 110 * DESCRIPTION: Reacquire the interpreter execution region from within the
111 * interpreter code. Failure to enter the interpreter region is a
112 * fatal system error. Used in conjuction with
113 * relinquish_interpreter
114 *
115 ******************************************************************************/
116
117void acpi_ex_reacquire_interpreter(void)
118{
119 ACPI_FUNCTION_TRACE(ex_reacquire_interpreter);
120
121 /*
122 * If the global serialized flag is set, do not release the interpreter,
123 * since it was not actually released by acpi_ex_relinquish_interpreter.
124 * This forces the interpreter to be single threaded.
125 */
126 if (!acpi_gbl_all_methods_serialized) {
127 acpi_ex_enter_interpreter();
128 }
129
130 return_VOID;
131}
132
133/*******************************************************************************
134 *
135 * FUNCTION: acpi_ex_exit_interpreter
136 *
137 * PARAMETERS: None
138 *
139 * RETURN: None
109 * 140 *
110 * Cases where the interpreter is unlocked: 141 * DESCRIPTION: Exit the interpreter execution region. This is the top level
111 * 1) Completion of the execution of a control method 142 * routine used to exit the interpreter when all processing has
112 * 2) Method blocked on a Sleep() AML opcode 143 * been completed.
113 * 3) Method blocked on an Acquire() AML opcode
114 * 4) Method blocked on a Wait() AML opcode
115 * 5) Method blocked to acquire the global lock
116 * 6) Method blocked to execute a serialized control method that is
117 * already executing
118 * 7) About to invoke a user-installed opregion handler
119 * 144 *
120 ******************************************************************************/ 145 ******************************************************************************/
121 146
@@ -127,7 +152,46 @@ void acpi_ex_exit_interpreter(void)
127 152
128 status = acpi_ut_release_mutex(ACPI_MTX_INTERPRETER); 153 status = acpi_ut_release_mutex(ACPI_MTX_INTERPRETER);
129 if (ACPI_FAILURE(status)) { 154 if (ACPI_FAILURE(status)) {
130 ACPI_ERROR((AE_INFO, "Could not release interpreter mutex")); 155 ACPI_ERROR((AE_INFO,
156 "Could not release AML Interpreter mutex"));
157 }
158
159 return_VOID;
160}
161
162/*******************************************************************************
163 *
164 * FUNCTION: acpi_ex_relinquish_interpreter
165 *
166 * PARAMETERS: None
167 *
168 * RETURN: None
169 *
170 * DESCRIPTION: Exit the interpreter execution region, from within the
171 * interpreter - before attempting an operation that will possibly
172 * block the running thread.
173 *
174 * Cases where the interpreter is unlocked internally
175 * 1) Method to be blocked on a Sleep() AML opcode
176 * 2) Method to be blocked on an Acquire() AML opcode
177 * 3) Method to be blocked on a Wait() AML opcode
178 * 4) Method to be blocked to acquire the global lock
179 * 5) Method to be blocked waiting to execute a serialized control method
180 * that is currently executing
181 * 6) About to invoke a user-installed opregion handler
182 *
183 ******************************************************************************/
184
185void acpi_ex_relinquish_interpreter(void)
186{
187 ACPI_FUNCTION_TRACE(ex_relinquish_interpreter);
188
189 /*
190 * If the global serialized flag is set, do not release the interpreter.
191 * This forces the interpreter to be single threaded.
192 */
193 if (!acpi_gbl_all_methods_serialized) {
194 acpi_ex_exit_interpreter();
131 } 195 }
132 196
133 return_VOID; 197 return_VOID;
@@ -141,8 +205,8 @@ void acpi_ex_exit_interpreter(void)
141 * 205 *
142 * RETURN: none 206 * RETURN: none
143 * 207 *
144 * DESCRIPTION: Truncate a number to 32-bits if the currently executing method 208 * DESCRIPTION: Truncate an ACPI Integer to 32 bits if the execution mode is
145 * belongs to a 32-bit ACPI table. 209 * 32-bit, as determined by the revision of the DSDT.
146 * 210 *
147 ******************************************************************************/ 211 ******************************************************************************/
148 212
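
The new reacquire/relinquish pair in exutils.c is where single-threaded execution is enforced: when acpi_gbl_all_methods_serialized is set, relinquish never actually drops the interpreter mutex and reacquire never re-takes it, so the lock stays held across what would otherwise be a yield point. Sketched with a plain mutex and a global flag:

    #include <stdio.h>
    #include <pthread.h>

    static pthread_mutex_t interp = PTHREAD_MUTEX_INITIALIZER;
    static int all_methods_serialized = 1;

    static void relinquish_interpreter(void)
    {
        if (!all_methods_serialized)
            pthread_mutex_unlock(&interp);
    }

    static void reacquire_interpreter(void)
    {
        if (!all_methods_serialized)
            pthread_mutex_lock(&interp);
    }

    int main(void)
    {
        pthread_mutex_lock(&interp);
        relinquish_interpreter();   /* no-op in serialized mode */
        reacquire_interpreter();    /* no-op in serialized mode */
        pthread_mutex_unlock(&interp);
        puts("serialized path exercised");
        return 0;
    }
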
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index f305a826ca2d..af22fdf73413 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -48,8 +48,8 @@ MODULE_LICENSE("GPL");
48 48
49static int acpi_fan_add(struct acpi_device *device); 49static int acpi_fan_add(struct acpi_device *device);
50static int acpi_fan_remove(struct acpi_device *device, int type); 50static int acpi_fan_remove(struct acpi_device *device, int type);
51static int acpi_fan_suspend(struct acpi_device *device, int state); 51static int acpi_fan_suspend(struct acpi_device *device, pm_message_t state);
52static int acpi_fan_resume(struct acpi_device *device, int state); 52static int acpi_fan_resume(struct acpi_device *device);
53 53
54static struct acpi_driver acpi_fan_driver = { 54static struct acpi_driver acpi_fan_driver = {
55 .name = ACPI_FAN_DRIVER_NAME, 55 .name = ACPI_FAN_DRIVER_NAME,
@@ -237,7 +237,7 @@ static int acpi_fan_remove(struct acpi_device *device, int type)
237 return 0; 237 return 0;
238} 238}
239 239
240static int acpi_fan_suspend(struct acpi_device *device, int state) 240static int acpi_fan_suspend(struct acpi_device *device, pm_message_t state)
241{ 241{
242 if (!device) 242 if (!device)
243 return -EINVAL; 243 return -EINVAL;
@@ -247,7 +247,7 @@ static int acpi_fan_suspend(struct acpi_device *device, int state)
247 return AE_OK; 247 return AE_OK;
248} 248}
249 249
250static int acpi_fan_resume(struct acpi_device *device, int state) 250static int acpi_fan_resume(struct acpi_device *device)
251{ 251{
252 int result = 0; 252 int result = 0;
253 int power_state = 0; 253 int power_state = 0;
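
The fan driver's callbacks are adjusted to the driver-model signatures: suspend takes a pm_message_t instead of a bare int, and resume takes no state argument because the device is simply returned to D0. A standalone model of the shape of the two callbacks; pm_message_t here is a stand-in typedef, not the kernel's:

    #include <stdio.h>

    typedef struct { int event; } pm_message_t;

    struct device { const char *name; };

    static int fan_suspend(struct device *dev, pm_message_t state)
    {
        (void)state;                    /* target state is implied: off */
        printf("%s: -> D3\n", dev->name);
        return 0;
    }

    static int fan_resume(struct device *dev)
    {
        printf("%s: -> D0\n", dev->name);
        return 0;
    }

    int main(void)
    {
        struct device fan = { "fan0" };

        fan_suspend(&fan, (pm_message_t){ .event = 2 });
        fan_resume(&fan);
        return 0;
    }
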
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index 8a0324b43e53..7b6c9ff9bebe 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -86,129 +86,6 @@ static int acpi_find_bridge_device(struct device *dev, acpi_handle * handle)
86 return ret; 86 return ret;
87} 87}
88 88
89/* Get PCI root bridge's handle from its segment and bus number */
90struct acpi_find_pci_root {
91 unsigned int seg;
92 unsigned int bus;
93 acpi_handle handle;
94};
95
96static acpi_status
97do_root_bridge_busnr_callback(struct acpi_resource *resource, void *data)
98{
99 unsigned long *busnr = data;
100 struct acpi_resource_address64 address;
101
102 if (resource->type != ACPI_RESOURCE_TYPE_ADDRESS16 &&
103 resource->type != ACPI_RESOURCE_TYPE_ADDRESS32 &&
104 resource->type != ACPI_RESOURCE_TYPE_ADDRESS64)
105 return AE_OK;
106
107 acpi_resource_to_address64(resource, &address);
108 if ((address.address_length > 0) &&
109 (address.resource_type == ACPI_BUS_NUMBER_RANGE))
110 *busnr = address.minimum;
111
112 return AE_OK;
113}
114
115static int get_root_bridge_busnr(acpi_handle handle)
116{
117 acpi_status status;
118 unsigned long bus, bbn;
119 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
120
121 acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
122
123 status = acpi_evaluate_integer(handle, METHOD_NAME__BBN, NULL,
124 &bbn);
125 if (status == AE_NOT_FOUND) {
126 /* Assume bus = 0 */
127 printk(KERN_INFO PREFIX
128 "Assume root bridge [%s] bus is 0\n",
129 (char *)buffer.pointer);
130 status = AE_OK;
131 bbn = 0;
132 }
133 if (ACPI_FAILURE(status)) {
134 bbn = -ENODEV;
135 goto exit;
136 }
137 if (bbn > 0)
138 goto exit;
139
140 /* _BBN in some systems return 0 for all root bridges */
141 bus = -1;
142 status = acpi_walk_resources(handle, METHOD_NAME__CRS,
143 do_root_bridge_busnr_callback, &bus);
144 /* If _CRS failed, we just use _BBN */
145 if (ACPI_FAILURE(status) || (bus == -1))
146 goto exit;
147 /* We select _CRS */
148 if (bbn != bus) {
149 printk(KERN_INFO PREFIX
150 "_BBN and _CRS returns different value for %s. Select _CRS\n",
151 (char *)buffer.pointer);
152 bbn = bus;
153 }
154 exit:
155 kfree(buffer.pointer);
156 return (int)bbn;
157}
158
159static acpi_status
160find_pci_rootbridge(acpi_handle handle, u32 lvl, void *context, void **rv)
161{
162 struct acpi_find_pci_root *find = (struct acpi_find_pci_root *)context;
163 unsigned long seg, bus;
164 acpi_status status;
165 int tmp;
166 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
167
168 acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
169
170 status = acpi_evaluate_integer(handle, METHOD_NAME__SEG, NULL, &seg);
171 if (status == AE_NOT_FOUND) {
172 /* Assume seg = 0 */
173 status = AE_OK;
174 seg = 0;
175 }
176 if (ACPI_FAILURE(status)) {
177 status = AE_CTRL_DEPTH;
178 goto exit;
179 }
180
181 tmp = get_root_bridge_busnr(handle);
182 if (tmp < 0) {
183 printk(KERN_ERR PREFIX
184 "Find root bridge failed for %s\n",
185 (char *)buffer.pointer);
186 status = AE_CTRL_DEPTH;
187 goto exit;
188 }
189 bus = tmp;
190
191 if (seg == find->seg && bus == find->bus)
192 {
193 find->handle = handle;
194 status = AE_CTRL_TERMINATE;
195 }
196 else
197 status = AE_OK;
198 exit:
199 kfree(buffer.pointer);
200 return status;
201}
202
203acpi_handle acpi_get_pci_rootbridge_handle(unsigned int seg, unsigned int bus)
204{
205 struct acpi_find_pci_root find = { seg, bus, NULL };
206
207 acpi_get_devices(PCI_ROOT_HID_STRING, find_pci_rootbridge, &find, NULL);
208 return find.handle;
209}
210EXPORT_SYMBOL_GPL(acpi_get_pci_rootbridge_handle);
211
212/* Get device's handler per its address under its parent */ 89/* Get device's handler per its address under its parent */
213struct acpi_find_child { 90struct acpi_find_child {
214 acpi_handle handle; 91 acpi_handle handle;
diff --git a/drivers/acpi/hardware/hwacpi.c b/drivers/acpi/hardware/hwacpi.c
index de50fab2a910..6031ca13dd2f 100644
--- a/drivers/acpi/hardware/hwacpi.c
+++ b/drivers/acpi/hardware/hwacpi.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -49,41 +49,6 @@ ACPI_MODULE_NAME("hwacpi")
49 49
50/****************************************************************************** 50/******************************************************************************
51 * 51 *
52 * FUNCTION: acpi_hw_initialize
53 *
54 * PARAMETERS: None
55 *
56 * RETURN: Status
57 *
58 * DESCRIPTION: Initialize and validate the various ACPI registers defined in
59 * the FADT.
60 *
61 ******************************************************************************/
62acpi_status acpi_hw_initialize(void)
63{
64 acpi_status status;
65
66 ACPI_FUNCTION_TRACE(hw_initialize);
67
68 /* We must have the ACPI tables by the time we get here */
69
70 if (!acpi_gbl_FADT) {
71 ACPI_ERROR((AE_INFO, "No FADT is present"));
72 return_ACPI_STATUS(AE_NO_ACPI_TABLES);
73 }
74
75 /* Sanity check the FADT for valid values */
76
77 status = acpi_ut_validate_fadt();
78 if (ACPI_FAILURE(status)) {
79 return_ACPI_STATUS(status);
80 }
81
82 return_ACPI_STATUS(AE_OK);
83}
84
85/******************************************************************************
86 *
87 * FUNCTION: acpi_hw_set_mode 52 * FUNCTION: acpi_hw_set_mode
88 * 53 *
89 * PARAMETERS: Mode - SYS_MODE_ACPI or SYS_MODE_LEGACY 54 * PARAMETERS: Mode - SYS_MODE_ACPI or SYS_MODE_LEGACY
@@ -93,7 +58,6 @@ acpi_status acpi_hw_initialize(void)
93 * DESCRIPTION: Transitions the system into the requested mode. 58 * DESCRIPTION: Transitions the system into the requested mode.
94 * 59 *
95 ******************************************************************************/ 60 ******************************************************************************/
96
97acpi_status acpi_hw_set_mode(u32 mode) 61acpi_status acpi_hw_set_mode(u32 mode)
98{ 62{
99 63
@@ -106,7 +70,7 @@ acpi_status acpi_hw_set_mode(u32 mode)
106 * ACPI 2.0 clarified that if SMI_CMD in FADT is zero, 70 * ACPI 2.0 clarified that if SMI_CMD in FADT is zero,
107 * system does not support mode transition. 71 * system does not support mode transition.
108 */ 72 */
109 if (!acpi_gbl_FADT->smi_cmd) { 73 if (!acpi_gbl_FADT.smi_command) {
110 ACPI_ERROR((AE_INFO, 74 ACPI_ERROR((AE_INFO,
111 "No SMI_CMD in FADT, mode transition failed")); 75 "No SMI_CMD in FADT, mode transition failed"));
112 return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE); 76 return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE);
@@ -119,7 +83,7 @@ acpi_status acpi_hw_set_mode(u32 mode)
119 * we make sure both the numbers are zero to determine these 83 * we make sure both the numbers are zero to determine these
120 * transitions are not supported. 84 * transitions are not supported.
121 */ 85 */
122 if (!acpi_gbl_FADT->acpi_enable && !acpi_gbl_FADT->acpi_disable) { 86 if (!acpi_gbl_FADT.acpi_enable && !acpi_gbl_FADT.acpi_disable) {
123 ACPI_ERROR((AE_INFO, 87 ACPI_ERROR((AE_INFO,
124 "No ACPI mode transition supported in this system (enable/disable both zero)")); 88 "No ACPI mode transition supported in this system (enable/disable both zero)"));
125 return_ACPI_STATUS(AE_OK); 89 return_ACPI_STATUS(AE_OK);
@@ -130,9 +94,8 @@ acpi_status acpi_hw_set_mode(u32 mode)
130 94
131 /* BIOS should have disabled ALL fixed and GP events */ 95 /* BIOS should have disabled ALL fixed and GP events */
132 96
133 status = acpi_os_write_port(acpi_gbl_FADT->smi_cmd, 97 status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
134 (u32) acpi_gbl_FADT->acpi_enable, 98 (u32) acpi_gbl_FADT.acpi_enable, 8);
135 8);
136 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 99 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
137 "Attempting to enable ACPI mode\n")); 100 "Attempting to enable ACPI mode\n"));
138 break; 101 break;
@@ -143,8 +106,8 @@ acpi_status acpi_hw_set_mode(u32 mode)
143 * BIOS should clear all fixed status bits and restore fixed event 106 * BIOS should clear all fixed status bits and restore fixed event
144 * enable bits to default 107 * enable bits to default
145 */ 108 */
146 status = acpi_os_write_port(acpi_gbl_FADT->smi_cmd, 109 status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
147 (u32) acpi_gbl_FADT->acpi_disable, 110 (u32) acpi_gbl_FADT.acpi_disable,
148 8); 111 8);
149 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 112 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
150 "Attempting to enable Legacy (non-ACPI) mode\n")); 113 "Attempting to enable Legacy (non-ACPI) mode\n"));
@@ -204,12 +167,11 @@ u32 acpi_hw_get_mode(void)
204 * ACPI 2.0 clarified that if SMI_CMD in FADT is zero, 167 * ACPI 2.0 clarified that if SMI_CMD in FADT is zero,
205 * the system does not support mode transitions. 168 * the system does not support mode transitions.
206 */ 169 */
207 if (!acpi_gbl_FADT->smi_cmd) { 170 if (!acpi_gbl_FADT.smi_command) {
208 return_UINT32(ACPI_SYS_MODE_ACPI); 171 return_UINT32(ACPI_SYS_MODE_ACPI);
209 } 172 }
210 173
211 status = 174 status = acpi_get_register(ACPI_BITREG_SCI_ENABLE, &value);
212 acpi_get_register(ACPI_BITREG_SCI_ENABLE, &value, ACPI_MTX_LOCK);
213 if (ACPI_FAILURE(status)) { 175 if (ACPI_FAILURE(status)) {
214 return_UINT32(ACPI_SYS_MODE_LEGACY); 176 return_UINT32(ACPI_SYS_MODE_LEGACY);
215 } 177 }
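The hwacpi.c hunks above all stem from one ACPICA change: acpi_gbl_FADT is now a statically allocated struct rather than a pointer, with fields renamed to the longer 2007 spellings (smi_cmd becomes smi_command). A minimal sketch of the resulting access pattern, using only the fields and calls shown in the hunks; the helper name is hypothetical:

    /*
     * Sketch only: example_enable_acpi_mode() is a hypothetical helper
     * illustrating the new "acpi_gbl_FADT." (struct, not pointer) access.
     */
    static acpi_status example_enable_acpi_mode(void)
    {
            /* A zero SMI command port means mode transitions are unsupported */
            if (!acpi_gbl_FADT.smi_command) {
                    return AE_NO_HARDWARE_RESPONSE;
            }

            /* Write the FADT "enable" value to the SMI command port, 8-bit wide */
            return acpi_os_write_port(acpi_gbl_FADT.smi_command,
                                      (u32) acpi_gbl_FADT.acpi_enable, 8);
    }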
diff --git a/drivers/acpi/hardware/hwgpe.c b/drivers/acpi/hardware/hwgpe.c
index 608a3a60ee11..117a05cadaaa 100644
--- a/drivers/acpi/hardware/hwgpe.c
+++ b/drivers/acpi/hardware/hwgpe.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -105,14 +105,20 @@ acpi_hw_write_gpe_enable_reg(struct acpi_gpe_event_info *gpe_event_info)
105acpi_status acpi_hw_clear_gpe(struct acpi_gpe_event_info * gpe_event_info) 105acpi_status acpi_hw_clear_gpe(struct acpi_gpe_event_info * gpe_event_info)
106{ 106{
107 acpi_status status; 107 acpi_status status;
108 u8 register_bit;
108 109
109 ACPI_FUNCTION_ENTRY(); 110 ACPI_FUNCTION_ENTRY();
110 111
112 register_bit = (u8)
113 (1 <<
114 (gpe_event_info->gpe_number -
115 gpe_event_info->register_info->base_gpe_number));
116
111 /* 117 /*
112 * Write a one to the appropriate bit in the status register to 118 * Write a one to the appropriate bit in the status register to
113 * clear this GPE. 119 * clear this GPE.
114 */ 120 */
115 status = acpi_hw_low_level_write(8, gpe_event_info->register_bit, 121 status = acpi_hw_low_level_write(8, register_bit,
116 &gpe_event_info->register_info-> 122 &gpe_event_info->register_info->
117 status_address); 123 status_address);
118 124
@@ -155,7 +161,10 @@ acpi_hw_get_gpe_status(struct acpi_gpe_event_info * gpe_event_info,
155 161
156 /* Get the register bitmask for this GPE */ 162 /* Get the register bitmask for this GPE */
157 163
158 register_bit = gpe_event_info->register_bit; 164 register_bit = (u8)
165 (1 <<
166 (gpe_event_info->gpe_number -
167 gpe_event_info->register_info->base_gpe_number));
159 168
160 /* GPE currently enabled? (enabled for runtime?) */ 169 /* GPE currently enabled? (enabled for runtime?) */
161 170
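Both hwgpe.c hunks replace the cached gpe_event_info->register_bit field with an on-the-fly computation. A sketch of that computation, assuming (as the hunks imply) that each GPE register covers eight consecutive GPE numbers starting at base_gpe_number; compute_gpe_mask is a hypothetical name:

    static u8 compute_gpe_mask(struct acpi_gpe_event_info *gpe_event_info)
    {
            /*
             * The bit position is the GPE's offset within its 8-bit register;
             * e.g. GPE 0x13 in a register with base 0x10 yields 1 << 3 = 0x08.
             */
            return (u8) (1 << (gpe_event_info->gpe_number -
                               gpe_event_info->register_info->base_gpe_number));
    }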
diff --git a/drivers/acpi/hardware/hwregs.c b/drivers/acpi/hardware/hwregs.c
index fa58c1edce1e..1d371fa663f2 100644
--- a/drivers/acpi/hardware/hwregs.c
+++ b/drivers/acpi/hardware/hwregs.c
@@ -7,7 +7,7 @@
7 ******************************************************************************/ 7 ******************************************************************************/
8 8
9/* 9/*
10 * Copyright (C) 2000 - 2006, R. Byron Moore 10 * Copyright (C) 2000 - 2007, R. Byron Moore
11 * All rights reserved. 11 * All rights reserved.
12 * 12 *
13 * Redistribution and use in source and binary forms, with or without 13 * Redistribution and use in source and binary forms, with or without
@@ -54,17 +54,15 @@ ACPI_MODULE_NAME("hwregs")
54 * 54 *
55 * FUNCTION: acpi_hw_clear_acpi_status 55 * FUNCTION: acpi_hw_clear_acpi_status
56 * 56 *
57 * PARAMETERS: Flags - Lock the hardware or not 57 * PARAMETERS: None
58 * 58 *
59 * RETURN: none 59 * RETURN: None
60 * 60 *
61 * DESCRIPTION: Clears all fixed and general purpose status bits 61 * DESCRIPTION: Clears all fixed and general purpose status bits
62 * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED 62 * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
63 * 63 *
64 * NOTE: TBD: Flags parameter is obsolete, to be removed
65 *
66 ******************************************************************************/ 64 ******************************************************************************/
67acpi_status acpi_hw_clear_acpi_status(u32 flags) 65acpi_status acpi_hw_clear_acpi_status(void)
68{ 66{
69 acpi_status status; 67 acpi_status status;
70 acpi_cpu_flags lock_flags = 0; 68 acpi_cpu_flags lock_flags = 0;
@@ -73,7 +71,7 @@ acpi_status acpi_hw_clear_acpi_status(u32 flags)
73 71
74 ACPI_DEBUG_PRINT((ACPI_DB_IO, "About to write %04X to %04X\n", 72 ACPI_DEBUG_PRINT((ACPI_DB_IO, "About to write %04X to %04X\n",
75 ACPI_BITMASK_ALL_FIXED_STATUS, 73 ACPI_BITMASK_ALL_FIXED_STATUS,
76 (u16) acpi_gbl_FADT->xpm1a_evt_blk.address)); 74 (u16) acpi_gbl_FADT.xpm1a_event_block.address));
77 75
78 lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock); 76 lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
79 77
@@ -86,10 +84,10 @@ acpi_status acpi_hw_clear_acpi_status(u32 flags)
86 84
87 /* Clear the fixed events */ 85 /* Clear the fixed events */
88 86
89 if (acpi_gbl_FADT->xpm1b_evt_blk.address) { 87 if (acpi_gbl_FADT.xpm1b_event_block.address) {
90 status = 88 status =
91 acpi_hw_low_level_write(16, ACPI_BITMASK_ALL_FIXED_STATUS, 89 acpi_hw_low_level_write(16, ACPI_BITMASK_ALL_FIXED_STATUS,
92 &acpi_gbl_FADT->xpm1b_evt_blk); 90 &acpi_gbl_FADT.xpm1b_event_block);
93 if (ACPI_FAILURE(status)) { 91 if (ACPI_FAILURE(status)) {
94 goto unlock_and_exit; 92 goto unlock_and_exit;
95 } 93 }
@@ -253,18 +251,15 @@ struct acpi_bit_register_info *acpi_hw_get_bit_register_info(u32 register_id)
253 * 251 *
254 * PARAMETERS: register_id - ID of ACPI bit_register to access 252 * PARAMETERS: register_id - ID of ACPI bit_register to access
255 * return_value - Value that was read from the register 253 * return_value - Value that was read from the register
256 * Flags - Lock the hardware or not
257 * 254 *
258 * RETURN: Status and the value read from specified Register. Value 255 * RETURN: Status and the value read from specified Register. Value
259 * returned is normalized to bit0 (is shifted all the way right) 256 * returned is normalized to bit0 (is shifted all the way right)
260 * 257 *
261 * DESCRIPTION: ACPI bit_register read function. 258 * DESCRIPTION: ACPI bit_register read function.
262 * 259 *
263 * NOTE: TBD: Flags parameter is obsolete, to be removed
264 *
265 ******************************************************************************/ 260 ******************************************************************************/
266 261
267acpi_status acpi_get_register(u32 register_id, u32 * return_value, u32 flags) 262acpi_status acpi_get_register(u32 register_id, u32 * return_value)
268{ 263{
269 u32 register_value = 0; 264 u32 register_value = 0;
270 struct acpi_bit_register_info *bit_reg_info; 265 struct acpi_bit_register_info *bit_reg_info;
@@ -312,16 +307,13 @@ ACPI_EXPORT_SYMBOL(acpi_get_register)
312 * PARAMETERS: register_id - ID of ACPI bit_register to access 307 * PARAMETERS: register_id - ID of ACPI bit_register to access
313 * Value - (only used on write) value to write to the 308 * Value - (only used on write) value to write to the
314 * Register, NOT pre-normalized to the bit pos 309 * Register, NOT pre-normalized to the bit pos
315 * Flags - Lock the hardware or not
316 * 310 *
317 * RETURN: Status 311 * RETURN: Status
318 * 312 *
319 * DESCRIPTION: ACPI Bit Register write function. 313 * DESCRIPTION: ACPI Bit Register write function.
320 * 314 *
321 * NOTE: TBD: Flags parameter is obsolete, to be removed
322 *
323 ******************************************************************************/ 315 ******************************************************************************/
324acpi_status acpi_set_register(u32 register_id, u32 value, u32 flags) 316acpi_status acpi_set_register(u32 register_id, u32 value)
325{ 317{
326 u32 register_value = 0; 318 u32 register_value = 0;
327 struct acpi_bit_register_info *bit_reg_info; 319 struct acpi_bit_register_info *bit_reg_info;
@@ -422,8 +414,9 @@ acpi_status acpi_set_register(u32 register_id, u32 value, u32 flags)
422 ACPI_DEBUG_PRINT((ACPI_DB_IO, 414 ACPI_DEBUG_PRINT((ACPI_DB_IO,
423 "PM2 control: Read %X from %8.8X%8.8X\n", 415 "PM2 control: Read %X from %8.8X%8.8X\n",
424 register_value, 416 register_value,
425 ACPI_FORMAT_UINT64(acpi_gbl_FADT-> 417 ACPI_FORMAT_UINT64(acpi_gbl_FADT.
426 xpm2_cnt_blk.address))); 418 xpm2_control_block.
419 address)));
427 420
428 ACPI_REGISTER_INSERT_VALUE(register_value, 421 ACPI_REGISTER_INSERT_VALUE(register_value,
429 bit_reg_info->bit_position, 422 bit_reg_info->bit_position,
@@ -433,8 +426,9 @@ acpi_status acpi_set_register(u32 register_id, u32 value, u32 flags)
433 ACPI_DEBUG_PRINT((ACPI_DB_IO, 426 ACPI_DEBUG_PRINT((ACPI_DB_IO,
434 "About to write %4.4X to %8.8X%8.8X\n", 427 "About to write %4.4X to %8.8X%8.8X\n",
435 register_value, 428 register_value,
436 ACPI_FORMAT_UINT64(acpi_gbl_FADT-> 429 ACPI_FORMAT_UINT64(acpi_gbl_FADT.
437 xpm2_cnt_blk.address))); 430 xpm2_control_block.
431 address)));
438 432
439 status = acpi_hw_register_write(ACPI_MTX_DO_NOT_LOCK, 433 status = acpi_hw_register_write(ACPI_MTX_DO_NOT_LOCK,
440 ACPI_REGISTER_PM2_CONTROL, 434 ACPI_REGISTER_PM2_CONTROL,
@@ -495,7 +489,7 @@ acpi_hw_register_read(u8 use_lock, u32 register_id, u32 * return_value)
495 489
496 status = 490 status =
497 acpi_hw_low_level_read(16, &value1, 491 acpi_hw_low_level_read(16, &value1,
498 &acpi_gbl_FADT->xpm1a_evt_blk); 492 &acpi_gbl_FADT.xpm1a_event_block);
499 if (ACPI_FAILURE(status)) { 493 if (ACPI_FAILURE(status)) {
500 goto unlock_and_exit; 494 goto unlock_and_exit;
501 } 495 }
@@ -504,7 +498,7 @@ acpi_hw_register_read(u8 use_lock, u32 register_id, u32 * return_value)
504 498
505 status = 499 status =
506 acpi_hw_low_level_read(16, &value2, 500 acpi_hw_low_level_read(16, &value2,
507 &acpi_gbl_FADT->xpm1b_evt_blk); 501 &acpi_gbl_FADT.xpm1b_event_block);
508 value1 |= value2; 502 value1 |= value2;
509 break; 503 break;
510 504
@@ -527,14 +521,14 @@ acpi_hw_register_read(u8 use_lock, u32 register_id, u32 * return_value)
527 521
528 status = 522 status =
529 acpi_hw_low_level_read(16, &value1, 523 acpi_hw_low_level_read(16, &value1,
530 &acpi_gbl_FADT->xpm1a_cnt_blk); 524 &acpi_gbl_FADT.xpm1a_control_block);
531 if (ACPI_FAILURE(status)) { 525 if (ACPI_FAILURE(status)) {
532 goto unlock_and_exit; 526 goto unlock_and_exit;
533 } 527 }
534 528
535 status = 529 status =
536 acpi_hw_low_level_read(16, &value2, 530 acpi_hw_low_level_read(16, &value2,
537 &acpi_gbl_FADT->xpm1b_cnt_blk); 531 &acpi_gbl_FADT.xpm1b_control_block);
538 value1 |= value2; 532 value1 |= value2;
539 break; 533 break;
540 534
@@ -542,19 +536,20 @@ acpi_hw_register_read(u8 use_lock, u32 register_id, u32 * return_value)
542 536
543 status = 537 status =
544 acpi_hw_low_level_read(8, &value1, 538 acpi_hw_low_level_read(8, &value1,
545 &acpi_gbl_FADT->xpm2_cnt_blk); 539 &acpi_gbl_FADT.xpm2_control_block);
546 break; 540 break;
547 541
548 case ACPI_REGISTER_PM_TIMER: /* 32-bit access */ 542 case ACPI_REGISTER_PM_TIMER: /* 32-bit access */
549 543
550 status = 544 status =
551 acpi_hw_low_level_read(32, &value1, 545 acpi_hw_low_level_read(32, &value1,
552 &acpi_gbl_FADT->xpm_tmr_blk); 546 &acpi_gbl_FADT.xpm_timer_block);
553 break; 547 break;
554 548
555 case ACPI_REGISTER_SMI_COMMAND_BLOCK: /* 8-bit access */ 549 case ACPI_REGISTER_SMI_COMMAND_BLOCK: /* 8-bit access */
556 550
557 status = acpi_os_read_port(acpi_gbl_FADT->smi_cmd, &value1, 8); 551 status =
552 acpi_os_read_port(acpi_gbl_FADT.smi_command, &value1, 8);
558 break; 553 break;
559 554
560 default: 555 default:
@@ -635,7 +630,7 @@ acpi_status acpi_hw_register_write(u8 use_lock, u32 register_id, u32 value)
635 630
636 status = 631 status =
637 acpi_hw_low_level_write(16, value, 632 acpi_hw_low_level_write(16, value,
638 &acpi_gbl_FADT->xpm1a_evt_blk); 633 &acpi_gbl_FADT.xpm1a_event_block);
639 if (ACPI_FAILURE(status)) { 634 if (ACPI_FAILURE(status)) {
640 goto unlock_and_exit; 635 goto unlock_and_exit;
641 } 636 }
@@ -644,7 +639,7 @@ acpi_status acpi_hw_register_write(u8 use_lock, u32 register_id, u32 value)
644 639
645 status = 640 status =
646 acpi_hw_low_level_write(16, value, 641 acpi_hw_low_level_write(16, value,
647 &acpi_gbl_FADT->xpm1b_evt_blk); 642 &acpi_gbl_FADT.xpm1b_event_block);
648 break; 643 break;
649 644
650 case ACPI_REGISTER_PM1_ENABLE: /* 16-bit access */ 645 case ACPI_REGISTER_PM1_ENABLE: /* 16-bit access */
@@ -682,49 +677,50 @@ acpi_status acpi_hw_register_write(u8 use_lock, u32 register_id, u32 value)
682 677
683 status = 678 status =
684 acpi_hw_low_level_write(16, value, 679 acpi_hw_low_level_write(16, value,
685 &acpi_gbl_FADT->xpm1a_cnt_blk); 680 &acpi_gbl_FADT.xpm1a_control_block);
686 if (ACPI_FAILURE(status)) { 681 if (ACPI_FAILURE(status)) {
687 goto unlock_and_exit; 682 goto unlock_and_exit;
688 } 683 }
689 684
690 status = 685 status =
691 acpi_hw_low_level_write(16, value, 686 acpi_hw_low_level_write(16, value,
692 &acpi_gbl_FADT->xpm1b_cnt_blk); 687 &acpi_gbl_FADT.xpm1b_control_block);
693 break; 688 break;
694 689
695 case ACPI_REGISTER_PM1A_CONTROL: /* 16-bit access */ 690 case ACPI_REGISTER_PM1A_CONTROL: /* 16-bit access */
696 691
697 status = 692 status =
698 acpi_hw_low_level_write(16, value, 693 acpi_hw_low_level_write(16, value,
699 &acpi_gbl_FADT->xpm1a_cnt_blk); 694 &acpi_gbl_FADT.xpm1a_control_block);
700 break; 695 break;
701 696
702 case ACPI_REGISTER_PM1B_CONTROL: /* 16-bit access */ 697 case ACPI_REGISTER_PM1B_CONTROL: /* 16-bit access */
703 698
704 status = 699 status =
705 acpi_hw_low_level_write(16, value, 700 acpi_hw_low_level_write(16, value,
706 &acpi_gbl_FADT->xpm1b_cnt_blk); 701 &acpi_gbl_FADT.xpm1b_control_block);
707 break; 702 break;
708 703
709 case ACPI_REGISTER_PM2_CONTROL: /* 8-bit access */ 704 case ACPI_REGISTER_PM2_CONTROL: /* 8-bit access */
710 705
711 status = 706 status =
712 acpi_hw_low_level_write(8, value, 707 acpi_hw_low_level_write(8, value,
713 &acpi_gbl_FADT->xpm2_cnt_blk); 708 &acpi_gbl_FADT.xpm2_control_block);
714 break; 709 break;
715 710
716 case ACPI_REGISTER_PM_TIMER: /* 32-bit access */ 711 case ACPI_REGISTER_PM_TIMER: /* 32-bit access */
717 712
718 status = 713 status =
719 acpi_hw_low_level_write(32, value, 714 acpi_hw_low_level_write(32, value,
720 &acpi_gbl_FADT->xpm_tmr_blk); 715 &acpi_gbl_FADT.xpm_timer_block);
721 break; 716 break;
722 717
723 case ACPI_REGISTER_SMI_COMMAND_BLOCK: /* 8-bit access */ 718 case ACPI_REGISTER_SMI_COMMAND_BLOCK: /* 8-bit access */
724 719
725 /* SMI_CMD is currently always in IO space */ 720 /* SMI_CMD is currently always in IO space */
726 721
727 status = acpi_os_write_port(acpi_gbl_FADT->smi_cmd, value, 8); 722 status =
723 acpi_os_write_port(acpi_gbl_FADT.smi_command, value, 8);
728 break; 724 break;
729 725
730 default: 726 default:
@@ -783,7 +779,7 @@ acpi_hw_low_level_read(u32 width, u32 * value, struct acpi_generic_address *reg)
783 * Two address spaces supported: Memory or IO. 779 * Two address spaces supported: Memory or IO.
784 * PCI_Config is not supported here because the GAS struct is insufficient 780 * PCI_Config is not supported here because the GAS struct is insufficient
785 */ 781 */
786 switch (reg->address_space_id) { 782 switch (reg->space_id) {
787 case ACPI_ADR_SPACE_SYSTEM_MEMORY: 783 case ACPI_ADR_SPACE_SYSTEM_MEMORY:
788 784
789 status = acpi_os_read_memory((acpi_physical_address) address, 785 status = acpi_os_read_memory((acpi_physical_address) address,
@@ -792,22 +788,20 @@ acpi_hw_low_level_read(u32 width, u32 * value, struct acpi_generic_address *reg)
792 788
793 case ACPI_ADR_SPACE_SYSTEM_IO: 789 case ACPI_ADR_SPACE_SYSTEM_IO:
794 790
795 status = acpi_os_read_port((acpi_io_address) address, 791 status =
796 value, width); 792 acpi_os_read_port((acpi_io_address) address, value, width);
797 break; 793 break;
798 794
799 default: 795 default:
800 ACPI_ERROR((AE_INFO, 796 ACPI_ERROR((AE_INFO,
801 "Unsupported address space: %X", 797 "Unsupported address space: %X", reg->space_id));
802 reg->address_space_id));
803 return (AE_BAD_PARAMETER); 798 return (AE_BAD_PARAMETER);
804 } 799 }
805 800
806 ACPI_DEBUG_PRINT((ACPI_DB_IO, 801 ACPI_DEBUG_PRINT((ACPI_DB_IO,
807 "Read: %8.8X width %2d from %8.8X%8.8X (%s)\n", 802 "Read: %8.8X width %2d from %8.8X%8.8X (%s)\n",
808 *value, width, 803 *value, width, ACPI_FORMAT_UINT64(address),
809 ACPI_FORMAT_UINT64(address), 804 acpi_ut_get_region_name(reg->space_id)));
810 acpi_ut_get_region_name(reg->address_space_id)));
811 805
812 return (status); 806 return (status);
813} 807}
@@ -854,7 +848,7 @@ acpi_hw_low_level_write(u32 width, u32 value, struct acpi_generic_address * reg)
854 * Two address spaces supported: Memory or IO. 848 * Two address spaces supported: Memory or IO.
855 * PCI_Config is not supported here because the GAS struct is insufficient 849 * PCI_Config is not supported here because the GAS struct is insufficient
856 */ 850 */
857 switch (reg->address_space_id) { 851 switch (reg->space_id) {
858 case ACPI_ADR_SPACE_SYSTEM_MEMORY: 852 case ACPI_ADR_SPACE_SYSTEM_MEMORY:
859 853
860 status = acpi_os_write_memory((acpi_physical_address) address, 854 status = acpi_os_write_memory((acpi_physical_address) address,
@@ -863,22 +857,20 @@ acpi_hw_low_level_write(u32 width, u32 value, struct acpi_generic_address * reg)
863 857
864 case ACPI_ADR_SPACE_SYSTEM_IO: 858 case ACPI_ADR_SPACE_SYSTEM_IO:
865 859
866 status = acpi_os_write_port((acpi_io_address) address, 860 status = acpi_os_write_port((acpi_io_address) address, value,
867 value, width); 861 width);
868 break; 862 break;
869 863
870 default: 864 default:
871 ACPI_ERROR((AE_INFO, 865 ACPI_ERROR((AE_INFO,
872 "Unsupported address space: %X", 866 "Unsupported address space: %X", reg->space_id));
873 reg->address_space_id));
874 return (AE_BAD_PARAMETER); 867 return (AE_BAD_PARAMETER);
875 } 868 }
876 869
877 ACPI_DEBUG_PRINT((ACPI_DB_IO, 870 ACPI_DEBUG_PRINT((ACPI_DB_IO,
878 "Wrote: %8.8X width %2d to %8.8X%8.8X (%s)\n", 871 "Wrote: %8.8X width %2d to %8.8X%8.8X (%s)\n",
879 value, width, 872 value, width, ACPI_FORMAT_UINT64(address),
880 ACPI_FORMAT_UINT64(address), 873 acpi_ut_get_region_name(reg->space_id)));
881 acpi_ut_get_region_name(reg->address_space_id)));
882 874
883 return (status); 875 return (status);
884} 876}
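The recurring hwregs.c change is the removal of the obsolete Flags argument from acpi_get_register() and acpi_set_register(); locking is now handled internally. A usage sketch of the new two-argument signatures (the caller is hypothetical):

    static acpi_status example_clear_wake_status(void)
    {
            u32 in_value;
            acpi_status status;

            /* Old form: acpi_set_register(ACPI_BITREG_WAKE_STATUS, 1, ACPI_MTX_LOCK) */
            status = acpi_set_register(ACPI_BITREG_WAKE_STATUS, 1);
            if (ACPI_FAILURE(status)) {
                    return status;
            }

            /* Read the bit back; no lock flag needed anymore */
            return acpi_get_register(ACPI_BITREG_WAKE_STATUS, &in_value);
    }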
diff --git a/drivers/acpi/hardware/hwsleep.c b/drivers/acpi/hardware/hwsleep.c
index 8bb43cae60c2..57901ca3ade9 100644
--- a/drivers/acpi/hardware/hwsleep.c
+++ b/drivers/acpi/hardware/hwsleep.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -43,6 +43,7 @@
43 */ 43 */
44 44
45#include <acpi/acpi.h> 45#include <acpi/acpi.h>
46#include <acpi/actables.h>
46 47
47#define _COMPONENT ACPI_HARDWARE 48#define _COMPONENT ACPI_HARDWARE
48ACPI_MODULE_NAME("hwsleep") 49ACPI_MODULE_NAME("hwsleep")
@@ -62,17 +63,32 @@ ACPI_MODULE_NAME("hwsleep")
62acpi_status 63acpi_status
63acpi_set_firmware_waking_vector(acpi_physical_address physical_address) 64acpi_set_firmware_waking_vector(acpi_physical_address physical_address)
64{ 65{
66 struct acpi_table_facs *facs;
67 acpi_status status;
65 68
66 ACPI_FUNCTION_TRACE(acpi_set_firmware_waking_vector); 69 ACPI_FUNCTION_TRACE(acpi_set_firmware_waking_vector);
67 70
71 /* Get the FACS */
72
73 status =
74 acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
75 (struct acpi_table_header **)&facs);
76 if (ACPI_FAILURE(status)) {
77 return_ACPI_STATUS(status);
78 }
79
68 /* Set the vector */ 80 /* Set the vector */
69 81
70 if (acpi_gbl_common_fACS.vector_width == 32) { 82 if ((facs->length < 32) || (!(facs->xfirmware_waking_vector))) {
71 *(ACPI_CAST_PTR 83 /*
72 (u32, acpi_gbl_common_fACS.firmware_waking_vector)) 84 * ACPI 1.0 FACS or short table or optional X_ field is zero
73 = (u32) physical_address; 85 */
86 facs->firmware_waking_vector = (u32) physical_address;
74 } else { 87 } else {
75 *acpi_gbl_common_fACS.firmware_waking_vector = physical_address; 88 /*
89 * ACPI 2.0 FACS with valid X_ field
90 */
91 facs->xfirmware_waking_vector = physical_address;
76 } 92 }
77 93
78 return_ACPI_STATUS(AE_OK); 94 return_ACPI_STATUS(AE_OK);
@@ -97,6 +113,8 @@ ACPI_EXPORT_SYMBOL(acpi_set_firmware_waking_vector)
97acpi_status 113acpi_status
98acpi_get_firmware_waking_vector(acpi_physical_address * physical_address) 114acpi_get_firmware_waking_vector(acpi_physical_address * physical_address)
99{ 115{
116 struct acpi_table_facs *facs;
117 acpi_status status;
100 118
101 ACPI_FUNCTION_TRACE(acpi_get_firmware_waking_vector); 119 ACPI_FUNCTION_TRACE(acpi_get_firmware_waking_vector);
102 120
@@ -104,16 +122,29 @@ acpi_get_firmware_waking_vector(acpi_physical_address * physical_address)
104 return_ACPI_STATUS(AE_BAD_PARAMETER); 122 return_ACPI_STATUS(AE_BAD_PARAMETER);
105 } 123 }
106 124
125 /* Get the FACS */
126
127 status =
128 acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
129 (struct acpi_table_header **)&facs);
130 if (ACPI_FAILURE(status)) {
131 return_ACPI_STATUS(status);
132 }
133
107 /* Get the vector */ 134 /* Get the vector */
108 135
109 if (acpi_gbl_common_fACS.vector_width == 32) { 136 if ((facs->length < 32) || (!(facs->xfirmware_waking_vector))) {
110 *physical_address = (acpi_physical_address) 137 /*
111 * 138 * ACPI 1.0 FACS or short table or optional X_ field is zero
112 (ACPI_CAST_PTR 139 */
113 (u32, acpi_gbl_common_fACS.firmware_waking_vector)); 140 *physical_address =
141 (acpi_physical_address) facs->firmware_waking_vector;
114 } else { 142 } else {
143 /*
144 * ACPI 2.0 FACS with valid X_ field
145 */
115 *physical_address = 146 *physical_address =
116 *acpi_gbl_common_fACS.firmware_waking_vector; 147 (acpi_physical_address) facs->xfirmware_waking_vector;
117 } 148 }
118 149
119 return_ACPI_STATUS(AE_OK); 150 return_ACPI_STATUS(AE_OK);
@@ -246,15 +277,14 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
246 277
247 /* Clear wake status */ 278 /* Clear wake status */
248 279
249 status = 280 status = acpi_set_register(ACPI_BITREG_WAKE_STATUS, 1);
250 acpi_set_register(ACPI_BITREG_WAKE_STATUS, 1, ACPI_MTX_DO_NOT_LOCK);
251 if (ACPI_FAILURE(status)) { 281 if (ACPI_FAILURE(status)) {
252 return_ACPI_STATUS(status); 282 return_ACPI_STATUS(status);
253 } 283 }
254 284
255 /* Clear all fixed and general purpose status bits */ 285 /* Clear all fixed and general purpose status bits */
256 286
257 status = acpi_hw_clear_acpi_status(ACPI_MTX_DO_NOT_LOCK); 287 status = acpi_hw_clear_acpi_status();
258 if (ACPI_FAILURE(status)) { 288 if (ACPI_FAILURE(status)) {
259 return_ACPI_STATUS(status); 289 return_ACPI_STATUS(status);
260 } 290 }
@@ -367,8 +397,7 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
367 /* Wait until we enter sleep state */ 397 /* Wait until we enter sleep state */
368 398
369 do { 399 do {
370 status = acpi_get_register(ACPI_BITREG_WAKE_STATUS, &in_value, 400 status = acpi_get_register(ACPI_BITREG_WAKE_STATUS, &in_value);
371 ACPI_MTX_DO_NOT_LOCK);
372 if (ACPI_FAILURE(status)) { 401 if (ACPI_FAILURE(status)) {
373 return_ACPI_STATUS(status); 402 return_ACPI_STATUS(status);
374 } 403 }
@@ -401,13 +430,12 @@ acpi_status asmlinkage acpi_enter_sleep_state_s4bios(void)
401 430
402 ACPI_FUNCTION_TRACE(acpi_enter_sleep_state_s4bios); 431 ACPI_FUNCTION_TRACE(acpi_enter_sleep_state_s4bios);
403 432
404 status = 433 status = acpi_set_register(ACPI_BITREG_WAKE_STATUS, 1);
405 acpi_set_register(ACPI_BITREG_WAKE_STATUS, 1, ACPI_MTX_DO_NOT_LOCK);
406 if (ACPI_FAILURE(status)) { 434 if (ACPI_FAILURE(status)) {
407 return_ACPI_STATUS(status); 435 return_ACPI_STATUS(status);
408 } 436 }
409 437
410 status = acpi_hw_clear_acpi_status(ACPI_MTX_DO_NOT_LOCK); 438 status = acpi_hw_clear_acpi_status();
411 if (ACPI_FAILURE(status)) { 439 if (ACPI_FAILURE(status)) {
412 return_ACPI_STATUS(status); 440 return_ACPI_STATUS(status);
413 } 441 }
@@ -429,13 +457,12 @@ acpi_status asmlinkage acpi_enter_sleep_state_s4bios(void)
429 457
430 ACPI_FLUSH_CPU_CACHE(); 458 ACPI_FLUSH_CPU_CACHE();
431 459
432 status = acpi_os_write_port(acpi_gbl_FADT->smi_cmd, 460 status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
433 (u32) acpi_gbl_FADT->S4bios_req, 8); 461 (u32) acpi_gbl_FADT.S4bios_request, 8);
434 462
435 do { 463 do {
436 acpi_os_stall(1000); 464 acpi_os_stall(1000);
437 status = acpi_get_register(ACPI_BITREG_WAKE_STATUS, &in_value, 465 status = acpi_get_register(ACPI_BITREG_WAKE_STATUS, &in_value);
438 ACPI_MTX_DO_NOT_LOCK);
439 if (ACPI_FAILURE(status)) { 466 if (ACPI_FAILURE(status)) {
440 return_ACPI_STATUS(status); 467 return_ACPI_STATUS(status);
441 } 468 }
@@ -568,13 +595,11 @@ acpi_status acpi_leave_sleep_state(u8 sleep_state)
568 595
569 (void) 596 (void)
570 acpi_set_register(acpi_gbl_fixed_event_info 597 acpi_set_register(acpi_gbl_fixed_event_info
571 [ACPI_EVENT_POWER_BUTTON].enable_register_id, 1, 598 [ACPI_EVENT_POWER_BUTTON].enable_register_id, 1);
572 ACPI_MTX_DO_NOT_LOCK);
573 599
574 (void) 600 (void)
575 acpi_set_register(acpi_gbl_fixed_event_info 601 acpi_set_register(acpi_gbl_fixed_event_info
576 [ACPI_EVENT_POWER_BUTTON].status_register_id, 1, 602 [ACPI_EVENT_POWER_BUTTON].status_register_id, 1);
577 ACPI_MTX_DO_NOT_LOCK);
578 603
579 arg.integer.value = ACPI_SST_WORKING; 604 arg.integer.value = ACPI_SST_WORKING;
580 status = acpi_evaluate_object(NULL, METHOD_NAME__SST, &arg_list, NULL); 605 status = acpi_evaluate_object(NULL, METHOD_NAME__SST, &arg_list, NULL);
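The hwsleep.c waking-vector hunks encode one rule: use the 64-bit X_ field only when the FACS is long enough to contain it (at least 32 bytes) and the field is nonzero; otherwise fall back to the 32-bit ACPI 1.0 field. A condensed sketch (set_waking_vector is a hypothetical name; the real code first fetches the FACS via acpi_get_table_by_index):

    static void set_waking_vector(struct acpi_table_facs *facs,
                                  acpi_physical_address address)
    {
            if ((facs->length < 32) || (!facs->xfirmware_waking_vector)) {
                    /* ACPI 1.0 FACS, short table, or unused X_ field */
                    facs->firmware_waking_vector = (u32) address;
            } else {
                    /* ACPI 2.0+ FACS with a valid 64-bit X_ field */
                    facs->xfirmware_waking_vector = address;
            }
    }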
diff --git a/drivers/acpi/hardware/hwtimer.c b/drivers/acpi/hardware/hwtimer.c
index c4ec47c939fd..c32eab696acd 100644
--- a/drivers/acpi/hardware/hwtimer.c
+++ b/drivers/acpi/hardware/hwtimer.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -66,7 +66,7 @@ acpi_status acpi_get_timer_resolution(u32 * resolution)
66 return_ACPI_STATUS(AE_BAD_PARAMETER); 66 return_ACPI_STATUS(AE_BAD_PARAMETER);
67 } 67 }
68 68
69 if (acpi_gbl_FADT->tmr_val_ext == 0) { 69 if ((acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER) == 0) {
70 *resolution = 24; 70 *resolution = 24;
71 } else { 71 } else {
72 *resolution = 32; 72 *resolution = 32;
@@ -98,7 +98,8 @@ acpi_status acpi_get_timer(u32 * ticks)
98 return_ACPI_STATUS(AE_BAD_PARAMETER); 98 return_ACPI_STATUS(AE_BAD_PARAMETER);
99 } 99 }
100 100
101 status = acpi_hw_low_level_read(32, ticks, &acpi_gbl_FADT->xpm_tmr_blk); 101 status =
102 acpi_hw_low_level_read(32, ticks, &acpi_gbl_FADT.xpm_timer_block);
102 103
103 return_ACPI_STATUS(status); 104 return_ACPI_STATUS(status);
104} 105}
@@ -153,7 +154,7 @@ acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 * time_elapsed)
153 if (start_ticks < end_ticks) { 154 if (start_ticks < end_ticks) {
154 delta_ticks = end_ticks - start_ticks; 155 delta_ticks = end_ticks - start_ticks;
155 } else if (start_ticks > end_ticks) { 156 } else if (start_ticks > end_ticks) {
156 if (acpi_gbl_FADT->tmr_val_ext == 0) { 157 if ((acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER) == 0) {
157 158
158 /* 24-bit Timer */ 159 /* 24-bit Timer */
159 160
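In hwtimer.c the old tmr_val_ext byte gives way to a flag bit in the new FADT struct. A one-line sketch of the width test exactly as the hunks use it (timer_width_bits is a hypothetical helper):

    static u32 timer_width_bits(void)
    {
            /* Flag clear: 24-bit PM timer rollover; flag set: full 32 bits */
            return (acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER) ? 32 : 24;
    }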
diff --git a/drivers/acpi/motherboard.c b/drivers/acpi/motherboard.c
deleted file mode 100644
index 2e17ec75af03..000000000000
--- a/drivers/acpi/motherboard.c
+++ /dev/null
@@ -1,191 +0,0 @@
1/*
2 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2 of the License, or (at
6 * your option) any later version.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License along
14 * with this program; if not, write to the Free Software Foundation, Inc.,
15 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
16 *
17 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
18 */
19
20/* Purpose: Prevent PCMCIA cards from using motherboard resources. */
21
22#include <linux/kernel.h>
23#include <linux/init.h>
24#include <linux/types.h>
25#include <linux/pci.h>
26#include <linux/ioport.h>
27#include <asm/io.h>
28
29#include <acpi/acpi_bus.h>
30#include <acpi/acpi_drivers.h>
31
32#define _COMPONENT ACPI_SYSTEM_COMPONENT
33ACPI_MODULE_NAME("acpi_motherboard")
34
35/* Dell uses PNP0C01 instead of PNP0C02 */
36#define ACPI_MB_HID1 "PNP0C01"
37#define ACPI_MB_HID2 "PNP0C02"
38/**
39 * Doesn't care about legacy IO ports; only IO ports beyond 0x1000 are reserved
40 * Doesn't care about the failure of 'request_region', since others may reserve
41 * the IO ports as well
42 */
43#define IS_RESERVED_ADDR(base, len) \
44 (((len) > 0) && ((base) > 0) && ((base) + (len) < IO_SPACE_LIMIT) \
45 && ((base) + (len) > PCIBIOS_MIN_IO))
46/*
47 * Clearing the flag (IORESOURCE_BUSY) allows drivers to use
48 * the io ports if they really know they can use it, while
49 * still preventing hotplug PCI devices from using it.
50 */
51
52/*
53 * When CONFIG_PNP is enabled, pnp/system.c binds to PNP0C01
54 * and PNP0C02, redundant with acpi_reserve_io_ranges().
55 * But acpi_reserve_io_ranges() is necessary for !CONFIG_PNP.
56 */
57static acpi_status acpi_reserve_io_ranges(struct acpi_resource *res, void *data)
58{
59 struct resource *requested_res = NULL;
60
61
62 if (res->type == ACPI_RESOURCE_TYPE_IO) {
63 struct acpi_resource_io *io_res = &res->data.io;
64
65 if (io_res->minimum != io_res->maximum)
66 return AE_OK;
67 if (IS_RESERVED_ADDR
68 (io_res->minimum, io_res->address_length)) {
69 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
70 "Motherboard resources 0x%08x - 0x%08x\n",
71 io_res->minimum,
72 io_res->minimum +
73 io_res->address_length));
74 requested_res =
75 request_region(io_res->minimum,
76 io_res->address_length, "motherboard");
77 }
78 } else if (res->type == ACPI_RESOURCE_TYPE_FIXED_IO) {
79 struct acpi_resource_fixed_io *fixed_io_res =
80 &res->data.fixed_io;
81
82 if (IS_RESERVED_ADDR
83 (fixed_io_res->address, fixed_io_res->address_length)) {
84 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
85 "Motherboard resources 0x%08x - 0x%08x\n",
86 fixed_io_res->address,
87 fixed_io_res->address +
88 fixed_io_res->address_length));
89 requested_res =
90 request_region(fixed_io_res->address,
91 fixed_io_res->address_length,
92 "motherboard");
93 }
94 } else {
95 /* Memory mapped IO? */
96 }
97
98 if (requested_res)
99 requested_res->flags &= ~IORESOURCE_BUSY;
100 return AE_OK;
101}
102
103static int acpi_motherboard_add(struct acpi_device *device)
104{
105 if (!device)
106 return -EINVAL;
107 acpi_walk_resources(device->handle, METHOD_NAME__CRS,
108 acpi_reserve_io_ranges, NULL);
109
110 return 0;
111}
112
113static struct acpi_driver acpi_motherboard_driver1 = {
114 .name = "motherboard",
115 .class = "",
116 .ids = ACPI_MB_HID1,
117 .ops = {
118 .add = acpi_motherboard_add,
119 },
120};
121
122static struct acpi_driver acpi_motherboard_driver2 = {
123 .name = "motherboard",
124 .class = "",
125 .ids = ACPI_MB_HID2,
126 .ops = {
127 .add = acpi_motherboard_add,
128 },
129};
130
131static void __init acpi_request_region (struct acpi_generic_address *addr,
132 unsigned int length, char *desc)
133{
134 if (!addr->address || !length)
135 return;
136
137 if (addr->address_space_id == ACPI_ADR_SPACE_SYSTEM_IO)
138 request_region(addr->address, length, desc);
139 else if (addr->address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
140 request_mem_region(addr->address, length, desc);
141}
142
143static void __init acpi_reserve_resources(void)
144{
145 acpi_request_region(&acpi_gbl_FADT->xpm1a_evt_blk,
146 acpi_gbl_FADT->pm1_evt_len, "ACPI PM1a_EVT_BLK");
147
148 acpi_request_region(&acpi_gbl_FADT->xpm1b_evt_blk,
149 acpi_gbl_FADT->pm1_evt_len, "ACPI PM1b_EVT_BLK");
150
151 acpi_request_region(&acpi_gbl_FADT->xpm1a_cnt_blk,
152 acpi_gbl_FADT->pm1_cnt_len, "ACPI PM1a_CNT_BLK");
153
154 acpi_request_region(&acpi_gbl_FADT->xpm1b_cnt_blk,
155 acpi_gbl_FADT->pm1_cnt_len, "ACPI PM1b_CNT_BLK");
156
157 if (acpi_gbl_FADT->pm_tm_len == 4)
158 acpi_request_region(&acpi_gbl_FADT->xpm_tmr_blk, 4, "ACPI PM_TMR");
159
160 acpi_request_region(&acpi_gbl_FADT->xpm2_cnt_blk,
161 acpi_gbl_FADT->pm2_cnt_len, "ACPI PM2_CNT_BLK");
162
163 /* Length of GPE blocks must be a multiple of 2 */
164
165 if (!(acpi_gbl_FADT->gpe0_blk_len & 0x1))
166 acpi_request_region(&acpi_gbl_FADT->xgpe0_blk,
167 acpi_gbl_FADT->gpe0_blk_len, "ACPI GPE0_BLK");
168
169 if (!(acpi_gbl_FADT->gpe1_blk_len & 0x1))
170 acpi_request_region(&acpi_gbl_FADT->xgpe1_blk,
171 acpi_gbl_FADT->gpe1_blk_len, "ACPI GPE1_BLK");
172}
173
174static int __init acpi_motherboard_init(void)
175{
176 acpi_bus_register_driver(&acpi_motherboard_driver1);
177 acpi_bus_register_driver(&acpi_motherboard_driver2);
178 /*
179 * Guarantee motherboard IO reservation first
180 * This module must run after scan.c
181 */
182 if (!acpi_disabled)
183 acpi_reserve_resources();
184 return 0;
185}
186
187/**
188 * Reserve motherboard resources after PCI claims BARs,
189 * but before PCI assigns resources to uninitialized PCI devices
190 */
191fs_initcall(acpi_motherboard_init);
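For readers auditing what the deleted motherboard.c used to reserve: its IS_RESERVED_ADDR macro, written out as a function, shows the filter it applied before calling request_region(). This is an equivalent restatement of the deleted macro, not new kernel code:

    static int is_reserved_addr(unsigned long base, unsigned long len)
    {
            /*
             * Reserve only ranges with a nonzero base and length that end
             * above PCIBIOS_MIN_IO (skipping legacy ports) and stay below
             * IO_SPACE_LIMIT.
             */
            return (len > 0) && (base > 0) &&
                   (base + len < IO_SPACE_LIMIT) &&
                   (base + len > PCIBIOS_MIN_IO);
    }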
diff --git a/drivers/acpi/namespace/nsaccess.c b/drivers/acpi/namespace/nsaccess.c
index c1c6c236df9a..57faf598bad8 100644
--- a/drivers/acpi/namespace/nsaccess.c
+++ b/drivers/acpi/namespace/nsaccess.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -195,31 +195,27 @@ acpi_status acpi_ns_root_initialize(void)
195 obj_desc->mutex.sync_level = 195 obj_desc->mutex.sync_level =
196 (u8) (ACPI_TO_INTEGER(val) - 1); 196 (u8) (ACPI_TO_INTEGER(val) - 1);
197 197
198 if (ACPI_STRCMP(init_val->name, "_GL_") == 0) { 198 /* Create a mutex */
199 199
200 /* Create a counting semaphore for the global lock */ 200 status =
201 acpi_os_create_mutex(&obj_desc->mutex.
202 os_mutex);
203 if (ACPI_FAILURE(status)) {
204 acpi_ut_remove_reference(obj_desc);
205 goto unlock_and_exit;
206 }
201 207
202 status = 208 /* Special case for ACPI Global Lock */
203 acpi_os_create_semaphore
204 (ACPI_NO_UNIT_LIMIT, 1,
205 &acpi_gbl_global_lock_semaphore);
206 if (ACPI_FAILURE(status)) {
207 acpi_ut_remove_reference
208 (obj_desc);
209 goto unlock_and_exit;
210 }
211 209
212 /* Mark this mutex as very special */ 210 if (ACPI_STRCMP(init_val->name, "_GL_") == 0) {
211 acpi_gbl_global_lock_mutex =
212 obj_desc->mutex.os_mutex;
213 213
214 obj_desc->mutex.os_mutex = 214 /* Create additional counting semaphore for global lock */
215 ACPI_GLOBAL_LOCK;
216 } else {
217 /* Create a mutex */
218 215
219 status = 216 status =
220 acpi_os_create_mutex(&obj_desc-> 217 acpi_os_create_semaphore(1, 0,
221 mutex. 218 &acpi_gbl_global_lock_semaphore);
222 os_mutex);
223 if (ACPI_FAILURE(status)) { 219 if (ACPI_FAILURE(status)) {
224 acpi_ut_remove_reference 220 acpi_ut_remove_reference
225 (obj_desc); 221 (obj_desc);
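The nsaccess.c hunk inverts the old special-casing: every predefined mutex object now gets a real OS mutex, and _GL_ additionally records that mutex as the global lock and creates a counting semaphore (max 1, initial count 0) for waiters. A condensed sketch of the _GL_ path, with error unwinding trimmed; setup_global_lock is a hypothetical name:

    static acpi_status setup_global_lock(union acpi_operand_object *obj_desc)
    {
            acpi_status status;

            /* Every mutex object, _GL_ included, gets an OS mutex */
            status = acpi_os_create_mutex(&obj_desc->mutex.os_mutex);
            if (ACPI_FAILURE(status)) {
                    return status;
            }

            /* _GL_ only: publish the mutex and add the waiters' semaphore */
            acpi_gbl_global_lock_mutex = obj_desc->mutex.os_mutex;
            return acpi_os_create_semaphore(1, 0,
                                            &acpi_gbl_global_lock_semaphore);
    }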
diff --git a/drivers/acpi/namespace/nsalloc.c b/drivers/acpi/namespace/nsalloc.c
index 55b407aae266..1d693d8ad2d8 100644
--- a/drivers/acpi/namespace/nsalloc.c
+++ b/drivers/acpi/namespace/nsalloc.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -61,6 +61,9 @@ ACPI_MODULE_NAME("nsalloc")
61struct acpi_namespace_node *acpi_ns_create_node(u32 name) 61struct acpi_namespace_node *acpi_ns_create_node(u32 name)
62{ 62{
63 struct acpi_namespace_node *node; 63 struct acpi_namespace_node *node;
64#ifdef ACPI_DBG_TRACK_ALLOCATIONS
65 u32 temp;
66#endif
64 67
65 ACPI_FUNCTION_TRACE(ns_create_node); 68 ACPI_FUNCTION_TRACE(ns_create_node);
66 69
@@ -71,6 +74,15 @@ struct acpi_namespace_node *acpi_ns_create_node(u32 name)
71 74
72 ACPI_MEM_TRACKING(acpi_gbl_ns_node_list->total_allocated++); 75 ACPI_MEM_TRACKING(acpi_gbl_ns_node_list->total_allocated++);
73 76
77#ifdef ACPI_DBG_TRACK_ALLOCATIONS
78 temp =
79 acpi_gbl_ns_node_list->total_allocated -
80 acpi_gbl_ns_node_list->total_freed;
81 if (temp > acpi_gbl_ns_node_list->max_occupied) {
82 acpi_gbl_ns_node_list->max_occupied = temp;
83 }
84#endif
85
74 node->name.integer = name; 86 node->name.integer = name;
75 ACPI_SET_DESCRIPTOR_TYPE(node, ACPI_DESC_TYPE_NAMED); 87 ACPI_SET_DESCRIPTOR_TYPE(node, ACPI_DESC_TYPE_NAMED);
76 return_PTR(node); 88 return_PTR(node);
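The nsalloc.c addition is plain high-water-mark bookkeeping under ACPI_DBG_TRACK_ALLOCATIONS: occupancy is allocations minus frees, and max_occupied only ever ratchets upward. Sketched as a helper, assuming the acpi_memory_list fields the hunk touches:

    static void track_node_occupancy(struct acpi_memory_list *list)
    {
            u32 occupied = list->total_allocated - list->total_freed;

            if (occupied > list->max_occupied) {
                    list->max_occupied = occupied;  /* new peak node count */
            }
    }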
diff --git a/drivers/acpi/namespace/nsdump.c b/drivers/acpi/namespace/nsdump.c
index d72df66aa965..1fc4f86676e1 100644
--- a/drivers/acpi/namespace/nsdump.c
+++ b/drivers/acpi/namespace/nsdump.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -205,7 +205,7 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
205 205
206 if (!acpi_ut_valid_acpi_name(this_node->name.integer)) { 206 if (!acpi_ut_valid_acpi_name(this_node->name.integer)) {
207 this_node->name.integer = 207 this_node->name.integer =
208 acpi_ut_repair_name(this_node->name.integer); 208 acpi_ut_repair_name(this_node->name.ascii);
209 209
210 ACPI_WARNING((AE_INFO, "Invalid ACPI Name %08X", 210 ACPI_WARNING((AE_INFO, "Invalid ACPI Name %08X",
211 this_node->name.integer)); 211 this_node->name.integer));
@@ -226,6 +226,12 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
226 obj_desc = acpi_ns_get_attached_object(this_node); 226 obj_desc = acpi_ns_get_attached_object(this_node);
227 acpi_dbg_level = dbg_level; 227 acpi_dbg_level = dbg_level;
228 228
229 /* Temp nodes are those nodes created by a control method */
230
231 if (this_node->flags & ANOBJ_TEMPORARY) {
232 acpi_os_printf("(T) ");
233 }
234
229 switch (info->display_type & ACPI_DISPLAY_MASK) { 235 switch (info->display_type & ACPI_DISPLAY_MASK) {
230 case ACPI_DISPLAY_SUMMARY: 236 case ACPI_DISPLAY_SUMMARY:
231 237
@@ -623,7 +629,8 @@ acpi_ns_dump_objects(acpi_object_type type,
623 info.display_type = display_type; 629 info.display_type = display_type;
624 630
625 (void)acpi_ns_walk_namespace(type, start_handle, max_depth, 631 (void)acpi_ns_walk_namespace(type, start_handle, max_depth,
626 ACPI_NS_WALK_NO_UNLOCK, 632 ACPI_NS_WALK_NO_UNLOCK |
633 ACPI_NS_WALK_TEMP_NODES,
627 acpi_ns_dump_one_object, (void *)&info, 634 acpi_ns_dump_one_object, (void *)&info,
628 NULL); 635 NULL);
629} 636}
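The two nsdump.c hunks work together: nodes created by control methods carry ANOBJ_TEMPORARY and are tagged "(T)" in the dump, and the walk now ORs in ACPI_NS_WALK_TEMP_NODES so those nodes are visited at all. A usage sketch for a full dump (dump_everything is hypothetical):

    static void dump_everything(struct acpi_walk_info *info)
    {
            /* Without ACPI_NS_WALK_TEMP_NODES, method-created nodes are skipped */
            (void)acpi_ns_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT,
                                         ACPI_UINT32_MAX,
                                         ACPI_NS_WALK_NO_UNLOCK |
                                         ACPI_NS_WALK_TEMP_NODES,
                                         acpi_ns_dump_one_object,
                                         (void *)info, NULL);
    }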
diff --git a/drivers/acpi/namespace/nsdumpdv.c b/drivers/acpi/namespace/nsdumpdv.c
index c6bf5d30fca3..5097e167939e 100644
--- a/drivers/acpi/namespace/nsdumpdv.c
+++ b/drivers/acpi/namespace/nsdumpdv.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/namespace/nseval.c b/drivers/acpi/namespace/nseval.c
index 4b0a4a8c9843..aa6370c67ec1 100644
--- a/drivers/acpi/namespace/nseval.c
+++ b/drivers/acpi/namespace/nseval.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -154,11 +154,7 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info)
154 * Execute the method via the interpreter. The interpreter is locked 154 * Execute the method via the interpreter. The interpreter is locked
155 * here before calling into the AML parser 155 * here before calling into the AML parser
156 */ 156 */
157 status = acpi_ex_enter_interpreter(); 157 acpi_ex_enter_interpreter();
158 if (ACPI_FAILURE(status)) {
159 return_ACPI_STATUS(status);
160 }
161
162 status = acpi_ps_execute_method(info); 158 status = acpi_ps_execute_method(info);
163 acpi_ex_exit_interpreter(); 159 acpi_ex_exit_interpreter();
164 } else { 160 } else {
@@ -182,10 +178,7 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info)
182 * resolution, we must lock it because we could access an opregion. 178 * resolution, we must lock it because we could access an opregion.
183 * The opregion access code assumes that the interpreter is locked. 179 * The opregion access code assumes that the interpreter is locked.
184 */ 180 */
185 status = acpi_ex_enter_interpreter(); 181 acpi_ex_enter_interpreter();
186 if (ACPI_FAILURE(status)) {
187 return_ACPI_STATUS(status);
188 }
189 182
190 /* Function has a strange interface */ 183 /* Function has a strange interface */
191 184
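Both nseval.c hunks (and the matching one in nsinit.c below) follow from acpi_ex_enter_interpreter() becoming void: it can no longer fail, so the status-check-and-bail dance collapses to a plain bracket around execution. Sketch (run_method is a hypothetical caller):

    static acpi_status run_method(struct acpi_evaluate_info *info)
    {
            acpi_status status;

            acpi_ex_enter_interpreter();    /* was: status = ...; failure check */
            status = acpi_ps_execute_method(info);
            acpi_ex_exit_interpreter();

            return status;
    }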
diff --git a/drivers/acpi/namespace/nsinit.c b/drivers/acpi/namespace/nsinit.c
index aec8488c0019..326af8fc0ce7 100644
--- a/drivers/acpi/namespace/nsinit.c
+++ b/drivers/acpi/namespace/nsinit.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -213,7 +213,7 @@ acpi_ns_init_one_object(acpi_handle obj_handle,
213 u32 level, void *context, void **return_value) 213 u32 level, void *context, void **return_value)
214{ 214{
215 acpi_object_type type; 215 acpi_object_type type;
216 acpi_status status; 216 acpi_status status = AE_OK;
217 struct acpi_init_walk_info *info = 217 struct acpi_init_walk_info *info =
218 (struct acpi_init_walk_info *)context; 218 (struct acpi_init_walk_info *)context;
219 struct acpi_namespace_node *node = 219 struct acpi_namespace_node *node =
@@ -267,10 +267,7 @@ acpi_ns_init_one_object(acpi_handle obj_handle,
267 /* 267 /*
268 * Must lock the interpreter before executing AML code 268 * Must lock the interpreter before executing AML code
269 */ 269 */
270 status = acpi_ex_enter_interpreter(); 270 acpi_ex_enter_interpreter();
271 if (ACPI_FAILURE(status)) {
272 return (status);
273 }
274 271
275 /* 272 /*
276 * Each of these types can contain executable AML code within the 273 * Each of these types can contain executable AML code within the
diff --git a/drivers/acpi/namespace/nsload.c b/drivers/acpi/namespace/nsload.c
index fe75d888e183..d4f9654fd20f 100644
--- a/drivers/acpi/namespace/nsload.c
+++ b/drivers/acpi/namespace/nsload.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -44,13 +44,12 @@
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acnamesp.h> 45#include <acpi/acnamesp.h>
46#include <acpi/acdispat.h> 46#include <acpi/acdispat.h>
47#include <acpi/actables.h>
47 48
48#define _COMPONENT ACPI_NAMESPACE 49#define _COMPONENT ACPI_NAMESPACE
49ACPI_MODULE_NAME("nsload") 50ACPI_MODULE_NAME("nsload")
50 51
51/* Local prototypes */ 52/* Local prototypes */
52static acpi_status acpi_ns_load_table_by_type(acpi_table_type table_type);
53
54#ifdef ACPI_FUTURE_IMPLEMENTATION 53#ifdef ACPI_FUTURE_IMPLEMENTATION
55acpi_status acpi_ns_unload_namespace(acpi_handle handle); 54acpi_status acpi_ns_unload_namespace(acpi_handle handle);
56 55
@@ -62,7 +61,7 @@ static acpi_status acpi_ns_delete_subtree(acpi_handle start_handle);
62 * 61 *
63 * FUNCTION: acpi_ns_load_table 62 * FUNCTION: acpi_ns_load_table
64 * 63 *
65 * PARAMETERS: table_desc - Descriptor for table to be loaded 64 * PARAMETERS: table_index - Index for table to be loaded
66 * Node - Owning NS node 65 * Node - Owning NS node
67 * 66 *
68 * RETURN: Status 67 * RETURN: Status
@@ -72,42 +71,13 @@ static acpi_status acpi_ns_delete_subtree(acpi_handle start_handle);
72 ******************************************************************************/ 71 ******************************************************************************/
73 72
74acpi_status 73acpi_status
75acpi_ns_load_table(struct acpi_table_desc *table_desc, 74acpi_ns_load_table(acpi_native_uint table_index,
76 struct acpi_namespace_node *node) 75 struct acpi_namespace_node *node)
77{ 76{
78 acpi_status status; 77 acpi_status status;
79 78
80 ACPI_FUNCTION_TRACE(ns_load_table); 79 ACPI_FUNCTION_TRACE(ns_load_table);
81 80
82 /* Check if table contains valid AML (must be DSDT, PSDT, SSDT, etc.) */
83
84 if (!
85 (acpi_gbl_table_data[table_desc->type].
86 flags & ACPI_TABLE_EXECUTABLE)) {
87
88 /* Just ignore this table */
89
90 return_ACPI_STATUS(AE_OK);
91 }
92
93 /* Check validity of the AML start and length */
94
95 if (!table_desc->aml_start) {
96 ACPI_ERROR((AE_INFO, "Null AML pointer"));
97 return_ACPI_STATUS(AE_BAD_PARAMETER);
98 }
99
100 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "AML block at %p\n",
101 table_desc->aml_start));
102
103 /* Ignore table if there is no AML contained within */
104
105 if (!table_desc->aml_length) {
106 ACPI_WARNING((AE_INFO, "Zero-length AML block in table [%4.4s]",
107 table_desc->pointer->signature));
108 return_ACPI_STATUS(AE_OK);
109 }
110
111 /* 81 /*
112 * Parse the table and load the namespace with all named 82 * Parse the table and load the namespace with all named
113 * objects found within. Control methods are NOT parsed 83 * objects found within. Control methods are NOT parsed
@@ -117,15 +87,34 @@ acpi_ns_load_table(struct acpi_table_desc *table_desc,
117 * to another control method, we can't continue parsing 87 * to another control method, we can't continue parsing
118 * because we don't know how many arguments to parse next! 88 * because we don't know how many arguments to parse next!
119 */ 89 */
90 status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
91 if (ACPI_FAILURE(status)) {
92 return_ACPI_STATUS(status);
93 }
94
95 /* If table already loaded into namespace, just return */
96
97 if (acpi_tb_is_table_loaded(table_index)) {
98 status = AE_ALREADY_EXISTS;
99 goto unlock;
100 }
101
120 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 102 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
121 "**** Loading table into namespace ****\n")); 103 "**** Loading table into namespace ****\n"));
122 104
123 status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); 105 status = acpi_tb_allocate_owner_id(table_index);
124 if (ACPI_FAILURE(status)) { 106 if (ACPI_FAILURE(status)) {
125 return_ACPI_STATUS(status); 107 goto unlock;
108 }
109
110 status = acpi_ns_parse_table(table_index, node->child);
111 if (ACPI_SUCCESS(status)) {
112 acpi_tb_set_table_loaded_flag(table_index, TRUE);
113 } else {
114 acpi_tb_release_owner_id(table_index);
126 } 115 }
127 116
128 status = acpi_ns_parse_table(table_desc, node->child); 117 unlock:
129 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); 118 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
130 119
131 if (ACPI_FAILURE(status)) { 120 if (ACPI_FAILURE(status)) {
@@ -141,7 +130,7 @@ acpi_ns_load_table(struct acpi_table_desc *table_desc,
141 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 130 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
142 "**** Begin Table Method Parsing and Object Initialization ****\n")); 131 "**** Begin Table Method Parsing and Object Initialization ****\n"));
143 132
144 status = acpi_ds_initialize_objects(table_desc, node); 133 status = acpi_ds_initialize_objects(table_index, node);
145 134
146 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 135 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
147 "**** Completed Table Method Parsing and Object Initialization ****\n")); 136 "**** Completed Table Method Parsing and Object Initialization ****\n"));
@@ -149,99 +138,7 @@ acpi_ns_load_table(struct acpi_table_desc *table_desc,
149 return_ACPI_STATUS(status); 138 return_ACPI_STATUS(status);
150} 139}
151 140
152/******************************************************************************* 141#ifdef ACPI_OBSOLETE_FUNCTIONS
153 *
154 * FUNCTION: acpi_ns_load_table_by_type
155 *
156 * PARAMETERS: table_type - Id of the table type to load
157 *
158 * RETURN: Status
159 *
160 * DESCRIPTION: Load an ACPI table or tables into the namespace. All tables
161 * of the given type are loaded. The mechanism allows this
162 * routine to be called repeatedly.
163 *
164 ******************************************************************************/
165
166static acpi_status acpi_ns_load_table_by_type(acpi_table_type table_type)
167{
168 u32 i;
169 acpi_status status;
170 struct acpi_table_desc *table_desc;
171
172 ACPI_FUNCTION_TRACE(ns_load_table_by_type);
173
174 status = acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
175 if (ACPI_FAILURE(status)) {
176 return_ACPI_STATUS(status);
177 }
178
179 /*
180 * Table types supported are:
181 * DSDT (one), SSDT/PSDT (multiple)
182 */
183 switch (table_type) {
184 case ACPI_TABLE_ID_DSDT:
185
186 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Namespace load: DSDT\n"));
187
188 table_desc = acpi_gbl_table_lists[ACPI_TABLE_ID_DSDT].next;
189
190 /* If table already loaded into namespace, just return */
191
192 if (table_desc->loaded_into_namespace) {
193 goto unlock_and_exit;
194 }
195
196 /* Now load the single DSDT */
197
198 status = acpi_ns_load_table(table_desc, acpi_gbl_root_node);
199 if (ACPI_SUCCESS(status)) {
200 table_desc->loaded_into_namespace = TRUE;
201 }
202 break;
203
204 case ACPI_TABLE_ID_SSDT:
205 case ACPI_TABLE_ID_PSDT:
206
207 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
208 "Namespace load: %d SSDT or PSDTs\n",
209 acpi_gbl_table_lists[table_type].count));
210
211 /*
212 * Traverse list of SSDT or PSDT tables
213 */
214 table_desc = acpi_gbl_table_lists[table_type].next;
215 for (i = 0; i < acpi_gbl_table_lists[table_type].count; i++) {
216 /*
217 * Only attempt to load table into namespace if it is not
218 * already loaded!
219 */
220 if (!table_desc->loaded_into_namespace) {
221 status =
222 acpi_ns_load_table(table_desc,
223 acpi_gbl_root_node);
224 if (ACPI_FAILURE(status)) {
225 break;
226 }
227
228 table_desc->loaded_into_namespace = TRUE;
229 }
230
231 table_desc = table_desc->next;
232 }
233 break;
234
235 default:
236 status = AE_SUPPORT;
237 break;
238 }
239
240 unlock_and_exit:
241 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
242 return_ACPI_STATUS(status);
243}
244
245/******************************************************************************* 142/*******************************************************************************
246 * 143 *
247 * FUNCTION: acpi_load_namespace 144 * FUNCTION: acpi_load_namespace
@@ -288,6 +185,7 @@ acpi_status acpi_ns_load_namespace(void)
288 185
289 return_ACPI_STATUS(status); 186 return_ACPI_STATUS(status);
290} 187}
188#endif
291 189
292#ifdef ACPI_FUTURE_IMPLEMENTATION 190#ifdef ACPI_FUTURE_IMPLEMENTATION
293/******************************************************************************* 191/*******************************************************************************
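Pulling the nsload.c hunks together, the rewritten acpi_ns_load_table() follows a load-once sequence keyed by table index rather than table descriptor. A condensed sketch of that control flow, using only calls that appear in the hunks (load_once is hypothetical; the real function also runs acpi_ds_initialize_objects() after a successful parse):

    static acpi_status load_once(acpi_native_uint table_index,
                                 struct acpi_namespace_node *node)
    {
            acpi_status status;

            status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
            if (ACPI_FAILURE(status)) {
                    return status;
            }

            if (acpi_tb_is_table_loaded(table_index)) {
                    status = AE_ALREADY_EXISTS;     /* load each table once */
                    goto unlock;
            }

            status = acpi_tb_allocate_owner_id(table_index);
            if (ACPI_SUCCESS(status)) {
                    status = acpi_ns_parse_table(table_index, node->child);
                    if (ACPI_SUCCESS(status)) {
                            acpi_tb_set_table_loaded_flag(table_index, TRUE);
                    } else {
                            acpi_tb_release_owner_id(table_index);
                    }
            }

    unlock:
            (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
            return status;
    }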
diff --git a/drivers/acpi/namespace/nsnames.c b/drivers/acpi/namespace/nsnames.c
index 97b8332c9746..cbd94af08cc5 100644
--- a/drivers/acpi/namespace/nsnames.c
+++ b/drivers/acpi/namespace/nsnames.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/namespace/nsobject.c b/drivers/acpi/namespace/nsobject.c
index aabe8794b908..d9d7377bc6e6 100644
--- a/drivers/acpi/namespace/nsobject.c
+++ b/drivers/acpi/namespace/nsobject.c
@@ -6,7 +6,7 @@
6 ******************************************************************************/ 6 ******************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/namespace/nsparse.c b/drivers/acpi/namespace/nsparse.c
index 155505a4ef69..e696aa847990 100644
--- a/drivers/acpi/namespace/nsparse.c
+++ b/drivers/acpi/namespace/nsparse.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -45,6 +45,7 @@
45#include <acpi/acnamesp.h> 45#include <acpi/acnamesp.h>
46#include <acpi/acparser.h> 46#include <acpi/acparser.h>
47#include <acpi/acdispat.h> 47#include <acpi/acdispat.h>
48#include <acpi/actables.h>
48 49
49#define _COMPONENT ACPI_NAMESPACE 50#define _COMPONENT ACPI_NAMESPACE
50ACPI_MODULE_NAME("nsparse") 51ACPI_MODULE_NAME("nsparse")
@@ -62,14 +63,24 @@ ACPI_MODULE_NAME("nsparse")
62 * 63 *
63 ******************************************************************************/ 64 ******************************************************************************/
64acpi_status 65acpi_status
65acpi_ns_one_complete_parse(u8 pass_number, struct acpi_table_desc *table_desc) 66acpi_ns_one_complete_parse(acpi_native_uint pass_number,
67 acpi_native_uint table_index)
66{ 68{
67 union acpi_parse_object *parse_root; 69 union acpi_parse_object *parse_root;
68 acpi_status status; 70 acpi_status status;
71 acpi_native_uint aml_length;
72 u8 *aml_start;
69 struct acpi_walk_state *walk_state; 73 struct acpi_walk_state *walk_state;
74 struct acpi_table_header *table;
75 acpi_owner_id owner_id;
70 76
71 ACPI_FUNCTION_TRACE(ns_one_complete_parse); 77 ACPI_FUNCTION_TRACE(ns_one_complete_parse);
72 78
79 status = acpi_tb_get_owner_id(table_index, &owner_id);
80 if (ACPI_FAILURE(status)) {
81 return_ACPI_STATUS(status);
82 }
83
73 /* Create and init a Root Node */ 84 /* Create and init a Root Node */
74 85
75 parse_root = acpi_ps_create_scope_op(); 86 parse_root = acpi_ps_create_scope_op();
@@ -79,26 +90,41 @@ acpi_ns_one_complete_parse(u8 pass_number, struct acpi_table_desc *table_desc)
79 90
80 /* Create and initialize a new walk state */ 91 /* Create and initialize a new walk state */
81 92
82 walk_state = acpi_ds_create_walk_state(table_desc->owner_id, 93 walk_state = acpi_ds_create_walk_state(owner_id, NULL, NULL, NULL);
83 NULL, NULL, NULL);
84 if (!walk_state) { 94 if (!walk_state) {
85 acpi_ps_free_op(parse_root); 95 acpi_ps_free_op(parse_root);
86 return_ACPI_STATUS(AE_NO_MEMORY); 96 return_ACPI_STATUS(AE_NO_MEMORY);
87 } 97 }
88 98
89 status = acpi_ds_init_aml_walk(walk_state, parse_root, NULL, 99 status = acpi_get_table_by_index(table_index, &table);
90 table_desc->aml_start, 100 if (ACPI_FAILURE(status)) {
91 table_desc->aml_length, NULL, 101 acpi_ds_delete_walk_state(walk_state);
92 pass_number); 102 acpi_ps_free_op(parse_root);
103 return_ACPI_STATUS(status);
104 }
105
106 /* Table must consist of at least a complete header */
107
108 if (table->length < sizeof(struct acpi_table_header)) {
109 status = AE_BAD_HEADER;
110 } else {
111 aml_start = (u8 *) table + sizeof(struct acpi_table_header);
112 aml_length = table->length - sizeof(struct acpi_table_header);
113 status = acpi_ds_init_aml_walk(walk_state, parse_root, NULL,
114 aml_start, aml_length, NULL,
115 (u8) pass_number);
116 }
117
93 if (ACPI_FAILURE(status)) { 118 if (ACPI_FAILURE(status)) {
94 acpi_ds_delete_walk_state(walk_state); 119 acpi_ds_delete_walk_state(walk_state);
120 acpi_ps_delete_parse_tree(parse_root);
95 return_ACPI_STATUS(status); 121 return_ACPI_STATUS(status);
96 } 122 }
97 123
98 /* Parse the AML */ 124 /* Parse the AML */
99 125
100 ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "*PARSE* pass %d parse\n", 126 ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "*PARSE* pass %d parse\n",
101 pass_number)); 127 (unsigned)pass_number));
102 status = acpi_ps_parse_aml(walk_state); 128 status = acpi_ps_parse_aml(walk_state);
103 129
104 acpi_ps_delete_parse_tree(parse_root); 130 acpi_ps_delete_parse_tree(parse_root);
@@ -119,7 +145,7 @@ acpi_ns_one_complete_parse(u8 pass_number, struct acpi_table_desc *table_desc)
119 ******************************************************************************/ 145 ******************************************************************************/
120 146
121acpi_status 147acpi_status
122acpi_ns_parse_table(struct acpi_table_desc *table_desc, 148acpi_ns_parse_table(acpi_native_uint table_index,
123 struct acpi_namespace_node *start_node) 149 struct acpi_namespace_node *start_node)
124{ 150{
125 acpi_status status; 151 acpi_status status;
@@ -134,10 +160,10 @@ acpi_ns_parse_table(struct acpi_table_desc *table_desc,
134 * each Parser Op subtree is deleted when it is finished. This saves 160 * each Parser Op subtree is deleted when it is finished. This saves
135 * a great deal of memory, and allows a small cache of parse objects 161 * a great deal of memory, and allows a small cache of parse objects
136 * to service the entire parse. The second pass of the parse then 162 * to service the entire parse. The second pass of the parse then
137 * performs another complete parse of the AML.. 163 * performs another complete parse of the AML.
138 */ 164 */
139 ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "**** Start pass 1\n")); 165 ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "**** Start pass 1\n"));
140 status = acpi_ns_one_complete_parse(1, table_desc); 166 status = acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS1, table_index);
141 if (ACPI_FAILURE(status)) { 167 if (ACPI_FAILURE(status)) {
142 return_ACPI_STATUS(status); 168 return_ACPI_STATUS(status);
143 } 169 }
@@ -152,7 +178,7 @@ acpi_ns_parse_table(struct acpi_table_desc *table_desc,
152 * parse objects are all cached. 178 * parse objects are all cached.
153 */ 179 */
154 ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "**** Start pass 2\n")); 180 ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "**** Start pass 2\n"));
155 status = acpi_ns_one_complete_parse(2, table_desc); 181 status = acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS2, table_index);
156 if (ACPI_FAILURE(status)) { 182 if (ACPI_FAILURE(status)) {
157 return_ACPI_STATUS(status); 183 return_ACPI_STATUS(status);
158 } 184 }
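
The reworked acpi_ns_one_complete_parse() no longer receives a table descriptor; it looks the table up by index and derives the AML byte stream from the mapped header. A minimal sketch of that arithmetic, with an abbreviated header layout (the real struct acpi_table_header carries more fields):

    #include <stdint.h>

    struct table_header { char signature[4]; uint32_t length; /* ... */ };

    static int locate_aml(struct table_header *table,
                          uint8_t **aml_start, uint32_t *aml_length)
    {
        /* Table must consist of at least a complete header */
        if (table->length < sizeof(struct table_header))
            return -1;                      /* AE_BAD_HEADER analogue */

        *aml_start  = (uint8_t *)table + sizeof(struct table_header);
        *aml_length = table->length - (uint32_t)sizeof(struct table_header);
        return 0;
    }

acpi_ns_parse_table() then simply runs this parse twice, first with ACPI_IMODE_LOAD_PASS1 and then with ACPI_IMODE_LOAD_PASS2, as the hunk shows.
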
diff --git a/drivers/acpi/namespace/nssearch.c b/drivers/acpi/namespace/nssearch.c
index 500e2bbcfaf7..e863be665ce8 100644
--- a/drivers/acpi/namespace/nssearch.c
+++ b/drivers/acpi/namespace/nssearch.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -321,7 +321,8 @@ acpi_ns_search_and_enter(u32 target_name,
321 * even though there are a few bad names. 321 * even though there are a few bad names.
322 */ 322 */
323 if (!acpi_ut_valid_acpi_name(target_name)) { 323 if (!acpi_ut_valid_acpi_name(target_name)) {
324 target_name = acpi_ut_repair_name(target_name); 324 target_name =
325 acpi_ut_repair_name(ACPI_CAST_PTR(char, &target_name));
325 326
326 /* Report warning only if in strict mode or debug mode */ 327 /* Report warning only if in strict mode or debug mode */
327 328
@@ -401,6 +402,10 @@ acpi_ns_search_and_enter(u32 target_name,
401 } 402 }
402#endif 403#endif
403 404
405 if (flags & ACPI_NS_TEMPORARY) {
406 new_node->flags |= ANOBJ_TEMPORARY;
407 }
408
404 /* Install the new object into the parent's list of children */ 409 /* Install the new object into the parent's list of children */
405 410
406 acpi_ns_install_node(walk_state, node, new_node, type); 411 acpi_ns_install_node(walk_state, node, new_node, type);
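
The new ACPI_NS_TEMPORARY handling above boils down to propagating a caller flag onto the freshly created node, so later walks can recognize nodes created during control method execution. A sketch with placeholder flag values:

    #define NS_TEMPORARY    0x0040  /* caller requests a temporary node */
    #define NODE_TEMPORARY  0x0002  /* node belongs to an executing method */

    struct ns_node { unsigned char flags; };

    static void apply_creation_flags(struct ns_node *new_node, unsigned int flags)
    {
        if (flags & NS_TEMPORARY)
            new_node->flags |= NODE_TEMPORARY;
    }
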
diff --git a/drivers/acpi/namespace/nsutils.c b/drivers/acpi/namespace/nsutils.c
index aa4e799d9a8c..90fd059615ff 100644
--- a/drivers/acpi/namespace/nsutils.c
+++ b/drivers/acpi/namespace/nsutils.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -770,13 +770,6 @@ void acpi_ns_terminate(void)
770 } 770 }
771 771
772 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Namespace freed\n")); 772 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Namespace freed\n"));
773
774 /*
775 * 2) Now we can delete the ACPI tables
776 */
777 acpi_tb_delete_all_tables();
778 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "ACPI Tables freed\n"));
779
780 return_VOID; 773 return_VOID;
781} 774}
782 775
diff --git a/drivers/acpi/namespace/nswalk.c b/drivers/acpi/namespace/nswalk.c
index c8f6bef16ed0..94eb8f332d94 100644
--- a/drivers/acpi/namespace/nswalk.c
+++ b/drivers/acpi/namespace/nswalk.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -126,7 +126,7 @@ struct acpi_namespace_node *acpi_ns_get_next_node(acpi_object_type type,
126 * PARAMETERS: Type - acpi_object_type to search for 126 * PARAMETERS: Type - acpi_object_type to search for
127 * start_node - Handle in namespace where search begins 127 * start_node - Handle in namespace where search begins
128 * max_depth - Depth to which search is to reach 128 * max_depth - Depth to which search is to reach
129 * unlock_before_callback- Whether to unlock the NS before invoking 129 * Flags - Whether to unlock the NS before invoking
130 * the callback routine 130 * the callback routine
131 * user_function - Called when an object of "Type" is found 131 * user_function - Called when an object of "Type" is found
132 * Context - Passed to user function 132 * Context - Passed to user function
@@ -153,7 +153,7 @@ acpi_status
153acpi_ns_walk_namespace(acpi_object_type type, 153acpi_ns_walk_namespace(acpi_object_type type,
154 acpi_handle start_node, 154 acpi_handle start_node,
155 u32 max_depth, 155 u32 max_depth,
156 u8 unlock_before_callback, 156 u32 flags,
157 acpi_walk_callback user_function, 157 acpi_walk_callback user_function,
158 void *context, void **return_value) 158 void *context, void **return_value)
159{ 159{
@@ -193,20 +193,34 @@ acpi_ns_walk_namespace(acpi_object_type type,
193 acpi_ns_get_next_node(ACPI_TYPE_ANY, parent_node, 193 acpi_ns_get_next_node(ACPI_TYPE_ANY, parent_node,
194 child_node); 194 child_node);
195 if (child_node) { 195 if (child_node) {
196 /* 196
197 * Found node, Get the type if we are not 197 /* Found next child, get the type if we are not searching for ANY */
198 * searching for ANY 198
199 */
200 if (type != ACPI_TYPE_ANY) { 199 if (type != ACPI_TYPE_ANY) {
201 child_type = child_node->type; 200 child_type = child_node->type;
202 } 201 }
203 202
204 if (child_type == type) { 203 /*
204 * Ignore all temporary namespace nodes (created during control
205 * method execution) unless told otherwise. These temporary nodes
206 * can cause a race condition because they can be deleted during the
207 * execution of the user function (if the namespace is unlocked before
208 * invocation of the user function.) Only the debugger namespace dump
209 * will examine the temporary nodes.
210 */
211 if ((child_node->flags & ANOBJ_TEMPORARY) &&
212 !(flags & ACPI_NS_WALK_TEMP_NODES)) {
213 status = AE_CTRL_DEPTH;
214 }
215
216 /* Type must match requested type */
217
218 else if (child_type == type) {
205 /* 219 /*
206 * Found a matching node, invoke the user 220 * Found a matching node, invoke the user callback function.
207 * callback function 221 * Unlock the namespace if flag is set.
208 */ 222 */
209 if (unlock_before_callback) { 223 if (flags & ACPI_NS_WALK_UNLOCK) {
210 mutex_status = 224 mutex_status =
211 acpi_ut_release_mutex 225 acpi_ut_release_mutex
212 (ACPI_MTX_NAMESPACE); 226 (ACPI_MTX_NAMESPACE);
@@ -216,10 +230,11 @@ acpi_ns_walk_namespace(acpi_object_type type,
216 } 230 }
217 } 231 }
218 232
219 status = user_function(child_node, level, 233 status =
220 context, return_value); 234 user_function(child_node, level, context,
235 return_value);
221 236
222 if (unlock_before_callback) { 237 if (flags & ACPI_NS_WALK_UNLOCK) {
223 mutex_status = 238 mutex_status =
224 acpi_ut_acquire_mutex 239 acpi_ut_acquire_mutex
225 (ACPI_MTX_NAMESPACE); 240 (ACPI_MTX_NAMESPACE);
@@ -251,20 +266,17 @@ acpi_ns_walk_namespace(acpi_object_type type,
251 } 266 }
252 267
253 /* 268 /*
254 * Depth first search: 269 * Depth first search: Attempt to go down another level in the
255 * Attempt to go down another level in the namespace 270 * namespace if we are allowed to. Don't go any further if we have
256 * if we are allowed to. Don't go any further if we 271 * reached the caller specified maximum depth or if the user
257 * have reached the caller specified maximum depth 272 * function has specified that the maximum depth has been reached.
258 * or if the user function has specified that the
259 * maximum depth has been reached.
260 */ 273 */
261 if ((level < max_depth) && (status != AE_CTRL_DEPTH)) { 274 if ((level < max_depth) && (status != AE_CTRL_DEPTH)) {
262 if (acpi_ns_get_next_node 275 if (acpi_ns_get_next_node
263 (ACPI_TYPE_ANY, child_node, NULL)) { 276 (ACPI_TYPE_ANY, child_node, NULL)) {
264 /* 277
265 * There is at least one child of this 278 /* There is at least one child of this node, visit it */
266 * node, visit the onde 279
267 */
268 level++; 280 level++;
269 parent_node = child_node; 281 parent_node = child_node;
270 child_node = NULL; 282 child_node = NULL;
@@ -272,9 +284,8 @@ acpi_ns_walk_namespace(acpi_object_type type,
272 } 284 }
273 } else { 285 } else {
274 /* 286 /*
275 * No more children of this node (acpi_ns_get_next_node 287 * No more children of this node (acpi_ns_get_next_node failed), go
276 * failed), go back upwards in the namespace tree to 288 * back upwards in the namespace tree to the node's parent.
277 * the node's parent.
278 */ 289 */
279 level--; 290 level--;
280 child_node = parent_node; 291 child_node = parent_node;
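
The rewritten walker is a non-recursive depth-first traversal over a first-child/next-sibling tree; the new Flags word both selects unlock-around-callback behavior and, per the comment above, keeps temporary nodes out of ordinary walks. A compact sketch of that traversal shape (locking elided; types and names are illustrative):

    #define WALK_TEMP_NODES 0x2     /* placeholder flag values */
    #define NODE_TEMPORARY  0x2

    struct tnode {
        struct tnode *parent;
        struct tnode *child;        /* first child */
        struct tnode *sibling;      /* next peer */
        unsigned flags;
    };

    /* Callback returns nonzero to veto descent into the subtree */
    typedef int (*walk_cb)(struct tnode *node, unsigned level, void *ctx);

    static void walk(struct tnode *start, unsigned max_depth,
                     unsigned flags, walk_cb cb, void *ctx)
    {
        struct tnode *node = start->child;
        unsigned level = 1;

        while (node) {
            int descend = 1;

            /* Skip method-local (temporary) nodes unless told otherwise */
            if ((node->flags & NODE_TEMPORARY) && !(flags & WALK_TEMP_NODES))
                descend = 0;            /* AE_CTRL_DEPTH analogue */
            else if (cb(node, level, ctx))
                descend = 0;            /* callback vetoed the subtree */

            if (descend && level < max_depth && node->child) {
                level++;                /* depth first: go down one level */
                node = node->child;
                continue;
            }

            /* Subtree done: next sibling, popping upward as needed */
            while (!node->sibling) {
                node = node->parent;
                level--;
                if (level == 0)
                    return;             /* back at the start node */
            }
            node = node->sibling;
        }
    }
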
diff --git a/drivers/acpi/namespace/nsxfeval.c b/drivers/acpi/namespace/nsxfeval.c
index dca6799ac678..7ac6ace50059 100644
--- a/drivers/acpi/namespace/nsxfeval.c
+++ b/drivers/acpi/namespace/nsxfeval.c
@@ -6,7 +6,7 @@
6 ******************************************************************************/ 6 ******************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -170,7 +170,6 @@ acpi_evaluate_object(acpi_handle handle,
170 struct acpi_buffer *return_buffer) 170 struct acpi_buffer *return_buffer)
171{ 171{
172 acpi_status status; 172 acpi_status status;
173 acpi_status status2;
174 struct acpi_evaluate_info *info; 173 struct acpi_evaluate_info *info;
175 acpi_size buffer_space_needed; 174 acpi_size buffer_space_needed;
176 u32 i; 175 u32 i;
@@ -329,14 +328,12 @@ acpi_evaluate_object(acpi_handle handle,
329 * Delete the internal return object. NOTE: Interpreter must be 328 * Delete the internal return object. NOTE: Interpreter must be
330 * locked to avoid race condition. 329 * locked to avoid race condition.
331 */ 330 */
332 status2 = acpi_ex_enter_interpreter(); 331 acpi_ex_enter_interpreter();
333 if (ACPI_SUCCESS(status2)) {
334 332
335 /* Remove one reference on the return object (should delete it) */ 333 /* Remove one reference on the return object (should delete it) */
336 334
337 acpi_ut_remove_reference(info->return_object); 335 acpi_ut_remove_reference(info->return_object);
338 acpi_ex_exit_interpreter(); 336 acpi_ex_exit_interpreter();
339 }
340 } 337 }
341 338
342 cleanup: 339 cleanup:
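
Since acpi_ex_enter_interpreter() can no longer fail (it now returns void), the nested status2 check around the reference drop disappears. The shape of the result, with stand-in names for the ACPICA calls:

    struct object { int refcount; };

    static void enter_interpreter(void) { /* acquire; cannot fail */ }
    static void exit_interpreter(void)  { /* release */ }

    static void drop_return_object(struct object *obj)
    {
        enter_interpreter();
        obj->refcount--;    /* remove one reference (should delete it) */
        exit_interpreter();
    }
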
diff --git a/drivers/acpi/namespace/nsxfname.c b/drivers/acpi/namespace/nsxfname.c
index 978213a6c19f..b489781b22a8 100644
--- a/drivers/acpi/namespace/nsxfname.c
+++ b/drivers/acpi/namespace/nsxfname.c
@@ -6,7 +6,7 @@
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
@@ -84,38 +84,41 @@ acpi_get_handle(acpi_handle parent,
84 /* Convert a parent handle to a prefix node */ 84 /* Convert a parent handle to a prefix node */
85 85
86 if (parent) { 86 if (parent) {
87 status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
88 if (ACPI_FAILURE(status)) {
89 return (status);
90 }
91
92 prefix_node = acpi_ns_map_handle_to_node(parent); 87 prefix_node = acpi_ns_map_handle_to_node(parent);
93 if (!prefix_node) { 88 if (!prefix_node) {
94 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
95 return (AE_BAD_PARAMETER); 89 return (AE_BAD_PARAMETER);
96 } 90 }
91 }
92
93 /*
94 * Valid cases are:
95 * 1) Fully qualified pathname
96 * 2) Parent + Relative pathname
97 *
98 * Error for <null Parent + relative path>
99 */
100 if (acpi_ns_valid_root_prefix(pathname[0])) {
97 101
98 status = acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); 102 /* Pathname is fully qualified (starts with '\') */
99 if (ACPI_FAILURE(status)) { 103
100 return (status); 104 /* Special case for root-only, since we can't search for it */
105
106 if (!ACPI_STRCMP(pathname, ACPI_NS_ROOT_PATH)) {
107 *ret_handle =
108 acpi_ns_convert_entry_to_handle(acpi_gbl_root_node);
109 return (AE_OK);
101 } 110 }
102 } 111 } else if (!prefix_node) {
103 112
104 /* Special case for root, since we can't search for it */ 113 /* Relative path with null prefix is disallowed */
105 114
106 if (ACPI_STRCMP(pathname, ACPI_NS_ROOT_PATH) == 0) { 115 return (AE_BAD_PARAMETER);
107 *ret_handle =
108 acpi_ns_convert_entry_to_handle(acpi_gbl_root_node);
109 return (AE_OK);
110 } 116 }
111 117
112 /* 118 /* Find the Node and convert to a handle */
113 * Find the Node and convert to a handle
114 */
115 status = acpi_ns_get_node(prefix_node, pathname, ACPI_NS_NO_UPSEARCH,
116 &node);
117 119
118 *ret_handle = NULL; 120 status =
121 acpi_ns_get_node(prefix_node, pathname, ACPI_NS_NO_UPSEARCH, &node);
119 if (ACPI_SUCCESS(status)) { 122 if (ACPI_SUCCESS(status)) {
120 *ret_handle = acpi_ns_convert_entry_to_handle(node); 123 *ret_handle = acpi_ns_convert_entry_to_handle(node);
121 } 124 }
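
The rewritten acpi_get_handle() validity rules in one place: a fully qualified path stands alone (with root itself special-cased, since it cannot be searched for), while a relative path requires a parent. A sketch with simplified types; the constant and helper names are illustrative:

    #include <string.h>

    #define ROOT_PREFIX '\\'

    static int resolve(const void *prefix_node, const char *pathname,
                       void **ret_handle, void *root_handle)
    {
        if (!pathname || !ret_handle)
            return -1;                  /* AE_BAD_PARAMETER analogue */

        if (pathname[0] == ROOT_PREFIX) {
            /* Fully qualified; root itself can't be searched for */
            if (strcmp(pathname, "\\") == 0) {
                *ret_handle = root_handle;
                return 0;
            }
        } else if (!prefix_node) {
            /* Relative path with a null parent is disallowed */
            return -1;
        }

        /* ... otherwise look the node up under prefix (or root) ... */
        return 0;
    }
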
diff --git a/drivers/acpi/namespace/nsxfobj.c b/drivers/acpi/namespace/nsxfobj.c
index a18b1c223129..faa375887201 100644
--- a/drivers/acpi/namespace/nsxfobj.c
+++ b/drivers/acpi/namespace/nsxfobj.c
@@ -6,7 +6,7 @@
6 ******************************************************************************/ 6 ******************************************************************************/
7 7
8/* 8/*
9 * Copyright (C) 2000 - 2006, R. Byron Moore 9 * Copyright (C) 2000 - 2007, R. Byron Moore
10 * All rights reserved. 10 * All rights reserved.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index bd96a7045925..4a9faff4c01d 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -45,7 +45,7 @@ int __cpuinitdata pxm_to_node_map[MAX_PXM_DOMAINS]
45int __cpuinitdata node_to_pxm_map[MAX_NUMNODES] 45int __cpuinitdata node_to_pxm_map[MAX_NUMNODES]
46 = { [0 ... MAX_NUMNODES - 1] = PXM_INVAL }; 46 = { [0 ... MAX_NUMNODES - 1] = PXM_INVAL };
47 47
48extern int __init acpi_table_parse_madt_family(enum acpi_table_id id, 48extern int __init acpi_table_parse_madt_family(char *id,
49 unsigned long madt_size, 49 unsigned long madt_size,
50 int entry_id, 50 int entry_id,
51 acpi_madt_entry_handler handler, 51 acpi_madt_entry_handler handler,
@@ -89,7 +89,7 @@ void __cpuinit acpi_unmap_pxm_to_node(int node)
89 node_clear(node, nodes_found_map); 89 node_clear(node, nodes_found_map);
90} 90}
91 91
92void __init acpi_table_print_srat_entry(acpi_table_entry_header * header) 92void __init acpi_table_print_srat_entry(struct acpi_subtable_header * header)
93{ 93{
94 94
95 ACPI_FUNCTION_NAME("acpi_table_print_srat_entry"); 95 ACPI_FUNCTION_NAME("acpi_table_print_srat_entry");
@@ -99,36 +99,35 @@ void __init acpi_table_print_srat_entry(acpi_table_entry_header * header)
99 99
100 switch (header->type) { 100 switch (header->type) {
101 101
102 case ACPI_SRAT_PROCESSOR_AFFINITY: 102 case ACPI_SRAT_TYPE_CPU_AFFINITY:
103#ifdef ACPI_DEBUG_OUTPUT 103#ifdef ACPI_DEBUG_OUTPUT
104 { 104 {
105 struct acpi_table_processor_affinity *p = 105 struct acpi_srat_cpu_affinity *p =
106 (struct acpi_table_processor_affinity *)header; 106 (struct acpi_srat_cpu_affinity *)header;
107 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 107 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
108 "SRAT Processor (id[0x%02x] eid[0x%02x]) in proximity domain %d %s\n", 108 "SRAT Processor (id[0x%02x] eid[0x%02x]) in proximity domain %d %s\n",
109 p->apic_id, p->lsapic_eid, 109 p->apic_id, p->local_sapic_eid,
110 p->proximity_domain, 110 p->proximity_domain_lo,
111 p->flags. 111 (p->flags & ACPI_SRAT_CPU_ENABLED)?
112 enabled ? "enabled" : "disabled")); 112 "enabled" : "disabled"));
113 } 113 }
114#endif /* ACPI_DEBUG_OUTPUT */ 114#endif /* ACPI_DEBUG_OUTPUT */
115 break; 115 break;
116 116
117 case ACPI_SRAT_MEMORY_AFFINITY: 117 case ACPI_SRAT_TYPE_MEMORY_AFFINITY:
118#ifdef ACPI_DEBUG_OUTPUT 118#ifdef ACPI_DEBUG_OUTPUT
119 { 119 {
120 struct acpi_table_memory_affinity *p = 120 struct acpi_srat_mem_affinity *p =
121 (struct acpi_table_memory_affinity *)header; 121 (struct acpi_srat_mem_affinity *)header;
122 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 122 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
123 "SRAT Memory (0x%08x%08x length 0x%08x%08x type 0x%x) in proximity domain %d %s%s\n", 123 "SRAT Memory (0x%lx length 0x%lx type 0x%x) in proximity domain %d %s%s\n",
124 p->base_addr_hi, p->base_addr_lo, 124 (unsigned long)p->base_address,
125 p->length_hi, p->length_lo, 125 (unsigned long)p->length,
126 p->memory_type, p->proximity_domain, 126 p->memory_type, p->proximity_domain,
127 p->flags. 127 (p->flags & ACPI_SRAT_MEM_ENABLED)?
128 enabled ? "enabled" : "disabled", 128 "enabled" : "disabled",
129 p->flags. 129 (p->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE)?
130 hot_pluggable ? " hot-pluggable" : 130 " hot-pluggable" : ""));
131 ""));
132 } 131 }
133#endif /* ACPI_DEBUG_OUTPUT */ 132#endif /* ACPI_DEBUG_OUTPUT */
134 break; 133 break;
@@ -141,18 +140,18 @@ void __init acpi_table_print_srat_entry(acpi_table_entry_header * header)
141 } 140 }
142} 141}
143 142
144static int __init acpi_parse_slit(unsigned long phys_addr, unsigned long size) 143static int __init acpi_parse_slit(struct acpi_table_header *table)
145{ 144{
146 struct acpi_table_slit *slit; 145 struct acpi_table_slit *slit;
147 u32 localities; 146 u32 localities;
148 147
149 if (!phys_addr || !size) 148 if (!table)
150 return -EINVAL; 149 return -EINVAL;
151 150
152 slit = (struct acpi_table_slit *)__va(phys_addr); 151 slit = (struct acpi_table_slit *)table;
153 152
154 /* downcast just for %llu vs %lu for i386/ia64 */ 153 /* downcast just for %llu vs %lu for i386/ia64 */
155 localities = (u32) slit->localities; 154 localities = (u32) slit->locality_count;
156 155
157 acpi_numa_slit_init(slit); 156 acpi_numa_slit_init(slit);
158 157
@@ -160,12 +159,12 @@ static int __init acpi_parse_slit(unsigned long phys_addr, unsigned long size)
160} 159}
161 160
162static int __init 161static int __init
163acpi_parse_processor_affinity(acpi_table_entry_header * header, 162acpi_parse_processor_affinity(struct acpi_subtable_header * header,
164 const unsigned long end) 163 const unsigned long end)
165{ 164{
166 struct acpi_table_processor_affinity *processor_affinity; 165 struct acpi_srat_cpu_affinity *processor_affinity;
167 166
168 processor_affinity = (struct acpi_table_processor_affinity *)header; 167 processor_affinity = (struct acpi_srat_cpu_affinity *)header;
169 if (!processor_affinity) 168 if (!processor_affinity)
170 return -EINVAL; 169 return -EINVAL;
171 170
@@ -178,12 +177,12 @@ acpi_parse_processor_affinity(acpi_table_entry_header * header,
178} 177}
179 178
180static int __init 179static int __init
181acpi_parse_memory_affinity(acpi_table_entry_header * header, 180acpi_parse_memory_affinity(struct acpi_subtable_header * header,
182 const unsigned long end) 181 const unsigned long end)
183{ 182{
184 struct acpi_table_memory_affinity *memory_affinity; 183 struct acpi_srat_mem_affinity *memory_affinity;
185 184
186 memory_affinity = (struct acpi_table_memory_affinity *)header; 185 memory_affinity = (struct acpi_srat_mem_affinity *)header;
187 if (!memory_affinity) 186 if (!memory_affinity)
188 return -EINVAL; 187 return -EINVAL;
189 188
@@ -195,23 +194,23 @@ acpi_parse_memory_affinity(acpi_table_entry_header * header,
195 return 0; 194 return 0;
196} 195}
197 196
198static int __init acpi_parse_srat(unsigned long phys_addr, unsigned long size) 197static int __init acpi_parse_srat(struct acpi_table_header *table)
199{ 198{
200 struct acpi_table_srat *srat; 199 struct acpi_table_srat *srat;
201 200
202 if (!phys_addr || !size) 201 if (!table)
203 return -EINVAL; 202 return -EINVAL;
204 203
205 srat = (struct acpi_table_srat *)__va(phys_addr); 204 srat = (struct acpi_table_srat *)table;
206 205
207 return 0; 206 return 0;
208} 207}
209 208
210int __init 209int __init
211acpi_table_parse_srat(enum acpi_srat_entry_id id, 210acpi_table_parse_srat(enum acpi_srat_type id,
212 acpi_madt_entry_handler handler, unsigned int max_entries) 211 acpi_madt_entry_handler handler, unsigned int max_entries)
213{ 212{
214 return acpi_table_parse_madt_family(ACPI_SRAT, 213 return acpi_table_parse_madt_family(ACPI_SIG_SRAT,
215 sizeof(struct acpi_table_srat), id, 214 sizeof(struct acpi_table_srat), id,
216 handler, max_entries); 215 handler, max_entries);
217} 216}
@@ -221,17 +220,17 @@ int __init acpi_numa_init(void)
221 int result; 220 int result;
222 221
 223 /* SRAT: System Resource Affinity Table */ 222 /* SRAT: System Resource Affinity Table */
224 result = acpi_table_parse(ACPI_SRAT, acpi_parse_srat); 223 result = acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat);
225 224
226 if (result > 0) { 225 if (result > 0) {
227 result = acpi_table_parse_srat(ACPI_SRAT_PROCESSOR_AFFINITY, 226 result = acpi_table_parse_srat(ACPI_SRAT_TYPE_CPU_AFFINITY,
228 acpi_parse_processor_affinity, 227 acpi_parse_processor_affinity,
229 NR_CPUS); 228 NR_CPUS);
230 result = acpi_table_parse_srat(ACPI_SRAT_MEMORY_AFFINITY, acpi_parse_memory_affinity, NR_NODE_MEMBLKS); // IA64 specific 229 result = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY, acpi_parse_memory_affinity, NR_NODE_MEMBLKS); // IA64 specific
231 } 230 }
232 231
233 /* SLIT: System Locality Information Table */ 232 /* SLIT: System Locality Information Table */
234 result = acpi_table_parse(ACPI_SLIT, acpi_parse_slit); 233 result = acpi_table_parse(ACPI_SIG_SLIT, acpi_parse_slit);
235 234
236 acpi_numa_arch_fixup(); 235 acpi_numa_arch_fixup();
237 return 0; 236 return 0;
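
Two mechanical changes run through this file: the parse callbacks now receive an already-mapped struct acpi_table_header (so the __va() translation goes away), and the SRAT flags become plain integers tested with masks instead of bitfields. A sketch of the mask tests, with placeholder values standing in for the real ACPI_SRAT_* masks:

    #include <stdio.h>
    #include <stdint.h>

    #define SRAT_MEM_ENABLED        (1u << 0)   /* placeholder masks */
    #define SRAT_MEM_HOT_PLUGGABLE  (1u << 1)

    static void print_mem_affinity(uint32_t flags)
    {
        printf("SRAT Memory %s%s\n",
               (flags & SRAT_MEM_ENABLED) ? "enabled" : "disabled",
               (flags & SRAT_MEM_HOT_PLUGGABLE) ? " hot-pluggable" : "");
    }
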
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 57ae1e5cde0a..0f6f3bcbc8eb 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -36,6 +36,7 @@
36#include <linux/delay.h> 36#include <linux/delay.h>
37#include <linux/workqueue.h> 37#include <linux/workqueue.h>
38#include <linux/nmi.h> 38#include <linux/nmi.h>
39#include <linux/acpi.h>
39#include <acpi/acpi.h> 40#include <acpi/acpi.h>
40#include <asm/io.h> 41#include <asm/io.h>
41#include <acpi/acpi_bus.h> 42#include <acpi/acpi_bus.h>
@@ -75,6 +76,54 @@ static acpi_osd_handler acpi_irq_handler;
75static void *acpi_irq_context; 76static void *acpi_irq_context;
76static struct workqueue_struct *kacpid_wq; 77static struct workqueue_struct *kacpid_wq;
77 78
79static void __init acpi_request_region (struct acpi_generic_address *addr,
80 unsigned int length, char *desc)
81{
82 struct resource *res;
83
84 if (!addr->address || !length)
85 return;
86
87 if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
88 res = request_region(addr->address, length, desc);
89 else if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
90 res = request_mem_region(addr->address, length, desc);
91}
92
93static int __init acpi_reserve_resources(void)
94{
95 acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
96 "ACPI PM1a_EVT_BLK");
97
98 acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length,
99 "ACPI PM1b_EVT_BLK");
100
101 acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length,
102 "ACPI PM1a_CNT_BLK");
103
104 acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length,
105 "ACPI PM1b_CNT_BLK");
106
107 if (acpi_gbl_FADT.pm_timer_length == 4)
108 acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR");
109
110 acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length,
111 "ACPI PM2_CNT_BLK");
112
 113 /* Length of GPE blocks must be a multiple of 2 */
114
115 if (!(acpi_gbl_FADT.gpe0_block_length & 0x1))
116 acpi_request_region(&acpi_gbl_FADT.xgpe0_block,
117 acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK");
118
119 if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
120 acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
121 acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");
122
123 return 0;
124}
125device_initcall(acpi_reserve_resources);
126
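
The helper added above dispatches a generic address to the right reservation call by its address-space ID. A self-contained sketch of that dispatch; the reservation functions are stand-ins for the kernel's request_region()/request_mem_region(), and the space IDs are placeholders:

    #include <stdint.h>

    #define SPACE_SYSTEM_MEMORY 0   /* placeholder space IDs */
    #define SPACE_SYSTEM_IO     1

    struct generic_address {
        uint8_t  space_id;
        uint64_t address;
    };

    /* Stand-ins for request_region()/request_mem_region() */
    static void reserve_io(uint64_t addr, unsigned int len, const char *desc)
    {
        (void)addr; (void)len; (void)desc;
    }

    static void reserve_mem(uint64_t addr, unsigned int len, const char *desc)
    {
        (void)addr; (void)len; (void)desc;
    }

    static void request_generic(const struct generic_address *addr,
                                unsigned int length, const char *desc)
    {
        if (!addr->address || !length)
            return;                     /* nothing to reserve */

        if (addr->space_id == SPACE_SYSTEM_IO)
            reserve_io(addr->address, length, desc);
        else if (addr->space_id == SPACE_SYSTEM_MEMORY)
            reserve_mem(addr->address, length, desc);
    }
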
78acpi_status acpi_os_initialize(void) 127acpi_status acpi_os_initialize(void)
79{ 128{
80 return AE_OK; 129 return AE_OK;
@@ -136,53 +185,43 @@ void acpi_os_vprintf(const char *fmt, va_list args)
136#endif 185#endif
137} 186}
138 187
139acpi_status acpi_os_get_root_pointer(u32 flags, struct acpi_pointer *addr) 188acpi_physical_address __init acpi_os_get_root_pointer(void)
140{ 189{
141 if (efi_enabled) { 190 if (efi_enabled) {
142 addr->pointer_type = ACPI_PHYSICAL_POINTER;
143 if (efi.acpi20 != EFI_INVALID_TABLE_ADDR) 191 if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
144 addr->pointer.physical = efi.acpi20; 192 return efi.acpi20;
145 else if (efi.acpi != EFI_INVALID_TABLE_ADDR) 193 else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
146 addr->pointer.physical = efi.acpi; 194 return efi.acpi;
147 else { 195 else {
148 printk(KERN_ERR PREFIX 196 printk(KERN_ERR PREFIX
149 "System description tables not found\n"); 197 "System description tables not found\n");
150 return AE_NOT_FOUND; 198 return 0;
151 } 199 }
152 } else { 200 } else
153 if (ACPI_FAILURE(acpi_find_root_pointer(flags, addr))) { 201 return acpi_find_rsdp();
154 printk(KERN_ERR PREFIX
155 "System description tables not found\n");
156 return AE_NOT_FOUND;
157 }
158 }
159
160 return AE_OK;
161} 202}
162 203
163acpi_status 204void __iomem *acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
164acpi_os_map_memory(acpi_physical_address phys, acpi_size size,
165 void __iomem ** virt)
166{ 205{
167 if (phys > ULONG_MAX) { 206 if (phys > ULONG_MAX) {
168 printk(KERN_ERR PREFIX "Cannot map memory that high\n"); 207 printk(KERN_ERR PREFIX "Cannot map memory that high\n");
169 return AE_BAD_PARAMETER; 208 return 0;
170 } 209 }
171 /* 210 if (acpi_gbl_permanent_mmap)
172 * ioremap checks to ensure this is in reserved space 211 /*
173 */ 212 * ioremap checks to ensure this is in reserved space
174 *virt = ioremap((unsigned long)phys, size); 213 */
175 214 return ioremap((unsigned long)phys, size);
176 if (!*virt) 215 else
177 return AE_NO_MEMORY; 216 return __acpi_map_table((unsigned long)phys, size);
178
179 return AE_OK;
180} 217}
181EXPORT_SYMBOL_GPL(acpi_os_map_memory); 218EXPORT_SYMBOL_GPL(acpi_os_map_memory);
182 219
183void acpi_os_unmap_memory(void __iomem * virt, acpi_size size) 220void acpi_os_unmap_memory(void __iomem * virt, acpi_size size)
184{ 221{
185 iounmap(virt); 222 if (acpi_gbl_permanent_mmap) {
223 iounmap(virt);
224 }
186} 225}
187EXPORT_SYMBOL_GPL(acpi_os_unmap_memory); 226EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
188 227
@@ -254,7 +293,7 @@ acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
254 * FADT. It may not be the same if an interrupt source override exists 293 * FADT. It may not be the same if an interrupt source override exists
255 * for the SCI. 294 * for the SCI.
256 */ 295 */
257 gsi = acpi_fadt.sci_int; 296 gsi = acpi_gbl_FADT.sci_interrupt;
258 if (acpi_gsi_to_irq(gsi, &irq) < 0) { 297 if (acpi_gsi_to_irq(gsi, &irq) < 0) {
259 printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n", 298 printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
260 gsi); 299 gsi);
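
The reworked mapping path above returns the pointer directly and chooses between a permanent ioremap() and the early boot-time table mapper based on acpi_gbl_permanent_mmap. A sketch of that split, with stub mappers standing in for ioremap() and __acpi_map_table():

    #include <stdint.h>
    #include <stddef.h>

    static int permanent_mmap;      /* acpi_gbl_permanent_mmap analogue */

    static void *map_late(uintptr_t phys, size_t size)
    {
        (void)phys; (void)size; return 0;   /* ioremap() in the kernel */
    }

    static void *map_early(uintptr_t phys, size_t size)
    {
        (void)phys; (void)size; return 0;   /* __acpi_map_table() */
    }

    static void *os_map_memory(uint64_t phys, size_t size)
    {
        if (phys > UINTPTR_MAX)
            return 0;               /* cannot map memory that high */

        return permanent_mmap ? map_late((uintptr_t)phys, size)
                              : map_early((uintptr_t)phys, size);
    }
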
diff --git a/drivers/acpi/parser/psargs.c b/drivers/acpi/parser/psargs.c
index bf88e076c3e9..c2b9835c890b 100644
--- a/drivers/acpi/parser/psargs.c
+++ b/drivers/acpi/parser/psargs.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/parser/psloop.c b/drivers/acpi/parser/psloop.c
index e1541db3753a..773aee82fbb8 100644
--- a/drivers/acpi/parser/psloop.c
+++ b/drivers/acpi/parser/psloop.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -42,12 +42,11 @@
42 */ 42 */
43 43
44/* 44/*
45 * Parse the AML and build an operation tree as most interpreters, 45 * Parse the AML and build an operation tree as most interpreters, (such as
46 * like Perl, do. Parsing is done by hand rather than with a YACC 46 * Perl) do. Parsing is done by hand rather than with a YACC generated parser
47 * generated parser to tightly constrain stack and dynamic memory 47 * to tightly constrain stack and dynamic memory usage. Parsing is kept
48 * usage. At the same time, parsing is kept flexible and the code 48 * flexible and the code fairly compact by parsing based on a list of AML
49 * fairly compact by parsing based on a list of AML opcode 49 * opcode templates in aml_op_info[].
50 * templates in aml_op_info[]
51 */ 50 */
52 51
53#include <acpi/acpi.h> 52#include <acpi/acpi.h>
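
The "table of AML opcode templates" the rewritten comment describes: each opcode maps to a small descriptor carrying its class and packed argument list, and the parser dispatches on that instead of on generated grammar tables. A sketch with invented entries and field values (the real table is aml_op_info[] inside ACPICA):

    #include <stdint.h>
    #include <stddef.h>

    enum op_class { CLASS_EXECUTE, CLASS_ASCII, CLASS_PREFIX, CLASS_UNKNOWN };

    struct op_info {
        uint16_t      opcode;       /* AML opcode value */
        enum op_class class;        /* how the parser treats it */
        uint32_t      parse_args;   /* packed argument-type list */
    };

    /* Invented entries; the real table is far larger */
    static const struct op_info op_table[] = {
        { 0x0A, CLASS_EXECUTE, 0x01 },  /* ByteConst: one byte-data arg */
        { 0x10, CLASS_EXECUTE, 0x23 },  /* Scope: name + object list    */
    };

    static const struct op_info *get_op_info(uint16_t opcode)
    {
        for (size_t i = 0; i < sizeof(op_table) / sizeof(op_table[0]); i++)
            if (op_table[i].opcode == opcode)
                return &op_table[i];
        return 0;   /* unknown opcode: the caller skips one byte */
    }
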
@@ -60,766 +59,679 @@ ACPI_MODULE_NAME("psloop")
60 59
61static u32 acpi_gbl_depth = 0; 60static u32 acpi_gbl_depth = 0;
62 61
62/* Local prototypes */
63
64static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state);
65
66static acpi_status
67acpi_ps_build_named_op(struct acpi_walk_state *walk_state,
68 u8 * aml_op_start,
69 union acpi_parse_object *unnamed_op,
70 union acpi_parse_object **op);
71
72static acpi_status
73acpi_ps_create_op(struct acpi_walk_state *walk_state,
74 u8 * aml_op_start, union acpi_parse_object **new_op);
75
76static acpi_status
77acpi_ps_get_arguments(struct acpi_walk_state *walk_state,
78 u8 * aml_op_start, union acpi_parse_object *op);
79
80static acpi_status
81acpi_ps_complete_op(struct acpi_walk_state *walk_state,
82 union acpi_parse_object **op, acpi_status status);
83
84static acpi_status
85acpi_ps_complete_final_op(struct acpi_walk_state *walk_state,
86 union acpi_parse_object *op, acpi_status status);
87
63/******************************************************************************* 88/*******************************************************************************
64 * 89 *
65 * FUNCTION: acpi_ps_parse_loop 90 * FUNCTION: acpi_ps_get_aml_opcode
66 * 91 *
67 * PARAMETERS: walk_state - Current state 92 * PARAMETERS: walk_state - Current state
68 * 93 *
69 * RETURN: Status 94 * RETURN: Status
70 * 95 *
71 * DESCRIPTION: Parse AML (pointed to by the current parser state) and return 96 * DESCRIPTION: Extract the next AML opcode from the input stream.
72 * a tree of ops.
73 * 97 *
74 ******************************************************************************/ 98 ******************************************************************************/
75 99
76acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state) 100static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state)
77{ 101{
78 acpi_status status = AE_OK;
79 acpi_status status2;
80 union acpi_parse_object *op = NULL; /* current op */
81 union acpi_parse_object *arg = NULL;
82 union acpi_parse_object *pre_op = NULL;
83 struct acpi_parse_state *parser_state;
84 u8 *aml_op_start = NULL;
85 102
86 ACPI_FUNCTION_TRACE_PTR(ps_parse_loop, walk_state); 103 ACPI_FUNCTION_TRACE_PTR(ps_get_aml_opcode, walk_state);
87 104
88 if (walk_state->descending_callback == NULL) { 105 walk_state->aml_offset =
89 return_ACPI_STATUS(AE_BAD_PARAMETER); 106 (u32) ACPI_PTR_DIFF(walk_state->parser_state.aml,
90 } 107 walk_state->parser_state.aml_start);
108 walk_state->opcode = acpi_ps_peek_opcode(&(walk_state->parser_state));
91 109
92 parser_state = &walk_state->parser_state; 110 /*
93 walk_state->arg_types = 0; 111 * First cut to determine what we have found:
112 * 1) A valid AML opcode
113 * 2) A name string
114 * 3) An unknown/invalid opcode
115 */
116 walk_state->op_info = acpi_ps_get_opcode_info(walk_state->opcode);
94 117
95#if (!defined (ACPI_NO_METHOD_EXECUTION) && !defined (ACPI_CONSTANT_EVAL_ONLY)) 118 switch (walk_state->op_info->class) {
119 case AML_CLASS_ASCII:
120 case AML_CLASS_PREFIX:
121 /*
122 * Starts with a valid prefix or ASCII char, this is a name
123 * string. Convert the bare name string to a namepath.
124 */
125 walk_state->opcode = AML_INT_NAMEPATH_OP;
126 walk_state->arg_types = ARGP_NAMESTRING;
127 break;
96 128
97 if (walk_state->walk_type & ACPI_WALK_METHOD_RESTART) { 129 case AML_CLASS_UNKNOWN:
98 130
99 /* We are restarting a preempted control method */ 131 /* The opcode is unrecognized. Just skip unknown opcodes */
100 132
101 if (acpi_ps_has_completed_scope(parser_state)) { 133 ACPI_ERROR((AE_INFO,
102 /* 134 "Found unknown opcode %X at AML address %p offset %X, ignoring",
103 * We must check if a predicate to an IF or WHILE statement 135 walk_state->opcode, walk_state->parser_state.aml,
104 * was just completed 136 walk_state->aml_offset));
105 */
106 if ((parser_state->scope->parse_scope.op) &&
107 ((parser_state->scope->parse_scope.op->common.
108 aml_opcode == AML_IF_OP)
109 || (parser_state->scope->parse_scope.op->common.
110 aml_opcode == AML_WHILE_OP))
111 && (walk_state->control_state)
112 && (walk_state->control_state->common.state ==
113 ACPI_CONTROL_PREDICATE_EXECUTING)) {
114 /*
115 * A predicate was just completed, get the value of the
116 * predicate and branch based on that value
117 */
118 walk_state->op = NULL;
119 status =
120 acpi_ds_get_predicate_value(walk_state,
121 ACPI_TO_POINTER
122 (TRUE));
123 if (ACPI_FAILURE(status)
124 && ((status & AE_CODE_MASK) !=
125 AE_CODE_CONTROL)) {
126 if (status == AE_AML_NO_RETURN_VALUE) {
127 ACPI_EXCEPTION((AE_INFO, status,
128 "Invoked method did not return a value"));
129 137
130 } 138 ACPI_DUMP_BUFFER(walk_state->parser_state.aml, 128);
131 ACPI_EXCEPTION((AE_INFO, status,
132 "GetPredicate Failed"));
133 return_ACPI_STATUS(status);
134 }
135 139
136 status = 140 /* Assume one-byte bad opcode */
137 acpi_ps_next_parse_state(walk_state, op,
138 status);
139 }
140 141
141 acpi_ps_pop_scope(parser_state, &op, 142 walk_state->parser_state.aml++;
142 &walk_state->arg_types, 143 return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE);
143 &walk_state->arg_count);
144 ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
145 "Popped scope, Op=%p\n", op));
146 } else if (walk_state->prev_op) {
147 144
148 /* We were in the middle of an op */ 145 default:
149 146
150 op = walk_state->prev_op; 147 /* Found opcode info, this is a normal opcode */
151 walk_state->arg_types = walk_state->prev_arg_types; 148
152 } 149 walk_state->parser_state.aml +=
150 acpi_ps_get_opcode_size(walk_state->opcode);
151 walk_state->arg_types = walk_state->op_info->parse_args;
152 break;
153 } 153 }
154#endif
155 154
156 /* Iterative parsing loop, while there is more AML to process: */ 155 return_ACPI_STATUS(AE_OK);
156}
157 157
158 while ((parser_state->aml < parser_state->aml_end) || (op)) { 158/*******************************************************************************
159 aml_op_start = parser_state->aml; 159 *
160 if (!op) { 160 * FUNCTION: acpi_ps_build_named_op
161 *
162 * PARAMETERS: walk_state - Current state
163 * aml_op_start - Begin of named Op in AML
164 * unnamed_op - Early Op (not a named Op)
165 * Op - Returned Op
166 *
167 * RETURN: Status
168 *
169 * DESCRIPTION: Parse a named Op
170 *
171 ******************************************************************************/
161 172
162 /* Get the next opcode from the AML stream */ 173static acpi_status
174acpi_ps_build_named_op(struct acpi_walk_state *walk_state,
175 u8 * aml_op_start,
176 union acpi_parse_object *unnamed_op,
177 union acpi_parse_object **op)
178{
179 acpi_status status = AE_OK;
180 union acpi_parse_object *arg = NULL;
163 181
164 walk_state->aml_offset = 182 ACPI_FUNCTION_TRACE_PTR(ps_build_named_op, walk_state);
165 (u32) ACPI_PTR_DIFF(parser_state->aml,
166 parser_state->aml_start);
167 walk_state->opcode = acpi_ps_peek_opcode(parser_state);
168 183
169 /* 184 unnamed_op->common.value.arg = NULL;
170 * First cut to determine what we have found: 185 unnamed_op->common.aml_opcode = walk_state->opcode;
171 * 1) A valid AML opcode
172 * 2) A name string
173 * 3) An unknown/invalid opcode
174 */
175 walk_state->op_info =
176 acpi_ps_get_opcode_info(walk_state->opcode);
177 switch (walk_state->op_info->class) {
178 case AML_CLASS_ASCII:
179 case AML_CLASS_PREFIX:
180 /*
181 * Starts with a valid prefix or ASCII char, this is a name
182 * string. Convert the bare name string to a namepath.
183 */
184 walk_state->opcode = AML_INT_NAMEPATH_OP;
185 walk_state->arg_types = ARGP_NAMESTRING;
186 break;
187 186
188 case AML_CLASS_UNKNOWN: 187 /*
188 * Get and append arguments until we find the node that contains
189 * the name (the type ARGP_NAME).
190 */
191 while (GET_CURRENT_ARG_TYPE(walk_state->arg_types) &&
192 (GET_CURRENT_ARG_TYPE(walk_state->arg_types) != ARGP_NAME)) {
193 status =
194 acpi_ps_get_next_arg(walk_state,
195 &(walk_state->parser_state),
196 GET_CURRENT_ARG_TYPE(walk_state->
197 arg_types), &arg);
198 if (ACPI_FAILURE(status)) {
199 return_ACPI_STATUS(status);
200 }
189 201
190 /* The opcode is unrecognized. Just skip unknown opcodes */ 202 acpi_ps_append_arg(unnamed_op, arg);
203 INCREMENT_ARG_LIST(walk_state->arg_types);
204 }
191 205
192 ACPI_ERROR((AE_INFO, 206 /*
193 "Found unknown opcode %X at AML address %p offset %X, ignoring", 207 * Make sure that we found a NAME and didn't run out of arguments
194 walk_state->opcode, 208 */
195 parser_state->aml, 209 if (!GET_CURRENT_ARG_TYPE(walk_state->arg_types)) {
196 walk_state->aml_offset)); 210 return_ACPI_STATUS(AE_AML_NO_OPERAND);
211 }
197 212
198 ACPI_DUMP_BUFFER(parser_state->aml, 128); 213 /* We know that this arg is a name, move to next arg */
199 214
200 /* Assume one-byte bad opcode */ 215 INCREMENT_ARG_LIST(walk_state->arg_types);
201 216
202 parser_state->aml++; 217 /*
203 continue; 218 * Find the object. This will either insert the object into
219 * the namespace or simply look it up
220 */
221 walk_state->op = NULL;
204 222
205 default: 223 status = walk_state->descending_callback(walk_state, op);
224 if (ACPI_FAILURE(status)) {
225 ACPI_EXCEPTION((AE_INFO, status, "During name lookup/catalog"));
226 return_ACPI_STATUS(status);
227 }
206 228
207 /* Found opcode info, this is a normal opcode */ 229 if (!*op) {
230 return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE);
231 }
208 232
209 parser_state->aml += 233 status = acpi_ps_next_parse_state(walk_state, *op, status);
210 acpi_ps_get_opcode_size(walk_state->opcode); 234 if (ACPI_FAILURE(status)) {
211 walk_state->arg_types = 235 if (status == AE_CTRL_PENDING) {
212 walk_state->op_info->parse_args; 236 return_ACPI_STATUS(AE_CTRL_PARSE_PENDING);
213 break; 237 }
214 } 238 return_ACPI_STATUS(status);
239 }
215 240
216 /* Create Op structure and append to parent's argument list */ 241 acpi_ps_append_arg(*op, unnamed_op->common.value.arg);
242 acpi_gbl_depth++;
217 243
218 if (walk_state->op_info->flags & AML_NAMED) { 244 if ((*op)->common.aml_opcode == AML_REGION_OP) {
245 /*
246 * Defer final parsing of an operation_region body, because we don't
247 * have enough info in the first pass to parse it correctly (i.e.,
248 * there may be method calls within the term_arg elements of the body.)
249 *
250 * However, we must continue parsing because the opregion is not a
251 * standalone package -- we don't know where the end is at this point.
252 *
253 * (Length is unknown until parse of the body complete)
254 */
255 (*op)->named.data = aml_op_start;
256 (*op)->named.length = 0;
257 }
219 258
220 /* Allocate a new pre_op if necessary */ 259 return_ACPI_STATUS(AE_OK);
260}
221 261
222 if (!pre_op) { 262/*******************************************************************************
223 pre_op = 263 *
224 acpi_ps_alloc_op(walk_state-> 264 * FUNCTION: acpi_ps_create_op
225 opcode); 265 *
226 if (!pre_op) { 266 * PARAMETERS: walk_state - Current state
227 status = AE_NO_MEMORY; 267 * aml_op_start - Op start in AML
228 goto close_this_op; 268 * new_op - Returned Op
229 } 269 *
230 } 270 * RETURN: Status
271 *
272 * DESCRIPTION: Get Op from AML
273 *
274 ******************************************************************************/
231 275
232 pre_op->common.value.arg = NULL; 276static acpi_status
233 pre_op->common.aml_opcode = walk_state->opcode; 277acpi_ps_create_op(struct acpi_walk_state *walk_state,
278 u8 * aml_op_start, union acpi_parse_object **new_op)
279{
280 acpi_status status = AE_OK;
281 union acpi_parse_object *op;
282 union acpi_parse_object *named_op = NULL;
234 283
235 /* 284 ACPI_FUNCTION_TRACE_PTR(ps_create_op, walk_state);
236 * Get and append arguments until we find the node that contains
237 * the name (the type ARGP_NAME).
238 */
239 while (GET_CURRENT_ARG_TYPE
240 (walk_state->arg_types)
241 &&
242 (GET_CURRENT_ARG_TYPE
243 (walk_state->arg_types) != ARGP_NAME)) {
244 status =
245 acpi_ps_get_next_arg(walk_state,
246 parser_state,
247 GET_CURRENT_ARG_TYPE
248 (walk_state->
249 arg_types),
250 &arg);
251 if (ACPI_FAILURE(status)) {
252 goto close_this_op;
253 }
254 285
255 acpi_ps_append_arg(pre_op, arg); 286 status = acpi_ps_get_aml_opcode(walk_state);
256 INCREMENT_ARG_LIST(walk_state-> 287 if (status == AE_CTRL_PARSE_CONTINUE) {
257 arg_types); 288 return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE);
258 } 289 }
259 290
260 /* 291 /* Create Op structure and append to parent's argument list */
261 * Make sure that we found a NAME and didn't run out of
262 * arguments
263 */
264 if (!GET_CURRENT_ARG_TYPE
265 (walk_state->arg_types)) {
266 status = AE_AML_NO_OPERAND;
267 goto close_this_op;
268 }
269 292
270 /* We know that this arg is a name, move to next arg */ 293 walk_state->op_info = acpi_ps_get_opcode_info(walk_state->opcode);
294 op = acpi_ps_alloc_op(walk_state->opcode);
295 if (!op) {
296 return_ACPI_STATUS(AE_NO_MEMORY);
297 }
271 298
272 INCREMENT_ARG_LIST(walk_state->arg_types); 299 if (walk_state->op_info->flags & AML_NAMED) {
300 status =
301 acpi_ps_build_named_op(walk_state, aml_op_start, op,
302 &named_op);
303 acpi_ps_free_op(op);
304 if (ACPI_FAILURE(status)) {
305 return_ACPI_STATUS(status);
306 }
273 307
274 /* 308 *new_op = named_op;
275 * Find the object. This will either insert the object into 309 return_ACPI_STATUS(AE_OK);
276 * the namespace or simply look it up 310 }
277 */
278 walk_state->op = NULL;
279 311
280 status = 312 /* Not a named opcode, just allocate Op and append to parent */
281 walk_state->descending_callback(walk_state,
282 &op);
283 if (ACPI_FAILURE(status)) {
284 ACPI_EXCEPTION((AE_INFO, status,
285 "During name lookup/catalog"));
286 goto close_this_op;
287 }
288 313
289 if (!op) { 314 if (walk_state->op_info->flags & AML_CREATE) {
290 continue; 315 /*
291 } 316 * Backup to beginning of create_xXXfield declaration
317 * body_length is unknown until we parse the body
318 */
319 op->named.data = aml_op_start;
320 op->named.length = 0;
321 }
292 322
293 status = 323 acpi_ps_append_arg(acpi_ps_get_parent_scope
294 acpi_ps_next_parse_state(walk_state, op, 324 (&(walk_state->parser_state)), op);
295 status);
296 if (status == AE_CTRL_PENDING) {
297 status = AE_OK;
298 goto close_this_op;
299 }
300 325
301 if (ACPI_FAILURE(status)) { 326 if (walk_state->descending_callback != NULL) {
302 goto close_this_op; 327 /*
303 } 328 * Find the object. This will either insert the object into
329 * the namespace or simply look it up
330 */
331 walk_state->op = *new_op = op;
304 332
305 acpi_ps_append_arg(op, 333 status = walk_state->descending_callback(walk_state, &op);
306 pre_op->common.value.arg); 334 status = acpi_ps_next_parse_state(walk_state, op, status);
307 acpi_gbl_depth++; 335 if (status == AE_CTRL_PENDING) {
308 336 status = AE_CTRL_PARSE_PENDING;
309 if (op->common.aml_opcode == AML_REGION_OP) { 337 }
310 /* 338 }
311 * Defer final parsing of an operation_region body,
312 * because we don't have enough info in the first pass
313 * to parse it correctly (i.e., there may be method
314 * calls within the term_arg elements of the body.)
315 *
316 * However, we must continue parsing because
317 * the opregion is not a standalone package --
318 * we don't know where the end is at this point.
319 *
320 * (Length is unknown until parse of the body complete)
321 */
322 op->named.data = aml_op_start;
323 op->named.length = 0;
324 }
325 } else {
326 /* Not a named opcode, just allocate Op and append to parent */
327 339
328 walk_state->op_info = 340 return_ACPI_STATUS(status);
329 acpi_ps_get_opcode_info(walk_state->opcode); 341}
330 op = acpi_ps_alloc_op(walk_state->opcode);
331 if (!op) {
332 status = AE_NO_MEMORY;
333 goto close_this_op;
334 }
335 342
336 if (walk_state->op_info->flags & AML_CREATE) { 343/*******************************************************************************
337 /* 344 *
338 * Backup to beginning of create_xXXfield declaration 345 * FUNCTION: acpi_ps_get_arguments
339 * body_length is unknown until we parse the body 346 *
340 */ 347 * PARAMETERS: walk_state - Current state
341 op->named.data = aml_op_start; 348 * aml_op_start - Op start in AML
342 op->named.length = 0; 349 * Op - Current Op
343 } 350 *
351 * RETURN: Status
352 *
353 * DESCRIPTION: Get arguments for passed Op.
354 *
355 ******************************************************************************/
344 356
345 acpi_ps_append_arg(acpi_ps_get_parent_scope 357static acpi_status
346 (parser_state), op); 358acpi_ps_get_arguments(struct acpi_walk_state *walk_state,
359 u8 * aml_op_start, union acpi_parse_object *op)
360{
361 acpi_status status = AE_OK;
362 union acpi_parse_object *arg = NULL;
347 363
348 if ((walk_state->descending_callback != NULL)) { 364 ACPI_FUNCTION_TRACE_PTR(ps_get_arguments, walk_state);
349 /*
350 * Find the object. This will either insert the object into
351 * the namespace or simply look it up
352 */
353 walk_state->op = op;
354 365
355 status = 366 switch (op->common.aml_opcode) {
356 walk_state-> 367 case AML_BYTE_OP: /* AML_BYTEDATA_ARG */
357 descending_callback(walk_state, 368 case AML_WORD_OP: /* AML_WORDDATA_ARG */
358 &op); 369 case AML_DWORD_OP: /* AML_DWORDATA_ARG */
359 status = 370 case AML_QWORD_OP: /* AML_QWORDATA_ARG */
360 acpi_ps_next_parse_state(walk_state, 371 case AML_STRING_OP: /* AML_ASCIICHARLIST_ARG */
361 op,
362 status);
363 if (status == AE_CTRL_PENDING) {
364 status = AE_OK;
365 goto close_this_op;
366 }
367 372
368 if (ACPI_FAILURE(status)) { 373 /* Fill in constant or string argument directly */
369 goto close_this_op;
370 }
371 }
372 }
373 374
374 op->common.aml_offset = walk_state->aml_offset; 375 acpi_ps_get_next_simple_arg(&(walk_state->parser_state),
376 GET_CURRENT_ARG_TYPE(walk_state->
377 arg_types),
378 op);
379 break;
375 380
376 if (walk_state->op_info) { 381 case AML_INT_NAMEPATH_OP: /* AML_NAMESTRING_ARG */
377 ACPI_DEBUG_PRINT((ACPI_DB_PARSE, 382
378 "Opcode %4.4X [%s] Op %p Aml %p AmlOffset %5.5X\n", 383 status =
379 (u32) op->common.aml_opcode, 384 acpi_ps_get_next_namepath(walk_state,
380 walk_state->op_info->name, op, 385 &(walk_state->parser_state), op,
381 parser_state->aml, 386 1);
382 op->common.aml_offset)); 387 if (ACPI_FAILURE(status)) {
383 } 388 return_ACPI_STATUS(status);
384 } 389 }
385 390
391 walk_state->arg_types = 0;
392 break;
393
394 default:
386 /* 395 /*
387 * Start arg_count at zero because we don't know if there are 396 * Op is not a constant or string, append each argument to the Op
388 * any args yet
@@ -389,428 @@ (removed: remainder of the old acpi_ps_parse_loop)
	 */
	walk_state->arg_count = 0;

	/* Are there any arguments that must be processed? */

	if (walk_state->arg_types) {

		/* Get arguments */

		switch (op->common.aml_opcode) {
		case AML_BYTE_OP:	/* AML_BYTEDATA_ARG */
		case AML_WORD_OP:	/* AML_WORDDATA_ARG */
		case AML_DWORD_OP:	/* AML_DWORDATA_ARG */
		case AML_QWORD_OP:	/* AML_QWORDATA_ARG */
		case AML_STRING_OP:	/* AML_ASCIICHARLIST_ARG */

			/* Fill in constant or string argument directly */

			acpi_ps_get_next_simple_arg(parser_state,
				GET_CURRENT_ARG_TYPE(walk_state->arg_types), op);
			break;

		case AML_INT_NAMEPATH_OP:	/* AML_NAMESTRING_ARG */

			status = acpi_ps_get_next_namepath(walk_state,
							   parser_state, op, 1);
			if (ACPI_FAILURE(status)) {
				goto close_this_op;
			}

			walk_state->arg_types = 0;
			break;

		default:
			/*
			 * Op is not a constant or string, append each argument
			 * to the Op
			 */
			while (GET_CURRENT_ARG_TYPE(walk_state->arg_types)
			       && !walk_state->arg_count) {
				walk_state->aml_offset = (u32)
				    ACPI_PTR_DIFF(parser_state->aml,
						  parser_state->aml_start);

				status = acpi_ps_get_next_arg(walk_state,
					parser_state,
					GET_CURRENT_ARG_TYPE(walk_state->arg_types),
					&arg);
				if (ACPI_FAILURE(status)) {
					goto close_this_op;
				}

				if (arg) {
					arg->common.aml_offset =
					    walk_state->aml_offset;
					acpi_ps_append_arg(op, arg);
				}
				INCREMENT_ARG_LIST(walk_state->arg_types);
			}

			/* Special processing for certain opcodes */

			/* TBD (remove): Temporary mechanism to disable this code if needed */

#ifdef ACPI_ENABLE_MODULE_LEVEL_CODE

			if ((walk_state->pass_number <= ACPI_IMODE_LOAD_PASS1)
			    && ((walk_state->parse_flags & ACPI_PARSE_DISASSEMBLE) == 0)) {
				/*
				 * We want to skip If/Else/While constructs during Pass1
				 * because we want to actually conditionally execute the
				 * code during Pass2.
				 *
				 * Except for disassembly, where we always want to
				 * walk the If/Else/While packages
				 */
				switch (op->common.aml_opcode) {
				case AML_IF_OP:
				case AML_ELSE_OP:
				case AML_WHILE_OP:

					ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
							  "Pass1: Skipping an If/Else/While body\n"));

					/* Skip body of if/else/while in pass 1 */

					parser_state->aml = parser_state->pkg_end;
					walk_state->arg_count = 0;
					break;

				default:
					break;
				}
			}
#endif
			switch (op->common.aml_opcode) {
			case AML_METHOD_OP:

				/*
				 * Skip parsing of control method
				 * because we don't have enough info in the first pass
				 * to parse it correctly.
				 *
				 * Save the length and address of the body
				 */
				op->named.data = parser_state->aml;
				op->named.length = (u32)
				    (parser_state->pkg_end - parser_state->aml);

				/* Skip body of method */

				parser_state->aml = parser_state->pkg_end;
				walk_state->arg_count = 0;
				break;

			case AML_BUFFER_OP:
			case AML_PACKAGE_OP:
			case AML_VAR_PACKAGE_OP:

				if ((op->common.parent) &&
				    (op->common.parent->common.aml_opcode == AML_NAME_OP)
				    && (walk_state->pass_number <= ACPI_IMODE_LOAD_PASS2)) {
					/*
					 * Skip parsing of Buffers and Packages
					 * because we don't have enough info in the first pass
					 * to parse them correctly.
					 */
					op->named.data = aml_op_start;
					op->named.length = (u32)
					    (parser_state->pkg_end - aml_op_start);

					/* Skip body */

					parser_state->aml = parser_state->pkg_end;
					walk_state->arg_count = 0;
				}
				break;

			case AML_WHILE_OP:

				if (walk_state->control_state) {
					walk_state->control_state->control.package_end =
					    parser_state->pkg_end;
				}
				break;

			default:

				/* No action for all other opcodes */
				break;
			}
			break;
		}
	}

	/* Check for arguments that need to be processed */

	if (walk_state->arg_count) {
		/*
		 * There are arguments (complex ones), push Op and
		 * prepare for argument
		 */
		status = acpi_ps_push_scope(parser_state, op,
					    walk_state->arg_types,
					    walk_state->arg_count);
		if (ACPI_FAILURE(status)) {
			goto close_this_op;
		}
		op = NULL;
		continue;
	}

	/*
	 * All arguments have been processed -- Op is complete,
	 * prepare for next
	 */
	walk_state->op_info =
	    acpi_ps_get_opcode_info(op->common.aml_opcode);
	if (walk_state->op_info->flags & AML_NAMED) {
		if (acpi_gbl_depth) {
			acpi_gbl_depth--;
		}

		if (op->common.aml_opcode == AML_REGION_OP) {
			/*
			 * Skip parsing of control method or opregion body,
			 * because we don't have enough info in the first pass
			 * to parse them correctly.
			 *
			 * Completed parsing an op_region declaration, we now
			 * know the length.
			 */
			op->named.length =
			    (u32) (parser_state->aml - op->named.data);
		}
	}

	if (walk_state->op_info->flags & AML_CREATE) {
		/*
		 * Backup to beginning of create_xXXfield declaration (1 for
		 * Opcode)
		 *
		 * body_length is unknown until we parse the body
		 */
		op->named.length =
		    (u32) (parser_state->aml - op->named.data);
	}

	/* This op complete, notify the dispatcher */

	if (walk_state->ascending_callback != NULL) {
		walk_state->op = op;
		walk_state->opcode = op->common.aml_opcode;

		status = walk_state->ascending_callback(walk_state);
		status = acpi_ps_next_parse_state(walk_state, op, status);
		if (status == AE_CTRL_PENDING) {
			status = AE_OK;
			goto close_this_op;
		}
	}

      close_this_op:
	/*
	 * Finished one argument of the containing scope
	 */
	parser_state->scope->parse_scope.arg_count--;

	/* Finished with pre_op */

	if (pre_op) {
		acpi_ps_free_op(pre_op);
		pre_op = NULL;
	}

	/* Close this Op (will result in parse subtree deletion) */

	status2 = acpi_ps_complete_this_op(walk_state, op);
	if (ACPI_FAILURE(status2)) {
		return_ACPI_STATUS(status2);
	}
	op = NULL;

	switch (status) {
	case AE_OK:
		break;

	case AE_CTRL_TRANSFER:

		/* We are about to transfer to a called method. */

		walk_state->prev_op = op;
		walk_state->prev_arg_types = walk_state->arg_types;
		return_ACPI_STATUS(status);

	case AE_CTRL_END:

		acpi_ps_pop_scope(parser_state, &op,
				  &walk_state->arg_types,
				  &walk_state->arg_count);

		if (op) {
			walk_state->op = op;
			walk_state->op_info =
			    acpi_ps_get_opcode_info(op->common.aml_opcode);
			walk_state->opcode = op->common.aml_opcode;

			status = walk_state->ascending_callback(walk_state);
			status = acpi_ps_next_parse_state(walk_state, op, status);

			status2 = acpi_ps_complete_this_op(walk_state, op);
			if (ACPI_FAILURE(status2)) {
				return_ACPI_STATUS(status2);
			}
			op = NULL;
		}
		status = AE_OK;
		break;

	case AE_CTRL_BREAK:
	case AE_CTRL_CONTINUE:

		/* Pop off scopes until we find the While */

		while (!op || (op->common.aml_opcode != AML_WHILE_OP)) {
			acpi_ps_pop_scope(parser_state, &op,
					  &walk_state->arg_types,
					  &walk_state->arg_count);

			if (op->common.aml_opcode != AML_WHILE_OP) {
				status2 = acpi_ds_result_stack_pop(walk_state);
				if (ACPI_FAILURE(status2)) {
					return_ACPI_STATUS(status2);
				}
			}
		}

		/* Close this iteration of the While loop */

		walk_state->op = op;
		walk_state->op_info =
		    acpi_ps_get_opcode_info(op->common.aml_opcode);
		walk_state->opcode = op->common.aml_opcode;

		status = walk_state->ascending_callback(walk_state);
		status = acpi_ps_next_parse_state(walk_state, op, status);

		status2 = acpi_ps_complete_this_op(walk_state, op);
		if (ACPI_FAILURE(status2)) {
			return_ACPI_STATUS(status2);
		}
		op = NULL;

		status = AE_OK;
		break;

	case AE_CTRL_TERMINATE:

		status = AE_OK;

		/* Clean up */
		do {
			if (op) {
				status2 = acpi_ps_complete_this_op(walk_state, op);
				if (ACPI_FAILURE(status2)) {
					return_ACPI_STATUS(status2);
				}

				status2 = acpi_ds_result_stack_pop(walk_state);
				if (ACPI_FAILURE(status2)) {
					return_ACPI_STATUS(status2);
				}

				acpi_ut_delete_generic_state
				    (acpi_ut_pop_generic_state
				     (&walk_state->control_state));
			}

			acpi_ps_pop_scope(parser_state, &op,
					  &walk_state->arg_types,
					  &walk_state->arg_count);

		} while (op);

		return_ACPI_STATUS(status);

	default:		/* All other non-AE_OK status */

		do {
			if (op) {
				status2 = acpi_ps_complete_this_op(walk_state, op);
				if (ACPI_FAILURE(status2)) {
					return_ACPI_STATUS(status2);
				}
			}

			acpi_ps_pop_scope(parser_state, &op,
					  &walk_state->arg_types,
					  &walk_state->arg_count);

		} while (op);

		/*
		 * TBD: Cleanup parse ops on error
		 */
#if 0
		if (op == NULL) {
			acpi_ps_pop_scope(parser_state, &op,
					  &walk_state->arg_types,
					  &walk_state->arg_count);
		}
#endif
		walk_state->prev_op = op;
		walk_state->prev_arg_types = walk_state->arg_types;
		return_ACPI_STATUS(status);
	}

	/* This scope complete? */

	if (acpi_ps_has_completed_scope(parser_state)) {
		acpi_ps_pop_scope(parser_state, &op,
				  &walk_state->arg_types,
				  &walk_state->arg_count);
		ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
				  "Popped scope, Op=%p\n", op));
	} else {
		op = NULL;
	}

	}	/* while parser_state->Aml */
@@ +397,332 @@ (added: remainder of acpi_ps_get_arguments, plus acpi_ps_complete_op and the start of acpi_ps_complete_final_op)
	 */
	while (GET_CURRENT_ARG_TYPE(walk_state->arg_types)
	       && !walk_state->arg_count) {
		walk_state->aml_offset =
		    (u32) ACPI_PTR_DIFF(walk_state->parser_state.aml,
					walk_state->parser_state.aml_start);

		status = acpi_ps_get_next_arg(walk_state,
					      &(walk_state->parser_state),
					      GET_CURRENT_ARG_TYPE
					      (walk_state->arg_types), &arg);
		if (ACPI_FAILURE(status)) {
			return_ACPI_STATUS(status);
		}

		if (arg) {
			arg->common.aml_offset = walk_state->aml_offset;
			acpi_ps_append_arg(op, arg);
		}

		INCREMENT_ARG_LIST(walk_state->arg_types);
	}

	/* Special processing for certain opcodes */

	/* TBD (remove): Temporary mechanism to disable this code if needed */

#ifdef ACPI_ENABLE_MODULE_LEVEL_CODE

	if ((walk_state->pass_number <= ACPI_IMODE_LOAD_PASS1) &&
	    ((walk_state->parse_flags & ACPI_PARSE_DISASSEMBLE) == 0)) {
		/*
		 * We want to skip If/Else/While constructs during Pass1 because we
		 * want to actually conditionally execute the code during Pass2.
		 *
		 * Except for disassembly, where we always want to walk the
		 * If/Else/While packages
		 */
		switch (op->common.aml_opcode) {
		case AML_IF_OP:
		case AML_ELSE_OP:
		case AML_WHILE_OP:

			ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
					  "Pass1: Skipping an If/Else/While body\n"));

			/* Skip body of if/else/while in pass 1 */

			walk_state->parser_state.aml =
			    walk_state->parser_state.pkg_end;
			walk_state->arg_count = 0;
			break;

		default:
			break;
		}
	}
#endif

	switch (op->common.aml_opcode) {
	case AML_METHOD_OP:
		/*
		 * Skip parsing of control method because we don't have enough
		 * info in the first pass to parse it correctly.
		 *
		 * Save the length and address of the body
		 */
		op->named.data = walk_state->parser_state.aml;
		op->named.length = (u32)
		    (walk_state->parser_state.pkg_end -
		     walk_state->parser_state.aml);

		/* Skip body of method */

		walk_state->parser_state.aml =
		    walk_state->parser_state.pkg_end;
		walk_state->arg_count = 0;
		break;

	case AML_BUFFER_OP:
	case AML_PACKAGE_OP:
	case AML_VAR_PACKAGE_OP:

		if ((op->common.parent) &&
		    (op->common.parent->common.aml_opcode == AML_NAME_OP)
		    && (walk_state->pass_number <= ACPI_IMODE_LOAD_PASS2)) {
			/*
			 * Skip parsing of Buffers and Packages because we don't have
			 * enough info in the first pass to parse them correctly.
			 */
			op->named.data = aml_op_start;
			op->named.length = (u32)
			    (walk_state->parser_state.pkg_end - aml_op_start);

			/* Skip body */

			walk_state->parser_state.aml =
			    walk_state->parser_state.pkg_end;
			walk_state->arg_count = 0;
		}
		break;

	case AML_WHILE_OP:

		if (walk_state->control_state) {
			walk_state->control_state->control.package_end =
			    walk_state->parser_state.pkg_end;
		}
		break;

	default:

		/* No action for all other opcodes */
		break;
	}

	break;
	}

	return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ps_complete_op
 *
 * PARAMETERS:  walk_state          - Current state
 *              Op                  - Returned Op
 *              Status              - Parse status before complete Op
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Complete Op
 *
 ******************************************************************************/

static acpi_status
acpi_ps_complete_op(struct acpi_walk_state *walk_state,
		    union acpi_parse_object **op, acpi_status status)
{
	acpi_status status2;

	ACPI_FUNCTION_TRACE_PTR(ps_complete_op, walk_state);

	/*
	 * Finished one argument of the containing scope
	 */
	walk_state->parser_state.scope->parse_scope.arg_count--;

	/* Close this Op (will result in parse subtree deletion) */

	status2 = acpi_ps_complete_this_op(walk_state, *op);
	if (ACPI_FAILURE(status2)) {
		return_ACPI_STATUS(status2);
	}

	*op = NULL;

	switch (status) {
	case AE_OK:
		break;

	case AE_CTRL_TRANSFER:

		/* We are about to transfer to a called method */

		walk_state->prev_op = NULL;
		walk_state->prev_arg_types = walk_state->arg_types;
		return_ACPI_STATUS(status);

	case AE_CTRL_END:

		acpi_ps_pop_scope(&(walk_state->parser_state), op,
				  &walk_state->arg_types,
				  &walk_state->arg_count);

		if (*op) {
			walk_state->op = *op;
			walk_state->op_info =
			    acpi_ps_get_opcode_info((*op)->common.aml_opcode);
			walk_state->opcode = (*op)->common.aml_opcode;

			status = walk_state->ascending_callback(walk_state);
			status = acpi_ps_next_parse_state(walk_state, *op, status);

			status2 = acpi_ps_complete_this_op(walk_state, *op);
			if (ACPI_FAILURE(status2)) {
				return_ACPI_STATUS(status2);
			}
		}

		status = AE_OK;
		break;

	case AE_CTRL_BREAK:
	case AE_CTRL_CONTINUE:

		/* Pop off scopes until we find the While */

		while (!(*op) || ((*op)->common.aml_opcode != AML_WHILE_OP)) {
			acpi_ps_pop_scope(&(walk_state->parser_state), op,
					  &walk_state->arg_types,
					  &walk_state->arg_count);

			if ((*op)->common.aml_opcode != AML_WHILE_OP) {
				status2 = acpi_ds_result_stack_pop(walk_state);
				if (ACPI_FAILURE(status2)) {
					return_ACPI_STATUS(status2);
				}
			}
		}

		/* Close this iteration of the While loop */

		walk_state->op = *op;
		walk_state->op_info =
		    acpi_ps_get_opcode_info((*op)->common.aml_opcode);
		walk_state->opcode = (*op)->common.aml_opcode;

		status = walk_state->ascending_callback(walk_state);
		status = acpi_ps_next_parse_state(walk_state, *op, status);

		status2 = acpi_ps_complete_this_op(walk_state, *op);
		if (ACPI_FAILURE(status2)) {
			return_ACPI_STATUS(status2);
		}

		status = AE_OK;
		break;

	case AE_CTRL_TERMINATE:

		/* Clean up */
		do {
			if (*op) {
				status2 =
				    acpi_ps_complete_this_op(walk_state, *op);
				if (ACPI_FAILURE(status2)) {
					return_ACPI_STATUS(status2);
				}
				status2 = acpi_ds_result_stack_pop(walk_state);
				if (ACPI_FAILURE(status2)) {
					return_ACPI_STATUS(status2);
				}

				acpi_ut_delete_generic_state
				    (acpi_ut_pop_generic_state
				     (&walk_state->control_state));
			}

			acpi_ps_pop_scope(&(walk_state->parser_state), op,
					  &walk_state->arg_types,
					  &walk_state->arg_count);

		} while (*op);

		return_ACPI_STATUS(AE_OK);

	default:		/* All other non-AE_OK status */

		do {
			if (*op) {
				status2 =
				    acpi_ps_complete_this_op(walk_state, *op);
				if (ACPI_FAILURE(status2)) {
					return_ACPI_STATUS(status2);
				}
			}

			acpi_ps_pop_scope(&(walk_state->parser_state), op,
					  &walk_state->arg_types,
					  &walk_state->arg_count);

		} while (*op);

#if 0
		/*
		 * TBD: Cleanup parse ops on error
		 */
		if (*op == NULL) {
			acpi_ps_pop_scope(parser_state, op,
					  &walk_state->arg_types,
					  &walk_state->arg_count);
		}
#endif
		walk_state->prev_op = NULL;
		walk_state->prev_arg_types = walk_state->arg_types;
		return_ACPI_STATUS(status);
	}

	/* This scope complete? */

	if (acpi_ps_has_completed_scope(&(walk_state->parser_state))) {
		acpi_ps_pop_scope(&(walk_state->parser_state), op,
				  &walk_state->arg_types,
				  &walk_state->arg_count);
		ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "Popped scope, Op=%p\n", *op));
	} else {
		*op = NULL;
	}

	return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ps_complete_final_op
 *
 * PARAMETERS:  walk_state          - Current state
 *              Op                  - Current Op
 *              Status              - Current parse status before complete last
 *                                    Op
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Complete last Op.
 *
 ******************************************************************************/

static acpi_status
acpi_ps_complete_final_op(struct acpi_walk_state *walk_state,
			  union acpi_parse_object *op, acpi_status status)
{
	acpi_status status2;

	ACPI_FUNCTION_TRACE_PTR(ps_complete_final_op, walk_state);
817 729
818 /* 730 /*
819 * Complete the last Op (if not completed), and clear the scope stack. 731 * Complete the last Op (if not completed), and clear the scope stack.
820 * It is easily possible to end an AML "package" with an unbounded number 732 * It is easily possible to end an AML "package" with an unbounded number
821 * of open scopes (such as when several ASL blocks are closed with 733 * of open scopes (such as when several ASL blocks are closed with
822 * sequential closing braces). We want to terminate each one cleanly. 734 * sequential closing braces). We want to terminate each one cleanly.
823 */ 735 */
824 ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "AML package complete at Op %p\n", 736 ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "AML package complete at Op %p\n",
825 op)); 737 op));
@@ -838,8 +750,12 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
838 acpi_ps_next_parse_state(walk_state, op, 750 acpi_ps_next_parse_state(walk_state, op,
839 status); 751 status);
@@ -840,4 @@ (removed)
				if (status == AE_CTRL_PENDING) {
					status = AE_OK;
					goto close_this_op;
				}
@@ +752,8 @@ (added)
				if (status == AE_CTRL_PENDING) {
					status =
					    acpi_ps_complete_op(walk_state, &op,
								AE_OK);
					if (ACPI_FAILURE(status)) {
						return_ACPI_STATUS(status);
					}
				}
844 760
845 if (status == AE_CTRL_TERMINATE) { 761 if (status == AE_CTRL_TERMINATE) {
@@ -858,7 +774,9 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
858 } 774 }
859 } 775 }
860 776
@@ -861,4 @@ (removed)
					acpi_ps_pop_scope(parser_state, &op,
							  &walk_state->arg_types,
@@ +777,6 @@ (added)
					acpi_ps_pop_scope(&(walk_state->parser_state),
							  &op,
							  &walk_state->arg_types,
@@ -887,10 +805,252 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
887 } 805 }
888 } 806 }
889 807
@@ -890,2 @@ (removed)
		acpi_ps_pop_scope(parser_state, &op, &walk_state->arg_types,
				  &walk_state->arg_count);
@@ +808,3 @@ (added)
		acpi_ps_pop_scope(&(walk_state->parser_state), &op,
				  &walk_state->arg_types,
				  &walk_state->arg_count);
892 811
893 } while (op); 812 } while (op);
894 813
895 return_ACPI_STATUS(status); 814 return_ACPI_STATUS(status);
896} 815}
816
817/*******************************************************************************
818 *
819 * FUNCTION: acpi_ps_parse_loop
820 *
821 * PARAMETERS: walk_state - Current state
822 *
823 * RETURN: Status
824 *
825 * DESCRIPTION: Parse AML (pointed to by the current parser state) and return
826 * a tree of ops.
827 *
828 ******************************************************************************/
829
830acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
831{
832 acpi_status status = AE_OK;
833 union acpi_parse_object *op = NULL; /* current op */
834 struct acpi_parse_state *parser_state;
835 u8 *aml_op_start = NULL;
836
837 ACPI_FUNCTION_TRACE_PTR(ps_parse_loop, walk_state);
838
839 if (walk_state->descending_callback == NULL) {
840 return_ACPI_STATUS(AE_BAD_PARAMETER);
841 }
842
843 parser_state = &walk_state->parser_state;
844 walk_state->arg_types = 0;
845
846#if (!defined (ACPI_NO_METHOD_EXECUTION) && !defined (ACPI_CONSTANT_EVAL_ONLY))
847
848 if (walk_state->walk_type & ACPI_WALK_METHOD_RESTART) {
849
850 /* We are restarting a preempted control method */
851
852 if (acpi_ps_has_completed_scope(parser_state)) {
853 /*
854 * We must check if a predicate to an IF or WHILE statement
855 * was just completed
856 */
857 if ((parser_state->scope->parse_scope.op) &&
858 ((parser_state->scope->parse_scope.op->common.
859 aml_opcode == AML_IF_OP)
860 || (parser_state->scope->parse_scope.op->common.
861 aml_opcode == AML_WHILE_OP))
862 && (walk_state->control_state)
863 && (walk_state->control_state->common.state ==
864 ACPI_CONTROL_PREDICATE_EXECUTING)) {
865 /*
866 * A predicate was just completed, get the value of the
867 * predicate and branch based on that value
868 */
869 walk_state->op = NULL;
870 status =
871 acpi_ds_get_predicate_value(walk_state,
872 ACPI_TO_POINTER
873 (TRUE));
874 if (ACPI_FAILURE(status)
875 && ((status & AE_CODE_MASK) !=
876 AE_CODE_CONTROL)) {
877 if (status == AE_AML_NO_RETURN_VALUE) {
878 ACPI_EXCEPTION((AE_INFO, status,
879 "Invoked method did not return a value"));
880
881 }
882
883 ACPI_EXCEPTION((AE_INFO, status,
884 "GetPredicate Failed"));
885 return_ACPI_STATUS(status);
886 }
887
888 status =
889 acpi_ps_next_parse_state(walk_state, op,
890 status);
891 }
892
893 acpi_ps_pop_scope(parser_state, &op,
894 &walk_state->arg_types,
895 &walk_state->arg_count);
896 ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
897 "Popped scope, Op=%p\n", op));
898 } else if (walk_state->prev_op) {
899
900 /* We were in the middle of an op */
901
902 op = walk_state->prev_op;
903 walk_state->arg_types = walk_state->prev_arg_types;
904 }
905 }
906#endif
907
908 /* Iterative parsing loop, while there is more AML to process: */
909
910 while ((parser_state->aml < parser_state->aml_end) || (op)) {
911 aml_op_start = parser_state->aml;
912 if (!op) {
913 status =
914 acpi_ps_create_op(walk_state, aml_op_start, &op);
915 if (ACPI_FAILURE(status)) {
916 if (status == AE_CTRL_PARSE_CONTINUE) {
917 continue;
918 }
919
920 if (status == AE_CTRL_PARSE_PENDING) {
921 status = AE_OK;
922 }
923
924 status =
925 acpi_ps_complete_op(walk_state, &op,
926 status);
927 if (ACPI_FAILURE(status)) {
928 return_ACPI_STATUS(status);
929 }
930
931 continue;
932 }
933
934 op->common.aml_offset = walk_state->aml_offset;
935
936 if (walk_state->op_info) {
937 ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
938 "Opcode %4.4X [%s] Op %p Aml %p AmlOffset %5.5X\n",
939 (u32) op->common.aml_opcode,
940 walk_state->op_info->name, op,
941 parser_state->aml,
942 op->common.aml_offset));
943 }
944 }
945
946 /*
947 * Start arg_count at zero because we don't know if there are
948 * any args yet
949 */
950 walk_state->arg_count = 0;
951
952 /* Are there any arguments that must be processed? */
953
954 if (walk_state->arg_types) {
955
956 /* Get arguments */
957
958 status =
959 acpi_ps_get_arguments(walk_state, aml_op_start, op);
960 if (ACPI_FAILURE(status)) {
961 status =
962 acpi_ps_complete_op(walk_state, &op,
963 status);
964 if (ACPI_FAILURE(status)) {
965 return_ACPI_STATUS(status);
966 }
967
968 continue;
969 }
970 }
971
972 /* Check for arguments that need to be processed */
973
974 if (walk_state->arg_count) {
975 /*
976 * There are arguments (complex ones), push Op and
977 * prepare for argument
978 */
979 status = acpi_ps_push_scope(parser_state, op,
980 walk_state->arg_types,
981 walk_state->arg_count);
982 if (ACPI_FAILURE(status)) {
983 status =
984 acpi_ps_complete_op(walk_state, &op,
985 status);
986 if (ACPI_FAILURE(status)) {
987 return_ACPI_STATUS(status);
988 }
989
990 continue;
991 }
992
993 op = NULL;
994 continue;
995 }
996
997 /*
998 * All arguments have been processed -- Op is complete,
999 * prepare for next
1000 */
1001 walk_state->op_info =
1002 acpi_ps_get_opcode_info(op->common.aml_opcode);
1003 if (walk_state->op_info->flags & AML_NAMED) {
1004 if (acpi_gbl_depth) {
1005 acpi_gbl_depth--;
1006 }
1007
1008 if (op->common.aml_opcode == AML_REGION_OP) {
1009 /*
1010 * Skip parsing of control method or opregion body,
1011 * because we don't have enough info in the first pass
1012 * to parse them correctly.
1013 *
1014 * Completed parsing an op_region declaration, we now
1015 * know the length.
1016 */
1017 op->named.length =
1018 (u32) (parser_state->aml - op->named.data);
1019 }
1020 }
1021
1022 if (walk_state->op_info->flags & AML_CREATE) {
1023 /*
1024 * Backup to beginning of create_xXXfield declaration (1 for
1025 * Opcode)
1026 *
1027 * body_length is unknown until we parse the body
1028 */
1029 op->named.length =
1030 (u32) (parser_state->aml - op->named.data);
1031 }
1032
1033 /* This op complete, notify the dispatcher */
1034
1035 if (walk_state->ascending_callback != NULL) {
1036 walk_state->op = op;
1037 walk_state->opcode = op->common.aml_opcode;
1038
1039 status = walk_state->ascending_callback(walk_state);
1040 status =
1041 acpi_ps_next_parse_state(walk_state, op, status);
1042 if (status == AE_CTRL_PENDING) {
1043 status = AE_OK;
1044 }
1045 }
1046
1047 status = acpi_ps_complete_op(walk_state, &op, status);
1048 if (ACPI_FAILURE(status)) {
1049 return_ACPI_STATUS(status);
1050 }
1051
1052 } /* while parser_state->Aml */
1053
1054 status = acpi_ps_complete_final_op(walk_state, op, status);
1055 return_ACPI_STATUS(status);
1056}
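The refactoring above splits the old monolithic parse loop into acpi_ps_create_op(), acpi_ps_get_arguments(), acpi_ps_complete_op() and acpi_ps_complete_final_op(). One detail worth noting is that the completion helpers take op by double pointer, so a single call can both consume the parse node and clear the caller's reference. A minimal standalone illustration of that ownership pattern (plain C with illustrative names, not ACPICA code):

#include <stdio.h>
#include <stdlib.h>

struct parse_op {
	int opcode;
};

/* Mirrors the convention of acpi_ps_complete_op() above: the helper
 * consumes *op and clears the caller's pointer, so the main loop can
 * test "op == NULL" to decide whether to decode the next opcode. */
static void complete_op(struct parse_op **op)
{
	if (*op == NULL)
		return;

	printf("completing opcode 0x%02x\n", (*op)->opcode);
	free(*op);
	*op = NULL;
}

int main(void)
{
	struct parse_op *op = malloc(sizeof(*op));

	if (!op)
		return 1;

	op->opcode = 0x14;	/* placeholder opcode value */
	complete_op(&op);

	/* a real loop would now fall through and create the next op */
	return (op == NULL) ? 0 : 1;
}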
diff --git a/drivers/acpi/parser/psopcode.c b/drivers/acpi/parser/psopcode.c
index 4bd25e32769f..16d8b6cc3c22 100644
--- a/drivers/acpi/parser/psopcode.c
+++ b/drivers/acpi/parser/psopcode.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/parser/psparse.c b/drivers/acpi/parser/psparse.c
index a02aa62fe1e5..5d63f48e56b5 100644
--- a/drivers/acpi/parser/psparse.c
+++ b/drivers/acpi/parser/psparse.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -540,6 +540,11 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state)
540 540
541 if ((status == AE_ALREADY_EXISTS) && 541 if ((status == AE_ALREADY_EXISTS) &&
542 (!walk_state->method_desc->method.mutex)) { 542 (!walk_state->method_desc->method.mutex)) {
543 ACPI_INFO((AE_INFO,
544 "Marking method %4.4s as Serialized",
545 walk_state->method_node->name.
546 ascii));
547
543 /* 548 /*
544 * Method tried to create an object twice. The probable cause is 549 * Method tried to create an object twice. The probable cause is
545 * that the method cannot handle reentrancy. 550 * that the method cannot handle reentrancy.
diff --git a/drivers/acpi/parser/psscope.c b/drivers/acpi/parser/psscope.c
index a3e0314de24d..77cfa4ed0cfe 100644
--- a/drivers/acpi/parser/psscope.c
+++ b/drivers/acpi/parser/psscope.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/parser/pstree.c b/drivers/acpi/parser/pstree.c
index 0015717ef096..966e7ea2a0c4 100644
--- a/drivers/acpi/parser/pstree.c
+++ b/drivers/acpi/parser/pstree.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/parser/psutils.c b/drivers/acpi/parser/psutils.c
index d405387b7414..8ca52002db55 100644
--- a/drivers/acpi/parser/psutils.c
+++ b/drivers/acpi/parser/psutils.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/parser/pswalk.c b/drivers/acpi/parser/pswalk.c
index a84a547a0f1b..49f9757434e4 100644
--- a/drivers/acpi/parser/pswalk.c
+++ b/drivers/acpi/parser/pswalk.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/parser/psxface.c b/drivers/acpi/parser/psxface.c
index 5d996c1140af..94103bced75e 100644
--- a/drivers/acpi/parser/psxface.c
+++ b/drivers/acpi/parser/psxface.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -54,8 +54,6 @@ static void acpi_ps_start_trace(struct acpi_evaluate_info *info);
54 54
55static void acpi_ps_stop_trace(struct acpi_evaluate_info *info); 55static void acpi_ps_stop_trace(struct acpi_evaluate_info *info);
56 56
57static acpi_status acpi_ps_execute_pass(struct acpi_evaluate_info *info);
58
59static void 57static void
60acpi_ps_update_parameter_list(struct acpi_evaluate_info *info, u16 action); 58acpi_ps_update_parameter_list(struct acpi_evaluate_info *info, u16 action);
61 59
@@ -215,6 +213,8 @@ static void acpi_ps_stop_trace(struct acpi_evaluate_info *info)
215acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info) 213acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info)
216{ 214{
217 acpi_status status; 215 acpi_status status;
216 union acpi_parse_object *op;
217 struct acpi_walk_state *walk_state;
218 218
219 ACPI_FUNCTION_TRACE(ps_execute_method); 219 ACPI_FUNCTION_TRACE(ps_execute_method);
220 220
@@ -234,8 +234,7 @@ acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info)
234 } 234 }
235 235
236 /* 236 /*
237 * The caller "owns" the parameters, so give each one an extra 237 * The caller "owns" the parameters, so give each one an extra reference
238 * reference
239 */ 238 */
240 acpi_ps_update_parameter_list(info, REF_INCREMENT); 239 acpi_ps_update_parameter_list(info, REF_INCREMENT);
241 240
@@ -244,30 +243,50 @@ acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info)
244 acpi_ps_start_trace(info); 243 acpi_ps_start_trace(info);
245 244
246 /* 245 /*
@@ -247,25 @@ (removed: old two-pass parse/execute)
	 * 1) Perform the first pass parse of the method to enter any
	 * named objects that it creates into the namespace
	 */
	ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
			  "**** Begin Method Parse **** Entry=%p obj=%p\n",
			  info->resolved_node, info->obj_desc));

	info->pass_number = 1;
	status = acpi_ps_execute_pass(info);
	if (ACPI_FAILURE(status)) {
		goto cleanup;
	}

	/*
	 * 2) Execute the method. Performs second pass parse simultaneously
	 */
	ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
			  "**** Begin Method Execution **** Entry=%p obj=%p\n",
			  info->resolved_node, info->obj_desc));

	info->pass_number = 3;
	status = acpi_ps_execute_pass(info);

      cleanup:
	/* End optional tracing */
@@ +246,45 @@ (added: single combined parse/execute pass)
	 * Execute the method. Performs parse simultaneously
	 */
	ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
			  "**** Begin Method Parse/Execute [%4.4s] **** Node=%p Obj=%p\n",
			  info->resolved_node->name.ascii, info->resolved_node,
			  info->obj_desc));

	/* Create and init a Root Node */

	op = acpi_ps_create_scope_op();
	if (!op) {
		status = AE_NO_MEMORY;
		goto cleanup;
	}

	/* Create and initialize a new walk state */

	info->pass_number = ACPI_IMODE_EXECUTE;
	walk_state =
	    acpi_ds_create_walk_state(info->obj_desc->method.owner_id, NULL,
				      NULL, NULL);
	if (!walk_state) {
		status = AE_NO_MEMORY;
		goto cleanup;
	}

	status = acpi_ds_init_aml_walk(walk_state, op, info->resolved_node,
				       info->obj_desc->method.aml_start,
				       info->obj_desc->method.aml_length, info,
				       info->pass_number);
	if (ACPI_FAILURE(status)) {
		acpi_ds_delete_walk_state(walk_state);
		goto cleanup;
	}

	/* Parse the AML */

	status = acpi_ps_parse_aml(walk_state);

	/* walk_state was deleted by parse_aml */

      cleanup:
	acpi_ps_delete_parse_tree(op);

	/* End optional tracing */
272 291
273 acpi_ps_stop_trace(info); 292 acpi_ps_stop_trace(info);
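With this hunk the old two-pass scheme (pass 1 parses and loads named objects, pass 3 executes) is gone: acpi_ps_execute_method() now performs one combined parse/execute walk with pass_number = ACPI_IMODE_EXECUTE. From a driver's point of view nothing changes; evaluation still goes through the public interface. A hedged usage sketch (the handle, method name, and helper are illustrative, not part of this patch):

/* Hypothetical caller: evaluating any control method ultimately funnels
 * into acpi_ps_execute_method(), which now parses and executes the AML
 * in a single pass instead of the old parse-then-execute pair. */
static acpi_status evaluate_sta(acpi_handle handle, unsigned long long *sta)
{
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *obj;
	acpi_status status;

	status = acpi_evaluate_object(handle, "_STA", NULL, &buffer);
	if (ACPI_FAILURE(status))
		return status;

	obj = buffer.pointer;
	if (!obj || obj->type != ACPI_TYPE_INTEGER)
		status = AE_TYPE;
	else
		*sta = obj->integer.value;

	kfree(buffer.pointer);
	return status;
}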
@@ -330,62 +349,3 @@ acpi_ps_update_parameter_list(struct acpi_evaluate_info *info, u16 action)
330 } 349 }
331 } 350 }
332} 351}
333
334/*******************************************************************************
335 *
336 * FUNCTION: acpi_ps_execute_pass
337 *
338 * PARAMETERS: Info - See struct acpi_evaluate_info
339 * (Used: pass_number, Node, and obj_desc)
340 *
341 * RETURN: Status
342 *
343 * DESCRIPTION: Single AML pass: Parse or Execute a control method
344 *
345 ******************************************************************************/
346
347static acpi_status acpi_ps_execute_pass(struct acpi_evaluate_info *info)
348{
349 acpi_status status;
350 union acpi_parse_object *op;
351 struct acpi_walk_state *walk_state;
352
353 ACPI_FUNCTION_TRACE(ps_execute_pass);
354
355 /* Create and init a Root Node */
356
357 op = acpi_ps_create_scope_op();
358 if (!op) {
359 return_ACPI_STATUS(AE_NO_MEMORY);
360 }
361
362 /* Create and initialize a new walk state */
363
364 walk_state =
365 acpi_ds_create_walk_state(info->obj_desc->method.owner_id, NULL,
366 NULL, NULL);
367 if (!walk_state) {
368 status = AE_NO_MEMORY;
369 goto cleanup;
370 }
371
372 status = acpi_ds_init_aml_walk(walk_state, op, info->resolved_node,
373 info->obj_desc->method.aml_start,
374 info->obj_desc->method.aml_length,
375 info->pass_number == 1 ? NULL : info,
376 info->pass_number);
377 if (ACPI_FAILURE(status)) {
378 acpi_ds_delete_walk_state(walk_state);
379 goto cleanup;
380 }
381
382 /* Parse the AML */
383
384 status = acpi_ps_parse_aml(walk_state);
385
386 /* Walk state was deleted by parse_aml */
387
388 cleanup:
389 acpi_ps_delete_parse_tree(op);
390 return_ACPI_STATUS(status);
391}
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index 481e633bbf41..0f683c8c6fbc 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -513,7 +513,7 @@ int __init acpi_irq_penalty_init(void)
513 } 513 }
514 } 514 }
515 /* Add a penalty for the SCI */ 515 /* Add a penalty for the SCI */
516 acpi_irq_penalty[acpi_fadt.sci_int] += PIRQ_PENALTY_PCI_USING; 516 acpi_irq_penalty[acpi_gbl_FADT.sci_interrupt] += PIRQ_PENALTY_PCI_USING;
517 517
518 return 0; 518 return 0;
519} 519}
@@ -785,7 +785,7 @@ static int irqrouter_resume(struct sys_device *dev)
785 785
786 786
787 /* Make sure SCI is enabled again (Apple firmware bug?) */ 787 /* Make sure SCI is enabled again (Apple firmware bug?) */
788 acpi_set_register(ACPI_BITREG_SCI_ENABLE, 1, ACPI_MTX_DO_NOT_LOCK); 788 acpi_set_register(ACPI_BITREG_SCI_ENABLE, 1);
789 789
790 list_for_each(node, &acpi_link.entries) { 790 list_for_each(node, &acpi_link.entries) {
791 link = list_entry(node, struct acpi_pci_link, node); 791 link = list_entry(node, struct acpi_pci_link, node);
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index a860efa2c562..4ecf701687e8 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -117,6 +117,19 @@ void acpi_pci_unregister_driver(struct acpi_pci_driver *driver)
117 117
118EXPORT_SYMBOL(acpi_pci_unregister_driver); 118EXPORT_SYMBOL(acpi_pci_unregister_driver);
119 119
120acpi_handle acpi_get_pci_rootbridge_handle(unsigned int seg, unsigned int bus)
121{
122 struct acpi_pci_root *tmp;
123
124 list_for_each_entry(tmp, &acpi_pci_roots, node) {
125 if ((tmp->id.segment == (u16) seg) && (tmp->id.bus == (u16) bus))
126 return tmp->device->handle;
127 }
128 return NULL;
129}
130
131EXPORT_SYMBOL_GPL(acpi_get_pci_rootbridge_handle);
132
120static acpi_status 133static acpi_status
121get_root_bridge_busnr_callback(struct acpi_resource *resource, void *data) 134get_root_bridge_busnr_callback(struct acpi_resource *resource, void *data)
122{ 135{
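The new acpi_get_pci_rootbridge_handle() export gives other subsystems a way to translate a (segment, bus) pair back into the root bridge's ACPI handle. A hedged usage sketch (the caller and the segment/bus values are illustrative):

/* Hypothetical consumer of the new export: look up the ACPI handle for
 * the root bridge of segment 0, bus 0. */
static void probe_root_bridge(void)
{
	acpi_handle handle;

	handle = acpi_get_pci_rootbridge_handle(0, 0);
	if (!handle)
		return;		/* no ACPI root bridge registered for 0000:00 */

	/* handle now refers to the root bridge device node in the namespace */
}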
@@ -152,6 +165,21 @@ static acpi_status try_get_root_bridge_busnr(acpi_handle handle, int *busnum)
152 return AE_OK; 165 return AE_OK;
153} 166}
154 167
168static void acpi_pci_bridge_scan(struct acpi_device *device)
169{
170 int status;
171 struct acpi_device *child = NULL;
172
173 if (device->flags.bus_address)
174 if (device->parent && device->parent->ops.bind) {
175 status = device->parent->ops.bind(device);
176 if (!status) {
177 list_for_each_entry(child, &device->children, node)
178 acpi_pci_bridge_scan(child);
179 }
180 }
181}
182
155static int acpi_pci_root_add(struct acpi_device *device) 183static int acpi_pci_root_add(struct acpi_device *device)
156{ 184{
157 int result = 0; 185 int result = 0;
@@ -160,6 +188,7 @@ static int acpi_pci_root_add(struct acpi_device *device)
160 acpi_status status = AE_OK; 188 acpi_status status = AE_OK;
161 unsigned long value = 0; 189 unsigned long value = 0;
162 acpi_handle handle = NULL; 190 acpi_handle handle = NULL;
191 struct acpi_device *child;
163 192
164 193
165 if (!device) 194 if (!device)
@@ -175,9 +204,6 @@ static int acpi_pci_root_add(struct acpi_device *device)
175 strcpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS); 204 strcpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS);
176 acpi_driver_data(device) = root; 205 acpi_driver_data(device) = root;
177 206
178 /*
179 * TBD: Doesn't the bus driver automatically set this?
180 */
181 device->ops.bind = acpi_pci_bind; 207 device->ops.bind = acpi_pci_bind;
182 208
183 /* 209 /*
@@ -299,6 +325,12 @@ static int acpi_pci_root_add(struct acpi_device *device)
299 result = acpi_pci_irq_add_prt(device->handle, root->id.segment, 325 result = acpi_pci_irq_add_prt(device->handle, root->id.segment,
300 root->id.bus); 326 root->id.bus);
301 327
328 /*
329 * Scan and bind all _ADR-Based Devices
330 */
331 list_for_each_entry(child, &device->children, node)
332 acpi_pci_bridge_scan(child);
333
302 end: 334 end:
303 if (result) { 335 if (result) {
304 if (!list_empty(&root->node)) 336 if (!list_empty(&root->node))
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 5f9496d59ed6..0079bc51082c 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -375,30 +375,126 @@ static int acpi_processor_remove_fs(struct acpi_device *device)
375} 375}
376 376
377/* Use the acpiid in MADT to map cpus in case of SMP */ 377/* Use the acpiid in MADT to map cpus in case of SMP */
378
378#ifndef CONFIG_SMP 379#ifndef CONFIG_SMP
379#define convert_acpiid_to_cpu(acpi_id) (-1) 380static int get_cpu_id(acpi_handle handle, u32 acpi_id) {return -1;}
380#else 381#else
381 382
383static struct acpi_table_madt *madt;
384
385static int map_lapic_id(struct acpi_subtable_header *entry,
386 u32 acpi_id, int *apic_id)
387{
388 struct acpi_madt_local_apic *lapic =
389 (struct acpi_madt_local_apic *)entry;
390 if ((lapic->lapic_flags & ACPI_MADT_ENABLED) &&
391 lapic->processor_id == acpi_id) {
392 *apic_id = lapic->id;
393 return 1;
394 }
395 return 0;
396}
397
398static int map_lsapic_id(struct acpi_subtable_header *entry,
399 u32 acpi_id, int *apic_id)
400{
401 struct acpi_madt_local_sapic *lsapic =
402 (struct acpi_madt_local_sapic *)entry;
403 /* Only check enabled APICs*/
404 if (lsapic->lapic_flags & ACPI_MADT_ENABLED) {
405 /* First check against id */
406 if (lsapic->processor_id == acpi_id) {
407 *apic_id = lsapic->id;
408 return 1;
409 /* Check against optional uid */
410 } else if (entry->length >= 16 &&
411 lsapic->uid == acpi_id) {
412 *apic_id = lsapic->uid;
413 return 1;
414 }
415 }
416 return 0;
417}
418
382#ifdef CONFIG_IA64 419#ifdef CONFIG_IA64
383#define arch_acpiid_to_apicid ia64_acpiid_to_sapicid
384#define arch_cpu_to_apicid ia64_cpu_to_sapicid 420#define arch_cpu_to_apicid ia64_cpu_to_sapicid
385#define ARCH_BAD_APICID (0xffff)
386#else 421#else
387#define arch_acpiid_to_apicid x86_acpiid_to_apicid
388#define arch_cpu_to_apicid x86_cpu_to_apicid 422#define arch_cpu_to_apicid x86_cpu_to_apicid
389#define ARCH_BAD_APICID (0xff)
390#endif 423#endif
391 424
392static int convert_acpiid_to_cpu(u8 acpi_id) 425static int map_madt_entry(u32 acpi_id)
426{
427 unsigned long madt_end, entry;
428 int apic_id = -1;
429
430 if (!madt)
431 return apic_id;
432
433 entry = (unsigned long)madt;
434 madt_end = entry + madt->header.length;
435
436 /* Parse all entries looking for a match. */
437
438 entry += sizeof(struct acpi_table_madt);
439 while (entry + sizeof(struct acpi_subtable_header) < madt_end) {
440 struct acpi_subtable_header *header =
441 (struct acpi_subtable_header *)entry;
442 if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
443 if (map_lapic_id(header, acpi_id, &apic_id))
444 break;
445 } else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
446 if (map_lsapic_id(header, acpi_id, &apic_id))
447 break;
448 }
449 entry += header->length;
450 }
451 return apic_id;
452}
453
454static int map_mat_entry(acpi_handle handle, u32 acpi_id)
455{
456 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
457 union acpi_object *obj;
458 struct acpi_subtable_header *header;
459 int apic_id = -1;
460
461 if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
462 goto exit;
463
464 if (!buffer.length || !buffer.pointer)
465 goto exit;
466
467 obj = buffer.pointer;
468 if (obj->type != ACPI_TYPE_BUFFER ||
469 obj->buffer.length < sizeof(struct acpi_subtable_header)) {
470 goto exit;
471 }
472
473 header = (struct acpi_subtable_header *)obj->buffer.pointer;
474 if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
475 map_lapic_id(header, acpi_id, &apic_id);
476 } else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
477 map_lsapic_id(header, acpi_id, &apic_id);
478 }
479
480exit:
481 if (buffer.pointer)
482 kfree(buffer.pointer);
483 return apic_id;
484}
485
486static int get_cpu_id(acpi_handle handle, u32 acpi_id)
393{ 487{
394 u16 apic_id;
395 int i; 488 int i;
489 int apic_id = -1;
396 490
397 apic_id = arch_acpiid_to_apicid[acpi_id]; 491 apic_id = map_mat_entry(handle, acpi_id);
398 if (apic_id == ARCH_BAD_APICID) 492 if (apic_id == -1)
399 return -1; 493 apic_id = map_madt_entry(acpi_id);
494 if (apic_id == -1)
495 return apic_id;
400 496
401 for (i = 0; i < NR_CPUS; i++) { 497 for (i = 0; i < NR_CPUS; ++i) {
402 if (arch_cpu_to_apicid[i] == apic_id) 498 if (arch_cpu_to_apicid[i] == apic_id)
403 return i; 499 return i;
404 } 500 }
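map_madt_entry() above shows the standard way to walk the MADT: subtables are variable-length records, each beginning with a common (type, length) header, so the cursor advances by header->length rather than a fixed stride. A self-contained model of the same walk, using simplified stand-in structs and synthetic data (not kernel code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-ins for struct acpi_subtable_header and the
 * local-APIC subtable (MADT type 0). */
struct subtable_header {
	uint8_t type;
	uint8_t length;
};

struct local_apic {
	struct subtable_header header;
	uint8_t processor_id;
	uint8_t id;
	uint32_t flags;		/* bit 0: enabled */
} __attribute__((packed));

static int find_apic_id(const uint8_t *body, size_t len, uint8_t acpi_id)
{
	size_t off = 0;

	while (off + sizeof(struct subtable_header) <= len) {
		const struct subtable_header *h =
		    (const struct subtable_header *)(body + off);

		if (h->length == 0)
			break;	/* malformed table; avoid an infinite loop */

		if (h->type == 0 && h->length >= sizeof(struct local_apic)) {
			struct local_apic lapic;

			memcpy(&lapic, h, sizeof(lapic));
			if ((lapic.flags & 1) && lapic.processor_id == acpi_id)
				return lapic.id;
		}
		off += h->length;	/* variable-length stride */
	}
	return -1;
}

int main(void)
{
	struct local_apic table[2] = {
		{ { 0, sizeof(struct local_apic) }, 2, 0x10, 1 },
		{ { 0, sizeof(struct local_apic) }, 5, 0x20, 1 },
	};

	printf("acpi_id 5 -> apic_id %d\n",
	       find_apic_id((const uint8_t *)table, sizeof(table), 5));
	return 0;
}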
@@ -410,7 +506,7 @@ static int convert_acpiid_to_cpu(u8 acpi_id)
410 Driver Interface 506 Driver Interface
411 -------------------------------------------------------------------------- */ 507 -------------------------------------------------------------------------- */
412 508
413static int acpi_processor_get_info(struct acpi_processor *pr) 509static int acpi_processor_get_info(struct acpi_processor *pr, unsigned has_uid)
414{ 510{
415 acpi_status status = 0; 511 acpi_status status = 0;
416 union acpi_object object = { 0 }; 512 union acpi_object object = { 0 };
@@ -431,7 +527,7 @@ static int acpi_processor_get_info(struct acpi_processor *pr)
431 * Check to see if we have bus mastering arbitration control. This 527 * Check to see if we have bus mastering arbitration control. This
432 * is required for proper C3 usage (to maintain cache coherency). 528 * is required for proper C3 usage (to maintain cache coherency).
433 */ 529 */
434 if (acpi_fadt.V1_pm2_cnt_blk && acpi_fadt.pm2_cnt_len) { 530 if (acpi_gbl_FADT.pm2_control_block && acpi_gbl_FADT.pm2_control_length) {
435 pr->flags.bm_control = 1; 531 pr->flags.bm_control = 1;
436 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 532 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
437 "Bus mastering arbitration control present\n")); 533 "Bus mastering arbitration control present\n"));
@@ -439,24 +535,35 @@ static int acpi_processor_get_info(struct acpi_processor *pr)
439 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 535 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
440 "No bus mastering arbitration control\n")); 536 "No bus mastering arbitration control\n"));
441 537
@@ -442,18 @@ (removed)
	/*
	 * Evalute the processor object.  Note that it is common on SMP to
	 * have the first (boot) processor with a valid PBLK address while
	 * all others have a NULL address.
	 */
	status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		printk(KERN_ERR PREFIX "Evaluating processor object\n");
		return -ENODEV;
	}

	/*
	 * TBD: Synch processor ID (via LAPIC/LSAPIC structures) on SMP.
	 * >>> 'acpi_get_processor_id(acpi_id, &id)' in arch/xxx/acpi.c
	 */
	pr->acpi_id = object.processor.proc_id;

	cpu_index = convert_acpiid_to_cpu(pr->acpi_id);
@@ +538,29 @@ (added)
	/* Check if it is a Device with HID and UID */
	if (has_uid) {
		unsigned long value;
		status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID,
					       NULL, &value);
		if (ACPI_FAILURE(status)) {
			printk(KERN_ERR PREFIX "Evaluating processor _UID\n");
			return -ENODEV;
		}
		pr->acpi_id = value;
	} else {
		/*
		 * Evalute the processor object.  Note that it is common on SMP to
		 * have the first (boot) processor with a valid PBLK address while
		 * all others have a NULL address.
		 */
		status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer);
		if (ACPI_FAILURE(status)) {
			printk(KERN_ERR PREFIX "Evaluating processor object\n");
			return -ENODEV;
		}

		/*
		 * TBD: Synch processor ID (via LAPIC/LSAPIC structures) on SMP.
		 * >>> 'acpi_get_processor_id(acpi_id, &id)' in arch/xxx/acpi.c
		 */
		pr->acpi_id = object.processor.proc_id;
	}
	cpu_index = get_cpu_id(pr->handle, pr->acpi_id);
460 567
461 /* Handle UP system running SMP kernel, with no LAPIC in MADT */ 568 /* Handle UP system running SMP kernel, with no LAPIC in MADT */
462 if (!cpu0_initialized && (cpu_index == -1) && 569 if (!cpu0_initialized && (cpu_index == -1) &&
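The has_uid branch exists because processors may now be declared as Device nodes with _HID/_UID instead of the legacy Processor statement, in which case the ACPI id comes from _UID. A minimal sketch of that evaluation, factored into a helper for clarity (the helper itself is illustrative):

/* Hypothetical helper mirroring the has_uid branch above: a Device-based
 * processor declares its ACPI id via _UID rather than a Processor opcode. */
static int read_processor_uid(acpi_handle handle, u32 *acpi_id)
{
	unsigned long value;
	acpi_status status;

	status = acpi_evaluate_integer(handle, METHOD_NAME__UID, NULL, &value);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	*acpi_id = value;
	return 0;
}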
@@ -473,7 +580,7 @@ static int acpi_processor_get_info(struct acpi_processor *pr)
473 * less than the max # of CPUs. They should be ignored _iff 580 * less than the max # of CPUs. They should be ignored _iff
474 * they are physically not present. 581 * they are physically not present.
475 */ 582 */
476 if (cpu_index == -1) { 583 if (pr->id == -1) {
477 if (ACPI_FAILURE 584 if (ACPI_FAILURE
478 (acpi_processor_hotadd_init(pr->handle, &pr->id))) { 585 (acpi_processor_hotadd_init(pr->handle, &pr->id))) {
479 return -ENODEV; 586 return -ENODEV;
@@ -490,8 +597,8 @@ static int acpi_processor_get_info(struct acpi_processor *pr)
490 object.processor.pblk_length); 597 object.processor.pblk_length);
491 else { 598 else {
492 pr->throttling.address = object.processor.pblk_address; 599 pr->throttling.address = object.processor.pblk_address;
493 pr->throttling.duty_offset = acpi_fadt.duty_offset; 600 pr->throttling.duty_offset = acpi_gbl_FADT.duty_offset;
494 pr->throttling.duty_width = acpi_fadt.duty_width; 601 pr->throttling.duty_width = acpi_gbl_FADT.duty_width;
495 602
496 pr->pblk = object.processor.pblk_address; 603 pr->pblk = object.processor.pblk_address;
497 604
@@ -525,7 +632,7 @@ static int __cpuinit acpi_processor_start(struct acpi_device *device)
525 632
526 pr = acpi_driver_data(device); 633 pr = acpi_driver_data(device);
527 634
528 result = acpi_processor_get_info(pr); 635 result = acpi_processor_get_info(pr, device->flags.unique_id);
529 if (result) { 636 if (result) {
530 /* Processor is physically not present */ 637 /* Processor is physically not present */
531 return 0; 638 return 0;
@@ -707,7 +814,7 @@ int acpi_processor_device_add(acpi_handle handle, struct acpi_device **device)
707 return -ENODEV; 814 return -ENODEV;
708 815
709 if ((pr->id >= 0) && (pr->id < NR_CPUS)) { 816 if ((pr->id >= 0) && (pr->id < NR_CPUS)) {
710 kobject_uevent(&(*device)->kobj, KOBJ_ONLINE); 817 kobject_uevent(&(*device)->dev.kobj, KOBJ_ONLINE);
711 } 818 }
712 return 0; 819 return 0;
713} 820}
@@ -745,13 +852,13 @@ acpi_processor_hotplug_notify(acpi_handle handle, u32 event, void *data)
745 } 852 }
746 853
747 if (pr->id >= 0 && (pr->id < NR_CPUS)) { 854 if (pr->id >= 0 && (pr->id < NR_CPUS)) {
748 kobject_uevent(&device->kobj, KOBJ_OFFLINE); 855 kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
749 break; 856 break;
750 } 857 }
751 858
752 result = acpi_processor_start(device); 859 result = acpi_processor_start(device);
753 if ((!result) && ((pr->id >= 0) && (pr->id < NR_CPUS))) { 860 if ((!result) && ((pr->id >= 0) && (pr->id < NR_CPUS))) {
754 kobject_uevent(&device->kobj, KOBJ_ONLINE); 861 kobject_uevent(&device->dev.kobj, KOBJ_ONLINE);
755 } else { 862 } else {
756 printk(KERN_ERR PREFIX "Device [%s] failed to start\n", 863 printk(KERN_ERR PREFIX "Device [%s] failed to start\n",
757 acpi_device_bid(device)); 864 acpi_device_bid(device));
@@ -774,7 +881,7 @@ acpi_processor_hotplug_notify(acpi_handle handle, u32 event, void *data)
774 } 881 }
775 882
776 if ((pr->id < NR_CPUS) && (cpu_present(pr->id))) 883 if ((pr->id < NR_CPUS) && (cpu_present(pr->id)))
777 kobject_uevent(&device->kobj, KOBJ_OFFLINE); 884 kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
778 break; 885 break;
779 default: 886 default:
780 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 887 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
@@ -895,6 +1002,12 @@ static int __init acpi_processor_init(void)
895 memset(&processors, 0, sizeof(processors)); 1002 memset(&processors, 0, sizeof(processors));
896 memset(&errata, 0, sizeof(errata)); 1003 memset(&errata, 0, sizeof(errata));
897 1004
1005#ifdef CONFIG_SMP
1006 if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0,
1007 (struct acpi_table_header **)&madt)))
1008 madt = 0;
1009#endif
1010
898 acpi_processor_dir = proc_mkdir(ACPI_PROCESSOR_CLASS, acpi_root_dir); 1011 acpi_processor_dir = proc_mkdir(ACPI_PROCESSOR_CLASS, acpi_root_dir);
899 if (!acpi_processor_dir) 1012 if (!acpi_processor_dir)
900 return -ENOMEM; 1013 return -ENOMEM;
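acpi_processor_init() now fetches the MADT once and caches the pointer for the mapping helpers above. acpi_get_table() hands back a pointer into the already-mapped table, so there is nothing to free on the success path; a minimal restatement of the acquisition (the hunk simply leaves madt at 0 on failure, the helper name here is illustrative):

static struct acpi_table_madt *madt;

/* Cache the MADT at init time; acpi_get_table() returns a pointer to
 * the mapped table, so no allocation needs to be released here. */
static void cache_madt(void)
{
	if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0,
					(struct acpi_table_header **)&madt)))
		madt = NULL;
}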
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 3f30af21574e..6c6751b1405b 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -160,7 +160,7 @@ static inline u32 ticks_elapsed(u32 t1, u32 t2)
160{ 160{
161 if (t2 >= t1) 161 if (t2 >= t1)
162 return (t2 - t1); 162 return (t2 - t1);
163 else if (!acpi_fadt.tmr_val_ext) 163 else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
164 return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF); 164 return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
165 else 165 else
166 return ((0xFFFFFFFF - t1) + t2); 166 return ((0xFFFFFFFF - t1) + t2);
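ticks_elapsed() now keys off the FADT 32-bit-timer flag: the ACPI PM timer counts in either 24 or 32 bits, and when a reading wraps past zero the elapsed count has to be folded back into the timer's width. A standalone check of the same arithmetic (plain C, synthetic tick values):

#include <stdio.h>
#include <stdint.h>

/* Same fold-back arithmetic as ticks_elapsed() above, parameterized on
 * whether the platform PM timer is 32-bit (ACPI_FADT_32BIT_TIMER). */
static uint32_t ticks_elapsed(uint32_t t1, uint32_t t2, int timer_32bit)
{
	if (t2 >= t1)
		return t2 - t1;
	if (!timer_32bit)	/* 24-bit timer wrapped at 0x00FFFFFF */
		return ((0x00FFFFFF - t1) + t2) & 0x00FFFFFF;
	return (0xFFFFFFFF - t1) + t2;	/* 32-bit timer wrapped */
}

int main(void)
{
	/* 24-bit timer: t1 just below the wrap point, t2 just after it */
	printf("24-bit: %u ticks\n", ticks_elapsed(0x00FFFFF0, 0x10, 0));
	/* 32-bit timer wrap */
	printf("32-bit: %u ticks\n", ticks_elapsed(0xFFFFFFF0, 0x10, 1));
	return 0;
}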
@@ -187,8 +187,7 @@ acpi_processor_power_activate(struct acpi_processor *pr,
187 case ACPI_STATE_C3: 187 case ACPI_STATE_C3:
188 /* Disable bus master reload */ 188 /* Disable bus master reload */
189 if (new->type != ACPI_STATE_C3 && pr->flags.bm_check) 189 if (new->type != ACPI_STATE_C3 && pr->flags.bm_check)
190 acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0, 190 acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
191 ACPI_MTX_DO_NOT_LOCK);
192 break; 191 break;
193 } 192 }
194 } 193 }
@@ -198,8 +197,7 @@ acpi_processor_power_activate(struct acpi_processor *pr,
198 case ACPI_STATE_C3: 197 case ACPI_STATE_C3:
199 /* Enable bus master reload */ 198 /* Enable bus master reload */
200 if (old->type != ACPI_STATE_C3 && pr->flags.bm_check) 199 if (old->type != ACPI_STATE_C3 && pr->flags.bm_check)
201 acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1, 200 acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
202 ACPI_MTX_DO_NOT_LOCK);
203 break; 201 break;
204 } 202 }
205 203
@@ -236,7 +234,7 @@ static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
236 /* Dummy wait op - must do something useless after P_LVL2 read 234 /* Dummy wait op - must do something useless after P_LVL2 read
237 because chipsets cannot guarantee that STPCLK# signal 235 because chipsets cannot guarantee that STPCLK# signal
238 gets asserted in time to freeze execution properly. */ 236 gets asserted in time to freeze execution properly. */
239 unused = inl(acpi_fadt.xpm_tmr_blk.address); 237 unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
240 } 238 }
241} 239}
242 240
@@ -291,12 +289,10 @@ static void acpi_processor_idle(void)
291 289
292 pr->power.bm_activity <<= diff; 290 pr->power.bm_activity <<= diff;
293 291
294 acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, 292 acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
295 &bm_status, ACPI_MTX_DO_NOT_LOCK);
296 if (bm_status) { 293 if (bm_status) {
297 pr->power.bm_activity |= 0x1; 294 pr->power.bm_activity |= 0x1;
298 acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 295 acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
299 1, ACPI_MTX_DO_NOT_LOCK);
300 } 296 }
301 /* 297 /*
302 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect 298 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
@@ -338,7 +334,7 @@ static void acpi_processor_idle(void)
338 * detection phase, to work cleanly with logical CPU hotplug. 334 * detection phase, to work cleanly with logical CPU hotplug.
339 */ 335 */
340 if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) && 336 if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
341 !pr->flags.has_cst && !acpi_fadt.plvl2_up) 337 !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
342 cx = &pr->power.states[ACPI_STATE_C1]; 338 cx = &pr->power.states[ACPI_STATE_C1];
343#endif 339#endif
344 340
@@ -384,11 +380,11 @@ static void acpi_processor_idle(void)
384 380
385 case ACPI_STATE_C2: 381 case ACPI_STATE_C2:
386 /* Get start time (ticks) */ 382 /* Get start time (ticks) */
387 t1 = inl(acpi_fadt.xpm_tmr_blk.address); 383 t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
388 /* Invoke C2 */ 384 /* Invoke C2 */
389 acpi_cstate_enter(cx); 385 acpi_cstate_enter(cx);
390 /* Get end time (ticks) */ 386 /* Get end time (ticks) */
391 t2 = inl(acpi_fadt.xpm_tmr_blk.address); 387 t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
392 388
393#ifdef CONFIG_GENERIC_TIME 389#ifdef CONFIG_GENERIC_TIME
394 /* TSC halts in C2, so notify users */ 390 /* TSC halts in C2, so notify users */
@@ -411,8 +407,7 @@ static void acpi_processor_idle(void)
411 * All CPUs are trying to go to C3 407 * All CPUs are trying to go to C3
412 * Disable bus master arbitration 408 * Disable bus master arbitration
413 */ 409 */
414 acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1, 410 acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
415 ACPI_MTX_DO_NOT_LOCK);
416 } 411 }
417 } else { 412 } else {
418 /* SMP with no shared cache... Invalidate cache */ 413 /* SMP with no shared cache... Invalidate cache */
@@ -420,16 +415,15 @@ static void acpi_processor_idle(void)
420 } 415 }
421 416
422 /* Get start time (ticks) */ 417 /* Get start time (ticks) */
423 t1 = inl(acpi_fadt.xpm_tmr_blk.address); 418 t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
424 /* Invoke C3 */ 419 /* Invoke C3 */
425 acpi_cstate_enter(cx); 420 acpi_cstate_enter(cx);
426 /* Get end time (ticks) */ 421 /* Get end time (ticks) */
427 t2 = inl(acpi_fadt.xpm_tmr_blk.address); 422 t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
428 if (pr->flags.bm_check) { 423 if (pr->flags.bm_check) {
429 /* Enable bus master arbitration */ 424 /* Enable bus master arbitration */
430 atomic_dec(&c3_cpu_count); 425 atomic_dec(&c3_cpu_count);
431 acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0, 426 acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
432 ACPI_MTX_DO_NOT_LOCK);
433 } 427 }
434 428
435#ifdef CONFIG_GENERIC_TIME 429#ifdef CONFIG_GENERIC_TIME
@@ -457,7 +451,7 @@ static void acpi_processor_idle(void)
457#ifdef CONFIG_HOTPLUG_CPU 451#ifdef CONFIG_HOTPLUG_CPU
458 /* Don't do promotion/demotion */ 452 /* Don't do promotion/demotion */
459 if ((cx->type == ACPI_STATE_C1) && (num_online_cpus() > 1) && 453 if ((cx->type == ACPI_STATE_C1) && (num_online_cpus() > 1) &&
460 !pr->flags.has_cst && !acpi_fadt.plvl2_up) { 454 !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) {
461 next_state = cx; 455 next_state = cx;
462 goto end; 456 goto end;
463 } 457 }
@@ -627,7 +621,8 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
627 * Check for P_LVL2_UP flag before entering C2 and above on 621 * Check for P_LVL2_UP flag before entering C2 and above on
628 * an SMP system. 622 * an SMP system.
629 */ 623 */
630 if ((num_online_cpus() > 1) && !acpi_fadt.plvl2_up) 624 if ((num_online_cpus() > 1) &&
625 !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
631 return -ENODEV; 626 return -ENODEV;
632#endif 627#endif
633 628
@@ -636,8 +631,8 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
636 pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5; 631 pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;
637 632
638 /* determine latencies from FADT */ 633 /* determine latencies from FADT */
639 pr->power.states[ACPI_STATE_C2].latency = acpi_fadt.plvl2_lat; 634 pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.C2latency;
640 pr->power.states[ACPI_STATE_C3].latency = acpi_fadt.plvl3_lat; 635 pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.C3latency;
641 636
642 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 637 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
643 "lvl2[0x%08x] lvl3[0x%08x]\n", 638 "lvl2[0x%08x] lvl3[0x%08x]\n",
@@ -883,14 +878,13 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
883 * WBINVD should be set in fadt, for C3 state to be 878 * WBINVD should be set in fadt, for C3 state to be
884 * supported on when bm_check is not required. 879 * supported on when bm_check is not required.
885 */ 880 */
886 if (acpi_fadt.wb_invd != 1) { 881 if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
887 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 882 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
888 "Cache invalidation should work properly" 883 "Cache invalidation should work properly"
889 " for C3 to be enabled on SMP systems\n")); 884 " for C3 to be enabled on SMP systems\n"));
890 return; 885 return;
891 } 886 }
892 acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 887 acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
893 0, ACPI_MTX_DO_NOT_LOCK);
894 } 888 }
895 889
896 /* 890 /*
@@ -1096,7 +1090,7 @@ static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
1096 seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n", 1090 seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n",
1097 pr->power.states[i].latency, 1091 pr->power.states[i].latency,
1098 pr->power.states[i].usage, 1092 pr->power.states[i].usage,
1099 pr->power.states[i].time); 1093 (unsigned long long)pr->power.states[i].time);
1100 } 1094 }
1101 1095
1102 end: 1096 end:
@@ -1164,9 +1158,9 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
1164 if (!pr) 1158 if (!pr)
1165 return -EINVAL; 1159 return -EINVAL;
1166 1160
1167 if (acpi_fadt.cst_cnt && !nocst) { 1161 if (acpi_gbl_FADT.cst_control && !nocst) {
1168 status = 1162 status =
1169 acpi_os_write_port(acpi_fadt.smi_cmd, acpi_fadt.cst_cnt, 8); 1163 acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8);
1170 if (ACPI_FAILURE(status)) { 1164 if (ACPI_FAILURE(status)) {
1171 ACPI_EXCEPTION((AE_INFO, status, 1165 ACPI_EXCEPTION((AE_INFO, status,
1172 "Notifying BIOS of _CST ability failed")); 1166 "Notifying BIOS of _CST ability failed"));
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index cbb6f0814ce2..058f13cf3b79 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -352,31 +352,24 @@ int acpi_processor_notify_smm(struct module *calling_module)
352 352
353 is_done = -EIO; 353 is_done = -EIO;
354 354
355 /* Can't write pstate_cnt to smi_cmd if either value is zero */ 355 /* Can't write pstate_control to smi_command if either value is zero */
356 if ((!acpi_fadt.smi_cmd) || (!acpi_fadt.pstate_cnt)) { 356 if ((!acpi_gbl_FADT.smi_command) || (!acpi_gbl_FADT.pstate_control)) {
357 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No SMI port or pstate_cnt\n")); 357 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No SMI port or pstate_control\n"));
358 module_put(calling_module); 358 module_put(calling_module);
359 return 0; 359 return 0;
360 } 360 }
361 361
362 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 362 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
363 "Writing pstate_cnt [0x%x] to smi_cmd [0x%x]\n", 363 "Writing pstate_control [0x%x] to smi_command [0x%x]\n",
364 acpi_fadt.pstate_cnt, acpi_fadt.smi_cmd)); 364 acpi_gbl_FADT.pstate_control, acpi_gbl_FADT.smi_command));
365 365
366 /* FADT v1 doesn't support pstate_cnt, many BIOS vendors use 366 status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
367 * it anyway, so we need to support it... */ 367 (u32) acpi_gbl_FADT.pstate_control, 8);
368 if (acpi_fadt_is_v1) {
369 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
370 "Using v1.0 FADT reserved value for pstate_cnt\n"));
371 }
372
373 status = acpi_os_write_port(acpi_fadt.smi_cmd,
374 (u32) acpi_fadt.pstate_cnt, 8);
375 if (ACPI_FAILURE(status)) { 368 if (ACPI_FAILURE(status)) {
376 ACPI_EXCEPTION((AE_INFO, status, 369 ACPI_EXCEPTION((AE_INFO, status,
377 "Failed to write pstate_cnt [0x%x] to " 370 "Failed to write pstate_control [0x%x] to "
378 "smi_cmd [0x%x]", acpi_fadt.pstate_cnt, 371 "smi_command [0x%x]", acpi_gbl_FADT.pstate_control,
379 acpi_fadt.smi_cmd)); 372 acpi_gbl_FADT.smi_command));
380 module_put(calling_module); 373 module_put(calling_module);
381 return status; 374 return status;
382 } 375 }
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index 0ec7dcde0063..89dff3639abe 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -125,7 +125,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
125 /* Used to clear all duty_value bits */ 125 /* Used to clear all duty_value bits */
126 duty_mask = pr->throttling.state_count - 1; 126 duty_mask = pr->throttling.state_count - 1;
127 127
128 duty_mask <<= acpi_fadt.duty_offset; 128 duty_mask <<= acpi_gbl_FADT.duty_offset;
129 duty_mask = ~duty_mask; 129 duty_mask = ~duty_mask;
130 } 130 }
131 131
@@ -208,7 +208,7 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr)
208 return 0; 208 return 0;
209 } 209 }
210 210
211 pr->throttling.state_count = 1 << acpi_fadt.duty_width; 211 pr->throttling.state_count = 1 << acpi_gbl_FADT.duty_width;
212 212
213 /* 213 /*
214 * Compute state values. Note that throttling displays a linear power/ 214 * Compute state values. Note that throttling displays a linear power/
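The duty_offset/duty_width arithmetic above is easy to check in isolation. A standalone, compilable sketch with made-up FADT values (3 bits of duty width at offset 1 gives 8 T-states):

#include <stdio.h>

int main(void)
{
        unsigned int duty_width = 3, duty_offset = 1;   /* hypothetical FADT values */
        unsigned int state_count = 1u << duty_width;    /* 8 throttling states */
        unsigned int duty_mask = (state_count - 1) << duty_offset;

        /* ~duty_mask is what the driver ANDs into P_CNT to clear the field */
        printf("states=%u duty_mask=0x%x clear_mask=0x%x\n",
               state_count, duty_mask, ~duty_mask);
        return 0;
}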
diff --git a/drivers/acpi/resources/rsaddr.c b/drivers/acpi/resources/rsaddr.c
index 8fa3213ce000..271e61509eeb 100644
--- a/drivers/acpi/resources/rsaddr.c
+++ b/drivers/acpi/resources/rsaddr.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/resources/rscalc.c b/drivers/acpi/resources/rscalc.c
index cf87b0230026..8c6d3fdec38a 100644
--- a/drivers/acpi/resources/rscalc.c
+++ b/drivers/acpi/resources/rscalc.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/resources/rscreate.c b/drivers/acpi/resources/rscreate.c
index 008058acdd39..1358c06a969c 100644
--- a/drivers/acpi/resources/rscreate.c
+++ b/drivers/acpi/resources/rscreate.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/resources/rsdump.c b/drivers/acpi/resources/rsdump.c
index 9c99a723a860..de20a5d6decf 100644
--- a/drivers/acpi/resources/rsdump.c
+++ b/drivers/acpi/resources/rsdump.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/resources/rsinfo.c b/drivers/acpi/resources/rsinfo.c
index 9e7ae2f8a1d3..7e3c335ab320 100644
--- a/drivers/acpi/resources/rsinfo.c
+++ b/drivers/acpi/resources/rsinfo.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/resources/rsio.c b/drivers/acpi/resources/rsio.c
index ea567167c4f2..b297bc3e4419 100644
--- a/drivers/acpi/resources/rsio.c
+++ b/drivers/acpi/resources/rsio.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/resources/rsirq.c b/drivers/acpi/resources/rsirq.c
index 1fa63bc2e36f..5657f7b95039 100644
--- a/drivers/acpi/resources/rsirq.c
+++ b/drivers/acpi/resources/rsirq.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/resources/rslist.c b/drivers/acpi/resources/rslist.c
index 29423ce030ca..a92755c8877d 100644
--- a/drivers/acpi/resources/rslist.c
+++ b/drivers/acpi/resources/rslist.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/resources/rsmemory.c b/drivers/acpi/resources/rsmemory.c
index a5131936d690..521eab7dd8df 100644
--- a/drivers/acpi/resources/rsmemory.c
+++ b/drivers/acpi/resources/rsmemory.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/resources/rsmisc.c b/drivers/acpi/resources/rsmisc.c
index faf6e106b785..3b63b561b94e 100644
--- a/drivers/acpi/resources/rsmisc.c
+++ b/drivers/acpi/resources/rsmisc.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/resources/rsutils.c b/drivers/acpi/resources/rsutils.c
index a9cbee8e8b44..2442a8f8df57 100644
--- a/drivers/acpi/resources/rsutils.c
+++ b/drivers/acpi/resources/rsutils.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/resources/rsxface.c b/drivers/acpi/resources/rsxface.c
index 1999e2ab7daa..991f8901498c 100644
--- a/drivers/acpi/resources/rsxface.c
+++ b/drivers/acpi/resources/rsxface.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 283d87522c5d..64f26db10c8e 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -21,101 +21,305 @@ extern struct acpi_device *acpi_root;
21#define ACPI_BUS_DEVICE_NAME "System Bus" 21#define ACPI_BUS_DEVICE_NAME "System Bus"
22 22
23static LIST_HEAD(acpi_device_list); 23static LIST_HEAD(acpi_device_list);
24static LIST_HEAD(acpi_bus_id_list);
24DEFINE_SPINLOCK(acpi_device_lock); 25DEFINE_SPINLOCK(acpi_device_lock);
25LIST_HEAD(acpi_wakeup_device_list); 26LIST_HEAD(acpi_wakeup_device_list);
26 27
28struct acpi_device_bus_id{
29 char bus_id[15];
30 unsigned int instance_no;
31 struct list_head node;
32};
33static int acpi_eject_operation(acpi_handle handle, int lockable)
34{
35 struct acpi_object_list arg_list;
36 union acpi_object arg;
37 acpi_status status = AE_OK;
38
39 /*
40 * TBD: evaluate _PS3?
41 */
42
43 if (lockable) {
44 arg_list.count = 1;
45 arg_list.pointer = &arg;
46 arg.type = ACPI_TYPE_INTEGER;
47 arg.integer.value = 0;
48 acpi_evaluate_object(handle, "_LCK", &arg_list, NULL);
49 }
50
51 arg_list.count = 1;
52 arg_list.pointer = &arg;
53 arg.type = ACPI_TYPE_INTEGER;
54 arg.integer.value = 1;
55
56 /*
57 * TBD: _EJD support.
58 */
59
60 status = acpi_evaluate_object(handle, "_EJ0", &arg_list, NULL);
61 if (ACPI_FAILURE(status)) {
62 return (-ENODEV);
63 }
64
65 return (0);
66}
27 67
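acpi_eject_operation() above uses the standard ACPICA convention for methods that take one integer argument: wrap a union acpi_object in an acpi_object_list. Reduced to its essentials (same calls as the diff, with the _LCK branch stripped):

static acpi_status eject_sketch(acpi_handle handle)
{
        struct acpi_object_list args;
        union acpi_object arg;

        arg.type = ACPI_TYPE_INTEGER;
        arg.integer.value = 1;          /* _EJ0(1) requests ejection */
        args.count = 1;
        args.pointer = &arg;

        return acpi_evaluate_object(handle, "_EJ0", &args, NULL);
}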
28static void acpi_device_release(struct kobject *kobj) 68static ssize_t
69acpi_eject_store(struct device *d, struct device_attribute *attr,
70 const char *buf, size_t count)
29{ 71{
30 struct acpi_device *dev = container_of(kobj, struct acpi_device, kobj); 72 int result;
31 kfree(dev->pnp.cid_list); 73 int ret = count;
32 kfree(dev); 74 int islockable;
75 acpi_status status;
76 acpi_handle handle;
77 acpi_object_type type = 0;
78 struct acpi_device *acpi_device = to_acpi_device(d);
79
80 if ((!count) || (buf[0] != '1')) {
81 return -EINVAL;
82 }
83#ifndef FORCE_EJECT
84 if (acpi_device->driver == NULL) {
85 ret = -ENODEV;
86 goto err;
87 }
88#endif
89 status = acpi_get_type(acpi_device->handle, &type);
90 if (ACPI_FAILURE(status) || (!acpi_device->flags.ejectable)) {
91 ret = -ENODEV;
92 goto err;
93 }
94
95 islockable = acpi_device->flags.lockable;
96 handle = acpi_device->handle;
97
98 result = acpi_bus_trim(acpi_device, 1);
99
100 if (!result)
101 result = acpi_eject_operation(handle, islockable);
102
103 if (result) {
104 ret = -EBUSY;
105 }
106 err:
107 return ret;
33} 108}
34 109
35struct acpi_device_attribute { 110static DEVICE_ATTR(eject, 0200, NULL, acpi_eject_store);
36 struct attribute attr; 111
37 ssize_t(*show) (struct acpi_device *, char *); 112static ssize_t
38 ssize_t(*store) (struct acpi_device *, const char *, size_t); 113acpi_device_hid_show(struct device *dev, struct device_attribute *attr, char *buf) {
39}; 114 struct acpi_device *acpi_dev = to_acpi_device(dev);
115
116 return sprintf(buf, "%s\n", acpi_dev->pnp.hardware_id);
117}
118static DEVICE_ATTR(hid, 0444, acpi_device_hid_show, NULL);
119
120static ssize_t
121acpi_device_path_show(struct device *dev, struct device_attribute *attr, char *buf) {
122 struct acpi_device *acpi_dev = to_acpi_device(dev);
123 struct acpi_buffer path = {ACPI_ALLOCATE_BUFFER, NULL};
124 int result;
125
126 result = acpi_get_name(acpi_dev->handle, ACPI_FULL_PATHNAME, &path);
127 if(result)
128 goto end;
129
130 result = sprintf(buf, "%s\n", (char*)path.pointer);
131 kfree(path.pointer);
132 end:
133 return result;
134}
135static DEVICE_ATTR(path, 0444, acpi_device_path_show, NULL);
136
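The hid/path/eject files above all follow the same driver-model attribute pattern: DEVICE_ATTR(name, mode, show, store) emits a struct device_attribute called dev_attr_name, which acpi_device_setup_files() later hands to device_create_file(). A hypothetical extra attribute, for illustration only:

/* "example" is not part of the patch; it only shows the pattern. */
static ssize_t example_show(struct device *dev, struct device_attribute *attr,
                            char *buf)
{
        struct acpi_device *acpi_dev = to_acpi_device(dev);

        return sprintf(buf, "%s\n", acpi_dev->pnp.bus_id);
}
static DEVICE_ATTR(example, 0444, example_show, NULL);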
137static int acpi_device_setup_files(struct acpi_device *dev)
138{
139 acpi_status status;
140 acpi_handle temp;
141 int result = 0;
142
143 /*
 144 * Devices enumerated from the FADT don't have a "path" attribute
145 */
146 if(dev->handle) {
147 result = device_create_file(&dev->dev, &dev_attr_path);
148 if(result)
149 goto end;
150 }
40 151
41typedef void acpi_device_sysfs_files(struct kobject *, 152 if(dev->flags.hardware_id) {
42 const struct attribute *); 153 result = device_create_file(&dev->dev, &dev_attr_hid);
154 if(result)
155 goto end;
156 }
43 157
44static void setup_sys_fs_device_files(struct acpi_device *dev, 158 /*
45 acpi_device_sysfs_files * func); 159 * If device has _EJ0, 'eject' file is created that is used to trigger
160 * hot-removal function from userland.
161 */
162 status = acpi_get_handle(dev->handle, "_EJ0", &temp);
163 if (ACPI_SUCCESS(status))
164 result = device_create_file(&dev->dev, &dev_attr_eject);
165 end:
166 return result;
167}
46 168
47#define create_sysfs_device_files(dev) \ 169static void acpi_device_remove_files(struct acpi_device *dev)
48 setup_sys_fs_device_files(dev, (acpi_device_sysfs_files *)&sysfs_create_file) 170{
49#define remove_sysfs_device_files(dev) \ 171 acpi_status status;
50 setup_sys_fs_device_files(dev, (acpi_device_sysfs_files *)&sysfs_remove_file) 172 acpi_handle temp;
51 173
52#define to_acpi_device(n) container_of(n, struct acpi_device, kobj) 174 /*
53#define to_handle_attr(n) container_of(n, struct acpi_device_attribute, attr); 175 * If device has _EJ0, 'eject' file is created that is used to trigger
176 * hot-removal function from userland.
177 */
178 status = acpi_get_handle(dev->handle, "_EJ0", &temp);
179 if (ACPI_SUCCESS(status))
180 device_remove_file(&dev->dev, &dev_attr_eject);
54 181
55static ssize_t acpi_device_attr_show(struct kobject *kobj, 182 if(dev->flags.hardware_id)
56 struct attribute *attr, char *buf) 183 device_remove_file(&dev->dev, &dev_attr_hid);
184 if(dev->handle)
185 device_remove_file(&dev->dev, &dev_attr_path);
186}
187/* --------------------------------------------------------------------------
188 ACPI Bus operations
189 -------------------------------------------------------------------------- */
190static void acpi_device_release(struct device *dev)
57{ 191{
58 struct acpi_device *device = to_acpi_device(kobj); 192 struct acpi_device *acpi_dev = to_acpi_device(dev);
59 struct acpi_device_attribute *attribute = to_handle_attr(attr); 193
60 return attribute->show ? attribute->show(device, buf) : -EIO; 194 kfree(acpi_dev->pnp.cid_list);
195 kfree(acpi_dev);
61} 196}
62static ssize_t acpi_device_attr_store(struct kobject *kobj, 197
63 struct attribute *attr, const char *buf, 198static int acpi_device_suspend(struct device *dev, pm_message_t state)
64 size_t len)
65{ 199{
66 struct acpi_device *device = to_acpi_device(kobj); 200 struct acpi_device *acpi_dev = to_acpi_device(dev);
67 struct acpi_device_attribute *attribute = to_handle_attr(attr); 201 struct acpi_driver *acpi_drv = acpi_dev->driver;
68 return attribute->store ? attribute->store(device, buf, len) : -EIO; 202
203 if (acpi_drv && acpi_drv->ops.suspend)
204 return acpi_drv->ops.suspend(acpi_dev, state);
205 return 0;
69} 206}
70 207
71static struct sysfs_ops acpi_device_sysfs_ops = { 208static int acpi_device_resume(struct device *dev)
72 .show = acpi_device_attr_show, 209{
73 .store = acpi_device_attr_store, 210 struct acpi_device *acpi_dev = to_acpi_device(dev);
74}; 211 struct acpi_driver *acpi_drv = acpi_dev->driver;
75 212
76static struct kobj_type ktype_acpi_ns = { 213 if (acpi_drv && acpi_drv->ops.resume)
77 .sysfs_ops = &acpi_device_sysfs_ops, 214 return acpi_drv->ops.resume(acpi_dev);
78 .release = acpi_device_release, 215 return 0;
79}; 216}
80 217
81static int namespace_uevent(struct kset *kset, struct kobject *kobj, 218static int acpi_bus_match(struct device *dev, struct device_driver *drv)
82 char **envp, int num_envp, char *buffer,
83 int buffer_size)
84{ 219{
85 struct acpi_device *dev = to_acpi_device(kobj); 220 struct acpi_device *acpi_dev = to_acpi_device(dev);
86 int i = 0; 221 struct acpi_driver *acpi_drv = to_acpi_driver(drv);
87 int len = 0;
88 222
89 if (!dev->driver) 223 return !acpi_match_ids(acpi_dev, acpi_drv->ids);
90 return 0; 224}
91 225
92 if (add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len, 226static int acpi_device_uevent(struct device *dev, char **envp, int num_envp,
93 "PHYSDEVDRIVER=%s", dev->driver->name)) 227 char *buffer, int buffer_size)
228{
229 struct acpi_device *acpi_dev = to_acpi_device(dev);
230 int i = 0, length = 0, ret = 0;
231
232 if (acpi_dev->flags.hardware_id)
233 ret = add_uevent_var(envp, num_envp, &i,
234 buffer, buffer_size, &length,
235 "HWID=%s", acpi_dev->pnp.hardware_id);
236 if (ret)
94 return -ENOMEM; 237 return -ENOMEM;
238 if (acpi_dev->flags.compatible_ids) {
239 int j;
240 struct acpi_compatible_id_list *cid_list;
241
242 cid_list = acpi_dev->pnp.cid_list;
243
244 for (j = 0; j < cid_list->count; j++) {
245 ret = add_uevent_var(envp, num_envp, &i, buffer,
246 buffer_size, &length, "COMPTID=%s",
247 cid_list->id[j].value);
248 if (ret)
249 return -ENOMEM;
250 }
251 }
95 252
96 envp[i] = NULL; 253 envp[i] = NULL;
254 return 0;
255}
256
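The uevent callback feeds NUL-separated KEY=value strings to udev through the (pre-2.6.23) seven-argument add_uevent_var(). Its skeleton, with just the HWID key:

/* Sketch of the old add_uevent_var() contract: i, buffer and len are
 * cursors the helper advances; envp must end up NULL-terminated. */
static int uevent_sketch(struct acpi_device *acpi_dev, char **envp,
                         int num_envp, char *buffer, int buffer_size)
{
        int i = 0, len = 0;

        if (add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &len,
                           "HWID=%s", acpi_dev->pnp.hardware_id))
                return -ENOMEM;

        envp[i] = NULL;
        return 0;
}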
257static int acpi_bus_driver_init(struct acpi_device *, struct acpi_driver *);
258static int acpi_start_single_object(struct acpi_device *);
259static int acpi_device_probe(struct device * dev)
260{
261 struct acpi_device *acpi_dev = to_acpi_device(dev);
262 struct acpi_driver *acpi_drv = to_acpi_driver(dev->driver);
263 int ret;
264
265 ret = acpi_bus_driver_init(acpi_dev, acpi_drv);
266 if (!ret) {
267 if (acpi_dev->bus_ops.acpi_op_start)
268 acpi_start_single_object(acpi_dev);
269 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
270 "Found driver [%s] for device [%s]\n",
271 acpi_drv->name, acpi_dev->pnp.bus_id));
272 get_device(dev);
273 }
274 return ret;
275}
97 276
277static int acpi_device_remove(struct device * dev)
278{
279 struct acpi_device *acpi_dev = to_acpi_device(dev);
280 struct acpi_driver *acpi_drv = acpi_dev->driver;
281
282 if (acpi_drv) {
283 if (acpi_drv->ops.stop)
284 acpi_drv->ops.stop(acpi_dev, acpi_dev->removal_type);
285 if (acpi_drv->ops.remove)
286 acpi_drv->ops.remove(acpi_dev, acpi_dev->removal_type);
287 }
288 acpi_dev->driver = NULL;
289 acpi_driver_data(dev) = NULL;
290
291 put_device(dev);
98 return 0; 292 return 0;
99} 293}
100 294
101static struct kset_uevent_ops namespace_uevent_ops = { 295static void acpi_device_shutdown(struct device *dev)
102 .uevent = &namespace_uevent, 296{
103}; 297 struct acpi_device *acpi_dev = to_acpi_device(dev);
298 struct acpi_driver *acpi_drv = acpi_dev->driver;
299
300 if (acpi_drv && acpi_drv->ops.shutdown)
301 acpi_drv->ops.shutdown(acpi_dev);
104 302
105static struct kset acpi_namespace_kset = { 303 return ;
106 .kobj = { 304}
107 .name = "namespace", 305
108 }, 306static struct bus_type acpi_bus_type = {
109 .subsys = &acpi_subsys, 307 .name = "acpi",
110 .ktype = &ktype_acpi_ns, 308 .suspend = acpi_device_suspend,
111 .uevent_ops = &namespace_uevent_ops, 309 .resume = acpi_device_resume,
310 .shutdown = acpi_device_shutdown,
311 .match = acpi_bus_match,
312 .probe = acpi_device_probe,
313 .remove = acpi_device_remove,
314 .uevent = acpi_device_uevent,
112}; 315};
113 316
114static void acpi_device_register(struct acpi_device *device, 317static int acpi_device_register(struct acpi_device *device,
115 struct acpi_device *parent) 318 struct acpi_device *parent)
116{ 319{
117 int err; 320 int result;
118 321 struct acpi_device_bus_id *acpi_device_bus_id, *new_bus_id;
322 int found = 0;
119 /* 323 /*
120 * Linkage 324 * Linkage
121 * ------- 325 * -------
@@ -126,7 +330,33 @@ static void acpi_device_register(struct acpi_device *device,
126 INIT_LIST_HEAD(&device->g_list); 330 INIT_LIST_HEAD(&device->g_list);
127 INIT_LIST_HEAD(&device->wakeup_list); 331 INIT_LIST_HEAD(&device->wakeup_list);
128 332
333 new_bus_id = kzalloc(sizeof(struct acpi_device_bus_id), GFP_KERNEL);
334 if (!new_bus_id) {
335 printk(KERN_ERR PREFIX "Memory allocation error\n");
336 return -ENOMEM;
337 }
338
129 spin_lock(&acpi_device_lock); 339 spin_lock(&acpi_device_lock);
340 /*
341 * Find suitable bus_id and instance number in acpi_bus_id_list
342 * If failed, create one and link it into acpi_bus_id_list
343 */
344 list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node) {
345 if(!strcmp(acpi_device_bus_id->bus_id, device->flags.hardware_id? device->pnp.hardware_id : "device")) {
346 acpi_device_bus_id->instance_no ++;
347 found = 1;
348 kfree(new_bus_id);
349 break;
350 }
351 }
352 if(!found) {
353 acpi_device_bus_id = new_bus_id;
354 strcpy(acpi_device_bus_id->bus_id, device->flags.hardware_id ? device->pnp.hardware_id : "device");
355 acpi_device_bus_id->instance_no = 0;
356 list_add_tail(&acpi_device_bus_id->node, &acpi_bus_id_list);
357 }
358 sprintf(device->dev.bus_id, "%s:%02x", acpi_device_bus_id->bus_id, acpi_device_bus_id->instance_no);
359
130 if (device->parent) { 360 if (device->parent) {
131 list_add_tail(&device->node, &device->parent->children); 361 list_add_tail(&device->node, &device->parent->children);
132 list_add_tail(&device->g_list, &device->parent->g_list); 362 list_add_tail(&device->g_list, &device->parent->g_list);
@@ -136,16 +366,33 @@ static void acpi_device_register(struct acpi_device *device,
136 list_add_tail(&device->wakeup_list, &acpi_wakeup_device_list); 366 list_add_tail(&device->wakeup_list, &acpi_wakeup_device_list);
137 spin_unlock(&acpi_device_lock); 367 spin_unlock(&acpi_device_lock);
138 368
139 strlcpy(device->kobj.name, device->pnp.bus_id, KOBJ_NAME_LEN); 369 if (device->parent)
140 if (parent) 370 device->dev.parent = &parent->dev;
141 device->kobj.parent = &parent->kobj; 371 device->dev.bus = &acpi_bus_type;
142 device->kobj.ktype = &ktype_acpi_ns; 372 device_initialize(&device->dev);
143 device->kobj.kset = &acpi_namespace_kset; 373 device->dev.release = &acpi_device_release;
144 err = kobject_register(&device->kobj); 374 result = device_add(&device->dev);
145 if (err < 0) 375 if(result) {
 146 printk(KERN_WARNING "%s: kobject_register error: %d\n", 376 printk(KERN_ERR PREFIX "Error adding device %s\n", device->dev.bus_id);
147 __FUNCTION__, err); 377 goto end;
148 create_sysfs_device_files(device); 378 }
379
380 result = acpi_device_setup_files(device);
381 if(result)
382 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error creating sysfs interface for device %s\n", device->dev.bus_id));
383
384 device->removal_type = ACPI_BUS_REMOVAL_NORMAL;
385 return 0;
386 end:
387 spin_lock(&acpi_device_lock);
388 if (device->parent) {
389 list_del(&device->node);
390 list_del(&device->g_list);
391 } else
392 list_del(&device->g_list);
393 list_del(&device->wakeup_list);
394 spin_unlock(&acpi_device_lock);
395 return result;
149} 396}
150 397
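The sprintf at the heart of acpi_device_register() yields stable, enumerable sysfs names: the _HID (or the literal "device" when there is none) plus a per-HID instance counter kept on acpi_bus_id_list. A standalone illustration with a hypothetical battery HID:

#include <stdio.h>

int main(void)
{
        const char *hid = "PNP0C0A";    /* hypothetical battery _HID */
        char bus_id[24];                /* illustrative size */
        unsigned int instance;

        /* Two batteries sharing one HID get :00 and :01 suffixes. */
        for (instance = 0; instance < 2; instance++) {
                snprintf(bus_id, sizeof(bus_id), "%s:%02x", hid, instance);
                printf("%s\n", bus_id); /* PNP0C0A:00, PNP0C0A:01 */
        }
        return 0;
}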
151static void acpi_device_unregister(struct acpi_device *device, int type) 398static void acpi_device_unregister(struct acpi_device *device, int type)
@@ -158,81 +405,143 @@ static void acpi_device_unregister(struct acpi_device *device, int type)
158 list_del(&device->g_list); 405 list_del(&device->g_list);
159 406
160 list_del(&device->wakeup_list); 407 list_del(&device->wakeup_list);
161
162 spin_unlock(&acpi_device_lock); 408 spin_unlock(&acpi_device_lock);
163 409
164 acpi_detach_data(device->handle, acpi_bus_data_handler); 410 acpi_detach_data(device->handle, acpi_bus_data_handler);
165 remove_sysfs_device_files(device); 411
166 kobject_unregister(&device->kobj); 412 acpi_device_remove_files(device);
413 device_unregister(&device->dev);
167} 414}
168 415
169void acpi_bus_data_handler(acpi_handle handle, u32 function, void *context) 416/* --------------------------------------------------------------------------
417 Driver Management
418 -------------------------------------------------------------------------- */
419/**
420 * acpi_bus_driver_init - add a device to a driver
421 * @device: the device to add and initialize
422 * @driver: driver for the device
423 *
424 * Used to initialize a device via its device driver. Called whenever a
425 * driver is bound to a device. Invokes the driver's add() ops.
426 */
427static int
428acpi_bus_driver_init(struct acpi_device *device, struct acpi_driver *driver)
170{ 429{
430 int result = 0;
171 431
172 /* TBD */
173 432
174 return; 433 if (!device || !driver)
175} 434 return -EINVAL;
176 435
177static int acpi_bus_get_power_flags(struct acpi_device *device) 436 if (!driver->ops.add)
178{ 437 return -ENOSYS;
179 acpi_status status = 0;
180 acpi_handle handle = NULL;
181 u32 i = 0;
182 438
439 result = driver->ops.add(device);
440 if (result) {
441 device->driver = NULL;
442 acpi_driver_data(device) = NULL;
443 return result;
444 }
183 445
184 /* 446 device->driver = driver;
185 * Power Management Flags
186 */
187 status = acpi_get_handle(device->handle, "_PSC", &handle);
188 if (ACPI_SUCCESS(status))
189 device->power.flags.explicit_get = 1;
190 status = acpi_get_handle(device->handle, "_IRC", &handle);
191 if (ACPI_SUCCESS(status))
192 device->power.flags.inrush_current = 1;
193 447
194 /* 448 /*
195 * Enumerate supported power management states 449 * TBD - Configuration Management: Assign resources to device based
450 * upon possible configuration and currently allocated resources.
196 */ 451 */
197 for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3; i++) {
198 struct acpi_device_power_state *ps = &device->power.states[i];
199 char object_name[5] = { '_', 'P', 'R', '0' + i, '\0' };
200 452
201 /* Evaluate "_PRx" to se if power resources are referenced */ 453 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
202 acpi_evaluate_reference(device->handle, object_name, NULL, 454 "Driver successfully bound to device\n"));
203 &ps->resources); 455 return 0;
204 if (ps->resources.count) { 456}
205 device->power.flags.power_resources = 1;
206 ps->flags.valid = 1;
207 }
208 457
209 /* Evaluate "_PSx" to see if we can do explicit sets */ 458static int acpi_start_single_object(struct acpi_device *device)
210 object_name[2] = 'S'; 459{
211 status = acpi_get_handle(device->handle, object_name, &handle); 460 int result = 0;
212 if (ACPI_SUCCESS(status)) { 461 struct acpi_driver *driver;
213 ps->flags.explicit_set = 1;
214 ps->flags.valid = 1;
215 }
216 462
217 /* State is valid if we have some power control */
218 if (ps->resources.count || ps->flags.explicit_set)
219 ps->flags.valid = 1;
220 463
221 ps->power = -1; /* Unknown - driver assigned */ 464 if (!(driver = device->driver))
222 ps->latency = -1; /* Unknown - driver assigned */ 465 return 0;
466
467 if (driver->ops.start) {
468 result = driver->ops.start(device);
469 if (result && driver->ops.remove)
470 driver->ops.remove(device, ACPI_BUS_REMOVAL_NORMAL);
223 } 471 }
224 472
225 /* Set defaults for D0 and D3 states (always valid) */ 473 return result;
226 device->power.states[ACPI_STATE_D0].flags.valid = 1; 474}
227 device->power.states[ACPI_STATE_D0].power = 100;
228 device->power.states[ACPI_STATE_D3].flags.valid = 1;
229 device->power.states[ACPI_STATE_D3].power = 0;
230 475
231 /* TBD: System wake support and resource requirements. */ 476/**
477 * acpi_bus_register_driver - register a driver with the ACPI bus
478 * @driver: driver being registered
479 *
480 * Registers a driver with the ACPI bus. Searches the namespace for all
481 * devices that match the driver's criteria and binds. Returns zero for
482 * success or a negative error status for failure.
483 */
484int acpi_bus_register_driver(struct acpi_driver *driver)
485{
486 int ret;
232 487
233 device->power.state = ACPI_STATE_UNKNOWN; 488 if (acpi_disabled)
489 return -ENODEV;
490 driver->drv.name = driver->name;
491 driver->drv.bus = &acpi_bus_type;
492 driver->drv.owner = driver->owner;
234 493
235 return 0; 494 ret = driver_register(&driver->drv);
495 return ret;
496}
497
498EXPORT_SYMBOL(acpi_bus_register_driver);
499
500/**
 501 * acpi_bus_unregister_driver - unregisters a driver with the ACPI bus
502 * @driver: driver to unregister
503 *
504 * Unregisters a driver with the ACPI bus. Searches the namespace for all
505 * devices that match the driver's criteria and unbinds.
506 */
507void acpi_bus_unregister_driver(struct acpi_driver *driver)
508{
509 driver_unregister(&driver->drv);
510}
511
512EXPORT_SYMBOL(acpi_bus_unregister_driver);
513
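Under the new scheme a client driver no longer walks the namespace itself; acpi_bus_register_driver() wraps driver_register(), and the core calls back through acpi_bus_match() and acpi_device_probe(). A minimal, hypothetical client (HID and callbacks invented for illustration):

static int example_add(struct acpi_device *device)
{
        return 0;       /* accept the device; real drivers set driver_data */
}

static struct acpi_driver example_driver = {
        .name = "example",
        .ids = "PNP0C0A",               /* compared by acpi_match_ids() */
        .ops = {
                .add = example_add,
        },
};

static int __init example_init(void)
{
        return acpi_bus_register_driver(&example_driver);
}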
514/* --------------------------------------------------------------------------
515 Device Enumeration
516 -------------------------------------------------------------------------- */
517acpi_status
518acpi_bus_get_ejd(acpi_handle handle, acpi_handle *ejd)
519{
520 acpi_status status;
521 acpi_handle tmp;
522 struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
523 union acpi_object *obj;
524
525 status = acpi_get_handle(handle, "_EJD", &tmp);
526 if (ACPI_FAILURE(status))
527 return status;
528
529 status = acpi_evaluate_object(handle, "_EJD", NULL, &buffer);
530 if (ACPI_SUCCESS(status)) {
531 obj = buffer.pointer;
532 status = acpi_get_handle(NULL, obj->string.pointer, ejd);
533 kfree(buffer.pointer);
534 }
535 return status;
536}
537EXPORT_SYMBOL_GPL(acpi_bus_get_ejd);
538
539void acpi_bus_data_handler(acpi_handle handle, u32 function, void *context)
540{
541
542 /* TBD */
543
544 return;
236} 545}
237 546
238int acpi_match_ids(struct acpi_device *device, char *ids) 547int acpi_match_ids(struct acpi_device *device, char *ids)
@@ -254,6 +563,12 @@ int acpi_match_ids(struct acpi_device *device, char *ids)
254 return -ENOENT; 563 return -ENOENT;
255} 564}
256 565
566static int acpi_bus_get_perf_flags(struct acpi_device *device)
567{
568 device->performance.state = ACPI_STATE_UNKNOWN;
569 return 0;
570}
571
257static acpi_status 572static acpi_status
258acpi_bus_extract_wakeup_device_power_package(struct acpi_device *device, 573acpi_bus_extract_wakeup_device_power_package(struct acpi_device *device,
259 union acpi_object *package) 574 union acpi_object *package)
@@ -338,359 +653,66 @@ static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
338 return 0; 653 return 0;
339} 654}
340 655
341/* -------------------------------------------------------------------------- 656static int acpi_bus_get_power_flags(struct acpi_device *device)
342 ACPI sysfs device file support
343 -------------------------------------------------------------------------- */
344static ssize_t acpi_eject_store(struct acpi_device *device,
345 const char *buf, size_t count);
346
347#define ACPI_DEVICE_ATTR(_name,_mode,_show,_store) \
348static struct acpi_device_attribute acpi_device_attr_##_name = \
349 __ATTR(_name, _mode, _show, _store)
350
351ACPI_DEVICE_ATTR(eject, 0200, NULL, acpi_eject_store);
352
353/**
354 * setup_sys_fs_device_files - sets up the device files under device namespace
355 * @dev: acpi_device object
356 * @func: function pointer to create or destroy the device file
357 */
358static void
359setup_sys_fs_device_files(struct acpi_device *dev,
360 acpi_device_sysfs_files * func)
361{
362 acpi_status status;
363 acpi_handle temp = NULL;
364
365 /*
366 * If device has _EJ0, 'eject' file is created that is used to trigger
367 * hot-removal function from userland.
368 */
369 status = acpi_get_handle(dev->handle, "_EJ0", &temp);
370 if (ACPI_SUCCESS(status))
371 (*(func)) (&dev->kobj, &acpi_device_attr_eject.attr);
372}
373
374static int acpi_eject_operation(acpi_handle handle, int lockable)
375{ 657{
376 struct acpi_object_list arg_list; 658 acpi_status status = 0;
377 union acpi_object arg; 659 acpi_handle handle = NULL;
378 acpi_status status = AE_OK; 660 u32 i = 0;
379
380 /*
381 * TBD: evaluate _PS3?
382 */
383
384 if (lockable) {
385 arg_list.count = 1;
386 arg_list.pointer = &arg;
387 arg.type = ACPI_TYPE_INTEGER;
388 arg.integer.value = 0;
389 acpi_evaluate_object(handle, "_LCK", &arg_list, NULL);
390 }
391 661
392 arg_list.count = 1;
393 arg_list.pointer = &arg;
394 arg.type = ACPI_TYPE_INTEGER;
395 arg.integer.value = 1;
396 662
397 /* 663 /*
398 * TBD: _EJD support. 664 * Power Management Flags
399 */ 665 */
400 666 status = acpi_get_handle(device->handle, "_PSC", &handle);
401 status = acpi_evaluate_object(handle, "_EJ0", &arg_list, NULL); 667 if (ACPI_SUCCESS(status))
402 if (ACPI_FAILURE(status)) { 668 device->power.flags.explicit_get = 1;
403 return (-ENODEV); 669 status = acpi_get_handle(device->handle, "_IRC", &handle);
404 } 670 if (ACPI_SUCCESS(status))
405 671 device->power.flags.inrush_current = 1;
406 return (0);
407}
408
409static ssize_t
410acpi_eject_store(struct acpi_device *device, const char *buf, size_t count)
411{
412 int result;
413 int ret = count;
414 int islockable;
415 acpi_status status;
416 acpi_handle handle;
417 acpi_object_type type = 0;
418
419 if ((!count) || (buf[0] != '1')) {
420 return -EINVAL;
421 }
422#ifndef FORCE_EJECT
423 if (device->driver == NULL) {
424 ret = -ENODEV;
425 goto err;
426 }
427#endif
428 status = acpi_get_type(device->handle, &type);
429 if (ACPI_FAILURE(status) || (!device->flags.ejectable)) {
430 ret = -ENODEV;
431 goto err;
432 }
433
434 islockable = device->flags.lockable;
435 handle = device->handle;
436
437 result = acpi_bus_trim(device, 1);
438
439 if (!result)
440 result = acpi_eject_operation(handle, islockable);
441
442 if (result) {
443 ret = -EBUSY;
444 }
445 err:
446 return ret;
447}
448
449/* --------------------------------------------------------------------------
450 Performance Management
451 -------------------------------------------------------------------------- */
452
453static int acpi_bus_get_perf_flags(struct acpi_device *device)
454{
455 device->performance.state = ACPI_STATE_UNKNOWN;
456 return 0;
457}
458
459/* --------------------------------------------------------------------------
460 Driver Management
461 -------------------------------------------------------------------------- */
462
463static LIST_HEAD(acpi_bus_drivers);
464
465/**
466 * acpi_bus_match - match device IDs to driver's supported IDs
467 * @device: the device that we are trying to match to a driver
468 * @driver: driver whose device id table is being checked
469 *
470 * Checks the device's hardware (_HID) or compatible (_CID) ids to see if it
471 * matches the specified driver's criteria.
472 */
473static int
474acpi_bus_match(struct acpi_device *device, struct acpi_driver *driver)
475{
476 if (driver && driver->ops.match)
477 return driver->ops.match(device, driver);
478 return acpi_match_ids(device, driver->ids);
479}
480
481/**
482 * acpi_bus_driver_init - add a device to a driver
483 * @device: the device to add and initialize
484 * @driver: driver for the device
485 *
486 * Used to initialize a device via its device driver. Called whenever a
487 * driver is bound to a device. Invokes the driver's add() and start() ops.
488 */
489static int
490acpi_bus_driver_init(struct acpi_device *device, struct acpi_driver *driver)
491{
492 int result = 0;
493
494
495 if (!device || !driver)
496 return -EINVAL;
497
498 if (!driver->ops.add)
499 return -ENOSYS;
500
501 result = driver->ops.add(device);
502 if (result) {
503 device->driver = NULL;
504 acpi_driver_data(device) = NULL;
505 return result;
506 }
507
508 device->driver = driver;
509 672
510 /* 673 /*
511 * TBD - Configuration Management: Assign resources to device based 674 * Enumerate supported power management states
512 * upon possible configuration and currently allocated resources.
513 */ 675 */
676 for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3; i++) {
677 struct acpi_device_power_state *ps = &device->power.states[i];
678 char object_name[5] = { '_', 'P', 'R', '0' + i, '\0' };
514 679
 515 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 680 /* Evaluate "_PRx" to see if power resources are referenced */
516 "Driver successfully bound to device\n")); 681 acpi_evaluate_reference(device->handle, object_name, NULL,
517 return 0; 682 &ps->resources);
518} 683 if (ps->resources.count) {
519 684 device->power.flags.power_resources = 1;
520static int acpi_start_single_object(struct acpi_device *device) 685 ps->flags.valid = 1;
521{
522 int result = 0;
523 struct acpi_driver *driver;
524
525
526 if (!(driver = device->driver))
527 return 0;
528
529 if (driver->ops.start) {
530 result = driver->ops.start(device);
531 if (result && driver->ops.remove)
532 driver->ops.remove(device, ACPI_BUS_REMOVAL_NORMAL);
533 }
534
535 return result;
536}
537
538static void acpi_driver_attach(struct acpi_driver *drv)
539{
540 struct list_head *node, *next;
541
542
543 spin_lock(&acpi_device_lock);
544 list_for_each_safe(node, next, &acpi_device_list) {
545 struct acpi_device *dev =
546 container_of(node, struct acpi_device, g_list);
547
548 if (dev->driver || !dev->status.present)
549 continue;
550 spin_unlock(&acpi_device_lock);
551
552 if (!acpi_bus_match(dev, drv)) {
553 if (!acpi_bus_driver_init(dev, drv)) {
554 acpi_start_single_object(dev);
555 atomic_inc(&drv->references);
556 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
557 "Found driver [%s] for device [%s]\n",
558 drv->name, dev->pnp.bus_id));
559 }
560 } 686 }
561 spin_lock(&acpi_device_lock);
562 }
563 spin_unlock(&acpi_device_lock);
564}
565
566static void acpi_driver_detach(struct acpi_driver *drv)
567{
568 struct list_head *node, *next;
569 687
570 688 /* Evaluate "_PSx" to see if we can do explicit sets */
571 spin_lock(&acpi_device_lock); 689 object_name[2] = 'S';
572 list_for_each_safe(node, next, &acpi_device_list) { 690 status = acpi_get_handle(device->handle, object_name, &handle);
573 struct acpi_device *dev = 691 if (ACPI_SUCCESS(status)) {
574 container_of(node, struct acpi_device, g_list); 692 ps->flags.explicit_set = 1;
575 693 ps->flags.valid = 1;
576 if (dev->driver == drv) {
577 spin_unlock(&acpi_device_lock);
578 if (drv->ops.remove)
579 drv->ops.remove(dev, ACPI_BUS_REMOVAL_NORMAL);
580 spin_lock(&acpi_device_lock);
581 dev->driver = NULL;
582 dev->driver_data = NULL;
583 atomic_dec(&drv->references);
584 } 694 }
585 }
586 spin_unlock(&acpi_device_lock);
587}
588
589/**
590 * acpi_bus_register_driver - register a driver with the ACPI bus
591 * @driver: driver being registered
592 *
593 * Registers a driver with the ACPI bus. Searches the namespace for all
594 * devices that match the driver's criteria and binds. Returns zero for
595 * success or a negative error status for failure.
596 */
597int acpi_bus_register_driver(struct acpi_driver *driver)
598{
599
600 if (acpi_disabled)
601 return -ENODEV;
602
603 spin_lock(&acpi_device_lock);
604 list_add_tail(&driver->node, &acpi_bus_drivers);
605 spin_unlock(&acpi_device_lock);
606 acpi_driver_attach(driver);
607
608 return 0;
609}
610
611EXPORT_SYMBOL(acpi_bus_register_driver);
612
613/**
 614 * acpi_bus_unregister_driver - unregisters a driver with the ACPI bus
615 * @driver: driver to unregister
616 *
617 * Unregisters a driver with the ACPI bus. Searches the namespace for all
618 * devices that match the driver's criteria and unbinds.
619 */
620void acpi_bus_unregister_driver(struct acpi_driver *driver)
621{
622 acpi_driver_detach(driver);
623
624 if (!atomic_read(&driver->references)) {
625 spin_lock(&acpi_device_lock);
626 list_del_init(&driver->node);
627 spin_unlock(&acpi_device_lock);
628 }
629 return;
630}
631
632EXPORT_SYMBOL(acpi_bus_unregister_driver);
633
634/**
635 * acpi_bus_find_driver - check if there is a driver installed for the device
636 * @device: device that we are trying to find a supporting driver for
637 *
638 * Parses the list of registered drivers looking for a driver applicable for
639 * the specified device.
640 */
641static int acpi_bus_find_driver(struct acpi_device *device)
642{
643 int result = 0;
644 struct list_head *node, *next;
645 695
696 /* State is valid if we have some power control */
697 if (ps->resources.count || ps->flags.explicit_set)
698 ps->flags.valid = 1;
646 699
647 spin_lock(&acpi_device_lock); 700 ps->power = -1; /* Unknown - driver assigned */
648 list_for_each_safe(node, next, &acpi_bus_drivers) { 701 ps->latency = -1; /* Unknown - driver assigned */
649 struct acpi_driver *driver =
650 container_of(node, struct acpi_driver, node);
651
652 atomic_inc(&driver->references);
653 spin_unlock(&acpi_device_lock);
654 if (!acpi_bus_match(device, driver)) {
655 result = acpi_bus_driver_init(device, driver);
656 if (!result)
657 goto Done;
658 }
659 atomic_dec(&driver->references);
660 spin_lock(&acpi_device_lock);
661 } 702 }
662 spin_unlock(&acpi_device_lock);
663
664 Done:
665 return result;
666}
667 703
668/* -------------------------------------------------------------------------- 704 /* Set defaults for D0 and D3 states (always valid) */
669 Device Enumeration 705 device->power.states[ACPI_STATE_D0].flags.valid = 1;
670 -------------------------------------------------------------------------- */ 706 device->power.states[ACPI_STATE_D0].power = 100;
707 device->power.states[ACPI_STATE_D3].flags.valid = 1;
708 device->power.states[ACPI_STATE_D3].power = 0;
671 709
672acpi_status 710 /* TBD: System wake support and resource requirements. */
673acpi_bus_get_ejd(acpi_handle handle, acpi_handle *ejd)
674{
675 acpi_status status;
676 acpi_handle tmp;
677 struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
678 union acpi_object *obj;
679 711
680 status = acpi_get_handle(handle, "_EJD", &tmp); 712 device->power.state = ACPI_STATE_UNKNOWN;
681 if (ACPI_FAILURE(status))
682 return status;
683 713
684 status = acpi_evaluate_object(handle, "_EJD", NULL, &buffer); 714 return 0;
685 if (ACPI_SUCCESS(status)) {
686 obj = buffer.pointer;
687 status = acpi_get_handle(NULL, obj->string.pointer, ejd);
688 kfree(buffer.pointer);
689 }
690 return status;
691} 715}
692EXPORT_SYMBOL_GPL(acpi_bus_get_ejd);
693
694 716
695static int acpi_bus_get_flags(struct acpi_device *device) 717static int acpi_bus_get_flags(struct acpi_device *device)
696{ 718{
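The object-name trick in acpi_bus_get_power_flags() is compact enough to verify standalone: one buffer is patched from "_PRx" to "_PSx" per D-state.

#include <stdio.h>

int main(void)
{
        int i;

        for (i = 0; i <= 3; i++) {      /* ACPI_STATE_D0 .. ACPI_STATE_D3 */
                char object_name[5] = { '_', 'P', 'R', '0' + i, '\0' };

                printf("%s -> ", object_name);  /* _PR0 .. _PR3 */
                object_name[2] = 'S';
                printf("%s\n", object_name);    /* _PS0 .. _PS3 */
        }
        return 0;
}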
@@ -782,6 +804,75 @@ static void acpi_device_get_busid(struct acpi_device *device,
782 } 804 }
783} 805}
784 806
807static int
808acpi_video_bus_match(struct acpi_device *device)
809{
810 acpi_handle h_dummy1;
811 acpi_handle h_dummy2;
812 acpi_handle h_dummy3;
813
814
815 if (!device)
816 return -EINVAL;
817
 818 /* Since ACPI video devices have no HID or CID, we have to check
 819 * the well-known required nodes for each feature we support.
 820 */
821
 822 /* Can this device support video switching? */
823 if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOD", &h_dummy1)) &&
824 ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOS", &h_dummy2)))
825 return 0;
826
 827 /* Can this device retrieve a video ROM? */
828 if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_ROM", &h_dummy1)))
829 return 0;
830
 831 /* Can this device configure which video head is to be POSTed? */
832 if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_VPO", &h_dummy1)) &&
833 ACPI_SUCCESS(acpi_get_handle(device->handle, "_GPD", &h_dummy2)) &&
834 ACPI_SUCCESS(acpi_get_handle(device->handle, "_SPD", &h_dummy3)))
835 return 0;
836
837 return -ENODEV;
838}
839
840/*
 841 * acpi_bay_match - see if a device is an ejectable drive bay
842 *
843 * If an acpi object is ejectable and has one of the ACPI ATA methods defined,
844 * then we can safely call it an ejectable drive bay
845 */
 846static int acpi_bay_match(struct acpi_device *device) {
847 acpi_status status;
848 acpi_handle handle;
849 acpi_handle tmp;
850 acpi_handle phandle;
851
852 handle = device->handle;
853
854 status = acpi_get_handle(handle, "_EJ0", &tmp);
855 if (ACPI_FAILURE(status))
856 return -ENODEV;
857
858 if ((ACPI_SUCCESS(acpi_get_handle(handle, "_GTF", &tmp))) ||
859 (ACPI_SUCCESS(acpi_get_handle(handle, "_GTM", &tmp))) ||
860 (ACPI_SUCCESS(acpi_get_handle(handle, "_STM", &tmp))) ||
861 (ACPI_SUCCESS(acpi_get_handle(handle, "_SDD", &tmp))))
862 return 0;
863
864 if (acpi_get_parent(handle, &phandle))
865 return -ENODEV;
866
867 if ((ACPI_SUCCESS(acpi_get_handle(phandle, "_GTF", &tmp))) ||
868 (ACPI_SUCCESS(acpi_get_handle(phandle, "_GTM", &tmp))) ||
869 (ACPI_SUCCESS(acpi_get_handle(phandle, "_STM", &tmp))) ||
870 (ACPI_SUCCESS(acpi_get_handle(phandle, "_SDD", &tmp))))
871 return 0;
872
873 return -ENODEV;
874}
875
785static void acpi_device_set_id(struct acpi_device *device, 876static void acpi_device_set_id(struct acpi_device *device,
786 struct acpi_device *parent, acpi_handle handle, 877 struct acpi_device *parent, acpi_handle handle,
787 int type) 878 int type)
@@ -812,6 +903,16 @@ static void acpi_device_set_id(struct acpi_device *device,
812 device->pnp.bus_address = info->address; 903 device->pnp.bus_address = info->address;
813 device->flags.bus_address = 1; 904 device->flags.bus_address = 1;
814 } 905 }
906
907 if(!(info->valid & (ACPI_VALID_HID | ACPI_VALID_CID))){
908 status = acpi_video_bus_match(device);
909 if(ACPI_SUCCESS(status))
910 hid = ACPI_VIDEO_HID;
911
912 status = acpi_bay_match(device);
913 if (ACPI_SUCCESS(status))
914 hid = ACPI_BAY_HID;
915 }
815 break; 916 break;
816 case ACPI_BUS_TYPE_POWER: 917 case ACPI_BUS_TYPE_POWER:
817 hid = ACPI_POWER_HID; 918 hid = ACPI_POWER_HID;
@@ -888,86 +989,24 @@ static int acpi_device_set_context(struct acpi_device *device, int type)
888 return result; 989 return result;
889} 990}
890 991
891static void acpi_device_get_debug_info(struct acpi_device *device,
892 acpi_handle handle, int type)
893{
894#ifdef CONFIG_ACPI_DEBUG_OUTPUT
895 char *type_string = NULL;
896 char name[80] = { '?', '\0' };
897 struct acpi_buffer buffer = { sizeof(name), name };
898
899 switch (type) {
900 case ACPI_BUS_TYPE_DEVICE:
901 type_string = "Device";
902 acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
903 break;
904 case ACPI_BUS_TYPE_POWER:
905 type_string = "Power Resource";
906 acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
907 break;
908 case ACPI_BUS_TYPE_PROCESSOR:
909 type_string = "Processor";
910 acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
911 break;
912 case ACPI_BUS_TYPE_SYSTEM:
913 type_string = "System";
914 acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
915 break;
916 case ACPI_BUS_TYPE_THERMAL:
917 type_string = "Thermal Zone";
918 acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
919 break;
920 case ACPI_BUS_TYPE_POWER_BUTTON:
921 type_string = "Power Button";
922 sprintf(name, "PWRB");
923 break;
924 case ACPI_BUS_TYPE_SLEEP_BUTTON:
925 type_string = "Sleep Button";
926 sprintf(name, "SLPB");
927 break;
928 }
929
930 printk(KERN_DEBUG "Found %s %s [%p]\n", type_string, name, handle);
931#endif /*CONFIG_ACPI_DEBUG_OUTPUT */
932}
933
934static int acpi_bus_remove(struct acpi_device *dev, int rmdevice) 992static int acpi_bus_remove(struct acpi_device *dev, int rmdevice)
935{ 993{
936 int result = 0;
937 struct acpi_driver *driver;
938
939
940 if (!dev) 994 if (!dev)
941 return -EINVAL; 995 return -EINVAL;
942 996
943 driver = dev->driver; 997 dev->removal_type = ACPI_BUS_REMOVAL_EJECT;
944 998 device_release_driver(&dev->dev);
945 if ((driver) && (driver->ops.remove)) {
946
947 if (driver->ops.stop) {
948 result = driver->ops.stop(dev, ACPI_BUS_REMOVAL_EJECT);
949 if (result)
950 return result;
951 }
952
953 result = dev->driver->ops.remove(dev, ACPI_BUS_REMOVAL_EJECT);
954 if (result) {
955 return result;
956 }
957
958 atomic_dec(&dev->driver->references);
959 dev->driver = NULL;
960 acpi_driver_data(dev) = NULL;
961 }
962 999
963 if (!rmdevice) 1000 if (!rmdevice)
964 return 0; 1001 return 0;
965 1002
1003 /*
 1004 * Unbind _ADR-based devices on hot removal
1005 */
966 if (dev->flags.bus_address) { 1006 if (dev->flags.bus_address) {
967 if ((dev->parent) && (dev->parent->ops.unbind)) 1007 if ((dev->parent) && (dev->parent->ops.unbind))
968 dev->parent->ops.unbind(dev); 1008 dev->parent->ops.unbind(dev);
969 } 1009 }
970
971 acpi_device_unregister(dev, ACPI_BUS_REMOVAL_EJECT); 1010 acpi_device_unregister(dev, ACPI_BUS_REMOVAL_EJECT);
972 1011
973 return 0; 1012 return 0;
@@ -975,7 +1014,8 @@ static int acpi_bus_remove(struct acpi_device *dev, int rmdevice)
975 1014
976static int 1015static int
977acpi_add_single_object(struct acpi_device **child, 1016acpi_add_single_object(struct acpi_device **child,
978 struct acpi_device *parent, acpi_handle handle, int type) 1017 struct acpi_device *parent, acpi_handle handle, int type,
1018 struct acpi_bus_ops *ops)
979{ 1019{
980 int result = 0; 1020 int result = 0;
981 struct acpi_device *device = NULL; 1021 struct acpi_device *device = NULL;
@@ -992,6 +1032,8 @@ acpi_add_single_object(struct acpi_device **child,
992 1032
993 device->handle = handle; 1033 device->handle = handle;
994 device->parent = parent; 1034 device->parent = parent;
 1035 device->bus_ops = *ops; /* workaround so that .start is not called here */
1036
995 1037
996 acpi_device_get_busid(device, handle, type); 1038 acpi_device_get_busid(device, handle, type);
997 1039
@@ -1076,33 +1118,16 @@ acpi_add_single_object(struct acpi_device **child,
1076 if ((result = acpi_device_set_context(device, type))) 1118 if ((result = acpi_device_set_context(device, type)))
1077 goto end; 1119 goto end;
1078 1120
1079 acpi_device_get_debug_info(device, handle, type); 1121 result = acpi_device_register(device, parent);
1080
1081 acpi_device_register(device, parent);
1082 1122
1083 /* 1123 /*
 1084 * Bind _ADR-Based Devices 1124 * Bind _ADR-based devices on hot add
1085 * -----------------------
1086 * If there's a a bus address (_ADR) then we utilize the parent's
1087 * 'bind' function (if exists) to bind the ACPI- and natively-
1088 * enumerated device representations.
1089 */ 1125 */
1090 if (device->flags.bus_address) { 1126 if (device->flags.bus_address) {
1091 if (device->parent && device->parent->ops.bind) 1127 if (device->parent && device->parent->ops.bind)
1092 device->parent->ops.bind(device); 1128 device->parent->ops.bind(device);
1093 } 1129 }
1094 1130
1095 /*
1096 * Locate & Attach Driver
1097 * ----------------------
1098 * If there's a hardware id (_HID) or compatible ids (_CID) we check
1099 * to see if there's a driver installed for this kind of device. Note
1100 * that drivers can install before or after a device is enumerated.
1101 *
1102 * TBD: Assumes LDM provides driver hot-plug capability.
1103 */
1104 acpi_bus_find_driver(device);
1105
1106 end: 1131 end:
1107 if (!result) 1132 if (!result)
1108 *child = device; 1133 *child = device;
@@ -1188,14 +1213,14 @@ static int acpi_bus_scan(struct acpi_device *start, struct acpi_bus_ops *ops)
1188 1213
1189 if (ops->acpi_op_add) 1214 if (ops->acpi_op_add)
1190 status = acpi_add_single_object(&child, parent, 1215 status = acpi_add_single_object(&child, parent,
1191 chandle, type); 1216 chandle, type, ops);
1192 else 1217 else
1193 status = acpi_bus_get_device(chandle, &child); 1218 status = acpi_bus_get_device(chandle, &child);
1194 1219
1195 if (ACPI_FAILURE(status)) 1220 if (ACPI_FAILURE(status))
1196 continue; 1221 continue;
1197 1222
1198 if (ops->acpi_op_start) { 1223 if (ops->acpi_op_start && !(ops->acpi_op_add)) {
1199 status = acpi_start_single_object(child); 1224 status = acpi_start_single_object(child);
1200 if (ACPI_FAILURE(status)) 1225 if (ACPI_FAILURE(status))
1201 continue; 1226 continue;
@@ -1233,13 +1258,13 @@ acpi_bus_add(struct acpi_device **child,
1233 int result; 1258 int result;
1234 struct acpi_bus_ops ops; 1259 struct acpi_bus_ops ops;
1235 1260
1261 memset(&ops, 0, sizeof(ops));
1262 ops.acpi_op_add = 1;
1236 1263
1237 result = acpi_add_single_object(child, parent, handle, type); 1264 result = acpi_add_single_object(child, parent, handle, type, &ops);
1238 if (!result) { 1265 if (!result)
1239 memset(&ops, 0, sizeof(ops));
1240 ops.acpi_op_add = 1;
1241 result = acpi_bus_scan(*child, &ops); 1266 result = acpi_bus_scan(*child, &ops);
1242 } 1267
1243 return result; 1268 return result;
1244} 1269}
1245 1270
@@ -1325,127 +1350,35 @@ static int acpi_bus_scan_fixed(struct acpi_device *root)
1325{ 1350{
1326 int result = 0; 1351 int result = 0;
1327 struct acpi_device *device = NULL; 1352 struct acpi_device *device = NULL;
1328 1353 struct acpi_bus_ops ops;
1329 1354
1330 if (!root) 1355 if (!root)
1331 return -ENODEV; 1356 return -ENODEV;
1332 1357
1358 memset(&ops, 0, sizeof(ops));
1359 ops.acpi_op_add = 1;
1360 ops.acpi_op_start = 1;
1361
1333 /* 1362 /*
1334 * Enumerate all fixed-feature devices. 1363 * Enumerate all fixed-feature devices.
1335 */ 1364 */
1336 if (acpi_fadt.pwr_button == 0) { 1365 if ((acpi_gbl_FADT.flags & ACPI_FADT_POWER_BUTTON) == 0) {
1337 result = acpi_add_single_object(&device, acpi_root, 1366 result = acpi_add_single_object(&device, acpi_root,
1338 NULL, 1367 NULL,
1339 ACPI_BUS_TYPE_POWER_BUTTON); 1368 ACPI_BUS_TYPE_POWER_BUTTON,
1340 if (!result) 1369 &ops);
1341 result = acpi_start_single_object(device);
1342 } 1370 }
1343 1371
1344 if (acpi_fadt.sleep_button == 0) { 1372 if ((acpi_gbl_FADT.flags & ACPI_FADT_SLEEP_BUTTON) == 0) {
1345 result = acpi_add_single_object(&device, acpi_root, 1373 result = acpi_add_single_object(&device, acpi_root,
1346 NULL, 1374 NULL,
1347 ACPI_BUS_TYPE_SLEEP_BUTTON); 1375 ACPI_BUS_TYPE_SLEEP_BUTTON,
1348 if (!result) 1376 &ops);
1349 result = acpi_start_single_object(device);
1350 } 1377 }
1351 1378
1352 return result; 1379 return result;
1353} 1380}
1354 1381
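
The flag tests above rely on the FADT convention that a zero PWR_BUTTON or SLP_BUTTON bit means the fixed-feature button is present (a set bit means the platform exposes it as a control-method device instead). Restated as a one-liner:

/* Fixed-feature power button exists iff the FADT flag bit is clear. */
static int have_fixed_power_button(void)
{
	return (acpi_gbl_FADT.flags & ACPI_FADT_POWER_BUTTON) == 0;
}
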
1355
1356static inline struct acpi_device * to_acpi_dev(struct device * dev)
1357{
1358 return container_of(dev, struct acpi_device, dev);
1359}
1360
1361
1362static int root_suspend(struct acpi_device * acpi_dev, pm_message_t state)
1363{
1364 struct acpi_device * dev, * next;
1365 int result;
1366
1367 spin_lock(&acpi_device_lock);
1368 list_for_each_entry_safe_reverse(dev, next, &acpi_device_list, g_list) {
1369 if (dev->driver && dev->driver->ops.suspend) {
1370 spin_unlock(&acpi_device_lock);
1371 result = dev->driver->ops.suspend(dev, 0);
1372 if (result) {
1373 printk(KERN_ERR PREFIX "[%s - %s] Suspend failed: %d\n",
1374 acpi_device_name(dev),
1375 acpi_device_bid(dev), result);
1376 }
1377 spin_lock(&acpi_device_lock);
1378 }
1379 }
1380 spin_unlock(&acpi_device_lock);
1381 return 0;
1382}
1383
1384
1385static int acpi_device_suspend(struct device * dev, pm_message_t state)
1386{
1387 struct acpi_device * acpi_dev = to_acpi_dev(dev);
1388
1389 /*
1390 * For now, we should only register 1 generic device -
1391 * the ACPI root device - and from there, we walk the
1392 * tree of ACPI devices to suspend each one using the
1393 * ACPI driver methods.
1394 */
1395 if (acpi_dev->handle == ACPI_ROOT_OBJECT)
1396 root_suspend(acpi_dev, state);
1397 return 0;
1398}
1399
1400
1401
1402static int root_resume(struct acpi_device * acpi_dev)
1403{
1404 struct acpi_device * dev, * next;
1405 int result;
1406
1407 spin_lock(&acpi_device_lock);
1408 list_for_each_entry_safe(dev, next, &acpi_device_list, g_list) {
1409 if (dev->driver && dev->driver->ops.resume) {
1410 spin_unlock(&acpi_device_lock);
1411 result = dev->driver->ops.resume(dev, 0);
1412 if (result) {
1413 printk(KERN_ERR PREFIX "[%s - %s] resume failed: %d\n",
1414 acpi_device_name(dev),
1415 acpi_device_bid(dev), result);
1416 }
1417 spin_lock(&acpi_device_lock);
1418 }
1419 }
1420 spin_unlock(&acpi_device_lock);
1421 return 0;
1422}
1423
1424
1425static int acpi_device_resume(struct device * dev)
1426{
1427 struct acpi_device * acpi_dev = to_acpi_dev(dev);
1428
1429 /*
1430 * For now, we should only register 1 generic device -
1431 * the ACPI root device - and from there, we walk the
1432 * tree of ACPI devices to resume each one using the
1433 * ACPI driver methods.
1434 */
1435 if (acpi_dev->handle == ACPI_ROOT_OBJECT)
1436 root_resume(acpi_dev);
1437 return 0;
1438}
1439
1440
1441static struct bus_type acpi_bus_type = {
1442 .name = "acpi",
1443 .suspend = acpi_device_suspend,
1444 .resume = acpi_device_resume,
1445};
1446
1447
1448
1449static int __init acpi_scan_init(void) 1382static int __init acpi_scan_init(void)
1450{ 1383{
1451 int result; 1384 int result;
@@ -1455,9 +1388,9 @@ static int __init acpi_scan_init(void)
1455 if (acpi_disabled) 1388 if (acpi_disabled)
1456 return 0; 1389 return 0;
1457 1390
1458 result = kset_register(&acpi_namespace_kset); 1391 memset(&ops, 0, sizeof(ops));
1459 if (result < 0) 1392 ops.acpi_op_add = 1;
1460 printk(KERN_ERR PREFIX "kset_register error: %d\n", result); 1393 ops.acpi_op_start = 1;
1461 1394
1462 result = bus_register(&acpi_bus_type); 1395 result = bus_register(&acpi_bus_type);
1463 if (result) { 1396 if (result) {
@@ -1469,32 +1402,16 @@ static int __init acpi_scan_init(void)
1469 * Create the root device in the bus's device tree 1402 * Create the root device in the bus's device tree
1470 */ 1403 */
1471 result = acpi_add_single_object(&acpi_root, NULL, ACPI_ROOT_OBJECT, 1404 result = acpi_add_single_object(&acpi_root, NULL, ACPI_ROOT_OBJECT,
1472 ACPI_BUS_TYPE_SYSTEM); 1405 ACPI_BUS_TYPE_SYSTEM, &ops);
1473 if (result) 1406 if (result)
1474 goto Done; 1407 goto Done;
1475 1408
1476 result = acpi_start_single_object(acpi_root);
1477 if (result)
1478 goto Done;
1479
1480 acpi_root->dev.bus = &acpi_bus_type;
1481 snprintf(acpi_root->dev.bus_id, BUS_ID_SIZE, "%s", acpi_bus_type.name);
1482 result = device_register(&acpi_root->dev);
1483 if (result) {
1484 /* We don't want to quit even if we failed to add suspend/resume */
1485 printk(KERN_ERR PREFIX "Could not register device\n");
1486 }
1487
1488 /* 1409 /*
1489 * Enumerate devices in the ACPI namespace. 1410 * Enumerate devices in the ACPI namespace.
1490 */ 1411 */
1491 result = acpi_bus_scan_fixed(acpi_root); 1412 result = acpi_bus_scan_fixed(acpi_root);
1492 if (!result) { 1413 if (!result)
1493 memset(&ops, 0, sizeof(ops));
1494 ops.acpi_op_add = 1;
1495 ops.acpi_op_start = 1;
1496 result = acpi_bus_scan(acpi_root, &ops); 1414 result = acpi_bus_scan(acpi_root, &ops);
1497 }
1498 1415
1499 if (result) 1416 if (result)
1500 acpi_device_unregister(acpi_root, ACPI_BUS_REMOVAL_NORMAL); 1417 acpi_device_unregister(acpi_root, ACPI_BUS_REMOVAL_NORMAL);
diff --git a/drivers/acpi/sleep/proc.c b/drivers/acpi/sleep/proc.c
index 34962578039d..ccc11b33d89c 100644
--- a/drivers/acpi/sleep/proc.c
+++ b/drivers/acpi/sleep/proc.c
@@ -73,7 +73,7 @@ acpi_system_write_sleep(struct file *file,
73static int acpi_system_alarm_seq_show(struct seq_file *seq, void *offset) 73static int acpi_system_alarm_seq_show(struct seq_file *seq, void *offset)
74{ 74{
75 u32 sec, min, hr; 75 u32 sec, min, hr;
76 u32 day, mo, yr; 76 u32 day, mo, yr, cent = 0;
77 unsigned char rtc_control = 0; 77 unsigned char rtc_control = 0;
78 unsigned long flags; 78 unsigned long flags;
79 79
@@ -87,20 +87,19 @@ static int acpi_system_alarm_seq_show(struct seq_file *seq, void *offset)
87 rtc_control = CMOS_READ(RTC_CONTROL); 87 rtc_control = CMOS_READ(RTC_CONTROL);
88 88
89 /* If we ever get an FACP with proper values... */ 89 /* If we ever get an FACP with proper values... */
90 if (acpi_gbl_FADT->day_alrm) 90 if (acpi_gbl_FADT.day_alarm)
 91 /* ACPI spec: only the low 6 bits are significant */ 91 /* ACPI spec: only the low 6 bits are significant */
92 day = CMOS_READ(acpi_gbl_FADT->day_alrm) & 0x3F; 92 day = CMOS_READ(acpi_gbl_FADT.day_alarm) & 0x3F;
93 else 93 else
94 day = CMOS_READ(RTC_DAY_OF_MONTH); 94 day = CMOS_READ(RTC_DAY_OF_MONTH);
95 if (acpi_gbl_FADT->mon_alrm) 95 if (acpi_gbl_FADT.month_alarm)
96 mo = CMOS_READ(acpi_gbl_FADT->mon_alrm); 96 mo = CMOS_READ(acpi_gbl_FADT.month_alarm);
97 else 97 else
98 mo = CMOS_READ(RTC_MONTH); 98 mo = CMOS_READ(RTC_MONTH);
99 if (acpi_gbl_FADT->century) 99 if (acpi_gbl_FADT.century)
100 yr = CMOS_READ(acpi_gbl_FADT->century) * 100 + 100 cent = CMOS_READ(acpi_gbl_FADT.century);
101 CMOS_READ(RTC_YEAR); 101
102 else 102 yr = CMOS_READ(RTC_YEAR);
103 yr = CMOS_READ(RTC_YEAR);
104 103
105 spin_unlock_irqrestore(&rtc_lock, flags); 104 spin_unlock_irqrestore(&rtc_lock, flags);
106 105
@@ -111,10 +110,11 @@ static int acpi_system_alarm_seq_show(struct seq_file *seq, void *offset)
111 BCD_TO_BIN(day); 110 BCD_TO_BIN(day);
112 BCD_TO_BIN(mo); 111 BCD_TO_BIN(mo);
113 BCD_TO_BIN(yr); 112 BCD_TO_BIN(yr);
113 BCD_TO_BIN(cent);
114 } 114 }
115 115
116 /* we're trusting the FADT (see above) */ 116 /* we're trusting the FADT (see above) */
117 if (!acpi_gbl_FADT->century) 117 if (!acpi_gbl_FADT.century)
118 /* If we're not trusting the FADT, we should at least make it 118 /* If we're not trusting the FADT, we should at least make it
119 * right for _this_ century... ehm, what is _this_ century? 119 * right for _this_ century... ehm, what is _this_ century?
120 * 120 *
@@ -134,6 +134,8 @@ static int acpi_system_alarm_seq_show(struct seq_file *seq, void *offset)
134 * 134 *
135 */ 135 */
136 yr += 2000; 136 yr += 2000;
137 else
138 yr += cent * 100;
137 139
138 seq_printf(seq, "%4.4u-", yr); 140 seq_printf(seq, "%4.4u-", yr);
139 (mo > 12) ? seq_puts(seq, "**-") : seq_printf(seq, "%2.2u-", mo); 141 (mo > 12) ? seq_puts(seq, "**-") : seq_printf(seq, "%2.2u-", mo);
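
The reworked century handling reads the raw CMOS bytes under rtc_lock and composes the year afterwards. The composition, as a standalone arithmetic sketch mirroring the hunk:

static unsigned int compose_year(unsigned int yr, unsigned int cent,
				 int fadt_has_century_byte)
{
	if (!fadt_has_century_byte)
		return yr + 2000;	/* best-effort epoch guess */
	return cent * 100 + yr;		/* e.g. cent 20, yr 7 -> 2007 */
}
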
@@ -317,12 +319,12 @@ acpi_system_write_alarm(struct file *file,
317 * offsets into the CMOS RAM here -- which for some reason are pointing 319 * offsets into the CMOS RAM here -- which for some reason are pointing
318 * to the RTC area of memory. 320 * to the RTC area of memory.
319 */ 321 */
320 if (acpi_gbl_FADT->day_alrm) 322 if (acpi_gbl_FADT.day_alarm)
321 CMOS_WRITE(day, acpi_gbl_FADT->day_alrm); 323 CMOS_WRITE(day, acpi_gbl_FADT.day_alarm);
322 if (acpi_gbl_FADT->mon_alrm) 324 if (acpi_gbl_FADT.month_alarm)
323 CMOS_WRITE(mo, acpi_gbl_FADT->mon_alrm); 325 CMOS_WRITE(mo, acpi_gbl_FADT.month_alarm);
324 if (acpi_gbl_FADT->century) 326 if (acpi_gbl_FADT.century)
325 CMOS_WRITE(yr / 100, acpi_gbl_FADT->century); 327 CMOS_WRITE(yr / 100, acpi_gbl_FADT.century);
326 /* enable the rtc alarm interrupt */ 328 /* enable the rtc alarm interrupt */
327 rtc_control |= RTC_AIE; 329 rtc_control |= RTC_AIE;
328 CMOS_WRITE(rtc_control, RTC_CONTROL); 330 CMOS_WRITE(rtc_control, RTC_CONTROL);
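
The write path is the inverse split: the upper digits go to the FADT-advertised century byte, and the low two digits belong in RTC_YEAR (that write is not shown in this hunk). As a sketch:

static void split_year(unsigned int year, unsigned int *cent, unsigned int *yy)
{
	*cent = year / 100;	/* 2007 -> 20, written to the century byte */
	*yy = year % 100;	/* 2007 ->  7, the RTC_YEAR value */
}
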
diff --git a/drivers/acpi/system.c b/drivers/acpi/system.c
index d86dcb3c2366..7147b0bdab0a 100644
--- a/drivers/acpi/system.c
+++ b/drivers/acpi/system.c
@@ -32,6 +32,11 @@
32 32
33#define _COMPONENT ACPI_SYSTEM_COMPONENT 33#define _COMPONENT ACPI_SYSTEM_COMPONENT
34ACPI_MODULE_NAME("acpi_system") 34ACPI_MODULE_NAME("acpi_system")
35#ifdef MODULE_PARAM_PREFIX
36#undef MODULE_PARAM_PREFIX
37#endif
38#define MODULE_PARAM_PREFIX "acpi."
39
35#define ACPI_SYSTEM_CLASS "system" 40#define ACPI_SYSTEM_CLASS "system"
36#define ACPI_SYSTEM_DRIVER_NAME "ACPI System Driver" 41#define ACPI_SYSTEM_DRIVER_NAME "ACPI System Driver"
37#define ACPI_SYSTEM_DEVICE_NAME "System" 42#define ACPI_SYSTEM_DEVICE_NAME "System"
@@ -39,11 +44,24 @@ ACPI_MODULE_NAME("acpi_system")
39#define ACPI_SYSTEM_FILE_EVENT "event" 44#define ACPI_SYSTEM_FILE_EVENT "event"
40#define ACPI_SYSTEM_FILE_DSDT "dsdt" 45#define ACPI_SYSTEM_FILE_DSDT "dsdt"
41#define ACPI_SYSTEM_FILE_FADT "fadt" 46#define ACPI_SYSTEM_FILE_FADT "fadt"
42extern struct fadt_descriptor acpi_fadt; 47
48/*
49 * Make ACPICA version work as module param
50 */
51static int param_get_acpica_version(char *buffer, struct kernel_param *kp) {
52 int result;
53
54 result = sprintf(buffer, "%x", ACPI_CA_VERSION);
55
56 return result;
57}
58
59module_param_call(acpica_version, NULL, param_get_acpica_version, NULL, 0444);
43 60
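
Overriding MODULE_PARAM_PREFIX to "acpi." means the read-only acpica_version parameter added above should appear as /sys/module/acpi/parameters/acpica_version even for built-in code; the exact path is an inference from that prefix, not something stated in the patch. A small userspace check:

#include <stdio.h>

int main(void)
{
	char buf[32] = "";
	FILE *f = fopen("/sys/module/acpi/parameters/acpica_version", "r");

	if (!f)
		return 1;	/* path assumption did not hold */
	if (fgets(buf, sizeof(buf), f))
		printf("ACPICA version: %s", buf);	/* hex, e.g. 20070126 */
	fclose(f);
	return 0;
}
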
44/* -------------------------------------------------------------------------- 61/* --------------------------------------------------------------------------
45 FS Interface (/proc) 62 FS Interface (/proc)
46 -------------------------------------------------------------------------- */ 63 -------------------------------------------------------------------------- */
64#ifdef CONFIG_ACPI_PROCFS
47 65
48static int acpi_system_read_info(struct seq_file *seq, void *offset) 66static int acpi_system_read_info(struct seq_file *seq, void *offset)
49{ 67{
@@ -63,6 +81,7 @@ static const struct file_operations acpi_system_info_ops = {
63 .llseek = seq_lseek, 81 .llseek = seq_lseek,
64 .release = single_release, 82 .release = single_release,
65}; 83};
84#endif
66 85
67static ssize_t acpi_system_read_dsdt(struct file *, char __user *, size_t, 86static ssize_t acpi_system_read_dsdt(struct file *, char __user *, size_t,
68 loff_t *); 87 loff_t *);
@@ -76,17 +95,16 @@ acpi_system_read_dsdt(struct file *file,
76 char __user * buffer, size_t count, loff_t * ppos) 95 char __user * buffer, size_t count, loff_t * ppos)
77{ 96{
78 acpi_status status = AE_OK; 97 acpi_status status = AE_OK;
79 struct acpi_buffer dsdt = { ACPI_ALLOCATE_BUFFER, NULL }; 98 struct acpi_table_header *dsdt = NULL;
80 ssize_t res; 99 ssize_t res;
81 100
82 101
83 status = acpi_get_table(ACPI_TABLE_ID_DSDT, 1, &dsdt); 102 status = acpi_get_table(ACPI_SIG_DSDT, 1, &dsdt);
84 if (ACPI_FAILURE(status)) 103 if (ACPI_FAILURE(status))
85 return -ENODEV; 104 return -ENODEV;
86 105
87 res = simple_read_from_buffer(buffer, count, ppos, 106 res = simple_read_from_buffer(buffer, count, ppos,
88 dsdt.pointer, dsdt.length); 107 dsdt, dsdt->length);
89 kfree(dsdt.pointer);
90 108
91 return res; 109 return res;
92} 110}
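
This hunk shows the recurring conversion in the series: the old acpi_get_table() filled a caller-owned struct acpi_buffer that had to be kfree'd, while the new three-argument form returns a pointer into ACPICA-managed table memory that must not be freed. A hedged sketch of the new convention:

static ssize_t example_table_length(char *signature)
{
	struct acpi_table_header *table = NULL;
	acpi_status status = acpi_get_table(signature, 1, &table);

	if (ACPI_FAILURE(status))
		return -ENODEV;
	/* No kfree(): ACPICA owns the mapping for the table's lifetime. */
	return table->length;
}
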
@@ -103,17 +121,16 @@ acpi_system_read_fadt(struct file *file,
103 char __user * buffer, size_t count, loff_t * ppos) 121 char __user * buffer, size_t count, loff_t * ppos)
104{ 122{
105 acpi_status status = AE_OK; 123 acpi_status status = AE_OK;
106 struct acpi_buffer fadt = { ACPI_ALLOCATE_BUFFER, NULL }; 124 struct acpi_table_header *fadt = NULL;
107 ssize_t res; 125 ssize_t res;
108 126
109 127
110 status = acpi_get_table(ACPI_TABLE_ID_FADT, 1, &fadt); 128 status = acpi_get_table(ACPI_SIG_FADT, 1, &fadt);
111 if (ACPI_FAILURE(status)) 129 if (ACPI_FAILURE(status))
112 return -ENODEV; 130 return -ENODEV;
113 131
114 res = simple_read_from_buffer(buffer, count, ppos, 132 res = simple_read_from_buffer(buffer, count, ppos,
115 fadt.pointer, fadt.length); 133 fadt, fadt->length);
116 kfree(fadt.pointer);
117 134
118 return res; 135 return res;
119} 136}
@@ -128,6 +145,7 @@ static int __init acpi_system_init(void)
128 if (acpi_disabled) 145 if (acpi_disabled)
129 return 0; 146 return 0;
130 147
148#ifdef CONFIG_ACPI_PROCFS
131 /* 'info' [R] */ 149 /* 'info' [R] */
132 name = ACPI_SYSTEM_FILE_INFO; 150 name = ACPI_SYSTEM_FILE_INFO;
133 entry = create_proc_entry(name, S_IRUGO, acpi_root_dir); 151 entry = create_proc_entry(name, S_IRUGO, acpi_root_dir);
@@ -136,6 +154,7 @@ static int __init acpi_system_init(void)
136 else { 154 else {
137 entry->proc_fops = &acpi_system_info_ops; 155 entry->proc_fops = &acpi_system_info_ops;
138 } 156 }
157#endif
139 158
140 /* 'dsdt' [R] */ 159 /* 'dsdt' [R] */
141 name = ACPI_SYSTEM_FILE_DSDT; 160 name = ACPI_SYSTEM_FILE_DSDT;
@@ -159,7 +178,9 @@ static int __init acpi_system_init(void)
159 Error: 178 Error:
160 remove_proc_entry(ACPI_SYSTEM_FILE_FADT, acpi_root_dir); 179 remove_proc_entry(ACPI_SYSTEM_FILE_FADT, acpi_root_dir);
161 remove_proc_entry(ACPI_SYSTEM_FILE_DSDT, acpi_root_dir); 180 remove_proc_entry(ACPI_SYSTEM_FILE_DSDT, acpi_root_dir);
181#ifdef CONFIG_ACPI_PROCFS
162 remove_proc_entry(ACPI_SYSTEM_FILE_INFO, acpi_root_dir); 182 remove_proc_entry(ACPI_SYSTEM_FILE_INFO, acpi_root_dir);
183#endif
163 184
164 error = -EFAULT; 185 error = -EFAULT;
165 goto Done; 186 goto Done;
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index ffa30c9fccbf..ba4cb200314a 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -38,154 +38,97 @@
38 38
39#define ACPI_MAX_TABLES 128 39#define ACPI_MAX_TABLES 128
40 40
41static char *acpi_table_signatures[ACPI_TABLE_COUNT] = {
42 [ACPI_TABLE_UNKNOWN] = "????",
43 [ACPI_APIC] = "APIC",
44 [ACPI_BOOT] = "BOOT",
45 [ACPI_DBGP] = "DBGP",
46 [ACPI_DSDT] = "DSDT",
47 [ACPI_ECDT] = "ECDT",
48 [ACPI_ETDT] = "ETDT",
49 [ACPI_FADT] = "FACP",
50 [ACPI_FACS] = "FACS",
51 [ACPI_OEMX] = "OEM",
52 [ACPI_PSDT] = "PSDT",
53 [ACPI_SBST] = "SBST",
54 [ACPI_SLIT] = "SLIT",
55 [ACPI_SPCR] = "SPCR",
56 [ACPI_SRAT] = "SRAT",
57 [ACPI_SSDT] = "SSDT",
58 [ACPI_SPMI] = "SPMI",
59 [ACPI_HPET] = "HPET",
60 [ACPI_MCFG] = "MCFG",
61};
62
63static char *mps_inti_flags_polarity[] = { "dfl", "high", "res", "low" }; 41static char *mps_inti_flags_polarity[] = { "dfl", "high", "res", "low" };
64static char *mps_inti_flags_trigger[] = { "dfl", "edge", "res", "level" }; 42static char *mps_inti_flags_trigger[] = { "dfl", "edge", "res", "level" };
65 43
66/* System Description Table (RSDT/XSDT) */ 44static struct acpi_table_desc initial_tables[ACPI_MAX_TABLES] __initdata;
67struct acpi_table_sdt {
68 unsigned long pa;
69 enum acpi_table_id id;
70 unsigned long size;
71} __attribute__ ((packed));
72
73static unsigned long sdt_pa; /* Physical Address */
74static unsigned long sdt_count; /* Table count */
75 45
76static struct acpi_table_sdt sdt_entry[ACPI_MAX_TABLES] __initdata; 46void acpi_table_print_madt_entry(struct acpi_subtable_header * header)
77
78void acpi_table_print(struct acpi_table_header *header, unsigned long phys_addr)
79{
80 char *name = NULL;
81
82 if (!header)
83 return;
84
85 /* Some table signatures aren't good table names */
86
87 if (!strncmp((char *)&header->signature,
88 acpi_table_signatures[ACPI_APIC],
89 sizeof(header->signature))) {
90 name = "MADT";
91 } else if (!strncmp((char *)&header->signature,
92 acpi_table_signatures[ACPI_FADT],
93 sizeof(header->signature))) {
94 name = "FADT";
95 } else
96 name = header->signature;
97
98 printk(KERN_DEBUG PREFIX
99 "%.4s (v%3.3d %6.6s %8.8s 0x%08x %.4s 0x%08x) @ 0x%p\n", name,
100 header->revision, header->oem_id, header->oem_table_id,
101 header->oem_revision, header->asl_compiler_id,
102 header->asl_compiler_revision, (void *)phys_addr);
103}
104
105void acpi_table_print_madt_entry(acpi_table_entry_header * header)
106{ 47{
107 if (!header) 48 if (!header)
108 return; 49 return;
109 50
110 switch (header->type) { 51 switch (header->type) {
111 52
112 case ACPI_MADT_LAPIC: 53 case ACPI_MADT_TYPE_LOCAL_APIC:
113 { 54 {
114 struct acpi_table_lapic *p = 55 struct acpi_madt_local_apic *p =
115 (struct acpi_table_lapic *)header; 56 (struct acpi_madt_local_apic *)header;
116 printk(KERN_INFO PREFIX 57 printk(KERN_INFO PREFIX
117 "LAPIC (acpi_id[0x%02x] lapic_id[0x%02x] %s)\n", 58 "LAPIC (acpi_id[0x%02x] lapic_id[0x%02x] %s)\n",
118 p->acpi_id, p->id, 59 p->processor_id, p->id,
119 p->flags.enabled ? "enabled" : "disabled"); 60 (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
120 } 61 }
121 break; 62 break;
122 63
123 case ACPI_MADT_IOAPIC: 64 case ACPI_MADT_TYPE_IO_APIC:
124 { 65 {
125 struct acpi_table_ioapic *p = 66 struct acpi_madt_io_apic *p =
126 (struct acpi_table_ioapic *)header; 67 (struct acpi_madt_io_apic *)header;
127 printk(KERN_INFO PREFIX 68 printk(KERN_INFO PREFIX
128 "IOAPIC (id[0x%02x] address[0x%08x] gsi_base[%d])\n", 69 "IOAPIC (id[0x%02x] address[0x%08x] gsi_base[%d])\n",
129 p->id, p->address, p->global_irq_base); 70 p->id, p->address, p->global_irq_base);
130 } 71 }
131 break; 72 break;
132 73
133 case ACPI_MADT_INT_SRC_OVR: 74 case ACPI_MADT_TYPE_INTERRUPT_OVERRIDE:
134 { 75 {
135 struct acpi_table_int_src_ovr *p = 76 struct acpi_madt_interrupt_override *p =
136 (struct acpi_table_int_src_ovr *)header; 77 (struct acpi_madt_interrupt_override *)header;
137 printk(KERN_INFO PREFIX 78 printk(KERN_INFO PREFIX
138 "INT_SRC_OVR (bus %d bus_irq %d global_irq %d %s %s)\n", 79 "INT_SRC_OVR (bus %d bus_irq %d global_irq %d %s %s)\n",
139 p->bus, p->bus_irq, p->global_irq, 80 p->bus, p->source_irq, p->global_irq,
140 mps_inti_flags_polarity[p->flags.polarity], 81 mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK],
141 mps_inti_flags_trigger[p->flags.trigger]); 82 mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2]);
142 if (p->flags.reserved) 83 if (p->inti_flags &
84 ~(ACPI_MADT_POLARITY_MASK | ACPI_MADT_TRIGGER_MASK))
143 printk(KERN_INFO PREFIX 85 printk(KERN_INFO PREFIX
144 "INT_SRC_OVR unexpected reserved flags: 0x%x\n", 86 "INT_SRC_OVR unexpected reserved flags: 0x%x\n",
145 p->flags.reserved); 87 p->inti_flags &
88 ~(ACPI_MADT_POLARITY_MASK | ACPI_MADT_TRIGGER_MASK));
146 89
147 } 90 }
148 break; 91 break;
149 92
150 case ACPI_MADT_NMI_SRC: 93 case ACPI_MADT_TYPE_NMI_SOURCE:
151 { 94 {
152 struct acpi_table_nmi_src *p = 95 struct acpi_madt_nmi_source *p =
153 (struct acpi_table_nmi_src *)header; 96 (struct acpi_madt_nmi_source *)header;
154 printk(KERN_INFO PREFIX 97 printk(KERN_INFO PREFIX
155 "NMI_SRC (%s %s global_irq %d)\n", 98 "NMI_SRC (%s %s global_irq %d)\n",
156 mps_inti_flags_polarity[p->flags.polarity], 99 mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK],
157 mps_inti_flags_trigger[p->flags.trigger], 100 mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2],
158 p->global_irq); 101 p->global_irq);
159 } 102 }
160 break; 103 break;
161 104
162 case ACPI_MADT_LAPIC_NMI: 105 case ACPI_MADT_TYPE_LOCAL_APIC_NMI:
163 { 106 {
164 struct acpi_table_lapic_nmi *p = 107 struct acpi_madt_local_apic_nmi *p =
165 (struct acpi_table_lapic_nmi *)header; 108 (struct acpi_madt_local_apic_nmi *)header;
166 printk(KERN_INFO PREFIX 109 printk(KERN_INFO PREFIX
167 "LAPIC_NMI (acpi_id[0x%02x] %s %s lint[0x%x])\n", 110 "LAPIC_NMI (acpi_id[0x%02x] %s %s lint[0x%x])\n",
168 p->acpi_id, 111 p->processor_id,
169 mps_inti_flags_polarity[p->flags.polarity], 112 mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK ],
170 mps_inti_flags_trigger[p->flags.trigger], 113 mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2],
171 p->lint); 114 p->lint);
172 } 115 }
173 break; 116 break;
174 117
175 case ACPI_MADT_LAPIC_ADDR_OVR: 118 case ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE:
176 { 119 {
177 struct acpi_table_lapic_addr_ovr *p = 120 struct acpi_madt_local_apic_override *p =
178 (struct acpi_table_lapic_addr_ovr *)header; 121 (struct acpi_madt_local_apic_override *)header;
179 printk(KERN_INFO PREFIX 122 printk(KERN_INFO PREFIX
180 "LAPIC_ADDR_OVR (address[%p])\n", 123 "LAPIC_ADDR_OVR (address[%p])\n",
181 (void *)(unsigned long)p->address); 124 (void *)(unsigned long)p->address);
182 } 125 }
183 break; 126 break;
184 127
185 case ACPI_MADT_IOSAPIC: 128 case ACPI_MADT_TYPE_IO_SAPIC:
186 { 129 {
187 struct acpi_table_iosapic *p = 130 struct acpi_madt_io_sapic *p =
188 (struct acpi_table_iosapic *)header; 131 (struct acpi_madt_io_sapic *)header;
189 printk(KERN_INFO PREFIX 132 printk(KERN_INFO PREFIX
190 "IOSAPIC (id[0x%x] address[%p] gsi_base[%d])\n", 133 "IOSAPIC (id[0x%x] address[%p] gsi_base[%d])\n",
191 p->id, (void *)(unsigned long)p->address, 134 p->id, (void *)(unsigned long)p->address,
@@ -193,26 +136,26 @@ void acpi_table_print_madt_entry(acpi_table_entry_header * header)
193 } 136 }
194 break; 137 break;
195 138
196 case ACPI_MADT_LSAPIC: 139 case ACPI_MADT_TYPE_LOCAL_SAPIC:
197 { 140 {
198 struct acpi_table_lsapic *p = 141 struct acpi_madt_local_sapic *p =
199 (struct acpi_table_lsapic *)header; 142 (struct acpi_madt_local_sapic *)header;
200 printk(KERN_INFO PREFIX 143 printk(KERN_INFO PREFIX
201 "LSAPIC (acpi_id[0x%02x] lsapic_id[0x%02x] lsapic_eid[0x%02x] %s)\n", 144 "LSAPIC (acpi_id[0x%02x] lsapic_id[0x%02x] lsapic_eid[0x%02x] %s)\n",
202 p->acpi_id, p->id, p->eid, 145 p->processor_id, p->id, p->eid,
203 p->flags.enabled ? "enabled" : "disabled"); 146 (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
204 } 147 }
205 break; 148 break;
206 149
207 case ACPI_MADT_PLAT_INT_SRC: 150 case ACPI_MADT_TYPE_INTERRUPT_SOURCE:
208 { 151 {
209 struct acpi_table_plat_int_src *p = 152 struct acpi_madt_interrupt_source *p =
210 (struct acpi_table_plat_int_src *)header; 153 (struct acpi_madt_interrupt_source *)header;
211 printk(KERN_INFO PREFIX 154 printk(KERN_INFO PREFIX
212 "PLAT_INT_SRC (%s %s type[0x%x] id[0x%04x] eid[0x%x] iosapic_vector[0x%x] global_irq[0x%x]\n", 155 "PLAT_INT_SRC (%s %s type[0x%x] id[0x%04x] eid[0x%x] iosapic_vector[0x%x] global_irq[0x%x]\n",
213 mps_inti_flags_polarity[p->flags.polarity], 156 mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK],
214 mps_inti_flags_trigger[p->flags.trigger], 157 mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2],
215 p->type, p->id, p->eid, p->iosapic_vector, 158 p->type, p->id, p->eid, p->io_sapic_vector,
216 p->global_irq); 159 p->global_irq);
217 } 160 }
218 break; 161 break;
@@ -225,342 +168,76 @@ void acpi_table_print_madt_entry(acpi_table_entry_header * header)
225 } 168 }
226} 169}
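
Every MADT subtype above decodes inti_flags the same way: bits 0-1 index the polarity strings and bits 2-3 (hence the >> 2) the trigger strings. Equivalent helpers, assuming the u16 flag width used by ACPICA:

static const char *inti_polarity(u16 inti_flags)
{
	static const char *pol[] = { "dfl", "high", "res", "low" };
	return pol[inti_flags & ACPI_MADT_POLARITY_MASK];	/* bits 0-1 */
}

static const char *inti_trigger(u16 inti_flags)
{
	static const char *trig[] = { "dfl", "edge", "res", "level" };
	return trig[(inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2];	/* bits 2-3 */
}
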
227 170
228static int
229acpi_table_compute_checksum(void *table_pointer, unsigned long length)
230{
231 u8 *p = table_pointer;
232 unsigned long remains = length;
233 unsigned long sum = 0;
234
235 if (!p || !length)
236 return -EINVAL;
237
238 while (remains--)
239 sum += *p++;
240
241 return (sum & 0xFF);
242}
243 171
244/*
245 * acpi_get_table_header_early()
246 * for acpi_blacklisted(), acpi_table_get_sdt()
247 */
248int __init 172int __init
249acpi_get_table_header_early(enum acpi_table_id id, 173acpi_table_parse_madt_family(char *id,
250 struct acpi_table_header **header)
251{
252 unsigned int i;
253 enum acpi_table_id temp_id;
254
255 /* DSDT is different from the rest */
256 if (id == ACPI_DSDT)
257 temp_id = ACPI_FADT;
258 else
259 temp_id = id;
260
261 /* Locate the table. */
262
263 for (i = 0; i < sdt_count; i++) {
264 if (sdt_entry[i].id != temp_id)
265 continue;
266 *header = (void *)
267 __acpi_map_table(sdt_entry[i].pa, sdt_entry[i].size);
268 if (!*header) {
269 printk(KERN_WARNING PREFIX "Unable to map %s\n",
270 acpi_table_signatures[temp_id]);
271 return -ENODEV;
272 }
273 break;
274 }
275
276 if (!*header) {
277 printk(KERN_WARNING PREFIX "%s not present\n",
278 acpi_table_signatures[id]);
279 return -ENODEV;
280 }
281
282 /* Map the DSDT header via the pointer in the FADT */
283 if (id == ACPI_DSDT) {
284 struct fadt_descriptor *fadt =
285 (struct fadt_descriptor *)*header;
286
287 if (fadt->revision == 3 && fadt->Xdsdt) {
288 *header = (void *)__acpi_map_table(fadt->Xdsdt,
289 sizeof(struct
290 acpi_table_header));
291 } else if (fadt->V1_dsdt) {
292 *header = (void *)__acpi_map_table(fadt->V1_dsdt,
293 sizeof(struct
294 acpi_table_header));
295 } else
296 *header = NULL;
297
298 if (!*header) {
299 printk(KERN_WARNING PREFIX "Unable to map DSDT\n");
300 return -ENODEV;
301 }
302 }
303
304 return 0;
305}
306
307int __init
308acpi_table_parse_madt_family(enum acpi_table_id id,
309 unsigned long madt_size, 174 unsigned long madt_size,
310 int entry_id, 175 int entry_id,
311 acpi_madt_entry_handler handler, 176 acpi_madt_entry_handler handler,
312 unsigned int max_entries) 177 unsigned int max_entries)
313{ 178{
314 void *madt = NULL; 179 struct acpi_table_header *madt = NULL;
315 acpi_table_entry_header *entry; 180 struct acpi_subtable_header *entry;
316 unsigned int count = 0; 181 unsigned int count = 0;
317 unsigned long madt_end; 182 unsigned long madt_end;
318 unsigned int i;
319 183
320 if (!handler) 184 if (!handler)
321 return -EINVAL; 185 return -EINVAL;
322 186
 323 /* Locate the MADT (if it exists). There should only be one. */ 187 /* Locate the MADT (if it exists). There should only be one. */
324 188 acpi_get_table(id, 0, &madt);
325 for (i = 0; i < sdt_count; i++) {
326 if (sdt_entry[i].id != id)
327 continue;
328 madt = (void *)
329 __acpi_map_table(sdt_entry[i].pa, sdt_entry[i].size);
330 if (!madt) {
331 printk(KERN_WARNING PREFIX "Unable to map %s\n",
332 acpi_table_signatures[id]);
333 return -ENODEV;
334 }
335 break;
336 }
337 189
338 if (!madt) { 190 if (!madt) {
339 printk(KERN_WARNING PREFIX "%s not present\n", 191 printk(KERN_WARNING PREFIX "%4.4s not present\n", id);
340 acpi_table_signatures[id]);
341 return -ENODEV; 192 return -ENODEV;
342 } 193 }
343 194
344 madt_end = (unsigned long)madt + sdt_entry[i].size; 195 madt_end = (unsigned long)madt + madt->length;
345 196
346 /* Parse all entries looking for a match. */ 197 /* Parse all entries looking for a match. */
347 198
348 entry = (acpi_table_entry_header *) 199 entry = (struct acpi_subtable_header *)
349 ((unsigned long)madt + madt_size); 200 ((unsigned long)madt + madt_size);
350 201
351 while (((unsigned long)entry) + sizeof(acpi_table_entry_header) < 202 while (((unsigned long)entry) + sizeof(struct acpi_subtable_header) <
352 madt_end) { 203 madt_end) {
353 if (entry->type == entry_id 204 if (entry->type == entry_id
354 && (!max_entries || count++ < max_entries)) 205 && (!max_entries || count++ < max_entries))
355 if (handler(entry, madt_end)) 206 if (handler(entry, madt_end))
356 return -EINVAL; 207 return -EINVAL;
357 208
358 entry = (acpi_table_entry_header *) 209 entry = (struct acpi_subtable_header *)
359 ((unsigned long)entry + entry->length); 210 ((unsigned long)entry + entry->length);
360 } 211 }
361 if (max_entries && count > max_entries) { 212 if (max_entries && count > max_entries) {
362 printk(KERN_WARNING PREFIX "[%s:0x%02x] ignored %i entries of " 213 printk(KERN_WARNING PREFIX "[%4.4s:0x%02x] ignored %i entries of "
363 "%i found\n", acpi_table_signatures[id], entry_id, 214 "%i found\n", id, entry_id, count - max_entries, count);
364 count - max_entries, count);
365 } 215 }
366 216
367 return count; 217 return count;
368} 218}
369 219
370int __init 220int __init
371acpi_table_parse_madt(enum acpi_madt_entry_id id, 221acpi_table_parse_madt(enum acpi_madt_type id,
372 acpi_madt_entry_handler handler, unsigned int max_entries) 222 acpi_madt_entry_handler handler, unsigned int max_entries)
373{ 223{
374 return acpi_table_parse_madt_family(ACPI_APIC, 224 return acpi_table_parse_madt_family(ACPI_SIG_MADT,
375 sizeof(struct acpi_table_madt), id, 225 sizeof(struct acpi_table_madt), id,
376 handler, max_entries); 226 handler, max_entries);
377} 227}
378 228
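
A hedged usage sketch for the reworked walker: the handler is called once per matching subtable and aborts the walk by returning nonzero; the handler name and body here are hypothetical:

static int __init example_lapic_handler(struct acpi_subtable_header *header,
					const unsigned long end)
{
	struct acpi_madt_local_apic *p =
		(struct acpi_madt_local_apic *)header;

	if ((unsigned long)header + sizeof(*p) > end)
		return -EINVAL;	/* truncated entry */
	/* e.g. record p->id for an enabled processor here */
	return 0;
}

/* acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC, example_lapic_handler, 0); */
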
379int __init acpi_table_parse(enum acpi_table_id id, acpi_table_handler handler) 229int __init acpi_table_parse(char *id, acpi_table_handler handler)
380{ 230{
381 int count = 0; 231 struct acpi_table_header *table = NULL;
382 unsigned int i = 0;
383
384 if (!handler) 232 if (!handler)
385 return -EINVAL; 233 return -EINVAL;
386 234
387 for (i = 0; i < sdt_count; i++) { 235 acpi_get_table(id, 0, &table);
388 if (sdt_entry[i].id != id) 236 if (table) {
389 continue; 237 handler(table);
390 count++; 238 return 1;
391 if (count == 1) 239 } else
392 handler(sdt_entry[i].pa, sdt_entry[i].size); 240 return 0;
393
394 else
395 printk(KERN_WARNING PREFIX
396 "%d duplicate %s table ignored.\n", count,
397 acpi_table_signatures[id]);
398 }
399
400 return count;
401}
402
403static int __init acpi_table_get_sdt(struct acpi_table_rsdp *rsdp)
404{
405 struct acpi_table_header *header = NULL;
406 unsigned int i, id = 0;
407
408 if (!rsdp)
409 return -EINVAL;
410
411 /* First check XSDT (but only on ACPI 2.0-compatible systems) */
412
413 if ((rsdp->revision >= 2) &&
414 (((struct acpi20_table_rsdp *)rsdp)->xsdt_address)) {
415
416 struct acpi_table_xsdt *mapped_xsdt = NULL;
417
418 sdt_pa = ((struct acpi20_table_rsdp *)rsdp)->xsdt_address;
419
420 /* map in just the header */
421 header = (struct acpi_table_header *)
422 __acpi_map_table(sdt_pa, sizeof(struct acpi_table_header));
423
424 if (!header) {
425 printk(KERN_WARNING PREFIX
426 "Unable to map XSDT header\n");
427 return -ENODEV;
428 }
429
430 /* remap in the entire table before processing */
431 mapped_xsdt = (struct acpi_table_xsdt *)
432 __acpi_map_table(sdt_pa, header->length);
433 if (!mapped_xsdt) {
434 printk(KERN_WARNING PREFIX "Unable to map XSDT\n");
435 return -ENODEV;
436 }
437 header = &mapped_xsdt->header;
438
439 if (strncmp(header->signature, "XSDT", 4)) {
440 printk(KERN_WARNING PREFIX
441 "XSDT signature incorrect\n");
442 return -ENODEV;
443 }
444
445 if (acpi_table_compute_checksum(header, header->length)) {
446 printk(KERN_WARNING PREFIX "Invalid XSDT checksum\n");
447 return -ENODEV;
448 }
449
450 sdt_count =
451 (header->length - sizeof(struct acpi_table_header)) >> 3;
452 if (sdt_count > ACPI_MAX_TABLES) {
453 printk(KERN_WARNING PREFIX
454 "Truncated %lu XSDT entries\n",
455 (sdt_count - ACPI_MAX_TABLES));
456 sdt_count = ACPI_MAX_TABLES;
457 }
458
459 for (i = 0; i < sdt_count; i++)
460 sdt_entry[i].pa = (unsigned long)mapped_xsdt->entry[i];
461 }
462
463 /* Then check RSDT */
464
465 else if (rsdp->rsdt_address) {
466
467 struct acpi_table_rsdt *mapped_rsdt = NULL;
468
469 sdt_pa = rsdp->rsdt_address;
470
471 /* map in just the header */
472 header = (struct acpi_table_header *)
473 __acpi_map_table(sdt_pa, sizeof(struct acpi_table_header));
474 if (!header) {
475 printk(KERN_WARNING PREFIX
476 "Unable to map RSDT header\n");
477 return -ENODEV;
478 }
479
480 /* remap in the entire table before processing */
481 mapped_rsdt = (struct acpi_table_rsdt *)
482 __acpi_map_table(sdt_pa, header->length);
483 if (!mapped_rsdt) {
484 printk(KERN_WARNING PREFIX "Unable to map RSDT\n");
485 return -ENODEV;
486 }
487 header = &mapped_rsdt->header;
488
489 if (strncmp(header->signature, "RSDT", 4)) {
490 printk(KERN_WARNING PREFIX
491 "RSDT signature incorrect\n");
492 return -ENODEV;
493 }
494
495 if (acpi_table_compute_checksum(header, header->length)) {
496 printk(KERN_WARNING PREFIX "Invalid RSDT checksum\n");
497 return -ENODEV;
498 }
499
500 sdt_count =
501 (header->length - sizeof(struct acpi_table_header)) >> 2;
502 if (sdt_count > ACPI_MAX_TABLES) {
503 printk(KERN_WARNING PREFIX
504 "Truncated %lu RSDT entries\n",
505 (sdt_count - ACPI_MAX_TABLES));
506 sdt_count = ACPI_MAX_TABLES;
507 }
508
509 for (i = 0; i < sdt_count; i++)
510 sdt_entry[i].pa = (unsigned long)mapped_rsdt->entry[i];
511 }
512
513 else {
514 printk(KERN_WARNING PREFIX
515 "No System Description Table (RSDT/XSDT) specified in RSDP\n");
516 return -ENODEV;
517 }
518
519 acpi_table_print(header, sdt_pa);
520
521 for (i = 0; i < sdt_count; i++) {
522
523 /* map in just the header */
524 header = (struct acpi_table_header *)
525 __acpi_map_table(sdt_entry[i].pa,
526 sizeof(struct acpi_table_header));
527 if (!header)
528 continue;
529
530 /* remap in the entire table before processing */
531 header = (struct acpi_table_header *)
532 __acpi_map_table(sdt_entry[i].pa, header->length);
533 if (!header)
534 continue;
535
536 acpi_table_print(header, sdt_entry[i].pa);
537
538 if (acpi_table_compute_checksum(header, header->length)) {
539 printk(KERN_WARNING " >>> ERROR: Invalid checksum\n");
540 continue;
541 }
542
543 sdt_entry[i].size = header->length;
544
545 for (id = 0; id < ACPI_TABLE_COUNT; id++) {
546 if (!strncmp((char *)&header->signature,
547 acpi_table_signatures[id],
548 sizeof(header->signature))) {
549 sdt_entry[i].id = id;
550 }
551 }
552 }
553
554 /*
555 * The DSDT is *not* in the RSDT (why not? no idea.) but we want
556 * to print its info, because this is what people usually blacklist
557 * against. Unfortunately, we don't know the phys_addr, so just
558 * print 0. Maybe no one will notice.
559 */
560 if (!acpi_get_table_header_early(ACPI_DSDT, &header))
561 acpi_table_print(header, 0);
562
563 return 0;
564} 241}
565 242
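
And the matching sketch for the simplified acpi_table_parse(): the handler now receives the mapped header directly rather than a physical address/size pair, and the return value is 1 when the table was found (handler hypothetical):

static int __init example_hpet_handler(struct acpi_table_header *table)
{
	printk(KERN_INFO PREFIX "HPET rev %d, length %u\n",
	       table->revision, table->length);
	return 0;
}

/* found = acpi_table_parse("HPET", example_hpet_handler); */
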
566/* 243/*
@@ -568,54 +245,13 @@ static int __init acpi_table_get_sdt(struct acpi_table_rsdp *rsdp)
568 * 245 *
569 * find RSDP, find and checksum SDT/XSDT. 246 * find RSDP, find and checksum SDT/XSDT.
570 * checksum all tables, print SDT/XSDT 247 * checksum all tables, print SDT/XSDT
571 * 248 *
 572 * result: sdt_entry[] is initialized 249 * result: initial_tables[] is initialized
573 */ 250 */
574 251
252
575int __init acpi_table_init(void) 253int __init acpi_table_init(void)
576{ 254{
577 struct acpi_table_rsdp *rsdp = NULL; 255 acpi_initialize_tables(initial_tables, ACPI_MAX_TABLES, 0);
578 unsigned long rsdp_phys = 0;
579 int result = 0;
580
581 /* Locate and map the Root System Description Table (RSDP) */
582
583 rsdp_phys = acpi_find_rsdp();
584 if (!rsdp_phys) {
585 printk(KERN_ERR PREFIX "Unable to locate RSDP\n");
586 return -ENODEV;
587 }
588
589 rsdp = (struct acpi_table_rsdp *)__acpi_map_table(rsdp_phys,
590 sizeof(struct acpi_table_rsdp));
591 if (!rsdp) {
592 printk(KERN_WARNING PREFIX "Unable to map RSDP\n");
593 return -ENODEV;
594 }
595
596 printk(KERN_DEBUG PREFIX
597 "RSDP (v%3.3d %6.6s ) @ 0x%p\n",
598 rsdp->revision, rsdp->oem_id, (void *)rsdp_phys);
599
600 if (rsdp->revision < 2)
601 result =
602 acpi_table_compute_checksum(rsdp,
603 sizeof(struct acpi_table_rsdp));
604 else
605 result =
606 acpi_table_compute_checksum(rsdp,
607 ((struct acpi20_table_rsdp *)
608 rsdp)->length);
609
610 if (result) {
611 printk(KERN_WARNING " >>> ERROR: Invalid checksum\n");
612 return -ENODEV;
613 }
614
615 /* Locate and map the System Description table (RSDT/XSDT) */
616
617 if (acpi_table_get_sdt(rsdp))
618 return -ENODEV;
619
620 return 0; 256 return 0;
621} 257}
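
The whole RSDP/RSDT/XSDT discovery now happens inside ACPICA. For reference, the assumed prototype of the entry point used above, and why static storage is passed (treat the exact signature as an assumption from this ACPICA drop):

/*
 * acpi_status acpi_initialize_tables(struct acpi_table_desc *initial_storage,
 *                                    u32 initial_table_count,
 *                                    u8 allow_resize);
 *
 * Passing the static __initdata initial_tables[] avoids dynamic allocation
 * this early in boot; allow_resize == 0 pins the root table list to that
 * fixed ACPI_MAX_TABLES-entry array.
 */
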
diff --git a/drivers/acpi/tables/Makefile b/drivers/acpi/tables/Makefile
index aa4c69594d97..0a7d7afac255 100644
--- a/drivers/acpi/tables/Makefile
+++ b/drivers/acpi/tables/Makefile
@@ -2,7 +2,6 @@
2# Makefile for all Linux ACPI interpreter subdirectories 2# Makefile for all Linux ACPI interpreter subdirectories
3# 3#
4 4
5obj-y := tbconvrt.o tbget.o tbrsdt.o tbxface.o \ 5obj-y := tbxface.o tbinstal.o tbutils.o tbfind.o tbfadt.o
6 tbgetall.o tbinstal.o tbutils.o tbxfroot.o
7 6
8EXTRA_CFLAGS += $(ACPI_CFLAGS) 7EXTRA_CFLAGS += $(ACPI_CFLAGS)
diff --git a/drivers/acpi/tables/tbconvrt.c b/drivers/acpi/tables/tbconvrt.c
deleted file mode 100644
index d697fcb35d52..000000000000
--- a/drivers/acpi/tables/tbconvrt.c
+++ /dev/null
@@ -1,622 +0,0 @@
1/******************************************************************************
2 *
3 * Module Name: tbconvrt - ACPI Table conversion utilities
4 *
5 *****************************************************************************/
6
7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions, and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * substantially similar to the "NO WARRANTY" disclaimer below
19 * ("Disclaimer") and any redistribution must be conditioned upon
20 * including a substantially similar Disclaimer requirement for further
21 * binary redistribution.
22 * 3. Neither the names of the above-listed copyright holders nor the names
23 * of any contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * Alternatively, this software may be distributed under the terms of the
27 * GNU General Public License ("GPL") version 2 as published by the Free
28 * Software Foundation.
29 *
30 * NO WARRANTY
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGES.
42 */
43
44#include <acpi/acpi.h>
45#include <acpi/actables.h>
46
47#define _COMPONENT ACPI_TABLES
48ACPI_MODULE_NAME("tbconvrt")
49
50/* Local prototypes */
51static void
52acpi_tb_init_generic_address(struct acpi_generic_address *new_gas_struct,
53 u8 register_bit_width,
54 acpi_physical_address address);
55
56static void
57acpi_tb_convert_fadt1(struct fadt_descriptor *local_fadt,
58 struct fadt_descriptor_rev1 *original_fadt);
59
60static void
61acpi_tb_convert_fadt2(struct fadt_descriptor *local_fadt,
62 struct fadt_descriptor *original_fadt);
63
64u8 acpi_fadt_is_v1;
65ACPI_EXPORT_SYMBOL(acpi_fadt_is_v1)
66
67/*******************************************************************************
68 *
69 * FUNCTION: acpi_tb_get_table_count
70 *
71 * PARAMETERS: RSDP - Pointer to the RSDP
72 * RSDT - Pointer to the RSDT/XSDT
73 *
74 * RETURN: The number of tables pointed to by the RSDT or XSDT.
75 *
76 * DESCRIPTION: Calculate the number of tables. Automatically handles either
77 * an RSDT or XSDT.
78 *
79 ******************************************************************************/
80
81u32
82acpi_tb_get_table_count(struct rsdp_descriptor *RSDP,
83 struct acpi_table_header *RSDT)
84{
85 u32 pointer_size;
86
87 ACPI_FUNCTION_ENTRY();
88
89 /* RSDT pointers are 32 bits, XSDT pointers are 64 bits */
90
91 if (acpi_gbl_root_table_type == ACPI_TABLE_TYPE_RSDT) {
92 pointer_size = sizeof(u32);
93 } else {
94 pointer_size = sizeof(u64);
95 }
96
97 /*
98 * Determine the number of tables pointed to by the RSDT/XSDT.
99 * This is defined by the ACPI Specification to be the number of
100 * pointers contained within the RSDT/XSDT. The size of the pointers
101 * is architecture-dependent.
102 */
103 return ((RSDT->length -
104 sizeof(struct acpi_table_header)) / pointer_size);
105}
106
107/*******************************************************************************
108 *
109 * FUNCTION: acpi_tb_convert_to_xsdt
110 *
111 * PARAMETERS: table_info - Info about the RSDT
112 *
113 * RETURN: Status
114 *
115 * DESCRIPTION: Convert an RSDT to an XSDT (internal common format)
116 *
117 ******************************************************************************/
118
119acpi_status acpi_tb_convert_to_xsdt(struct acpi_table_desc *table_info)
120{
121 acpi_size table_size;
122 u32 i;
123 struct xsdt_descriptor *new_table;
124
125 ACPI_FUNCTION_ENTRY();
126
127 /* Compute size of the converted XSDT */
128
129 table_size = ((acpi_size) acpi_gbl_rsdt_table_count * sizeof(u64)) +
130 sizeof(struct acpi_table_header);
131
132 /* Allocate an XSDT */
133
134 new_table = ACPI_ALLOCATE_ZEROED(table_size);
135 if (!new_table) {
136 return (AE_NO_MEMORY);
137 }
138
139 /* Copy the header and set the length */
140
141 ACPI_MEMCPY(new_table, table_info->pointer,
142 sizeof(struct acpi_table_header));
143 new_table->length = (u32) table_size;
144
145 /* Copy the table pointers */
146
147 for (i = 0; i < acpi_gbl_rsdt_table_count; i++) {
148
149 /* RSDT pointers are 32 bits, XSDT pointers are 64 bits */
150
151 if (acpi_gbl_root_table_type == ACPI_TABLE_TYPE_RSDT) {
152 ACPI_STORE_ADDRESS(new_table->table_offset_entry[i],
153 (ACPI_CAST_PTR
154 (struct rsdt_descriptor,
155 table_info->pointer))->
156 table_offset_entry[i]);
157 } else {
158 new_table->table_offset_entry[i] =
159 (ACPI_CAST_PTR(struct xsdt_descriptor,
160 table_info->pointer))->
161 table_offset_entry[i];
162 }
163 }
164
165 /* Delete the original table (either mapped or in a buffer) */
166
167 acpi_tb_delete_single_table(table_info);
168
169 /* Point the table descriptor to the new table */
170
171 table_info->pointer =
172 ACPI_CAST_PTR(struct acpi_table_header, new_table);
173 table_info->length = table_size;
174 table_info->allocation = ACPI_MEM_ALLOCATED;
175
176 return (AE_OK);
177}
178
179/*******************************************************************************
180 *
181 * FUNCTION: acpi_tb_init_generic_address
182 *
183 * PARAMETERS: new_gas_struct - GAS struct to be initialized
184 * register_bit_width - Width of this register
185 * Address - Address of the register
186 *
187 * RETURN: None
188 *
189 * DESCRIPTION: Initialize a GAS structure.
190 *
191 ******************************************************************************/
192
193static void
194acpi_tb_init_generic_address(struct acpi_generic_address *new_gas_struct,
195 u8 register_bit_width,
196 acpi_physical_address address)
197{
198
199 ACPI_STORE_ADDRESS(new_gas_struct->address, address);
200
201 new_gas_struct->address_space_id = ACPI_ADR_SPACE_SYSTEM_IO;
202 new_gas_struct->register_bit_width = register_bit_width;
203 new_gas_struct->register_bit_offset = 0;
204 new_gas_struct->access_width = 0;
205}
206
207/*******************************************************************************
208 *
209 * FUNCTION: acpi_tb_convert_fadt1
210 *
211 * PARAMETERS: local_fadt - Pointer to new FADT
212 * original_fadt - Pointer to old FADT
213 *
214 * RETURN: None, populates local_fadt
215 *
216 * DESCRIPTION: Convert an ACPI 1.0 FADT to common internal format
217 *
218 ******************************************************************************/
219
220static void
221acpi_tb_convert_fadt1(struct fadt_descriptor *local_fadt,
222 struct fadt_descriptor_rev1 *original_fadt)
223{
224
225 /* ACPI 1.0 FACS */
226 /* The BIOS stored FADT should agree with Revision 1.0 */
227 acpi_fadt_is_v1 = 1;
228
229 /*
230 * Copy the table header and the common part of the tables.
231 *
232 * The 2.0 table is an extension of the 1.0 table, so the entire 1.0
233 * table can be copied first, then expand some fields to 64 bits.
234 */
235 ACPI_MEMCPY(local_fadt, original_fadt,
236 sizeof(struct fadt_descriptor_rev1));
237
238 /* Convert table pointers to 64-bit fields */
239
240 ACPI_STORE_ADDRESS(local_fadt->xfirmware_ctrl,
241 local_fadt->V1_firmware_ctrl);
242 ACPI_STORE_ADDRESS(local_fadt->Xdsdt, local_fadt->V1_dsdt);
243
244 /*
245 * System Interrupt Model isn't used in ACPI 2.0
246 * (local_fadt->Reserved1 = 0;)
247 */
248
249 /*
250 * This field is set by the OEM to convey the preferred power management
251 * profile to OSPM. It doesn't have any 1.0 equivalence. Since we don't
252 * know what kind of 32-bit system this is, we will use "unspecified".
253 */
254 local_fadt->prefer_PM_profile = PM_UNSPECIFIED;
255
256 /*
257 * Processor Performance State Control. This is the value OSPM writes to
258 * the SMI_CMD register to assume processor performance state control
259 * responsibility. There isn't any equivalence in 1.0, but as many 1.x
260 * ACPI tables contain _PCT and _PSS we also keep this value, unless
261 * acpi_strict is set.
262 */
263 if (acpi_strict)
264 local_fadt->pstate_cnt = 0;
265
266 /*
267 * Support for the _CST object and C States change notification.
268 * This data item hasn't any 1.0 equivalence so leave it zero.
269 */
270 local_fadt->cst_cnt = 0;
271
272 /*
273 * FADT Rev 2 was an interim FADT released between ACPI 1.0 and ACPI 2.0.
274 * It primarily adds the FADT reset mechanism.
275 */
276 if ((original_fadt->revision == 2) &&
277 (original_fadt->length ==
278 sizeof(struct fadt_descriptor_rev2_minus))) {
279 /*
280 * Grab the entire generic address struct, plus the 1-byte reset value
281 * that immediately follows.
282 */
283 ACPI_MEMCPY(&local_fadt->reset_register,
284 &(ACPI_CAST_PTR(struct fadt_descriptor_rev2_minus,
285 original_fadt))->reset_register,
286 sizeof(struct acpi_generic_address) + 1);
287 } else {
288 /*
 288 * There is no 1.0 equivalent, and a 1.0 system very likely has
 289 * legacy device support, so assume legacy devices are present.
291 */
292 local_fadt->iapc_boot_arch = BAF_LEGACY_DEVICES;
293 }
294
295 /*
296 * Convert the V1.0 block addresses to V2.0 GAS structures
297 */
298 acpi_tb_init_generic_address(&local_fadt->xpm1a_evt_blk,
299 local_fadt->pm1_evt_len,
300 (acpi_physical_address) local_fadt->
301 V1_pm1a_evt_blk);
302 acpi_tb_init_generic_address(&local_fadt->xpm1b_evt_blk,
303 local_fadt->pm1_evt_len,
304 (acpi_physical_address) local_fadt->
305 V1_pm1b_evt_blk);
306 acpi_tb_init_generic_address(&local_fadt->xpm1a_cnt_blk,
307 local_fadt->pm1_cnt_len,
308 (acpi_physical_address) local_fadt->
309 V1_pm1a_cnt_blk);
310 acpi_tb_init_generic_address(&local_fadt->xpm1b_cnt_blk,
311 local_fadt->pm1_cnt_len,
312 (acpi_physical_address) local_fadt->
313 V1_pm1b_cnt_blk);
314 acpi_tb_init_generic_address(&local_fadt->xpm2_cnt_blk,
315 local_fadt->pm2_cnt_len,
316 (acpi_physical_address) local_fadt->
317 V1_pm2_cnt_blk);
318 acpi_tb_init_generic_address(&local_fadt->xpm_tmr_blk,
319 local_fadt->pm_tm_len,
320 (acpi_physical_address) local_fadt->
321 V1_pm_tmr_blk);
322 acpi_tb_init_generic_address(&local_fadt->xgpe0_blk, 0,
323 (acpi_physical_address) local_fadt->
324 V1_gpe0_blk);
325 acpi_tb_init_generic_address(&local_fadt->xgpe1_blk, 0,
326 (acpi_physical_address) local_fadt->
327 V1_gpe1_blk);
328
329 /* Create separate GAS structs for the PM1 Enable registers */
330
331 acpi_tb_init_generic_address(&acpi_gbl_xpm1a_enable,
332 (u8) ACPI_DIV_2(acpi_gbl_FADT->
333 pm1_evt_len),
334 (acpi_physical_address)
335 (local_fadt->xpm1a_evt_blk.address +
336 ACPI_DIV_2(acpi_gbl_FADT->pm1_evt_len)));
337
338 /* PM1B is optional; leave null if not present */
339
340 if (local_fadt->xpm1b_evt_blk.address) {
341 acpi_tb_init_generic_address(&acpi_gbl_xpm1b_enable,
342 (u8) ACPI_DIV_2(acpi_gbl_FADT->
343 pm1_evt_len),
344 (acpi_physical_address)
345 (local_fadt->xpm1b_evt_blk.
346 address +
347 ACPI_DIV_2(acpi_gbl_FADT->
348 pm1_evt_len)));
349 }
350}
351
352/*******************************************************************************
353 *
354 * FUNCTION: acpi_tb_convert_fadt2
355 *
356 * PARAMETERS: local_fadt - Pointer to new FADT
357 * original_fadt - Pointer to old FADT
358 *
359 * RETURN: None, populates local_fadt
360 *
361 * DESCRIPTION: Convert an ACPI 2.0 FADT to common internal format.
362 * Handles optional "X" fields.
363 *
364 ******************************************************************************/
365
366static void
367acpi_tb_convert_fadt2(struct fadt_descriptor *local_fadt,
368 struct fadt_descriptor *original_fadt)
369{
370
371 /* We have an ACPI 2.0 FADT but we must copy it to our local buffer */
372
373 ACPI_MEMCPY(local_fadt, original_fadt, sizeof(struct fadt_descriptor));
374
375 /*
376 * "X" fields are optional extensions to the original V1.0 fields, so
377 * we must selectively expand V1.0 fields if the corresponding X field
378 * is zero.
379 */
380 if (!(local_fadt->xfirmware_ctrl)) {
381 ACPI_STORE_ADDRESS(local_fadt->xfirmware_ctrl,
382 local_fadt->V1_firmware_ctrl);
383 }
384
385 if (!(local_fadt->Xdsdt)) {
386 ACPI_STORE_ADDRESS(local_fadt->Xdsdt, local_fadt->V1_dsdt);
387 }
388
389 if (!(local_fadt->xpm1a_evt_blk.address)) {
390 acpi_tb_init_generic_address(&local_fadt->xpm1a_evt_blk,
391 local_fadt->pm1_evt_len,
392 (acpi_physical_address)
393 local_fadt->V1_pm1a_evt_blk);
394 }
395
396 if (!(local_fadt->xpm1b_evt_blk.address)) {
397 acpi_tb_init_generic_address(&local_fadt->xpm1b_evt_blk,
398 local_fadt->pm1_evt_len,
399 (acpi_physical_address)
400 local_fadt->V1_pm1b_evt_blk);
401 }
402
403 if (!(local_fadt->xpm1a_cnt_blk.address)) {
404 acpi_tb_init_generic_address(&local_fadt->xpm1a_cnt_blk,
405 local_fadt->pm1_cnt_len,
406 (acpi_physical_address)
407 local_fadt->V1_pm1a_cnt_blk);
408 }
409
410 if (!(local_fadt->xpm1b_cnt_blk.address)) {
411 acpi_tb_init_generic_address(&local_fadt->xpm1b_cnt_blk,
412 local_fadt->pm1_cnt_len,
413 (acpi_physical_address)
414 local_fadt->V1_pm1b_cnt_blk);
415 }
416
417 if (!(local_fadt->xpm2_cnt_blk.address)) {
418 acpi_tb_init_generic_address(&local_fadt->xpm2_cnt_blk,
419 local_fadt->pm2_cnt_len,
420 (acpi_physical_address)
421 local_fadt->V1_pm2_cnt_blk);
422 }
423
424 if (!(local_fadt->xpm_tmr_blk.address)) {
425 acpi_tb_init_generic_address(&local_fadt->xpm_tmr_blk,
426 local_fadt->pm_tm_len,
427 (acpi_physical_address)
428 local_fadt->V1_pm_tmr_blk);
429 }
430
431 if (!(local_fadt->xgpe0_blk.address)) {
432 acpi_tb_init_generic_address(&local_fadt->xgpe0_blk,
433 0,
434 (acpi_physical_address)
435 local_fadt->V1_gpe0_blk);
436 }
437
438 if (!(local_fadt->xgpe1_blk.address)) {
439 acpi_tb_init_generic_address(&local_fadt->xgpe1_blk,
440 0,
441 (acpi_physical_address)
442 local_fadt->V1_gpe1_blk);
443 }
444
445 /* Create separate GAS structs for the PM1 Enable registers */
446
447 acpi_tb_init_generic_address(&acpi_gbl_xpm1a_enable,
448 (u8) ACPI_DIV_2(acpi_gbl_FADT->
449 pm1_evt_len),
450 (acpi_physical_address)
451 (local_fadt->xpm1a_evt_blk.address +
452 ACPI_DIV_2(acpi_gbl_FADT->pm1_evt_len)));
453
454 acpi_gbl_xpm1a_enable.address_space_id =
455 local_fadt->xpm1a_evt_blk.address_space_id;
456
457 /* PM1B is optional; leave null if not present */
458
459 if (local_fadt->xpm1b_evt_blk.address) {
460 acpi_tb_init_generic_address(&acpi_gbl_xpm1b_enable,
461 (u8) ACPI_DIV_2(acpi_gbl_FADT->
462 pm1_evt_len),
463 (acpi_physical_address)
464 (local_fadt->xpm1b_evt_blk.
465 address +
466 ACPI_DIV_2(acpi_gbl_FADT->
467 pm1_evt_len)));
468
469 acpi_gbl_xpm1b_enable.address_space_id =
470 local_fadt->xpm1b_evt_blk.address_space_id;
471 }
472}
473
474/*******************************************************************************
475 *
476 * FUNCTION: acpi_tb_convert_table_fadt
477 *
478 * PARAMETERS: None
479 *
480 * RETURN: Status
481 *
482 * DESCRIPTION: Converts a BIOS supplied ACPI 1.0 FADT to a local
483 * ACPI 2.0 FADT. If the BIOS supplied a 2.0 FADT then it is simply
484 * copied to the local FADT. The ACPI CA software uses this
485 * local FADT. Thus a significant amount of special #ifdef
 486 * type coding is saved.
487 *
488 ******************************************************************************/
489
490acpi_status acpi_tb_convert_table_fadt(void)
491{
492 struct fadt_descriptor *local_fadt;
493 struct acpi_table_desc *table_desc;
494
495 ACPI_FUNCTION_TRACE(tb_convert_table_fadt);
496
497 /*
498 * acpi_gbl_FADT is valid. Validate the FADT length. The table must be
499 * at least as long as the version 1.0 FADT
500 */
501 if (acpi_gbl_FADT->length < sizeof(struct fadt_descriptor_rev1)) {
502 ACPI_ERROR((AE_INFO, "FADT is invalid, too short: 0x%X",
503 acpi_gbl_FADT->length));
504 return_ACPI_STATUS(AE_INVALID_TABLE_LENGTH);
505 }
506
507 /* Allocate buffer for the ACPI 2.0(+) FADT */
508
509 local_fadt = ACPI_ALLOCATE_ZEROED(sizeof(struct fadt_descriptor));
510 if (!local_fadt) {
511 return_ACPI_STATUS(AE_NO_MEMORY);
512 }
513
514 if (acpi_gbl_FADT->revision >= FADT2_REVISION_ID) {
515 if (acpi_gbl_FADT->length < sizeof(struct fadt_descriptor)) {
516
517 /* Length is too short to be a V2.0 table */
518
519 ACPI_WARNING((AE_INFO,
520 "Inconsistent FADT length (0x%X) and revision (0x%X), using FADT V1.0 portion of table",
521 acpi_gbl_FADT->length,
522 acpi_gbl_FADT->revision));
523
524 acpi_tb_convert_fadt1(local_fadt,
525 (void *)acpi_gbl_FADT);
526 } else {
527 /* Valid V2.0 table */
528
529 acpi_tb_convert_fadt2(local_fadt, acpi_gbl_FADT);
530 }
531 } else {
532 /* Valid V1.0 table */
533
534 acpi_tb_convert_fadt1(local_fadt, (void *)acpi_gbl_FADT);
535 }
536
537 /* Global FADT pointer will point to the new common V2.0 FADT */
538
539 acpi_gbl_FADT = local_fadt;
540 acpi_gbl_FADT->length = sizeof(struct fadt_descriptor);
541
542 /* Free the original table */
543
544 table_desc = acpi_gbl_table_lists[ACPI_TABLE_ID_FADT].next;
545 acpi_tb_delete_single_table(table_desc);
546
547 /* Install the new table */
548
549 table_desc->pointer =
550 ACPI_CAST_PTR(struct acpi_table_header, acpi_gbl_FADT);
551 table_desc->allocation = ACPI_MEM_ALLOCATED;
552 table_desc->length = sizeof(struct fadt_descriptor);
553
554 /* Dump the entire FADT */
555
556 ACPI_DEBUG_PRINT((ACPI_DB_TABLES,
557 "Hex dump of common internal FADT, size %d (%X)\n",
558 acpi_gbl_FADT->length, acpi_gbl_FADT->length));
559
560 ACPI_DUMP_BUFFER(ACPI_CAST_PTR(u8, acpi_gbl_FADT),
561 acpi_gbl_FADT->length);
562
563 return_ACPI_STATUS(AE_OK);
564}
565
566/*******************************************************************************
567 *
568 * FUNCTION: acpi_tb_build_common_facs
569 *
570 * PARAMETERS: table_info - Info for currently installed FACS
571 *
572 * RETURN: Status
573 *
574 * DESCRIPTION: Convert ACPI 1.0 and ACPI 2.0 FACS to a common internal
575 * table format.
576 *
577 ******************************************************************************/
578
579acpi_status acpi_tb_build_common_facs(struct acpi_table_desc *table_info)
580{
581
582 ACPI_FUNCTION_TRACE(tb_build_common_facs);
583
584 /* Absolute minimum length is 24, but the ACPI spec says 64 */
585
586 if (acpi_gbl_FACS->length < 24) {
587 ACPI_ERROR((AE_INFO, "Invalid FACS table length: 0x%X",
588 acpi_gbl_FACS->length));
589 return_ACPI_STATUS(AE_INVALID_TABLE_LENGTH);
590 }
591
592 if (acpi_gbl_FACS->length < 64) {
593 ACPI_WARNING((AE_INFO,
594 "FACS is shorter than the ACPI specification allows: 0x%X, using anyway",
595 acpi_gbl_FACS->length));
596 }
597
598 /* Copy fields to the new FACS */
599
600 acpi_gbl_common_fACS.global_lock = &(acpi_gbl_FACS->global_lock);
601
602 if ((acpi_gbl_RSDP->revision < 2) ||
603 (acpi_gbl_FACS->length < 32) ||
604 (!(acpi_gbl_FACS->xfirmware_waking_vector))) {
605
606 /* ACPI 1.0 FACS or short table or optional X_ field is zero */
607
608 acpi_gbl_common_fACS.firmware_waking_vector = ACPI_CAST_PTR(u64,
609 &
610 (acpi_gbl_FACS->
611 firmware_waking_vector));
612 acpi_gbl_common_fACS.vector_width = 32;
613 } else {
614 /* ACPI 2.0 FACS with valid X_ field */
615
616 acpi_gbl_common_fACS.firmware_waking_vector =
617 &acpi_gbl_FACS->xfirmware_waking_vector;
618 acpi_gbl_common_fACS.vector_width = 64;
619 }
620
621 return_ACPI_STATUS(AE_OK);
622}
diff --git a/drivers/acpi/tables/tbfadt.c b/drivers/acpi/tables/tbfadt.c
new file mode 100644
index 000000000000..807c7116e94b
--- /dev/null
+++ b/drivers/acpi/tables/tbfadt.c
@@ -0,0 +1,434 @@
1/******************************************************************************
2 *
3 * Module Name: tbfadt - FADT table utilities
4 *
5 *****************************************************************************/
6
7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions, and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * substantially similar to the "NO WARRANTY" disclaimer below
19 * ("Disclaimer") and any redistribution must be conditioned upon
20 * including a substantially similar Disclaimer requirement for further
21 * binary redistribution.
22 * 3. Neither the names of the above-listed copyright holders nor the names
23 * of any contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * Alternatively, this software may be distributed under the terms of the
27 * GNU General Public License ("GPL") version 2 as published by the Free
28 * Software Foundation.
29 *
30 * NO WARRANTY
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGES.
42 */
43
44#include <acpi/acpi.h>
45#include <acpi/actables.h>
46
47#define _COMPONENT ACPI_TABLES
48ACPI_MODULE_NAME("tbfadt")
49
50/* Local prototypes */
51static inline void
52acpi_tb_init_generic_address(struct acpi_generic_address *generic_address,
53 u8 bit_width, u64 address);
54
55static void acpi_tb_convert_fadt(void);
56
57static void acpi_tb_validate_fadt(void);
58
59/* Table for conversion of FADT to common internal format and FADT validation */
60
61typedef struct acpi_fadt_info {
62 char *name;
63 u8 target;
64 u8 source;
65 u8 length;
66 u8 type;
67
68} acpi_fadt_info;
69
70#define ACPI_FADT_REQUIRED 1
71#define ACPI_FADT_SEPARATE_LENGTH 2
72
73static struct acpi_fadt_info fadt_info_table[] = {
74 {"Pm1aEventBlock", ACPI_FADT_OFFSET(xpm1a_event_block),
75 ACPI_FADT_OFFSET(pm1a_event_block),
76 ACPI_FADT_OFFSET(pm1_event_length), ACPI_FADT_REQUIRED},
77
78 {"Pm1bEventBlock", ACPI_FADT_OFFSET(xpm1b_event_block),
79 ACPI_FADT_OFFSET(pm1b_event_block),
80 ACPI_FADT_OFFSET(pm1_event_length), 0},
81
82 {"Pm1aControlBlock", ACPI_FADT_OFFSET(xpm1a_control_block),
83 ACPI_FADT_OFFSET(pm1a_control_block),
84 ACPI_FADT_OFFSET(pm1_control_length), ACPI_FADT_REQUIRED},
85
86 {"Pm1bControlBlock", ACPI_FADT_OFFSET(xpm1b_control_block),
87 ACPI_FADT_OFFSET(pm1b_control_block),
88 ACPI_FADT_OFFSET(pm1_control_length), 0},
89
90 {"Pm2ControlBlock", ACPI_FADT_OFFSET(xpm2_control_block),
91 ACPI_FADT_OFFSET(pm2_control_block),
92 ACPI_FADT_OFFSET(pm2_control_length), ACPI_FADT_SEPARATE_LENGTH},
93
94 {"PmTimerBlock", ACPI_FADT_OFFSET(xpm_timer_block),
95 ACPI_FADT_OFFSET(pm_timer_block),
96 ACPI_FADT_OFFSET(pm_timer_length), ACPI_FADT_REQUIRED},
97
98 {"Gpe0Block", ACPI_FADT_OFFSET(xgpe0_block),
99 ACPI_FADT_OFFSET(gpe0_block),
100 ACPI_FADT_OFFSET(gpe0_block_length), ACPI_FADT_SEPARATE_LENGTH},
101
102 {"Gpe1Block", ACPI_FADT_OFFSET(xgpe1_block),
103 ACPI_FADT_OFFSET(gpe1_block),
104 ACPI_FADT_OFFSET(gpe1_block_length), ACPI_FADT_SEPARATE_LENGTH}
105};
106
107#define ACPI_FADT_INFO_ENTRIES (sizeof (fadt_info_table) / sizeof (struct acpi_fadt_info))
108
109/*******************************************************************************
110 *
111 * FUNCTION: acpi_tb_init_generic_address
112 *
113 * PARAMETERS: generic_address - GAS struct to be initialized
114 * bit_width - Width of this register
115 * Address - Address of the register
116 *
117 * RETURN: None
118 *
119 * DESCRIPTION: Initialize a Generic Address Structure (GAS)
120 * See the ACPI specification for a full description and
121 * definition of this structure.
122 *
123 ******************************************************************************/
124
125static inline void
126acpi_tb_init_generic_address(struct acpi_generic_address *generic_address,
127 u8 bit_width, u64 address)
128{
129
130 /*
131 * The 64-bit Address field is non-aligned in the byte packed
132 * GAS struct.
133 */
134 ACPI_MOVE_64_TO_64(&generic_address->address, &address);
135
136 /* All other fields are byte-wide */
137
138 generic_address->space_id = ACPI_ADR_SPACE_SYSTEM_IO;
139 generic_address->bit_width = bit_width;
140 generic_address->bit_offset = 0;
141 generic_address->access_width = 0;
142}
143
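
/*
 * Editor's sketch with hypothetical values: build a GAS describing a
 * 32-bit-wide register block at I/O port 0x408. The helper defaults
 * the structure to SystemIO space with zero bit_offset/access_width.
 */
static void example_build_gas(void)
{
	struct acpi_generic_address gas;

	acpi_tb_init_generic_address(&gas, 32, (u64) 0x408);

	/* Result: space_id == ACPI_ADR_SPACE_SYSTEM_IO, bit_width == 32 */
}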
144/*******************************************************************************
145 *
146 * FUNCTION: acpi_tb_parse_fadt
147 *
148 * PARAMETERS: table_index - Index for the FADT
149 * Flags - Flags
150 *
151 * RETURN: None
152 *
153 * DESCRIPTION: Initialize the FADT, DSDT and FACS tables
154 * (FADT contains the addresses of the DSDT and FACS)
155 *
156 ******************************************************************************/
157
158void acpi_tb_parse_fadt(acpi_native_uint table_index, u8 flags)
159{
160 u32 length;
161 struct acpi_table_header *table;
162
163 /*
164 * The FADT has multiple versions with different lengths,
165 * and it contains pointers to both the DSDT and FACS tables.
166 *
167 * Get a local copy of the FADT and convert it to a common format.
168 * Map the entire FADT, which is assumed to be smaller than one page.
169 */
170 length = acpi_gbl_root_table_list.tables[table_index].length;
171
172 table = acpi_os_map_memory(
173 acpi_gbl_root_table_list.tables[table_index].address, length);
175 if (!table) {
176 return;
177 }
178
179 /*
180 * Validate the FADT checksum before we copy the table. Ignore
181 * checksum error as we want to try to get the DSDT and FACS.
182 */
183 (void)acpi_tb_verify_checksum(table, length);
184
185 /* Obtain a local copy of the FADT in common ACPI 2.0+ format */
186
187 acpi_tb_create_local_fadt(table, length);
188
189 /* All done with the real FADT, unmap it */
190
191 acpi_os_unmap_memory(table, length);
192
193 /* Obtain the DSDT and FACS tables via their addresses within the FADT */
194
195 acpi_tb_install_table((acpi_physical_address) acpi_gbl_FADT.Xdsdt,
196 flags, ACPI_SIG_DSDT, ACPI_TABLE_INDEX_DSDT);
197
198 acpi_tb_install_table((acpi_physical_address) acpi_gbl_FADT.Xfacs,
199 flags, ACPI_SIG_FACS, ACPI_TABLE_INDEX_FACS);
200}
201
202/*******************************************************************************
203 *
204 * FUNCTION: acpi_tb_create_local_fadt
205 *
206 * PARAMETERS: Table - Pointer to BIOS FADT
207 * Length - Length of the table
208 *
209 * RETURN: None
210 *
211 * DESCRIPTION: Get a local copy of the FADT and convert it to a common format.
212 * Performs validation on some important FADT fields.
213 *
214 ******************************************************************************/
215
216void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length)
217{
218
219 /*
220 * Check if the FADT is larger than what we know about (ACPI 2.0 version).
221 * Truncate the table, but make some noise.
222 */
223 if (length > sizeof(struct acpi_table_fadt)) {
224 ACPI_WARNING((AE_INFO,
225 "FADT (revision %u) is longer than ACPI 2.0 version, truncating length 0x%X to 0x%zX",
226 table->revision, (unsigned)length,
227 sizeof(struct acpi_table_fadt)));
228 }
229
230 /* Copy the entire FADT locally. Zero first for tb_convert_fadt */
231
232 ACPI_MEMSET(&acpi_gbl_FADT, 0, sizeof(struct acpi_table_fadt));
233
234 ACPI_MEMCPY(&acpi_gbl_FADT, table,
235 ACPI_MIN(length, sizeof(struct acpi_table_fadt)));
236
237 /*
238 * 1) Convert the local copy of the FADT to the common internal format
239 * 2) Validate some of the important values within the FADT
240 */
241 acpi_tb_convert_fadt();
242 acpi_tb_validate_fadt();
243}
244
245/*******************************************************************************
246 *
247 * FUNCTION: acpi_tb_convert_fadt
248 *
249 * PARAMETERS: None, uses acpi_gbl_FADT
250 *
251 * RETURN: None
252 *
253 * DESCRIPTION: Converts all versions of the FADT to a common internal format.
254 * -> Expand all 32-bit addresses to 64-bit.
255 *
256 * NOTE: acpi_gbl_FADT must be of size (struct acpi_table_fadt),
257 * and must contain a copy of the actual FADT.
258 *
259 * ACPICA will use the "X" fields of the FADT for all addresses.
260 *
261 * "X" fields are optional extensions to the original V1.0 fields. Even if
262 * they are present in the structure, they can be optionally not used by
263 * setting them to zero. Therefore, we must selectively expand V1.0 fields
264 * if the corresponding X field is zero.
265 *
266 * For ACPI 1.0 FADTs, all address fields are expanded to the corresponding
267 * "X" fields.
268 *
269 * For ACPI 2.0 FADTs, any "X" fields that are zero are filled in by
270 * expanding the corresponding ACPI 1.0 field.
271 *
272 ******************************************************************************/
273
274static void acpi_tb_convert_fadt(void)
275{
276 u8 pm1_register_length;
277 struct acpi_generic_address *target;
278 acpi_native_uint i;
279
280 /* Update the local FADT table header length */
281
282 acpi_gbl_FADT.header.length = sizeof(struct acpi_table_fadt);
283
284 /* Expand the 32-bit FACS and DSDT addresses to 64-bit as necessary */
285
286 if (!acpi_gbl_FADT.Xfacs) {
287 acpi_gbl_FADT.Xfacs = (u64) acpi_gbl_FADT.facs;
288 }
289
290 if (!acpi_gbl_FADT.Xdsdt) {
291 acpi_gbl_FADT.Xdsdt = (u64) acpi_gbl_FADT.dsdt;
292 }
293
294 /*
295 * Expand the 32-bit V1.0 addresses to the 64-bit "X" generic address
296 * structures as necessary.
297 */
298 for (i = 0; i < ACPI_FADT_INFO_ENTRIES; i++) {
299 target = ACPI_ADD_PTR(struct acpi_generic_address, &acpi_gbl_FADT,
300 fadt_info_table[i].target);
302
303 /* Expand only if the X target is null */
304
305 if (!target->address) {
306 acpi_tb_init_generic_address(target,
307 *ACPI_ADD_PTR(u8, &acpi_gbl_FADT, fadt_info_table[i].length),
308 (u64) *ACPI_ADD_PTR(u32, &acpi_gbl_FADT, fadt_info_table[i].source));
309 }
317 }
318
319 /*
320 * Calculate separate GAS structs for the PM1 Enable registers.
321 * These addresses do not appear (directly) in the FADT, so it is
322 * useful to calculate them once, here.
323 *
324 * The PM event blocks are split into two register blocks: the first is
325 * the PM Status Register block, followed immediately by the PM Enable
326 * Register block. Each is of length (pm1_event_length / 2).
327 */
328 pm1_register_length = (u8) ACPI_DIV_2(acpi_gbl_FADT.pm1_event_length);
329
330 /* The PM1A register block is required */
331
332 acpi_tb_init_generic_address(&acpi_gbl_xpm1a_enable,
333 pm1_register_length,
334 (acpi_gbl_FADT.xpm1a_event_block.address +
335 pm1_register_length));
336 /* Don't forget to copy space_id of the GAS */
337 acpi_gbl_xpm1a_enable.space_id = acpi_gbl_FADT.xpm1a_event_block.space_id;
338
339 /* The PM1B register block is optional, ignore if not present */
340
341 if (acpi_gbl_FADT.xpm1b_event_block.address) {
342 acpi_tb_init_generic_address(&acpi_gbl_xpm1b_enable,
343 pm1_register_length,
344 (acpi_gbl_FADT.xpm1b_event_block.address + pm1_register_length));
345 /* Don't forget to copy space_id of the GAS */
346 acpi_gbl_xpm1b_enable.space_id = acpi_gbl_FADT.xpm1b_event_block.space_id;
347 }
350}
351
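
/*
 * Editor's sketch with hypothetical values: if xpm1a_event_block sits
 * at I/O port 0x400 with pm1_event_length = 4, the split above gives a
 * status block at 0x400 and an enable block at 0x402, each of length
 * pm1_event_length / 2 = 2.
 */
static void example_pm1a_enable_gas(void)
{
	struct acpi_generic_address pm1a_enable;
	u8 half = (u8) ACPI_DIV_2(4);	/* pm1_event_length / 2 */

	/* The enable block immediately follows the status block */
	acpi_tb_init_generic_address(&pm1a_enable, half, 0x400 + half);
}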
352/******************************************************************************
353 *
354 * FUNCTION: acpi_tb_validate_fadt
355 *
356 * PARAMETERS: None - validates the global acpi_gbl_FADT
357 *
358 * RETURN: None
359 *
360 * DESCRIPTION: Validate various important fields within the FADT. If a problem
361 * is found, issue a message, but no status is returned.
362 * Used by both the table manager and the disassembler.
363 *
364 * Possible additional checks:
365 * (acpi_gbl_FADT.pm1_event_length >= 4)
366 * (acpi_gbl_FADT.pm1_control_length >= 2)
367 * (acpi_gbl_FADT.pm_timer_length >= 4)
368 * Gpe block lengths must be multiple of 2
369 *
370 ******************************************************************************/
371
372static void acpi_tb_validate_fadt(void)
373{
374 u32 *address32;
375 struct acpi_generic_address *address64;
376 u8 length;
377 acpi_native_uint i;
378
379 /* Examine all of the 64-bit extended address fields (X fields) */
380
381 for (i = 0; i < ACPI_FADT_INFO_ENTRIES; i++) {
382
383 /* Generate pointers to the 32-bit and 64-bit addresses and get the length */
384
385 address64 = ACPI_ADD_PTR(struct acpi_generic_address, &acpi_gbl_FADT,
386 fadt_info_table[i].target);
387 address32 = ACPI_ADD_PTR(u32, &acpi_gbl_FADT, fadt_info_table[i].source);
388 length = *ACPI_ADD_PTR(u8, &acpi_gbl_FADT, fadt_info_table[i].length);
394
395 if (fadt_info_table[i].type & ACPI_FADT_REQUIRED) {
396 /*
397 * Field is required (Pm1a_event, Pm1a_control, pm_timer).
398 * Both the address and length must be non-zero.
399 */
400 if (!address64->address || !length) {
401 ACPI_ERROR((AE_INFO,
402 "Required field \"%s\" has zero address and/or length: %8.8X%8.8X/%X",
403 fadt_info_table[i].name,
404 ACPI_FORMAT_UINT64(address64->address), length));
405 }
408 } else if (fadt_info_table[i].type & ACPI_FADT_SEPARATE_LENGTH) {
409 /*
410 * Field is optional (PM2Control, GPE0, GPE1) AND has its own
411 * length field. If present, both the address and length must be valid.
412 */
413 if ((address64->address && !length) ||
414 (!address64->address && length)) {
415 ACPI_WARNING((AE_INFO,
416 "Optional field \"%s\" has zero address or length: %8.8X%8.8X/%X",
417 fadt_info_table[i].name,
418 ACPI_FORMAT_UINT64(address64->address), length));
419 }
422 }
423
424 /* If both 32- and 64-bit addresses are valid (non-zero), they must match */
425
426 if (address64->address && *address32 &&
427 (address64->address != (u64) *address32)) {
428 ACPI_ERROR((AE_INFO,
429 "32/64X address mismatch in \"%s\": [%8.8X] [%8.8X%8.8X], using 64X",
430 fadt_info_table[i].name, *address32,
431 ACPI_FORMAT_UINT64(address64->address)));
432 }
433 }
434}
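
/*
 * Editor's note, summarizing the checks above (A = 64-bit X address,
 * L = the associated length byte):
 *
 *   ACPI_FADT_REQUIRED entries:        error if A == 0 or L == 0
 *   ACPI_FADT_SEPARATE_LENGTH entries: warn if exactly one of A, L is zero
 *   all entries:                       error if A and the 32-bit address
 *                                      are both non-zero but disagree;
 *                                      the 64-bit value is used anyway
 */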
diff --git a/drivers/acpi/tables/tbfind.c b/drivers/acpi/tables/tbfind.c
new file mode 100644
index 000000000000..058c064948e1
--- /dev/null
+++ b/drivers/acpi/tables/tbfind.c
@@ -0,0 +1,126 @@
1/******************************************************************************
2 *
3 * Module Name: tbfind - find table
4 *
5 *****************************************************************************/
6
7/*
8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions, and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * substantially similar to the "NO WARRANTY" disclaimer below
19 * ("Disclaimer") and any redistribution must be conditioned upon
20 * including a substantially similar Disclaimer requirement for further
21 * binary redistribution.
22 * 3. Neither the names of the above-listed copyright holders nor the names
23 * of any contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * Alternatively, this software may be distributed under the terms of the
27 * GNU General Public License ("GPL") version 2 as published by the Free
28 * Software Foundation.
29 *
30 * NO WARRANTY
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGES.
42 */
43
44#include <acpi/acpi.h>
45#include <acpi/actables.h>
46
47#define _COMPONENT ACPI_TABLES
48ACPI_MODULE_NAME("tbfind")
49
50/*******************************************************************************
51 *
52 * FUNCTION: acpi_tb_find_table
53 *
54 * PARAMETERS: Signature - String with ACPI table signature
55 * oem_id - String with the table OEM ID
56 * oem_table_id - String with the OEM Table ID
57 * table_index - Where the table index is returned
58 *
59 * RETURN: Status and table index
60 *
61 * DESCRIPTION: Find an ACPI table (in the RSDT/XSDT) that matches the
62 * Signature, OEM ID and OEM Table ID. Returns an index that can
63 * be used to get the table header or entire table.
64 *
65 ******************************************************************************/
66acpi_status
67acpi_tb_find_table(char *signature,
68 char *oem_id,
69 char *oem_table_id, acpi_native_uint * table_index)
70{
71 acpi_native_uint i;
72 acpi_status status;
73
74 ACPI_FUNCTION_TRACE(tb_find_table);
75
76 for (i = 0; i < acpi_gbl_root_table_list.count; ++i) {
77 if (ACPI_MEMCMP(&(acpi_gbl_root_table_list.tables[i].signature),
78 signature, ACPI_NAME_SIZE)) {
79
80 /* Not the requested table */
81
82 continue;
83 }
84
85 /* Table with matching signature has been found */
86
87 if (!acpi_gbl_root_table_list.tables[i].pointer) {
88
89 /* Table is not currently mapped, map it */
90
91 status =
92 acpi_tb_verify_table(&acpi_gbl_root_table_list.tables[i]);
94 if (ACPI_FAILURE(status)) {
95 return_ACPI_STATUS(status);
96 }
97
98 if (!acpi_gbl_root_table_list.tables[i].pointer) {
99 continue;
100 }
101 }
102
103 /* Check for table match on all IDs */
104
105 if (!ACPI_MEMCMP(acpi_gbl_root_table_list.tables[i].pointer->signature,
106 signature, ACPI_NAME_SIZE) &&
107 (!oem_id[0] ||
108 !ACPI_MEMCMP(acpi_gbl_root_table_list.tables[i].pointer->oem_id,
109 oem_id, ACPI_OEM_ID_SIZE)) &&
110 (!oem_table_id[0] ||
111 !ACPI_MEMCMP(acpi_gbl_root_table_list.tables[i].pointer->oem_table_id,
112 oem_table_id, ACPI_OEM_TABLE_ID_SIZE))) {
117 *table_index = i;
118
119 ACPI_DEBUG_PRINT((ACPI_DB_TABLES,
120 "Found table [%4.4s]\n", signature));
121 return_ACPI_STATUS(AE_OK);
122 }
123 }
124
125 return_ACPI_STATUS(AE_NOT_FOUND);
126}
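
/*
 * Editor's sketch (hypothetical caller): empty OEM strings act as
 * wildcards above, so this locates the first table whose signature is
 * "SSDT" regardless of its OEM IDs.
 */
static acpi_status example_find_first_ssdt(acpi_native_uint *table_index)
{
	return acpi_tb_find_table("SSDT", "", "", table_index);
}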
diff --git a/drivers/acpi/tables/tbget.c b/drivers/acpi/tables/tbget.c
deleted file mode 100644
index 11e2d4454e05..000000000000
--- a/drivers/acpi/tables/tbget.c
+++ /dev/null
@@ -1,471 +0,0 @@
1/******************************************************************************
2 *
3 * Module Name: tbget - ACPI Table get* routines
4 *
5 *****************************************************************************/
6
7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions, and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * substantially similar to the "NO WARRANTY" disclaimer below
19 * ("Disclaimer") and any redistribution must be conditioned upon
20 * including a substantially similar Disclaimer requirement for further
21 * binary redistribution.
22 * 3. Neither the names of the above-listed copyright holders nor the names
23 * of any contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * Alternatively, this software may be distributed under the terms of the
27 * GNU General Public License ("GPL") version 2 as published by the Free
28 * Software Foundation.
29 *
30 * NO WARRANTY
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGES.
42 */
43
44#include <acpi/acpi.h>
45#include <acpi/actables.h>
46
47#define _COMPONENT ACPI_TABLES
48ACPI_MODULE_NAME("tbget")
49
50/* Local prototypes */
51static acpi_status
52acpi_tb_get_this_table(struct acpi_pointer *address,
53 struct acpi_table_header *header,
54 struct acpi_table_desc *table_info);
55
56static acpi_status
57acpi_tb_table_override(struct acpi_table_header *header,
58 struct acpi_table_desc *table_info);
59
60/*******************************************************************************
61 *
62 * FUNCTION: acpi_tb_get_table
63 *
64 * PARAMETERS: Address - Address of table to retrieve. Can be
65 * Logical or Physical
66 * table_info - Where table info is returned
67 *
68 * RETURN: None
69 *
70 * DESCRIPTION: Get entire table of unknown size.
71 *
72 ******************************************************************************/
73
74acpi_status
75acpi_tb_get_table(struct acpi_pointer *address,
76 struct acpi_table_desc *table_info)
77{
78 acpi_status status;
79 struct acpi_table_header header;
80
81 ACPI_FUNCTION_TRACE(tb_get_table);
82
83 /* Get the header in order to get signature and table size */
84
85 status = acpi_tb_get_table_header(address, &header);
86 if (ACPI_FAILURE(status)) {
87 return_ACPI_STATUS(status);
88 }
89
90 /* Get the entire table */
91
92 status = acpi_tb_get_table_body(address, &header, table_info);
93 if (ACPI_FAILURE(status)) {
94 ACPI_EXCEPTION((AE_INFO, status,
95 "Could not get ACPI table (size %X)",
96 header.length));
97 return_ACPI_STATUS(status);
98 }
99
100 return_ACPI_STATUS(AE_OK);
101}
102
103/*******************************************************************************
104 *
105 * FUNCTION: acpi_tb_get_table_header
106 *
107 * PARAMETERS: Address - Address of table to retrieve. Can be
108 * Logical or Physical
109 * return_header - Where the table header is returned
110 *
111 * RETURN: Status
112 *
113 * DESCRIPTION: Get an ACPI table header. Works in either physical or
114 * virtual addressing mode, with either physical or logical pointers.
115 * Table is either copied or mapped, depending on the pointer
116 * type and mode of the processor.
117 *
118 ******************************************************************************/
119
120acpi_status
121acpi_tb_get_table_header(struct acpi_pointer *address,
122 struct acpi_table_header *return_header)
123{
124 acpi_status status = AE_OK;
125 struct acpi_table_header *header = NULL;
126
127 ACPI_FUNCTION_TRACE(tb_get_table_header);
128
129 /*
130 * Flags contains the current processor mode (Virtual or Physical
131 * addressing). The pointer_type is either Logical or Physical.
132 */
133 switch (address->pointer_type) {
134 case ACPI_PHYSMODE_PHYSPTR:
135 case ACPI_LOGMODE_LOGPTR:
136
137 /* Pointer matches processor mode, copy the header */
138
139 ACPI_MEMCPY(return_header, address->pointer.logical,
140 sizeof(struct acpi_table_header));
141 break;
142
143 case ACPI_LOGMODE_PHYSPTR:
144
145 /* Create a logical address for the physical pointer */
146
147 status = acpi_os_map_memory(address->pointer.physical,
148 sizeof(struct acpi_table_header),
149 (void *)&header);
150 if (ACPI_FAILURE(status)) {
151 ACPI_ERROR((AE_INFO,
152 "Could not map memory at %8.8X%8.8X for table header",
153 ACPI_FORMAT_UINT64(address->pointer.physical)));
155 return_ACPI_STATUS(status);
156 }
157
158 /* Copy header and delete mapping */
159
160 ACPI_MEMCPY(return_header, header,
161 sizeof(struct acpi_table_header));
162 acpi_os_unmap_memory(header, sizeof(struct acpi_table_header));
163 break;
164
165 default:
166
167 ACPI_ERROR((AE_INFO, "Invalid address flags %X",
168 address->pointer_type));
169 return_ACPI_STATUS(AE_BAD_PARAMETER);
170 }
171
172 ACPI_DEBUG_PRINT((ACPI_DB_TABLES, "Table Signature: [%4.4s]\n",
173 return_header->signature));
174
175 return_ACPI_STATUS(AE_OK);
176}
177
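
/*
 * Editor's sketch (hypothetical caller of this routine, which this
 * patch removes): fetch a header from a physical RSDT entry while the
 * processor runs with paging enabled, i.e. logical mode with a
 * physical pointer (ACPI_LOGMODE_PHYSPTR).
 */
static acpi_status
example_header_from_physical(acpi_physical_address phys,
			     struct acpi_table_header *header)
{
	struct acpi_pointer where;

	where.pointer_type = ACPI_LOGMODE_PHYSPTR;
	where.pointer.physical = phys;

	return acpi_tb_get_table_header(&where, header);
}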
178/*******************************************************************************
179 *
180 * FUNCTION: acpi_tb_get_table_body
181 *
182 * PARAMETERS: Address - Address of table to retrieve. Can be
183 * Logical or Physical
184 * Header - Header of the table to retrieve
185 * table_info - Where the table info is returned
186 *
187 * RETURN: Status
188 *
189 * DESCRIPTION: Get an entire ACPI table with support to allow the host OS to
190 * replace the table with a newer version (table override).
191 * Works in either physical or virtual addressing mode,
192 * with either physical or logical pointers.
193 * Table is either copied or mapped, depending on the pointer
194 * type and mode of the processor.
195 *
196 ******************************************************************************/
197
198acpi_status
199acpi_tb_get_table_body(struct acpi_pointer *address,
200 struct acpi_table_header *header,
201 struct acpi_table_desc *table_info)
202{
203 acpi_status status;
204
205 ACPI_FUNCTION_TRACE(tb_get_table_body);
206
207 if (!table_info || !address) {
208 return_ACPI_STATUS(AE_BAD_PARAMETER);
209 }
210
211 /* Attempt table override. */
212
213 status = acpi_tb_table_override(header, table_info);
214 if (ACPI_SUCCESS(status)) {
215
216 /* Table was overridden by the host OS */
217
218 return_ACPI_STATUS(status);
219 }
220
221 /* No override, get the original table */
222
223 status = acpi_tb_get_this_table(address, header, table_info);
224 return_ACPI_STATUS(status);
225}
226
227/*******************************************************************************
228 *
229 * FUNCTION: acpi_tb_table_override
230 *
231 * PARAMETERS: Header - Pointer to table header
232 * table_info - Return info if table is overridden
233 *
234 * RETURN: Status
235 *
236 * DESCRIPTION: Attempts override of current table with a new one if provided
237 * by the host OS.
238 *
239 ******************************************************************************/
240
241static acpi_status
242acpi_tb_table_override(struct acpi_table_header *header,
243 struct acpi_table_desc *table_info)
244{
245 struct acpi_table_header *new_table;
246 acpi_status status;
247 struct acpi_pointer address;
248
249 ACPI_FUNCTION_TRACE(tb_table_override);
250
251 /*
252 * The OSL will examine the header and decide whether to override this
253 * table. If it decides to override, a table will be returned in new_table,
254 * which we will then copy.
255 */
256 status = acpi_os_table_override(header, &new_table);
257 if (ACPI_FAILURE(status)) {
258
259 /* Some severe error from the OSL, but we basically ignore it */
260
261 ACPI_EXCEPTION((AE_INFO, status,
262 "Could not override ACPI table"));
263 return_ACPI_STATUS(status);
264 }
265
266 if (!new_table) {
267
268 /* No table override */
269
270 return_ACPI_STATUS(AE_NO_ACPI_TABLES);
271 }
272
273 /*
274 * We have a new table to override the old one. Get a copy of
275 * the new one. We know that the new table has a logical pointer.
276 */
277 address.pointer_type = ACPI_LOGICAL_POINTER | ACPI_LOGICAL_ADDRESSING;
278 address.pointer.logical = new_table;
279
280 status = acpi_tb_get_this_table(&address, new_table, table_info);
281 if (ACPI_FAILURE(status)) {
282 ACPI_EXCEPTION((AE_INFO, status, "Could not copy ACPI table"));
283 return_ACPI_STATUS(status);
284 }
285
286 /* The table was overridden; report the replacement */
287
288 ACPI_INFO((AE_INFO, "Table [%4.4s] replaced by host OS",
289 table_info->pointer->signature));
290
291 return_ACPI_STATUS(AE_OK);
292}
293
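/*
 * Editor's sketch: a minimal host-side implementation of the OSL hook
 * called above. Handing back a NULL new_table means "do not override";
 * a non-NULL pointer would replace the table and be copied by
 * acpi_tb_get_this_table().
 */
acpi_status
acpi_os_table_override(struct acpi_table_header *existing_table,
		       struct acpi_table_header **new_table)
{
	if (!existing_table || !new_table) {
		return (AE_BAD_PARAMETER);
	}

	*new_table = NULL;	/* Never override in this sketch */
	return (AE_OK);
}
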
294/*******************************************************************************
295 *
296 * FUNCTION: acpi_tb_get_this_table
297 *
298 * PARAMETERS: Address - Address of table to retrieve. Can be
299 * Logical or Physical
300 * Header - Header of the table to retrieve
301 * table_info - Where the table info is returned
302 *
303 * RETURN: Status
304 *
305 * DESCRIPTION: Get an entire ACPI table. Works in either physical or
306 * virtual addressing mode, with either physical or logical pointers.
307 * Table is either copied or mapped, depending on the pointer
308 * type and mode of the processor.
309 *
310 ******************************************************************************/
311
312static acpi_status
313acpi_tb_get_this_table(struct acpi_pointer *address,
314 struct acpi_table_header *header,
315 struct acpi_table_desc *table_info)
316{
317 struct acpi_table_header *full_table = NULL;
318 u8 allocation;
319 acpi_status status = AE_OK;
320
321 ACPI_FUNCTION_TRACE(tb_get_this_table);
322
323 /* Validate minimum length */
324
325 if (header->length < sizeof(struct acpi_table_header)) {
326 ACPI_ERROR((AE_INFO,
327 "Table length (%X) is smaller than minimum (%zX)",
328 header->length, sizeof(struct acpi_table_header)));
329
330 return_ACPI_STATUS(AE_INVALID_TABLE_LENGTH);
331 }
332
333 /*
334 * Flags contains the current processor mode (Virtual or Physical
335 * addressing). The pointer_type is either Logical or Physical.
336 */
337 switch (address->pointer_type) {
338 case ACPI_PHYSMODE_PHYSPTR:
339 case ACPI_LOGMODE_LOGPTR:
340
341 /* Pointer matches processor mode, copy the table to a new buffer */
342
343 full_table = ACPI_ALLOCATE(header->length);
344 if (!full_table) {
345 ACPI_ERROR((AE_INFO,
346 "Could not allocate table memory for [%4.4s] length %X",
347 header->signature, header->length));
348 return_ACPI_STATUS(AE_NO_MEMORY);
349 }
350
351 /* Copy the entire table (including header) to the local buffer */
352
353 ACPI_MEMCPY(full_table, address->pointer.logical,
354 header->length);
355
356 /* Save allocation type */
357
358 allocation = ACPI_MEM_ALLOCATED;
359 break;
360
361 case ACPI_LOGMODE_PHYSPTR:
362
363 /*
364 * Just map the table's physical memory
365 * into our address space.
366 */
367 status = acpi_os_map_memory(address->pointer.physical,
368 (acpi_size) header->length,
369 ACPI_CAST_PTR(void, &full_table));
370 if (ACPI_FAILURE(status)) {
371 ACPI_ERROR((AE_INFO,
372 "Could not map memory for table [%4.4s] at %8.8X%8.8X for length %X",
373 header->signature,
374 ACPI_FORMAT_UINT64(address->pointer.physical),
375 header->length));
377 return (status);
378 }
379
380 /* Save allocation type */
381
382 allocation = ACPI_MEM_MAPPED;
383 break;
384
385 default:
386
387 ACPI_ERROR((AE_INFO, "Invalid address flags %X",
388 address->pointer_type));
389 return_ACPI_STATUS(AE_BAD_PARAMETER);
390 }
391
392 /*
393 * Validate checksum for _most_ tables,
394 * even the ones whose signature we don't recognize
395 */
396 if (table_info->type != ACPI_TABLE_ID_FACS) {
397 status = acpi_tb_verify_table_checksum(full_table);
398
399#if (!ACPI_CHECKSUM_ABORT)
400 if (ACPI_FAILURE(status)) {
401
402 /* Ignore the error if configuration says so */
403
404 status = AE_OK;
405 }
406#endif
407 }
408
409 /* Return values */
410
411 table_info->pointer = full_table;
412 table_info->length = (acpi_size) header->length;
413 table_info->allocation = allocation;
414
415 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
416 "Found table [%4.4s] at %8.8X%8.8X, mapped/copied to %p\n",
417 full_table->signature,
418 ACPI_FORMAT_UINT64(address->pointer.physical),
419 full_table));
420
421 return_ACPI_STATUS(status);
422}
423
424/*******************************************************************************
425 *
426 * FUNCTION: acpi_tb_get_table_ptr
427 *
428 * PARAMETERS: table_type - one of the defined table types
429 * Instance - Which table of this type
430 * return_table - pointer to location to place the pointer for
431 * return
432 *
433 * RETURN: Status
434 *
435 * DESCRIPTION: This function is called to get the pointer to an ACPI table.
436 *
437 ******************************************************************************/
438
439acpi_status
440acpi_tb_get_table_ptr(acpi_table_type table_type,
441 u32 instance, struct acpi_table_header **return_table)
442{
443 struct acpi_table_desc *table_desc;
444 u32 i;
445
446 ACPI_FUNCTION_TRACE(tb_get_table_ptr);
447
448 if (table_type > ACPI_TABLE_ID_MAX) {
449 return_ACPI_STATUS(AE_BAD_PARAMETER);
450 }
451
452 /* Check for instance out of range of the current table count */
453
454 if (instance > acpi_gbl_table_lists[table_type].count) {
455 return_ACPI_STATUS(AE_NOT_EXIST);
456 }
457
458 /*
459 * Walk the list to get the desired table
460 * Note: Instance is one-based
461 */
462 table_desc = acpi_gbl_table_lists[table_type].next;
463 for (i = 1; i < instance; i++) {
464 table_desc = table_desc->next;
465 }
466
467 /* We are now pointing to the requested table's descriptor */
468
469 *return_table = table_desc->pointer;
470 return_ACPI_STATUS(AE_OK);
471}
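
/*
 * Editor's sketch (hypothetical caller of this routine, which this
 * patch removes): Instance is one-based, so instance 1 yields the
 * first (and only) FADT.
 */
static acpi_status example_get_fadt(struct acpi_table_header **fadt)
{
	return acpi_tb_get_table_ptr(ACPI_TABLE_ID_FADT, 1, fadt);
}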
diff --git a/drivers/acpi/tables/tbgetall.c b/drivers/acpi/tables/tbgetall.c
deleted file mode 100644
index ad982112e4c6..000000000000
--- a/drivers/acpi/tables/tbgetall.c
+++ /dev/null
@@ -1,311 +0,0 @@
1/******************************************************************************
2 *
3 * Module Name: tbgetall - Get all required ACPI tables
4 *
5 *****************************************************************************/
6
7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions, and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * substantially similar to the "NO WARRANTY" disclaimer below
19 * ("Disclaimer") and any redistribution must be conditioned upon
20 * including a substantially similar Disclaimer requirement for further
21 * binary redistribution.
22 * 3. Neither the names of the above-listed copyright holders nor the names
23 * of any contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * Alternatively, this software may be distributed under the terms of the
27 * GNU General Public License ("GPL") version 2 as published by the Free
28 * Software Foundation.
29 *
30 * NO WARRANTY
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGES.
42 */
43
44#include <acpi/acpi.h>
45#include <acpi/actables.h>
46
47#define _COMPONENT ACPI_TABLES
48ACPI_MODULE_NAME("tbgetall")
49
50/* Local prototypes */
51static acpi_status
52acpi_tb_get_primary_table(struct acpi_pointer *address,
53 struct acpi_table_desc *table_info);
54
55static acpi_status
56acpi_tb_get_secondary_table(struct acpi_pointer *address,
57 acpi_string signature,
58 struct acpi_table_desc *table_info);
59
60/*******************************************************************************
61 *
62 * FUNCTION: acpi_tb_get_primary_table
63 *
64 * PARAMETERS: Address - Physical address of table to retrieve
65 * *table_info - Where the table info is returned
66 *
67 * RETURN: Status
68 *
69 * DESCRIPTION: Maps the physical address of the table into a logical address
70 *
71 ******************************************************************************/
72
73static acpi_status
74acpi_tb_get_primary_table(struct acpi_pointer *address,
75 struct acpi_table_desc *table_info)
76{
77 acpi_status status;
78 struct acpi_table_header header;
79
80 ACPI_FUNCTION_TRACE(tb_get_primary_table);
81
82 /* Ignore a NULL address in the RSDT */
83
84 if (!address->pointer.value) {
85 return_ACPI_STATUS(AE_OK);
86 }
87
88 /* Get the header in order to get signature and table size */
89
90 status = acpi_tb_get_table_header(address, &header);
91 if (ACPI_FAILURE(status)) {
92 return_ACPI_STATUS(status);
93 }
94
95 /* Clear the table_info */
96
97 ACPI_MEMSET(table_info, 0, sizeof(struct acpi_table_desc));
98
99 /*
100 * Check the table signature and make sure it is recognized.
101 * Also checks the header checksum
102 */
103 table_info->pointer = &header;
104 status = acpi_tb_recognize_table(table_info, ACPI_TABLE_PRIMARY);
105 if (ACPI_FAILURE(status)) {
106 return_ACPI_STATUS(status);
107 }
108
109 /* Get the entire table */
110
111 status = acpi_tb_get_table_body(address, &header, table_info);
112 if (ACPI_FAILURE(status)) {
113 return_ACPI_STATUS(status);
114 }
115
116 /* Install the table */
117
118 status = acpi_tb_install_table(table_info);
119 return_ACPI_STATUS(status);
120}
121
122/*******************************************************************************
123 *
124 * FUNCTION: acpi_tb_get_secondary_table
125 *
126 * PARAMETERS: Address - Physical address of table to retrieve
127 * Signature - Expected table signature
127 * *table_info - Where the table info is returned
128 *
129 * RETURN: Status
130 *
131 * DESCRIPTION: Maps the physical address of the table into a logical address
132 *
133 ******************************************************************************/
134
135static acpi_status
136acpi_tb_get_secondary_table(struct acpi_pointer *address,
137 acpi_string signature,
138 struct acpi_table_desc *table_info)
139{
140 acpi_status status;
141 struct acpi_table_header header;
142
143 ACPI_FUNCTION_TRACE_STR(tb_get_secondary_table, signature);
144
145 /* Get the header in order to match the signature */
146
147 status = acpi_tb_get_table_header(address, &header);
148 if (ACPI_FAILURE(status)) {
149 return_ACPI_STATUS(status);
150 }
151
152 /* Signature must match request */
153
154 if (!ACPI_COMPARE_NAME(header.signature, signature)) {
155 ACPI_ERROR((AE_INFO,
156 "Incorrect table signature - wanted [%s] found [%4.4s]",
157 signature, header.signature));
158 return_ACPI_STATUS(AE_BAD_SIGNATURE);
159 }
160
161 /*
162 * Check the table signature and make sure it is recognized.
163 * Also checks the header checksum
164 */
165 table_info->pointer = &header;
166 status = acpi_tb_recognize_table(table_info, ACPI_TABLE_SECONDARY);
167 if (ACPI_FAILURE(status)) {
168 return_ACPI_STATUS(status);
169 }
170
171 /* Get the entire table */
172
173 status = acpi_tb_get_table_body(address, &header, table_info);
174 if (ACPI_FAILURE(status)) {
175 return_ACPI_STATUS(status);
176 }
177
178 /* Install the table */
179
180 status = acpi_tb_install_table(table_info);
181 return_ACPI_STATUS(status);
182}
183
184/*******************************************************************************
185 *
186 * FUNCTION: acpi_tb_get_required_tables
187 *
188 * PARAMETERS: None
189 *
190 * RETURN: Status
191 *
192 * DESCRIPTION: Load and validate tables other than the RSDT. The RSDT must
193 * already be loaded and validated.
194 *
195 * Get the minimum set of ACPI tables, namely:
196 *
197 * 1) FADT (via RSDT in loop below)
198 * 2) FACS (via FADT)
199 * 3) DSDT (via FADT)
200 *
201 ******************************************************************************/
202
203acpi_status acpi_tb_get_required_tables(void)
204{
205 acpi_status status = AE_OK;
206 u32 i;
207 struct acpi_table_desc table_info;
208 struct acpi_pointer address;
209
210 ACPI_FUNCTION_TRACE(tb_get_required_tables);
211
212 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "%d ACPI tables in RSDT\n",
213 acpi_gbl_rsdt_table_count));
214
215 address.pointer_type = acpi_gbl_table_flags | ACPI_LOGICAL_ADDRESSING;
216
217 /*
218 * Loop through all table pointers found in RSDT.
219 * This will NOT include the FACS and DSDT - we must get
220 * them after the loop.
221 *
222 * The only tables we are interested in getting here are the FADT and
223 * any SSDTs.
224 */
225 for (i = 0; i < acpi_gbl_rsdt_table_count; i++) {
226
227 /* Get the table address from the common internal XSDT */
228
229 address.pointer.value = acpi_gbl_XSDT->table_offset_entry[i];
230
231 /*
232 * Get the tables needed by this subsystem (FADT and any SSDTs).
233 * NOTE: All other tables are completely ignored at this time.
234 */
235 status = acpi_tb_get_primary_table(&address, &table_info);
236 if ((status != AE_OK) && (status != AE_TABLE_NOT_SUPPORTED)) {
237 ACPI_WARNING((AE_INFO,
238 "%s, while getting table at %8.8X%8.8X",
239 acpi_format_exception(status),
240 ACPI_FORMAT_UINT64(address.pointer.value)));
242 }
243 }
244
245 /* We must have a FADT to continue */
246
247 if (!acpi_gbl_FADT) {
248 ACPI_ERROR((AE_INFO, "No FADT present in RSDT/XSDT"));
249 return_ACPI_STATUS(AE_NO_ACPI_TABLES);
250 }
251
252 /*
253 * Convert the FADT to a common format. This allows earlier revisions of
254 * the table to coexist with newer versions, using common access code.
255 */
256 status = acpi_tb_convert_table_fadt();
257 if (ACPI_FAILURE(status)) {
258 ACPI_ERROR((AE_INFO,
259 "Could not convert FADT to internal common format"));
260 return_ACPI_STATUS(status);
261 }
262
263 /* Get the FACS (Pointed to by the FADT) */
264
265 address.pointer.value = acpi_gbl_FADT->xfirmware_ctrl;
266
267 status = acpi_tb_get_secondary_table(&address, FACS_SIG, &table_info);
268 if (ACPI_FAILURE(status)) {
269 ACPI_EXCEPTION((AE_INFO, status,
270 "Could not get/install the FACS"));
271 return_ACPI_STATUS(status);
272 }
273
274 /*
275 * Create the common FACS pointer table
276 * (Contains pointers to the original table)
277 */
278 status = acpi_tb_build_common_facs(&table_info);
279 if (ACPI_FAILURE(status)) {
280 return_ACPI_STATUS(status);
281 }
282
283 /* Get/install the DSDT (Pointed to by the FADT) */
284
285 address.pointer.value = acpi_gbl_FADT->Xdsdt;
286
287 status = acpi_tb_get_secondary_table(&address, DSDT_SIG, &table_info);
288 if (ACPI_FAILURE(status)) {
289 ACPI_ERROR((AE_INFO, "Could not get/install the DSDT"));
290 return_ACPI_STATUS(status);
291 }
292
293 /* Set Integer Width (32/64) based upon DSDT revision */
294
295 acpi_ut_set_integer_width(acpi_gbl_DSDT->revision);
296
297 /* Dump the entire DSDT */
298
299 ACPI_DEBUG_PRINT((ACPI_DB_TABLES,
300 "Hex dump of entire DSDT, size %d (0x%X), Integer width = %d\n",
301 acpi_gbl_DSDT->length, acpi_gbl_DSDT->length,
302 acpi_gbl_integer_bit_width));
303
304 ACPI_DUMP_BUFFER(ACPI_CAST_PTR(u8, acpi_gbl_DSDT),
305 acpi_gbl_DSDT->length);
306
307 /* Always delete the RSDP mapping, we are done with it */
308
309 acpi_tb_delete_tables_by_type(ACPI_TABLE_ID_RSDP);
310 return_ACPI_STATUS(status);
311}
diff --git a/drivers/acpi/tables/tbinstal.c b/drivers/acpi/tables/tbinstal.c
index 1668a232fb67..0e7b121a99ce 100644
--- a/drivers/acpi/tables/tbinstal.c
+++ b/drivers/acpi/tables/tbinstal.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -42,510 +42,498 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acnamesp.h>
45#include <acpi/actables.h> 46#include <acpi/actables.h>
46 47
47#define _COMPONENT ACPI_TABLES 48#define _COMPONENT ACPI_TABLES
48ACPI_MODULE_NAME("tbinstal") 49ACPI_MODULE_NAME("tbinstal")
49 50
50/* Local prototypes */ 51/******************************************************************************
51static acpi_status
52acpi_tb_match_signature(char *signature,
53 struct acpi_table_desc *table_info, u8 search_type);
54
55/*******************************************************************************
56 * 52 *
57 * FUNCTION: acpi_tb_match_signature 53 * FUNCTION: acpi_tb_verify_table
58 * 54 *
59 * PARAMETERS: Signature - Table signature to match 55 * PARAMETERS: table_desc - table
60 * table_info - Return data
61 * search_type - Table type to match (primary/secondary)
62 * 56 *
63 * RETURN: Status 57 * RETURN: Status
64 * 58 *
65 * DESCRIPTION: Compare signature against the list of "ACPI-subsystem-owned" 59 * DESCRIPTION: this function is called to verify and map table
66 * tables (DSDT/FADT/SSDT, etc.) Returns the table_type_iD on match.
67 * 60 *
68 ******************************************************************************/ 61 *****************************************************************************/
69 62acpi_status acpi_tb_verify_table(struct acpi_table_desc *table_desc)
70static acpi_status
71acpi_tb_match_signature(char *signature,
72 struct acpi_table_desc *table_info, u8 search_type)
73{ 63{
74 acpi_native_uint i; 64 acpi_status status = AE_OK;
75 65
76 ACPI_FUNCTION_TRACE(tb_match_signature); 66 ACPI_FUNCTION_TRACE(tb_verify_table);
77 67
78 /* Search for a signature match among the known table types */ 68 /* Map the table if necessary */
79 69
80 for (i = 0; i < (ACPI_TABLE_ID_MAX + 1); i++) { 70 if (!table_desc->pointer) {
81 if (!(acpi_gbl_table_data[i].flags & search_type)) { 71 if ((table_desc->flags & ACPI_TABLE_ORIGIN_MASK) ==
82 continue; 72 ACPI_TABLE_ORIGIN_MAPPED) {
73 table_desc->pointer =
74 acpi_os_map_memory(table_desc->address,
75 table_desc->length);
83 } 76 }
77 if (!table_desc->pointer) {
78 return_ACPI_STATUS(AE_NO_MEMORY);
79 }
80 }
84 81
85 if (!ACPI_STRNCMP(signature, acpi_gbl_table_data[i].signature, 82 /* FACS is the odd table, has no standard ACPI header and no checksum */
86 acpi_gbl_table_data[i].sig_length)) {
87
88 /* Found a signature match, return index if requested */
89 83
90 if (table_info) { 84 if (!ACPI_COMPARE_NAME(&table_desc->signature, ACPI_SIG_FACS)) {
91 table_info->type = (u8) i;
92 }
93 85
94 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 86 /* Always calculate checksum, ignore bad checksum if requested */
95 "Table [%4.4s] is an ACPI table consumed by the core subsystem\n",
96 (char *)acpi_gbl_table_data[i].
97 signature));
98 87
99 return_ACPI_STATUS(AE_OK); 88 status =
100 } 89 acpi_tb_verify_checksum(table_desc->pointer,
90 table_desc->length);
101 } 91 }
102 92
103 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 93 return_ACPI_STATUS(status);
104 "Table [%4.4s] is not an ACPI table consumed by the core subsystem - ignored\n",
105 (char *)signature));
106
107 return_ACPI_STATUS(AE_TABLE_NOT_SUPPORTED);
108} 94}
109 95
110/******************************************************************************* 96/*******************************************************************************
111 * 97 *
112 * FUNCTION: acpi_tb_install_table 98 * FUNCTION: acpi_tb_add_table
113 * 99 *
114 * PARAMETERS: table_info - Return value from acpi_tb_get_table_body 100 * PARAMETERS: table_desc - Table descriptor
101 * table_index - Where the table index is returned
115 * 102 *
116 * RETURN: Status 103 * RETURN: Status
117 * 104 *
118 * DESCRIPTION: Install the table into the global data structures. 105 * DESCRIPTION: This function is called to add the ACPI table
119 * 106 *
120 ******************************************************************************/ 107 ******************************************************************************/
121 108
122acpi_status acpi_tb_install_table(struct acpi_table_desc *table_info) 109acpi_status
110acpi_tb_add_table(struct acpi_table_desc *table_desc,
111 acpi_native_uint * table_index)
123{ 112{
124 acpi_status status; 113 acpi_native_uint i;
125 114 acpi_native_uint length;
126 ACPI_FUNCTION_TRACE(tb_install_table); 115 acpi_status status = AE_OK;
127 116
128 /* Lock tables while installing */ 117 ACPI_FUNCTION_TRACE(tb_add_table);
129 118
130 status = acpi_ut_acquire_mutex(ACPI_MTX_TABLES); 119 if (!table_desc->pointer) {
131 if (ACPI_FAILURE(status)) { 120 status = acpi_tb_verify_table(table_desc);
132 ACPI_EXCEPTION((AE_INFO, status, 121 if (ACPI_FAILURE(status) || !table_desc->pointer) {
133 "Could not acquire table mutex")); 122 return_ACPI_STATUS(status);
134 return_ACPI_STATUS(status); 123 }
135 } 124 }
136 125
137 /* 126 /* The table must be either an SSDT or a PSDT */
138 * Ignore a table that is already installed. For example, some BIOS 127
139 * ASL code will repeatedly attempt to load the same SSDT. 128 if ((!ACPI_COMPARE_NAME(table_desc->pointer->signature, ACPI_SIG_PSDT))
140 */ 129 &&
141 status = acpi_tb_is_table_installed(table_info); 130 (!ACPI_COMPARE_NAME(table_desc->pointer->signature, ACPI_SIG_SSDT)))
142 if (ACPI_FAILURE(status)) { 131 {
143 goto unlock_and_exit; 132 ACPI_ERROR((AE_INFO,
133 "Table has invalid signature [%4.4s], must be SSDT or PSDT",
134 table_desc->pointer->signature));
135 return_ACPI_STATUS(AE_BAD_SIGNATURE);
144 } 136 }
145 137
146 /* Install the table into the global data structure */ 138 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
139
140 /* Check if table is already registered */
141
142 for (i = 0; i < acpi_gbl_root_table_list.count; ++i) {
143 if (!acpi_gbl_root_table_list.tables[i].pointer) {
144 status =
145 acpi_tb_verify_table(&acpi_gbl_root_table_list.
146 tables[i]);
147 if (ACPI_FAILURE(status)
148 || !acpi_gbl_root_table_list.tables[i].pointer) {
149 continue;
150 }
151 }
152
153 length = ACPI_MIN(table_desc->length,
154 acpi_gbl_root_table_list.tables[i].length);
155 if (ACPI_MEMCMP(table_desc->pointer,
156 acpi_gbl_root_table_list.tables[i].pointer,
157 length)) {
158 continue;
159 }
160
161 /* Table is already registered */
162
163 acpi_tb_delete_table(table_desc);
164 *table_index = i;
165 goto release;
166 }
147 167
148 status = acpi_tb_init_table_descriptor(table_info->type, table_info); 168 /*
169 * Add the table to the global table list
170 */
171 status = acpi_tb_store_table(table_desc->address, table_desc->pointer,
172 table_desc->length, table_desc->flags,
173 table_index);
149 if (ACPI_FAILURE(status)) { 174 if (ACPI_FAILURE(status)) {
150 ACPI_EXCEPTION((AE_INFO, status, 175 goto release;
151 "Could not install table [%4.4s]",
152 table_info->pointer->signature));
153 } 176 }
154 177
155 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "%s located at %p\n", 178 acpi_tb_print_table_header(table_desc->address, table_desc->pointer);
156 acpi_gbl_table_data[table_info->type].name,
157 table_info->pointer));
158 179
159 unlock_and_exit: 180 release:
160 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); 181 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
161 return_ACPI_STATUS(status); 182 return_ACPI_STATUS(status);
162} 183}
163 184
164/******************************************************************************* 185/*******************************************************************************
165 * 186 *
166 * FUNCTION: acpi_tb_recognize_table 187 * FUNCTION: acpi_tb_resize_root_table_list
167 * 188 *
168 * PARAMETERS: table_info - Return value from acpi_tb_get_table_body 189 * PARAMETERS: None
169 * search_type - Table type to match (primary/secondary)
170 * 190 *
171 * RETURN: Status 191 * RETURN: Status
172 * 192 *
173 * DESCRIPTION: Check a table signature for a match against known table types 193 * DESCRIPTION: Expand the size of global table array
174 *
175 * NOTE: All table pointers are validated as follows:
176 * 1) Table pointer must point to valid physical memory
177 * 2) Signature must be 4 ASCII chars, even if we don't recognize the
178 * name
179 * 3) Table must be readable for length specified in the header
180 * 4) Table checksum must be valid (with the exception of the FACS
181 * which has no checksum for some odd reason)
182 * 194 *
183 ******************************************************************************/ 195 ******************************************************************************/
184 196
185acpi_status 197acpi_status acpi_tb_resize_root_table_list(void)
186acpi_tb_recognize_table(struct acpi_table_desc *table_info, u8 search_type)
187{ 198{
188 struct acpi_table_header *table_header; 199 struct acpi_table_desc *tables;
189 acpi_status status;
190 200
191 ACPI_FUNCTION_TRACE(tb_recognize_table); 201 ACPI_FUNCTION_TRACE(tb_resize_root_table_list);
192 202
193 /* Ensure that we have a valid table pointer */ 203 /* allow_resize flag is a parameter to acpi_initialize_tables */
194 204
195 table_header = (struct acpi_table_header *)table_info->pointer; 205 if (!(acpi_gbl_root_table_list.flags & ACPI_ROOT_ALLOW_RESIZE)) {
196 if (!table_header) { 206 ACPI_ERROR((AE_INFO,
197 return_ACPI_STATUS(AE_BAD_PARAMETER); 207 "Resize of Root Table Array is not allowed"));
208 return_ACPI_STATUS(AE_SUPPORT);
198 } 209 }
199 210
200 /* 211 /* Increase the Table Array size */
201 * We only "recognize" a limited number of ACPI tables -- namely, the 212
202 * ones that are used by the subsystem (DSDT, FADT, etc.) 213 tables = ACPI_ALLOCATE_ZEROED((acpi_gbl_root_table_list.size +
203 * 214 ACPI_ROOT_TABLE_SIZE_INCREMENT)
204 * An AE_TABLE_NOT_SUPPORTED means that the table was not recognized. 215 * sizeof(struct acpi_table_desc));
205 * This can be any one of many valid ACPI tables, it just isn't one of 216 if (!tables) {
206 * the tables that is consumed by the core subsystem 217 ACPI_ERROR((AE_INFO,
207 */ 218 "Could not allocate new root table array"));
208 status = acpi_tb_match_signature(table_header->signature, 219 return_ACPI_STATUS(AE_NO_MEMORY);
209 table_info, search_type);
210 if (ACPI_FAILURE(status)) {
211 return_ACPI_STATUS(status);
212 } 220 }
213 221
214 status = acpi_tb_validate_table_header(table_header); 222 /* Copy and free the previous table array */
215 if (ACPI_FAILURE(status)) { 223
216 return_ACPI_STATUS(status); 224 if (acpi_gbl_root_table_list.tables) {
225 ACPI_MEMCPY(tables, acpi_gbl_root_table_list.tables,
226 acpi_gbl_root_table_list.size *
227 sizeof(struct acpi_table_desc));
228
229 if (acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) {
230 ACPI_FREE(acpi_gbl_root_table_list.tables);
231 }
217 } 232 }
218 233
219 /* Return the table type and length via the info struct */ 234 acpi_gbl_root_table_list.tables = tables;
235 acpi_gbl_root_table_list.size += ACPI_ROOT_TABLE_SIZE_INCREMENT;
236 acpi_gbl_root_table_list.flags |= (u8) ACPI_ROOT_ORIGIN_ALLOCATED;
220 237
221 table_info->length = (acpi_size) table_header->length; 238 return_ACPI_STATUS(AE_OK);
222 return_ACPI_STATUS(status);
223} 239}
 
-/*******************************************************************************
- *
- * FUNCTION:    acpi_tb_init_table_descriptor
- *
- * PARAMETERS:  table_type          - The type of the table
- *              table_info          - A table info struct
- *
- * RETURN:      None.
- *
- * DESCRIPTION: Install a table into the global data structs.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_tb_init_table_descriptor(acpi_table_type table_type,
-			      struct acpi_table_desc *table_info)
-{
-	struct acpi_table_list *list_head;
-	struct acpi_table_desc *table_desc;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE_U32(tb_init_table_descriptor, table_type);
-
-	/* Allocate a descriptor for this table */
-
-	table_desc = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_table_desc));
-	if (!table_desc) {
-		return_ACPI_STATUS(AE_NO_MEMORY);
-	}
-
-	/* Get a new owner ID for the table */
-
-	status = acpi_ut_allocate_owner_id(&table_desc->owner_id);
-	if (ACPI_FAILURE(status)) {
-		goto error_exit1;
-	}
-
-	/* Install the table into the global data structure */
-
-	list_head = &acpi_gbl_table_lists[table_type];
-
-	/*
-	 * Two major types of tables: 1) Only one instance is allowed. This
-	 * includes most ACPI tables such as the DSDT. 2) Multiple instances of
-	 * the table are allowed. This includes SSDT and PSDTs.
-	 */
-	if (ACPI_IS_SINGLE_TABLE(acpi_gbl_table_data[table_type].flags)) {
-		/*
-		 * Only one table allowed, and a table has alread been installed
-		 * at this location, so return an error.
-		 */
-		if (list_head->next) {
-			status = AE_ALREADY_EXISTS;
-			goto error_exit2;
-		}
-
-		table_desc->next = list_head->next;
-		list_head->next = table_desc;
-
-		if (table_desc->next) {
-			table_desc->next->prev = table_desc;
-		}
-
-		list_head->count++;
-	} else {
-		/*
-		 * Link the new table in to the list of tables of this type.
-		 * Insert at the end of the list, order IS IMPORTANT.
-		 *
-		 * table_desc->Prev & Next are already NULL from calloc()
-		 */
-		list_head->count++;
-
-		if (!list_head->next) {
-			list_head->next = table_desc;
-		} else {
-			table_desc->next = list_head->next;
-
-			while (table_desc->next->next) {
-				table_desc->next = table_desc->next->next;
-			}
-
-			table_desc->next->next = table_desc;
-			table_desc->prev = table_desc->next;
-			table_desc->next = NULL;
-		}
-	}
-
-	/* Finish initialization of the table descriptor */
-
-	table_desc->loaded_into_namespace = FALSE;
-	table_desc->type = (u8) table_type;
-	table_desc->pointer = table_info->pointer;
-	table_desc->length = table_info->length;
-	table_desc->allocation = table_info->allocation;
-	table_desc->aml_start = (u8 *) (table_desc->pointer + 1),
-	    table_desc->aml_length = (u32)
-	    (table_desc->length - (u32) sizeof(struct acpi_table_header));
-
-	/*
-	 * Set the appropriate global pointer (if there is one) to point to the
-	 * newly installed table
-	 */
-	if (acpi_gbl_table_data[table_type].global_ptr) {
-		*(acpi_gbl_table_data[table_type].global_ptr) =
-		    table_info->pointer;
-	}
-
-	/* Return Data */
-
-	table_info->owner_id = table_desc->owner_id;
-	table_info->installed_desc = table_desc;
-	return_ACPI_STATUS(AE_OK);
-
-	/* Error exit with cleanup */
-
-      error_exit2:
-
-	acpi_ut_release_owner_id(&table_desc->owner_id);
-
-      error_exit1:
-
-	ACPI_FREE(table_desc);
-	return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_tb_delete_all_tables
- *
- * PARAMETERS:  None.
- *
- * RETURN:      None.
- *
- * DESCRIPTION: Delete all internal ACPI tables
- *
- ******************************************************************************/
-
-void acpi_tb_delete_all_tables(void)
-{
-	acpi_table_type type;
-
-	/*
-	 * Free memory allocated for ACPI tables
-	 * Memory can either be mapped or allocated
-	 */
-	for (type = 0; type < (ACPI_TABLE_ID_MAX + 1); type++) {
-		acpi_tb_delete_tables_by_type(type);
-	}
-}
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_tb_store_table
+ *
+ * PARAMETERS:  Address             - Table address
+ *              Table               - Table header
+ *              Length              - Table length
+ *              Flags               - flags
+ *
+ * RETURN:      Status and table index.
+ *
+ * DESCRIPTION: Add an ACPI table to the global table list
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_tb_store_table(acpi_physical_address address,
+		    struct acpi_table_header *table,
+		    u32 length, u8 flags, acpi_native_uint * table_index)
+{
+	acpi_status status = AE_OK;
+
+	/* Ensure that there is room for the table in the Root Table List */
+
+	if (acpi_gbl_root_table_list.count >= acpi_gbl_root_table_list.size) {
+		status = acpi_tb_resize_root_table_list();
+		if (ACPI_FAILURE(status)) {
+			return (status);
+		}
+	}
+
+	/* Initialize added table */
+
+	acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].
+	    address = address;
+	acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].
+	    pointer = table;
+	acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].length =
+	    length;
+	acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].
+	    owner_id = 0;
+	acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].flags =
+	    flags;
+
+	ACPI_MOVE_32_TO_32(&
+			   (acpi_gbl_root_table_list.
+			    tables[acpi_gbl_root_table_list.count].signature),
+			   table->signature);
+
+	*table_index = acpi_gbl_root_table_list.count;
+	acpi_gbl_root_table_list.count++;
+	return (status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_tb_delete_table
+ *
+ * PARAMETERS:  table_index         - Table index
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Delete one internal ACPI table
+ *
+ ******************************************************************************/
+
+void acpi_tb_delete_table(struct acpi_table_desc *table_desc)
+{
+	/* Table must be mapped or allocated */
+	if (!table_desc->pointer) {
+		return;
+	}
+	switch (table_desc->flags & ACPI_TABLE_ORIGIN_MASK) {
+	case ACPI_TABLE_ORIGIN_MAPPED:
+		acpi_os_unmap_memory(table_desc->pointer, table_desc->length);
+		break;
+	case ACPI_TABLE_ORIGIN_ALLOCATED:
+		ACPI_FREE(table_desc->pointer);
+		break;
+	default:;
+	}
+
+	table_desc->pointer = NULL;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_tb_terminate
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Delete all internal ACPI tables
+ *
+ ******************************************************************************/
+
+void acpi_tb_terminate(void)
+{
+	acpi_native_uint i;
+
+	ACPI_FUNCTION_TRACE(tb_terminate);
+
+	(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+
+	/* Delete the individual tables */
+
+	for (i = 0; i < acpi_gbl_root_table_list.count; ++i) {
+		acpi_tb_delete_table(&acpi_gbl_root_table_list.tables[i]);
+	}
+
+	/*
+	 * Delete the root table array if allocated locally. Array cannot be
+	 * mapped, so we don't need to check for that flag.
+	 */
+	if (acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) {
+		ACPI_FREE(acpi_gbl_root_table_list.tables);
+	}
+
+	acpi_gbl_root_table_list.tables = NULL;
+	acpi_gbl_root_table_list.flags = 0;
+	acpi_gbl_root_table_list.count = 0;
+
+	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "ACPI Tables freed\n"));
+	(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_tb_delete_namespace_by_owner
+ *
+ * PARAMETERS:  table_index         - Table index
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Delete all namespace objects created when this table was loaded.
+ *
+ ******************************************************************************/
+
+void acpi_tb_delete_namespace_by_owner(acpi_native_uint table_index)
+{
+	acpi_owner_id owner_id;
+
+	(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+	if (table_index < acpi_gbl_root_table_list.count) {
+		owner_id =
+		    acpi_gbl_root_table_list.tables[table_index].owner_id;
+	} else {
+		(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+		return;
+	}
+
+	(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+	acpi_ns_delete_namespace_by_owner(owner_id);
+}
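
acpi_tb_delete_table() releases a table strictly according to its origin bits: mapped tables are unmapped, heap copies are freed, and anything else (such as a caller-owned "unknown" pointer) is left alone by the switch default. A standalone sketch of the pattern with illustrative stand-in names (the stub plays the role of acpi_os_unmap_memory()):

#include <stdlib.h>

enum table_origin { TABLE_ORIGIN_MAPPED, TABLE_ORIGIN_ALLOCATED };

struct table_desc {
	void *pointer;
	unsigned int length;
	enum table_origin origin;
};

/* Stub standing in for the OS-layer unmap service */
static void os_unmap_memory(void *ptr, unsigned int length)
{
	(void)ptr;
	(void)length;
}

/*
 * Release one table according to how its mapping was obtained, then
 * clear the pointer so a repeated delete is a harmless no-op.
 */
static void delete_table(struct table_desc *desc)
{
	if (!desc->pointer)
		return;

	switch (desc->origin) {
	case TABLE_ORIGIN_MAPPED:
		os_unmap_memory(desc->pointer, desc->length);
		break;
	case TABLE_ORIGIN_ALLOCATED:
		free(desc->pointer);
		break;
	}

	desc->pointer = NULL;
}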
 
-/*******************************************************************************
- *
- * FUNCTION:    acpi_tb_delete_tables_by_type
- *
- * PARAMETERS:  Type                - The table type to be deleted
- *
- * RETURN:      None.
- *
- * DESCRIPTION: Delete an internal ACPI table
- *              Locks the ACPI table mutex
- *
- ******************************************************************************/
-
-void acpi_tb_delete_tables_by_type(acpi_table_type type)
-{
-	struct acpi_table_desc *table_desc;
-	u32 count;
-	u32 i;
-
-	ACPI_FUNCTION_TRACE_U32(tb_delete_tables_by_type, type);
-
-	if (type > ACPI_TABLE_ID_MAX) {
-		return_VOID;
-	}
-
-	if (ACPI_FAILURE(acpi_ut_acquire_mutex(ACPI_MTX_TABLES))) {
-		return;
-	}
-
-	/* Clear the appropriate "typed" global table pointer */
-
-	switch (type) {
-	case ACPI_TABLE_ID_RSDP:
-		acpi_gbl_RSDP = NULL;
-		break;
-
-	case ACPI_TABLE_ID_DSDT:
-		acpi_gbl_DSDT = NULL;
-		break;
-
-	case ACPI_TABLE_ID_FADT:
-		acpi_gbl_FADT = NULL;
-		break;
-
-	case ACPI_TABLE_ID_FACS:
-		acpi_gbl_FACS = NULL;
-		break;
-
-	case ACPI_TABLE_ID_XSDT:
-		acpi_gbl_XSDT = NULL;
-		break;
-
-	case ACPI_TABLE_ID_SSDT:
-	case ACPI_TABLE_ID_PSDT:
-	default:
-		break;
-	}
-
-	/*
-	 * Free the table
-	 * 1) Get the head of the list
-	 */
-	table_desc = acpi_gbl_table_lists[type].next;
-	count = acpi_gbl_table_lists[type].count;
-
-	/*
-	 * 2) Walk the entire list, deleting both the allocated tables
-	 * and the table descriptors
-	 */
-	for (i = 0; i < count; i++) {
-		table_desc = acpi_tb_uninstall_table(table_desc);
-	}
-
-	(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
-	return_VOID;
-}
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_tb_allocate_owner_id
+ *
+ * PARAMETERS:  table_index         - Table index
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Allocates owner_id in table_desc
+ *
+ ******************************************************************************/
+
+acpi_status acpi_tb_allocate_owner_id(acpi_native_uint table_index)
+{
+	acpi_status status = AE_BAD_PARAMETER;
+
+	ACPI_FUNCTION_TRACE(tb_allocate_owner_id);
+
+	(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+	if (table_index < acpi_gbl_root_table_list.count) {
+		status = acpi_ut_allocate_owner_id
+		    (&(acpi_gbl_root_table_list.tables[table_index].owner_id));
+	}
+
+	(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+	return_ACPI_STATUS(status);
+}
 
-/*******************************************************************************
- *
- * FUNCTION:    acpi_tb_delete_single_table
- *
- * PARAMETERS:  table_info          - A table info struct
- *
- * RETURN:      None.
- *
- * DESCRIPTION: Low-level free for a single ACPI table. Handles cases where
- *              the table was allocated a buffer or was mapped.
- *
- ******************************************************************************/
-
-void acpi_tb_delete_single_table(struct acpi_table_desc *table_desc)
-{
-
-	/* Must have a valid table descriptor and pointer */
-
-	if ((!table_desc) || (!table_desc->pointer)) {
-		return;
-	}
-
-	/* Valid table, determine type of memory allocation */
-
-	switch (table_desc->allocation) {
-	case ACPI_MEM_NOT_ALLOCATED:
-		break;
-
-	case ACPI_MEM_ALLOCATED:
-
-		ACPI_FREE(table_desc->pointer);
-		break;
-
-	case ACPI_MEM_MAPPED:
-
-		acpi_os_unmap_memory(table_desc->pointer, table_desc->length);
-		break;
-
-	default:
-		break;
-	}
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_tb_uninstall_table
- *
- * PARAMETERS:  table_info          - A table info struct
- *
- * RETURN:      Pointer to the next table in the list (of same type)
- *
- * DESCRIPTION: Free the memory associated with an internal ACPI table that
- *              is either installed or has never been installed.
- *              Table mutex should be locked.
- *
- ******************************************************************************/
-
-struct acpi_table_desc *acpi_tb_uninstall_table(struct acpi_table_desc
-						*table_desc)
-{
-	struct acpi_table_desc *next_desc;
-
-	ACPI_FUNCTION_TRACE_PTR(tb_uninstall_table, table_desc);
-
-	if (!table_desc) {
-		return_PTR(NULL);
-	}
-
-	/* Unlink the descriptor from the doubly linked list */
-
-	if (table_desc->prev) {
-		table_desc->prev->next = table_desc->next;
-	} else {
-		/* Is first on list, update list head */
-
-		acpi_gbl_table_lists[table_desc->type].next = table_desc->next;
-	}
-
-	if (table_desc->next) {
-		table_desc->next->prev = table_desc->prev;
-	}
-
-	/* Free the memory allocated for the table itself */
-
-	acpi_tb_delete_single_table(table_desc);
-
-	/* Free the owner ID associated with this table */
-
-	acpi_ut_release_owner_id(&table_desc->owner_id);
-
-	/* Free the table descriptor */
-
-	next_desc = table_desc->next;
-	ACPI_FREE(table_desc);
-
-	/* Return pointer to the next descriptor */
-
-	return_PTR(next_desc);
-}
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_tb_release_owner_id
+ *
+ * PARAMETERS:  table_index         - Table index
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Releases owner_id in table_desc
+ *
+ ******************************************************************************/
+
+acpi_status acpi_tb_release_owner_id(acpi_native_uint table_index)
+{
+	acpi_status status = AE_BAD_PARAMETER;
+
+	ACPI_FUNCTION_TRACE(tb_release_owner_id);
+
+	(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+	if (table_index < acpi_gbl_root_table_list.count) {
+		acpi_ut_release_owner_id(&
+					 (acpi_gbl_root_table_list.
+					  tables[table_index].owner_id));
+		status = AE_OK;
+	}
+
+	(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_tb_get_owner_id
+ *
+ * PARAMETERS:  table_index         - Table index
+ *              owner_id            - Where the table owner_id is returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: returns owner_id for the ACPI table
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_tb_get_owner_id(acpi_native_uint table_index, acpi_owner_id * owner_id)
+{
+	acpi_status status = AE_BAD_PARAMETER;
+
+	ACPI_FUNCTION_TRACE(tb_get_owner_id);
+
+	(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+	if (table_index < acpi_gbl_root_table_list.count) {
+		*owner_id =
+		    acpi_gbl_root_table_list.tables[table_index].owner_id;
+		status = AE_OK;
+	}
+
+	(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_tb_is_table_loaded
+ *
+ * PARAMETERS:  table_index         - Table index
+ *
+ * RETURN:      Table Loaded Flag
+ *
+ ******************************************************************************/
+
+u8 acpi_tb_is_table_loaded(acpi_native_uint table_index)
+{
+	u8 is_loaded = FALSE;
+
+	(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+	if (table_index < acpi_gbl_root_table_list.count) {
+		is_loaded = (u8)
+		    (acpi_gbl_root_table_list.tables[table_index].
+		     flags & ACPI_TABLE_IS_LOADED);
+	}
+
+	(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+	return (is_loaded);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_tb_set_table_loaded_flag
+ *
+ * PARAMETERS:  table_index         - Table index
+ *              is_loaded           - TRUE if table is loaded, FALSE otherwise
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Sets the table loaded flag to either TRUE or FALSE.
+ *
+ ******************************************************************************/
+
+void acpi_tb_set_table_loaded_flag(acpi_native_uint table_index, u8 is_loaded)
+{
+
+	(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+	if (table_index < acpi_gbl_root_table_list.count) {
+		if (is_loaded) {
+			acpi_gbl_root_table_list.tables[table_index].flags |=
+			    ACPI_TABLE_IS_LOADED;
+		} else {
+			acpi_gbl_root_table_list.tables[table_index].flags &=
+			    ~ACPI_TABLE_IS_LOADED;
+		}
+	}
+
+	(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+}
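
All of these small accessors follow one pattern: take the table mutex, bounds-check the index against count, act, release. A sketch of the loaded-flag variant using POSIX threads for the lock; all names here are illustrative, not kernel or ACPICA APIs:

#include <pthread.h>

#define TABLE_IS_LOADED 0x08	/* assumed flag bit, mirrors ACPI_TABLE_IS_LOADED */

static pthread_mutex_t table_mutex = PTHREAD_MUTEX_INITIALIZER;
static unsigned char table_flags[16];
static unsigned int table_count = 16;

/* Set or clear one flag bit under the lock, ignoring out-of-range indexes */
static void set_table_loaded_flag(unsigned int index, int is_loaded)
{
	pthread_mutex_lock(&table_mutex);
	if (index < table_count) {
		if (is_loaded)
			table_flags[index] |= TABLE_IS_LOADED;
		else
			table_flags[index] &= (unsigned char)~TABLE_IS_LOADED;
	}
	pthread_mutex_unlock(&table_mutex);
}

Silently ignoring a bad index (rather than asserting) matches the defensive style of the functions above, which return AE_BAD_PARAMETER or FALSE in the same situation.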
diff --git a/drivers/acpi/tables/tbrsdt.c b/drivers/acpi/tables/tbrsdt.c
deleted file mode 100644
index 86a5fca9b739..000000000000
--- a/drivers/acpi/tables/tbrsdt.c
+++ /dev/null
@@ -1,307 +0,0 @@
1/******************************************************************************
2 *
3 * Module Name: tbrsdt - ACPI RSDT table utilities
4 *
5 *****************************************************************************/
6
7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions, and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * substantially similar to the "NO WARRANTY" disclaimer below
19 * ("Disclaimer") and any redistribution must be conditioned upon
20 * including a substantially similar Disclaimer requirement for further
21 * binary redistribution.
22 * 3. Neither the names of the above-listed copyright holders nor the names
23 * of any contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * Alternatively, this software may be distributed under the terms of the
27 * GNU General Public License ("GPL") version 2 as published by the Free
28 * Software Foundation.
29 *
30 * NO WARRANTY
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGES.
42 */
43
44#include <acpi/acpi.h>
45#include <acpi/actables.h>
46
47#define _COMPONENT ACPI_TABLES
48ACPI_MODULE_NAME("tbrsdt")
49
50/*******************************************************************************
51 *
52 * FUNCTION: acpi_tb_verify_rsdp
53 *
54 * PARAMETERS: Address - RSDP (Pointer to RSDT)
55 *
56 * RETURN: Status
57 *
58 * DESCRIPTION: Load and validate the RSDP (ptr) and RSDT (table)
59 *
60 ******************************************************************************/
61acpi_status acpi_tb_verify_rsdp(struct acpi_pointer *address)
62{
63 struct acpi_table_desc table_info;
64 acpi_status status;
65 struct rsdp_descriptor *rsdp;
66
67 ACPI_FUNCTION_TRACE(tb_verify_rsdp);
68
69 switch (address->pointer_type) {
70 case ACPI_LOGICAL_POINTER:
71
72 rsdp = address->pointer.logical;
73 break;
74
75 case ACPI_PHYSICAL_POINTER:
76 /*
77 * Obtain access to the RSDP structure
78 */
79 status = acpi_os_map_memory(address->pointer.physical,
80 sizeof(struct rsdp_descriptor),
81 ACPI_CAST_PTR(void, &rsdp));
82 if (ACPI_FAILURE(status)) {
83 return_ACPI_STATUS(status);
84 }
85 break;
86
87 default:
88 return_ACPI_STATUS(AE_BAD_PARAMETER);
89 }
90
91 /* Verify RSDP signature and checksum */
92
93 status = acpi_tb_validate_rsdp(rsdp);
94 if (ACPI_FAILURE(status)) {
95 goto cleanup;
96 }
97
98 /* RSDP is ok. Init the table info */
99
100 table_info.pointer = ACPI_CAST_PTR(struct acpi_table_header, rsdp);
101 table_info.length = sizeof(struct rsdp_descriptor);
102
103 if (address->pointer_type == ACPI_PHYSICAL_POINTER) {
104 table_info.allocation = ACPI_MEM_MAPPED;
105 } else {
106 table_info.allocation = ACPI_MEM_NOT_ALLOCATED;
107 }
108
109 /* Save the table pointers and allocation info */
110
111 status = acpi_tb_init_table_descriptor(ACPI_TABLE_ID_RSDP, &table_info);
112 if (ACPI_FAILURE(status)) {
113 goto cleanup;
114 }
115
116 /* Save the RSDP in a global for easy access */
117
118 acpi_gbl_RSDP =
119 ACPI_CAST_PTR(struct rsdp_descriptor, table_info.pointer);
120 return_ACPI_STATUS(status);
121
122 /* Error exit */
123 cleanup:
124
125 if (acpi_gbl_table_flags & ACPI_PHYSICAL_POINTER) {
126 acpi_os_unmap_memory(rsdp, sizeof(struct rsdp_descriptor));
127 }
128 return_ACPI_STATUS(status);
129}
130
131/*******************************************************************************
132 *
133 * FUNCTION: acpi_tb_get_rsdt_address
134 *
135 * PARAMETERS: out_address - Where the address is returned
136 *
137 * RETURN: None, Address
138 *
139 * DESCRIPTION: Extract the address of either the RSDT or XSDT, depending on the
140 * version of the RSDP and whether the XSDT pointer is valid
141 *
142 ******************************************************************************/
143
144void acpi_tb_get_rsdt_address(struct acpi_pointer *out_address)
145{
146
147 ACPI_FUNCTION_ENTRY();
148
149 out_address->pointer_type =
150 acpi_gbl_table_flags | ACPI_LOGICAL_ADDRESSING;
151
152 /* Use XSDT if it is present */
153
154 if ((acpi_gbl_RSDP->revision >= 2) &&
155 acpi_gbl_RSDP->xsdt_physical_address) {
156 out_address->pointer.value =
157 acpi_gbl_RSDP->xsdt_physical_address;
158 acpi_gbl_root_table_type = ACPI_TABLE_TYPE_XSDT;
159 } else {
160 /* No XSDT, use the RSDT */
161
162 out_address->pointer.value =
163 acpi_gbl_RSDP->rsdt_physical_address;
164 acpi_gbl_root_table_type = ACPI_TABLE_TYPE_RSDT;
165 }
166}
167
168/*******************************************************************************
169 *
170 * FUNCTION: acpi_tb_validate_rsdt
171 *
172 * PARAMETERS: table_ptr - Addressable pointer to the RSDT.
173 *
174 * RETURN: Status
175 *
176 * DESCRIPTION: Validate signature for the RSDT or XSDT
177 *
178 ******************************************************************************/
179
180acpi_status acpi_tb_validate_rsdt(struct acpi_table_header *table_ptr)
181{
182 char *signature;
183
184 ACPI_FUNCTION_ENTRY();
185
186 /* Validate minimum length */
187
188 if (table_ptr->length < sizeof(struct acpi_table_header)) {
189 ACPI_ERROR((AE_INFO,
190 "RSDT/XSDT length (%X) is smaller than minimum (%zX)",
191 table_ptr->length,
192 sizeof(struct acpi_table_header)));
193
194 return (AE_INVALID_TABLE_LENGTH);
195 }
196
197 /* Search for appropriate signature, RSDT or XSDT */
198
199 if (acpi_gbl_root_table_type == ACPI_TABLE_TYPE_RSDT) {
200 signature = RSDT_SIG;
201 } else {
202 signature = XSDT_SIG;
203 }
204
205 if (!ACPI_COMPARE_NAME(table_ptr->signature, signature)) {
206
207 /* Invalid RSDT or XSDT signature */
208
209 ACPI_ERROR((AE_INFO,
210 "Invalid signature where RSDP indicates RSDT/XSDT should be located. RSDP:"));
211
212 ACPI_DUMP_BUFFER(acpi_gbl_RSDP, 20);
213
214 ACPI_ERROR((AE_INFO,
215 "RSDT/XSDT signature at %X is invalid",
216 acpi_gbl_RSDP->rsdt_physical_address));
217
218 if (acpi_gbl_root_table_type == ACPI_TABLE_TYPE_RSDT) {
219 ACPI_ERROR((AE_INFO, "Looking for RSDT"));
220 } else {
221 ACPI_ERROR((AE_INFO, "Looking for XSDT"));
222 }
223
224 ACPI_DUMP_BUFFER(ACPI_CAST_PTR(char, table_ptr), 48);
225 return (AE_BAD_SIGNATURE);
226 }
227
228 return (AE_OK);
229}
230
231/*******************************************************************************
232 *
233 * FUNCTION: acpi_tb_get_table_rsdt
234 *
235 * PARAMETERS: None
236 *
237 * RETURN: Status
238 *
239 * DESCRIPTION: Load and validate the RSDP (ptr) and RSDT (table)
240 *
241 ******************************************************************************/
242
243acpi_status acpi_tb_get_table_rsdt(void)
244{
245 struct acpi_table_desc table_info;
246 acpi_status status;
247 struct acpi_pointer address;
248
249 ACPI_FUNCTION_TRACE(tb_get_table_rsdt);
250
251 /* Get the RSDT/XSDT via the RSDP */
252
253 acpi_tb_get_rsdt_address(&address);
254
255 table_info.type = ACPI_TABLE_ID_XSDT;
256 status = acpi_tb_get_table(&address, &table_info);
257 if (ACPI_FAILURE(status)) {
258 ACPI_EXCEPTION((AE_INFO, status,
259 "Could not get the RSDT/XSDT"));
260 return_ACPI_STATUS(status);
261 }
262
263 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
264 "RSDP located at %p, points to RSDT physical=%8.8X%8.8X\n",
265 acpi_gbl_RSDP,
266 ACPI_FORMAT_UINT64(address.pointer.value)));
267
268 /* Check the RSDT or XSDT signature */
269
270 status = acpi_tb_validate_rsdt(table_info.pointer);
271 if (ACPI_FAILURE(status)) {
272 goto error_cleanup;
273 }
274
275 /* Get the number of tables defined in the RSDT or XSDT */
276
277 acpi_gbl_rsdt_table_count = acpi_tb_get_table_count(acpi_gbl_RSDP,
278 table_info.pointer);
279
280 /* Convert and/or copy to an XSDT structure */
281
282 status = acpi_tb_convert_to_xsdt(&table_info);
283 if (ACPI_FAILURE(status)) {
284 goto error_cleanup;
285 }
286
287 /* Save the table pointers and allocation info */
288
289 status = acpi_tb_init_table_descriptor(ACPI_TABLE_ID_XSDT, &table_info);
290 if (ACPI_FAILURE(status)) {
291 goto error_cleanup;
292 }
293
294 acpi_gbl_XSDT =
295 ACPI_CAST_PTR(struct xsdt_descriptor, table_info.pointer);
296
297 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "XSDT located at %p\n", acpi_gbl_XSDT));
298 return_ACPI_STATUS(status);
299
300 error_cleanup:
301
302 /* Free table allocated by acpi_tb_get_table */
303
304 acpi_tb_delete_single_table(&table_info);
305
306 return_ACPI_STATUS(status);
307}
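
The deleted acpi_tb_get_rsdt_address() above encodes the root-pointer policy that survives, in compacted form, in the new tbutils.c: use the XSDT when the RSDP revision is 2 or higher and the XSDT pointer is nonzero, otherwise fall back to the RSDT. A sketch against the spec-defined RSDP layout (field names follow the ACPI specification; packing directives are omitted and this is not the kernel's own declaration):

#include <stdint.h>

/* Minimal RSDP layout; fields past the 20-byte ACPI 1.0 header are
 * only valid when revision >= 2. */
struct rsdp {
	char signature[8];		/* "RSD PTR " */
	uint8_t checksum;		/* covers the first 20 bytes */
	char oem_id[6];
	uint8_t revision;
	uint32_t rsdt_physical_address;
	uint32_t length;		/* ACPI 2.0+ */
	uint64_t xsdt_physical_address;	/* ACPI 2.0+ */
	uint8_t extended_checksum;	/* covers the entire structure */
	uint8_t reserved[3];
};

/* Prefer the XSDT when present and valid, else fall back to the RSDT */
static uint64_t root_table_address(const struct rsdp *r, int *is_xsdt)
{
	if (r->revision >= 2 && r->xsdt_physical_address) {
		*is_xsdt = 1;
		return r->xsdt_physical_address;
	}
	*is_xsdt = 0;
	return r->rsdt_physical_address;
}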
diff --git a/drivers/acpi/tables/tbutils.c b/drivers/acpi/tables/tbutils.c
index 209a401801e3..1da64b4518c0 100644
--- a/drivers/acpi/tables/tbutils.c
+++ b/drivers/acpi/tables/tbutils.c
@@ -1,11 +1,11 @@
 /******************************************************************************
  *
- * Module Name: tbutils - Table manipulation utilities
+ * Module Name: tbutils - table utilities
  *
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2006, R. Byron Moore
+ * Copyright (C) 2000 - 2007, R. Byron Moore
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
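
Every routine in the hunks below operates on the common 36-byte header that all standard ACPI tables share (the FACS and RSDP being the two exceptions the new print routine special-cases). For reference, a sketch of that layout; field names follow the specification, packing directives are omitted, and this is not the kernel's own declaration:

#include <stdint.h>

struct acpi_table_header_sketch {
	char signature[4];	/* e.g. "DSDT", "FACP", "SSDT" */
	uint32_t length;	/* length of the entire table, header included */
	uint8_t revision;
	uint8_t checksum;	/* whole table must sum to zero mod 256 */
	char oem_id[6];
	char oem_table_id[8];
	uint32_t oem_revision;
	char asl_compiler_id[4];
	uint32_t asl_compiler_revision;
};	/* 4+4+1+1+6+8+4+4+4 = 36 bytes */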
@@ -48,137 +48,119 @@
 ACPI_MODULE_NAME("tbutils")
 
 /* Local prototypes */
-#ifdef ACPI_OBSOLETE_FUNCTIONS
-acpi_status
-acpi_tb_handle_to_object(u16 table_id, struct acpi_table_desc **table_desc);
-#endif
+static acpi_physical_address
+acpi_tb_get_root_table_entry(u8 * table_entry,
+			     acpi_native_uint table_entry_size);
 
-/*******************************************************************************
- *
- * FUNCTION:    acpi_tb_is_table_installed
- *
- * PARAMETERS:  new_table_desc      - Descriptor for new table being installed
- *
- * RETURN:      Status - AE_ALREADY_EXISTS if the table is already installed
- *
- * DESCRIPTION: Determine if an ACPI table is already installed
- *
- * MUTEX:       Table data structures should be locked
- *
- ******************************************************************************/
-
-acpi_status acpi_tb_is_table_installed(struct acpi_table_desc *new_table_desc)
-{
-	struct acpi_table_desc *table_desc;
-
-	ACPI_FUNCTION_TRACE(tb_is_table_installed);
-
-	/* Get the list descriptor and first table descriptor */
-
-	table_desc = acpi_gbl_table_lists[new_table_desc->type].next;
-
-	/* Examine all installed tables of this type */
-
-	while (table_desc) {
-		/*
-		 * If the table lengths match, perform a full bytewise compare. This
-		 * means that we will allow tables with duplicate oem_table_id(s), as
-		 * long as the tables are different in some way.
-		 *
-		 * Checking if the table has been loaded into the namespace means that
-		 * we don't check for duplicate tables during the initial installation
-		 * of tables within the RSDT/XSDT.
-		 */
-		if ((table_desc->loaded_into_namespace) &&
-		    (table_desc->pointer->length ==
-		     new_table_desc->pointer->length)
-		    &&
-		    (!ACPI_MEMCMP
-		     (table_desc->pointer, new_table_desc->pointer,
-		      new_table_desc->pointer->length))) {
-
-			/* Match: this table is already installed */
-
-			ACPI_DEBUG_PRINT((ACPI_DB_TABLES,
-					  "Table [%4.4s] already installed: Rev %X OemTableId [%8.8s]\n",
-					  new_table_desc->pointer->signature,
-					  new_table_desc->pointer->revision,
-					  new_table_desc->pointer->
-					  oem_table_id));
-
-			new_table_desc->owner_id = table_desc->owner_id;
-			new_table_desc->installed_desc = table_desc;
-
-			return_ACPI_STATUS(AE_ALREADY_EXISTS);
-		}
-
-		/* Get next table on the list */
-
-		table_desc = table_desc->next;
-	}
-
-	return_ACPI_STATUS(AE_OK);
-}
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_tb_tables_loaded
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      TRUE if required ACPI tables are loaded
+ *
+ * DESCRIPTION: Determine if the minimum required ACPI tables are present
+ *              (FADT, FACS, DSDT)
+ *
+ ******************************************************************************/
+
+u8 acpi_tb_tables_loaded(void)
+{
+
+	if (acpi_gbl_root_table_list.count >= 3) {
+		return (TRUE);
+	}
+
+	return (FALSE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_tb_print_table_header
+ *
+ * PARAMETERS:  Address             - Table physical address
+ *              Header              - Table header
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Print an ACPI table header. Special cases for FACS and RSDP.
+ *
+ ******************************************************************************/
+
+void
+acpi_tb_print_table_header(acpi_physical_address address,
+			   struct acpi_table_header *header)
+{
+
+	if (ACPI_COMPARE_NAME(header->signature, ACPI_SIG_FACS)) {
+
+		/* FACS only has signature and length fields of common table header */
+
+		ACPI_INFO((AE_INFO, "%4.4s %08lX, %04X",
+			   header->signature, (unsigned long)address,
+			   header->length));
+	} else if (ACPI_COMPARE_NAME(header->signature, ACPI_SIG_RSDP)) {
+
+		/* RSDP has no common fields */
+
+		ACPI_INFO((AE_INFO, "RSDP %08lX, %04X (r%d %6.6s)",
+			   (unsigned long)address,
+			   (ACPI_CAST_PTR(struct acpi_table_rsdp, header)->
+			    revision >
+			    0) ? ACPI_CAST_PTR(struct acpi_table_rsdp,
+					       header)->length : 20,
+			   ACPI_CAST_PTR(struct acpi_table_rsdp,
+					 header)->revision,
+			   ACPI_CAST_PTR(struct acpi_table_rsdp,
+					 header)->oem_id));
+	} else {
+		/* Standard ACPI table with full common header */
+
+		ACPI_INFO((AE_INFO,
+			   "%4.4s %08lX, %04X (r%d %6.6s %8.8s %8X %4.4s %8X)",
+			   header->signature, (unsigned long)address,
+			   header->length, header->revision, header->oem_id,
+			   header->oem_table_id, header->oem_revision,
+			   header->asl_compiler_id,
+			   header->asl_compiler_revision));
+	}
+}
 
 /*******************************************************************************
  *
- * FUNCTION:    acpi_tb_validate_table_header
- *
- * PARAMETERS:  table_header        - Logical pointer to the table
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Check an ACPI table header for validity
- *
- * NOTE:  Table pointers are validated as follows:
- *          1) Table pointer must point to valid physical memory
- *          2) Signature must be 4 ASCII chars, even if we don't recognize the
- *             name
- *          3) Table must be readable for length specified in the header
- *          4) Table checksum must be valid (with the exception of the FACS
- *             which has no checksum because it contains variable fields)
- *
- ******************************************************************************/
-
-acpi_status
-acpi_tb_validate_table_header(struct acpi_table_header *table_header)
-{
-	acpi_name signature;
-
-	ACPI_FUNCTION_ENTRY();
-
-	/* Verify that this is a valid address */
-
-	if (!acpi_os_readable(table_header, sizeof(struct acpi_table_header))) {
-		ACPI_ERROR((AE_INFO,
-			    "Cannot read table header at %p", table_header));
-
-		return (AE_BAD_ADDRESS);
-	}
-
-	/* Ensure that the signature is 4 ASCII characters */
-
-	ACPI_MOVE_32_TO_32(&signature, table_header->signature);
-	if (!acpi_ut_valid_acpi_name(signature)) {
-		ACPI_ERROR((AE_INFO, "Invalid table signature 0x%8.8X",
-			    signature));
-
-		ACPI_DUMP_BUFFER(table_header,
-				 sizeof(struct acpi_table_header));
-		return (AE_BAD_SIGNATURE);
-	}
-
-	/* Validate the table length */
-
-	if (table_header->length < sizeof(struct acpi_table_header)) {
-		ACPI_ERROR((AE_INFO,
-			    "Invalid length 0x%X in table with signature %4.4s",
-			    (u32) table_header->length,
-			    ACPI_CAST_PTR(char, &signature)));
-
-		ACPI_DUMP_BUFFER(table_header,
-				 sizeof(struct acpi_table_header));
-		return (AE_BAD_HEADER);
-	}
+ * FUNCTION:    acpi_tb_validate_checksum
+ *
+ * PARAMETERS:  Table               - ACPI table to verify
+ *              Length              - Length of entire table
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Verifies that the table checksums to zero. Optionally returns
+ *              exception on bad checksum.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_tb_verify_checksum(struct acpi_table_header *table, u32 length)
+{
+	u8 checksum;
+
+	/* Compute the checksum on the table */
+
+	checksum = acpi_tb_checksum(ACPI_CAST_PTR(u8, table), length);
+
+	/* Checksum ok? (should be zero) */
+
+	if (checksum) {
+		ACPI_WARNING((AE_INFO,
+			      "Incorrect checksum in table [%4.4s] - %2.2X, should be %2.2X",
+			      table->signature, table->checksum,
+			      (u8) (table->checksum - checksum)));
+
+#if (ACPI_CHECKSUM_ABORT)
+
+		return (AE_BAD_CHECKSUM);
+#endif
+	}
 
 	return (AE_OK);
 }
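
The whole-table checksum rule behind the verifier above: every byte of the table, including the stored checksum byte itself, must sum to zero modulo 256. For example, a toy 4-byte table {0x10, 0x20, 0x30, cksum} needs cksum = 0x100 - 0x60 = 0xA0 so the bytes sum to 0x00. A sketch of both the verifier and the generator that the old acpi_tb_set_checksum() implemented:

#include <stdint.h>
#include <stddef.h>

/* Sum all bytes of the buffer, mod 256 (zero for a well-formed table) */
static uint8_t table_checksum(const uint8_t *buffer, size_t length)
{
	uint8_t sum = 0;

	while (length--)
		sum = (uint8_t)(sum + *buffer++);
	return sum;
}

/*
 * (Re)generate the checksum field: sum the table with the checksum byte
 * zeroed, then store the two's complement of that sum so the total
 * becomes zero. checksum_offset is 9 in the common ACPI header.
 */
static void set_checksum(uint8_t *table, size_t length, size_t checksum_offset)
{
	table[checksum_offset] = 0;
	table[checksum_offset] = (uint8_t)(0 - table_checksum(table, length));
}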
@@ -186,157 +168,320 @@ acpi_tb_validate_table_header(struct acpi_table_header *table_header)
 
-/*******************************************************************************
- *
- * FUNCTION:    acpi_tb_sum_table
- *
- * PARAMETERS:  Buffer              - Buffer to sum
- *              Length              - Size of the buffer
- *
- * RETURN:      8 bit sum of buffer
- *
- * DESCRIPTION: Computes an 8 bit sum of the buffer(length) and returns it.
- *
- ******************************************************************************/
-
-u8 acpi_tb_sum_table(void *buffer, u32 length)
-{
-	acpi_native_uint i;
-	u8 sum = 0;
-
-	if (!buffer || !length) {
-		return (0);
-	}
-
-	for (i = 0; i < length; i++) {
-		sum = (u8) (sum + ((u8 *) buffer)[i]);
-	}
-	return (sum);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_tb_generate_checksum
- *
- * PARAMETERS:  Table               - Pointer to a valid ACPI table (with a
- *                                    standard ACPI header)
- *
- * RETURN:      8 bit checksum of buffer
- *
- * DESCRIPTION: Computes an 8 bit checksum of the table.
- *
- ******************************************************************************/
-
-u8 acpi_tb_generate_checksum(struct acpi_table_header * table)
-{
-	u8 checksum;
-
-	/* Sum the entire table as-is */
-
-	checksum = acpi_tb_sum_table(table, table->length);
-
-	/* Subtract off the existing checksum value in the table */
-
-	checksum = (u8) (checksum - table->checksum);
-
-	/* Compute the final checksum */
-
-	checksum = (u8) (0 - checksum);
-	return (checksum);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_tb_set_checksum
- *
- * PARAMETERS:  Table               - Pointer to a valid ACPI table (with a
- *                                    standard ACPI header)
- *
- * RETURN:      None. Sets the table checksum field
- *
- * DESCRIPTION: Computes an 8 bit checksum of the table and inserts the
- *              checksum into the table header.
- *
- ******************************************************************************/
-
-void acpi_tb_set_checksum(struct acpi_table_header *table)
-{
-
-	table->checksum = acpi_tb_generate_checksum(table);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_tb_verify_table_checksum
- *
- * PARAMETERS:  *table_header       - ACPI table to verify
- *
- * RETURN:      8 bit checksum of table
- *
- * DESCRIPTION: Generates an 8 bit checksum of table and returns and compares
- *              it to the existing checksum value.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_tb_verify_table_checksum(struct acpi_table_header *table_header)
-{
-	u8 checksum;
-
-	ACPI_FUNCTION_TRACE(tb_verify_table_checksum);
-
-	/* Compute the checksum on the table */
-
-	checksum = acpi_tb_generate_checksum(table_header);
-
-	/* Checksum ok? */
-
-	if (checksum == table_header->checksum) {
-		return_ACPI_STATUS(AE_OK);
-	}
-
-	ACPI_WARNING((AE_INFO,
-		      "Incorrect checksum in table [%4.4s] - is %2.2X, should be %2.2X",
-		      table_header->signature, table_header->checksum,
-		      checksum));
-
-	return_ACPI_STATUS(AE_BAD_CHECKSUM);
-}
-
-#ifdef ACPI_OBSOLETE_FUNCTIONS
-/*******************************************************************************
- *
- * FUNCTION:    acpi_tb_handle_to_object
- *
- * PARAMETERS:  table_id            - Id for which the function is searching
- *              table_desc          - Pointer to return the matching table
- *                                    descriptor.
- *
- * RETURN:      Search the tables to find one with a matching table_id and
- *              return a pointer to that table descriptor.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_tb_handle_to_object(u16 table_id,
-			 struct acpi_table_desc **return_table_desc)
-{
-	u32 i;
-	struct acpi_table_desc *table_desc;
-
-	ACPI_FUNCTION_NAME(tb_handle_to_object);
-
-	for (i = 0; i < ACPI_TABLE_MAX; i++) {
-		table_desc = acpi_gbl_table_lists[i].next;
-		while (table_desc) {
-			if (table_desc->table_id == table_id) {
-				*return_table_desc = table_desc;
-				return (AE_OK);
-			}
-
-			table_desc = table_desc->next;
-		}
-	}
-
-	ACPI_ERROR((AE_INFO, "TableId=%X does not exist", table_id));
-	return (AE_BAD_PARAMETER);
-}
-#endif
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_tb_checksum
+ *
+ * PARAMETERS:  Buffer          - Pointer to memory region to be checked
+ *              Length          - Length of this memory region
+ *
+ * RETURN:      Checksum (u8)
+ *
+ * DESCRIPTION: Calculates circular checksum of memory region.
+ *
+ ******************************************************************************/
+
+u8 acpi_tb_checksum(u8 * buffer, acpi_native_uint length)
+{
+	u8 sum = 0;
+	u8 *end = buffer + length;
+
+	while (buffer < end) {
+		sum = (u8) (sum + *(buffer++));
+	}
+
+	return sum;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_tb_install_table
+ *
+ * PARAMETERS:  Address                 - Physical address of DSDT or FACS
+ *              Flags                   - Flags
+ *              Signature               - Table signature, NULL if no need to
+ *                                        match
+ *              table_index             - Index into root table array
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Install an ACPI table into the global data structure.
+ *
+ ******************************************************************************/
+
+void
+acpi_tb_install_table(acpi_physical_address address,
+		      u8 flags, char *signature, acpi_native_uint table_index)
+{
+	struct acpi_table_header *table;
+
+	if (!address) {
+		ACPI_ERROR((AE_INFO,
+			    "Null physical address for ACPI table [%s]",
+			    signature));
+		return;
+	}
+
+	/* Map just the table header */
+
+	table = acpi_os_map_memory(address, sizeof(struct acpi_table_header));
+	if (!table) {
+		return;
+	}
+
+	/* If a particular signature is expected, signature must match */
+
+	if (signature && !ACPI_COMPARE_NAME(table->signature, signature)) {
+		ACPI_ERROR((AE_INFO,
+			    "Invalid signature 0x%X for ACPI table [%s]",
+			    *ACPI_CAST_PTR(u32, table->signature), signature));
+		goto unmap_and_exit;
+	}
+
+	/* Initialize the table entry */
+
+	acpi_gbl_root_table_list.tables[table_index].address = address;
+	acpi_gbl_root_table_list.tables[table_index].length = table->length;
+	acpi_gbl_root_table_list.tables[table_index].flags = flags;
+
+	ACPI_MOVE_32_TO_32(&
+			   (acpi_gbl_root_table_list.tables[table_index].
+			    signature), table->signature);
+
+	acpi_tb_print_table_header(address, table);
+
+	if (table_index == ACPI_TABLE_INDEX_DSDT) {
+
+		/* Global integer width is based upon revision of the DSDT */
+
+		acpi_ut_set_integer_width(table->revision);
+	}
+
+      unmap_and_exit:
+	acpi_os_unmap_memory(table, sizeof(struct acpi_table_header));
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_tb_get_root_table_entry
+ *
+ * PARAMETERS:  table_entry         - Pointer to the RSDT/XSDT table entry
+ *              table_entry_size    - sizeof 32 or 64 (RSDT or XSDT)
+ *
+ * RETURN:      Physical address extracted from the root table
+ *
+ * DESCRIPTION: Get one root table entry. Handles 32-bit and 64-bit cases on
+ *              both 32-bit and 64-bit platforms
+ *
+ * NOTE:        acpi_physical_address is 32-bit on 32-bit platforms, 64-bit on
+ *              64-bit platforms.
+ *
+ ******************************************************************************/
+
+static acpi_physical_address
+acpi_tb_get_root_table_entry(u8 * table_entry,
+			     acpi_native_uint table_entry_size)
+{
+	u64 address64;
+
+	/*
+	 * Get the table physical address (32-bit for RSDT, 64-bit for XSDT):
+	 * Note: Addresses are 32-bit aligned (not 64) in both RSDT and XSDT
+	 */
+	if (table_entry_size == sizeof(u32)) {
+		/*
+		 * 32-bit platform, RSDT: Return 32-bit table entry
+		 * 64-bit platform, RSDT: Expand 32-bit to 64-bit and return
+		 */
+		return ((acpi_physical_address)
+			(*ACPI_CAST_PTR(u32, table_entry)));
+	} else {
+		/*
+		 * 32-bit platform, XSDT: Truncate 64-bit to 32-bit and return
+		 * 64-bit platform, XSDT: Move (unaligned) 64-bit to local, return 64-bit
+		 */
+		ACPI_MOVE_64_TO_64(&address64, table_entry);
+
+#if ACPI_MACHINE_WIDTH == 32
+		if (address64 > ACPI_UINT32_MAX) {
+
+			/* Will truncate 64-bit address to 32 bits, issue warning */
+
+			ACPI_WARNING((AE_INFO,
+				      "64-bit Physical Address in XSDT is too large (%8.8X%8.8X), truncating",
+				      ACPI_FORMAT_UINT64(address64)));
+		}
+#endif
+		return ((acpi_physical_address) (address64));
+	}
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_tb_parse_root_table
+ *
+ * PARAMETERS:  Rsdp                    - Pointer to the RSDP
+ *              Flags                   - Flags
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: This function is called to parse the Root System Description
+ *              Table (RSDT or XSDT)
+ *
+ * NOTE:        Tables are mapped (not copied) for efficiency. The FACS must
+ *              be mapped and cannot be copied because it contains the actual
+ *              memory location of the ACPI Global Lock.
+ *
+ ******************************************************************************/
+
+acpi_status __init
+acpi_tb_parse_root_table(acpi_physical_address rsdp_address, u8 flags)
+{
+	struct acpi_table_rsdp *rsdp;
+	acpi_native_uint table_entry_size;
+	acpi_native_uint i;
+	u32 table_count;
+	struct acpi_table_header *table;
+	acpi_physical_address address;
+	u32 length;
+	u8 *table_entry;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(tb_parse_root_table);
+
+	/*
+	 * Map the entire RSDP and extract the address of the RSDT or XSDT
+	 */
+	rsdp = acpi_os_map_memory(rsdp_address, sizeof(struct acpi_table_rsdp));
+	if (!rsdp) {
+		return_ACPI_STATUS(AE_NO_MEMORY);
+	}
+
+	acpi_tb_print_table_header(rsdp_address,
+				   ACPI_CAST_PTR(struct acpi_table_header,
+						 rsdp));
+
+	/* Differentiate between RSDT and XSDT root tables */
+
+	if (rsdp->revision > 1 && rsdp->xsdt_physical_address) {
+		/*
+		 * Root table is an XSDT (64-bit physical addresses). We must use the
+		 * XSDT if the revision is > 1 and the XSDT pointer is present, as per
+		 * the ACPI specification.
+		 */
+		address = (acpi_physical_address) rsdp->xsdt_physical_address;
+		table_entry_size = sizeof(u64);
+	} else {
+		/* Root table is an RSDT (32-bit physical addresses) */
+
+		address = (acpi_physical_address) rsdp->rsdt_physical_address;
+		table_entry_size = sizeof(u32);
+	}
+
+	/*
+	 * It is not possible to map more than one entry in some environments,
+	 * so unmap the RSDP here before mapping other tables
+	 */
+	acpi_os_unmap_memory(rsdp, sizeof(struct acpi_table_rsdp));
+
+	/* Map the RSDT/XSDT table header to get the full table length */
+
+	table = acpi_os_map_memory(address, sizeof(struct acpi_table_header));
+	if (!table) {
+		return_ACPI_STATUS(AE_NO_MEMORY);
+	}
+
+	acpi_tb_print_table_header(address, table);
+
+	/* Get the length of the full table, verify length and map entire table */
+
+	length = table->length;
+	acpi_os_unmap_memory(table, sizeof(struct acpi_table_header));
+
+	if (length < sizeof(struct acpi_table_header)) {
+		ACPI_ERROR((AE_INFO, "Invalid length 0x%X in RSDT/XSDT",
+			    length));
+		return_ACPI_STATUS(AE_INVALID_TABLE_LENGTH);
+	}
+
+	table = acpi_os_map_memory(address, length);
+	if (!table) {
+		return_ACPI_STATUS(AE_NO_MEMORY);
+	}
+
+	/* Validate the root table checksum */
+
+	status = acpi_tb_verify_checksum(table, length);
+	if (ACPI_FAILURE(status)) {
+		acpi_os_unmap_memory(table, length);
+		return_ACPI_STATUS(status);
+	}
+
+	/* Calculate the number of tables described in the root table */
+
+	table_count =
+	    (u32) ((table->length -
+		    sizeof(struct acpi_table_header)) / table_entry_size);
+
+	/*
+	 * First two entries in the table array are reserved for the DSDT and FACS,
+	 * which are not actually present in the RSDT/XSDT - they come from the FADT
+	 */
+	table_entry =
+	    ACPI_CAST_PTR(u8, table) + sizeof(struct acpi_table_header);
+	acpi_gbl_root_table_list.count = 2;
+
+	/*
+	 * Initialize the root table array from the RSDT/XSDT
+	 */
+	for (i = 0; i < table_count; i++) {
+		if (acpi_gbl_root_table_list.count >=
+		    acpi_gbl_root_table_list.size) {
+
+			/* There is no more room in the root table array, attempt resize */
+
+			status = acpi_tb_resize_root_table_list();
+			if (ACPI_FAILURE(status)) {
+				ACPI_WARNING((AE_INFO,
+					      "Truncating %u table entries!",
+					      (unsigned)
+					      (acpi_gbl_root_table_list.size -
+					       acpi_gbl_root_table_list.
+					       count)));
+				break;
+			}
+		}
+
+		/* Get the table physical address (32-bit for RSDT, 64-bit for XSDT) */
+
+		acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].
+		    address =
+		    acpi_tb_get_root_table_entry(table_entry, table_entry_size);
+
+		table_entry += table_entry_size;
+		acpi_gbl_root_table_list.count++;
+	}
+
+	/*
+	 * It is not possible to map more than one entry in some environments,
+	 * so unmap the root table here before mapping other tables
+	 */
+	acpi_os_unmap_memory(table, length);
+
+	/*
+	 * Complete the initialization of the root table array by examining
+	 * the header of each table
+	 */
+	for (i = 2; i < acpi_gbl_root_table_list.count; i++) {
+		acpi_tb_install_table(acpi_gbl_root_table_list.tables[i].
+				      address, flags, NULL, i);
+
+		/* Special case for FADT - get the DSDT and FACS */
+
+		if (ACPI_COMPARE_NAME
+		    (&acpi_gbl_root_table_list.tables[i].signature,
+		     ACPI_SIG_FADT)) {
+			acpi_tb_parse_fadt(i, flags);
+		}
+	}
+
+	return_ACPI_STATUS(AE_OK);
+}
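
Two details of the root-table walk above are worth a worked example. First, the entry count falls directly out of the header: (length - 36) / entry size, since the common header is 36 bytes; an XSDT whose header reports length 100 therefore carries (100 - 36) / 8 = 8 entries. Second, entries are packed on 4-byte boundaries in both tables, so a 64-bit XSDT entry may be misaligned. A portable sketch of both pieces (memcpy() stands in for ACPI_MOVE_64_TO_64, and the 32-bit truncation warning is omitted):

#include <stdint.h>
#include <string.h>

#define ACPI_HEADER_SIZE 36u	/* sizeof the common ACPI table header */

/* Number of child-table pointers in an RSDT (4-byte) or XSDT (8-byte) body */
static uint32_t root_entry_count(uint32_t table_length, uint32_t entry_size)
{
	if (table_length < ACPI_HEADER_SIZE)
		return 0;	/* malformed; caller reports an invalid-length error */
	return (table_length - ACPI_HEADER_SIZE) / entry_size;
}

/* Extract one possibly-misaligned entry without dereferencing it directly */
static uint64_t get_root_table_entry(const uint8_t *entry, size_t entry_size)
{
	if (entry_size == sizeof(uint32_t)) {	/* RSDT: 32-bit entries */
		uint32_t addr32;

		memcpy(&addr32, entry, sizeof(addr32));
		return addr32;
	} else {				/* XSDT: 64-bit entries */
		uint64_t addr64;

		memcpy(&addr64, entry, sizeof(addr64));
		return addr64;
	}
}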
diff --git a/drivers/acpi/tables/tbxface.c b/drivers/acpi/tables/tbxface.c
index 5ba9303293ad..807978d5381a 100644
--- a/drivers/acpi/tables/tbxface.c
+++ b/drivers/acpi/tables/tbxface.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2006, R. Byron Moore
+ * Copyright (C) 2000 - 2007, R. Byron Moore
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -49,80 +49,158 @@
 #define _COMPONENT          ACPI_TABLES
 ACPI_MODULE_NAME("tbxface")
 
+/* Local prototypes */
+static acpi_status acpi_tb_load_namespace(void);
+
-/*******************************************************************************
- *
- * FUNCTION:    acpi_load_tables
- *
- * PARAMETERS:  None
- *
- * RETURN:      Status
- *
- * DESCRIPTION: This function is called to load the ACPI tables from the
- *              provided RSDT
- *
- ******************************************************************************/
-acpi_status acpi_load_tables(void)
-{
-	struct acpi_pointer rsdp_address;
-	acpi_status status;
-
-	ACPI_FUNCTION_TRACE(acpi_load_tables);
-
-	/* Get the RSDP */
-
-	status = acpi_os_get_root_pointer(ACPI_LOGICAL_ADDRESSING,
-					  &rsdp_address);
-	if (ACPI_FAILURE(status)) {
-		ACPI_EXCEPTION((AE_INFO, status, "Could not get the RSDP"));
-		goto error_exit;
-	}
-
-	/* Map and validate the RSDP */
-
-	acpi_gbl_table_flags = rsdp_address.pointer_type;
-
-	status = acpi_tb_verify_rsdp(&rsdp_address);
-	if (ACPI_FAILURE(status)) {
-		ACPI_EXCEPTION((AE_INFO, status, "During RSDP validation"));
-		goto error_exit;
-	}
-
-	/* Get the RSDT via the RSDP */
-
-	status = acpi_tb_get_table_rsdt();
-	if (ACPI_FAILURE(status)) {
-		ACPI_EXCEPTION((AE_INFO, status, "Could not load RSDT"));
-		goto error_exit;
-	}
-
-	/* Now get the tables needed by this subsystem (FADT, DSDT, etc.) */
-
-	status = acpi_tb_get_required_tables();
-	if (ACPI_FAILURE(status)) {
-		ACPI_EXCEPTION((AE_INFO, status,
-				"Could not get all required tables (DSDT/FADT/FACS)"));
-		goto error_exit;
-	}
-
-	ACPI_DEBUG_PRINT((ACPI_DB_INIT, "ACPI Tables successfully acquired\n"));
-
-	/* Load the namespace from the tables */
-
-	status = acpi_ns_load_namespace();
-	if (ACPI_FAILURE(status)) {
-		ACPI_EXCEPTION((AE_INFO, status, "Could not load namespace"));
-		goto error_exit;
-	}
-
-	return_ACPI_STATUS(AE_OK);
-
-      error_exit:
-	ACPI_EXCEPTION((AE_INFO, status, "Could not load tables"));
-	return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_load_tables)
-
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_allocate_root_table
+ *
+ * PARAMETERS:  initial_table_count - Size of initial_table_array, in number of
+ *                                    struct acpi_table_desc structures
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Allocate a root table array. Used by i_aSL compiler and
+ *              acpi_initialize_tables.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_allocate_root_table(u32 initial_table_count)
+{
+
+	acpi_gbl_root_table_list.size = initial_table_count;
+	acpi_gbl_root_table_list.flags = ACPI_ROOT_ALLOW_RESIZE;
+
+	return (acpi_tb_resize_root_table_list());
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_initialize_tables
+ *
+ * PARAMETERS:  initial_table_array - Pointer to an array of pre-allocated
+ *                                    struct acpi_table_desc structures. If NULL,
+ *                                    the array is dynamically allocated.
+ *              initial_table_count - Size of initial_table_array, in number of
+ *                                    struct acpi_table_desc structures
+ *              allow_realloc       - Flag to tell Table Manager if resize of
+ *                                    pre-allocated array is allowed. Ignored
+ *                                    if initial_table_array is NULL.
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Initialize the table manager, get the RSDP and RSDT/XSDT.
+ *
+ * NOTE:        Allows static allocation of the initial table array in order
+ *              to avoid the use of dynamic memory in confined environments
+ *              such as the kernel boot sequence where it may not be available.
+ *
+ *              If the host OS memory managers are initialized, use NULL for
+ *              initial_table_array, and the table will be dynamically allocated.
+ *
+ ******************************************************************************/
+
+acpi_status __init
+acpi_initialize_tables(struct acpi_table_desc * initial_table_array,
+		       u32 initial_table_count, u8 allow_resize)
+{
+	acpi_physical_address rsdp_address;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(acpi_initialize_tables);
+
+	/*
+	 * Set up the Root Table Array
+	 * Allocate the table array if requested
+	 */
+	if (!initial_table_array) {
+		status = acpi_allocate_root_table(initial_table_count);
+		if (ACPI_FAILURE(status)) {
+			return_ACPI_STATUS(status);
+		}
+	} else {
+		/* Root Table Array has been statically allocated by the host */
+
+		ACPI_MEMSET(initial_table_array, 0,
+			    initial_table_count *
+			    sizeof(struct acpi_table_desc));
+
+		acpi_gbl_root_table_list.tables = initial_table_array;
+		acpi_gbl_root_table_list.size = initial_table_count;
+		acpi_gbl_root_table_list.flags = ACPI_ROOT_ORIGIN_UNKNOWN;
+		if (allow_resize) {
+			acpi_gbl_root_table_list.flags |=
+			    ACPI_ROOT_ALLOW_RESIZE;
+		}
+	}
+
+	/* Get the address of the RSDP */
+
+	rsdp_address = acpi_os_get_root_pointer();
+	if (!rsdp_address) {
+		return_ACPI_STATUS(AE_NOT_FOUND);
+	}
+
+	/*
+	 * Get the root table (RSDT or XSDT) and extract all entries to the local
+	 * Root Table Array. This array contains the information of the RSDT/XSDT
+	 * in a common, more useable format.
+	 */
+	status =
+	    acpi_tb_parse_root_table(rsdp_address, ACPI_TABLE_ORIGIN_MAPPED);
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_reallocate_root_table
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Reallocate Root Table List into dynamic memory. Copies the
+ *              root list from the previously provided scratch area. Should
+ *              be called once dynamic memory allocation is available in the
+ *              kernel
+ *
+ ******************************************************************************/
+acpi_status acpi_reallocate_root_table(void)
+{
+	struct acpi_table_desc *tables;
+	acpi_size new_size;
+
+	ACPI_FUNCTION_TRACE(acpi_reallocate_root_table);
+
+	/*
+	 * Only reallocate the root table if the host provided a static buffer
+	 * for the table array in the call to acpi_initialize_tables.
+	 */
+	if (acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) {
+		return_ACPI_STATUS(AE_SUPPORT);
+	}
+
+	new_size =
+	    (acpi_gbl_root_table_list.count +
+	     ACPI_ROOT_TABLE_SIZE_INCREMENT) * sizeof(struct acpi_table_desc);
+
+	/* Create new array and copy the old array */
+
+	tables = ACPI_ALLOCATE_ZEROED(new_size);
+	if (!tables) {
+		return_ACPI_STATUS(AE_NO_MEMORY);
+	}
+
+	ACPI_MEMCPY(tables, acpi_gbl_root_table_list.tables, new_size);
+
+	acpi_gbl_root_table_list.size = acpi_gbl_root_table_list.count;
+	acpi_gbl_root_table_list.tables = tables;
+	acpi_gbl_root_table_list.flags =
+	    ACPI_ROOT_ORIGIN_ALLOCATED | ACPI_ROOT_ALLOW_RESIZE;
+
+	return_ACPI_STATUS(AE_OK);
+}
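
Taken together, acpi_initialize_tables() and acpi_reallocate_root_table() give the host OS a two-phase bring-up: parse the RSDT/XSDT into a static scratch array before the allocator exists, then migrate the list to the heap. A hypothetical sketch of that sequence; the wrapper names and the capacity of 32 are illustrative, not from this patch, and only the two acpi_* calls are real entry points:

#include <acpi/acpi.h>

#define NR_INITIAL_TABLES 32	/* illustrative scratch capacity */

static struct acpi_table_desc initial_tables[NR_INITIAL_TABLES];

void __init early_table_setup(void)
{
	/* No dynamic memory yet: hand ACPICA the static array and forbid
	 * resize (entries beyond the capacity are truncated with a warning) */
	(void)acpi_initialize_tables(initial_tables, NR_INITIAL_TABLES, 0);
}

void __init late_table_setup(void)
{
	/* Allocator is live: copy the root list into dynamic memory so it
	 * can grow when SSDTs are loaded at run time */
	(void)acpi_reallocate_root_table();
}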
126/******************************************************************************* 204/*******************************************************************************
127 * 205 *
128 * FUNCTION: acpi_load_table 206 * FUNCTION: acpi_load_table
@@ -141,342 +219,405 @@ ACPI_EXPORT_SYMBOL(acpi_load_tables)
141acpi_status acpi_load_table(struct acpi_table_header *table_ptr) 219acpi_status acpi_load_table(struct acpi_table_header *table_ptr)
142{ 220{
143 acpi_status status; 221 acpi_status status;
144 struct acpi_table_desc table_info; 222 acpi_native_uint table_index;
145 struct acpi_pointer address; 223 struct acpi_table_desc table_desc;
146
147 ACPI_FUNCTION_TRACE(acpi_load_table);
148
149 if (!table_ptr) {
150 return_ACPI_STATUS(AE_BAD_PARAMETER);
151 }
152
153 /* Copy the table to a local buffer */
154 224
155 address.pointer_type = ACPI_LOGICAL_POINTER | ACPI_LOGICAL_ADDRESSING; 225 if (!table_ptr)
156 address.pointer.logical = table_ptr; 226 return AE_BAD_PARAMETER;
157
158 status = acpi_tb_get_table_body(&address, table_ptr, &table_info);
159 if (ACPI_FAILURE(status)) {
160 return_ACPI_STATUS(status);
161 }
162
163 /* Check signature for a valid table type */
164
165 status = acpi_tb_recognize_table(&table_info, ACPI_TABLE_ALL);
166 if (ACPI_FAILURE(status)) {
167 return_ACPI_STATUS(status);
168 }
169 227
170 /* Install the new table into the local data structures */ 228 ACPI_MEMSET(&table_desc, 0, sizeof(struct acpi_table_desc));
229 table_desc.pointer = table_ptr;
230 table_desc.length = table_ptr->length;
231 table_desc.flags = ACPI_TABLE_ORIGIN_UNKNOWN;
171 232
172 status = acpi_tb_install_table(&table_info); 233 /*
234 * Install the new table into the local data structures
235 */
236 status = acpi_tb_add_table(&table_desc, &table_index);
173 if (ACPI_FAILURE(status)) { 237 if (ACPI_FAILURE(status)) {
174 if (status == AE_ALREADY_EXISTS) { 238 return status;
175
176 /* Table already exists, no error */
177
178 status = AE_OK;
179 }
180
181 /* Free table allocated by acpi_tb_get_table_body */
182
183 acpi_tb_delete_single_table(&table_info);
184 return_ACPI_STATUS(status);
185 } 239 }
240 status = acpi_ns_load_table(table_index, acpi_gbl_root_node);
241 return status;
242}
186 243
187 /* Convert the table to common format if necessary */ 244ACPI_EXPORT_SYMBOL(acpi_load_table)
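
The new acpi_load_table() is now a thin wrapper: acpi_tb_add_table() installs the descriptor, then acpi_ns_load_table() parses its AML. A hedged sketch of handing it a caller-owned SSDT image (the buffer name is hypothetical):

	#include <acpi/acpi.h>

	acpi_status example_load_ssdt(struct acpi_table_header *ssdt_image)
	{
		/* Adds the table to the root list, then loads its AML into
		 * the namespace under the root node */
		return acpi_load_table(ssdt_image);
	}
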
188
189 switch (table_info.type) {
190 case ACPI_TABLE_ID_FADT:
191
192 status = acpi_tb_convert_table_fadt();
193 break;
194
195 case ACPI_TABLE_ID_FACS:
196 245
197 status = acpi_tb_build_common_facs(&table_info); 246/******************************************************************************
198 break; 247 *
248 * FUNCTION: acpi_get_table_header
249 *
250 * PARAMETERS: Signature - ACPI signature of needed table
251 * Instance - Which instance (for SSDTs)
252 * out_table_header - The pointer to the table header to fill
253 *
254 * RETURN: Status and pointer to mapped table header
255 *
256 * DESCRIPTION: Finds an ACPI table header.
257 *
258	 * NOTE:        Caller is responsible for unmapping the header with
259 * acpi_os_unmap_memory
260 *
261 *****************************************************************************/
262acpi_status
263acpi_get_table_header(char *signature,
264 acpi_native_uint instance,
265 struct acpi_table_header *out_table_header)
266{
267 acpi_native_uint i;
268 acpi_native_uint j;
269 struct acpi_table_header *header;
199 270
200 default: 271 /* Parameter validation */
201 /* Load table into namespace if it contains executable AML */
202 272
203 status = 273 if (!signature || !out_table_header) {
204 acpi_ns_load_table(table_info.installed_desc, 274 return (AE_BAD_PARAMETER);
205 acpi_gbl_root_node);
206 break;
207 } 275 }
208 276
209 if (ACPI_FAILURE(status)) { 277 /*
278 * Walk the root table list
279 */
280 for (i = 0, j = 0; i < acpi_gbl_root_table_list.count; i++) {
281 if (!ACPI_COMPARE_NAME
282 (&(acpi_gbl_root_table_list.tables[i].signature),
283 signature)) {
284 continue;
285 }
210 286
211 /* Uninstall table and free the buffer */ 287 if (++j < instance) {
288 continue;
289 }
212 290
213 (void)acpi_tb_uninstall_table(table_info.installed_desc); 291 if (!acpi_gbl_root_table_list.tables[i].pointer) {
292 if ((acpi_gbl_root_table_list.tables[i].
293 flags & ACPI_TABLE_ORIGIN_MASK) ==
294 ACPI_TABLE_ORIGIN_MAPPED) {
295 header =
296 acpi_os_map_memory(acpi_gbl_root_table_list.
297 tables[i].address,
298 sizeof(struct
299 acpi_table_header));
300 if (!header) {
301 return AE_NO_MEMORY;
302 }
303 ACPI_MEMCPY(out_table_header, header,
304 sizeof(struct acpi_table_header));
305 acpi_os_unmap_memory(header,
306 sizeof(struct
307 acpi_table_header));
308 } else {
309 return AE_NOT_FOUND;
310 }
311 } else {
312 ACPI_MEMCPY(out_table_header,
313 acpi_gbl_root_table_list.tables[i].pointer,
314 sizeof(struct acpi_table_header));
315 }
316 return (AE_OK);
214 } 317 }
215 318
216 return_ACPI_STATUS(status); 319 return (AE_NOT_FOUND);
217} 320}
218 321
219ACPI_EXPORT_SYMBOL(acpi_load_table) 322ACPI_EXPORT_SYMBOL(acpi_get_table_header)
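
A sketch of walking every SSDT header with acpi_get_table_header(); instances are 1-based, and the header is copied into a caller-provided buffer:

	#include <acpi/acpi.h>

	void example_list_ssdt_headers(void)
	{
		struct acpi_table_header header;
		acpi_native_uint instance;

		for (instance = 1; ; instance++) {
			if (ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_SSDT,
							       instance, &header)))
				break;	/* no more SSDTs */
			acpi_os_printf("SSDT %u: length %u\n",
				       (u32)instance, header.length);
		}
	}
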
220 323
221/******************************************************************************* 324
325/******************************************************************************
222 * 326 *
223 * FUNCTION: acpi_unload_table_id 327 * FUNCTION: acpi_unload_table_id
224 * 328 *
225 * PARAMETERS: table_type - Type of table to be unloaded 329 * PARAMETERS: id - Owner ID of the table to be removed.
226 * id - Owner ID of the table to be removed.
227 * 330 *
228 * RETURN: Status 331 * RETURN: Status
229 * 332 *
230 * DESCRIPTION: This routine is used to force the unload of a table (by id) 333 * DESCRIPTION: This routine is used to force the unload of a table (by id)
231 * 334 *
232 ******************************************************************************/ 335 ******************************************************************************/
233acpi_status acpi_unload_table_id(acpi_table_type table_type, acpi_owner_id id) 336acpi_status acpi_unload_table_id(acpi_owner_id id)
234{ 337{
235 struct acpi_table_desc *table_desc; 338 int i;
236 acpi_status status; 339 acpi_status status = AE_NOT_EXIST;
237 340
238 ACPI_FUNCTION_TRACE(acpi_unload_table); 341 ACPI_FUNCTION_TRACE(acpi_unload_table);
239 342
240 /* Parameter validation */
241 if (table_type > ACPI_TABLE_ID_MAX)
242 return_ACPI_STATUS(AE_BAD_PARAMETER);
243
244 /* Find table from the requested type list */ 343 /* Find table from the requested type list */
245 table_desc = acpi_gbl_table_lists[table_type].next; 344 for (i = 0; i < acpi_gbl_root_table_list.count; ++i) {
246 while (table_desc && table_desc->owner_id != id) 345 if (id != acpi_gbl_root_table_list.tables[i].owner_id) {
247 table_desc = table_desc->next; 346 continue;
248 347 }
249 if (!table_desc) 348 /*
250 return_ACPI_STATUS(AE_NOT_EXIST); 349 * Delete all namespace objects owned by this table. Note that these
251 350 * objects can appear anywhere in the namespace by virtue of the AML
252 /* 351 * "Scope" operator. Thus, we need to track ownership by an ID, not
253 * Delete all namespace objects owned by this table. Note that these 352 * simply a position within the hierarchy
254 * objects can appear anywhere in the namespace by virtue of the AML 353 */
255 * "Scope" operator. Thus, we need to track ownership by an ID, not 354 acpi_tb_delete_namespace_by_owner(i);
256 * simply a position within the hierarchy 355 acpi_tb_release_owner_id(i);
257 */ 356 acpi_tb_set_table_loaded_flag(i, FALSE);
258 acpi_ns_delete_namespace_by_owner(table_desc->owner_id); 357 }
259 358 return_ACPI_STATUS(status);
260 status = acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
261 if (ACPI_FAILURE(status))
262 return_ACPI_STATUS(status);
263
264 (void)acpi_tb_uninstall_table(table_desc);
265
266 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
267
268 return_ACPI_STATUS(AE_OK);
269} 359}
270 360
271ACPI_EXPORT_SYMBOL(acpi_unload_table_id) 361ACPI_EXPORT_SYMBOL(acpi_unload_table_id)
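
A sketch of forcing a table out by owner ID (the ID would have been recorded when the table was loaded). Note that in the version above the local status starts as AE_NOT_EXIST and is never updated inside the loop, so the return value is not a reliable success indicator:

	#include <acpi/acpi.h>

	void example_force_unload(acpi_owner_id owner)
	{
		/* Deletes all namespace objects owned by the table, releases
		 * the owner ID, and marks the table as unloaded */
		(void)acpi_unload_table_id(owner);
	}
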
272 362
273#ifdef ACPI_FUTURE_USAGE
274/******************************************************************************* 363/*******************************************************************************
275 * 364 *
276 * FUNCTION: acpi_unload_table 365 * FUNCTION: acpi_get_table
277 * 366 *
278 * PARAMETERS: table_type - Type of table to be unloaded 367 * PARAMETERS: Signature - ACPI signature of needed table
368 * Instance - Which instance (for SSDTs)
369 * out_table - Where the pointer to the table is returned
279 * 370 *
280 * RETURN: Status 371 * RETURN: Status and pointer to table
281 * 372 *
282 * DESCRIPTION: This routine is used to force the unload of a table 373 * DESCRIPTION: Finds and verifies an ACPI table.
283 * 374 *
284 ******************************************************************************/ 375 *****************************************************************************/
285acpi_status acpi_unload_table(acpi_table_type table_type) 376acpi_status
377acpi_get_table(char *signature,
378 acpi_native_uint instance, struct acpi_table_header ** out_table)
286{ 379{
287 struct acpi_table_desc *table_desc; 380 acpi_native_uint i;
288 381 acpi_native_uint j;
289 ACPI_FUNCTION_TRACE(acpi_unload_table); 382 acpi_status status;
290 383
291 /* Parameter validation */ 384 /* Parameter validation */
292 385
293 if (table_type > ACPI_TABLE_ID_MAX) { 386 if (!signature || !out_table) {
294 return_ACPI_STATUS(AE_BAD_PARAMETER); 387 return (AE_BAD_PARAMETER);
295 } 388 }
296 389
297 /* Find all tables of the requested type */ 390 /*
391 * Walk the root table list
392 */
393 for (i = 0, j = 0; i < acpi_gbl_root_table_list.count; i++) {
394 if (!ACPI_COMPARE_NAME
395 (&(acpi_gbl_root_table_list.tables[i].signature),
396 signature)) {
397 continue;
398 }
298 399
299 table_desc = acpi_gbl_table_lists[table_type].next; 400 if (++j < instance) {
300 if (!table_desc) { 401 continue;
301 return_ACPI_STATUS(AE_NOT_EXIST); 402 }
302 }
303 403
304 while (table_desc) { 404 status =
305 /* 405 acpi_tb_verify_table(&acpi_gbl_root_table_list.tables[i]);
306 * Delete all namespace objects owned by this table. Note that these 406 if (ACPI_SUCCESS(status)) {
307 * objects can appear anywhere in the namespace by virtue of the AML 407 *out_table = acpi_gbl_root_table_list.tables[i].pointer;
308 * "Scope" operator. Thus, we need to track ownership by an ID, not 408 }
309 * simply a position within the hierarchy
310 */
311 acpi_ns_delete_namespace_by_owner(table_desc->owner_id);
312 table_desc = table_desc->next;
313 }
314 409
315 /* Delete (or unmap) all tables of this type */ 410 if (!acpi_gbl_permanent_mmap) {
411 acpi_gbl_root_table_list.tables[i].pointer = 0;
412 }
316 413
317 acpi_tb_delete_tables_by_type(table_type); 414 return (status);
318 return_ACPI_STATUS(AE_OK); 415 }
416
417 return (AE_NOT_FOUND);
319} 418}
320 419
321ACPI_EXPORT_SYMBOL(acpi_unload_table) 420ACPI_EXPORT_SYMBOL(acpi_get_table)
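
A sketch of the common acpi_get_table() case, fetching the first (and only) FADT. Note the code above: before acpi_gbl_permanent_mmap is set, the stored pointer is dropped after the call, so early-boot callers should copy what they need:

	#include <acpi/acpi.h>

	void example_read_fadt(void)
	{
		struct acpi_table_header *fadt;

		if (ACPI_SUCCESS(acpi_get_table(ACPI_SIG_FADT, 1, &fadt)))
			acpi_os_printf("FADT revision %u, length %u\n",
				       (u32)fadt->revision, fadt->length);
	}
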
322 421
323/******************************************************************************* 422/*******************************************************************************
324 * 423 *
325 * FUNCTION: acpi_get_table_header 424 * FUNCTION: acpi_get_table_by_index
326 * 425 *
327 * PARAMETERS: table_type - one of the defined table types 426 * PARAMETERS: table_index - Table index
328 * Instance - the non zero instance of the table, allows 427 * Table - Where the pointer to the table is returned
329 * support for multiple tables of the same type
330 * see acpi_gbl_acpi_table_flag
331 * out_table_header - pointer to the struct acpi_table_header if successful
332 * 428 *
333 * DESCRIPTION: This function is called to get an ACPI table header. The caller 429 * RETURN: Status and pointer to the table
334 * supplies an pointer to a data area sufficient to contain an ACPI
335 * struct acpi_table_header structure.
336 * 430 *
337 * The header contains a length field that can be used to determine 431 * DESCRIPTION: Obtain a table by an index into the global table list.
338 * the size of the buffer needed to contain the entire table. This
339 * function is not valid for the RSD PTR table since it does not
340 * have a standard header and is fixed length.
341 * 432 *
342 ******************************************************************************/ 433 ******************************************************************************/
343acpi_status 434acpi_status
344acpi_get_table_header(acpi_table_type table_type, 435acpi_get_table_by_index(acpi_native_uint table_index,
345 u32 instance, struct acpi_table_header *out_table_header) 436 struct acpi_table_header ** table)
346{ 437{
347 struct acpi_table_header *tbl_ptr;
348 acpi_status status; 438 acpi_status status;
349 439
350 ACPI_FUNCTION_TRACE(acpi_get_table_header); 440 ACPI_FUNCTION_TRACE(acpi_get_table_by_index);
441
442 /* Parameter validation */
351 443
352 if ((instance == 0) || 444 if (!table) {
353 (table_type == ACPI_TABLE_ID_RSDP) || (!out_table_header)) {
354 return_ACPI_STATUS(AE_BAD_PARAMETER); 445 return_ACPI_STATUS(AE_BAD_PARAMETER);
355 } 446 }
356 447
357 /* Check the table type and instance */ 448 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
449
450 /* Validate index */
358 451
359 if ((table_type > ACPI_TABLE_ID_MAX) || 452 if (table_index >= acpi_gbl_root_table_list.count) {
360 (ACPI_IS_SINGLE_TABLE(acpi_gbl_table_data[table_type].flags) && 453 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
361 instance > 1)) {
362 return_ACPI_STATUS(AE_BAD_PARAMETER); 454 return_ACPI_STATUS(AE_BAD_PARAMETER);
363 } 455 }
364 456
365 /* Get a pointer to the entire table */ 457 if (!acpi_gbl_root_table_list.tables[table_index].pointer) {
366 458
367 status = acpi_tb_get_table_ptr(table_type, instance, &tbl_ptr); 459 /* Table is not mapped, map it */
368 if (ACPI_FAILURE(status)) {
369 return_ACPI_STATUS(status);
370 }
371 460
372 /* The function will return a NULL pointer if the table is not loaded */ 461 status =
373 462 acpi_tb_verify_table(&acpi_gbl_root_table_list.
374 if (tbl_ptr == NULL) { 463 tables[table_index]);
375 return_ACPI_STATUS(AE_NOT_EXIST); 464 if (ACPI_FAILURE(status)) {
465 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
466 return_ACPI_STATUS(status);
467 }
376 } 468 }
377 469
378 /* Copy the header to the caller's buffer */ 470 *table = acpi_gbl_root_table_list.tables[table_index].pointer;
379 471 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
380 ACPI_MEMCPY(ACPI_CAST_PTR(void, out_table_header), 472 return_ACPI_STATUS(AE_OK);
381 ACPI_CAST_PTR(void, tbl_ptr),
382 sizeof(struct acpi_table_header));
383
384 return_ACPI_STATUS(status);
385} 473}
386 474
387ACPI_EXPORT_SYMBOL(acpi_get_table_header) 475ACPI_EXPORT_SYMBOL(acpi_get_table_by_index)
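
acpi_get_table_by_index() fails for any index at or beyond the list count, which makes it a natural iterator over the whole root list. A sketch:

	#include <acpi/acpi.h>

	void example_walk_root_list(void)
	{
		struct acpi_table_header *table;
		acpi_native_uint i;

		for (i = 0; ACPI_SUCCESS(acpi_get_table_by_index(i, &table)); i++)
			acpi_os_printf("table %u: %4.4s\n",
				       (u32)i, table->signature);
	}
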
388#endif /* ACPI_FUTURE_USAGE */
389 476
390/******************************************************************************* 477/*******************************************************************************
391 * 478 *
392 * FUNCTION: acpi_get_table 479 * FUNCTION: acpi_tb_load_namespace
393 * 480 *
394 * PARAMETERS: table_type - one of the defined table types 481 * PARAMETERS: None
395 * Instance - the non zero instance of the table, allows
396 * support for multiple tables of the same type
397 * see acpi_gbl_acpi_table_flag
398 * ret_buffer - pointer to a structure containing a buffer to
399 * receive the table
400 * 482 *
401 * RETURN: Status 483 * RETURN: Status
402 * 484 *
403 * DESCRIPTION: This function is called to get an ACPI table. The caller 485 * DESCRIPTION: Load the namespace from the DSDT and all SSDTs/PSDTs found in
404 * supplies an out_buffer large enough to contain the entire ACPI 486 * the RSDT/XSDT.
405 * table. The caller should call the acpi_get_table_header function
406 * first to determine the buffer size needed. Upon completion
407 * the out_buffer->Length field will indicate the number of bytes
408 * copied into the out_buffer->buf_ptr buffer. This table will be
409 * a complete table including the header.
410 * 487 *
411 ******************************************************************************/ 488 ******************************************************************************/
412acpi_status 489static acpi_status acpi_tb_load_namespace(void)
413acpi_get_table(acpi_table_type table_type,
414 u32 instance, struct acpi_buffer *ret_buffer)
415{ 490{
416 struct acpi_table_header *tbl_ptr;
417 acpi_status status; 491 acpi_status status;
418 acpi_size table_length; 492 struct acpi_table_header *table;
493 acpi_native_uint i;
419 494
420 ACPI_FUNCTION_TRACE(acpi_get_table); 495 ACPI_FUNCTION_TRACE(tb_load_namespace);
421 496
422 /* Parameter validation */ 497 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
423 498
424 if (instance == 0) { 499 /*
425 return_ACPI_STATUS(AE_BAD_PARAMETER); 500 * Load the namespace. The DSDT is required, but any SSDT and PSDT tables
501 * are optional.
502 */
503 if (!acpi_gbl_root_table_list.count ||
504 !ACPI_COMPARE_NAME(&
505 (acpi_gbl_root_table_list.
506 tables[ACPI_TABLE_INDEX_DSDT].signature),
507 ACPI_SIG_DSDT)
508 ||
509 ACPI_FAILURE(acpi_tb_verify_table
510 (&acpi_gbl_root_table_list.
511 tables[ACPI_TABLE_INDEX_DSDT]))) {
512 status = AE_NO_ACPI_TABLES;
513 goto unlock_and_exit;
426 } 514 }
427 515
428 status = acpi_ut_validate_buffer(ret_buffer); 516 /*
429 if (ACPI_FAILURE(status)) { 517 * Find DSDT table
430 return_ACPI_STATUS(status); 518 */
519 status =
520 acpi_os_table_override(acpi_gbl_root_table_list.
521 tables[ACPI_TABLE_INDEX_DSDT].pointer,
522 &table);
523 if (ACPI_SUCCESS(status) && table) {
524 /*
525 * DSDT table has been found
526 */
527 acpi_tb_delete_table(&acpi_gbl_root_table_list.
528 tables[ACPI_TABLE_INDEX_DSDT]);
529 acpi_gbl_root_table_list.tables[ACPI_TABLE_INDEX_DSDT].pointer =
530 table;
531 acpi_gbl_root_table_list.tables[ACPI_TABLE_INDEX_DSDT].length =
532 table->length;
533 acpi_gbl_root_table_list.tables[ACPI_TABLE_INDEX_DSDT].flags =
534 ACPI_TABLE_ORIGIN_UNKNOWN;
535
536 ACPI_INFO((AE_INFO, "Table DSDT replaced by host OS"));
537 acpi_tb_print_table_header(0, table);
431 } 538 }
432 539
433 /* Check the table type and instance */ 540 status =
541 acpi_tb_verify_table(&acpi_gbl_root_table_list.
542 tables[ACPI_TABLE_INDEX_DSDT]);
543 if (ACPI_FAILURE(status)) {
434 544
435 if ((table_type > ACPI_TABLE_ID_MAX) || 545 /* A valid DSDT is required */
436 (ACPI_IS_SINGLE_TABLE(acpi_gbl_table_data[table_type].flags) && 546
437 instance > 1)) { 547 status = AE_NO_ACPI_TABLES;
438 return_ACPI_STATUS(AE_BAD_PARAMETER); 548 goto unlock_and_exit;
439 } 549 }
440 550
441 /* Get a pointer to the entire table */ 551 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
442 552
443 status = acpi_tb_get_table_ptr(table_type, instance, &tbl_ptr); 553 /*
554 * Load and parse tables.
555 */
556 status = acpi_ns_load_table(ACPI_TABLE_INDEX_DSDT, acpi_gbl_root_node);
444 if (ACPI_FAILURE(status)) { 557 if (ACPI_FAILURE(status)) {
445 return_ACPI_STATUS(status); 558 return_ACPI_STATUS(status);
446 } 559 }
447 560
448 /* 561 /*
449 * acpi_tb_get_table_ptr will return a NULL pointer if the 562 * Load any SSDT or PSDT tables. Note: Loop leaves tables locked
450 * table is not loaded.
451 */ 563 */
452 if (tbl_ptr == NULL) { 564 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
453 return_ACPI_STATUS(AE_NOT_EXIST); 565 for (i = 0; i < acpi_gbl_root_table_list.count; ++i) {
566 if ((!ACPI_COMPARE_NAME
567 (&(acpi_gbl_root_table_list.tables[i].signature),
568 ACPI_SIG_SSDT)
569 &&
570 !ACPI_COMPARE_NAME(&
571 (acpi_gbl_root_table_list.tables[i].
572 signature), ACPI_SIG_PSDT))
573 ||
574 ACPI_FAILURE(acpi_tb_verify_table
575 (&acpi_gbl_root_table_list.tables[i]))) {
576 continue;
577 }
578
579 /* Ignore errors while loading tables, get as many as possible */
580
581 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
582 (void)acpi_ns_load_table(i, acpi_gbl_root_node);
583 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
454 } 584 }
455 585
456 /* Get the table length */ 586 ACPI_DEBUG_PRINT((ACPI_DB_INIT, "ACPI Tables successfully acquired\n"));
457 587
458 if (table_type == ACPI_TABLE_ID_RSDP) { 588 unlock_and_exit:
589 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
590 return_ACPI_STATUS(status);
591}
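
acpi_tb_load_namespace() gives the host one chance to swap the DSDT through acpi_os_table_override(). A minimal no-op sketch of that hook, assuming the (existing_table, new_table) shape implied by the call site above; a host with nothing to substitute just returns a NULL new_table:

	#include <acpi/acpi.h>

	acpi_status
	example_table_override(struct acpi_table_header *existing_table,
			       struct acpi_table_header **new_table)
	{
		if (!existing_table || !new_table)
			return AE_BAD_PARAMETER;

		*new_table = NULL;	/* NULL means: keep the firmware table */
		return AE_OK;
	}
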
459 592
460 /* RSD PTR is the only "table" without a header */ 593/*******************************************************************************
594 *
595 * FUNCTION: acpi_load_tables
596 *
597 * PARAMETERS: None
598 *
599 * RETURN: Status
600 *
601 * DESCRIPTION: Load the ACPI tables from the RSDT/XSDT
602 *
603 ******************************************************************************/
461 604
462 table_length = sizeof(struct rsdp_descriptor); 605acpi_status acpi_load_tables(void)
463 } else { 606{
464 table_length = (acpi_size) tbl_ptr->length; 607 acpi_status status;
465 }
466 608
467 /* Validate/Allocate/Clear caller buffer */ 609 ACPI_FUNCTION_TRACE(acpi_load_tables);
468 610
469 status = acpi_ut_initialize_buffer(ret_buffer, table_length); 611 /*
612 * Load the namespace from the tables
613 */
614 status = acpi_tb_load_namespace();
470 if (ACPI_FAILURE(status)) { 615 if (ACPI_FAILURE(status)) {
471 return_ACPI_STATUS(status); 616 ACPI_EXCEPTION((AE_INFO, status,
617 "While loading namespace from ACPI tables"));
472 } 618 }
473 619
474 /* Copy the table to the buffer */ 620 return_ACPI_STATUS(status);
475
476 ACPI_MEMCPY(ACPI_CAST_PTR(void, ret_buffer->pointer),
477 ACPI_CAST_PTR(void, tbl_ptr), table_length);
478
479 return_ACPI_STATUS(AE_OK);
480} 621}
481 622
482ACPI_EXPORT_SYMBOL(acpi_get_table) 623ACPI_EXPORT_SYMBOL(acpi_load_tables)
diff --git a/drivers/acpi/tables/tbxfroot.c b/drivers/acpi/tables/tbxfroot.c
index da2648bbdbc0..cf8fa514189f 100644
--- a/drivers/acpi/tables/tbxfroot.c
+++ b/drivers/acpi/tables/tbxfroot.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -48,16 +48,15 @@
48ACPI_MODULE_NAME("tbxfroot") 48ACPI_MODULE_NAME("tbxfroot")
49 49
50/* Local prototypes */ 50/* Local prototypes */
51static acpi_status
52acpi_tb_find_rsdp(struct acpi_table_desc *table_info, u32 flags);
53
54static u8 *acpi_tb_scan_memory_for_rsdp(u8 * start_address, u32 length); 51static u8 *acpi_tb_scan_memory_for_rsdp(u8 * start_address, u32 length);
55 52
53static acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp);
54
56/******************************************************************************* 55/*******************************************************************************
57 * 56 *
58 * FUNCTION: acpi_tb_validate_rsdp 57 * FUNCTION: acpi_tb_validate_rsdp
59 * 58 *
60 * PARAMETERS: Rsdp - Pointer to unvalidated RSDP 59 * PARAMETERS: Rsdp - Pointer to unvalidated RSDP
61 * 60 *
62 * RETURN: Status 61 * RETURN: Status
63 * 62 *
@@ -65,14 +64,18 @@ static u8 *acpi_tb_scan_memory_for_rsdp(u8 * start_address, u32 length);
65 * 64 *
66 ******************************************************************************/ 65 ******************************************************************************/
67 66
68acpi_status acpi_tb_validate_rsdp(struct rsdp_descriptor *rsdp) 67static acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp)
69{ 68{
70 ACPI_FUNCTION_ENTRY(); 69 ACPI_FUNCTION_ENTRY();
71 70
72 /* 71 /*
73 * The signature and checksum must both be correct 72 * The signature and checksum must both be correct
73 *
74 * Note: Sometimes there exists more than one RSDP in memory; the valid
75 * RSDP has a valid checksum, all others have an invalid checksum.
74 */ 76 */
75 if (ACPI_STRNCMP((char *)rsdp, RSDP_SIG, sizeof(RSDP_SIG) - 1) != 0) { 77 if (ACPI_STRNCMP((char *)rsdp, ACPI_SIG_RSDP, sizeof(ACPI_SIG_RSDP) - 1)
78 != 0) {
76 79
77 /* Nope, BAD Signature */ 80 /* Nope, BAD Signature */
78 81
@@ -81,14 +84,14 @@ acpi_status acpi_tb_validate_rsdp(struct rsdp_descriptor *rsdp)
81 84
82 /* Check the standard checksum */ 85 /* Check the standard checksum */
83 86
84 if (acpi_tb_sum_table(rsdp, ACPI_RSDP_CHECKSUM_LENGTH) != 0) { 87 if (acpi_tb_checksum((u8 *) rsdp, ACPI_RSDP_CHECKSUM_LENGTH) != 0) {
85 return (AE_BAD_CHECKSUM); 88 return (AE_BAD_CHECKSUM);
86 } 89 }
87 90
88 /* Check extended checksum if table version >= 2 */ 91 /* Check extended checksum if table version >= 2 */
89 92
90 if ((rsdp->revision >= 2) && 93 if ((rsdp->revision >= 2) &&
91 (acpi_tb_sum_table(rsdp, ACPI_RSDP_XCHECKSUM_LENGTH) != 0)) { 94 (acpi_tb_checksum((u8 *) rsdp, ACPI_RSDP_XCHECKSUM_LENGTH) != 0)) {
92 return (AE_BAD_CHECKSUM); 95 return (AE_BAD_CHECKSUM);
93 } 96 }
94 97
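
The validation above relies on the simple ACPI checksum rule: the covered bytes must sum to zero modulo 256. A standalone sketch equivalent to what acpi_tb_checksum() computes over the RSDP's standard and extended lengths:

	#include <acpi/acpi.h>

	static u8 example_checksum(u8 *buffer, u32 length)
	{
		u8 sum = 0;

		while (length--)
			sum = (u8)(sum + *buffer++);

		return sum;	/* zero means the checksum is valid */
	}
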
@@ -97,314 +100,123 @@ acpi_status acpi_tb_validate_rsdp(struct rsdp_descriptor *rsdp)
97 100
98/******************************************************************************* 101/*******************************************************************************
99 * 102 *
100 * FUNCTION: acpi_tb_find_table 103 * FUNCTION: acpi_tb_find_rsdp
101 *
102 * PARAMETERS: Signature - String with ACPI table signature
103 * oem_id - String with the table OEM ID
104 * oem_table_id - String with the OEM Table ID
105 * table_ptr - Where the table pointer is returned
106 *
107 * RETURN: Status
108 * 104 *
109 * DESCRIPTION: Find an ACPI table (in the RSDT/XSDT) that matches the 105 * PARAMETERS: table_address - Where the table pointer is returned
110 * Signature, OEM ID and OEM Table ID.
111 * 106 *
112 ******************************************************************************/ 107 * RETURN: Status, RSDP physical address
113
114acpi_status
115acpi_tb_find_table(char *signature,
116 char *oem_id,
117 char *oem_table_id, struct acpi_table_header ** table_ptr)
118{
119 acpi_status status;
120 struct acpi_table_header *table;
121
122 ACPI_FUNCTION_TRACE(tb_find_table);
123
124 /* Validate string lengths */
125
126 if ((ACPI_STRLEN(signature) > ACPI_NAME_SIZE) ||
127 (ACPI_STRLEN(oem_id) > sizeof(table->oem_id)) ||
128 (ACPI_STRLEN(oem_table_id) > sizeof(table->oem_table_id))) {
129 return_ACPI_STATUS(AE_AML_STRING_LIMIT);
130 }
131
132 if (ACPI_COMPARE_NAME(signature, DSDT_SIG)) {
133 /*
134 * The DSDT pointer is contained in the FADT, not the RSDT.
135 * This code should suffice, because the only code that would perform
136 * a "find" on the DSDT is the data_table_region() AML opcode -- in
137 * which case, the DSDT is guaranteed to be already loaded.
138 * If this becomes insufficient, the FADT will have to be found first.
139 */
140 if (!acpi_gbl_DSDT) {
141 return_ACPI_STATUS(AE_NO_ACPI_TABLES);
142 }
143 table = acpi_gbl_DSDT;
144 } else {
145 /* Find the table */
146
147 status = acpi_get_firmware_table(signature, 1,
148 ACPI_LOGICAL_ADDRESSING,
149 &table);
150 if (ACPI_FAILURE(status)) {
151 return_ACPI_STATUS(status);
152 }
153 }
154
155 /* Check oem_id and oem_table_id */
156
157 if ((oem_id[0] &&
158 ACPI_STRNCMP(oem_id, table->oem_id,
159 sizeof(table->oem_id))) ||
160 (oem_table_id[0] &&
161 ACPI_STRNCMP(oem_table_id, table->oem_table_id,
162 sizeof(table->oem_table_id)))) {
163 return_ACPI_STATUS(AE_AML_NAME_NOT_FOUND);
164 }
165
166 ACPI_DEBUG_PRINT((ACPI_DB_TABLES, "Found table [%4.4s]\n",
167 table->signature));
168
169 *table_ptr = table;
170 return_ACPI_STATUS(AE_OK);
171}
172
173/*******************************************************************************
174 *
175 * FUNCTION: acpi_get_firmware_table
176 * 108 *
177 * PARAMETERS: Signature - Any ACPI table signature 109 * DESCRIPTION: Search lower 1_mbyte of memory for the root system descriptor
178 * Instance - the non zero instance of the table, allows 110 * pointer structure. If it is found, set *RSDP to point to it.
179 * support for multiple tables of the same type
180 * Flags - Physical/Virtual support
181 * table_pointer - Where a buffer containing the table is
182 * returned
183 * 111 *
184 * RETURN: Status 112 * NOTE1: The RSDP must be either in the first 1_k of the Extended
113 * BIOS Data Area or between E0000 and FFFFF (From ACPI Spec.)
114 * Only a 32-bit physical address is necessary.
185 * 115 *
186 * DESCRIPTION: This function is called to get an ACPI table. A buffer is 116 * NOTE2: This function is always available, regardless of the
187 * allocated for the table and returned in table_pointer. 117 * initialization state of the rest of ACPI.
188 * This table will be a complete table including the header.
189 * 118 *
190 ******************************************************************************/ 119 ******************************************************************************/
191 120
192acpi_status 121acpi_status acpi_find_root_pointer(acpi_native_uint * table_address)
193acpi_get_firmware_table(acpi_string signature,
194 u32 instance,
195 u32 flags, struct acpi_table_header **table_pointer)
196{ 122{
197 acpi_status status; 123 u8 *table_ptr;
198 struct acpi_pointer address; 124 u8 *mem_rover;
199 struct acpi_table_header *header = NULL; 125 u32 physical_address;
200 struct acpi_table_desc *table_info = NULL;
201 struct acpi_table_desc *rsdt_info;
202 u32 table_count;
203 u32 i;
204 u32 j;
205
206 ACPI_FUNCTION_TRACE(acpi_get_firmware_table);
207
208 /*
209 * Ensure that at least the table manager is initialized. We don't
210 * require that the entire ACPI subsystem is up for this interface.
211 * If we have a buffer, we must have a length too
212 */
213 if ((instance == 0) || (!signature) || (!table_pointer)) {
214 return_ACPI_STATUS(AE_BAD_PARAMETER);
215 }
216
217 /* Ensure that we have a RSDP */
218
219 if (!acpi_gbl_RSDP) {
220
221 /* Get the RSDP */
222
223 status = acpi_os_get_root_pointer(flags, &address);
224 if (ACPI_FAILURE(status)) {
225 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "RSDP not found\n"));
226 return_ACPI_STATUS(AE_NO_ACPI_TABLES);
227 }
228
229 /* Map and validate the RSDP */
230
231 if ((flags & ACPI_MEMORY_MODE) == ACPI_LOGICAL_ADDRESSING) {
232 status = acpi_os_map_memory(address.pointer.physical,
233 sizeof(struct
234 rsdp_descriptor),
235 (void *)&acpi_gbl_RSDP);
236 if (ACPI_FAILURE(status)) {
237 return_ACPI_STATUS(status);
238 }
239 } else {
240 acpi_gbl_RSDP = address.pointer.logical;
241 }
242
243 /* The RDSP signature and checksum must both be correct */
244
245 status = acpi_tb_validate_rsdp(acpi_gbl_RSDP);
246 if (ACPI_FAILURE(status)) {
247 return_ACPI_STATUS(status);
248 }
249 }
250
251 /* Get the RSDT address via the RSDP */
252
253 acpi_tb_get_rsdt_address(&address);
254 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
255 "RSDP located at %p, RSDT physical=%8.8X%8.8X\n",
256 acpi_gbl_RSDP,
257 ACPI_FORMAT_UINT64(address.pointer.value)));
258 126
259 /* Insert processor_mode flags */ 127 ACPI_FUNCTION_TRACE(acpi_find_root_pointer);
260 128
261 address.pointer_type |= flags; 129 /* 1a) Get the location of the Extended BIOS Data Area (EBDA) */
262 130
263 /* Get and validate the RSDT */ 131 table_ptr = acpi_os_map_memory((acpi_physical_address)
132 ACPI_EBDA_PTR_LOCATION,
133 ACPI_EBDA_PTR_LENGTH);
134 if (!table_ptr) {
135 ACPI_ERROR((AE_INFO,
136 "Could not map memory at %8.8X for length %X",
137 ACPI_EBDA_PTR_LOCATION, ACPI_EBDA_PTR_LENGTH));
264 138
265 rsdt_info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_table_desc));
266 if (!rsdt_info) {
267 return_ACPI_STATUS(AE_NO_MEMORY); 139 return_ACPI_STATUS(AE_NO_MEMORY);
268 } 140 }
269 141
270 status = acpi_tb_get_table(&address, rsdt_info); 142 ACPI_MOVE_16_TO_32(&physical_address, table_ptr);
271 if (ACPI_FAILURE(status)) {
272 goto cleanup;
273 }
274
275 status = acpi_tb_validate_rsdt(rsdt_info->pointer);
276 if (ACPI_FAILURE(status)) {
277 goto cleanup;
278 }
279 143
280 /* Allocate a scratch table header and table descriptor */ 144 /* Convert segment part to physical address */
281 145
282 header = ACPI_ALLOCATE(sizeof(struct acpi_table_header)); 146 physical_address <<= 4;
283 if (!header) { 147 acpi_os_unmap_memory(table_ptr, ACPI_EBDA_PTR_LENGTH);
284 status = AE_NO_MEMORY;
285 goto cleanup;
286 }
287 148
288 table_info = ACPI_ALLOCATE(sizeof(struct acpi_table_desc)); 149 /* EBDA present? */
289 if (!table_info) {
290 status = AE_NO_MEMORY;
291 goto cleanup;
292 }
293 150
294 /* Get the number of table pointers within the RSDT */ 151 if (physical_address > 0x400) {
295
296 table_count =
297 acpi_tb_get_table_count(acpi_gbl_RSDP, rsdt_info->pointer);
298 address.pointer_type = acpi_gbl_table_flags | flags;
299
300 /*
301 * Search the RSDT/XSDT for the correct instance of the
302 * requested table
303 */
304 for (i = 0, j = 0; i < table_count; i++) {
305 /* 152 /*
306 * Get the next table pointer, handle RSDT vs. XSDT 153 * 1b) Search EBDA paragraphs (EBDA is required to be a
307 * RSDT pointers are 32 bits, XSDT pointers are 64 bits 154 * minimum of 1_k length)
308 */ 155 */
309 if (acpi_gbl_root_table_type == ACPI_TABLE_TYPE_RSDT) { 156 table_ptr = acpi_os_map_memory((acpi_native_uint)
310 address.pointer.value = 157 physical_address,
311 (ACPI_CAST_PTR 158 ACPI_EBDA_WINDOW_SIZE);
312 (struct rsdt_descriptor, 159 if (!table_ptr) {
313 rsdt_info->pointer))->table_offset_entry[i]; 160 ACPI_ERROR((AE_INFO,
314 } else { 161 "Could not map memory at %8.8X for length %X",
315 address.pointer.value = 162 physical_address, ACPI_EBDA_WINDOW_SIZE));
316 (ACPI_CAST_PTR
317 (struct xsdt_descriptor,
318 rsdt_info->pointer))->table_offset_entry[i];
319 }
320
321 /* Get the table header */
322 163
323 status = acpi_tb_get_table_header(&address, header); 164 return_ACPI_STATUS(AE_NO_MEMORY);
324 if (ACPI_FAILURE(status)) {
325 goto cleanup;
326 } 165 }
327 166
328 /* Compare table signatures and table instance */ 167 mem_rover =
329 168 acpi_tb_scan_memory_for_rsdp(table_ptr,
330 if (ACPI_COMPARE_NAME(header->signature, signature)) { 169 ACPI_EBDA_WINDOW_SIZE);
331 170 acpi_os_unmap_memory(table_ptr, ACPI_EBDA_WINDOW_SIZE);
332 /* An instance of the table was found */
333 171
334 j++; 172 if (mem_rover) {
335 if (j >= instance) {
336 173
337 /* Found the correct instance, get the entire table */ 174 /* Return the physical address */
338 175
339 status = 176 physical_address +=
340 acpi_tb_get_table_body(&address, header, 177 (u32) ACPI_PTR_DIFF(mem_rover, table_ptr);
341 table_info);
342 if (ACPI_FAILURE(status)) {
343 goto cleanup;
344 }
345 178
346 *table_pointer = table_info->pointer; 179 *table_address = physical_address;
347 goto cleanup; 180 return_ACPI_STATUS(AE_OK);
348 }
349 } 181 }
350 } 182 }
351 183
352 /* Did not find the table */ 184 /*
185 * 2) Search upper memory: 16-byte boundaries in E0000h-FFFFFh
186 */
187 table_ptr = acpi_os_map_memory((acpi_physical_address)
188 ACPI_HI_RSDP_WINDOW_BASE,
189 ACPI_HI_RSDP_WINDOW_SIZE);
353 190
354 status = AE_NOT_EXIST; 191 if (!table_ptr) {
192 ACPI_ERROR((AE_INFO,
193 "Could not map memory at %8.8X for length %X",
194 ACPI_HI_RSDP_WINDOW_BASE,
195 ACPI_HI_RSDP_WINDOW_SIZE));
355 196
356 cleanup: 197 return_ACPI_STATUS(AE_NO_MEMORY);
357 if (rsdt_info->pointer) {
358 acpi_os_unmap_memory(rsdt_info->pointer,
359 (acpi_size) rsdt_info->pointer->length);
360 } 198 }
361 ACPI_FREE(rsdt_info);
362 199
363 if (header) { 200 mem_rover =
364 ACPI_FREE(header); 201 acpi_tb_scan_memory_for_rsdp(table_ptr, ACPI_HI_RSDP_WINDOW_SIZE);
365 } 202 acpi_os_unmap_memory(table_ptr, ACPI_HI_RSDP_WINDOW_SIZE);
366 if (table_info) {
367 ACPI_FREE(table_info);
368 }
369 return_ACPI_STATUS(status);
370}
371 203
372ACPI_EXPORT_SYMBOL(acpi_get_firmware_table) 204 if (mem_rover) {
373 205
374/* TBD: Move to a new file */ 206 /* Return the physical address */
375#if ACPI_MACHINE_WIDTH != 16
376/*******************************************************************************
377 *
378 * FUNCTION: acpi_find_root_pointer
379 *
380 * PARAMETERS: Flags - Logical/Physical addressing
381 * rsdp_address - Where to place the RSDP address
382 *
383 * RETURN: Status, Physical address of the RSDP
384 *
385 * DESCRIPTION: Find the RSDP
386 *
387 ******************************************************************************/
388acpi_status acpi_find_root_pointer(u32 flags, struct acpi_pointer *rsdp_address)
389{
390 struct acpi_table_desc table_info;
391 acpi_status status;
392
393 ACPI_FUNCTION_TRACE(acpi_find_root_pointer);
394
395 /* Get the RSDP */
396 207
397 status = acpi_tb_find_rsdp(&table_info, flags); 208 physical_address = (u32)
398 if (ACPI_FAILURE(status)) { 209 (ACPI_HI_RSDP_WINDOW_BASE +
399 ACPI_EXCEPTION((AE_INFO, status, 210 ACPI_PTR_DIFF(mem_rover, table_ptr));
400 "RSDP structure not found - Flags=%X", flags));
401 211
402 return_ACPI_STATUS(AE_NO_ACPI_TABLES); 212 *table_address = physical_address;
213 return_ACPI_STATUS(AE_OK);
403 } 214 }
404 215
405 rsdp_address->pointer_type = ACPI_PHYSICAL_POINTER; 216 /* A valid RSDP was not found */
406 rsdp_address->pointer.physical = table_info.physical_address; 217
407 return_ACPI_STATUS(AE_OK); 218 ACPI_ERROR((AE_INFO, "A valid RSDP was not found"));
219 return_ACPI_STATUS(AE_NOT_FOUND);
408} 220}
409 221
410ACPI_EXPORT_SYMBOL(acpi_find_root_pointer) 222ACPI_EXPORT_SYMBOL(acpi_find_root_pointer)
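
The EBDA step above hinges on real-mode segment arithmetic: the 16-bit value at 0x40E is a segment, so shifting it left by four yields the physical base that is then scanned on 16-byte boundaries. A sketch of just that conversion:

	#include <acpi/acpi.h>

	u32 example_ebda_segment_to_phys(u16 ebda_segment)
	{
		/* e.g. segment 0x9FC0 becomes physical 0x9FC00 */
		return ((u32)ebda_segment) << 4;
	}
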
@@ -440,7 +252,7 @@ static u8 *acpi_tb_scan_memory_for_rsdp(u8 * start_address, u32 length)
440 252
441 status = 253 status =
442 acpi_tb_validate_rsdp(ACPI_CAST_PTR 254 acpi_tb_validate_rsdp(ACPI_CAST_PTR
443 (struct rsdp_descriptor, mem_rover)); 255 (struct acpi_table_rsdp, mem_rover));
444 if (ACPI_SUCCESS(status)) { 256 if (ACPI_SUCCESS(status)) {
445 257
446 /* Sig and checksum valid, we have found a real RSDP */ 258 /* Sig and checksum valid, we have found a real RSDP */
@@ -461,189 +273,3 @@ static u8 *acpi_tb_scan_memory_for_rsdp(u8 * start_address, u32 length)
461 start_address)); 273 start_address));
462 return_PTR(NULL); 274 return_PTR(NULL);
463} 275}
464
465/*******************************************************************************
466 *
467 * FUNCTION: acpi_tb_find_rsdp
468 *
469 * PARAMETERS: table_info - Where the table info is returned
470 * Flags - Current memory mode (logical vs.
471 * physical addressing)
472 *
473 * RETURN: Status, RSDP physical address
474 *
475 * DESCRIPTION: Search lower 1_mbyte of memory for the root system descriptor
476 * pointer structure. If it is found, set *RSDP to point to it.
477 *
478 * NOTE1: The RSDP must be either in the first 1_k of the Extended
479 * BIOS Data Area or between E0000 and FFFFF (From ACPI Spec.)
480 * Only a 32-bit physical address is necessary.
481 *
482 * NOTE2: This function is always available, regardless of the
483 * initialization state of the rest of ACPI.
484 *
485 ******************************************************************************/
486
487static acpi_status
488acpi_tb_find_rsdp(struct acpi_table_desc *table_info, u32 flags)
489{
490 u8 *table_ptr;
491 u8 *mem_rover;
492 u32 physical_address;
493 acpi_status status;
494
495 ACPI_FUNCTION_TRACE(tb_find_rsdp);
496
497 /*
498 * Scan supports either logical addressing or physical addressing
499 */
500 if ((flags & ACPI_MEMORY_MODE) == ACPI_LOGICAL_ADDRESSING) {
501
502 /* 1a) Get the location of the Extended BIOS Data Area (EBDA) */
503
504 status = acpi_os_map_memory((acpi_physical_address)
505 ACPI_EBDA_PTR_LOCATION,
506 ACPI_EBDA_PTR_LENGTH,
507 (void *)&table_ptr);
508 if (ACPI_FAILURE(status)) {
509 ACPI_ERROR((AE_INFO,
510 "Could not map memory at %8.8X for length %X",
511 ACPI_EBDA_PTR_LOCATION,
512 ACPI_EBDA_PTR_LENGTH));
513
514 return_ACPI_STATUS(status);
515 }
516
517 ACPI_MOVE_16_TO_32(&physical_address, table_ptr);
518
519 /* Convert segment part to physical address */
520
521 physical_address <<= 4;
522 acpi_os_unmap_memory(table_ptr, ACPI_EBDA_PTR_LENGTH);
523
524 /* EBDA present? */
525
526 if (physical_address > 0x400) {
527 /*
528 * 1b) Search EBDA paragraphs (EBDA is required to be a
529 * minimum of 1_k length)
530 */
531 status = acpi_os_map_memory((acpi_physical_address)
532 physical_address,
533 ACPI_EBDA_WINDOW_SIZE,
534 (void *)&table_ptr);
535 if (ACPI_FAILURE(status)) {
536 ACPI_ERROR((AE_INFO,
537 "Could not map memory at %8.8X for length %X",
538 physical_address,
539 ACPI_EBDA_WINDOW_SIZE));
540
541 return_ACPI_STATUS(status);
542 }
543
544 mem_rover = acpi_tb_scan_memory_for_rsdp(table_ptr,
545 ACPI_EBDA_WINDOW_SIZE);
546 acpi_os_unmap_memory(table_ptr, ACPI_EBDA_WINDOW_SIZE);
547
548 if (mem_rover) {
549
550 /* Return the physical address */
551
552 physical_address +=
553 (u32) ACPI_PTR_DIFF(mem_rover, table_ptr);
554
555 table_info->physical_address =
556 (acpi_physical_address) physical_address;
557 return_ACPI_STATUS(AE_OK);
558 }
559 }
560
561 /*
562 * 2) Search upper memory: 16-byte boundaries in E0000h-FFFFFh
563 */
564 status = acpi_os_map_memory((acpi_physical_address)
565 ACPI_HI_RSDP_WINDOW_BASE,
566 ACPI_HI_RSDP_WINDOW_SIZE,
567 (void *)&table_ptr);
568
569 if (ACPI_FAILURE(status)) {
570 ACPI_ERROR((AE_INFO,
571 "Could not map memory at %8.8X for length %X",
572 ACPI_HI_RSDP_WINDOW_BASE,
573 ACPI_HI_RSDP_WINDOW_SIZE));
574
575 return_ACPI_STATUS(status);
576 }
577
578 mem_rover =
579 acpi_tb_scan_memory_for_rsdp(table_ptr,
580 ACPI_HI_RSDP_WINDOW_SIZE);
581 acpi_os_unmap_memory(table_ptr, ACPI_HI_RSDP_WINDOW_SIZE);
582
583 if (mem_rover) {
584
585 /* Return the physical address */
586
587 physical_address = (u32)
588 (ACPI_HI_RSDP_WINDOW_BASE +
589 ACPI_PTR_DIFF(mem_rover, table_ptr));
590
591 table_info->physical_address =
592 (acpi_physical_address) physical_address;
593 return_ACPI_STATUS(AE_OK);
594 }
595 }
596
597 /*
598 * Physical addressing
599 */
600 else {
601 /* 1a) Get the location of the EBDA */
602
603 ACPI_MOVE_16_TO_32(&physical_address, ACPI_EBDA_PTR_LOCATION);
604 physical_address <<= 4; /* Convert segment to physical address */
605
606 /* EBDA present? */
607
608 if (physical_address > 0x400) {
609 /*
610 * 1b) Search EBDA paragraphs (EBDA is required to be a minimum of
611 * 1_k length)
612 */
613 mem_rover =
614 acpi_tb_scan_memory_for_rsdp(ACPI_PHYSADDR_TO_PTR
615 (physical_address),
616 ACPI_EBDA_WINDOW_SIZE);
617 if (mem_rover) {
618
619 /* Return the physical address */
620
621 table_info->physical_address =
622 ACPI_TO_INTEGER(mem_rover);
623 return_ACPI_STATUS(AE_OK);
624 }
625 }
626
627 /* 2) Search upper memory: 16-byte boundaries in E0000h-FFFFFh */
628
629 mem_rover =
630 acpi_tb_scan_memory_for_rsdp(ACPI_PHYSADDR_TO_PTR
631 (ACPI_HI_RSDP_WINDOW_BASE),
632 ACPI_HI_RSDP_WINDOW_SIZE);
633 if (mem_rover) {
634
635 /* Found it, return the physical address */
636
637 table_info->physical_address =
638 ACPI_TO_INTEGER(mem_rover);
639 return_ACPI_STATUS(AE_OK);
640 }
641 }
642
643 /* A valid RSDP was not found */
644
645 ACPI_ERROR((AE_INFO, "No valid RSDP was found"));
646 return_ACPI_STATUS(AE_NOT_FOUND);
647}
648
649#endif
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 40ddb4dd9631..f76d3168c2b2 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -82,7 +82,7 @@ MODULE_PARM_DESC(tzp, "Thermal zone polling frequency, in 1/10 seconds.\n");
82 82
83static int acpi_thermal_add(struct acpi_device *device); 83static int acpi_thermal_add(struct acpi_device *device);
84static int acpi_thermal_remove(struct acpi_device *device, int type); 84static int acpi_thermal_remove(struct acpi_device *device, int type);
85static int acpi_thermal_resume(struct acpi_device *device, int state); 85static int acpi_thermal_resume(struct acpi_device *device);
86static int acpi_thermal_state_open_fs(struct inode *inode, struct file *file); 86static int acpi_thermal_state_open_fs(struct inode *inode, struct file *file);
87static int acpi_thermal_temp_open_fs(struct inode *inode, struct file *file); 87static int acpi_thermal_temp_open_fs(struct inode *inode, struct file *file);
88static int acpi_thermal_trip_open_fs(struct inode *inode, struct file *file); 88static int acpi_thermal_trip_open_fs(struct inode *inode, struct file *file);
@@ -1353,7 +1353,7 @@ static int acpi_thermal_remove(struct acpi_device *device, int type)
1353 return 0; 1353 return 0;
1354} 1354}
1355 1355
1356static int acpi_thermal_resume(struct acpi_device *device, int state) 1356static int acpi_thermal_resume(struct acpi_device *device)
1357{ 1357{
1358 struct acpi_thermal *tz = NULL; 1358 struct acpi_thermal *tz = NULL;
1359 int i; 1359 int i;
diff --git a/drivers/acpi/utilities/utalloc.c b/drivers/acpi/utilities/utalloc.c
index f6cbc0b1bfd0..55a764807499 100644
--- a/drivers/acpi/utilities/utalloc.c
+++ b/drivers/acpi/utilities/utalloc.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -42,6 +42,7 @@
42 */ 42 */
43 43
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acdebug.h>
45 46
46#define _COMPONENT ACPI_UTILITIES 47#define _COMPONENT ACPI_UTILITIES
47ACPI_MODULE_NAME("utalloc") 48ACPI_MODULE_NAME("utalloc")
@@ -142,6 +143,14 @@ acpi_status acpi_ut_create_caches(void)
142 143
143acpi_status acpi_ut_delete_caches(void) 144acpi_status acpi_ut_delete_caches(void)
144{ 145{
146#ifdef ACPI_DBG_TRACK_ALLOCATIONS
147 char buffer[7];
148
149 if (acpi_gbl_display_final_mem_stats) {
150 ACPI_STRCPY(buffer, "MEMORY");
151 acpi_db_display_statistics(buffer);
152 }
153#endif
145 154
146 (void)acpi_os_delete_cache(acpi_gbl_namespace_cache); 155 (void)acpi_os_delete_cache(acpi_gbl_namespace_cache);
147 acpi_gbl_namespace_cache = NULL; 156 acpi_gbl_namespace_cache = NULL;
diff --git a/drivers/acpi/utilities/utcache.c b/drivers/acpi/utilities/utcache.c
index 1a1f8109159c..870f6edeb5f2 100644
--- a/drivers/acpi/utilities/utcache.c
+++ b/drivers/acpi/utilities/utcache.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -289,6 +289,14 @@ void *acpi_os_acquire_object(struct acpi_memory_list *cache)
289 289
290 ACPI_MEM_TRACKING(cache->total_allocated++); 290 ACPI_MEM_TRACKING(cache->total_allocated++);
291 291
292#ifdef ACPI_DBG_TRACK_ALLOCATIONS
293 if ((cache->total_allocated - cache->total_freed) >
294 cache->max_occupied) {
295 cache->max_occupied =
296 cache->total_allocated - cache->total_freed;
297 }
298#endif
299
292 /* Avoid deadlock with ACPI_ALLOCATE_ZEROED */ 300 /* Avoid deadlock with ACPI_ALLOCATE_ZEROED */
293 301
294 status = acpi_ut_release_mutex(ACPI_MTX_CACHES); 302 status = acpi_ut_release_mutex(ACPI_MTX_CACHES);
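
The hunk above adds a high-water mark to the cache statistics: occupancy is allocations minus frees, and the peak is recorded at allocation time. The same pattern in isolation (the struct and names here are illustrative, not ACPICA's):

	#include <acpi/acpi.h>

	struct example_cache_stats {
		u32 total_allocated;
		u32 total_freed;
		u32 max_occupied;
	};

	static void example_note_allocation(struct example_cache_stats *stats)
	{
		u32 occupied;

		stats->total_allocated++;
		occupied = stats->total_allocated - stats->total_freed;
		if (occupied > stats->max_occupied)
			stats->max_occupied = occupied;	/* new peak */
	}
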
diff --git a/drivers/acpi/utilities/utcopy.c b/drivers/acpi/utilities/utcopy.c
index 5e1a80d1bc36..84d529db0a66 100644
--- a/drivers/acpi/utilities/utcopy.c
+++ b/drivers/acpi/utilities/utcopy.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -719,6 +719,15 @@ acpi_ut_copy_simple_object(union acpi_operand_object *source_desc,
719 acpi_ut_add_reference(source_desc->reference.object); 719 acpi_ut_add_reference(source_desc->reference.object);
720 break; 720 break;
721 721
722 case ACPI_TYPE_REGION:
723 /*
724 * We copied the Region Handler, so we now must add a reference
725 */
726 if (dest_desc->region.handler) {
727 acpi_ut_add_reference(dest_desc->region.handler);
728 }
729 break;
730
722 default: 731 default:
723 /* Nothing to do for other simple objects */ 732 /* Nothing to do for other simple objects */
724 break; 733 break;
diff --git a/drivers/acpi/utilities/utdebug.c b/drivers/acpi/utilities/utdebug.c
index 9e9054e155c1..61ad4f2daee2 100644
--- a/drivers/acpi/utilities/utdebug.c
+++ b/drivers/acpi/utilities/utdebug.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -181,8 +181,7 @@ acpi_ut_debug_print(u32 requested_debug_level,
181 if (ACPI_LV_THREADS & acpi_dbg_level) { 181 if (ACPI_LV_THREADS & acpi_dbg_level) {
182 acpi_os_printf 182 acpi_os_printf
183 ("\n**** Context Switch from TID %lX to TID %lX ****\n\n", 183 ("\n**** Context Switch from TID %lX to TID %lX ****\n\n",
184 (unsigned long) acpi_gbl_prev_thread_id, 184 (unsigned long)acpi_gbl_prev_thread_id, (unsigned long)thread_id);
185 (unsigned long) thread_id);
186 } 185 }
187 186
188 acpi_gbl_prev_thread_id = thread_id; 187 acpi_gbl_prev_thread_id = thread_id;
@@ -195,7 +194,7 @@ acpi_ut_debug_print(u32 requested_debug_level,
195 acpi_os_printf("%8s-%04ld ", module_name, line_number); 194 acpi_os_printf("%8s-%04ld ", module_name, line_number);
196 195
197 if (ACPI_LV_THREADS & acpi_dbg_level) { 196 if (ACPI_LV_THREADS & acpi_dbg_level) {
198 acpi_os_printf("[%04lX] ", thread_id); 197 acpi_os_printf("[%04lX] ", (unsigned long)thread_id);
199 } 198 }
200 199
201 acpi_os_printf("[%02ld] %-22.22s: ", 200 acpi_os_printf("[%02ld] %-22.22s: ",
diff --git a/drivers/acpi/utilities/utdelete.c b/drivers/acpi/utilities/utdelete.c
index 9d3f1149ba21..f777cebdc46d 100644
--- a/drivers/acpi/utilities/utdelete.c
+++ b/drivers/acpi/utilities/utdelete.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -158,16 +158,20 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
158 "***** Mutex %p, OS Mutex %p\n", 158 "***** Mutex %p, OS Mutex %p\n",
159 object, object->mutex.os_mutex)); 159 object, object->mutex.os_mutex));
160 160
161 if (object->mutex.os_mutex != ACPI_GLOBAL_LOCK) { 161 if (object->mutex.os_mutex == acpi_gbl_global_lock_mutex) {
162 acpi_ex_unlink_mutex(object); 162
163 acpi_os_delete_mutex(object->mutex.os_mutex); 163 /* Global Lock has extra semaphore */
164 } else {
165 /* Global Lock "mutex" is actually a counting semaphore */
166 164
167 (void) 165 (void)
168 acpi_os_delete_semaphore 166 acpi_os_delete_semaphore
169 (acpi_gbl_global_lock_semaphore); 167 (acpi_gbl_global_lock_semaphore);
170 acpi_gbl_global_lock_semaphore = NULL; 168 acpi_gbl_global_lock_semaphore = NULL;
169
170 acpi_os_delete_mutex(object->mutex.os_mutex);
171 acpi_gbl_global_lock_mutex = NULL;
172 } else {
173 acpi_ex_unlink_mutex(object);
174 acpi_os_delete_mutex(object->mutex.os_mutex);
171 } 175 }
172 break; 176 break;
173 177
diff --git a/drivers/acpi/utilities/uteval.c b/drivers/acpi/utilities/uteval.c
index d6d7121583c0..13d5879cd98b 100644
--- a/drivers/acpi/utilities/uteval.c
+++ b/drivers/acpi/utilities/uteval.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/utilities/utglobal.c b/drivers/acpi/utilities/utglobal.c
index 014030af8b50..af33358a964b 100644
--- a/drivers/acpi/utilities/utglobal.c
+++ b/drivers/acpi/utilities/utglobal.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -46,89 +46,9 @@
46#include <acpi/acpi.h> 46#include <acpi/acpi.h>
47#include <acpi/acnamesp.h> 47#include <acpi/acnamesp.h>
48 48
49ACPI_EXPORT_SYMBOL(acpi_gbl_FADT)
49#define _COMPONENT ACPI_UTILITIES 50#define _COMPONENT ACPI_UTILITIES
50ACPI_MODULE_NAME("utglobal") 51 ACPI_MODULE_NAME("utglobal")
51
52/*******************************************************************************
53 *
54 * FUNCTION: acpi_format_exception
55 *
56 * PARAMETERS: Status - The acpi_status code to be formatted
57 *
58 * RETURN: A string containing the exception text. A valid pointer is
59 * always returned.
60 *
61 * DESCRIPTION: This function translates an ACPI exception into an ASCII string.
62 *
63 ******************************************************************************/
64const char *acpi_format_exception(acpi_status status)
65{
66 acpi_status sub_status;
67 const char *exception = NULL;
68
69 ACPI_FUNCTION_ENTRY();
70
71 /*
72 * Status is composed of two parts, a "type" and an actual code
73 */
74 sub_status = (status & ~AE_CODE_MASK);
75
76 switch (status & AE_CODE_MASK) {
77 case AE_CODE_ENVIRONMENTAL:
78
79 if (sub_status <= AE_CODE_ENV_MAX) {
80 exception = acpi_gbl_exception_names_env[sub_status];
81 }
82 break;
83
84 case AE_CODE_PROGRAMMER:
85
86 if (sub_status <= AE_CODE_PGM_MAX) {
87 exception =
88 acpi_gbl_exception_names_pgm[sub_status - 1];
89 }
90 break;
91
92 case AE_CODE_ACPI_TABLES:
93
94 if (sub_status <= AE_CODE_TBL_MAX) {
95 exception =
96 acpi_gbl_exception_names_tbl[sub_status - 1];
97 }
98 break;
99
100 case AE_CODE_AML:
101
102 if (sub_status <= AE_CODE_AML_MAX) {
103 exception =
104 acpi_gbl_exception_names_aml[sub_status - 1];
105 }
106 break;
107
108 case AE_CODE_CONTROL:
109
110 if (sub_status <= AE_CODE_CTRL_MAX) {
111 exception =
112 acpi_gbl_exception_names_ctrl[sub_status - 1];
113 }
114 break;
115
116 default:
117 break;
118 }
119
120 if (!exception) {
121
122 /* Exception code was not recognized */
123
124 ACPI_ERROR((AE_INFO,
125 "Unknown exception code: 0x%8.8X", status));
126
127 exception = "UNKNOWN_STATUS_CODE";
128 }
129
130 return (ACPI_CAST_PTR(const char, exception));
131}
132 52
133/******************************************************************************* 53/*******************************************************************************
134 * 54 *
@@ -163,8 +83,6 @@ u32 acpi_gbl_startup_flags = 0;
163 83
164u8 acpi_gbl_shutdown = TRUE; 84u8 acpi_gbl_shutdown = TRUE;
165 85
166const u8 acpi_gbl_decode_to8bit[8] = { 1, 2, 4, 8, 16, 32, 64, 128 };
167
168const char *acpi_gbl_sleep_state_names[ACPI_S_STATE_COUNT] = { 86const char *acpi_gbl_sleep_state_names[ACPI_S_STATE_COUNT] = {
169 "\\_S0_", 87 "\\_S0_",
170 "\\_S1_", 88 "\\_S1_",
@@ -183,10 +101,45 @@ const char *acpi_gbl_highest_dstate_names[4] = {
183 101
184/******************************************************************************* 102/*******************************************************************************
185 * 103 *
186 * Namespace globals 104 * FUNCTION: acpi_format_exception
105 *
106 * PARAMETERS: Status - The acpi_status code to be formatted
107 *
108 * RETURN: A string containing the exception text. A valid pointer is
109 * always returned.
110 *
 111 * DESCRIPTION: This function translates an ACPI exception into an ASCII string.
112 * It is here instead of utxface.c so it is always present.
187 * 113 *
188 ******************************************************************************/ 114 ******************************************************************************/
189 115
116const char *acpi_format_exception(acpi_status status)
117{
118 const char *exception = NULL;
119
120 ACPI_FUNCTION_ENTRY();
121
122 exception = acpi_ut_validate_exception(status);
123 if (!exception) {
124
125 /* Exception code was not recognized */
126
127 ACPI_ERROR((AE_INFO,
128 "Unknown exception code: 0x%8.8X", status));
129
130 exception = "UNKNOWN_STATUS_CODE";
131 }
132
133 return (ACPI_CAST_PTR(const char, exception));
134}
135
136ACPI_EXPORT_SYMBOL(acpi_format_exception)
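
The usual consumption pattern for acpi_format_exception() is turning a failed status into a log line. A sketch:

	#include <acpi/acpi.h>

	void example_report(acpi_status status)
	{
		if (ACPI_FAILURE(status))
			acpi_os_printf("ACPI error: %s\n",
				       acpi_format_exception(status));
	}
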
137
138/*******************************************************************************
139 *
140 * Namespace globals
141 *
142 ******************************************************************************/
190/* 143/*
191 * Predefined ACPI Names (Built-in to the Interpreter) 144 * Predefined ACPI Names (Built-in to the Interpreter)
192 * 145 *
@@ -280,53 +233,6 @@ char acpi_ut_hex_to_ascii_char(acpi_integer integer, u32 position)
280 return (acpi_gbl_hex_to_ascii[(integer >> position) & 0xF]); 233 return (acpi_gbl_hex_to_ascii[(integer >> position) & 0xF]);
281} 234}
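
A worked example for the helper above; the result follows directly from its shift-and-mask return statement:

	/* (0x1A2B >> 4) & 0xF == 0x2, so this returns '2' */
	char c = acpi_ut_hex_to_ascii_char(0x1A2B, 4);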
282 235
283/*******************************************************************************
284 *
285 * Table name globals
286 *
287 * NOTE: This table includes ONLY the ACPI tables that the subsystem consumes.
288 * It is NOT an exhaustive list of all possible ACPI tables. All ACPI tables
289 * that are not used by the subsystem are simply ignored.
290 *
291 * Do NOT add any table to this list that is not consumed directly by this
292 * subsystem (No MADT, ECDT, SBST, etc.)
293 *
294 ******************************************************************************/
295
296struct acpi_table_list acpi_gbl_table_lists[ACPI_TABLE_ID_MAX + 1];
297
298struct acpi_table_support acpi_gbl_table_data[ACPI_TABLE_ID_MAX + 1] = {
299 /*********** Name, Signature, Global typed pointer Signature size, Type How many allowed?, Contains valid AML? */
300
301 /* RSDP 0 */ {RSDP_NAME, RSDP_SIG, NULL, sizeof(RSDP_SIG) - 1,
302 ACPI_TABLE_ROOT | ACPI_TABLE_SINGLE}
303 ,
304 /* DSDT 1 */ {DSDT_SIG, DSDT_SIG, (void *)&acpi_gbl_DSDT,
305 sizeof(DSDT_SIG) - 1,
306 ACPI_TABLE_SECONDARY | ACPI_TABLE_SINGLE |
307 ACPI_TABLE_EXECUTABLE}
308 ,
309 /* FADT 2 */ {FADT_SIG, FADT_SIG, (void *)&acpi_gbl_FADT,
310 sizeof(FADT_SIG) - 1,
311 ACPI_TABLE_PRIMARY | ACPI_TABLE_SINGLE}
312 ,
313 /* FACS 3 */ {FACS_SIG, FACS_SIG, (void *)&acpi_gbl_FACS,
314 sizeof(FACS_SIG) - 1,
315 ACPI_TABLE_SECONDARY | ACPI_TABLE_SINGLE}
316 ,
317 /* PSDT 4 */ {PSDT_SIG, PSDT_SIG, NULL, sizeof(PSDT_SIG) - 1,
318 ACPI_TABLE_PRIMARY | ACPI_TABLE_MULTIPLE |
319 ACPI_TABLE_EXECUTABLE}
320 ,
321 /* SSDT 5 */ {SSDT_SIG, SSDT_SIG, NULL, sizeof(SSDT_SIG) - 1,
322 ACPI_TABLE_PRIMARY | ACPI_TABLE_MULTIPLE |
323 ACPI_TABLE_EXECUTABLE}
324 ,
325 /* XSDT 6 */ {XSDT_SIG, XSDT_SIG, NULL, sizeof(RSDT_SIG) - 1,
326 ACPI_TABLE_ROOT | ACPI_TABLE_SINGLE}
327 ,
328};
329
330/****************************************************************************** 236/******************************************************************************
331 * 237 *
332 * Event and Hardware globals 238 * Event and Hardware globals
@@ -612,7 +518,7 @@ char *acpi_ut_get_node_name(void *object)
612 /* Name must be a valid ACPI name */ 518 /* Name must be a valid ACPI name */
613 519
614 if (!acpi_ut_valid_acpi_name(node->name.integer)) { 520 if (!acpi_ut_valid_acpi_name(node->name.integer)) {
615 node->name.integer = acpi_ut_repair_name(node->name.integer); 521 node->name.integer = acpi_ut_repair_name(node->name.ascii);
616 } 522 }
617 523
618 /* Return the name */ 524 /* Return the name */
@@ -751,13 +657,6 @@ void acpi_ut_init_globals(void)
751 return; 657 return;
752 } 658 }
753 659
754 /* ACPI table structure */
755
756 for (i = 0; i < (ACPI_TABLE_ID_MAX + 1); i++) {
757 acpi_gbl_table_lists[i].next = NULL;
758 acpi_gbl_table_lists[i].count = 0;
759 }
760
761 /* Mutex locked flags */ 660 /* Mutex locked flags */
762 661
763 for (i = 0; i < ACPI_NUM_MUTEX; i++) { 662 for (i = 0; i < ACPI_NUM_MUTEX; i++) {
@@ -773,6 +672,7 @@ void acpi_ut_init_globals(void)
773 672
774 /* GPE support */ 673 /* GPE support */
775 674
675 acpi_gpe_count = 0;
776 acpi_gbl_gpe_xrupt_list_head = NULL; 676 acpi_gbl_gpe_xrupt_list_head = NULL;
777 acpi_gbl_gpe_fadt_blocks[0] = NULL; 677 acpi_gbl_gpe_fadt_blocks[0] = NULL;
778 acpi_gbl_gpe_fadt_blocks[1] = NULL; 678 acpi_gbl_gpe_fadt_blocks[1] = NULL;
@@ -784,25 +684,15 @@ void acpi_ut_init_globals(void)
784 acpi_gbl_exception_handler = NULL; 684 acpi_gbl_exception_handler = NULL;
785 acpi_gbl_init_handler = NULL; 685 acpi_gbl_init_handler = NULL;
786 686
787 /* Global "typed" ACPI table pointers */
788
789 acpi_gbl_RSDP = NULL;
790 acpi_gbl_XSDT = NULL;
791 acpi_gbl_FACS = NULL;
792 acpi_gbl_FADT = NULL;
793 acpi_gbl_DSDT = NULL;
794
795 /* Global Lock support */ 687 /* Global Lock support */
796 688
797 acpi_gbl_global_lock_semaphore = NULL; 689 acpi_gbl_global_lock_semaphore = NULL;
690 acpi_gbl_global_lock_mutex = NULL;
798 acpi_gbl_global_lock_acquired = FALSE; 691 acpi_gbl_global_lock_acquired = FALSE;
799 acpi_gbl_global_lock_thread_count = 0;
800 acpi_gbl_global_lock_handle = 0; 692 acpi_gbl_global_lock_handle = 0;
801 693
802 /* Miscellaneous variables */ 694 /* Miscellaneous variables */
803 695
804 acpi_gbl_table_flags = ACPI_PHYSICAL_POINTER;
805 acpi_gbl_rsdp_original_location = 0;
806 acpi_gbl_cm_single_step = FALSE; 696 acpi_gbl_cm_single_step = FALSE;
807 acpi_gbl_db_terminate_threads = FALSE; 697 acpi_gbl_db_terminate_threads = FALSE;
808 acpi_gbl_shutdown = FALSE; 698 acpi_gbl_shutdown = FALSE;
@@ -837,8 +727,13 @@ void acpi_ut_init_globals(void)
837 acpi_gbl_lowest_stack_pointer = ACPI_SIZE_MAX; 727 acpi_gbl_lowest_stack_pointer = ACPI_SIZE_MAX;
838#endif 728#endif
839 729
730#ifdef ACPI_DBG_TRACK_ALLOCATIONS
731 acpi_gbl_display_final_mem_stats = FALSE;
732#endif
733
840 return_VOID; 734 return_VOID;
841} 735}
842 736
843ACPI_EXPORT_SYMBOL(acpi_dbg_level) 737ACPI_EXPORT_SYMBOL(acpi_dbg_level)
844ACPI_EXPORT_SYMBOL(acpi_dbg_layer) 738ACPI_EXPORT_SYMBOL(acpi_dbg_layer)
739ACPI_EXPORT_SYMBOL(acpi_gpe_count)
diff --git a/drivers/acpi/utilities/utinit.c b/drivers/acpi/utilities/utinit.c
index ff76055eb7d6..ad3c0d0a5cf8 100644
--- a/drivers/acpi/utilities/utinit.c
+++ b/drivers/acpi/utilities/utinit.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -44,119 +44,14 @@
44#include <acpi/acpi.h> 44#include <acpi/acpi.h>
45#include <acpi/acnamesp.h> 45#include <acpi/acnamesp.h>
46#include <acpi/acevents.h> 46#include <acpi/acevents.h>
47#include <acpi/actables.h>
47 48
48#define _COMPONENT ACPI_UTILITIES 49#define _COMPONENT ACPI_UTILITIES
49ACPI_MODULE_NAME("utinit") 50ACPI_MODULE_NAME("utinit")
50 51
51/* Local prototypes */ 52/* Local prototypes */
52static void
53acpi_ut_fadt_register_error(char *register_name, u32 value, u8 offset);
54
55static void acpi_ut_terminate(void); 53static void acpi_ut_terminate(void);
56 54
57/*******************************************************************************
58 *
59 * FUNCTION: acpi_ut_fadt_register_error
60 *
61 * PARAMETERS: register_name - Pointer to string identifying register
62 * Value - Actual register contents value
63 * Offset - Byte offset in the FADT
64 *
65 * RETURN: AE_BAD_VALUE
66 *
67 * DESCRIPTION: Display failure message
68 *
69 ******************************************************************************/
70
71static void
72acpi_ut_fadt_register_error(char *register_name, u32 value, u8 offset)
73{
74
75 ACPI_WARNING((AE_INFO,
76 "Invalid FADT value %s=%X at offset %X FADT=%p",
77 register_name, value, offset, acpi_gbl_FADT));
78}
79
80/******************************************************************************
81 *
82 * FUNCTION: acpi_ut_validate_fadt
83 *
84 * PARAMETERS: None
85 *
86 * RETURN: Status
87 *
88 * DESCRIPTION: Validate various ACPI registers in the FADT
89 *
90 ******************************************************************************/
91
92acpi_status acpi_ut_validate_fadt(void)
93{
94
95 /*
96 * Verify Fixed ACPI Description Table fields,
97 * but don't abort on any problems, just display error
98 */
99 if (acpi_gbl_FADT->pm1_evt_len < 4) {
100 acpi_ut_fadt_register_error("PM1_EVT_LEN",
101 (u32) acpi_gbl_FADT->pm1_evt_len,
102 ACPI_FADT_OFFSET(pm1_evt_len));
103 }
104
105 if (!acpi_gbl_FADT->pm1_cnt_len) {
106 acpi_ut_fadt_register_error("PM1_CNT_LEN", 0,
107 ACPI_FADT_OFFSET(pm1_cnt_len));
108 }
109
110 if (!acpi_gbl_FADT->xpm1a_evt_blk.address) {
111 acpi_ut_fadt_register_error("X_PM1a_EVT_BLK", 0,
112 ACPI_FADT_OFFSET(xpm1a_evt_blk.
113 address));
114 }
115
116 if (!acpi_gbl_FADT->xpm1a_cnt_blk.address) {
117 acpi_ut_fadt_register_error("X_PM1a_CNT_BLK", 0,
118 ACPI_FADT_OFFSET(xpm1a_cnt_blk.
119 address));
120 }
121
122 if (!acpi_gbl_FADT->xpm_tmr_blk.address) {
123 acpi_ut_fadt_register_error("X_PM_TMR_BLK", 0,
124 ACPI_FADT_OFFSET(xpm_tmr_blk.
125 address));
126 }
127
128 if ((acpi_gbl_FADT->xpm2_cnt_blk.address &&
129 !acpi_gbl_FADT->pm2_cnt_len)) {
130 acpi_ut_fadt_register_error("PM2_CNT_LEN",
131 (u32) acpi_gbl_FADT->pm2_cnt_len,
132 ACPI_FADT_OFFSET(pm2_cnt_len));
133 }
134
135 if (acpi_gbl_FADT->pm_tm_len < 4) {
136 acpi_ut_fadt_register_error("PM_TM_LEN",
137 (u32) acpi_gbl_FADT->pm_tm_len,
138 ACPI_FADT_OFFSET(pm_tm_len));
139 }
140
141 /* Length of GPE blocks must be a multiple of 2 */
142
143 if (acpi_gbl_FADT->xgpe0_blk.address &&
144 (acpi_gbl_FADT->gpe0_blk_len & 1)) {
145 acpi_ut_fadt_register_error("(x)GPE0_BLK_LEN",
146 (u32) acpi_gbl_FADT->gpe0_blk_len,
147 ACPI_FADT_OFFSET(gpe0_blk_len));
148 }
149
150 if (acpi_gbl_FADT->xgpe1_blk.address &&
151 (acpi_gbl_FADT->gpe1_blk_len & 1)) {
152 acpi_ut_fadt_register_error("(x)GPE1_BLK_LEN",
153 (u32) acpi_gbl_FADT->gpe1_blk_len,
154 ACPI_FADT_OFFSET(gpe1_blk_len));
155 }
156
157 return (AE_OK);
158}
159
160/****************************************************************************** 55/******************************************************************************
161 * 56 *
162 * FUNCTION: acpi_ut_terminate 57 * FUNCTION: acpi_ut_terminate
@@ -178,7 +73,6 @@ static void acpi_ut_terminate(void)
178 73
179 ACPI_FUNCTION_TRACE(ut_terminate); 74 ACPI_FUNCTION_TRACE(ut_terminate);
180 75
181 /* Free global tables, etc. */
182 /* Free global GPE blocks and related info structures */ 76 /* Free global GPE blocks and related info structures */
183 77
184 gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head; 78 gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
@@ -239,6 +133,10 @@ void acpi_ut_subsystem_shutdown(void)
239 133
240 acpi_ns_terminate(); 134 acpi_ns_terminate();
241 135
136 /* Delete the ACPI tables */
137
138 acpi_tb_terminate();
139
242 /* Close the globals */ 140 /* Close the globals */
243 141
244 acpi_ut_terminate(); 142 acpi_ut_terminate();
diff --git a/drivers/acpi/utilities/utmath.c b/drivers/acpi/utilities/utmath.c
index 19d74bedce27..0c56a0d20b29 100644
--- a/drivers/acpi/utilities/utmath.c
+++ b/drivers/acpi/utilities/utmath.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/utilities/utmisc.c b/drivers/acpi/utilities/utmisc.c
index 6d8a8211be90..50133fffe420 100644
--- a/drivers/acpi/utilities/utmisc.c
+++ b/drivers/acpi/utilities/utmisc.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -51,6 +51,78 @@ ACPI_MODULE_NAME("utmisc")
51 51
52/******************************************************************************* 52/*******************************************************************************
53 * 53 *
54 * FUNCTION: acpi_ut_validate_exception
55 *
56 * PARAMETERS: Status - The acpi_status code to be formatted
57 *
58 * RETURN: A string containing the exception text. NULL if exception is
59 * not valid.
60 *
61 * DESCRIPTION: This function validates and translates an ACPI exception into
62 * an ASCII string.
63 *
64 ******************************************************************************/
65const char *acpi_ut_validate_exception(acpi_status status)
66{
67 acpi_status sub_status;
68 const char *exception = NULL;
69
70 ACPI_FUNCTION_ENTRY();
71
72 /*
73 * Status is composed of two parts, a "type" and an actual code
74 */
75 sub_status = (status & ~AE_CODE_MASK);
76
77 switch (status & AE_CODE_MASK) {
78 case AE_CODE_ENVIRONMENTAL:
79
80 if (sub_status <= AE_CODE_ENV_MAX) {
81 exception = acpi_gbl_exception_names_env[sub_status];
82 }
83 break;
84
85 case AE_CODE_PROGRAMMER:
86
87 if (sub_status <= AE_CODE_PGM_MAX) {
88 exception =
89 acpi_gbl_exception_names_pgm[sub_status - 1];
90 }
91 break;
92
93 case AE_CODE_ACPI_TABLES:
94
95 if (sub_status <= AE_CODE_TBL_MAX) {
96 exception =
97 acpi_gbl_exception_names_tbl[sub_status - 1];
98 }
99 break;
100
101 case AE_CODE_AML:
102
103 if (sub_status <= AE_CODE_AML_MAX) {
104 exception =
105 acpi_gbl_exception_names_aml[sub_status - 1];
106 }
107 break;
108
109 case AE_CODE_CONTROL:
110
111 if (sub_status <= AE_CODE_CTRL_MAX) {
112 exception =
113 acpi_gbl_exception_names_ctrl[sub_status - 1];
114 }
115 break;
116
117 default:
118 break;
119 }
120
121 return (ACPI_CAST_PTR(const char, exception));
122}
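
A short sketch of the contract between the two functions in this patch: a NULL return here is what acpi_format_exception() in utglobal.c converts into its fallback string:

	acpi_status status = AE_NOT_FOUND;	/* any status value */
	const char *name = acpi_ut_validate_exception(status);

	if (!name)
		name = "UNKNOWN_STATUS_CODE";	/* the utglobal.c fallback */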
123
124/*******************************************************************************
125 *
54 * FUNCTION: acpi_ut_is_aml_table 126 * FUNCTION: acpi_ut_is_aml_table
55 * 127 *
56 * PARAMETERS: Table - An ACPI table 128 * PARAMETERS: Table - An ACPI table
@@ -62,14 +134,15 @@ ACPI_MODULE_NAME("utmisc")
62 * data tables that do not contain AML code. 134 * data tables that do not contain AML code.
63 * 135 *
64 ******************************************************************************/ 136 ******************************************************************************/
137
65u8 acpi_ut_is_aml_table(struct acpi_table_header *table) 138u8 acpi_ut_is_aml_table(struct acpi_table_header *table)
66{ 139{
67 140
68 /* These are the only tables that contain executable AML */ 141 /* These are the only tables that contain executable AML */
69 142
70 if (ACPI_COMPARE_NAME(table->signature, DSDT_SIG) || 143 if (ACPI_COMPARE_NAME(table->signature, ACPI_SIG_DSDT) ||
71 ACPI_COMPARE_NAME(table->signature, PSDT_SIG) || 144 ACPI_COMPARE_NAME(table->signature, ACPI_SIG_PSDT) ||
72 ACPI_COMPARE_NAME(table->signature, SSDT_SIG)) { 145 ACPI_COMPARE_NAME(table->signature, ACPI_SIG_SSDT)) {
73 return (TRUE); 146 return (TRUE);
74 } 147 }
75 148
@@ -418,7 +491,7 @@ u32 acpi_ut_dword_byte_swap(u32 value)
418void acpi_ut_set_integer_width(u8 revision) 491void acpi_ut_set_integer_width(u8 revision)
419{ 492{
420 493
421 if (revision <= 1) { 494 if (revision < 2) {
422 495
423 /* 32-bit case */ 496 /* 32-bit case */
424 497
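
For background (ACPI specification behavior, not introduced by this patch): the argument is the DSDT revision, and AML integers widened to 64 bits at revision 2, so the rewritten test is equivalent to the old "revision <= 1":

	acpi_ut_set_integer_width(1);	/* revision 1  -> 32-bit integers */
	acpi_ut_set_integer_width(2);	/* revision 2+ -> 64-bit integers */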
@@ -582,26 +655,25 @@ u8 acpi_ut_valid_acpi_name(u32 name)
582 * 655 *
583 ******************************************************************************/ 656 ******************************************************************************/
584 657
585acpi_name acpi_ut_repair_name(acpi_name name) 658acpi_name acpi_ut_repair_name(char *name)
586{ 659{
587 char *name_ptr = ACPI_CAST_PTR(char, &name);
588 char new_name[ACPI_NAME_SIZE];
589 acpi_native_uint i; 660 acpi_native_uint i;
661 char new_name[ACPI_NAME_SIZE];
590 662
591 for (i = 0; i < ACPI_NAME_SIZE; i++) { 663 for (i = 0; i < ACPI_NAME_SIZE; i++) {
592 new_name[i] = name_ptr[i]; 664 new_name[i] = name[i];
593 665
594 /* 666 /*
595 * Replace a bad character with something printable, yet technically 667 * Replace a bad character with something printable, yet technically
596 * still invalid. This prevents any collisions with existing "good" 668 * still invalid. This prevents any collisions with existing "good"
597 * names in the namespace. 669 * names in the namespace.
598 */ 670 */
599 if (!acpi_ut_valid_acpi_char(name_ptr[i], i)) { 671 if (!acpi_ut_valid_acpi_char(name[i], i)) {
600 new_name[i] = '*'; 672 new_name[i] = '*';
601 } 673 }
602 } 674 }
603 675
604 return (*ACPI_CAST_PTR(u32, new_name)); 676 return (*(u32 *) new_name);
605} 677}
606 678
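
An illustrative repair (the input value is hypothetical; the behavior follows from the loop above):

	/* A name containing a non-printable byte is rewritten to a
	 * printable but still technically invalid form:
	 * { 'A', 'B', 0x01, 'D' } becomes "AB*D".
	 */
	char bad[ACPI_NAME_SIZE] = { 'A', 'B', 0x01, 'D' };
	acpi_name fixed = acpi_ut_repair_name(bad);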
607/******************************************************************************* 679/*******************************************************************************
@@ -996,9 +1068,13 @@ acpi_ut_info(char *module_name, u32 line_number, char *format, ...)
996{ 1068{
997 va_list args; 1069 va_list args;
998 1070
999 acpi_os_printf("ACPI (%s-%04d): ", module_name, line_number); 1071 /*
 1072 * Removed module_name, line_number, and ACPICA version; not needed
1073 * for info output
1074 */
1075 acpi_os_printf("ACPI: ");
1000 1076
1001 va_start(args, format); 1077 va_start(args, format);
1002 acpi_os_vprintf(format, args); 1078 acpi_os_vprintf(format, args);
1003 acpi_os_printf(" [%X]\n", ACPI_CA_VERSION); 1079 acpi_os_printf("\n");
1004} 1080}
diff --git a/drivers/acpi/utilities/utmutex.c b/drivers/acpi/utilities/utmutex.c
index 180e73ceb6e2..cbad2ef5987d 100644
--- a/drivers/acpi/utilities/utmutex.c
+++ b/drivers/acpi/utilities/utmutex.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/utilities/utobject.c b/drivers/acpi/utilities/utobject.c
index ba7d8ac702df..4696124759e1 100644
--- a/drivers/acpi/utilities/utobject.c
+++ b/drivers/acpi/utilities/utobject.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/utilities/utresrc.c b/drivers/acpi/utilities/utresrc.c
index 5a2de92831d3..e8fe1ba6cc24 100644
--- a/drivers/acpi/utilities/utresrc.c
+++ b/drivers/acpi/utilities/utresrc.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/utilities/utstate.c b/drivers/acpi/utilities/utstate.c
index eaa13d05c859..edcaafad0a31 100644
--- a/drivers/acpi/utilities/utstate.c
+++ b/drivers/acpi/utilities/utstate.c
@@ -5,7 +5,7 @@
5 ******************************************************************************/ 5 ******************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/utilities/utxface.c b/drivers/acpi/utilities/utxface.c
index 3538f69c82a1..de3276f4f468 100644
--- a/drivers/acpi/utilities/utxface.c
+++ b/drivers/acpi/utilities/utxface.c
@@ -5,7 +5,7 @@
5 *****************************************************************************/ 5 *****************************************************************************/
6 6
7/* 7/*
8 * Copyright (C) 2000 - 2006, R. Byron Moore 8 * Copyright (C) 2000 - 2007, R. Byron Moore
9 * All rights reserved. 9 * All rights reserved.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
@@ -67,6 +67,7 @@ acpi_status acpi_initialize_subsystem(void)
67 67
68 ACPI_FUNCTION_TRACE(acpi_initialize_subsystem); 68 ACPI_FUNCTION_TRACE(acpi_initialize_subsystem);
69 69
70 acpi_gbl_startup_flags = ACPI_SUBSYSTEM_INITIALIZE;
70 ACPI_DEBUG_EXEC(acpi_ut_init_stack_ptr_trace()); 71 ACPI_DEBUG_EXEC(acpi_ut_init_stack_ptr_trace());
71 72
72 /* Initialize the OS-Dependent layer */ 73 /* Initialize the OS-Dependent layer */
@@ -127,20 +128,6 @@ acpi_status acpi_enable_subsystem(u32 flags)
127 128
128 ACPI_FUNCTION_TRACE(acpi_enable_subsystem); 129 ACPI_FUNCTION_TRACE(acpi_enable_subsystem);
129 130
130 /*
131 * We must initialize the hardware before we can enable ACPI.
132 * The values from the FADT are validated here.
133 */
134 if (!(flags & ACPI_NO_HARDWARE_INIT)) {
135 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
136 "[Init] Initializing ACPI hardware\n"));
137
138 status = acpi_hw_initialize();
139 if (ACPI_FAILURE(status)) {
140 return_ACPI_STATUS(status);
141 }
142 }
143
144 /* Enable ACPI mode */ 131 /* Enable ACPI mode */
145 132
146 if (!(flags & ACPI_NO_ACPI_ENABLE)) { 133 if (!(flags & ACPI_NO_ACPI_ENABLE)) {
@@ -398,7 +385,6 @@ acpi_status acpi_get_system_info(struct acpi_buffer * out_buffer)
398{ 385{
399 struct acpi_system_info *info_ptr; 386 struct acpi_system_info *info_ptr;
400 acpi_status status; 387 acpi_status status;
401 u32 i;
402 388
403 ACPI_FUNCTION_TRACE(acpi_get_system_info); 389 ACPI_FUNCTION_TRACE(acpi_get_system_info);
404 390
@@ -431,9 +417,7 @@ acpi_status acpi_get_system_info(struct acpi_buffer * out_buffer)
431 417
432 /* Timer resolution - 24 or 32 bits */ 418 /* Timer resolution - 24 or 32 bits */
433 419
434 if (!acpi_gbl_FADT) { 420 if (acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER) {
435 info_ptr->timer_resolution = 0;
436 } else if (acpi_gbl_FADT->tmr_val_ext == 0) {
437 info_ptr->timer_resolution = 24; 421 info_ptr->timer_resolution = 24;
438 } else { 422 } else {
439 info_ptr->timer_resolution = 32; 423 info_ptr->timer_resolution = 32;
@@ -449,13 +433,6 @@ acpi_status acpi_get_system_info(struct acpi_buffer * out_buffer)
449 info_ptr->debug_layer = acpi_dbg_layer; 433 info_ptr->debug_layer = acpi_dbg_layer;
450 info_ptr->debug_level = acpi_dbg_level; 434 info_ptr->debug_level = acpi_dbg_level;
451 435
452 /* Current status of the ACPI tables, per table type */
453
454 info_ptr->num_table_types = ACPI_TABLE_ID_MAX + 1;
455 for (i = 0; i < (ACPI_TABLE_ID_MAX + 1); i++) {
456 info_ptr->table_info[i].count = acpi_gbl_table_lists[i].count;
457 }
458
459 return_ACPI_STATUS(AE_OK); 436 return_ACPI_STATUS(AE_OK);
460} 437}
461 438
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 3d54680d0333..e0b97add8c63 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -32,6 +32,7 @@
32#include <linux/proc_fs.h> 32#include <linux/proc_fs.h>
33#include <linux/seq_file.h> 33#include <linux/seq_file.h>
34 34
35#include <linux/backlight.h>
35#include <asm/uaccess.h> 36#include <asm/uaccess.h>
36 37
37#include <acpi/acpi_bus.h> 38#include <acpi/acpi_bus.h>
@@ -56,6 +57,12 @@
56 57
57#define ACPI_VIDEO_HEAD_INVALID (~0u - 1) 58#define ACPI_VIDEO_HEAD_INVALID (~0u - 1)
58#define ACPI_VIDEO_HEAD_END (~0u) 59#define ACPI_VIDEO_HEAD_END (~0u)
60#define MAX_NAME_LEN 20
61
62#define ACPI_VIDEO_DISPLAY_CRT 1
63#define ACPI_VIDEO_DISPLAY_TV 2
64#define ACPI_VIDEO_DISPLAY_DVI 3
65#define ACPI_VIDEO_DISPLAY_LCD 4
59 66
60#define _COMPONENT ACPI_VIDEO_COMPONENT 67#define _COMPONENT ACPI_VIDEO_COMPONENT
61ACPI_MODULE_NAME("acpi_video") 68ACPI_MODULE_NAME("acpi_video")
@@ -66,16 +73,14 @@ MODULE_LICENSE("GPL");
66 73
67static int acpi_video_bus_add(struct acpi_device *device); 74static int acpi_video_bus_add(struct acpi_device *device);
68static int acpi_video_bus_remove(struct acpi_device *device, int type); 75static int acpi_video_bus_remove(struct acpi_device *device, int type);
69static int acpi_video_bus_match(struct acpi_device *device,
70 struct acpi_driver *driver);
71 76
72static struct acpi_driver acpi_video_bus = { 77static struct acpi_driver acpi_video_bus = {
73 .name = ACPI_VIDEO_DRIVER_NAME, 78 .name = ACPI_VIDEO_DRIVER_NAME,
74 .class = ACPI_VIDEO_CLASS, 79 .class = ACPI_VIDEO_CLASS,
80 .ids = ACPI_VIDEO_HID,
75 .ops = { 81 .ops = {
76 .add = acpi_video_bus_add, 82 .add = acpi_video_bus_add,
77 .remove = acpi_video_bus_remove, 83 .remove = acpi_video_bus_remove,
78 .match = acpi_video_bus_match,
79 }, 84 },
80}; 85};
81 86
@@ -133,20 +138,21 @@ struct acpi_video_device_flags {
133 u8 crt:1; 138 u8 crt:1;
134 u8 lcd:1; 139 u8 lcd:1;
135 u8 tvout:1; 140 u8 tvout:1;
141 u8 dvi:1;
136 u8 bios:1; 142 u8 bios:1;
137 u8 unknown:1; 143 u8 unknown:1;
138 u8 reserved:3; 144 u8 reserved:2;
139}; 145};
140 146
141struct acpi_video_device_cap { 147struct acpi_video_device_cap {
142 u8 _ADR:1; /*Return the unique ID */ 148 u8 _ADR:1; /*Return the unique ID */
143 u8 _BCL:1; /*Query list of brightness control levels supported */ 149 u8 _BCL:1; /*Query list of brightness control levels supported */
144 u8 _BCM:1; /*Set the brightness level */ 150 u8 _BCM:1; /*Set the brightness level */
151 u8 _BQC:1; /* Get current brightness level */
145 u8 _DDC:1; /*Return the EDID for this device */ 152 u8 _DDC:1; /*Return the EDID for this device */
146 u8 _DCS:1; /*Return status of output device */ 153 u8 _DCS:1; /*Return status of output device */
147 u8 _DGS:1; /*Query graphics state */ 154 u8 _DGS:1; /*Query graphics state */
148 u8 _DSS:1; /*Device state set */ 155 u8 _DSS:1; /*Device state set */
149 u8 _reserved:1;
150}; 156};
151 157
152struct acpi_video_device_brightness { 158struct acpi_video_device_brightness {
@@ -163,6 +169,8 @@ struct acpi_video_device {
163 struct acpi_video_bus *video; 169 struct acpi_video_bus *video;
164 struct acpi_device *dev; 170 struct acpi_device *dev;
165 struct acpi_video_device_brightness *brightness; 171 struct acpi_video_device_brightness *brightness;
172 struct backlight_device *backlight;
173 struct backlight_properties *data;
166}; 174};
167 175
168/* bus */ 176/* bus */
@@ -257,11 +265,35 @@ static void acpi_video_device_bind(struct acpi_video_bus *video,
257 struct acpi_video_device *device); 265 struct acpi_video_device *device);
258static int acpi_video_device_enumerate(struct acpi_video_bus *video); 266static int acpi_video_device_enumerate(struct acpi_video_bus *video);
259static int acpi_video_switch_output(struct acpi_video_bus *video, int event); 267static int acpi_video_switch_output(struct acpi_video_bus *video, int event);
268static int acpi_video_device_lcd_set_level(struct acpi_video_device *device,
269 int level);
270static int acpi_video_device_lcd_get_level_current(
271 struct acpi_video_device *device,
272 unsigned long *level);
260static int acpi_video_get_next_level(struct acpi_video_device *device, 273static int acpi_video_get_next_level(struct acpi_video_device *device,
261 u32 level_current, u32 event); 274 u32 level_current, u32 event);
262static void acpi_video_switch_brightness(struct acpi_video_device *device, 275static void acpi_video_switch_brightness(struct acpi_video_device *device,
263 int event); 276 int event);
264 277
 278/* Backlight device sysfs support */
279static int acpi_video_get_brightness(struct backlight_device *bd)
280{
281 unsigned long cur_level;
282 struct acpi_video_device *vd =
283 (struct acpi_video_device *)class_get_devdata(&bd->class_dev);
284 acpi_video_device_lcd_get_level_current(vd, &cur_level);
285 return (int) cur_level;
286}
287
288static int acpi_video_set_brightness(struct backlight_device *bd)
289{
290 int request_level = bd->props->brightness;
291 struct acpi_video_device *vd =
292 (struct acpi_video_device *)class_get_devdata(&bd->class_dev);
293 acpi_video_device_lcd_set_level(vd, request_level);
294 return 0;
295}
296
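
A hedged sketch of how the backlight class drives these callbacks (assumed semantics of the backlight core of this era, not shown in this diff): a write to the sysfs brightness attribute stores the requested level and then calls update_status, roughly:

	bd->props->brightness = level;	/* value parsed from the sysfs write */
	bd->props->update_status(bd);	/* -> acpi_video_set_brightness()
					 * -> acpi_video_device_lcd_set_level() */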
265/* -------------------------------------------------------------------------- 297/* --------------------------------------------------------------------------
266 Video Management 298 Video Management
267 -------------------------------------------------------------------------- */ 299 -------------------------------------------------------------------------- */
@@ -499,6 +531,7 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
499 acpi_integer status; 531 acpi_integer status;
500 acpi_handle h_dummy1; 532 acpi_handle h_dummy1;
501 int i; 533 int i;
534 u32 max_level = 0;
502 union acpi_object *obj = NULL; 535 union acpi_object *obj = NULL;
503 struct acpi_video_device_brightness *br = NULL; 536 struct acpi_video_device_brightness *br = NULL;
504 537
@@ -514,6 +547,8 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
514 if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_BCM", &h_dummy1))) { 547 if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_BCM", &h_dummy1))) {
515 device->cap._BCM = 1; 548 device->cap._BCM = 1;
516 } 549 }
 550 if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_BQC", &h_dummy1)))
551 device->cap._BQC = 1;
517 if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_DDC", &h_dummy1))) { 552 if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_DDC", &h_dummy1))) {
518 device->cap._DDC = 1; 553 device->cap._DDC = 1;
519 } 554 }
@@ -550,6 +585,8 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
550 continue; 585 continue;
551 } 586 }
552 br->levels[count] = (u32) o->integer.value; 587 br->levels[count] = (u32) o->integer.value;
588 if (br->levels[count] > max_level)
589 max_level = br->levels[count];
553 count++; 590 count++;
554 } 591 }
555 out: 592 out:
@@ -568,6 +605,37 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
568 605
569 kfree(obj); 606 kfree(obj);
570 607
 608 if (device->cap._BCL && device->cap._BCM && device->cap._BQC) {
609 unsigned long tmp;
610 static int count = 0;
611 char *name;
612 struct backlight_properties *acpi_video_data;
613
614 name = kzalloc(MAX_NAME_LEN, GFP_KERNEL);
615 if (!name)
616 return;
617
618 acpi_video_data = kzalloc(
619 sizeof(struct backlight_properties),
620 GFP_KERNEL);
 621 if (!acpi_video_data) {
622 kfree(name);
623 return;
624 }
625 acpi_video_data->owner = THIS_MODULE;
626 acpi_video_data->get_brightness =
627 acpi_video_get_brightness;
628 acpi_video_data->update_status =
629 acpi_video_set_brightness;
630 sprintf(name, "acpi_video%d", count++);
631 device->data = acpi_video_data;
632 acpi_video_data->max_brightness = max_level;
633 acpi_video_device_lcd_get_level_current(device, &tmp);
634 acpi_video_data->brightness = (int)tmp;
635 device->backlight = backlight_device_register(name,
636 NULL, device, acpi_video_data);
637 kfree(name);
638 }
571 return; 639 return;
572} 640}
573 641
@@ -668,6 +736,8 @@ static int acpi_video_device_info_seq_show(struct seq_file *seq, void *offset)
668 seq_printf(seq, "LCD\n"); 736 seq_printf(seq, "LCD\n");
669 else if (dev->flags.tvout) 737 else if (dev->flags.tvout)
670 seq_printf(seq, "TVOUT\n"); 738 seq_printf(seq, "TVOUT\n");
739 else if (dev->flags.dvi)
740 seq_printf(seq, "DVI\n");
671 else 741 else
672 seq_printf(seq, "UNKNOWN\n"); 742 seq_printf(seq, "UNKNOWN\n");
673 743
@@ -1242,6 +1312,16 @@ static int acpi_video_bus_remove_fs(struct acpi_device *device)
1242 -------------------------------------------------------------------------- */ 1312 -------------------------------------------------------------------------- */
1243 1313
1244/* device interface */ 1314/* device interface */
1315static struct acpi_video_device_attrib*
1316acpi_video_get_device_attr(struct acpi_video_bus *video, unsigned long device_id)
1317{
1318 int count;
1319
 1320 for (count = 0; count < video->attached_count; count++)
 1321 if ((video->attached_array[count].value.int_val & 0xffff) == device_id)
1322 return &(video->attached_array[count].value.attrib);
1323 return NULL;
1324}
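
An illustrative match (values hypothetical): _DOD entries carry attribute bits in the upper word, so only the low 16 bits are compared against the child device ID:

	/* a _DOD entry of 0x80000400 matches a child whose _ADR is 0x0400 */
	attribute = acpi_video_get_device_attr(video, 0x0400);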
1245 1325
1246static int 1326static int
1247acpi_video_bus_get_one_device(struct acpi_device *device, 1327acpi_video_bus_get_one_device(struct acpi_device *device,
@@ -1250,7 +1330,7 @@ acpi_video_bus_get_one_device(struct acpi_device *device,
1250 unsigned long device_id; 1330 unsigned long device_id;
1251 int status; 1331 int status;
1252 struct acpi_video_device *data; 1332 struct acpi_video_device *data;
1253 1333 struct acpi_video_device_attrib* attribute;
1254 1334
1255 if (!device || !video) 1335 if (!device || !video)
1256 return -EINVAL; 1336 return -EINVAL;
@@ -1271,20 +1351,30 @@ acpi_video_bus_get_one_device(struct acpi_device *device,
1271 data->video = video; 1351 data->video = video;
1272 data->dev = device; 1352 data->dev = device;
1273 1353
1274 switch (device_id & 0xffff) { 1354 attribute = acpi_video_get_device_attr(video, device_id);
1275 case 0x0100: 1355
 1276 data->flags.crt = 1; 1356 if ((attribute != NULL) && attribute->device_id_scheme) {
1277 break; 1357 switch (attribute->display_type) {
1278 case 0x0400: 1358 case ACPI_VIDEO_DISPLAY_CRT:
1279 data->flags.lcd = 1; 1359 data->flags.crt = 1;
1280 break; 1360 break;
1281 case 0x0200: 1361 case ACPI_VIDEO_DISPLAY_TV:
1282 data->flags.tvout = 1; 1362 data->flags.tvout = 1;
1283 break; 1363 break;
1284 default: 1364 case ACPI_VIDEO_DISPLAY_DVI:
1365 data->flags.dvi = 1;
1366 break;
1367 case ACPI_VIDEO_DISPLAY_LCD:
1368 data->flags.lcd = 1;
1369 break;
1370 default:
1371 data->flags.unknown = 1;
1372 break;
1373 }
 1374 if (attribute->bios_can_detect)
1375 data->flags.bios = 1;
1376 } else
1285 data->flags.unknown = 1; 1377 data->flags.unknown = 1;
1286 break;
1287 }
1288 1378
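
How the new enum lines up with the old hardcoded IDs (bit layout assumed from the ACPI _DOD definition, where bits 11:8 carry the display type):

	/* 0x0100 -> type 1 (CRT), 0x0200 -> type 2 (TV), 0x0400 -> type 4
	 * (LCD), matching the ACPI_VIDEO_DISPLAY_* constants added above.
	 */
	u32 display_type = (device_id >> 8) & 0xF;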
1289 acpi_video_device_bind(video, data); 1379 acpi_video_device_bind(video, data);
1290 acpi_video_device_find_cap(data); 1380 acpi_video_device_find_cap(data);
@@ -1588,7 +1678,10 @@ static int acpi_video_bus_put_one_device(struct acpi_video_device *device)
1588 status = acpi_remove_notify_handler(device->dev->handle, 1678 status = acpi_remove_notify_handler(device->dev->handle,
1589 ACPI_DEVICE_NOTIFY, 1679 ACPI_DEVICE_NOTIFY,
1590 acpi_video_device_notify); 1680 acpi_video_device_notify);
 1591 1681 if (device->backlight) {
1682 backlight_device_unregister(device->backlight);
1683 kfree(device->data);
1684 }
1592 return 0; 1685 return 0;
1593} 1686}
1594 1687
@@ -1790,39 +1883,6 @@ static int acpi_video_bus_remove(struct acpi_device *device, int type)
1790 return 0; 1883 return 0;
1791} 1884}
1792 1885
1793static int
1794acpi_video_bus_match(struct acpi_device *device, struct acpi_driver *driver)
1795{
1796 acpi_handle h_dummy1;
1797 acpi_handle h_dummy2;
1798 acpi_handle h_dummy3;
1799
1800
1801 if (!device || !driver)
1802 return -EINVAL;
1803
 1804 /* Since there is no HID or CID for ACPI video drivers, we have
 1805 * to check well-known required nodes for each feature we support.
1806 */
1807
 1808 /* Is this device able to support video switching? */
1809 if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOD", &h_dummy1)) &&
1810 ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOS", &h_dummy2)))
1811 return 0;
1812
 1813 /* Is this device able to retrieve a video ROM? */
1814 if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_ROM", &h_dummy1)))
1815 return 0;
1816
 1817 /* Is this device able to configure which video head is POSTed? */
1818 if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_VPO", &h_dummy1)) &&
1819 ACPI_SUCCESS(acpi_get_handle(device->handle, "_GPD", &h_dummy2)) &&
1820 ACPI_SUCCESS(acpi_get_handle(device->handle, "_SPD", &h_dummy3)))
1821 return 0;
1822
1823 return -ENODEV;
1824}
1825
1826static int __init acpi_video_init(void) 1886static int __init acpi_video_init(void)
1827{ 1887{
1828 int result = 0; 1888 int result = 0;
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 1c94b43d2c9b..3747457fee7a 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -41,12 +41,12 @@ config SATA_SVW
41 If unsure, say N. 41 If unsure, say N.
42 42
43config ATA_PIIX 43config ATA_PIIX
44 tristate "Intel PIIX/ICH SATA support" 44 tristate "Intel ESB, ICH, PIIX3, PIIX4 PATA/SATA support"
45 depends on PCI 45 depends on PCI
46 help 46 help
47 This option enables support for ICH5/6/7/8 Serial ATA 47 This option enables support for ICH5/6/7/8 Serial ATA
48 and support for PATA on the Intel PIIX3/PIIX4/ICH series 48 and support for PATA on the Intel ESB/ICH/PIIX3/PIIX4 series
49 PATA host controllers. 49 host controllers.
50 50
51 If unsure, say N. 51 If unsure, say N.
52 52
@@ -116,11 +116,14 @@ config SATA_SIL24
116 If unsure, say N. 116 If unsure, say N.
117 117
118config SATA_SIS 118config SATA_SIS
119 tristate "SiS 964/180 SATA support" 119 tristate "SiS 964/965/966/180 SATA support"
120 depends on PCI 120 depends on PCI
121 select PATA_SIS
121 help 122 help
122 This option enables support for SiS Serial ATA 964/180. 123 This option enables support for SiS Serial ATA on
123 124 SiS 964/965/966/180 and Parallel ATA on SiS 180.
 125 PATA support for the SiS 180 additionally requires
 126 enabling the PATA_SIS driver in the config.
124 If unsure, say N. 127 If unsure, say N.
125 128
126config SATA_ULI 129config SATA_ULI
@@ -147,6 +150,12 @@ config SATA_VITESSE
147 150
148 If unsure, say N. 151 If unsure, say N.
149 152
153config SATA_INIC162X
154 tristate "Initio 162x SATA support (HIGHLY EXPERIMENTAL)"
155 depends on PCI && EXPERIMENTAL
156 help
157 This option enables support for Initio 162x Serial ATA.
158
150config SATA_INTEL_COMBINED 159config SATA_INTEL_COMBINED
151 bool 160 bool
152 depends on IDE=y && !BLK_DEV_IDE_SATA && (SATA_AHCI || ATA_PIIX) 161 depends on IDE=y && !BLK_DEV_IDE_SATA && (SATA_AHCI || ATA_PIIX)
@@ -296,7 +305,7 @@ config PATA_ISAPNP
296 If unsure, say N. 305 If unsure, say N.
297 306
298config PATA_IT821X 307config PATA_IT821X
299 tristate "IT821x PATA support (Experimental)" 308 tristate "IT8211/2 PATA support (Experimental)"
300 depends on PCI && EXPERIMENTAL 309 depends on PCI && EXPERIMENTAL
301 help 310 help
302 This option enables support for the ITE 8211 and 8212 311 This option enables support for the ITE 8211 and 8212
@@ -305,6 +314,15 @@ config PATA_IT821X
305 314
306 If unsure, say N. 315 If unsure, say N.
307 316
317config PATA_IT8213
318 tristate "IT8213 PATA support (Experimental)"
319 depends on PCI && EXPERIMENTAL
320 help
 321 This option enables support for the ITE 8213 PATA
322 controllers via the new ATA layer.
323
324 If unsure, say N.
325
308config PATA_JMICRON 326config PATA_JMICRON
309 tristate "JMicron PATA support" 327 tristate "JMicron PATA support"
310 depends on PCI 328 depends on PCI
@@ -341,6 +359,15 @@ config PATA_MARVELL
341 359
342 If unsure, say N. 360 If unsure, say N.
343 361
362config PATA_MPC52xx
363 tristate "Freescale MPC52xx SoC internal IDE"
364 depends on PPC_MPC52xx
365 help
 366 This option enables support for the integrated IDE controller
367 of the Freescale MPC52xx SoC.
368
369 If unsure, say N.
370
344config PATA_MPIIX 371config PATA_MPIIX
345 tristate "Intel PATA MPIIX support" 372 tristate "Intel PATA MPIIX support"
346 depends on PCI 373 depends on PCI
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index bc3d81ae757e..cd096f0c78a1 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_SATA_SX4) += sata_sx4.o
15obj-$(CONFIG_SATA_NV) += sata_nv.o 15obj-$(CONFIG_SATA_NV) += sata_nv.o
16obj-$(CONFIG_SATA_ULI) += sata_uli.o 16obj-$(CONFIG_SATA_ULI) += sata_uli.o
17obj-$(CONFIG_SATA_MV) += sata_mv.o 17obj-$(CONFIG_SATA_MV) += sata_mv.o
18obj-$(CONFIG_SATA_INIC162X) += sata_inic162x.o
18obj-$(CONFIG_PDC_ADMA) += pdc_adma.o 19obj-$(CONFIG_PDC_ADMA) += pdc_adma.o
19 20
20obj-$(CONFIG_PATA_ALI) += pata_ali.o 21obj-$(CONFIG_PATA_ALI) += pata_ali.o
@@ -33,11 +34,13 @@ obj-$(CONFIG_PATA_HPT3X2N) += pata_hpt3x2n.o
33obj-$(CONFIG_PATA_HPT3X3) += pata_hpt3x3.o 34obj-$(CONFIG_PATA_HPT3X3) += pata_hpt3x3.o
34obj-$(CONFIG_PATA_ISAPNP) += pata_isapnp.o 35obj-$(CONFIG_PATA_ISAPNP) += pata_isapnp.o
35obj-$(CONFIG_PATA_IT821X) += pata_it821x.o 36obj-$(CONFIG_PATA_IT821X) += pata_it821x.o
37obj-$(CONFIG_PATA_IT8213) += pata_it8213.o
36obj-$(CONFIG_PATA_JMICRON) += pata_jmicron.o 38obj-$(CONFIG_PATA_JMICRON) += pata_jmicron.o
37obj-$(CONFIG_PATA_NETCELL) += pata_netcell.o 39obj-$(CONFIG_PATA_NETCELL) += pata_netcell.o
38obj-$(CONFIG_PATA_NS87410) += pata_ns87410.o 40obj-$(CONFIG_PATA_NS87410) += pata_ns87410.o
39obj-$(CONFIG_PATA_OPTI) += pata_opti.o 41obj-$(CONFIG_PATA_OPTI) += pata_opti.o
40obj-$(CONFIG_PATA_OPTIDMA) += pata_optidma.o 42obj-$(CONFIG_PATA_OPTIDMA) += pata_optidma.o
43obj-$(CONFIG_PATA_MPC52xx) += pata_mpc52xx.o
41obj-$(CONFIG_PATA_MARVELL) += pata_marvell.o 44obj-$(CONFIG_PATA_MARVELL) += pata_marvell.o
42obj-$(CONFIG_PATA_MPIIX) += pata_mpiix.o 45obj-$(CONFIG_PATA_MPIIX) += pata_mpiix.o
43obj-$(CONFIG_PATA_OLDPIIX) += pata_oldpiix.o 46obj-$(CONFIG_PATA_OLDPIIX) += pata_oldpiix.o
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 48616c6fee9d..92cdb0c5171f 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -45,7 +45,6 @@
45#include <scsi/scsi_host.h> 45#include <scsi/scsi_host.h>
46#include <scsi/scsi_cmnd.h> 46#include <scsi/scsi_cmnd.h>
47#include <linux/libata.h> 47#include <linux/libata.h>
48#include <asm/io.h>
49 48
50#define DRV_NAME "ahci" 49#define DRV_NAME "ahci"
51#define DRV_VERSION "2.0" 50#define DRV_VERSION "2.0"
@@ -166,9 +165,6 @@ enum {
166 PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */ 165 PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */
167 PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */ 166 PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */
168 167
169 /* hpriv->flags bits */
170 AHCI_FLAG_MSI = (1 << 0),
171
172 /* ap->flags bits */ 168 /* ap->flags bits */
173 AHCI_FLAG_NO_NCQ = (1 << 24), 169 AHCI_FLAG_NO_NCQ = (1 << 24),
174 AHCI_FLAG_IGN_IRQ_IF_ERR = (1 << 25), /* ignore IRQ_IF_ERR */ 170 AHCI_FLAG_IGN_IRQ_IF_ERR = (1 << 25), /* ignore IRQ_IF_ERR */
@@ -191,7 +187,6 @@ struct ahci_sg {
191}; 187};
192 188
193struct ahci_host_priv { 189struct ahci_host_priv {
194 unsigned long flags;
195 u32 cap; /* cache of HOST_CAP register */ 190 u32 cap; /* cache of HOST_CAP register */
196 u32 port_map; /* cache of HOST_PORTS_IMPL reg */ 191 u32 port_map; /* cache of HOST_PORTS_IMPL reg */
197}; 192};
@@ -229,7 +224,6 @@ static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
229static int ahci_port_resume(struct ata_port *ap); 224static int ahci_port_resume(struct ata_port *ap);
230static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg); 225static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
231static int ahci_pci_device_resume(struct pci_dev *pdev); 226static int ahci_pci_device_resume(struct pci_dev *pdev);
232static void ahci_remove_one (struct pci_dev *pdev);
233 227
234static struct scsi_host_template ahci_sht = { 228static struct scsi_host_template ahci_sht = {
235 .module = THIS_MODULE, 229 .module = THIS_MODULE,
@@ -266,6 +260,8 @@ static const struct ata_port_operations ahci_ops = {
266 260
267 .irq_handler = ahci_interrupt, 261 .irq_handler = ahci_interrupt,
268 .irq_clear = ahci_irq_clear, 262 .irq_clear = ahci_irq_clear,
263 .irq_on = ata_dummy_irq_on,
264 .irq_ack = ata_dummy_irq_ack,
269 265
270 .scr_read = ahci_scr_read, 266 .scr_read = ahci_scr_read,
271 .scr_write = ahci_scr_write, 267 .scr_write = ahci_scr_write,
@@ -297,6 +293,8 @@ static const struct ata_port_operations ahci_vt8251_ops = {
297 293
298 .irq_handler = ahci_interrupt, 294 .irq_handler = ahci_interrupt,
299 .irq_clear = ahci_irq_clear, 295 .irq_clear = ahci_irq_clear,
296 .irq_on = ata_dummy_irq_on,
297 .irq_ack = ata_dummy_irq_ack,
300 298
301 .scr_read = ahci_scr_read, 299 .scr_read = ahci_scr_read,
302 .scr_write = ahci_scr_write, 300 .scr_write = ahci_scr_write,
@@ -431,7 +429,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
431 429
432 /* Generic, PCI class code for AHCI */ 430 /* Generic, PCI class code for AHCI */
433 { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 431 { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
434 0x010601, 0xffffff, board_ahci }, 432 PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },
435 433
436 { } /* terminate list */ 434 { } /* terminate list */
437}; 435};
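
Background on the symbolic class code (PCI-SIG class code assignment; the constant is assumed to expand to the old literal):

	/* base class 0x01 (mass storage), subclass 0x06 (SATA),
	 * programming interface 0x01 (AHCI)
	 */
	#define PCI_CLASS_STORAGE_SATA_AHCI	0x010601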
@@ -441,9 +439,9 @@ static struct pci_driver ahci_pci_driver = {
441 .name = DRV_NAME, 439 .name = DRV_NAME,
442 .id_table = ahci_pci_tbl, 440 .id_table = ahci_pci_tbl,
443 .probe = ahci_init_one, 441 .probe = ahci_init_one,
442 .remove = ata_pci_remove_one,
444 .suspend = ahci_pci_device_suspend, 443 .suspend = ahci_pci_device_suspend,
445 .resume = ahci_pci_device_resume, 444 .resume = ahci_pci_device_resume,
446 .remove = ahci_remove_one,
447}; 445};
448 446
449 447
@@ -452,16 +450,12 @@ static inline int ahci_nr_ports(u32 cap)
452 return (cap & 0x1f) + 1; 450 return (cap & 0x1f) + 1;
453} 451}
454 452
455static inline unsigned long ahci_port_base_ul (unsigned long base, unsigned int port) 453static inline void __iomem *ahci_port_base(void __iomem *base,
454 unsigned int port)
456{ 455{
457 return base + 0x100 + (port * 0x80); 456 return base + 0x100 + (port * 0x80);
458} 457}
459 458
460static inline void __iomem *ahci_port_base (void __iomem *base, unsigned int port)
461{
462 return (void __iomem *) ahci_port_base_ul((unsigned long)base, port);
463}
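
A worked example for the accessor above; AHCI lays port register blocks out 0x80 bytes apart starting at offset 0x100:

	/* port 2 decodes to mmio + 0x100 + 2 * 0x80 == mmio + 0x200 */
	void __iomem *port2_mmio = ahci_port_base(mmio, 2);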
464
465static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg_in) 459static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg_in)
466{ 460{
467 unsigned int sc_reg; 461 unsigned int sc_reg;
@@ -475,7 +469,7 @@ static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg_in)
475 return 0xffffffffU; 469 return 0xffffffffU;
476 } 470 }
477 471
478 return readl((void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4)); 472 return readl(ap->ioaddr.scr_addr + (sc_reg * 4));
479} 473}
480 474
481 475
@@ -493,7 +487,7 @@ static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg_in,
493 return; 487 return;
494 } 488 }
495 489
496 writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4)); 490 writel(val, ap->ioaddr.scr_addr + (sc_reg * 4));
497} 491}
498 492
499static void ahci_start_engine(void __iomem *port_mmio) 493static void ahci_start_engine(void __iomem *port_mmio)
@@ -735,7 +729,7 @@ static void ahci_init_controller(void __iomem *mmio, struct pci_dev *pdev,
735 729
736static unsigned int ahci_dev_classify(struct ata_port *ap) 730static unsigned int ahci_dev_classify(struct ata_port *ap)
737{ 731{
738 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr; 732 void __iomem *port_mmio = ap->ioaddr.cmd_addr;
739 struct ata_taskfile tf; 733 struct ata_taskfile tf;
740 u32 tmp; 734 u32 tmp;
741 735
@@ -763,7 +757,7 @@ static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
763 757
764static int ahci_clo(struct ata_port *ap) 758static int ahci_clo(struct ata_port *ap)
765{ 759{
766 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr; 760 void __iomem *port_mmio = ap->ioaddr.cmd_addr;
767 struct ahci_host_priv *hpriv = ap->host->private_data; 761 struct ahci_host_priv *hpriv = ap->host->private_data;
768 u32 tmp; 762 u32 tmp;
769 763
@@ -785,7 +779,7 @@ static int ahci_clo(struct ata_port *ap)
785static int ahci_softreset(struct ata_port *ap, unsigned int *class) 779static int ahci_softreset(struct ata_port *ap, unsigned int *class)
786{ 780{
787 struct ahci_port_priv *pp = ap->private_data; 781 struct ahci_port_priv *pp = ap->private_data;
788 void __iomem *mmio = ap->host->mmio_base; 782 void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
789 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no); 783 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
790 const u32 cmd_fis_len = 5; /* five dwords */ 784 const u32 cmd_fis_len = 5; /* five dwords */
791 const char *reason = NULL; 785 const char *reason = NULL;
@@ -893,7 +887,7 @@ static int ahci_hardreset(struct ata_port *ap, unsigned int *class)
893 struct ahci_port_priv *pp = ap->private_data; 887 struct ahci_port_priv *pp = ap->private_data;
894 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG; 888 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
895 struct ata_taskfile tf; 889 struct ata_taskfile tf;
896 void __iomem *mmio = ap->host->mmio_base; 890 void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
897 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no); 891 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
898 int rc; 892 int rc;
899 893
@@ -921,7 +915,7 @@ static int ahci_hardreset(struct ata_port *ap, unsigned int *class)
921 915
922static int ahci_vt8251_hardreset(struct ata_port *ap, unsigned int *class) 916static int ahci_vt8251_hardreset(struct ata_port *ap, unsigned int *class)
923{ 917{
924 void __iomem *mmio = ap->host->mmio_base; 918 void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
925 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no); 919 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
926 int rc; 920 int rc;
927 921
@@ -946,7 +940,7 @@ static int ahci_vt8251_hardreset(struct ata_port *ap, unsigned int *class)
946 940
947static void ahci_postreset(struct ata_port *ap, unsigned int *class) 941static void ahci_postreset(struct ata_port *ap, unsigned int *class)
948{ 942{
949 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr; 943 void __iomem *port_mmio = ap->ioaddr.cmd_addr;
950 u32 new_tmp, tmp; 944 u32 new_tmp, tmp;
951 945
952 ata_std_postreset(ap, class); 946 ata_std_postreset(ap, class);
@@ -965,7 +959,7 @@ static void ahci_postreset(struct ata_port *ap, unsigned int *class)
965 959
966static u8 ahci_check_status(struct ata_port *ap) 960static u8 ahci_check_status(struct ata_port *ap)
967{ 961{
968 void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr; 962 void __iomem *mmio = ap->ioaddr.cmd_addr;
969 963
970 return readl(mmio + PORT_TFDATA) & 0xFF; 964 return readl(mmio + PORT_TFDATA) & 0xFF;
971} 965}
@@ -1111,7 +1105,7 @@ static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
1111 1105
1112static void ahci_host_intr(struct ata_port *ap) 1106static void ahci_host_intr(struct ata_port *ap)
1113{ 1107{
1114 void __iomem *mmio = ap->host->mmio_base; 1108 void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
1115 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no); 1109 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1116 struct ata_eh_info *ehi = &ap->eh_info; 1110 struct ata_eh_info *ehi = &ap->eh_info;
1117 struct ahci_port_priv *pp = ap->private_data; 1111 struct ahci_port_priv *pp = ap->private_data;
@@ -1173,7 +1167,7 @@ static void ahci_host_intr(struct ata_port *ap)
1173 * dangerous, we need to know more about them. Print 1167 * dangerous, we need to know more about them. Print
1174 * more of it. 1168 * more of it.
1175 */ 1169 */
1176 const u32 *f = pp->rx_fis + RX_FIS_SDB; 1170 const __le32 *f = pp->rx_fis + RX_FIS_SDB;
1177 1171
1178 ata_port_printk(ap, KERN_INFO, "Spurious SDB FIS during NCQ " 1172 ata_port_printk(ap, KERN_INFO, "Spurious SDB FIS during NCQ "
1179 "issue=0x%x SAct=0x%x FIS=%08x:%08x%s\n", 1173 "issue=0x%x SAct=0x%x FIS=%08x:%08x%s\n",
@@ -1209,7 +1203,7 @@ static irqreturn_t ahci_interrupt(int irq, void *dev_instance)
1209 VPRINTK("ENTER\n"); 1203 VPRINTK("ENTER\n");
1210 1204
1211 hpriv = host->private_data; 1205 hpriv = host->private_data;
1212 mmio = host->mmio_base; 1206 mmio = host->iomap[AHCI_PCI_BAR];
1213 1207
1214 /* sigh. 0xffffffff is a valid return from h/w */ 1208 /* sigh. 0xffffffff is a valid return from h/w */
1215 irq_stat = readl(mmio + HOST_IRQ_STAT); 1209 irq_stat = readl(mmio + HOST_IRQ_STAT);
@@ -1254,7 +1248,7 @@ static irqreturn_t ahci_interrupt(int irq, void *dev_instance)
1254static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc) 1248static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
1255{ 1249{
1256 struct ata_port *ap = qc->ap; 1250 struct ata_port *ap = qc->ap;
1257 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr; 1251 void __iomem *port_mmio = ap->ioaddr.cmd_addr;
1258 1252
1259 if (qc->tf.protocol == ATA_PROT_NCQ) 1253 if (qc->tf.protocol == ATA_PROT_NCQ)
1260 writel(1 << qc->tag, port_mmio + PORT_SCR_ACT); 1254 writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
@@ -1266,7 +1260,7 @@ static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
1266 1260
1267static void ahci_freeze(struct ata_port *ap) 1261static void ahci_freeze(struct ata_port *ap)
1268{ 1262{
1269 void __iomem *mmio = ap->host->mmio_base; 1263 void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
1270 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no); 1264 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1271 1265
1272 /* turn IRQ off */ 1266 /* turn IRQ off */
@@ -1275,7 +1269,7 @@ static void ahci_freeze(struct ata_port *ap)
1275 1269
1276static void ahci_thaw(struct ata_port *ap) 1270static void ahci_thaw(struct ata_port *ap)
1277{ 1271{
1278 void __iomem *mmio = ap->host->mmio_base; 1272 void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
1279 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no); 1273 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1280 u32 tmp; 1274 u32 tmp;
1281 1275
@@ -1290,7 +1284,7 @@ static void ahci_thaw(struct ata_port *ap)
1290 1284
1291static void ahci_error_handler(struct ata_port *ap) 1285static void ahci_error_handler(struct ata_port *ap)
1292{ 1286{
1293 void __iomem *mmio = ap->host->mmio_base; 1287 void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
1294 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no); 1288 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1295 1289
1296 if (!(ap->pflags & ATA_PFLAG_FROZEN)) { 1290 if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
@@ -1306,7 +1300,7 @@ static void ahci_error_handler(struct ata_port *ap)
1306 1300
1307static void ahci_vt8251_error_handler(struct ata_port *ap) 1301static void ahci_vt8251_error_handler(struct ata_port *ap)
1308{ 1302{
1309 void __iomem *mmio = ap->host->mmio_base; 1303 void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
1310 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no); 1304 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1311 1305
1312 if (!(ap->pflags & ATA_PFLAG_FROZEN)) { 1306 if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
@@ -1323,7 +1317,7 @@ static void ahci_vt8251_error_handler(struct ata_port *ap)
1323static void ahci_post_internal_cmd(struct ata_queued_cmd *qc) 1317static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
1324{ 1318{
1325 struct ata_port *ap = qc->ap; 1319 struct ata_port *ap = qc->ap;
1326 void __iomem *mmio = ap->host->mmio_base; 1320 void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
1327 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no); 1321 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1328 1322
1329 if (qc->flags & ATA_QCFLAG_FAILED) 1323 if (qc->flags & ATA_QCFLAG_FAILED)
@@ -1340,7 +1334,7 @@ static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
1340{ 1334{
1341 struct ahci_host_priv *hpriv = ap->host->private_data; 1335 struct ahci_host_priv *hpriv = ap->host->private_data;
1342 struct ahci_port_priv *pp = ap->private_data; 1336 struct ahci_port_priv *pp = ap->private_data;
1343 void __iomem *mmio = ap->host->mmio_base; 1337 void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
1344 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no); 1338 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1345 const char *emsg = NULL; 1339 const char *emsg = NULL;
1346 int rc; 1340 int rc;
@@ -1361,7 +1355,7 @@ static int ahci_port_resume(struct ata_port *ap)
1361{ 1355{
1362 struct ahci_port_priv *pp = ap->private_data; 1356 struct ahci_port_priv *pp = ap->private_data;
1363 struct ahci_host_priv *hpriv = ap->host->private_data; 1357 struct ahci_host_priv *hpriv = ap->host->private_data;
1364 void __iomem *mmio = ap->host->mmio_base; 1358 void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
1365 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no); 1359 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1366 1360
1367 ahci_power_up(port_mmio, hpriv->cap); 1361 ahci_power_up(port_mmio, hpriv->cap);
@@ -1373,7 +1367,7 @@ static int ahci_port_resume(struct ata_port *ap)
1373static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg) 1367static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
1374{ 1368{
1375 struct ata_host *host = dev_get_drvdata(&pdev->dev); 1369 struct ata_host *host = dev_get_drvdata(&pdev->dev);
1376 void __iomem *mmio = host->mmio_base; 1370 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1377 u32 ctl; 1371 u32 ctl;
1378 1372
1379 if (mesg.event == PM_EVENT_SUSPEND) { 1373 if (mesg.event == PM_EVENT_SUSPEND) {
@@ -1394,10 +1388,12 @@ static int ahci_pci_device_resume(struct pci_dev *pdev)
1394{ 1388{
1395 struct ata_host *host = dev_get_drvdata(&pdev->dev); 1389 struct ata_host *host = dev_get_drvdata(&pdev->dev);
1396 struct ahci_host_priv *hpriv = host->private_data; 1390 struct ahci_host_priv *hpriv = host->private_data;
1397 void __iomem *mmio = host->mmio_base; 1391 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1398 int rc; 1392 int rc;
1399 1393
1400 ata_pci_device_do_resume(pdev); 1394 rc = ata_pci_device_do_resume(pdev);
1395 if (rc)
1396 return rc;
1401 1397
1402 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) { 1398 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
1403 rc = ahci_reset_controller(mmio, pdev); 1399 rc = ahci_reset_controller(mmio, pdev);
@@ -1418,29 +1414,24 @@ static int ahci_port_start(struct ata_port *ap)
1418 struct device *dev = ap->host->dev; 1414 struct device *dev = ap->host->dev;
1419 struct ahci_host_priv *hpriv = ap->host->private_data; 1415 struct ahci_host_priv *hpriv = ap->host->private_data;
1420 struct ahci_port_priv *pp; 1416 struct ahci_port_priv *pp;
1421 void __iomem *mmio = ap->host->mmio_base; 1417 void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
1422 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no); 1418 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1423 void *mem; 1419 void *mem;
1424 dma_addr_t mem_dma; 1420 dma_addr_t mem_dma;
1425 int rc; 1421 int rc;
1426 1422
1427 pp = kmalloc(sizeof(*pp), GFP_KERNEL); 1423 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1428 if (!pp) 1424 if (!pp)
1429 return -ENOMEM; 1425 return -ENOMEM;
1430 memset(pp, 0, sizeof(*pp));
1431 1426
1432 rc = ata_pad_alloc(ap, dev); 1427 rc = ata_pad_alloc(ap, dev);
1433 if (rc) { 1428 if (rc)
1434 kfree(pp);
1435 return rc; 1429 return rc;
1436 }
1437 1430
1438 mem = dma_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma, GFP_KERNEL); 1431 mem = dmam_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma,
1439 if (!mem) { 1432 GFP_KERNEL);
1440 ata_pad_free(ap, dev); 1433 if (!mem)
1441 kfree(pp);
1442 return -ENOMEM; 1434 return -ENOMEM;
1443 }
1444 memset(mem, 0, AHCI_PORT_PRIV_DMA_SZ); 1435 memset(mem, 0, AHCI_PORT_PRIV_DMA_SZ);
1445 1436
1446 /* 1437 /*
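
The hunk above is the devres (managed resource) conversion of ahci_port_start(): devm_kzalloc() replaces kmalloc() plus memset(), dmam_alloc_coherent() replaces dma_alloc_coherent(), and the hand-rolled error paths that freed earlier allocations collapse into bare returns. A minimal sketch of the pattern, with illustrative sizes and names rather than the driver's real ones:

/* Devres sketch: both allocations are released automatically when the
 * owning device is unbound, so every error path is a plain return. */
static int example_port_start(struct device *dev)
{
	void *pp;
	void *mem;
	dma_addr_t mem_dma;

	pp = devm_kzalloc(dev, 128, GFP_KERNEL);	/* zeroed, auto-freed */
	if (!pp)
		return -ENOMEM;

	mem = dmam_alloc_coherent(dev, 4096, &mem_dma, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;		/* no kfree(pp); devres owns it */

	return 0;
}
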
@@ -1482,10 +1473,8 @@ static int ahci_port_start(struct ata_port *ap)
1482 1473
1483static void ahci_port_stop(struct ata_port *ap) 1474static void ahci_port_stop(struct ata_port *ap)
1484{ 1475{
1485 struct device *dev = ap->host->dev;
1486 struct ahci_host_priv *hpriv = ap->host->private_data; 1476 struct ahci_host_priv *hpriv = ap->host->private_data;
1487 struct ahci_port_priv *pp = ap->private_data; 1477 void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
1488 void __iomem *mmio = ap->host->mmio_base;
1489 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no); 1478 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1490 const char *emsg = NULL; 1479 const char *emsg = NULL;
1491 int rc; 1480 int rc;
@@ -1494,19 +1483,13 @@ static void ahci_port_stop(struct ata_port *ap)
1494 rc = ahci_deinit_port(port_mmio, hpriv->cap, &emsg); 1483 rc = ahci_deinit_port(port_mmio, hpriv->cap, &emsg);
1495 if (rc) 1484 if (rc)
1496 ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc); 1485 ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
1497
1498 ap->private_data = NULL;
1499 dma_free_coherent(dev, AHCI_PORT_PRIV_DMA_SZ,
1500 pp->cmd_slot, pp->cmd_slot_dma);
1501 ata_pad_free(ap, dev);
1502 kfree(pp);
1503} 1486}
1504 1487
1505static void ahci_setup_port(struct ata_ioports *port, unsigned long base, 1488static void ahci_setup_port(struct ata_ioports *port, void __iomem *base,
1506 unsigned int port_idx) 1489 unsigned int port_idx)
1507{ 1490{
1508 VPRINTK("ENTER, base==0x%lx, port_idx %u\n", base, port_idx); 1491 VPRINTK("ENTER, base==0x%lx, port_idx %u\n", base, port_idx);
1509 base = ahci_port_base_ul(base, port_idx); 1492 base = ahci_port_base(base, port_idx);
1510 VPRINTK("base now==0x%lx\n", base); 1493 VPRINTK("base now==0x%lx\n", base);
1511 1494
1512 port->cmd_addr = base; 1495 port->cmd_addr = base;
@@ -1519,7 +1502,7 @@ static int ahci_host_init(struct ata_probe_ent *probe_ent)
1519{ 1502{
1520 struct ahci_host_priv *hpriv = probe_ent->private_data; 1503 struct ahci_host_priv *hpriv = probe_ent->private_data;
1521 struct pci_dev *pdev = to_pci_dev(probe_ent->dev); 1504 struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
1522 void __iomem *mmio = probe_ent->mmio_base; 1505 void __iomem *mmio = probe_ent->iomap[AHCI_PCI_BAR];
1523 unsigned int i, cap_n_ports, using_dac; 1506 unsigned int i, cap_n_ports, using_dac;
1524 int rc; 1507 int rc;
1525 1508
@@ -1586,7 +1569,7 @@ static int ahci_host_init(struct ata_probe_ent *probe_ent)
1586 } 1569 }
1587 1570
1588 for (i = 0; i < probe_ent->n_ports; i++) 1571 for (i = 0; i < probe_ent->n_ports; i++)
1589 ahci_setup_port(&probe_ent->port[i], (unsigned long) mmio, i); 1572 ahci_setup_port(&probe_ent->port[i], mmio, i);
1590 1573
1591 ahci_init_controller(mmio, pdev, probe_ent->n_ports, 1574 ahci_init_controller(mmio, pdev, probe_ent->n_ports,
1592 probe_ent->port_flags, hpriv); 1575 probe_ent->port_flags, hpriv);
@@ -1600,7 +1583,7 @@ static void ahci_print_info(struct ata_probe_ent *probe_ent)
1600{ 1583{
1601 struct ahci_host_priv *hpriv = probe_ent->private_data; 1584 struct ahci_host_priv *hpriv = probe_ent->private_data;
1602 struct pci_dev *pdev = to_pci_dev(probe_ent->dev); 1585 struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
1603 void __iomem *mmio = probe_ent->mmio_base; 1586 void __iomem *mmio = probe_ent->iomap[AHCI_PCI_BAR];
1604 u32 vers, cap, impl, speed; 1587 u32 vers, cap, impl, speed;
1605 const char *speed_s; 1588 const char *speed_s;
1606 u16 cc; 1589 u16 cc;
@@ -1619,11 +1602,11 @@ static void ahci_print_info(struct ata_probe_ent *probe_ent)
1619 speed_s = "?"; 1602 speed_s = "?";
1620 1603
1621 pci_read_config_word(pdev, 0x0a, &cc); 1604 pci_read_config_word(pdev, 0x0a, &cc);
1622 if (cc == 0x0101) 1605 if (cc == PCI_CLASS_STORAGE_IDE)
1623 scc_s = "IDE"; 1606 scc_s = "IDE";
1624 else if (cc == 0x0106) 1607 else if (cc == PCI_CLASS_STORAGE_SATA)
1625 scc_s = "SATA"; 1608 scc_s = "SATA";
1626 else if (cc == 0x0104) 1609 else if (cc == PCI_CLASS_STORAGE_RAID)
1627 scc_s = "RAID"; 1610 scc_s = "RAID";
1628 else 1611 else
1629 scc_s = "unknown"; 1612 scc_s = "unknown";
@@ -1667,15 +1650,13 @@ static void ahci_print_info(struct ata_probe_ent *probe_ent)
1667 ); 1650 );
1668} 1651}
1669 1652
1670static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) 1653static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1671{ 1654{
1672 static int printed_version; 1655 static int printed_version;
1673 struct ata_probe_ent *probe_ent = NULL;
1674 struct ahci_host_priv *hpriv;
1675 unsigned long base;
1676 void __iomem *mmio_base;
1677 unsigned int board_idx = (unsigned int) ent->driver_data; 1656 unsigned int board_idx = (unsigned int) ent->driver_data;
1678 int have_msi, pci_dev_busy = 0; 1657 struct device *dev = &pdev->dev;
1658 struct ata_probe_ent *probe_ent;
1659 struct ahci_host_priv *hpriv;
1679 int rc; 1660 int rc;
1680 1661
1681 VPRINTK("ENTER\n"); 1662 VPRINTK("ENTER\n");
@@ -1685,57 +1666,36 @@ static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1685 if (!printed_version++) 1666 if (!printed_version++)
1686 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); 1667 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
1687 1668
1688 /* JMicron-specific fixup: make sure we're in AHCI mode */
1689 /* This is protected from races with ata_jmicron by the pci probe
1690 locking */
1691 if (pdev->vendor == PCI_VENDOR_ID_JMICRON) { 1669 if (pdev->vendor == PCI_VENDOR_ID_JMICRON) {
1692 /* AHCI enable, AHCI on function 0 */ 1670 /* Function 1 is the PATA controller except on the 368, where
1693 pci_write_config_byte(pdev, 0x41, 0xa1); 1671 we are not AHCI anyway */
1694 /* Function 1 is the PATA controller */
1695 if (PCI_FUNC(pdev->devfn)) 1672 if (PCI_FUNC(pdev->devfn))
1696 return -ENODEV; 1673 return -ENODEV;
1697 } 1674 }
1698 1675
1699 rc = pci_enable_device(pdev); 1676 rc = pcim_enable_device(pdev);
1700 if (rc) 1677 if (rc)
1701 return rc; 1678 return rc;
1702 1679
1703 rc = pci_request_regions(pdev, DRV_NAME); 1680 rc = pcim_iomap_regions(pdev, 1 << AHCI_PCI_BAR, DRV_NAME);
1704 if (rc) { 1681 if (rc == -EBUSY)
1705 pci_dev_busy = 1; 1682 pcim_pin_device(pdev);
1706 goto err_out; 1683 if (rc)
1707 } 1684 return rc;
1708 1685
1709 if (pci_enable_msi(pdev) == 0) 1686 if (pci_enable_msi(pdev))
1710 have_msi = 1;
1711 else {
1712 pci_intx(pdev, 1); 1687 pci_intx(pdev, 1);
1713 have_msi = 0;
1714 }
1715 1688
1716 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL); 1689 probe_ent = devm_kzalloc(dev, sizeof(*probe_ent), GFP_KERNEL);
1717 if (probe_ent == NULL) { 1690 if (probe_ent == NULL)
1718 rc = -ENOMEM; 1691 return -ENOMEM;
1719 goto err_out_msi;
1720 }
1721 1692
1722 memset(probe_ent, 0, sizeof(*probe_ent));
1723 probe_ent->dev = pci_dev_to_dev(pdev); 1693 probe_ent->dev = pci_dev_to_dev(pdev);
1724 INIT_LIST_HEAD(&probe_ent->node); 1694 INIT_LIST_HEAD(&probe_ent->node);
1725 1695
1726 mmio_base = pci_iomap(pdev, AHCI_PCI_BAR, 0); 1696 hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
1727 if (mmio_base == NULL) { 1697 if (!hpriv)
1728 rc = -ENOMEM; 1698 return -ENOMEM;
1729 goto err_out_free_ent;
1730 }
1731 base = (unsigned long) mmio_base;
1732
1733 hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL);
1734 if (!hpriv) {
1735 rc = -ENOMEM;
1736 goto err_out_iounmap;
1737 }
1738 memset(hpriv, 0, sizeof(*hpriv));
1739 1699
1740 probe_ent->sht = ahci_port_info[board_idx].sht; 1700 probe_ent->sht = ahci_port_info[board_idx].sht;
1741 probe_ent->port_flags = ahci_port_info[board_idx].flags; 1701 probe_ent->port_flags = ahci_port_info[board_idx].flags;
@@ -1745,16 +1705,13 @@ static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1745 1705
1746 probe_ent->irq = pdev->irq; 1706 probe_ent->irq = pdev->irq;
1747 probe_ent->irq_flags = IRQF_SHARED; 1707 probe_ent->irq_flags = IRQF_SHARED;
1748 probe_ent->mmio_base = mmio_base; 1708 probe_ent->iomap = pcim_iomap_table(pdev);
1749 probe_ent->private_data = hpriv; 1709 probe_ent->private_data = hpriv;
1750 1710
1751 if (have_msi)
1752 hpriv->flags |= AHCI_FLAG_MSI;
1753
1754 /* initialize adapter */ 1711 /* initialize adapter */
1755 rc = ahci_host_init(probe_ent); 1712 rc = ahci_host_init(probe_ent);
1756 if (rc) 1713 if (rc)
1757 goto err_out_hpriv; 1714 return rc;
1758 1715
1759 if (!(probe_ent->port_flags & AHCI_FLAG_NO_NCQ) && 1716 if (!(probe_ent->port_flags & AHCI_FLAG_NO_NCQ) &&
1760 (hpriv->cap & HOST_CAP_NCQ)) 1717 (hpriv->cap & HOST_CAP_NCQ))
@@ -1762,62 +1719,11 @@ static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1762 1719
1763 ahci_print_info(probe_ent); 1720 ahci_print_info(probe_ent);
1764 1721
1765 /* FIXME: check ata_device_add return value */ 1722 if (!ata_device_add(probe_ent))
1766 ata_device_add(probe_ent); 1723 return -ENODEV;
1767 kfree(probe_ent);
1768 1724
1725 devm_kfree(dev, probe_ent);
1769 return 0; 1726 return 0;
1770
1771err_out_hpriv:
1772 kfree(hpriv);
1773err_out_iounmap:
1774 pci_iounmap(pdev, mmio_base);
1775err_out_free_ent:
1776 kfree(probe_ent);
1777err_out_msi:
1778 if (have_msi)
1779 pci_disable_msi(pdev);
1780 else
1781 pci_intx(pdev, 0);
1782 pci_release_regions(pdev);
1783err_out:
1784 if (!pci_dev_busy)
1785 pci_disable_device(pdev);
1786 return rc;
1787}
1788
1789static void ahci_remove_one (struct pci_dev *pdev)
1790{
1791 struct device *dev = pci_dev_to_dev(pdev);
1792 struct ata_host *host = dev_get_drvdata(dev);
1793 struct ahci_host_priv *hpriv = host->private_data;
1794 unsigned int i;
1795 int have_msi;
1796
1797 for (i = 0; i < host->n_ports; i++)
1798 ata_port_detach(host->ports[i]);
1799
1800 have_msi = hpriv->flags & AHCI_FLAG_MSI;
1801 free_irq(host->irq, host);
1802
1803 for (i = 0; i < host->n_ports; i++) {
1804 struct ata_port *ap = host->ports[i];
1805
1806 ata_scsi_release(ap->scsi_host);
1807 scsi_host_put(ap->scsi_host);
1808 }
1809
1810 kfree(hpriv);
1811 pci_iounmap(pdev, host->mmio_base);
1812 kfree(host);
1813
1814 if (have_msi)
1815 pci_disable_msi(pdev);
1816 else
1817 pci_intx(pdev, 0);
1818 pci_release_regions(pdev);
1819 pci_disable_device(pdev);
1820 dev_set_drvdata(dev, NULL);
1821} 1727}
1822 1728
1823static int __init ahci_init(void) 1729static int __init ahci_init(void)
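
Taken together, the ahci.c hunks convert the probe/remove pair to the managed PCI API: pcim_enable_device() arms automatic teardown, pcim_iomap_regions() requests and maps the AHCI BAR in one call (with pcim_pin_device() keeping a busy device enabled for whoever already owns the region), and pcim_iomap_table() supplies the per-BAR mapping that replaces host->mmio_base. That is what lets the whole err_out_* ladder and ahci_remove_one() be deleted outright. A condensed sketch of the shape, not the driver's exact code:

/* Managed-PCI probe sketch: every pcim_ and devm_ resource is released
 * automatically on an error return from probe and again on detach. */
static int example_probe(struct pci_dev *pdev)
{
	void __iomem * const *iomap;
	int rc;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << 5, "example"); /* AHCI ABAR is BAR 5 */
	if (rc == -EBUSY)
		pcim_pin_device(pdev);	/* leave it enabled for the owner */
	if (rc)
		return rc;

	iomap = pcim_iomap_table(pdev);	/* array of per-BAR cookies */
	/* ... program iomap[5] ... */
	return 0;
}
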
diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
index 24af56081b5d..be66ea08da55 100644
--- a/drivers/ata/ata_generic.c
+++ b/drivers/ata/ata_generic.c
@@ -79,7 +79,7 @@ static int generic_set_mode(struct ata_port *ap, struct ata_device **unused)
79 79
80 /* Bits 5 and 6 indicate if DMA is active on master/slave */ 80 /* Bits 5 and 6 indicate if DMA is active on master/slave */
81 if (ap->ioaddr.bmdma_addr) 81 if (ap->ioaddr.bmdma_addr)
82 dma_enabled = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); 82 dma_enabled = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
83 83
84 for (i = 0; i < ATA_MAX_DEVICES; i++) { 84 for (i = 0; i < ATA_MAX_DEVICES; i++) {
85 struct ata_device *dev = &ap->device[i]; 85 struct ata_device *dev = &ap->device[i];
@@ -138,7 +138,7 @@ static struct ata_port_operations generic_port_ops = {
138 .bmdma_stop = ata_bmdma_stop, 138 .bmdma_stop = ata_bmdma_stop,
139 .bmdma_status = ata_bmdma_status, 139 .bmdma_status = ata_bmdma_status,
140 140
141 .data_xfer = ata_pio_data_xfer, 141 .data_xfer = ata_data_xfer,
142 142
143 .freeze = ata_bmdma_freeze, 143 .freeze = ata_bmdma_freeze,
144 .thaw = ata_bmdma_thaw, 144 .thaw = ata_bmdma_thaw,
@@ -150,10 +150,10 @@ static struct ata_port_operations generic_port_ops = {
150 150
151 .irq_handler = ata_interrupt, 151 .irq_handler = ata_interrupt,
152 .irq_clear = ata_bmdma_irq_clear, 152 .irq_clear = ata_bmdma_irq_clear,
153 .irq_on = ata_irq_on,
154 .irq_ack = ata_irq_ack,
153 155
154 .port_start = ata_port_start, 156 .port_start = ata_port_start,
155 .port_stop = ata_port_stop,
156 .host_stop = ata_host_stop
157}; 157};
158 158
159static int all_generic_ide; /* Set to claim all devices */ 159static int all_generic_ide; /* Set to claim all devices */
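
The ata_generic.c change is small but shows the heart of the iomap conversion: ioaddr fields are no longer raw port numbers but cookies from pci_iomap(), and ioread8() inspects the cookie at run time to issue either inb() for I/O-port space or readb() for MMIO, so one accessor serves both kinds of BAR. A self-contained sketch under stated assumptions (on a standard SFF controller, BMDMA registers live in BAR 4 and ATA_DMA_STATUS sits at offset 2):

/* iomap sketch: the same ioread8() works whether BAR 4 was mapped from
 * port I/O or memory space; the pci_iomap() cookie encodes which. */
static int example_read_bmdma_status(struct pci_dev *pdev, u8 *status)
{
	void __iomem *bmdma = pci_iomap(pdev, 4, 0);	/* 0 = map whole BAR */

	if (!bmdma)
		return -ENOMEM;
	*status = ioread8(bmdma + 2);	/* ATA_DMA_STATUS */
	pci_iounmap(pdev, bmdma);
	return 0;
}

The dropped .port_stop and .host_stop initializers follow from the libata-core.c changes further down: teardown moves into the devres callback ata_host_release(), so drivers with no extra state simply leave the hooks unset.
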
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 47701b286f8b..4d716c7347e7 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -118,7 +118,7 @@ enum {
118 PIIX_80C_SEC = (1 << 7) | (1 << 6), 118 PIIX_80C_SEC = (1 << 7) | (1 << 6),
119 119
120 /* controller IDs */ 120 /* controller IDs */
121 piix_pata_33 = 0, /* PIIX3 or 4 at 33Mhz */ 121 piix_pata_33 = 0, /* PIIX4 at 33Mhz */
122 ich_pata_33 = 1, /* ICH up to UDMA 33 only */ 122 ich_pata_33 = 1, /* ICH up to UDMA 33 only */
123 ich_pata_66 = 2, /* ICH up to 66 Mhz */ 123 ich_pata_66 = 2, /* ICH up to 66 Mhz */
124 ich_pata_100 = 3, /* ICH up to UDMA 100 */ 124 ich_pata_100 = 3, /* ICH up to UDMA 100 */
@@ -128,6 +128,7 @@ enum {
128 ich6_sata_ahci = 7, 128 ich6_sata_ahci = 7,
129 ich6m_sata_ahci = 8, 129 ich6m_sata_ahci = 8,
130 ich8_sata_ahci = 9, 130 ich8_sata_ahci = 9,
131 piix_pata_mwdma = 10, /* PIIX3 MWDMA only */
131 132
132 /* constants for mapping table */ 133 /* constants for mapping table */
133 P0 = 0, /* port 0 */ 134 P0 = 0, /* port 0 */
@@ -153,7 +154,6 @@ struct piix_host_priv {
153 154
154static int piix_init_one (struct pci_dev *pdev, 155static int piix_init_one (struct pci_dev *pdev,
155 const struct pci_device_id *ent); 156 const struct pci_device_id *ent);
156static void piix_host_stop(struct ata_host *host);
157static void piix_pata_error_handler(struct ata_port *ap); 157static void piix_pata_error_handler(struct ata_port *ap);
158static void ich_pata_error_handler(struct ata_port *ap); 158static void ich_pata_error_handler(struct ata_port *ap);
159static void piix_sata_error_handler(struct ata_port *ap); 159static void piix_sata_error_handler(struct ata_port *ap);
@@ -164,7 +164,8 @@ static void ich_set_dmamode (struct ata_port *ap, struct ata_device *adev);
164static unsigned int in_module_init = 1; 164static unsigned int in_module_init = 1;
165 165
166static const struct pci_device_id piix_pci_tbl[] = { 166static const struct pci_device_id piix_pci_tbl[] = {
167#ifdef ATA_ENABLE_PATA 167 /* Intel PIIX3 for the 430HX etc */
168 { 0x8086, 0x7010, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_mwdma },
168 /* Intel PIIX4 for the 430TX/440BX/MX chipset: UDMA 33 */ 169 /* Intel PIIX4 for the 430TX/440BX/MX chipset: UDMA 33 */
169 /* Also PIIX4E (fn3 rev 2) and PIIX4M (fn3 rev 3) */ 170 /* Also PIIX4E (fn3 rev 2) and PIIX4M (fn3 rev 3) */
170 { 0x8086, 0x7111, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_33 }, 171 { 0x8086, 0x7111, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_33 },
@@ -202,7 +203,6 @@ static const struct pci_device_id piix_pci_tbl[] = {
202 /* ICH7/7-R (i945, i975) UDMA 100*/ 203 /* ICH7/7-R (i945, i975) UDMA 100*/
203 { 0x8086, 0x27DF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_133 }, 204 { 0x8086, 0x27DF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_133 },
204 { 0x8086, 0x269E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, 205 { 0x8086, 0x269E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
205#endif
206 206
207 /* NOTE: The following PCI ids must be kept in sync with the 207 /* NOTE: The following PCI ids must be kept in sync with the
208 * list in drivers/pci/quirks.c. 208 * list in drivers/pci/quirks.c.
@@ -297,7 +297,7 @@ static const struct ata_port_operations piix_pata_ops = {
297 .bmdma_status = ata_bmdma_status, 297 .bmdma_status = ata_bmdma_status,
298 .qc_prep = ata_qc_prep, 298 .qc_prep = ata_qc_prep,
299 .qc_issue = ata_qc_issue_prot, 299 .qc_issue = ata_qc_issue_prot,
300 .data_xfer = ata_pio_data_xfer, 300 .data_xfer = ata_data_xfer,
301 301
302 .freeze = ata_bmdma_freeze, 302 .freeze = ata_bmdma_freeze,
303 .thaw = ata_bmdma_thaw, 303 .thaw = ata_bmdma_thaw,
@@ -306,10 +306,10 @@ static const struct ata_port_operations piix_pata_ops = {
306 306
307 .irq_handler = ata_interrupt, 307 .irq_handler = ata_interrupt,
308 .irq_clear = ata_bmdma_irq_clear, 308 .irq_clear = ata_bmdma_irq_clear,
309 .irq_on = ata_irq_on,
310 .irq_ack = ata_irq_ack,
309 311
310 .port_start = ata_port_start, 312 .port_start = ata_port_start,
311 .port_stop = ata_port_stop,
312 .host_stop = piix_host_stop,
313}; 313};
314 314
315static const struct ata_port_operations ich_pata_ops = { 315static const struct ata_port_operations ich_pata_ops = {
@@ -330,7 +330,7 @@ static const struct ata_port_operations ich_pata_ops = {
330 .bmdma_status = ata_bmdma_status, 330 .bmdma_status = ata_bmdma_status,
331 .qc_prep = ata_qc_prep, 331 .qc_prep = ata_qc_prep,
332 .qc_issue = ata_qc_issue_prot, 332 .qc_issue = ata_qc_issue_prot,
333 .data_xfer = ata_pio_data_xfer, 333 .data_xfer = ata_data_xfer,
334 334
335 .freeze = ata_bmdma_freeze, 335 .freeze = ata_bmdma_freeze,
336 .thaw = ata_bmdma_thaw, 336 .thaw = ata_bmdma_thaw,
@@ -339,10 +339,10 @@ static const struct ata_port_operations ich_pata_ops = {
339 339
340 .irq_handler = ata_interrupt, 340 .irq_handler = ata_interrupt,
341 .irq_clear = ata_bmdma_irq_clear, 341 .irq_clear = ata_bmdma_irq_clear,
342 .irq_on = ata_irq_on,
343 .irq_ack = ata_irq_ack,
342 344
343 .port_start = ata_port_start, 345 .port_start = ata_port_start,
344 .port_stop = ata_port_stop,
345 .host_stop = piix_host_stop,
346}; 346};
347 347
348static const struct ata_port_operations piix_sata_ops = { 348static const struct ata_port_operations piix_sata_ops = {
@@ -360,7 +360,7 @@ static const struct ata_port_operations piix_sata_ops = {
360 .bmdma_status = ata_bmdma_status, 360 .bmdma_status = ata_bmdma_status,
361 .qc_prep = ata_qc_prep, 361 .qc_prep = ata_qc_prep,
362 .qc_issue = ata_qc_issue_prot, 362 .qc_issue = ata_qc_issue_prot,
363 .data_xfer = ata_pio_data_xfer, 363 .data_xfer = ata_data_xfer,
364 364
365 .freeze = ata_bmdma_freeze, 365 .freeze = ata_bmdma_freeze,
366 .thaw = ata_bmdma_thaw, 366 .thaw = ata_bmdma_thaw,
@@ -369,10 +369,10 @@ static const struct ata_port_operations piix_sata_ops = {
369 369
370 .irq_handler = ata_interrupt, 370 .irq_handler = ata_interrupt,
371 .irq_clear = ata_bmdma_irq_clear, 371 .irq_clear = ata_bmdma_irq_clear,
372 .irq_on = ata_irq_on,
373 .irq_ack = ata_irq_ack,
372 374
373 .port_start = ata_port_start, 375 .port_start = ata_port_start,
374 .port_stop = ata_port_stop,
375 .host_stop = piix_host_stop,
376}; 376};
377 377
378static const struct piix_map_db ich5_map_db = { 378static const struct piix_map_db ich5_map_db = {
@@ -441,7 +441,7 @@ static const struct piix_map_db *piix_map_db_table[] = {
441}; 441};
442 442
443static struct ata_port_info piix_port_info[] = { 443static struct ata_port_info piix_port_info[] = {
444 /* piix_pata_33: 0: PIIX3 or 4 at 33MHz */ 444 /* piix_pata_33: 0: PIIX4 at 33MHz */
445 { 445 {
446 .sht = &piix_sht, 446 .sht = &piix_sht,
447 .flags = PIIX_PATA_FLAGS, 447 .flags = PIIX_PATA_FLAGS,
@@ -543,6 +543,14 @@ static struct ata_port_info piix_port_info[] = {
543 .port_ops = &piix_sata_ops, 543 .port_ops = &piix_sata_ops,
544 }, 544 },
545 545
546 /* piix_pata_mwdma: 10: PIIX3 MWDMA only */
547 {
548 .sht = &piix_sht,
549 .flags = PIIX_PATA_FLAGS,
550 .pio_mask = 0x1f, /* pio0-4 */
551 .mwdma_mask = 0x06, /* mwdma1-2 ?? CHECK 0 should be ok but slow */
552 .port_ops = &piix_pata_ops,
553 },
546}; 554};
547 555
548static struct pci_bits piix_enable_bits[] = { 556static struct pci_bits piix_enable_bits[] = {
@@ -569,6 +577,7 @@ struct ich_laptop {
569static const struct ich_laptop ich_laptop[] = { 577static const struct ich_laptop ich_laptop[] = {
570 /* devid, subvendor, subdev */ 578 /* devid, subvendor, subdev */
571 { 0x27DF, 0x0005, 0x0280 }, /* ICH7 on Acer 5602WLMi */ 579 { 0x27DF, 0x0005, 0x0280 }, /* ICH7 on Acer 5602WLMi */
580 { 0x27DF, 0x1025, 0x0110 }, /* ICH7 on Acer 3682WLMi */
572 /* end marker */ 581 /* end marker */
573 { 0, } 582 { 0, }
574}; 583};
@@ -632,7 +641,7 @@ static int piix_pata_prereset(struct ata_port *ap)
632 641
633 if (!pci_test_config_bits(pdev, &piix_enable_bits[ap->port_no])) 642 if (!pci_test_config_bits(pdev, &piix_enable_bits[ap->port_no]))
634 return -ENOENT; 643 return -ENOENT;
635 644
636 ap->cbl = ATA_CBL_PATA40; 645 ap->cbl = ATA_CBL_PATA40;
637 return ata_std_prereset(ap); 646 return ata_std_prereset(ap);
638} 647}
@@ -776,7 +785,7 @@ static void do_pata_set_dmamode (struct ata_port *ap, struct ata_device *adev, i
776 u16 master_data; 785 u16 master_data;
777 u8 speed = adev->dma_mode; 786 u8 speed = adev->dma_mode;
778 int devid = adev->devno + 2 * ap->port_no; 787 int devid = adev->devno + 2 * ap->port_no;
779 u8 udma_enable; 788 u8 udma_enable = 0;
780 789
781 static const /* ISP RTC */ 790 static const /* ISP RTC */
782 u8 timings[][2] = { { 0, 0 }, 791 u8 timings[][2] = { { 0, 0 },
@@ -786,7 +795,8 @@ static void do_pata_set_dmamode (struct ata_port *ap, struct ata_device *adev, i
786 { 2, 3 }, }; 795 { 2, 3 }, };
787 796
788 pci_read_config_word(dev, master_port, &master_data); 797 pci_read_config_word(dev, master_port, &master_data);
789 pci_read_config_byte(dev, 0x48, &udma_enable); 798 if (ap->udma_mask)
799 pci_read_config_byte(dev, 0x48, &udma_enable);
790 800
791 if (speed >= XFER_UDMA_0) { 801 if (speed >= XFER_UDMA_0) {
792 unsigned int udma = adev->dma_mode - XFER_UDMA_0; 802 unsigned int udma = adev->dma_mode - XFER_UDMA_0;
@@ -1059,6 +1069,7 @@ static void __devinit piix_init_sata_map(struct pci_dev *pdev,
1059static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) 1069static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1060{ 1070{
1061 static int printed_version; 1071 static int printed_version;
1072 struct device *dev = &pdev->dev;
1062 struct ata_port_info port_info[2]; 1073 struct ata_port_info port_info[2];
1063 struct ata_port_info *ppinfo[2] = { &port_info[0], &port_info[1] }; 1074 struct ata_port_info *ppinfo[2] = { &port_info[0], &port_info[1] };
1064 struct piix_host_priv *hpriv; 1075 struct piix_host_priv *hpriv;
@@ -1072,7 +1083,7 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1072 if (!in_module_init) 1083 if (!in_module_init)
1073 return -ENODEV; 1084 return -ENODEV;
1074 1085
1075 hpriv = kzalloc(sizeof(*hpriv), GFP_KERNEL); 1086 hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
1076 if (!hpriv) 1087 if (!hpriv)
1077 return -ENOMEM; 1088 return -ENOMEM;
1078 1089
@@ -1122,15 +1133,6 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1122 return ata_pci_init_one(pdev, ppinfo, 2); 1133 return ata_pci_init_one(pdev, ppinfo, 2);
1123} 1134}
1124 1135
1125static void piix_host_stop(struct ata_host *host)
1126{
1127 struct piix_host_priv *hpriv = host->private_data;
1128
1129 ata_host_stop(host);
1130
1131 kfree(hpriv);
1132}
1133
1134static int __init piix_init(void) 1136static int __init piix_init(void)
1135{ 1137{
1136 int rc; 1138 int rc;
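
Beyond the new PIIX3 support (the 0x7010 ID and the piix_pata_mwdma port_info entry, since PIIX3 predates UDMA) and the Acer 3682WLMi short-cable override, ata_piix.c makes the same devres move as ahci.c: hpriv now comes from devm_kzalloc(), its lifetime is tied to the PCI device, and piix_host_stop(), whose only job was kfree(hpriv), disappears together with the .port_stop and .host_stop initializers. do_pata_set_dmamode() likewise reads the UDMA enable register at 0x48 only when the port advertises UDMA, since that register is not implemented on the pre-UDMA PIIX3. Schematically (a sketch, not the driver's code):

/* Before: manual lifetime; a host_stop hook must kfree() this. */
hpriv = kzalloc(sizeof(*hpriv), GFP_KERNEL);

/* After: device-managed lifetime, freed on unbind; no hook needed. */
hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
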
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index cf707029352e..25d8d3f778a1 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -59,6 +59,9 @@
59 59
60#include "libata.h" 60#include "libata.h"
61 61
62#define DRV_VERSION "2.10" /* must be exactly four chars */
63
64
62/* debounce timing parameters in msecs { interval, duration, timeout } */ 65/* debounce timing parameters in msecs { interval, duration, timeout } */
63const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 }; 66const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
64const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 }; 67const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
@@ -598,51 +601,7 @@ void ata_dev_disable(struct ata_device *dev)
598} 601}
599 602
600/** 603/**
601 * ata_pio_devchk - PATA device presence detection 604 * ata_devchk - PATA device presence detection
602 * @ap: ATA channel to examine
603 * @device: Device to examine (starting at zero)
604 *
605 * This technique was originally described in
606 * Hale Landis's ATADRVR (www.ata-atapi.com), and
607 * later found its way into the ATA/ATAPI spec.
608 *
609 * Write a pattern to the ATA shadow registers,
610 * and if a device is present, it will respond by
611 * correctly storing and echoing back the
612 * ATA shadow register contents.
613 *
614 * LOCKING:
615 * caller.
616 */
617
618static unsigned int ata_pio_devchk(struct ata_port *ap,
619 unsigned int device)
620{
621 struct ata_ioports *ioaddr = &ap->ioaddr;
622 u8 nsect, lbal;
623
624 ap->ops->dev_select(ap, device);
625
626 outb(0x55, ioaddr->nsect_addr);
627 outb(0xaa, ioaddr->lbal_addr);
628
629 outb(0xaa, ioaddr->nsect_addr);
630 outb(0x55, ioaddr->lbal_addr);
631
632 outb(0x55, ioaddr->nsect_addr);
633 outb(0xaa, ioaddr->lbal_addr);
634
635 nsect = inb(ioaddr->nsect_addr);
636 lbal = inb(ioaddr->lbal_addr);
637
638 if ((nsect == 0x55) && (lbal == 0xaa))
639 return 1; /* we found a device */
640
641 return 0; /* nothing found */
642}
643
644/**
645 * ata_mmio_devchk - PATA device presence detection
646 * @ap: ATA channel to examine 605 * @ap: ATA channel to examine
647 * @device: Device to examine (starting at zero) 606 * @device: Device to examine (starting at zero)
648 * 607 *
@@ -659,25 +618,24 @@ static unsigned int ata_pio_devchk(struct ata_port *ap,
659 * caller. 618 * caller.
660 */ 619 */
661 620
662static unsigned int ata_mmio_devchk(struct ata_port *ap, 621static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
663 unsigned int device)
664{ 622{
665 struct ata_ioports *ioaddr = &ap->ioaddr; 623 struct ata_ioports *ioaddr = &ap->ioaddr;
666 u8 nsect, lbal; 624 u8 nsect, lbal;
667 625
668 ap->ops->dev_select(ap, device); 626 ap->ops->dev_select(ap, device);
669 627
670 writeb(0x55, (void __iomem *) ioaddr->nsect_addr); 628 iowrite8(0x55, ioaddr->nsect_addr);
671 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr); 629 iowrite8(0xaa, ioaddr->lbal_addr);
672 630
673 writeb(0xaa, (void __iomem *) ioaddr->nsect_addr); 631 iowrite8(0xaa, ioaddr->nsect_addr);
674 writeb(0x55, (void __iomem *) ioaddr->lbal_addr); 632 iowrite8(0x55, ioaddr->lbal_addr);
675 633
676 writeb(0x55, (void __iomem *) ioaddr->nsect_addr); 634 iowrite8(0x55, ioaddr->nsect_addr);
677 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr); 635 iowrite8(0xaa, ioaddr->lbal_addr);
678 636
679 nsect = readb((void __iomem *) ioaddr->nsect_addr); 637 nsect = ioread8(ioaddr->nsect_addr);
680 lbal = readb((void __iomem *) ioaddr->lbal_addr); 638 lbal = ioread8(ioaddr->lbal_addr);
681 639
682 if ((nsect == 0x55) && (lbal == 0xaa)) 640 if ((nsect == 0x55) && (lbal == 0xaa))
683 return 1; /* we found a device */ 641 return 1; /* we found a device */
@@ -686,27 +644,6 @@ static unsigned int ata_mmio_devchk(struct ata_port *ap,
686} 644}
687 645
688/** 646/**
689 * ata_devchk - PATA device presence detection
690 * @ap: ATA channel to examine
691 * @device: Device to examine (starting at zero)
692 *
693 * Dispatch ATA device presence detection, depending
694 * on whether we are using PIO or MMIO to talk to the
695 * ATA shadow registers.
696 *
697 * LOCKING:
698 * caller.
699 */
700
701static unsigned int ata_devchk(struct ata_port *ap,
702 unsigned int device)
703{
704 if (ap->flags & ATA_FLAG_MMIO)
705 return ata_mmio_devchk(ap, device);
706 return ata_pio_devchk(ap, device);
707}
708
709/**
710 * ata_dev_classify - determine device type based on ATA-spec signature 647 * ata_dev_classify - determine device type based on ATA-spec signature
711 * @tf: ATA taskfile register set for device to be identified 648 * @tf: ATA taskfile register set for device to be identified
712 * 649 *
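
With every taskfile address now an iomap cookie, the ata_pio_devchk()/ata_mmio_devchk() pair and the ATA_FLAG_MMIO dispatcher collapse into a single ata_devchk(). The probe itself is the classic shadow-register echo test: 0x55 and 0xaa are complementary alternating bit patterns, so a floating bus that returns 0x00, 0xff, or whatever was last driven fails the check. A compact sketch of the core, assuming the device has already been selected:

/* Shadow-register echo test: a present device latches the written
 * values; an empty cable position does not echo them back. */
static bool example_echo_test(void __iomem *nsect, void __iomem *lbal)
{
	iowrite8(0x55, nsect);
	iowrite8(0xaa, lbal);

	iowrite8(0xaa, nsect);
	iowrite8(0x55, lbal);

	iowrite8(0x55, nsect);	/* final pattern stays latched */
	iowrite8(0xaa, lbal);

	return ioread8(nsect) == 0x55 && ioread8(lbal) == 0xaa;
}
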
@@ -923,11 +860,7 @@ void ata_std_dev_select (struct ata_port *ap, unsigned int device)
923 else 860 else
924 tmp = ATA_DEVICE_OBS | ATA_DEV1; 861 tmp = ATA_DEVICE_OBS | ATA_DEV1;
925 862
926 if (ap->flags & ATA_FLAG_MMIO) { 863 iowrite8(tmp, ap->ioaddr.device_addr);
927 writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
928 } else {
929 outb(tmp, ap->ioaddr.device_addr);
930 }
931 ata_pause(ap); /* needed; also flushes, for mmio */ 864 ata_pause(ap); /* needed; also flushes, for mmio */
932} 865}
933 866
@@ -1156,7 +1089,7 @@ void ata_port_flush_task(struct ata_port *ap)
1156 ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__); 1089 ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
1157} 1090}
1158 1091
1159void ata_qc_complete_internal(struct ata_queued_cmd *qc) 1092static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1160{ 1093{
1161 struct completion *waiting = qc->private_data; 1094 struct completion *waiting = qc->private_data;
1162 1095
@@ -1249,7 +1182,7 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
1249 buflen += sg[i].length; 1182 buflen += sg[i].length;
1250 1183
1251 ata_sg_init(qc, sg, n_elem); 1184 ata_sg_init(qc, sg, n_elem);
1252 qc->nsect = buflen / ATA_SECT_SIZE; 1185 qc->nbytes = buflen;
1253 } 1186 }
1254 1187
1255 qc->private_data = &wait; 1188 qc->private_data = &wait;
@@ -1291,7 +1224,7 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
1291 if (ap->ops->post_internal_cmd) 1224 if (ap->ops->post_internal_cmd)
1292 ap->ops->post_internal_cmd(qc); 1225 ap->ops->post_internal_cmd(qc);
1293 1226
1294 if (qc->flags & ATA_QCFLAG_FAILED && !qc->err_mask) { 1227 if ((qc->flags & ATA_QCFLAG_FAILED) && !qc->err_mask) {
1295 if (ata_msg_warn(ap)) 1228 if (ata_msg_warn(ap))
1296 ata_dev_printk(dev, KERN_WARNING, 1229 ata_dev_printk(dev, KERN_WARNING,
1297 "zero err_mask for failed " 1230 "zero err_mask for failed "
@@ -1607,6 +1540,8 @@ int ata_dev_configure(struct ata_device *dev)
1607 const u16 *id = dev->id; 1540 const u16 *id = dev->id;
1608 unsigned int xfer_mask; 1541 unsigned int xfer_mask;
1609 char revbuf[7]; /* XYZ-99\0 */ 1542 char revbuf[7]; /* XYZ-99\0 */
1543 char fwrevbuf[ATA_ID_FW_REV_LEN+1];
1544 char modelbuf[ATA_ID_PROD_LEN+1];
1610 int rc; 1545 int rc;
1611 1546
1612 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) { 1547 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
@@ -1661,6 +1596,16 @@ int ata_dev_configure(struct ata_device *dev)
1661 1596
1662 dev->n_sectors = ata_id_n_sectors(id); 1597 dev->n_sectors = ata_id_n_sectors(id);
1663 1598
1599 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
1600 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
1601 sizeof(fwrevbuf));
1602
1603 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
1604 sizeof(modelbuf));
1605
1606 if (dev->id[59] & 0x100)
1607 dev->multi_count = dev->id[59] & 0xff;
1608
1664 if (ata_id_has_lba(id)) { 1609 if (ata_id_has_lba(id)) {
1665 const char *lba_desc; 1610 const char *lba_desc;
1666 char ncq_desc[20]; 1611 char ncq_desc[20];
@@ -1680,13 +1625,16 @@ int ata_dev_configure(struct ata_device *dev)
1680 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc)); 1625 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
1681 1626
1682 /* print device info to dmesg */ 1627 /* print device info to dmesg */
1683 if (ata_msg_drv(ap) && print_info) 1628 if (ata_msg_drv(ap) && print_info) {
1684 ata_dev_printk(dev, KERN_INFO, "%s, " 1629 ata_dev_printk(dev, KERN_INFO,
1685 "max %s, %Lu sectors: %s %s\n", 1630 "%s: %s, %s, max %s\n",
1686 revbuf, 1631 revbuf, modelbuf, fwrevbuf,
1687 ata_mode_string(xfer_mask), 1632 ata_mode_string(xfer_mask));
1633 ata_dev_printk(dev, KERN_INFO,
1634 "%Lu sectors, multi %u: %s %s\n",
1688 (unsigned long long)dev->n_sectors, 1635 (unsigned long long)dev->n_sectors,
1689 lba_desc, ncq_desc); 1636 dev->multi_count, lba_desc, ncq_desc);
1637 }
1690 } else { 1638 } else {
1691 /* CHS */ 1639 /* CHS */
1692 1640
@@ -1703,22 +1651,17 @@ int ata_dev_configure(struct ata_device *dev)
1703 } 1651 }
1704 1652
1705 /* print device info to dmesg */ 1653 /* print device info to dmesg */
1706 if (ata_msg_drv(ap) && print_info) 1654 if (ata_msg_drv(ap) && print_info) {
1707 ata_dev_printk(dev, KERN_INFO, "%s, "
1708 "max %s, %Lu sectors: CHS %u/%u/%u\n",
1709 revbuf,
1710 ata_mode_string(xfer_mask),
1711 (unsigned long long)dev->n_sectors,
1712 dev->cylinders, dev->heads,
1713 dev->sectors);
1714 }
1715
1716 if (dev->id[59] & 0x100) {
1717 dev->multi_count = dev->id[59] & 0xff;
1718 if (ata_msg_drv(ap) && print_info)
1719 ata_dev_printk(dev, KERN_INFO, 1655 ata_dev_printk(dev, KERN_INFO,
1720 "ata%u: dev %u multi count %u\n", 1656 "%s: %s, %s, max %s\n",
1721 ap->id, dev->devno, dev->multi_count); 1657 revbuf, modelbuf, fwrevbuf,
1658 ata_mode_string(xfer_mask));
1659 ata_dev_printk(dev, KERN_INFO,
1660 "%Lu sectors, multi %u, CHS %u/%u/%u\n",
1661 (unsigned long long)dev->n_sectors,
1662 dev->multi_count, dev->cylinders,
1663 dev->heads, dev->sectors);
1664 }
1722 } 1665 }
1723 1666
1724 dev->cdb_len = 16; 1667 dev->cdb_len = 16;
@@ -2390,6 +2333,10 @@ static int ata_dev_set_mode(struct ata_device *dev)
2390 dev->flags |= ATA_DFLAG_PIO; 2333 dev->flags |= ATA_DFLAG_PIO;
2391 2334
2392 err_mask = ata_dev_set_xfermode(dev); 2335 err_mask = ata_dev_set_xfermode(dev);
2336 /* Old CFA may refuse this command, which is just fine */
2337 if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
2338 err_mask &= ~AC_ERR_DEV;
2339
2393 if (err_mask) { 2340 if (err_mask) {
2394 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode " 2341 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
2395 "(err_mask=0x%x)\n", err_mask); 2342 "(err_mask=0x%x)\n", err_mask);
@@ -2492,7 +2439,7 @@ int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
2492 for (i = 0; i < ATA_MAX_DEVICES; i++) { 2439 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2493 dev = &ap->device[i]; 2440 dev = &ap->device[i];
2494 2441
2495 /* don't udpate suspended devices' xfer mode */ 2442 /* don't update suspended devices' xfer mode */
2496 if (!ata_dev_ready(dev)) 2443 if (!ata_dev_ready(dev))
2497 continue; 2444 continue;
2498 2445
@@ -2613,13 +2560,8 @@ static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
2613 u8 nsect, lbal; 2560 u8 nsect, lbal;
2614 2561
2615 ap->ops->dev_select(ap, 1); 2562 ap->ops->dev_select(ap, 1);
2616 if (ap->flags & ATA_FLAG_MMIO) { 2563 nsect = ioread8(ioaddr->nsect_addr);
2617 nsect = readb((void __iomem *) ioaddr->nsect_addr); 2564 lbal = ioread8(ioaddr->lbal_addr);
2618 lbal = readb((void __iomem *) ioaddr->lbal_addr);
2619 } else {
2620 nsect = inb(ioaddr->nsect_addr);
2621 lbal = inb(ioaddr->lbal_addr);
2622 }
2623 if ((nsect == 1) && (lbal == 1)) 2565 if ((nsect == 1) && (lbal == 1))
2624 break; 2566 break;
2625 if (time_after(jiffies, timeout)) { 2567 if (time_after(jiffies, timeout)) {
@@ -2647,19 +2589,11 @@ static unsigned int ata_bus_softreset(struct ata_port *ap,
2647 DPRINTK("ata%u: bus reset via SRST\n", ap->id); 2589 DPRINTK("ata%u: bus reset via SRST\n", ap->id);
2648 2590
2649 /* software reset. causes dev0 to be selected */ 2591 /* software reset. causes dev0 to be selected */
2650 if (ap->flags & ATA_FLAG_MMIO) { 2592 iowrite8(ap->ctl, ioaddr->ctl_addr);
2651 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr); 2593 udelay(20); /* FIXME: flush */
2652 udelay(20); /* FIXME: flush */ 2594 iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
2653 writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr); 2595 udelay(20); /* FIXME: flush */
2654 udelay(20); /* FIXME: flush */ 2596 iowrite8(ap->ctl, ioaddr->ctl_addr);
2655 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2656 } else {
2657 outb(ap->ctl, ioaddr->ctl_addr);
2658 udelay(10);
2659 outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
2660 udelay(10);
2661 outb(ap->ctl, ioaddr->ctl_addr);
2662 }
2663 2597
2664 /* spec mandates ">= 2ms" before checking status. 2598 /* spec mandates ">= 2ms" before checking status.
2665 * We wait 150ms, because that was the magic delay used for 2599 * We wait 150ms, because that was the magic delay used for
@@ -2744,8 +2678,7 @@ void ata_bus_reset(struct ata_port *ap)
2744 ap->device[1].class = ata_dev_try_classify(ap, 1, &err); 2678 ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
2745 2679
2746 /* re-enable interrupts */ 2680 /* re-enable interrupts */
2747 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */ 2681 ap->ops->irq_on(ap);
2748 ata_irq_on(ap);
2749 2682
2750 /* is double-select really necessary? */ 2683 /* is double-select really necessary? */
2751 if (ap->device[1].class != ATA_DEV_NONE) 2684 if (ap->device[1].class != ATA_DEV_NONE)
@@ -2760,10 +2693,7 @@ void ata_bus_reset(struct ata_port *ap)
2760 2693
2761 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) { 2694 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
2762 /* set up device control for ATA_FLAG_SATA_RESET */ 2695 /* set up device control for ATA_FLAG_SATA_RESET */
2763 if (ap->flags & ATA_FLAG_MMIO) 2696 iowrite8(ap->ctl, ioaddr->ctl_addr);
2764 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2765 else
2766 outb(ap->ctl, ioaddr->ctl_addr);
2767 } 2697 }
2768 2698
2769 DPRINTK("EXIT\n"); 2699 DPRINTK("EXIT\n");
@@ -3097,6 +3027,9 @@ int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
3097 return 0; 3027 return 0;
3098 } 3028 }
3099 3029
3030 /* wait a while before checking status, see SRST for more info */
3031 msleep(150);
3032
3100 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) { 3033 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
3101 ata_port_printk(ap, KERN_ERR, 3034 ata_port_printk(ap, KERN_ERR,
3102 "COMRESET failed (device not ready)\n"); 3035 "COMRESET failed (device not ready)\n");
@@ -3137,11 +3070,8 @@ void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
3137 sata_scr_write(ap, SCR_ERROR, serror); 3070 sata_scr_write(ap, SCR_ERROR, serror);
3138 3071
3139 /* re-enable interrupts */ 3072 /* re-enable interrupts */
3140 if (!ap->ops->error_handler) { 3073 if (!ap->ops->error_handler)
3141 /* FIXME: hack. create a hook instead */ 3074 ap->ops->irq_on(ap);
3142 if (ap->ioaddr.ctl_addr)
3143 ata_irq_on(ap);
3144 }
3145 3075
3146 /* is double-select really necessary? */ 3076 /* is double-select really necessary? */
3147 if (classes[0] != ATA_DEV_NONE) 3077 if (classes[0] != ATA_DEV_NONE)
@@ -3156,12 +3086,8 @@ void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
3156 } 3086 }
3157 3087
3158 /* set up device control */ 3088 /* set up device control */
3159 if (ap->ioaddr.ctl_addr) { 3089 if (ap->ioaddr.ctl_addr)
3160 if (ap->flags & ATA_FLAG_MMIO) 3090 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
3161 writeb(ap->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
3162 else
3163 outb(ap->ctl, ap->ioaddr.ctl_addr);
3164 }
3165 3091
3166 DPRINTK("EXIT\n"); 3092 DPRINTK("EXIT\n");
3167} 3093}
@@ -3186,7 +3112,8 @@ static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3186 const u16 *new_id) 3112 const u16 *new_id)
3187{ 3113{
3188 const u16 *old_id = dev->id; 3114 const u16 *old_id = dev->id;
3189 unsigned char model[2][41], serial[2][21]; 3115 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3116 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3190 u64 new_n_sectors; 3117 u64 new_n_sectors;
3191 3118
3192 if (dev->class != new_class) { 3119 if (dev->class != new_class) {
@@ -3195,10 +3122,10 @@ static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3195 return 0; 3122 return 0;
3196 } 3123 }
3197 3124
3198 ata_id_c_string(old_id, model[0], ATA_ID_PROD_OFS, sizeof(model[0])); 3125 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3199 ata_id_c_string(new_id, model[1], ATA_ID_PROD_OFS, sizeof(model[1])); 3126 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3200 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO_OFS, sizeof(serial[0])); 3127 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3201 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO_OFS, sizeof(serial[1])); 3128 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3202 new_n_sectors = ata_id_n_sectors(new_id); 3129 new_n_sectors = ata_id_n_sectors(new_id);
3203 3130
3204 if (strcmp(model[0], model[1])) { 3131 if (strcmp(model[0], model[1])) {
@@ -3323,37 +3250,20 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
3323 { } 3250 { }
3324}; 3251};
3325 3252
3326static int ata_strim(char *s, size_t len)
3327{
3328 len = strnlen(s, len);
3329
3330 /* ATAPI specifies that empty space is blank-filled; remove blanks */
3331 while ((len > 0) && (s[len - 1] == ' ')) {
3332 len--;
3333 s[len] = 0;
3334 }
3335 return len;
3336}
3337
3338unsigned long ata_device_blacklisted(const struct ata_device *dev) 3253unsigned long ata_device_blacklisted(const struct ata_device *dev)
3339{ 3254{
3340 unsigned char model_num[40]; 3255 unsigned char model_num[ATA_ID_PROD_LEN + 1];
3341 unsigned char model_rev[16]; 3256 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
3342 unsigned int nlen, rlen;
3343 const struct ata_blacklist_entry *ad = ata_device_blacklist; 3257 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3344 3258
3345 ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS, 3259 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
3346 sizeof(model_num)); 3260 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
3347 ata_id_string(dev->id, model_rev, ATA_ID_FW_REV_OFS,
3348 sizeof(model_rev));
3349 nlen = ata_strim(model_num, sizeof(model_num));
3350 rlen = ata_strim(model_rev, sizeof(model_rev));
3351 3261
3352 while (ad->model_num) { 3262 while (ad->model_num) {
3353 if (!strncmp(ad->model_num, model_num, nlen)) { 3263 if (!strcmp(ad->model_num, model_num)) {
3354 if (ad->model_rev == NULL) 3264 if (ad->model_rev == NULL)
3355 return ad->horkage; 3265 return ad->horkage;
3356 if (!strncmp(ad->model_rev, model_rev, rlen)) 3266 if (!strcmp(ad->model_rev, model_rev))
3357 return ad->horkage; 3267 return ad->horkage;
3358 } 3268 }
3359 ad++; 3269 ad++;
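
The blacklist rewrite rests on ata_id_c_string(), which swaps the bytes of each 16-bit IDENTIFY word, NUL-terminates the result, and trims the trailing spaces the ATA spec uses as padding. That makes the private ata_strim() helper redundant and turns the fuzzy strncmp() prefix match, under which a device reporting "FOO" would also match a blacklist entry for "FOOBAR", into an exact strcmp(). The trimming step amounts to this (illustrative helper, not a kernel function):

/* Strip the blank padding mandated for IDENTIFY strings so that an
 * exact string comparison is meaningful. */
static void example_trim(char *s)
{
	size_t len = strlen(s);

	while (len > 0 && s[len - 1] == ' ')
		s[--len] = '\0';
}
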
@@ -3893,53 +3803,7 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
3893} 3803}
3894 3804
3895/** 3805/**
3896 * ata_mmio_data_xfer - Transfer data by MMIO 3806 * ata_data_xfer - Transfer data by PIO
3897 * @adev: device for this I/O
3898 * @buf: data buffer
3899 * @buflen: buffer length
3900 * @write_data: read/write
3901 *
3902 * Transfer data from/to the device data register by MMIO.
3903 *
3904 * LOCKING:
3905 * Inherited from caller.
3906 */
3907
3908void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
3909 unsigned int buflen, int write_data)
3910{
3911 struct ata_port *ap = adev->ap;
3912 unsigned int i;
3913 unsigned int words = buflen >> 1;
3914 u16 *buf16 = (u16 *) buf;
3915 void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
3916
3917 /* Transfer multiple of 2 bytes */
3918 if (write_data) {
3919 for (i = 0; i < words; i++)
3920 writew(le16_to_cpu(buf16[i]), mmio);
3921 } else {
3922 for (i = 0; i < words; i++)
3923 buf16[i] = cpu_to_le16(readw(mmio));
3924 }
3925
3926 /* Transfer trailing 1 byte, if any. */
3927 if (unlikely(buflen & 0x01)) {
3928 u16 align_buf[1] = { 0 };
3929 unsigned char *trailing_buf = buf + buflen - 1;
3930
3931 if (write_data) {
3932 memcpy(align_buf, trailing_buf, 1);
3933 writew(le16_to_cpu(align_buf[0]), mmio);
3934 } else {
3935 align_buf[0] = cpu_to_le16(readw(mmio));
3936 memcpy(trailing_buf, align_buf, 1);
3937 }
3938 }
3939}
3940
3941/**
3942 * ata_pio_data_xfer - Transfer data by PIO
3943 * @adev: device to target 3807 * @adev: device to target
3944 * @buf: data buffer 3808 * @buf: data buffer
3945 * @buflen: buffer length 3809 * @buflen: buffer length
@@ -3950,18 +3814,17 @@ void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
3950 * LOCKING: 3814 * LOCKING:
3951 * Inherited from caller. 3815 * Inherited from caller.
3952 */ 3816 */
3953 3817void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
3954void ata_pio_data_xfer(struct ata_device *adev, unsigned char *buf, 3818 unsigned int buflen, int write_data)
3955 unsigned int buflen, int write_data)
3956{ 3819{
3957 struct ata_port *ap = adev->ap; 3820 struct ata_port *ap = adev->ap;
3958 unsigned int words = buflen >> 1; 3821 unsigned int words = buflen >> 1;
3959 3822
3960 /* Transfer multiple of 2 bytes */ 3823 /* Transfer multiple of 2 bytes */
3961 if (write_data) 3824 if (write_data)
3962 outsw(ap->ioaddr.data_addr, buf, words); 3825 iowrite16_rep(ap->ioaddr.data_addr, buf, words);
3963 else 3826 else
3964 insw(ap->ioaddr.data_addr, buf, words); 3827 ioread16_rep(ap->ioaddr.data_addr, buf, words);
3965 3828
3966 /* Transfer trailing 1 byte, if any. */ 3829 /* Transfer trailing 1 byte, if any. */
3967 if (unlikely(buflen & 0x01)) { 3830 if (unlikely(buflen & 0x01)) {
@@ -3970,16 +3833,16 @@ void ata_pio_data_xfer(struct ata_device *adev, unsigned char *buf,
3970 3833
3971 if (write_data) { 3834 if (write_data) {
3972 memcpy(align_buf, trailing_buf, 1); 3835 memcpy(align_buf, trailing_buf, 1);
3973 outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr); 3836 iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
3974 } else { 3837 } else {
3975 align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr)); 3838 align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
3976 memcpy(trailing_buf, align_buf, 1); 3839 memcpy(trailing_buf, align_buf, 1);
3977 } 3840 }
3978 } 3841 }
3979} 3842}
3980 3843
3981/** 3844/**
3982 * ata_pio_data_xfer_noirq - Transfer data by PIO 3845 * ata_data_xfer_noirq - Transfer data by PIO
3983 * @adev: device to target 3846 * @adev: device to target
3984 * @buf: data buffer 3847 * @buf: data buffer
3985 * @buflen: buffer length 3848 * @buflen: buffer length
@@ -3991,13 +3854,12 @@ void ata_pio_data_xfer(struct ata_device *adev, unsigned char *buf,
3991 * LOCKING: 3854 * LOCKING:
3992 * Inherited from caller. 3855 * Inherited from caller.
3993 */ 3856 */
3994 3857void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
3995void ata_pio_data_xfer_noirq(struct ata_device *adev, unsigned char *buf, 3858 unsigned int buflen, int write_data)
3996 unsigned int buflen, int write_data)
3997{ 3859{
3998 unsigned long flags; 3860 unsigned long flags;
3999 local_irq_save(flags); 3861 local_irq_save(flags);
4000 ata_pio_data_xfer(adev, buf, buflen, write_data); 3862 ata_data_xfer(adev, buf, buflen, write_data);
4001 local_irq_restore(flags); 3863 local_irq_restore(flags);
4002} 3864}
4003 3865
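
ata_pio_data_xfer() and ata_mmio_data_xfer() merge along the same lines: iowrite16_rep() and ioread16_rep() are the cookie-aware string transfers corresponding to outsw()/insw(), and since the ATA data register is 16 bits wide, an odd trailing byte must be bounced through an aligned u16 rather than reading past the caller's buffer. A sketch of the write side under those assumptions:

/* PIO write sketch: move whole 16-bit words first, then pad a trailing
 * odd byte through a little-endian u16 so buf[buflen] is never read. */
static void example_pio_write(void __iomem *data_addr,
			      const unsigned char *buf, unsigned int buflen)
{
	iowrite16_rep(data_addr, buf, buflen >> 1);

	if (buflen & 1) {
		u16 pad = 0;

		memcpy(&pad, buf + buflen - 1, 1);
		iowrite16(le16_to_cpu(pad), data_addr);
	}
}

The _noirq variant simply brackets the same transfer in local_irq_save()/local_irq_restore() for controllers that misbehave when a transfer is interrupted mid-block.
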
@@ -4021,11 +3883,11 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
4021 unsigned int offset; 3883 unsigned int offset;
4022 unsigned char *buf; 3884 unsigned char *buf;
4023 3885
4024 if (qc->cursect == (qc->nsect - 1)) 3886 if (qc->curbytes == qc->nbytes - ATA_SECT_SIZE)
4025 ap->hsm_task_state = HSM_ST_LAST; 3887 ap->hsm_task_state = HSM_ST_LAST;
4026 3888
4027 page = sg[qc->cursg].page; 3889 page = sg[qc->cursg].page;
4028 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE; 3890 offset = sg[qc->cursg].offset + qc->cursg_ofs;
4029 3891
4030 /* get the current page and offset */ 3892 /* get the current page and offset */
4031 page = nth_page(page, (offset >> PAGE_SHIFT)); 3893 page = nth_page(page, (offset >> PAGE_SHIFT));
@@ -4050,10 +3912,10 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
4050 ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write); 3912 ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
4051 } 3913 }
4052 3914
4053 qc->cursect++; 3915 qc->curbytes += ATA_SECT_SIZE;
4054 qc->cursg_ofs++; 3916 qc->cursg_ofs += ATA_SECT_SIZE;
4055 3917
4056 if ((qc->cursg_ofs * ATA_SECT_SIZE) == (&sg[qc->cursg])->length) { 3918 if (qc->cursg_ofs == (&sg[qc->cursg])->length) {
4057 qc->cursg++; 3919 qc->cursg++;
4058 qc->cursg_ofs = 0; 3920 qc->cursg_ofs = 0;
4059 } 3921 }
@@ -4078,7 +3940,8 @@ static void ata_pio_sectors(struct ata_queued_cmd *qc)
4078 3940
4079 WARN_ON(qc->dev->multi_count == 0); 3941 WARN_ON(qc->dev->multi_count == 0);
4080 3942
4081 nsect = min(qc->nsect - qc->cursect, qc->dev->multi_count); 3943 nsect = min((qc->nbytes - qc->curbytes) / ATA_SECT_SIZE,
3944 qc->dev->multi_count);
4082 while (nsect--) 3945 while (nsect--)
4083 ata_pio_sector(qc); 3946 ata_pio_sector(qc);
4084 } else 3947 } else
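
Transfer bookkeeping also changes units here: qc->nsect/qc->cursect become the byte counters qc->nbytes/qc->curbytes, cursg_ofs advances in bytes, and ata_exec_internal_sg() above now stores buflen directly instead of dividing by the sector size. As a worked example under the new fields, a 3-sector PIO read has nbytes == 1536; HSM_ST_LAST is set when curbytes reaches 1024 (nbytes - ATA_SECT_SIZE), and the multi-sector path moves min(3, multi_count) sectors per device interrupt. The remaining-sector computation is just:

/* Sectors still to move under the byte-based fields (sketch). */
static unsigned int example_sectors_left(unsigned int nbytes,
					 unsigned int curbytes)
{
	return (nbytes - curbytes) / ATA_SECT_SIZE;	/* 512-byte sectors */
}
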
@@ -4319,7 +4182,7 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
4319 qc = ata_qc_from_tag(ap, qc->tag); 4182 qc = ata_qc_from_tag(ap, qc->tag);
4320 if (qc) { 4183 if (qc) {
4321 if (likely(!(qc->err_mask & AC_ERR_HSM))) { 4184 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
4322 ata_irq_on(ap); 4185 ap->ops->irq_on(ap);
4323 ata_qc_complete(qc); 4186 ata_qc_complete(qc);
4324 } else 4187 } else
4325 ata_port_freeze(ap); 4188 ata_port_freeze(ap);
@@ -4335,7 +4198,7 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
4335 } else { 4198 } else {
4336 if (in_wq) { 4199 if (in_wq) {
4337 spin_lock_irqsave(ap->lock, flags); 4200 spin_lock_irqsave(ap->lock, flags);
4338 ata_irq_on(ap); 4201 ap->ops->irq_on(ap);
4339 ata_qc_complete(qc); 4202 ata_qc_complete(qc);
4340 spin_unlock_irqrestore(ap->lock, flags); 4203 spin_unlock_irqrestore(ap->lock, flags);
4341 } else 4204 } else
@@ -5160,7 +5023,7 @@ idle_irq:
5160 5023
5161#ifdef ATA_IRQ_TRAP 5024#ifdef ATA_IRQ_TRAP
5162 if ((ap->stats.idle_irq % 1000) == 0) { 5025 if ((ap->stats.idle_irq % 1000) == 0) {
5163 ata_irq_ack(ap, 0); /* debug trap */ 5026 ap->ops->irq_ack(ap, 0); /* debug trap */
5164 ata_port_printk(ap, KERN_WARNING, "irq trap\n"); 5027 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
5165 return 1; 5028 return 1;
5166 } 5029 }
@@ -5501,54 +5364,25 @@ void ata_host_resume(struct ata_host *host)
5501 * LOCKING: 5364 * LOCKING:
5502 * Inherited from caller. 5365 * Inherited from caller.
5503 */ 5366 */
5504 5367int ata_port_start(struct ata_port *ap)
5505int ata_port_start (struct ata_port *ap)
5506{ 5368{
5507 struct device *dev = ap->dev; 5369 struct device *dev = ap->dev;
5508 int rc; 5370 int rc;
5509 5371
5510 ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL); 5372 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
5373 GFP_KERNEL);
5511 if (!ap->prd) 5374 if (!ap->prd)
5512 return -ENOMEM; 5375 return -ENOMEM;
5513 5376
5514 rc = ata_pad_alloc(ap, dev); 5377 rc = ata_pad_alloc(ap, dev);
5515 if (rc) { 5378 if (rc)
5516 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
5517 return rc; 5379 return rc;
5518 }
5519
5520 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
5521 5380
5381 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
5382 (unsigned long long)ap->prd_dma);
5522 return 0; 5383 return 0;
5523} 5384}
5524 5385
5525
5526/**
5527 * ata_port_stop - Undo ata_port_start()
5528 * @ap: Port to shut down
5529 *
5530 * Frees the PRD table.
5531 *
5532 * May be used as the port_stop() entry in ata_port_operations.
5533 *
5534 * LOCKING:
5535 * Inherited from caller.
5536 */
5537
5538void ata_port_stop (struct ata_port *ap)
5539{
5540 struct device *dev = ap->dev;
5541
5542 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
5543 ata_pad_free(ap, dev);
5544}
5545
5546void ata_host_stop (struct ata_host *host)
5547{
5548 if (host->mmio_base)
5549 iounmap(host->mmio_base);
5550}
5551
5552/** 5386/**
5553 * ata_dev_init - Initialize an ata_device structure 5387 * ata_dev_init - Initialize an ata_device structure
5554 * @dev: Device structure to initialize 5388 * @dev: Device structure to initialize
@@ -5722,6 +5556,27 @@ static struct ata_port * ata_port_add(const struct ata_probe_ent *ent,
5722 return ap; 5556 return ap;
5723} 5557}
5724 5558
5559static void ata_host_release(struct device *gendev, void *res)
5560{
5561 struct ata_host *host = dev_get_drvdata(gendev);
5562 int i;
5563
5564 for (i = 0; i < host->n_ports; i++) {
5565 struct ata_port *ap = host->ports[i];
5566
5567 if (!ap)
5568 continue;
5569
5570 if (ap->ops->port_stop)
5571 ap->ops->port_stop(ap);
5572
5573 scsi_host_put(ap->scsi_host);
5574 }
5575
5576 if (host->ops->host_stop)
5577 host->ops->host_stop(host);
5578}
5579
5725/** 5580/**
5726 * ata_sas_host_init - Initialize a host struct 5581 * ata_sas_host_init - Initialize a host struct
5727 * @host: host to initialize 5582 * @host: host to initialize
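
ata_host_release() is the keystone of the conversion: the host is allocated with devres_alloc(), attached with devres_add(), and this callback then runs automatically when the device is released, invoking each port's ->port_stop() and the optional ->host_stop(). That is the machinery that allows ata_port_stop(), ata_host_stop(), and the drivers' manual teardown to be deleted. The generic pattern, with an illustrative payload:

/* Custom devres sketch: the release callback fires when the owning
 * device goes away (or when its devres group is released). */
static void example_release(struct device *gendev, void *res)
{
	/* res is the devres_alloc()ed payload; tear it down here */
}

static int example_attach(struct device *dev)
{
	void *payload = devres_alloc(example_release, 64, GFP_KERNEL);

	if (!payload)
		return -ENOMEM;
	devres_add(dev, payload);
	return 0;
}
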
@@ -5769,22 +5624,28 @@ int ata_device_add(const struct ata_probe_ent *ent)
5769 int rc; 5624 int rc;
5770 5625
5771 DPRINTK("ENTER\n"); 5626 DPRINTK("ENTER\n");
5772 5627
5773 if (ent->irq == 0) { 5628 if (ent->irq == 0) {
5774 dev_printk(KERN_ERR, dev, "is not available: No interrupt assigned.\n"); 5629 dev_printk(KERN_ERR, dev, "is not available: No interrupt assigned.\n");
5775 return 0; 5630 return 0;
5776 } 5631 }
5632
5633 if (!devres_open_group(dev, ata_device_add, GFP_KERNEL))
5634 return 0;
5635
5777 /* alloc a container for our list of ATA ports (buses) */ 5636 /* alloc a container for our list of ATA ports (buses) */
5778 host = kzalloc(sizeof(struct ata_host) + 5637 host = devres_alloc(ata_host_release, sizeof(struct ata_host) +
5779 (ent->n_ports * sizeof(void *)), GFP_KERNEL); 5638 (ent->n_ports * sizeof(void *)), GFP_KERNEL);
5780 if (!host) 5639 if (!host)
5781 return 0; 5640 goto err_out;
5641 devres_add(dev, host);
5642 dev_set_drvdata(dev, host);
5782 5643
5783 ata_host_init(host, dev, ent->_host_flags, ent->port_ops); 5644 ata_host_init(host, dev, ent->_host_flags, ent->port_ops);
5784 host->n_ports = ent->n_ports; 5645 host->n_ports = ent->n_ports;
5785 host->irq = ent->irq; 5646 host->irq = ent->irq;
5786 host->irq2 = ent->irq2; 5647 host->irq2 = ent->irq2;
5787 host->mmio_base = ent->mmio_base; 5648 host->iomap = ent->iomap;
5788 host->private_data = ent->private_data; 5649 host->private_data = ent->private_data;
5789 5650
5790 /* register each port bound to this device */ 5651 /* register each port bound to this device */
@@ -5822,8 +5683,8 @@ int ata_device_add(const struct ata_probe_ent *ent)
5822 (ap->pio_mask << ATA_SHIFT_PIO); 5683 (ap->pio_mask << ATA_SHIFT_PIO);
5823 5684
5824 /* print per-port info to dmesg */ 5685 /* print per-port info to dmesg */
5825 ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%lX " 5686 ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%p "
5826 "ctl 0x%lX bmdma 0x%lX irq %d\n", 5687 "ctl 0x%p bmdma 0x%p irq %d\n",
5827 ap->flags & ATA_FLAG_SATA ? 'S' : 'P', 5688 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
5828 ata_mode_string(xfer_mode_mask), 5689 ata_mode_string(xfer_mode_mask),
5829 ap->ioaddr.cmd_addr, 5690 ap->ioaddr.cmd_addr,
@@ -5836,8 +5697,8 @@ int ata_device_add(const struct ata_probe_ent *ent)
5836 } 5697 }
5837 5698
5838 /* obtain irq, that may be shared between channels */ 5699 /* obtain irq, that may be shared between channels */
5839 rc = request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags, 5700 rc = devm_request_irq(dev, ent->irq, ent->port_ops->irq_handler,
5840 DRV_NAME, host); 5701 ent->irq_flags, DRV_NAME, host);
5841 if (rc) { 5702 if (rc) {
5842 dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n", 5703 dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
5843 ent->irq, rc); 5704 ent->irq, rc);
@@ -5850,15 +5711,19 @@ int ata_device_add(const struct ata_probe_ent *ent)
5850 so trap it now */ 5711 so trap it now */
5851 BUG_ON(ent->irq == ent->irq2); 5712 BUG_ON(ent->irq == ent->irq2);
5852 5713
5853 rc = request_irq(ent->irq2, ent->port_ops->irq_handler, ent->irq_flags, 5714 rc = devm_request_irq(dev, ent->irq2,
5854 DRV_NAME, host); 5715 ent->port_ops->irq_handler, ent->irq_flags,
5716 DRV_NAME, host);
5855 if (rc) { 5717 if (rc) {
5856 dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n", 5718 dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
5857 ent->irq2, rc); 5719 ent->irq2, rc);
5858 goto err_out_free_irq; 5720 goto err_out;
5859 } 5721 }
5860 } 5722 }
5861 5723
5724 /* resource acquisition complete */
5725 devres_remove_group(dev, ata_device_add);
5726
5862 /* perform each probe synchronously */ 5727 /* perform each probe synchronously */
5863 DPRINTK("probe begin\n"); 5728 DPRINTK("probe begin\n");
5864 for (i = 0; i < host->n_ports; i++) { 5729 for (i = 0; i < host->n_ports; i++) {
@@ -5927,24 +5792,13 @@ int ata_device_add(const struct ata_probe_ent *ent)
5927 ata_scsi_scan_host(ap); 5792 ata_scsi_scan_host(ap);
5928 } 5793 }
5929 5794
5930 dev_set_drvdata(dev, host);
5931
5932 VPRINTK("EXIT, returning %u\n", ent->n_ports); 5795 VPRINTK("EXIT, returning %u\n", ent->n_ports);
5933 return ent->n_ports; /* success */ 5796 return ent->n_ports; /* success */
5934 5797
5935err_out_free_irq: 5798 err_out:
5936 free_irq(ent->irq, host); 5799 devres_release_group(dev, ata_device_add);
5937err_out: 5800 dev_set_drvdata(dev, NULL);
5938 for (i = 0; i < host->n_ports; i++) { 5801 VPRINTK("EXIT, returning %d\n", rc);
5939 struct ata_port *ap = host->ports[i];
5940 if (ap) {
5941 ap->ops->port_stop(ap);
5942 scsi_host_put(ap->scsi_host);
5943 }
5944 }
5945
5946 kfree(host);
5947 VPRINTK("EXIT, returning 0\n");
5948 return 0; 5802 return 0;
5949} 5803}
5950 5804
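The rewritten ata_device_add() uses a devres group keyed on the function itself: everything allocated between devres_open_group() and devres_remove_group() can be rolled back with a single call, which is what collapsed the old hand-written error unwinding. The skeleton, reduced to the pattern (do_managed_allocations is a stand-in for the devm_*/devres calls above):

	static int my_add(struct device *dev)	/* pattern sketch only */
	{
		int rc;

		if (!devres_open_group(dev, my_add, GFP_KERNEL))
			return -ENOMEM;

		rc = do_managed_allocations(dev);
		if (rc)
			goto err_out;

		devres_remove_group(dev, my_add);  /* keep resources, drop marker */
		return 0;

	 err_out:
		devres_release_group(dev, my_add); /* free everything in the group */
		return rc;
	}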
@@ -6007,76 +5861,20 @@ void ata_port_detach(struct ata_port *ap)
6007} 5861}
6008 5862
6009/** 5863/**
6010 * ata_host_remove - PCI layer callback for device removal 5864 * ata_host_detach - Detach all ports of an ATA host
6011 * @host: ATA host set that was removed 5865 * @host: Host to detach
6012 * 5866 *
6013 * Unregister all objects associated with this host set. Free those 5867 * Detach all ports of @host.
6014 * objects.
6015 * 5868 *
6016 * LOCKING: 5869 * LOCKING:
6017 * Inherited from calling layer (may sleep). 5870 * Kernel thread context (may sleep).
6018 */ 5871 */
6019 5872void ata_host_detach(struct ata_host *host)
6020void ata_host_remove(struct ata_host *host)
6021{ 5873{
6022 unsigned int i; 5874 int i;
6023 5875
6024 for (i = 0; i < host->n_ports; i++) 5876 for (i = 0; i < host->n_ports; i++)
6025 ata_port_detach(host->ports[i]); 5877 ata_port_detach(host->ports[i]);
6026
6027 free_irq(host->irq, host);
6028 if (host->irq2)
6029 free_irq(host->irq2, host);
6030
6031 for (i = 0; i < host->n_ports; i++) {
6032 struct ata_port *ap = host->ports[i];
6033
6034 ata_scsi_release(ap->scsi_host);
6035
6036 if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
6037 struct ata_ioports *ioaddr = &ap->ioaddr;
6038
6039 /* FIXME: Add -ac IDE pci mods to remove these special cases */
6040 if (ioaddr->cmd_addr == ATA_PRIMARY_CMD)
6041 release_region(ATA_PRIMARY_CMD, 8);
6042 else if (ioaddr->cmd_addr == ATA_SECONDARY_CMD)
6043 release_region(ATA_SECONDARY_CMD, 8);
6044 }
6045
6046 scsi_host_put(ap->scsi_host);
6047 }
6048
6049 if (host->ops->host_stop)
6050 host->ops->host_stop(host);
6051
6052 kfree(host);
6053}
6054
6055/**
6056 * ata_scsi_release - SCSI layer callback hook for host unload
6057 * @shost: libata host to be unloaded
6058 *
6059 * Performs all duties necessary to shut down a libata port...
6060 * Kill port kthread, disable port, and release resources.
6061 *
6062 * LOCKING:
6063 * Inherited from SCSI layer.
6064 *
6065 * RETURNS:
6066 * One.
6067 */
6068
6069int ata_scsi_release(struct Scsi_Host *shost)
6070{
6071 struct ata_port *ap = ata_shost_to_port(shost);
6072
6073 DPRINTK("ENTER\n");
6074
6075 ap->ops->port_disable(ap);
6076 ap->ops->port_stop(ap);
6077
6078 DPRINTK("EXIT\n");
6079 return 1;
6080} 5878}
6081 5879
6082struct ata_probe_ent * 5880struct ata_probe_ent *
@@ -6084,7 +5882,11 @@ ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
6084{ 5882{
6085 struct ata_probe_ent *probe_ent; 5883 struct ata_probe_ent *probe_ent;
6086 5884
6087 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL); 5885 /* XXX - the following if can go away once all LLDs are managed */
5886 if (!list_empty(&dev->devres_head))
5887 probe_ent = devm_kzalloc(dev, sizeof(*probe_ent), GFP_KERNEL);
5888 else
5889 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
6088 if (!probe_ent) { 5890 if (!probe_ent) {
6089 printk(KERN_ERR DRV_NAME "(%s): out of memory\n", 5891 printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
6090 kobject_name(&(dev->kobj))); 5892 kobject_name(&(dev->kobj)));
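The list_empty(&dev->devres_head) test is a transition crutch: it guesses whether the calling LLD is already managed. The ownership rule it protects is sketched below, under the assumption that sz stands in for the needed size -- devm_kzalloc() memory belongs to the devres core and must never reach kfree(), while plain kzalloc() memory must be freed by hand:

	void *p;

	if (!list_empty(&dev->devres_head))
		p = devm_kzalloc(dev, sz, GFP_KERNEL);	/* freed on detach */
	else
		p = kzalloc(sz, GFP_KERNEL);		/* caller must kfree() */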
@@ -6134,37 +5936,23 @@ void ata_std_ports(struct ata_ioports *ioaddr)
6134 5936
6135#ifdef CONFIG_PCI 5937#ifdef CONFIG_PCI
6136 5938
6137void ata_pci_host_stop (struct ata_host *host)
6138{
6139 struct pci_dev *pdev = to_pci_dev(host->dev);
6140
6141 pci_iounmap(pdev, host->mmio_base);
6142}
6143
6144/** 5939/**
6145 * ata_pci_remove_one - PCI layer callback for device removal 5940 * ata_pci_remove_one - PCI layer callback for device removal
6146 * @pdev: PCI device that was removed 5941 * @pdev: PCI device that was removed
6147 * 5942 *
6148 * PCI layer indicates to libata via this hook that 5943 * PCI layer indicates to libata via this hook that hot-unplug or
6149 * hot-unplug or module unload event has occurred. 5944 * module unload event has occurred. Detach all ports. Resource
6150 * Handle this by unregistering all objects associated 5945 * release is handled via devres.
6151 * with this PCI device. Free those objects. Then finally
6152 * release PCI resources and disable device.
6153 * 5946 *
6154 * LOCKING: 5947 * LOCKING:
6155 * Inherited from PCI layer (may sleep). 5948 * Inherited from PCI layer (may sleep).
6156 */ 5949 */
6157 5950void ata_pci_remove_one(struct pci_dev *pdev)
6158void ata_pci_remove_one (struct pci_dev *pdev)
6159{ 5951{
6160 struct device *dev = pci_dev_to_dev(pdev); 5952 struct device *dev = pci_dev_to_dev(pdev);
6161 struct ata_host *host = dev_get_drvdata(dev); 5953 struct ata_host *host = dev_get_drvdata(dev);
6162 5954
6163 ata_host_remove(host); 5955 ata_host_detach(host);
6164
6165 pci_release_regions(pdev);
6166 pci_disable_device(pdev);
6167 dev_set_drvdata(dev, NULL);
6168} 5956}
6169 5957
6170/* move to PCI subsystem */ 5958/* move to PCI subsystem */
@@ -6211,12 +5999,22 @@ void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
6211 } 5999 }
6212} 6000}
6213 6001
6214void ata_pci_device_do_resume(struct pci_dev *pdev) 6002int ata_pci_device_do_resume(struct pci_dev *pdev)
6215{ 6003{
6004 int rc;
6005
6216 pci_set_power_state(pdev, PCI_D0); 6006 pci_set_power_state(pdev, PCI_D0);
6217 pci_restore_state(pdev); 6007 pci_restore_state(pdev);
6218 pci_enable_device(pdev); 6008
6009 rc = pcim_enable_device(pdev);
6010 if (rc) {
6011 dev_printk(KERN_ERR, &pdev->dev,
6012 "failed to enable device after resume (%d)\n", rc);
6013 return rc;
6014 }
6015
6219 pci_set_master(pdev); 6016 pci_set_master(pdev);
6017 return 0;
6220} 6018}
6221 6019
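pcim_enable_device() is the managed counterpart of pci_enable_device(): it records a devres entry so the device is disabled automatically when the driver detaches, which is why the resume path above needs no matching disable. A minimal probe using it (my_pci_probe is a hypothetical driver hook):

	static int my_pci_probe(struct pci_dev *pdev,
				const struct pci_device_id *id)
	{
		int rc = pcim_enable_device(pdev);

		if (rc)
			return rc;
		/* further managed setup: pcim_iomap(), devm_request_irq()... */
		return 0;
	}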
6222int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg) 6020int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
@@ -6236,10 +6034,12 @@ int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6236int ata_pci_device_resume(struct pci_dev *pdev) 6034int ata_pci_device_resume(struct pci_dev *pdev)
6237{ 6035{
6238 struct ata_host *host = dev_get_drvdata(&pdev->dev); 6036 struct ata_host *host = dev_get_drvdata(&pdev->dev);
6037 int rc;
6239 6038
6240 ata_pci_device_do_resume(pdev); 6039 rc = ata_pci_device_do_resume(pdev);
6241 ata_host_resume(host); 6040 if (rc == 0)
6242 return 0; 6041 ata_host_resume(host);
6042 return rc;
6243} 6043}
6244#endif /* CONFIG_PCI */ 6044#endif /* CONFIG_PCI */
6245 6045
@@ -6385,8 +6185,7 @@ EXPORT_SYMBOL_GPL(ata_std_bios_param);
6385EXPORT_SYMBOL_GPL(ata_std_ports); 6185EXPORT_SYMBOL_GPL(ata_std_ports);
6386EXPORT_SYMBOL_GPL(ata_host_init); 6186EXPORT_SYMBOL_GPL(ata_host_init);
6387EXPORT_SYMBOL_GPL(ata_device_add); 6187EXPORT_SYMBOL_GPL(ata_device_add);
6388EXPORT_SYMBOL_GPL(ata_port_detach); 6188EXPORT_SYMBOL_GPL(ata_host_detach);
6389EXPORT_SYMBOL_GPL(ata_host_remove);
6390EXPORT_SYMBOL_GPL(ata_sg_init); 6189EXPORT_SYMBOL_GPL(ata_sg_init);
6391EXPORT_SYMBOL_GPL(ata_sg_init_one); 6190EXPORT_SYMBOL_GPL(ata_sg_init_one);
6392EXPORT_SYMBOL_GPL(ata_hsm_move); 6191EXPORT_SYMBOL_GPL(ata_hsm_move);
@@ -6403,12 +6202,9 @@ EXPORT_SYMBOL_GPL(ata_check_status);
6403EXPORT_SYMBOL_GPL(ata_altstatus); 6202EXPORT_SYMBOL_GPL(ata_altstatus);
6404EXPORT_SYMBOL_GPL(ata_exec_command); 6203EXPORT_SYMBOL_GPL(ata_exec_command);
6405EXPORT_SYMBOL_GPL(ata_port_start); 6204EXPORT_SYMBOL_GPL(ata_port_start);
6406EXPORT_SYMBOL_GPL(ata_port_stop);
6407EXPORT_SYMBOL_GPL(ata_host_stop);
6408EXPORT_SYMBOL_GPL(ata_interrupt); 6205EXPORT_SYMBOL_GPL(ata_interrupt);
6409EXPORT_SYMBOL_GPL(ata_mmio_data_xfer); 6206EXPORT_SYMBOL_GPL(ata_data_xfer);
6410EXPORT_SYMBOL_GPL(ata_pio_data_xfer); 6207EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
6411EXPORT_SYMBOL_GPL(ata_pio_data_xfer_noirq);
6412EXPORT_SYMBOL_GPL(ata_qc_prep); 6208EXPORT_SYMBOL_GPL(ata_qc_prep);
6413EXPORT_SYMBOL_GPL(ata_noop_qc_prep); 6209EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
6414EXPORT_SYMBOL_GPL(ata_bmdma_setup); 6210EXPORT_SYMBOL_GPL(ata_bmdma_setup);
@@ -6445,7 +6241,6 @@ EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
6445EXPORT_SYMBOL_GPL(ata_scsi_slave_config); 6241EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
6446EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy); 6242EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
6447EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth); 6243EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
6448EXPORT_SYMBOL_GPL(ata_scsi_release);
6449EXPORT_SYMBOL_GPL(ata_host_intr); 6244EXPORT_SYMBOL_GPL(ata_host_intr);
6450EXPORT_SYMBOL_GPL(sata_scr_valid); 6245EXPORT_SYMBOL_GPL(sata_scr_valid);
6451EXPORT_SYMBOL_GPL(sata_scr_read); 6246EXPORT_SYMBOL_GPL(sata_scr_read);
@@ -6466,7 +6261,6 @@ EXPORT_SYMBOL_GPL(ata_timing_merge);
6466 6261
6467#ifdef CONFIG_PCI 6262#ifdef CONFIG_PCI
6468EXPORT_SYMBOL_GPL(pci_test_config_bits); 6263EXPORT_SYMBOL_GPL(pci_test_config_bits);
6469EXPORT_SYMBOL_GPL(ata_pci_host_stop);
6470EXPORT_SYMBOL_GPL(ata_pci_init_native_mode); 6264EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
6471EXPORT_SYMBOL_GPL(ata_pci_init_one); 6265EXPORT_SYMBOL_GPL(ata_pci_init_one);
6472EXPORT_SYMBOL_GPL(ata_pci_remove_one); 6266EXPORT_SYMBOL_GPL(ata_pci_remove_one);
@@ -6490,3 +6284,7 @@ EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
6490EXPORT_SYMBOL_GPL(ata_eh_qc_complete); 6284EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
6491EXPORT_SYMBOL_GPL(ata_eh_qc_retry); 6285EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
6492EXPORT_SYMBOL_GPL(ata_do_eh); 6286EXPORT_SYMBOL_GPL(ata_do_eh);
6287EXPORT_SYMBOL_GPL(ata_irq_on);
6288EXPORT_SYMBOL_GPL(ata_dummy_irq_on);
6289EXPORT_SYMBOL_GPL(ata_irq_ack);
6290EXPORT_SYMBOL_GPL(ata_dummy_irq_ack);
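The four new exports are the SFF interrupt hooks introduced by this series. Assuming the .irq_on/.irq_ack fields of ata_port_operations that go with them (the thaw path later in this patch calls ap->ops->irq_on()), an LLD wires them up roughly like this; non-SFF controllers plug in the dummies:

	static struct ata_port_operations my_ops = {	/* sketch */
		.irq_on		= ata_irq_on,		/* or ata_dummy_irq_on */
		.irq_ack	= ata_irq_ack,		/* or ata_dummy_irq_ack */
		/* ... remaining hooks ... */
	};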
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 748435807d68..52c85af7fe99 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1443,15 +1443,10 @@ static void ata_eh_report(struct ata_port *ap)
1443 }; 1443 };
1444 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 1444 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
1445 struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf; 1445 struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
1446 unsigned int nbytes;
1447 1446
1448 if (!(qc->flags & ATA_QCFLAG_FAILED) || !qc->err_mask) 1447 if (!(qc->flags & ATA_QCFLAG_FAILED) || !qc->err_mask)
1449 continue; 1448 continue;
1450 1449
1451 nbytes = qc->nbytes;
1452 if (!nbytes)
1453 nbytes = qc->nsect << 9;
1454
1455 ata_dev_printk(qc->dev, KERN_ERR, 1450 ata_dev_printk(qc->dev, KERN_ERR,
1456 "cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x " 1451 "cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
1457 "tag %d cdb 0x%x data %u %s\n " 1452 "tag %d cdb 0x%x data %u %s\n "
@@ -1461,7 +1456,7 @@ static void ata_eh_report(struct ata_port *ap)
1461 cmd->lbal, cmd->lbam, cmd->lbah, 1456 cmd->lbal, cmd->lbam, cmd->lbah,
1462 cmd->hob_feature, cmd->hob_nsect, 1457 cmd->hob_feature, cmd->hob_nsect,
1463 cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah, 1458 cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah,
1464 cmd->device, qc->tag, qc->cdb[0], nbytes, 1459 cmd->device, qc->tag, qc->cdb[0], qc->nbytes,
1465 dma_str[qc->dma_dir], 1460 dma_str[qc->dma_dir],
1466 res->command, res->feature, res->nsect, 1461 res->command, res->feature, res->nsect,
1467 res->lbal, res->lbam, res->lbah, 1462 res->lbal, res->lbam, res->lbah,
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 73902d335767..0009818a4306 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -149,6 +149,45 @@ int ata_std_bios_param(struct scsi_device *sdev, struct block_device *bdev,
149} 149}
150 150
151/** 151/**
152 * ata_get_identity - Handler for HDIO_GET_IDENTITY ioctl
153 * @sdev: SCSI device to get identify data for
154 * @arg: User buffer area for identify data
155 *
156 * LOCKING:
157 * Defined by the SCSI layer. We don't really care.
158 *
159 * RETURNS:
160 * Zero on success, negative errno on error.
161 */
162static int ata_get_identity(struct scsi_device *sdev, void __user *arg)
163{
164 struct ata_port *ap = ata_shost_to_port(sdev->host);
165 struct ata_device *dev = ata_scsi_find_dev(ap, sdev);
166 u16 __user *dst = arg;
167 char buf[40];
168
169 if (!dev)
170 return -ENOMSG;
171
172 if (copy_to_user(dst, dev->id, ATA_ID_WORDS * sizeof(u16)))
173 return -EFAULT;
174
175 ata_id_string(dev->id, buf, ATA_ID_PROD, ATA_ID_PROD_LEN);
176 if (copy_to_user(dst + ATA_ID_PROD, buf, ATA_ID_PROD_LEN))
177 return -EFAULT;
178
179 ata_id_string(dev->id, buf, ATA_ID_FW_REV, ATA_ID_FW_REV_LEN);
180 if (copy_to_user(dst + ATA_ID_FW_REV, buf, ATA_ID_FW_REV_LEN))
181 return -EFAULT;
182
183 ata_id_string(dev->id, buf, ATA_ID_SERNO, ATA_ID_SERNO_LEN);
184 if (copy_to_user(dst + ATA_ID_SERNO, buf, ATA_ID_SERNO_LEN))
185 return -EFAULT;
186
187 return 0;
188}
189
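ata_get_identity() copies the raw IDENTIFY words and then rewrites the model, firmware-revision and serial fields through ata_id_string(), so userspace sees them with the ATA byte order already fixed up, matching what the old IDE driver's HDIO_GET_IDENTITY returned. A userspace sketch (the device path is just an example):

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/hdreg.h>

	int main(void)
	{
		struct hd_driveid id;
		int fd = open("/dev/sda", O_RDONLY | O_NONBLOCK);

		if (fd < 0)
			return 1;
		if (ioctl(fd, HDIO_GET_IDENTITY, &id) == 0)
			printf("model: %.40s serial: %.20s\n",
			       (char *)id.model, (char *)id.serial_no);
		close(fd);
		return 0;
	}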
190/**
152 * ata_cmd_ioctl - Handler for HDIO_DRIVE_CMD ioctl 191 * ata_cmd_ioctl - Handler for HDIO_DRIVE_CMD ioctl
153 * @scsidev: Device to which we are issuing command 192 * @scsidev: Device to which we are issuing command
154 * @arg: User provided data for issuing command 193 * @arg: User provided data for issuing command
@@ -159,7 +198,6 @@ int ata_std_bios_param(struct scsi_device *sdev, struct block_device *bdev,
159 * RETURNS: 198 * RETURNS:
160 * Zero on success, negative errno on error. 199 * Zero on success, negative errno on error.
161 */ 200 */
162
163int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg) 201int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
164{ 202{
165 int rc = 0; 203 int rc = 0;
@@ -359,6 +397,9 @@ int ata_scsi_ioctl(struct scsi_device *scsidev, int cmd, void __user *arg)
359 return -EINVAL; 397 return -EINVAL;
360 return 0; 398 return 0;
361 399
400 case HDIO_GET_IDENTITY:
401 return ata_get_identity(scsidev, arg);
402
362 case HDIO_DRIVE_CMD: 403 case HDIO_DRIVE_CMD:
363 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) 404 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
364 return -EACCES; 405 return -EACCES;
@@ -397,9 +438,9 @@ int ata_scsi_ioctl(struct scsi_device *scsidev, int cmd, void __user *arg)
397 * RETURNS: 438 * RETURNS:
398 * Command allocated, or %NULL if none available. 439 * Command allocated, or %NULL if none available.
399 */ 440 */
400struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev, 441static struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
401 struct scsi_cmnd *cmd, 442 struct scsi_cmnd *cmd,
402 void (*done)(struct scsi_cmnd *)) 443 void (*done)(struct scsi_cmnd *))
403{ 444{
404 struct ata_queued_cmd *qc; 445 struct ata_queued_cmd *qc;
405 446
@@ -435,7 +476,7 @@ struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
435 * LOCKING: 476 * LOCKING:
436 * inherited from caller 477 * inherited from caller
437 */ 478 */
438void ata_dump_status(unsigned id, struct ata_taskfile *tf) 479static void ata_dump_status(unsigned id, struct ata_taskfile *tf)
439{ 480{
440 u8 stat = tf->command, err = tf->feature; 481 u8 stat = tf->command, err = tf->feature;
441 482
@@ -610,8 +651,8 @@ int ata_scsi_device_resume(struct scsi_device *sdev)
610 * LOCKING: 651 * LOCKING:
611 * spin_lock_irqsave(host lock) 652 * spin_lock_irqsave(host lock)
612 */ 653 */
613void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc, 654static void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk,
614 u8 *ascq, int verbose) 655 u8 *asc, u8 *ascq, int verbose)
615{ 656{
616 int i; 657 int i;
617 658
@@ -1359,7 +1400,7 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
1359 goto nothing_to_do; 1400 goto nothing_to_do;
1360 1401
1361 qc->flags |= ATA_QCFLAG_IO; 1402 qc->flags |= ATA_QCFLAG_IO;
1362 qc->nsect = n_block; 1403 qc->nbytes = n_block * ATA_SECT_SIZE;
1363 1404
1364 rc = ata_build_rw_tf(&qc->tf, qc->dev, block, n_block, tf_flags, 1405 rc = ata_build_rw_tf(&qc->tf, qc->dev, block, n_block, tf_flags,
1365 qc->tag); 1406 qc->tag);
@@ -1698,8 +1739,8 @@ unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
1698 1739
1699 if (buflen > 35) { 1740 if (buflen > 35) {
1700 memcpy(&rbuf[8], "ATA ", 8); 1741 memcpy(&rbuf[8], "ATA ", 8);
1701 ata_id_string(args->id, &rbuf[16], ATA_ID_PROD_OFS, 16); 1742 ata_id_string(args->id, &rbuf[16], ATA_ID_PROD, 16);
1702 ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV_OFS, 4); 1743 ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV, 4);
1703 if (rbuf[32] == 0 || rbuf[32] == ' ') 1744 if (rbuf[32] == 0 || rbuf[32] == ' ')
1704 memcpy(&rbuf[32], "n/a ", 4); 1745 memcpy(&rbuf[32], "n/a ", 4);
1705 } 1746 }
@@ -1768,13 +1809,13 @@ unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf,
1768 0, 1809 0,
1769 0x80, /* this page code */ 1810 0x80, /* this page code */
1770 0, 1811 0,
1771 ATA_SERNO_LEN, /* page len */ 1812 ATA_ID_SERNO_LEN, /* page len */
1772 }; 1813 };
1773 memcpy(rbuf, hdr, sizeof(hdr)); 1814 memcpy(rbuf, hdr, sizeof(hdr));
1774 1815
1775 if (buflen > (ATA_SERNO_LEN + 4 - 1)) 1816 if (buflen > (ATA_ID_SERNO_LEN + 4 - 1))
1776 ata_id_string(args->id, (unsigned char *) &rbuf[4], 1817 ata_id_string(args->id, (unsigned char *) &rbuf[4],
1777 ATA_ID_SERNO_OFS, ATA_SERNO_LEN); 1818 ATA_ID_SERNO, ATA_ID_SERNO_LEN);
1778 1819
1779 return 0; 1820 return 0;
1780} 1821}
@@ -1799,19 +1840,18 @@ unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf,
1799{ 1840{
1800 int num; 1841 int num;
1801 const int sat_model_serial_desc_len = 68; 1842 const int sat_model_serial_desc_len = 68;
1802 const int ata_model_byte_len = 40;
1803 1843
1804 rbuf[1] = 0x83; /* this page code */ 1844 rbuf[1] = 0x83; /* this page code */
1805 num = 4; 1845 num = 4;
1806 1846
1807 if (buflen > (ATA_SERNO_LEN + num + 3)) { 1847 if (buflen > (ATA_ID_SERNO_LEN + num + 3)) {
1808 /* piv=0, assoc=lu, code_set=ASCII, designator=vendor */ 1848 /* piv=0, assoc=lu, code_set=ASCII, designator=vendor */
1809 rbuf[num + 0] = 2; 1849 rbuf[num + 0] = 2;
1810 rbuf[num + 3] = ATA_SERNO_LEN; 1850 rbuf[num + 3] = ATA_ID_SERNO_LEN;
1811 num += 4; 1851 num += 4;
1812 ata_id_string(args->id, (unsigned char *) rbuf + num, 1852 ata_id_string(args->id, (unsigned char *) rbuf + num,
1813 ATA_ID_SERNO_OFS, ATA_SERNO_LEN); 1853 ATA_ID_SERNO, ATA_ID_SERNO_LEN);
1814 num += ATA_SERNO_LEN; 1854 num += ATA_ID_SERNO_LEN;
1815 } 1855 }
1816 if (buflen > (sat_model_serial_desc_len + num + 3)) { 1856 if (buflen > (sat_model_serial_desc_len + num + 3)) {
1817 /* SAT defined lu model and serial numbers descriptor */ 1857 /* SAT defined lu model and serial numbers descriptor */
@@ -1823,11 +1863,11 @@ unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf,
1823 memcpy(rbuf + num, "ATA ", 8); 1863 memcpy(rbuf + num, "ATA ", 8);
1824 num += 8; 1864 num += 8;
1825 ata_id_string(args->id, (unsigned char *) rbuf + num, 1865 ata_id_string(args->id, (unsigned char *) rbuf + num,
1826 ATA_ID_PROD_OFS, ata_model_byte_len); 1866 ATA_ID_PROD, ATA_ID_PROD_LEN);
1827 num += ata_model_byte_len; 1867 num += ATA_ID_PROD_LEN;
1828 ata_id_string(args->id, (unsigned char *) rbuf + num, 1868 ata_id_string(args->id, (unsigned char *) rbuf + num,
1829 ATA_ID_SERNO_OFS, ATA_SERNO_LEN); 1869 ATA_ID_SERNO, ATA_ID_SERNO_LEN);
1830 num += ATA_SERNO_LEN; 1870 num += ATA_ID_SERNO_LEN;
1831 } 1871 }
1832 rbuf[3] = num - 4; /* page len (assume less than 256 bytes) */ 1872 rbuf[3] = num - 4; /* page len (assume less than 256 bytes) */
1833 return 0; 1873 return 0;
@@ -1955,15 +1995,15 @@ static unsigned int ata_msense_rw_recovery(u8 **ptr_io, const u8 *last)
1955 */ 1995 */
1956static int ata_dev_supports_fua(u16 *id) 1996static int ata_dev_supports_fua(u16 *id)
1957{ 1997{
1958 unsigned char model[41], fw[9]; 1998 unsigned char model[ATA_ID_PROD_LEN + 1], fw[ATA_ID_FW_REV_LEN + 1];
1959 1999
1960 if (!libata_fua) 2000 if (!libata_fua)
1961 return 0; 2001 return 0;
1962 if (!ata_id_has_fua(id)) 2002 if (!ata_id_has_fua(id))
1963 return 0; 2003 return 0;
1964 2004
1965 ata_id_c_string(id, model, ATA_ID_PROD_OFS, sizeof(model)); 2005 ata_id_c_string(id, model, ATA_ID_PROD, sizeof(model));
1966 ata_id_c_string(id, fw, ATA_ID_FW_REV_OFS, sizeof(fw)); 2006 ata_id_c_string(id, fw, ATA_ID_FW_REV, sizeof(fw));
1967 2007
1968 if (strcmp(model, "Maxtor")) 2008 if (strcmp(model, "Maxtor"))
1969 return 1; 2009 return 1;
@@ -2661,7 +2701,7 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
2661 * TODO: find out if we need to do more here to 2701 * TODO: find out if we need to do more here to
2662 * cover scatter/gather case. 2702 * cover scatter/gather case.
2663 */ 2703 */
2664 qc->nsect = scmd->request_bufflen / ATA_SECT_SIZE; 2704 qc->nbytes = scmd->request_bufflen;
2665 2705
2666 /* request result TF */ 2706 /* request result TF */
2667 qc->flags |= ATA_QCFLAG_RESULT_TF; 2707 qc->flags |= ATA_QCFLAG_RESULT_TF;
@@ -3059,7 +3099,8 @@ void ata_scsi_hotplug(struct work_struct *work)
3059 for (i = 0; i < ATA_MAX_DEVICES; i++) { 3099 for (i = 0; i < ATA_MAX_DEVICES; i++) {
3060 struct ata_device *dev = &ap->device[i]; 3100 struct ata_device *dev = &ap->device[i];
3061 if (ata_dev_enabled(dev) && !dev->sdev) { 3101 if (ata_dev_enabled(dev) && !dev->sdev) {
3062 queue_delayed_work(ata_aux_wq, &ap->hotplug_task, HZ); 3102 queue_delayed_work(ata_aux_wq, &ap->hotplug_task,
3103 round_jiffies_relative(HZ));
3063 break; 3104 break;
3064 } 3105 }
3065 } 3106 }
@@ -3264,7 +3305,8 @@ EXPORT_SYMBOL_GPL(ata_sas_port_init);
3264 3305
3265void ata_sas_port_destroy(struct ata_port *ap) 3306void ata_sas_port_destroy(struct ata_port *ap)
3266{ 3307{
3267 ap->ops->port_stop(ap); 3308 if (ap->ops->port_stop)
3309 ap->ops->port_stop(ap);
3268 kfree(ap); 3310 kfree(ap);
3269} 3311}
3270EXPORT_SYMBOL_GPL(ata_sas_port_destroy); 3312EXPORT_SYMBOL_GPL(ata_sas_port_destroy);
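The qc->nsect to qc->nbytes switch in the hunks above changes the transfer-size bookkeeping from sectors to bytes. Worked through for a 16-sector read (ATA_SECT_SIZE is 512):

	u32 n_block = 16;			/* sectors in the request */

	qc->nbytes = n_block * ATA_SECT_SIZE;	/* 16 * 512 = 8192 bytes */

which is also why the EH report can now print qc->nbytes directly instead of deriving it from nsect.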
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 12c88c588039..16bc3e35bdd4 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -56,10 +56,7 @@ u8 ata_irq_on(struct ata_port *ap)
56 ap->ctl &= ~ATA_NIEN; 56 ap->ctl &= ~ATA_NIEN;
57 ap->last_ctl = ap->ctl; 57 ap->last_ctl = ap->ctl;
58 58
59 if (ap->flags & ATA_FLAG_MMIO) 59 iowrite8(ap->ctl, ioaddr->ctl_addr);
60 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
61 else
62 outb(ap->ctl, ioaddr->ctl_addr);
63 tmp = ata_wait_idle(ap); 60 tmp = ata_wait_idle(ap);
64 61
65 ap->ops->irq_clear(ap); 62 ap->ops->irq_clear(ap);
@@ -67,92 +64,74 @@ u8 ata_irq_on(struct ata_port *ap)
67 return tmp; 64 return tmp;
68} 65}
69 66
67u8 ata_dummy_irq_on(struct ata_port *ap) { return 0; }
68
70/** 69/**
71 * ata_tf_load_pio - send taskfile registers to host controller 70 * ata_irq_ack - Acknowledge a device interrupt.
72 * @ap: Port to which output is sent 71 * @ap: Port on which interrupts are enabled.
73 * @tf: ATA taskfile register set
74 * 72 *
75 * Outputs ATA taskfile to standard ATA host controller. 73 * Wait up to 10 ms for legacy IDE device to become idle (BUSY
74 * or BUSY+DRQ clear). Obtain dma status and port status from
75 * device. Clear the interrupt. Return port status.
76 * 76 *
77 * LOCKING: 77 * LOCKING:
78 * Inherited from caller.
79 */ 78 */
80 79
81static void ata_tf_load_pio(struct ata_port *ap, const struct ata_taskfile *tf) 80u8 ata_irq_ack(struct ata_port *ap, unsigned int chk_drq)
82{ 81{
83 struct ata_ioports *ioaddr = &ap->ioaddr; 82 unsigned int bits = chk_drq ? ATA_BUSY | ATA_DRQ : ATA_BUSY;
84 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR; 83 u8 host_stat, post_stat, status;
85 84
86 if (tf->ctl != ap->last_ctl) { 85 status = ata_busy_wait(ap, bits, 1000);
87 outb(tf->ctl, ioaddr->ctl_addr); 86 if (status & bits)
88 ap->last_ctl = tf->ctl; 87 if (ata_msg_err(ap))
89 ata_wait_idle(ap); 88 printk(KERN_ERR "abnormal status 0x%X\n", status);
90 }
91 89
92 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { 90 /* get controller status; clear intr, err bits */
93 outb(tf->hob_feature, ioaddr->feature_addr); 91 host_stat = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
94 outb(tf->hob_nsect, ioaddr->nsect_addr); 92 iowrite8(host_stat | ATA_DMA_INTR | ATA_DMA_ERR,
95 outb(tf->hob_lbal, ioaddr->lbal_addr); 93 ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
96 outb(tf->hob_lbam, ioaddr->lbam_addr);
97 outb(tf->hob_lbah, ioaddr->lbah_addr);
98 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
99 tf->hob_feature,
100 tf->hob_nsect,
101 tf->hob_lbal,
102 tf->hob_lbam,
103 tf->hob_lbah);
104 }
105 94
106 if (is_addr) { 95 post_stat = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
107 outb(tf->feature, ioaddr->feature_addr);
108 outb(tf->nsect, ioaddr->nsect_addr);
109 outb(tf->lbal, ioaddr->lbal_addr);
110 outb(tf->lbam, ioaddr->lbam_addr);
111 outb(tf->lbah, ioaddr->lbah_addr);
112 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
113 tf->feature,
114 tf->nsect,
115 tf->lbal,
116 tf->lbam,
117 tf->lbah);
118 }
119 96
120 if (tf->flags & ATA_TFLAG_DEVICE) { 97 if (ata_msg_intr(ap))
121 outb(tf->device, ioaddr->device_addr); 98 printk(KERN_INFO "%s: irq ack: host_stat 0x%X, new host_stat 0x%X, drv_stat 0x%X\n",
122 VPRINTK("device 0x%X\n", tf->device); 99 __FUNCTION__,
123 } 100 host_stat, post_stat, status);
124 101
125 ata_wait_idle(ap); 102 return status;
126} 103}
127 104
105u8 ata_dummy_irq_ack(struct ata_port *ap, unsigned int chk_drq) { return 0; }
106
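This rewrite of libata-sff.c collapses every _pio/_mmio function pair into one body: iomap cookies (void __iomem *) cover both port I/O and MMIO, and ioread8()/iowrite8() dispatch on the cookie at run time. A sketch of the mechanism under that assumption (read_reg7 is an illustrative name):

	static u8 read_reg7(struct pci_dev *pdev)
	{
		void __iomem * const *iomap;

		if (pcim_iomap(pdev, 0, 0) == NULL)	/* BAR 0: I/O or MMIO */
			return 0xff;
		iomap = pcim_iomap_table(pdev);
		return ioread8(iomap[0] + 7);	/* same call either way */
	}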
128/** 107/**
129 * ata_tf_load_mmio - send taskfile registers to host controller 108 * ata_tf_load - send taskfile registers to host controller
130 * @ap: Port to which output is sent 109 * @ap: Port to which output is sent
131 * @tf: ATA taskfile register set 110 * @tf: ATA taskfile register set
132 * 111 *
133 * Outputs ATA taskfile to standard ATA host controller using MMIO. 112 * Outputs ATA taskfile to standard ATA host controller.
134 * 113 *
135 * LOCKING: 114 * LOCKING:
136 * Inherited from caller. 115 * Inherited from caller.
137 */ 116 */
138 117
139static void ata_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf) 118void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
140{ 119{
141 struct ata_ioports *ioaddr = &ap->ioaddr; 120 struct ata_ioports *ioaddr = &ap->ioaddr;
142 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR; 121 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
143 122
144 if (tf->ctl != ap->last_ctl) { 123 if (tf->ctl != ap->last_ctl) {
145 writeb(tf->ctl, (void __iomem *) ap->ioaddr.ctl_addr); 124 iowrite8(tf->ctl, ioaddr->ctl_addr);
146 ap->last_ctl = tf->ctl; 125 ap->last_ctl = tf->ctl;
147 ata_wait_idle(ap); 126 ata_wait_idle(ap);
148 } 127 }
149 128
150 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { 129 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
151 writeb(tf->hob_feature, (void __iomem *) ioaddr->feature_addr); 130 iowrite8(tf->hob_feature, ioaddr->feature_addr);
152 writeb(tf->hob_nsect, (void __iomem *) ioaddr->nsect_addr); 131 iowrite8(tf->hob_nsect, ioaddr->nsect_addr);
153 writeb(tf->hob_lbal, (void __iomem *) ioaddr->lbal_addr); 132 iowrite8(tf->hob_lbal, ioaddr->lbal_addr);
154 writeb(tf->hob_lbam, (void __iomem *) ioaddr->lbam_addr); 133 iowrite8(tf->hob_lbam, ioaddr->lbam_addr);
155 writeb(tf->hob_lbah, (void __iomem *) ioaddr->lbah_addr); 134 iowrite8(tf->hob_lbah, ioaddr->lbah_addr);
156 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n", 135 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
157 tf->hob_feature, 136 tf->hob_feature,
158 tf->hob_nsect, 137 tf->hob_nsect,
@@ -162,11 +141,11 @@ static void ata_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
162 } 141 }
163 142
164 if (is_addr) { 143 if (is_addr) {
165 writeb(tf->feature, (void __iomem *) ioaddr->feature_addr); 144 iowrite8(tf->feature, ioaddr->feature_addr);
166 writeb(tf->nsect, (void __iomem *) ioaddr->nsect_addr); 145 iowrite8(tf->nsect, ioaddr->nsect_addr);
167 writeb(tf->lbal, (void __iomem *) ioaddr->lbal_addr); 146 iowrite8(tf->lbal, ioaddr->lbal_addr);
168 writeb(tf->lbam, (void __iomem *) ioaddr->lbam_addr); 147 iowrite8(tf->lbam, ioaddr->lbam_addr);
169 writeb(tf->lbah, (void __iomem *) ioaddr->lbah_addr); 148 iowrite8(tf->lbah, ioaddr->lbah_addr);
170 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n", 149 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
171 tf->feature, 150 tf->feature,
172 tf->nsect, 151 tf->nsect,
@@ -176,108 +155,34 @@ static void ata_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
176 } 155 }
177 156
178 if (tf->flags & ATA_TFLAG_DEVICE) { 157 if (tf->flags & ATA_TFLAG_DEVICE) {
179 writeb(tf->device, (void __iomem *) ioaddr->device_addr); 158 iowrite8(tf->device, ioaddr->device_addr);
180 VPRINTK("device 0x%X\n", tf->device); 159 VPRINTK("device 0x%X\n", tf->device);
181 } 160 }
182 161
183 ata_wait_idle(ap); 162 ata_wait_idle(ap);
184} 163}
185 164
186
187/**
188 * ata_tf_load - send taskfile registers to host controller
189 * @ap: Port to which output is sent
190 * @tf: ATA taskfile register set
191 *
192 * Outputs ATA taskfile to standard ATA host controller using MMIO
193 * or PIO as indicated by the ATA_FLAG_MMIO flag.
194 * Writes the control, feature, nsect, lbal, lbam, and lbah registers.
195 * Optionally (ATA_TFLAG_LBA48) writes hob_feature, hob_nsect,
196 * hob_lbal, hob_lbam, and hob_lbah.
197 *
198 * This function waits for idle (!BUSY and !DRQ) after writing
199 * registers. If the control register has a new value, this
200 * function also waits for idle after writing control and before
201 * writing the remaining registers.
202 *
203 * May be used as the tf_load() entry in ata_port_operations.
204 *
205 * LOCKING:
206 * Inherited from caller.
207 */
208void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
209{
210 if (ap->flags & ATA_FLAG_MMIO)
211 ata_tf_load_mmio(ap, tf);
212 else
213 ata_tf_load_pio(ap, tf);
214}
215
216/** 165/**
217 * ata_exec_command_pio - issue ATA command to host controller 166 * ata_exec_command - issue ATA command to host controller
218 * @ap: port to which command is being issued
219 * @tf: ATA taskfile register set
220 *
221 * Issues PIO write to ATA command register, with proper
222 * synchronization with interrupt handler / other threads.
223 *
224 * LOCKING:
225 * spin_lock_irqsave(host lock)
226 */
227
228static void ata_exec_command_pio(struct ata_port *ap, const struct ata_taskfile *tf)
229{
230 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
231
232 outb(tf->command, ap->ioaddr.command_addr);
233 ata_pause(ap);
234}
235
236
237/**
238 * ata_exec_command_mmio - issue ATA command to host controller
239 * @ap: port to which command is being issued 167 * @ap: port to which command is being issued
240 * @tf: ATA taskfile register set 168 * @tf: ATA taskfile register set
241 * 169 *
242 * Issues MMIO write to ATA command register, with proper 170 * Issues ATA command, with proper synchronization with interrupt
243 * synchronization with interrupt handler / other threads. 171 * handler / other threads.
244 *
245 * FIXME: missing write posting for 400nS delay enforcement
246 * 172 *
247 * LOCKING: 173 * LOCKING:
248 * spin_lock_irqsave(host lock) 174 * spin_lock_irqsave(host lock)
249 */ 175 */
250 176void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
251static void ata_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
252{ 177{
253 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command); 178 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
254 179
255 writeb(tf->command, (void __iomem *) ap->ioaddr.command_addr); 180 iowrite8(tf->command, ap->ioaddr.command_addr);
256 ata_pause(ap); 181 ata_pause(ap);
257} 182}
258 183
259
260/**
261 * ata_exec_command - issue ATA command to host controller
262 * @ap: port to which command is being issued
263 * @tf: ATA taskfile register set
264 *
265 * Issues PIO/MMIO write to ATA command register, with proper
266 * synchronization with interrupt handler / other threads.
267 *
268 * LOCKING:
269 * spin_lock_irqsave(host lock)
270 */
271void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
272{
273 if (ap->flags & ATA_FLAG_MMIO)
274 ata_exec_command_mmio(ap, tf);
275 else
276 ata_exec_command_pio(ap, tf);
277}
278
279/** 184/**
280 * ata_tf_read_pio - input device's ATA taskfile shadow registers 185 * ata_tf_read - input device's ATA taskfile shadow registers
281 * @ap: Port from which input is read 186 * @ap: Port from which input is read
282 * @tf: ATA taskfile register set for storing input 187 * @tf: ATA taskfile register set for storing input
283 * 188 *
@@ -287,121 +192,28 @@ void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
287 * LOCKING: 192 * LOCKING:
288 * Inherited from caller. 193 * Inherited from caller.
289 */ 194 */
290 195void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
291static void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf)
292{
293 struct ata_ioports *ioaddr = &ap->ioaddr;
294
295 tf->command = ata_check_status(ap);
296 tf->feature = inb(ioaddr->error_addr);
297 tf->nsect = inb(ioaddr->nsect_addr);
298 tf->lbal = inb(ioaddr->lbal_addr);
299 tf->lbam = inb(ioaddr->lbam_addr);
300 tf->lbah = inb(ioaddr->lbah_addr);
301 tf->device = inb(ioaddr->device_addr);
302
303 if (tf->flags & ATA_TFLAG_LBA48) {
304 outb(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
305 tf->hob_feature = inb(ioaddr->error_addr);
306 tf->hob_nsect = inb(ioaddr->nsect_addr);
307 tf->hob_lbal = inb(ioaddr->lbal_addr);
308 tf->hob_lbam = inb(ioaddr->lbam_addr);
309 tf->hob_lbah = inb(ioaddr->lbah_addr);
310 }
311}
312
313/**
314 * ata_tf_read_mmio - input device's ATA taskfile shadow registers
315 * @ap: Port from which input is read
316 * @tf: ATA taskfile register set for storing input
317 *
318 * Reads ATA taskfile registers for currently-selected device
319 * into @tf via MMIO.
320 *
321 * LOCKING:
322 * Inherited from caller.
323 */
324
325static void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf)
326{ 196{
327 struct ata_ioports *ioaddr = &ap->ioaddr; 197 struct ata_ioports *ioaddr = &ap->ioaddr;
328 198
329 tf->command = ata_check_status(ap); 199 tf->command = ata_check_status(ap);
330 tf->feature = readb((void __iomem *)ioaddr->error_addr); 200 tf->feature = ioread8(ioaddr->error_addr);
331 tf->nsect = readb((void __iomem *)ioaddr->nsect_addr); 201 tf->nsect = ioread8(ioaddr->nsect_addr);
332 tf->lbal = readb((void __iomem *)ioaddr->lbal_addr); 202 tf->lbal = ioread8(ioaddr->lbal_addr);
333 tf->lbam = readb((void __iomem *)ioaddr->lbam_addr); 203 tf->lbam = ioread8(ioaddr->lbam_addr);
334 tf->lbah = readb((void __iomem *)ioaddr->lbah_addr); 204 tf->lbah = ioread8(ioaddr->lbah_addr);
335 tf->device = readb((void __iomem *)ioaddr->device_addr); 205 tf->device = ioread8(ioaddr->device_addr);
336 206
337 if (tf->flags & ATA_TFLAG_LBA48) { 207 if (tf->flags & ATA_TFLAG_LBA48) {
338 writeb(tf->ctl | ATA_HOB, (void __iomem *) ap->ioaddr.ctl_addr); 208 iowrite8(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
339 tf->hob_feature = readb((void __iomem *)ioaddr->error_addr); 209 tf->hob_feature = ioread8(ioaddr->error_addr);
340 tf->hob_nsect = readb((void __iomem *)ioaddr->nsect_addr); 210 tf->hob_nsect = ioread8(ioaddr->nsect_addr);
341 tf->hob_lbal = readb((void __iomem *)ioaddr->lbal_addr); 211 tf->hob_lbal = ioread8(ioaddr->lbal_addr);
342 tf->hob_lbam = readb((void __iomem *)ioaddr->lbam_addr); 212 tf->hob_lbam = ioread8(ioaddr->lbam_addr);
343 tf->hob_lbah = readb((void __iomem *)ioaddr->lbah_addr); 213 tf->hob_lbah = ioread8(ioaddr->lbah_addr);
344 } 214 }
345} 215}
346 216
347
348/**
349 * ata_tf_read - input device's ATA taskfile shadow registers
350 * @ap: Port from which input is read
351 * @tf: ATA taskfile register set for storing input
352 *
353 * Reads ATA taskfile registers for currently-selected device
354 * into @tf.
355 *
356 * Reads nsect, lbal, lbam, lbah, and device. If ATA_TFLAG_LBA48
357 * is set, also reads the hob registers.
358 *
359 * May be used as the tf_read() entry in ata_port_operations.
360 *
361 * LOCKING:
362 * Inherited from caller.
363 */
364void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
365{
366 if (ap->flags & ATA_FLAG_MMIO)
367 ata_tf_read_mmio(ap, tf);
368 else
369 ata_tf_read_pio(ap, tf);
370}
371
372/**
373 * ata_check_status_pio - Read device status reg & clear interrupt
374 * @ap: port where the device is
375 *
376 * Reads ATA taskfile status register for currently-selected device
377 * and return its value. This also clears pending interrupts
378 * from this device
379 *
380 * LOCKING:
381 * Inherited from caller.
382 */
383static u8 ata_check_status_pio(struct ata_port *ap)
384{
385 return inb(ap->ioaddr.status_addr);
386}
387
388/**
389 * ata_check_status_mmio - Read device status reg & clear interrupt
390 * @ap: port where the device is
391 *
392 * Reads ATA taskfile status register for currently-selected device
393 * via MMIO and return its value. This also clears pending interrupts
394 * from this device
395 *
396 * LOCKING:
397 * Inherited from caller.
398 */
399static u8 ata_check_status_mmio(struct ata_port *ap)
400{
401 return readb((void __iomem *) ap->ioaddr.status_addr);
402}
403
404
405/** 217/**
406 * ata_check_status - Read device status reg & clear interrupt 218 * ata_check_status - Read device status reg & clear interrupt
407 * @ap: port where the device is 219 * @ap: port where the device is
@@ -410,19 +222,14 @@ static u8 ata_check_status_mmio(struct ata_port *ap)
410 * and return its value. This also clears pending interrupts 222 * and return its value. This also clears pending interrupts
411 * from this device 223 * from this device
412 * 224 *
413 * May be used as the check_status() entry in ata_port_operations.
414 *
415 * LOCKING: 225 * LOCKING:
416 * Inherited from caller. 226 * Inherited from caller.
417 */ 227 */
418u8 ata_check_status(struct ata_port *ap) 228u8 ata_check_status(struct ata_port *ap)
419{ 229{
420 if (ap->flags & ATA_FLAG_MMIO) 230 return ioread8(ap->ioaddr.status_addr);
421 return ata_check_status_mmio(ap);
422 return ata_check_status_pio(ap);
423} 231}
424 232
425
426/** 233/**
427 * ata_altstatus - Read device alternate status reg 234 * ata_altstatus - Read device alternate status reg
428 * @ap: port where the device is 235 * @ap: port where the device is
@@ -441,58 +248,52 @@ u8 ata_altstatus(struct ata_port *ap)
441 if (ap->ops->check_altstatus) 248 if (ap->ops->check_altstatus)
442 return ap->ops->check_altstatus(ap); 249 return ap->ops->check_altstatus(ap);
443 250
444 if (ap->flags & ATA_FLAG_MMIO) 251 return ioread8(ap->ioaddr.altstatus_addr);
445 return readb((void __iomem *)ap->ioaddr.altstatus_addr);
446 return inb(ap->ioaddr.altstatus_addr);
447} 252}
448 253
449/** 254/**
450 * ata_bmdma_setup_mmio - Set up PCI IDE BMDMA transaction 255 * ata_bmdma_setup - Set up PCI IDE BMDMA transaction
451 * @qc: Info associated with this ATA transaction. 256 * @qc: Info associated with this ATA transaction.
452 * 257 *
453 * LOCKING: 258 * LOCKING:
454 * spin_lock_irqsave(host lock) 259 * spin_lock_irqsave(host lock)
455 */ 260 */
456 261void ata_bmdma_setup(struct ata_queued_cmd *qc)
457static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc)
458{ 262{
459 struct ata_port *ap = qc->ap; 263 struct ata_port *ap = qc->ap;
460 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE); 264 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
461 u8 dmactl; 265 u8 dmactl;
462 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
463 266
464 /* load PRD table addr. */ 267 /* load PRD table addr. */
465 mb(); /* make sure PRD table writes are visible to controller */ 268 mb(); /* make sure PRD table writes are visible to controller */
466 writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS); 269 iowrite32(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
467 270
468 /* specify data direction, triple-check start bit is clear */ 271 /* specify data direction, triple-check start bit is clear */
469 dmactl = readb(mmio + ATA_DMA_CMD); 272 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
470 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START); 273 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
471 if (!rw) 274 if (!rw)
472 dmactl |= ATA_DMA_WR; 275 dmactl |= ATA_DMA_WR;
473 writeb(dmactl, mmio + ATA_DMA_CMD); 276 iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
474 277
475 /* issue r/w command */ 278 /* issue r/w command */
476 ap->ops->exec_command(ap, &qc->tf); 279 ap->ops->exec_command(ap, &qc->tf);
477} 280}
478 281
479/** 282/**
480 * ata_bmdma_start_mmio - Start a PCI IDE BMDMA transaction 283 * ata_bmdma_start - Start a PCI IDE BMDMA transaction
481 * @qc: Info associated with this ATA transaction. 284 * @qc: Info associated with this ATA transaction.
482 * 285 *
483 * LOCKING: 286 * LOCKING:
484 * spin_lock_irqsave(host lock) 287 * spin_lock_irqsave(host lock)
485 */ 288 */
486 289void ata_bmdma_start(struct ata_queued_cmd *qc)
487static void ata_bmdma_start_mmio (struct ata_queued_cmd *qc)
488{ 290{
489 struct ata_port *ap = qc->ap; 291 struct ata_port *ap = qc->ap;
490 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
491 u8 dmactl; 292 u8 dmactl;
492 293
493 /* start host DMA transaction */ 294 /* start host DMA transaction */
494 dmactl = readb(mmio + ATA_DMA_CMD); 295 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
495 writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD); 296 iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
496 297
497 /* Strictly, one may wish to issue a readb() here, to 298 /* Strictly, one may wish to issue a readb() here, to
498 * flush the mmio write. However, control also passes 299 * flush the mmio write. However, control also passes
@@ -508,96 +309,6 @@ static void ata_bmdma_start_mmio (struct ata_queued_cmd *qc)
508} 309}
509 310
510/** 311/**
511 * ata_bmdma_setup_pio - Set up PCI IDE BMDMA transaction (PIO)
512 * @qc: Info associated with this ATA transaction.
513 *
514 * LOCKING:
515 * spin_lock_irqsave(host lock)
516 */
517
518static void ata_bmdma_setup_pio (struct ata_queued_cmd *qc)
519{
520 struct ata_port *ap = qc->ap;
521 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
522 u8 dmactl;
523
524 /* load PRD table addr. */
525 outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
526
527 /* specify data direction, triple-check start bit is clear */
528 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
529 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
530 if (!rw)
531 dmactl |= ATA_DMA_WR;
532 outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
533
534 /* issue r/w command */
535 ap->ops->exec_command(ap, &qc->tf);
536}
537
538/**
539 * ata_bmdma_start_pio - Start a PCI IDE BMDMA transaction (PIO)
540 * @qc: Info associated with this ATA transaction.
541 *
542 * LOCKING:
543 * spin_lock_irqsave(host lock)
544 */
545
546static void ata_bmdma_start_pio (struct ata_queued_cmd *qc)
547{
548 struct ata_port *ap = qc->ap;
549 u8 dmactl;
550
551 /* start host DMA transaction */
552 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
553 outb(dmactl | ATA_DMA_START,
554 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
555}
556
557
558/**
559 * ata_bmdma_start - Start a PCI IDE BMDMA transaction
560 * @qc: Info associated with this ATA transaction.
561 *
562 * Writes the ATA_DMA_START flag to the DMA command register.
563 *
564 * May be used as the bmdma_start() entry in ata_port_operations.
565 *
566 * LOCKING:
567 * spin_lock_irqsave(host lock)
568 */
569void ata_bmdma_start(struct ata_queued_cmd *qc)
570{
571 if (qc->ap->flags & ATA_FLAG_MMIO)
572 ata_bmdma_start_mmio(qc);
573 else
574 ata_bmdma_start_pio(qc);
575}
576
577
578/**
579 * ata_bmdma_setup - Set up PCI IDE BMDMA transaction
580 * @qc: Info associated with this ATA transaction.
581 *
582 * Writes address of PRD table to device's PRD Table Address
583 * register, sets the DMA control register, and calls
584 * ops->exec_command() to start the transfer.
585 *
586 * May be used as the bmdma_setup() entry in ata_port_operations.
587 *
588 * LOCKING:
589 * spin_lock_irqsave(host lock)
590 */
591void ata_bmdma_setup(struct ata_queued_cmd *qc)
592{
593 if (qc->ap->flags & ATA_FLAG_MMIO)
594 ata_bmdma_setup_mmio(qc);
595 else
596 ata_bmdma_setup_pio(qc);
597}
598
599
600/**
601 * ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt. 312 * ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
602 * @ap: Port associated with this ATA transaction. 313 * @ap: Port associated with this ATA transaction.
603 * 314 *
@@ -608,23 +319,16 @@ void ata_bmdma_setup(struct ata_queued_cmd *qc)
608 * LOCKING: 319 * LOCKING:
609 * spin_lock_irqsave(host lock) 320 * spin_lock_irqsave(host lock)
610 */ 321 */
611
612void ata_bmdma_irq_clear(struct ata_port *ap) 322void ata_bmdma_irq_clear(struct ata_port *ap)
613{ 323{
614 if (!ap->ioaddr.bmdma_addr) 324 void __iomem *mmio = ap->ioaddr.bmdma_addr;
325
326 if (!mmio)
615 return; 327 return;
616 328
617 if (ap->flags & ATA_FLAG_MMIO) { 329 iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS);
618 void __iomem *mmio =
619 ((void __iomem *) ap->ioaddr.bmdma_addr) + ATA_DMA_STATUS;
620 writeb(readb(mmio), mmio);
621 } else {
622 unsigned long addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;
623 outb(inb(addr), addr);
624 }
625} 330}
626 331
627
628/** 332/**
629 * ata_bmdma_status - Read PCI IDE BMDMA status 333 * ata_bmdma_status - Read PCI IDE BMDMA status
630 * @ap: Port associated with this ATA transaction. 334 * @ap: Port associated with this ATA transaction.
@@ -636,19 +340,11 @@ void ata_bmdma_irq_clear(struct ata_port *ap)
636 * LOCKING: 340 * LOCKING:
637 * spin_lock_irqsave(host lock) 341 * spin_lock_irqsave(host lock)
638 */ 342 */
639
640u8 ata_bmdma_status(struct ata_port *ap) 343u8 ata_bmdma_status(struct ata_port *ap)
641{ 344{
642 u8 host_stat; 345 return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
643 if (ap->flags & ATA_FLAG_MMIO) {
644 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
645 host_stat = readb(mmio + ATA_DMA_STATUS);
646 } else
647 host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
648 return host_stat;
649} 346}
650 347
651
652/** 348/**
653 * ata_bmdma_stop - Stop PCI IDE BMDMA transfer 349 * ata_bmdma_stop - Stop PCI IDE BMDMA transfer
654 * @qc: Command we are ending DMA for 350 * @qc: Command we are ending DMA for
@@ -660,21 +356,14 @@ u8 ata_bmdma_status(struct ata_port *ap)
660 * LOCKING: 356 * LOCKING:
661 * spin_lock_irqsave(host lock) 357 * spin_lock_irqsave(host lock)
662 */ 358 */
663
664void ata_bmdma_stop(struct ata_queued_cmd *qc) 359void ata_bmdma_stop(struct ata_queued_cmd *qc)
665{ 360{
666 struct ata_port *ap = qc->ap; 361 struct ata_port *ap = qc->ap;
667 if (ap->flags & ATA_FLAG_MMIO) { 362 void __iomem *mmio = ap->ioaddr.bmdma_addr;
668 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
669 363
670 /* clear start/stop bit */ 364 /* clear start/stop bit */
671 writeb(readb(mmio + ATA_DMA_CMD) & ~ATA_DMA_START, 365 iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
672 mmio + ATA_DMA_CMD); 366 mmio + ATA_DMA_CMD);
673 } else {
674 /* clear start/stop bit */
675 outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
676 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
677 }
678 367
679 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */ 368 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
680 ata_altstatus(ap); /* dummy read */ 369 ata_altstatus(ap); /* dummy read */
@@ -696,10 +385,7 @@ void ata_bmdma_freeze(struct ata_port *ap)
696 ap->ctl |= ATA_NIEN; 385 ap->ctl |= ATA_NIEN;
697 ap->last_ctl = ap->ctl; 386 ap->last_ctl = ap->ctl;
698 387
699 if (ap->flags & ATA_FLAG_MMIO) 388 iowrite8(ap->ctl, ioaddr->ctl_addr);
700 writeb(ap->ctl, (void __iomem *)ioaddr->ctl_addr);
701 else
702 outb(ap->ctl, ioaddr->ctl_addr);
703 389
704 /* Under certain circumstances, some controllers raise IRQ on 390 /* Under certain circumstances, some controllers raise IRQ on
705 * ATA_NIEN manipulation. Also, many controllers fail to mask 391 * ATA_NIEN manipulation. Also, many controllers fail to mask
@@ -724,8 +410,7 @@ void ata_bmdma_thaw(struct ata_port *ap)
724 /* clear & re-enable interrupts */ 410 /* clear & re-enable interrupts */
725 ata_chk_status(ap); 411 ata_chk_status(ap);
726 ap->ops->irq_clear(ap); 412 ap->ops->irq_clear(ap);
727 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */ 413 ap->ops->irq_on(ap);
728 ata_irq_on(ap);
729} 414}
730 415
731/** 416/**
@@ -775,7 +460,7 @@ void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
775 * really a timeout event, adjust error mask and 460 * really a timeout event, adjust error mask and
776 * cancel frozen state. 461 * cancel frozen state.
777 */ 462 */
778 if (qc->err_mask == AC_ERR_TIMEOUT && host_stat & ATA_DMA_ERR) { 463 if (qc->err_mask == AC_ERR_TIMEOUT && (host_stat & ATA_DMA_ERR)) {
779 qc->err_mask = AC_ERR_HOST_BUS; 464 qc->err_mask = AC_ERR_HOST_BUS;
780 thaw = 1; 465 thaw = 1;
781 } 466 }
@@ -832,6 +517,21 @@ void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
832} 517}
833 518
834#ifdef CONFIG_PCI 519#ifdef CONFIG_PCI
520
521static int ata_resources_present(struct pci_dev *pdev, int port)
522{
523 int i;
524
525 /* Check that the PCI resources for this channel are enabled */
526 port = port * 2;
527 for (i = 0; i < 2; i ++) {
528 if (pci_resource_start(pdev, port + i) == 0 ||
529 pci_resource_len(pdev, port + i) == 0)
530 return 0;
531 }
532 return 1;
533}
534
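ata_resources_present() encodes the standard native-mode PCI IDE BAR layout: BARs 2*port and 2*port + 1 carry the command and control blocks of channel 'port', with BAR 4 holding the shared bus-master DMA registers. A usage sketch matching the helper above:

	int primary_ok   = ata_resources_present(pdev, 0);	/* BARs 0, 1 */
	int secondary_ok = ata_resources_present(pdev, 1);	/* BARs 2, 3 */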
835/** 535/**
836 * ata_pci_init_native_mode - Initialize native-mode driver 536 * ata_pci_init_native_mode - Initialize native-mode driver
837 * @pdev: pci device to be initialized 537 * @pdev: pci device to be initialized
@@ -853,45 +553,62 @@ void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
853struct ata_probe_ent * 553struct ata_probe_ent *
854ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int ports) 554ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int ports)
855{ 555{
856 struct ata_probe_ent *probe_ent = 556 struct ata_probe_ent *probe_ent;
857 ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]); 557 int i, p = 0;
858 int p = 0; 558 void __iomem * const *iomap;
859 unsigned long bmdma; 559
560 /* iomap BARs */
561 for (i = 0; i < 4; i++) {
562 if (pcim_iomap(pdev, i, 0) == NULL) {
563 dev_printk(KERN_ERR, &pdev->dev,
564 "failed to iomap PCI BAR %d\n", i);
565 return NULL;
566 }
567 }
860 568
569 pcim_iomap(pdev, 4, 0); /* may fail */
570 iomap = pcim_iomap_table(pdev);
571
572 /* alloc and init probe_ent */
573 probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
861 if (!probe_ent) 574 if (!probe_ent)
862 return NULL; 575 return NULL;
863 576
864 probe_ent->irq = pdev->irq; 577 probe_ent->irq = pdev->irq;
865 probe_ent->irq_flags = IRQF_SHARED; 578 probe_ent->irq_flags = IRQF_SHARED;
579
580 /* Discard disabled ports. Some controllers show their
581 unused channels this way */
582 if (ata_resources_present(pdev, 0) == 0)
583 ports &= ~ATA_PORT_PRIMARY;
584 if (ata_resources_present(pdev, 1) == 0)
585 ports &= ~ATA_PORT_SECONDARY;
866 586
867 if (ports & ATA_PORT_PRIMARY) { 587 if (ports & ATA_PORT_PRIMARY) {
868 probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 0); 588 probe_ent->port[p].cmd_addr = iomap[0];
869 probe_ent->port[p].altstatus_addr = 589 probe_ent->port[p].altstatus_addr =
870 probe_ent->port[p].ctl_addr = 590 probe_ent->port[p].ctl_addr = (void __iomem *)
871 pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS; 591 ((unsigned long)iomap[1] | ATA_PCI_CTL_OFS);
872 bmdma = pci_resource_start(pdev, 4); 592 if (iomap[4]) {
873 if (bmdma) {
874 if ((!(port[p]->flags & ATA_FLAG_IGN_SIMPLEX)) && 593 if ((!(port[p]->flags & ATA_FLAG_IGN_SIMPLEX)) &&
875 (inb(bmdma + 2) & 0x80)) 594 (ioread8(iomap[4] + 2) & 0x80))
876 probe_ent->_host_flags |= ATA_HOST_SIMPLEX; 595 probe_ent->_host_flags |= ATA_HOST_SIMPLEX;
877 probe_ent->port[p].bmdma_addr = bmdma; 596 probe_ent->port[p].bmdma_addr = iomap[4];
878 } 597 }
879 ata_std_ports(&probe_ent->port[p]); 598 ata_std_ports(&probe_ent->port[p]);
880 p++; 599 p++;
881 } 600 }
882 601
883 if (ports & ATA_PORT_SECONDARY) { 602 if (ports & ATA_PORT_SECONDARY) {
884 probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 2); 603 probe_ent->port[p].cmd_addr = iomap[2];
885 probe_ent->port[p].altstatus_addr = 604 probe_ent->port[p].altstatus_addr =
886 probe_ent->port[p].ctl_addr = 605 probe_ent->port[p].ctl_addr = (void __iomem *)
887 pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS; 606 ((unsigned long)iomap[3] | ATA_PCI_CTL_OFS);
888 bmdma = pci_resource_start(pdev, 4); 607 if (iomap[4]) {
889 if (bmdma) {
890 bmdma += 8;
891 if ((!(port[p]->flags & ATA_FLAG_IGN_SIMPLEX)) && 608 if ((!(port[p]->flags & ATA_FLAG_IGN_SIMPLEX)) &&
892 (inb(bmdma + 2) & 0x80)) 609 (ioread8(iomap[4] + 10) & 0x80))
893 probe_ent->_host_flags |= ATA_HOST_SIMPLEX; 610 probe_ent->_host_flags |= ATA_HOST_SIMPLEX;
894 probe_ent->port[p].bmdma_addr = bmdma; 611 probe_ent->port[p].bmdma_addr = iomap[4] + 8;
895 } 612 }
896 ata_std_ports(&probe_ent->port[p]); 613 ata_std_ports(&probe_ent->port[p]);
897 probe_ent->pinfo2 = port[1]; 614 probe_ent->pinfo2 = port[1];
@@ -902,13 +619,29 @@ ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int
902 return probe_ent; 619 return probe_ent;
903} 620}
904 621
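The cast-and-or above computes the control/altstatus register inside the 4-byte control BAR; ATA_PCI_CTL_OFS is 2, and since an iomap cookie behaves like an address, plain pointer arithmetic would express the same offset:

	void __iomem *ctl = (void __iomem *)
		((unsigned long)iomap[1] | ATA_PCI_CTL_OFS);
	/* equivalent, as the cookie is aligned: iomap[1] + ATA_PCI_CTL_OFS */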
905
906static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev, 622static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev,
907 struct ata_port_info **port, int port_mask) 623 struct ata_port_info **port, int port_mask)
908{ 624{
909 struct ata_probe_ent *probe_ent; 625 struct ata_probe_ent *probe_ent;
910 unsigned long bmdma = pci_resource_start(pdev, 4); 626 void __iomem *iomap[5] = { }, *bmdma;
627
628 if (port_mask & ATA_PORT_PRIMARY) {
629 iomap[0] = devm_ioport_map(&pdev->dev, ATA_PRIMARY_CMD, 8);
630 iomap[1] = devm_ioport_map(&pdev->dev, ATA_PRIMARY_CTL, 1);
631 if (!iomap[0] || !iomap[1])
632 return NULL;
633 }
634
635 if (port_mask & ATA_PORT_SECONDARY) {
636 iomap[2] = devm_ioport_map(&pdev->dev, ATA_SECONDARY_CMD, 8);
637 iomap[3] = devm_ioport_map(&pdev->dev, ATA_SECONDARY_CTL, 1);
638 if (!iomap[2] || !iomap[3])
639 return NULL;
640 }
911 641
642 bmdma = pcim_iomap(pdev, 4, 16); /* may fail */
643
644 /* alloc and init probe_ent */
912 probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]); 645 probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
913 if (!probe_ent) 646 if (!probe_ent)
914 return NULL; 647 return NULL;
@@ -918,13 +651,13 @@ static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev,
918 651
919 if (port_mask & ATA_PORT_PRIMARY) { 652 if (port_mask & ATA_PORT_PRIMARY) {
920 probe_ent->irq = ATA_PRIMARY_IRQ(pdev); 653 probe_ent->irq = ATA_PRIMARY_IRQ(pdev);
921 probe_ent->port[0].cmd_addr = ATA_PRIMARY_CMD; 654 probe_ent->port[0].cmd_addr = iomap[0];
922 probe_ent->port[0].altstatus_addr = 655 probe_ent->port[0].altstatus_addr =
923 probe_ent->port[0].ctl_addr = ATA_PRIMARY_CTL; 656 probe_ent->port[0].ctl_addr = iomap[1];
924 if (bmdma) { 657 if (bmdma) {
925 probe_ent->port[0].bmdma_addr = bmdma; 658 probe_ent->port[0].bmdma_addr = bmdma;
926 if ((!(port[0]->flags & ATA_FLAG_IGN_SIMPLEX)) && 659 if ((!(port[0]->flags & ATA_FLAG_IGN_SIMPLEX)) &&
927 (inb(bmdma + 2) & 0x80)) 660 (ioread8(bmdma + 2) & 0x80))
928 probe_ent->_host_flags |= ATA_HOST_SIMPLEX; 661 probe_ent->_host_flags |= ATA_HOST_SIMPLEX;
929 } 662 }
930 ata_std_ports(&probe_ent->port[0]); 663 ata_std_ports(&probe_ent->port[0]);
@@ -936,13 +669,13 @@ static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev,
936 probe_ent->irq2 = ATA_SECONDARY_IRQ(pdev); 669 probe_ent->irq2 = ATA_SECONDARY_IRQ(pdev);
937 else 670 else
938 probe_ent->irq = ATA_SECONDARY_IRQ(pdev); 671 probe_ent->irq = ATA_SECONDARY_IRQ(pdev);
939 probe_ent->port[1].cmd_addr = ATA_SECONDARY_CMD; 672 probe_ent->port[1].cmd_addr = iomap[2];
940 probe_ent->port[1].altstatus_addr = 673 probe_ent->port[1].altstatus_addr =
941 probe_ent->port[1].ctl_addr = ATA_SECONDARY_CTL; 674 probe_ent->port[1].ctl_addr = iomap[3];
942 if (bmdma) { 675 if (bmdma) {
943 probe_ent->port[1].bmdma_addr = bmdma + 8; 676 probe_ent->port[1].bmdma_addr = bmdma + 8;
944 if ((!(port[1]->flags & ATA_FLAG_IGN_SIMPLEX)) && 677 if ((!(port[1]->flags & ATA_FLAG_IGN_SIMPLEX)) &&
945 (inb(bmdma + 10) & 0x80)) 678 (ioread8(bmdma + 10) & 0x80))
946 probe_ent->_host_flags |= ATA_HOST_SIMPLEX; 679 probe_ent->_host_flags |= ATA_HOST_SIMPLEX;
947 } 680 }
948 ata_std_ports(&probe_ent->port[1]); 681 ata_std_ports(&probe_ent->port[1]);
@@ -984,15 +717,18 @@ static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev,
984int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info, 717int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
985 unsigned int n_ports) 718 unsigned int n_ports)
986{ 719{
720 struct device *dev = &pdev->dev;
987 struct ata_probe_ent *probe_ent = NULL; 721 struct ata_probe_ent *probe_ent = NULL;
988 struct ata_port_info *port[2]; 722 struct ata_port_info *port[2];
989 u8 mask; 723 u8 mask;
990 unsigned int legacy_mode = 0; 724 unsigned int legacy_mode = 0;
991 int disable_dev_on_err = 1;
992 int rc; 725 int rc;
993 726
994 DPRINTK("ENTER\n"); 727 DPRINTK("ENTER\n");
995 728
729 if (!devres_open_group(dev, NULL, GFP_KERNEL))
730 return -ENOMEM;
731
996 BUG_ON(n_ports < 1 || n_ports > 2); 732 BUG_ON(n_ports < 1 || n_ports > 2);
997 733
998 port[0] = port_info[0]; 734 port[0] = port_info[0];
@@ -1009,9 +745,9 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
1009 boot for the primary video which is BIOS enabled 745 boot for the primary video which is BIOS enabled
1010 */ 746 */
1011 747
1012 rc = pci_enable_device(pdev); 748 rc = pcim_enable_device(pdev);
1013 if (rc) 749 if (rc)
1014 return rc; 750 goto err_out;
1015 751
1016 if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) { 752 if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
1017 u8 tmp8; 753 u8 tmp8;
@@ -1027,7 +763,8 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
1027 left a device in compatibility mode */ 763 left a device in compatibility mode */
1028 if (legacy_mode) { 764 if (legacy_mode) {
1029 printk(KERN_ERR "ata: Compatibility mode ATA is not supported on this platform, skipping.\n"); 765 printk(KERN_ERR "ata: Compatibility mode ATA is not supported on this platform, skipping.\n");
1030 return -EOPNOTSUPP; 766 rc = -EOPNOTSUPP;
767 goto err_out;
1031 } 768 }
1032#endif 769#endif
1033 } 770 }
@@ -1035,13 +772,13 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
1035 if (!legacy_mode) { 772 if (!legacy_mode) {
1036 rc = pci_request_regions(pdev, DRV_NAME); 773 rc = pci_request_regions(pdev, DRV_NAME);
1037 if (rc) { 774 if (rc) {
1038 disable_dev_on_err = 0; 775 pcim_pin_device(pdev);
1039 goto err_out; 776 goto err_out;
1040 } 777 }
1041 } else { 778 } else {
1042 /* Deal with combined mode hack. This side of the logic all 779 /* Deal with combined mode hack. This side of the logic all
1043 goes away once the combined mode hack is killed in 2.6.21 */ 780 goes away once the combined mode hack is killed in 2.6.21 */
1044 if (!request_region(ATA_PRIMARY_CMD, 8, "libata")) { 781 if (!devm_request_region(dev, ATA_PRIMARY_CMD, 8, "libata")) {
1045 struct resource *conflict, res; 782 struct resource *conflict, res;
1046 res.start = ATA_PRIMARY_CMD; 783 res.start = ATA_PRIMARY_CMD;
1047 res.end = ATA_PRIMARY_CMD + 8 - 1; 784 res.end = ATA_PRIMARY_CMD + 8 - 1;
@@ -1051,7 +788,7 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
1051 if (!strcmp(conflict->name, "libata")) 788 if (!strcmp(conflict->name, "libata"))
1052 legacy_mode |= ATA_PORT_PRIMARY; 789 legacy_mode |= ATA_PORT_PRIMARY;
1053 else { 790 else {
1054 disable_dev_on_err = 0; 791 pcim_pin_device(pdev);
1055 printk(KERN_WARNING "ata: 0x%0X IDE port busy\n" \ 792 printk(KERN_WARNING "ata: 0x%0X IDE port busy\n" \
1056 "ata: conflict with %s\n", 793 "ata: conflict with %s\n",
1057 ATA_PRIMARY_CMD, 794 ATA_PRIMARY_CMD,
@@ -1060,7 +797,7 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
1060 } else 797 } else
1061 legacy_mode |= ATA_PORT_PRIMARY; 798 legacy_mode |= ATA_PORT_PRIMARY;
1062 799
1063 if (!request_region(ATA_SECONDARY_CMD, 8, "libata")) { 800 if (!devm_request_region(dev, ATA_SECONDARY_CMD, 8, "libata")) {
1064 struct resource *conflict, res; 801 struct resource *conflict, res;
1065 res.start = ATA_SECONDARY_CMD; 802 res.start = ATA_SECONDARY_CMD;
1066 res.end = ATA_SECONDARY_CMD + 8 - 1; 803 res.end = ATA_SECONDARY_CMD + 8 - 1;
@@ -1070,7 +807,7 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
1070 if (!strcmp(conflict->name, "libata")) 807 if (!strcmp(conflict->name, "libata"))
1071 legacy_mode |= ATA_PORT_SECONDARY; 808 legacy_mode |= ATA_PORT_SECONDARY;
1072 else { 809 else {
1073 disable_dev_on_err = 0; 810 pcim_pin_device(pdev);
1074 printk(KERN_WARNING "ata: 0x%X IDE port busy\n" \ 811 printk(KERN_WARNING "ata: 0x%X IDE port busy\n" \
1075 "ata: conflict with %s\n", 812 "ata: conflict with %s\n",
1076 ATA_SECONDARY_CMD, 813 ATA_SECONDARY_CMD,
@@ -1090,16 +827,16 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
1090 /* we have legacy mode, but all ports are unavailable */ 827 /* we have legacy mode, but all ports are unavailable */
1091 if (legacy_mode == (1 << 3)) { 828 if (legacy_mode == (1 << 3)) {
1092 rc = -EBUSY; 829 rc = -EBUSY;
1093 goto err_out_regions; 830 goto err_out;
1094 } 831 }
1095 832
1096 /* TODO: If we get no DMA mask we should fall back to PIO */ 833 /* TODO: If we get no DMA mask we should fall back to PIO */
1097 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); 834 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
1098 if (rc) 835 if (rc)
1099 goto err_out_regions; 836 goto err_out;
1100 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); 837 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
1101 if (rc) 838 if (rc)
1102 goto err_out_regions; 839 goto err_out;
1103 840
1104 if (legacy_mode) { 841 if (legacy_mode) {
1105 probe_ent = ata_pci_init_legacy_port(pdev, port, legacy_mode); 842 probe_ent = ata_pci_init_legacy_port(pdev, port, legacy_mode);
@@ -1111,40 +848,22 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
1111 } 848 }
1112 if (!probe_ent) { 849 if (!probe_ent) {
1113 rc = -ENOMEM; 850 rc = -ENOMEM;
1114 goto err_out_regions; 851 goto err_out;
1115 } 852 }
1116 853
1117 pci_set_master(pdev); 854 pci_set_master(pdev);
1118 855
1119 if (!ata_device_add(probe_ent)) { 856 if (!ata_device_add(probe_ent)) {
1120 rc = -ENODEV; 857 rc = -ENODEV;
1121 goto err_out_ent; 858 goto err_out;
1122 } 859 }
1123 860
1124 kfree(probe_ent); 861 devm_kfree(dev, probe_ent);
1125 862 devres_remove_group(dev, NULL);
1126 return 0; 863 return 0;
1127 864
1128err_out_ent:
1129 kfree(probe_ent);
1130err_out_regions:
1131 /* All this conditional stuff is needed for the combined mode hack
1132 until 2.6.21 when it can go */
1133 if (legacy_mode) {
1134 pci_release_region(pdev, 4);
1135 if (legacy_mode & ATA_PORT_PRIMARY) {
1136 release_region(ATA_PRIMARY_CMD, 8);
1137 pci_release_region(pdev, 1);
1138 }
1139 if (legacy_mode & ATA_PORT_SECONDARY) {
1140 release_region(ATA_SECONDARY_CMD, 8);
1141 pci_release_region(pdev, 3);
1142 }
1143 } else
1144 pci_release_regions(pdev);
1145err_out: 865err_out:
1146 if (disable_dev_on_err) 866 devres_release_group(dev, NULL);
1147 pci_disable_device(pdev);
1148 return rc; 867 return rc;
1149} 868}
1150 869
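The conversion above leans on managed "devres" groups: every resource claimed between devres_open_group() and the matching remove/release call is freed automatically, which is what lets the long manual error ladder collapse into a single err_out label. A minimal sketch of the pattern, with a hypothetical probe function wrapped around the real devres/pcim calls:

#include <linux/device.h>
#include <linux/pci.h>

static int example_probe(struct pci_dev *pdev)	/* hypothetical driver */
{
	struct device *dev = &pdev->dev;
	int rc;

	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		return -ENOMEM;

	rc = pcim_enable_device(pdev);	/* managed enable */
	if (rc)
		goto err_out;

	/* ... pcim_iomap(), devm_kzalloc(), other managed claims ... */

	devres_remove_group(dev, NULL);	/* success: keep everything */
	return 0;

err_out:
	devres_release_group(dev, NULL);	/* failure: free the group */
	return rc;
}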
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 81ae41d5f23f..06ccf230e3c2 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -29,7 +29,6 @@
29#define __LIBATA_H__ 29#define __LIBATA_H__
30 30
31#define DRV_NAME "libata" 31#define DRV_NAME "libata"
32#define DRV_VERSION "2.00" /* must be exactly four chars */
33 32
34struct ata_scsi_args { 33struct ata_scsi_args {
35 struct ata_device *dev; 34 struct ata_device *dev;
@@ -136,4 +135,7 @@ extern void ata_qc_schedule_eh(struct ata_queued_cmd *qc);
136/* libata-sff.c */ 135/* libata-sff.c */
137extern u8 ata_irq_on(struct ata_port *ap); 136extern u8 ata_irq_on(struct ata_port *ap);
138 137
138/* pata_sis.c */
139extern struct ata_port_info sis_info133;
140
139#endif /* __LIBATA_H__ */ 141#endif /* __LIBATA_H__ */
diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
index c5d61d1911a5..ab44d18850f6 100644
--- a/drivers/ata/pata_ali.c
+++ b/drivers/ata/pata_ali.c
@@ -153,11 +153,11 @@ static void ali_early_error_handler(struct ata_port *ap)
153 153
154static unsigned long ali_20_filter(const struct ata_port *ap, struct ata_device *adev, unsigned long mask) 154static unsigned long ali_20_filter(const struct ata_port *ap, struct ata_device *adev, unsigned long mask)
155{ 155{
156 char model_num[40]; 156 char model_num[ATA_ID_PROD_LEN + 1];
157 /* No DMA on anything but a disk for now */ 157 /* No DMA on anything but a disk for now */
158 if (adev->class != ATA_DEV_ATA) 158 if (adev->class != ATA_DEV_ATA)
159 mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); 159 mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
160 ata_id_string(adev->id, model_num, ATA_ID_PROD_OFS, sizeof(model_num)); 160 ata_id_c_string(adev->id, model_num, ATA_ID_PROD, sizeof(model_num));
161 if (strstr(model_num, "WDC")) 161 if (strstr(model_num, "WDC"))
162 return mask &= ~ATA_MASK_UDMA; 162 return mask &= ~ATA_MASK_UDMA;
163 return ata_pci_default_filter(ap, adev, mask); 163 return ata_pci_default_filter(ap, adev, mask);
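ata_id_c_string() is the reason the filter can now use plain strstr(): unlike ata_id_string(), it strips the trailing blanks the ATA spec pads IDENTIFY strings with and NUL-terminates the result. An illustrative fragment (the helper name is made up; the libata calls are real):

#include <linux/ata.h>		/* ATA_ID_PROD, ATA_ID_PROD_LEN */
#include <linux/libata.h>	/* ata_id_c_string */
#include <linux/string.h>

/* sketch: build a matchable C string from IDENTIFY data */
static int id_is_wdc(const u16 *id)
{
	unsigned char model[ATA_ID_PROD_LEN + 1];	/* +1 for the NUL */

	ata_id_c_string(id, model, ATA_ID_PROD, sizeof(model));
	return strstr((const char *)model, "WDC") != NULL;
}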
@@ -370,14 +370,14 @@ static struct ata_port_operations ali_early_port_ops = {
370 .qc_prep = ata_qc_prep, 370 .qc_prep = ata_qc_prep,
371 .qc_issue = ata_qc_issue_prot, 371 .qc_issue = ata_qc_issue_prot,
372 372
373 .data_xfer = ata_pio_data_xfer, 373 .data_xfer = ata_data_xfer,
374 374
375 .irq_handler = ata_interrupt, 375 .irq_handler = ata_interrupt,
376 .irq_clear = ata_bmdma_irq_clear, 376 .irq_clear = ata_bmdma_irq_clear,
377 .irq_on = ata_irq_on,
378 .irq_ack = ata_irq_ack,
377 379
378 .port_start = ata_port_start, 380 .port_start = ata_port_start,
379 .port_stop = ata_port_stop,
380 .host_stop = ata_host_stop
381}; 381};
382 382
383/* 383/*
@@ -411,14 +411,14 @@ static struct ata_port_operations ali_20_port_ops = {
411 .qc_prep = ata_qc_prep, 411 .qc_prep = ata_qc_prep,
412 .qc_issue = ata_qc_issue_prot, 412 .qc_issue = ata_qc_issue_prot,
413 413
414 .data_xfer = ata_pio_data_xfer, 414 .data_xfer = ata_data_xfer,
415 415
416 .irq_handler = ata_interrupt, 416 .irq_handler = ata_interrupt,
417 .irq_clear = ata_bmdma_irq_clear, 417 .irq_clear = ata_bmdma_irq_clear,
418 .irq_on = ata_irq_on,
419 .irq_ack = ata_irq_ack,
418 420
419 .port_start = ata_port_start, 421 .port_start = ata_port_start,
420 .port_stop = ata_port_stop,
421 .host_stop = ata_host_stop
422}; 422};
423 423
424/* 424/*
@@ -449,14 +449,14 @@ static struct ata_port_operations ali_c2_port_ops = {
449 .qc_prep = ata_qc_prep, 449 .qc_prep = ata_qc_prep,
450 .qc_issue = ata_qc_issue_prot, 450 .qc_issue = ata_qc_issue_prot,
451 451
452 .data_xfer = ata_pio_data_xfer, 452 .data_xfer = ata_data_xfer,
453 453
454 .irq_handler = ata_interrupt, 454 .irq_handler = ata_interrupt,
455 .irq_clear = ata_bmdma_irq_clear, 455 .irq_clear = ata_bmdma_irq_clear,
456 .irq_on = ata_irq_on,
457 .irq_ack = ata_irq_ack,
456 458
457 .port_start = ata_port_start, 459 .port_start = ata_port_start,
458 .port_stop = ata_port_stop,
459 .host_stop = ata_host_stop
460}; 460};
461 461
462/* 462/*
@@ -486,14 +486,14 @@ static struct ata_port_operations ali_c5_port_ops = {
486 .qc_prep = ata_qc_prep, 486 .qc_prep = ata_qc_prep,
487 .qc_issue = ata_qc_issue_prot, 487 .qc_issue = ata_qc_issue_prot,
488 488
489 .data_xfer = ata_pio_data_xfer, 489 .data_xfer = ata_data_xfer,
490 490
491 .irq_handler = ata_interrupt, 491 .irq_handler = ata_interrupt,
492 .irq_clear = ata_bmdma_irq_clear, 492 .irq_clear = ata_bmdma_irq_clear,
493 .irq_on = ata_irq_on,
494 .irq_ack = ata_irq_ack,
493 495
494 .port_start = ata_port_start, 496 .port_start = ata_port_start,
495 .port_stop = ata_port_stop,
496 .host_stop = ata_host_stop
497}; 497};
498 498
499 499
@@ -504,7 +504,7 @@ static struct ata_port_operations ali_c5_port_ops = {
504 * Perform the setup on the device that must be done both at boot 504 * Perform the setup on the device that must be done both at boot
505 * and at resume time. 505 * and at resume time.
506 */ 506 */
507 507
508static void ali_init_chipset(struct pci_dev *pdev) 508static void ali_init_chipset(struct pci_dev *pdev)
509{ 509{
510 u8 rev, tmp; 510 u8 rev, tmp;
@@ -655,7 +655,7 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
655 port_info[0] = port_info[1] = &info_c5; 655 port_info[0] = port_info[1] = &info_c5;
656 656
657 ali_init_chipset(pdev); 657 ali_init_chipset(pdev);
658 658
659 isa_bridge = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL); 659 isa_bridge = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
660 if (isa_bridge && rev >= 0x20 && rev < 0xC2) { 660 if (isa_bridge && rev >= 0x20 && rev < 0xC2) {
661 /* Are we paired with a UDMA capable chip */ 661 /* Are we paired with a UDMA capable chip */
diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
index a6b330089f22..619e44b04032 100644
--- a/drivers/ata/pata_amd.c
+++ b/drivers/ata/pata_amd.c
@@ -362,14 +362,14 @@ static struct ata_port_operations amd33_port_ops = {
362 .qc_prep = ata_qc_prep, 362 .qc_prep = ata_qc_prep,
363 .qc_issue = ata_qc_issue_prot, 363 .qc_issue = ata_qc_issue_prot,
364 364
365 .data_xfer = ata_pio_data_xfer, 365 .data_xfer = ata_data_xfer,
366 366
367 .irq_handler = ata_interrupt, 367 .irq_handler = ata_interrupt,
368 .irq_clear = ata_bmdma_irq_clear, 368 .irq_clear = ata_bmdma_irq_clear,
369 .irq_on = ata_irq_on,
370 .irq_ack = ata_irq_ack,
369 371
370 .port_start = ata_port_start, 372 .port_start = ata_port_start,
371 .port_stop = ata_port_stop,
372 .host_stop = ata_host_stop
373}; 373};
374 374
375static struct ata_port_operations amd66_port_ops = { 375static struct ata_port_operations amd66_port_ops = {
@@ -396,14 +396,14 @@ static struct ata_port_operations amd66_port_ops = {
396 .qc_prep = ata_qc_prep, 396 .qc_prep = ata_qc_prep,
397 .qc_issue = ata_qc_issue_prot, 397 .qc_issue = ata_qc_issue_prot,
398 398
399 .data_xfer = ata_pio_data_xfer, 399 .data_xfer = ata_data_xfer,
400 400
401 .irq_handler = ata_interrupt, 401 .irq_handler = ata_interrupt,
402 .irq_clear = ata_bmdma_irq_clear, 402 .irq_clear = ata_bmdma_irq_clear,
403 .irq_on = ata_irq_on,
404 .irq_ack = ata_irq_ack,
403 405
404 .port_start = ata_port_start, 406 .port_start = ata_port_start,
405 .port_stop = ata_port_stop,
406 .host_stop = ata_host_stop
407}; 407};
408 408
409static struct ata_port_operations amd100_port_ops = { 409static struct ata_port_operations amd100_port_ops = {
@@ -430,14 +430,14 @@ static struct ata_port_operations amd100_port_ops = {
430 .qc_prep = ata_qc_prep, 430 .qc_prep = ata_qc_prep,
431 .qc_issue = ata_qc_issue_prot, 431 .qc_issue = ata_qc_issue_prot,
432 432
433 .data_xfer = ata_pio_data_xfer, 433 .data_xfer = ata_data_xfer,
434 434
435 .irq_handler = ata_interrupt, 435 .irq_handler = ata_interrupt,
436 .irq_clear = ata_bmdma_irq_clear, 436 .irq_clear = ata_bmdma_irq_clear,
437 .irq_on = ata_irq_on,
438 .irq_ack = ata_irq_ack,
437 439
438 .port_start = ata_port_start, 440 .port_start = ata_port_start,
439 .port_stop = ata_port_stop,
440 .host_stop = ata_host_stop
441}; 441};
442 442
443static struct ata_port_operations amd133_port_ops = { 443static struct ata_port_operations amd133_port_ops = {
@@ -464,14 +464,14 @@ static struct ata_port_operations amd133_port_ops = {
464 .qc_prep = ata_qc_prep, 464 .qc_prep = ata_qc_prep,
465 .qc_issue = ata_qc_issue_prot, 465 .qc_issue = ata_qc_issue_prot,
466 466
467 .data_xfer = ata_pio_data_xfer, 467 .data_xfer = ata_data_xfer,
468 468
469 .irq_handler = ata_interrupt, 469 .irq_handler = ata_interrupt,
470 .irq_clear = ata_bmdma_irq_clear, 470 .irq_clear = ata_bmdma_irq_clear,
471 .irq_on = ata_irq_on,
472 .irq_ack = ata_irq_ack,
471 473
472 .port_start = ata_port_start, 474 .port_start = ata_port_start,
473 .port_stop = ata_port_stop,
474 .host_stop = ata_host_stop
475}; 475};
476 476
477static struct ata_port_operations nv100_port_ops = { 477static struct ata_port_operations nv100_port_ops = {
@@ -498,14 +498,14 @@ static struct ata_port_operations nv100_port_ops = {
498 .qc_prep = ata_qc_prep, 498 .qc_prep = ata_qc_prep,
499 .qc_issue = ata_qc_issue_prot, 499 .qc_issue = ata_qc_issue_prot,
500 500
501 .data_xfer = ata_pio_data_xfer, 501 .data_xfer = ata_data_xfer,
502 502
503 .irq_handler = ata_interrupt, 503 .irq_handler = ata_interrupt,
504 .irq_clear = ata_bmdma_irq_clear, 504 .irq_clear = ata_bmdma_irq_clear,
505 .irq_on = ata_irq_on,
506 .irq_ack = ata_irq_ack,
505 507
506 .port_start = ata_port_start, 508 .port_start = ata_port_start,
507 .port_stop = ata_port_stop,
508 .host_stop = ata_host_stop
509}; 509};
510 510
511static struct ata_port_operations nv133_port_ops = { 511static struct ata_port_operations nv133_port_ops = {
@@ -532,14 +532,14 @@ static struct ata_port_operations nv133_port_ops = {
532 .qc_prep = ata_qc_prep, 532 .qc_prep = ata_qc_prep,
533 .qc_issue = ata_qc_issue_prot, 533 .qc_issue = ata_qc_issue_prot,
534 534
535 .data_xfer = ata_pio_data_xfer, 535 .data_xfer = ata_data_xfer,
536 536
537 .irq_handler = ata_interrupt, 537 .irq_handler = ata_interrupt,
538 .irq_clear = ata_bmdma_irq_clear, 538 .irq_clear = ata_bmdma_irq_clear,
539 .irq_on = ata_irq_on,
540 .irq_ack = ata_irq_ack,
539 541
540 .port_start = ata_port_start, 542 .port_start = ata_port_start,
541 .port_stop = ata_port_stop,
542 .host_stop = ata_host_stop
543}; 543};
544 544
545static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id) 545static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
index 37bc1323bda7..21c30282717c 100644
--- a/drivers/ata/pata_artop.c
+++ b/drivers/ata/pata_artop.c
@@ -341,14 +341,14 @@ static const struct ata_port_operations artop6210_ops = {
341 .qc_prep = ata_qc_prep, 341 .qc_prep = ata_qc_prep,
342 .qc_issue = ata_qc_issue_prot, 342 .qc_issue = ata_qc_issue_prot,
343 343
344 .data_xfer = ata_pio_data_xfer, 344 .data_xfer = ata_data_xfer,
345 345
346 .irq_handler = ata_interrupt, 346 .irq_handler = ata_interrupt,
347 .irq_clear = ata_bmdma_irq_clear, 347 .irq_clear = ata_bmdma_irq_clear,
348 .irq_on = ata_irq_on,
349 .irq_ack = ata_irq_ack,
348 350
349 .port_start = ata_port_start, 351 .port_start = ata_port_start,
350 .port_stop = ata_port_stop,
351 .host_stop = ata_host_stop,
352}; 352};
353 353
354static const struct ata_port_operations artop6260_ops = { 354static const struct ata_port_operations artop6260_ops = {
@@ -373,14 +373,14 @@ static const struct ata_port_operations artop6260_ops = {
373 .bmdma_status = ata_bmdma_status, 373 .bmdma_status = ata_bmdma_status,
374 .qc_prep = ata_qc_prep, 374 .qc_prep = ata_qc_prep,
375 .qc_issue = ata_qc_issue_prot, 375 .qc_issue = ata_qc_issue_prot,
376 .data_xfer = ata_pio_data_xfer, 376 .data_xfer = ata_data_xfer,
377 377
378 .irq_handler = ata_interrupt, 378 .irq_handler = ata_interrupt,
379 .irq_clear = ata_bmdma_irq_clear, 379 .irq_clear = ata_bmdma_irq_clear,
380 .irq_on = ata_irq_on,
381 .irq_ack = ata_irq_ack,
380 382
381 .port_start = ata_port_start, 383 .port_start = ata_port_start,
382 .port_stop = ata_port_stop,
383 .host_stop = ata_host_stop,
384}; 384};
385 385
386 386
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index 6f6672c55131..c3eb40c91c80 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -36,15 +36,22 @@ enum {
36static int atiixp_pre_reset(struct ata_port *ap) 36static int atiixp_pre_reset(struct ata_port *ap)
37{ 37{
38 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 38 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
39 static struct pci_bits atiixp_enable_bits[] = { 39 static const struct pci_bits atiixp_enable_bits[] = {
40 { 0x48, 1, 0x01, 0x00 }, 40 { 0x48, 1, 0x01, 0x00 },
41 { 0x48, 1, 0x08, 0x00 } 41 { 0x48, 1, 0x08, 0x00 }
42 }; 42 };
43 u8 udma;
43 44
44 if (!pci_test_config_bits(pdev, &atiixp_enable_bits[ap->port_no])) 45 if (!pci_test_config_bits(pdev, &atiixp_enable_bits[ap->port_no]))
45 return -ENOENT; 46 return -ENOENT;
46 47
47 ap->cbl = ATA_CBL_PATA80; 48 /* Hack from drivers/ide/pci. Really we want to know how to do the
 49 raw detection, not follow the BIOS's mode guess */
50 pci_read_config_byte(pdev, ATIIXP_IDE_UDMA_MODE + ap->port_no, &udma);
51 if ((udma & 0x07) >= 0x04 || (udma & 0x70) >= 0x40)
52 ap->cbl = ATA_CBL_PATA80;
53 else
54 ap->cbl = ATA_CBL_PATA40;
48 return ata_std_prereset(ap); 55 return ata_std_prereset(ap);
49} 56}
50 57
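The new pre_reset reads the BIOS-programmed UDMA mode byte for the channel (one byte per channel, master mode in the low nibble, slave mode in the high nibble) and assumes an 80-wire cable whenever either device was left at UDMA4 or faster, since a sane BIOS will not pick those modes on a 40-wire cable. A worked example of the nibble test, illustrative only:

/* decode one ATIIXP UDMA-mode byte (sketch, not driver code) */
static int udma_byte_implies_80wire(unsigned char udma)
{
	/* low nibble: master's UDMA mode; high nibble: slave's */
	return (udma & 0x07) >= 0x04 || (udma & 0x70) >= 0x40;
}

/* e.g. udma == 0x45: master UDMA5, slave UDMA4 -> 80-wire cable */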
@@ -245,14 +252,14 @@ static struct ata_port_operations atiixp_port_ops = {
245 .qc_prep = ata_qc_prep, 252 .qc_prep = ata_qc_prep,
246 .qc_issue = ata_qc_issue_prot, 253 .qc_issue = ata_qc_issue_prot,
247 254
248 .data_xfer = ata_pio_data_xfer, 255 .data_xfer = ata_data_xfer,
249 256
250 .irq_handler = ata_interrupt, 257 .irq_handler = ata_interrupt,
251 .irq_clear = ata_bmdma_irq_clear, 258 .irq_clear = ata_bmdma_irq_clear,
259 .irq_on = ata_irq_on,
260 .irq_ack = ata_irq_ack,
252 261
253 .port_start = ata_port_start, 262 .port_start = ata_port_start,
254 .port_stop = ata_port_stop,
255 .host_stop = ata_host_stop
256}; 263};
257 264
258static int atiixp_init_one(struct pci_dev *dev, const struct pci_device_id *id) 265static int atiixp_init_one(struct pci_dev *dev, const struct pci_device_id *id)
diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
index 449162cbf93e..da098282b5f6 100644
--- a/drivers/ata/pata_cmd64x.c
+++ b/drivers/ata/pata_cmd64x.c
@@ -313,14 +313,14 @@ static struct ata_port_operations cmd64x_port_ops = {
313 .qc_prep = ata_qc_prep, 313 .qc_prep = ata_qc_prep,
314 .qc_issue = ata_qc_issue_prot, 314 .qc_issue = ata_qc_issue_prot,
315 315
316 .data_xfer = ata_pio_data_xfer, 316 .data_xfer = ata_data_xfer,
317 317
318 .irq_handler = ata_interrupt, 318 .irq_handler = ata_interrupt,
319 .irq_clear = ata_bmdma_irq_clear, 319 .irq_clear = ata_bmdma_irq_clear,
320 .irq_on = ata_irq_on,
321 .irq_ack = ata_irq_ack,
320 322
321 .port_start = ata_port_start, 323 .port_start = ata_port_start,
322 .port_stop = ata_port_stop,
323 .host_stop = ata_host_stop
324}; 324};
325 325
326static struct ata_port_operations cmd646r1_port_ops = { 326static struct ata_port_operations cmd646r1_port_ops = {
@@ -347,14 +347,14 @@ static struct ata_port_operations cmd646r1_port_ops = {
347 .qc_prep = ata_qc_prep, 347 .qc_prep = ata_qc_prep,
348 .qc_issue = ata_qc_issue_prot, 348 .qc_issue = ata_qc_issue_prot,
349 349
350 .data_xfer = ata_pio_data_xfer, 350 .data_xfer = ata_data_xfer,
351 351
352 .irq_handler = ata_interrupt, 352 .irq_handler = ata_interrupt,
353 .irq_clear = ata_bmdma_irq_clear, 353 .irq_clear = ata_bmdma_irq_clear,
354 .irq_on = ata_irq_on,
355 .irq_ack = ata_irq_ack,
354 356
355 .port_start = ata_port_start, 357 .port_start = ata_port_start,
356 .port_stop = ata_port_stop,
357 .host_stop = ata_host_stop
358}; 358};
359 359
360static struct ata_port_operations cmd648_port_ops = { 360static struct ata_port_operations cmd648_port_ops = {
@@ -381,14 +381,14 @@ static struct ata_port_operations cmd648_port_ops = {
381 .qc_prep = ata_qc_prep, 381 .qc_prep = ata_qc_prep,
382 .qc_issue = ata_qc_issue_prot, 382 .qc_issue = ata_qc_issue_prot,
383 383
384 .data_xfer = ata_pio_data_xfer, 384 .data_xfer = ata_data_xfer,
385 385
386 .irq_handler = ata_interrupt, 386 .irq_handler = ata_interrupt,
387 .irq_clear = ata_bmdma_irq_clear, 387 .irq_clear = ata_bmdma_irq_clear,
388 .irq_on = ata_irq_on,
389 .irq_ack = ata_irq_ack,
388 390
389 .port_start = ata_port_start, 391 .port_start = ata_port_start,
390 .port_stop = ata_port_stop,
391 .host_stop = ata_host_stop
392}; 392};
393 393
394static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id) 394static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
index 9f165a8e032d..1ce8fcfd7826 100644
--- a/drivers/ata/pata_cs5520.c
+++ b/drivers/ata/pata_cs5520.c
@@ -99,9 +99,9 @@ static void cs5520_set_timings(struct ata_port *ap, struct ata_device *adev, int
99static void cs5520_enable_dma(struct ata_port *ap, struct ata_device *adev) 99static void cs5520_enable_dma(struct ata_port *ap, struct ata_device *adev)
100{ 100{
101 /* Set the DMA enable/disable flag */ 101 /* Set the DMA enable/disable flag */
102 u8 reg = inb(ap->ioaddr.bmdma_addr + 0x02); 102 u8 reg = ioread8(ap->ioaddr.bmdma_addr + 0x02);
103 reg |= 1<<(adev->devno + 5); 103 reg |= 1<<(adev->devno + 5);
104 outb(reg, ap->ioaddr.bmdma_addr + 0x02); 104 iowrite8(reg, ap->ioaddr.bmdma_addr + 0x02);
105} 105}
106 106
107/** 107/**
@@ -193,19 +193,20 @@ static struct ata_port_operations cs5520_port_ops = {
193 .bmdma_status = ata_bmdma_status, 193 .bmdma_status = ata_bmdma_status,
194 .qc_prep = ata_qc_prep, 194 .qc_prep = ata_qc_prep,
195 .qc_issue = ata_qc_issue_prot, 195 .qc_issue = ata_qc_issue_prot,
196 .data_xfer = ata_pio_data_xfer, 196 .data_xfer = ata_data_xfer,
197 197
198 .irq_handler = ata_interrupt, 198 .irq_handler = ata_interrupt,
199 .irq_clear = ata_bmdma_irq_clear, 199 .irq_clear = ata_bmdma_irq_clear,
200 .irq_on = ata_irq_on,
201 .irq_ack = ata_irq_ack,
200 202
201 .port_start = ata_port_start, 203 .port_start = ata_port_start,
202 .port_stop = ata_port_stop,
203 .host_stop = ata_host_stop,
204}; 204};
205 205
206static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_device_id *id) 206static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_device_id *id)
207{ 207{
208 u8 pcicfg; 208 u8 pcicfg;
 209 void __iomem *iomap[5];
209 static struct ata_probe_ent probe[2]; 210 static struct ata_probe_ent probe[2];
210 int ports = 0; 211 int ports = 0;
211 212
@@ -236,6 +237,16 @@ static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_devic
236 return -ENODEV; 237 return -ENODEV;
237 } 238 }
238 239
240 /* Map IO ports */
241 iomap[0] = devm_ioport_map(&dev->dev, 0x1F0, 8);
242 iomap[1] = devm_ioport_map(&dev->dev, 0x3F6, 1);
243 iomap[2] = devm_ioport_map(&dev->dev, 0x170, 8);
244 iomap[3] = devm_ioport_map(&dev->dev, 0x376, 1);
245 iomap[4] = pcim_iomap(dev, 2, 0);
246
247 if (!iomap[0] || !iomap[1] || !iomap[2] || !iomap[3] || !iomap[4])
248 return -ENOMEM;
249
239 /* We have to do our own plumbing as the PCI setup for this 250 /* We have to do our own plumbing as the PCI setup for this
240 chipset is non-standard so we can't punt to the libata code */ 251 chipset is non-standard so we can't punt to the libata code */
241 252
@@ -249,10 +260,10 @@ static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_devic
249 probe[0].irq_flags = 0; 260 probe[0].irq_flags = 0;
250 probe[0].port_flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST; 261 probe[0].port_flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST;
251 probe[0].n_ports = 1; 262 probe[0].n_ports = 1;
252 probe[0].port[0].cmd_addr = 0x1F0; 263 probe[0].port[0].cmd_addr = iomap[0];
253 probe[0].port[0].ctl_addr = 0x3F6; 264 probe[0].port[0].ctl_addr = iomap[1];
254 probe[0].port[0].altstatus_addr = 0x3F6; 265 probe[0].port[0].altstatus_addr = iomap[1];
255 probe[0].port[0].bmdma_addr = pci_resource_start(dev, 2); 266 probe[0].port[0].bmdma_addr = iomap[4];
256 267
257 /* The secondary lurks at different addresses but is otherwise 268 /* The secondary lurks at different addresses but is otherwise
258 the same beastie */ 269 the same beastie */
@@ -260,10 +271,10 @@ static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_devic
260 probe[1] = probe[0]; 271 probe[1] = probe[0];
261 INIT_LIST_HEAD(&probe[1].node); 272 INIT_LIST_HEAD(&probe[1].node);
262 probe[1].irq = 15; 273 probe[1].irq = 15;
263 probe[1].port[0].cmd_addr = 0x170; 274 probe[1].port[0].cmd_addr = iomap[2];
264 probe[1].port[0].ctl_addr = 0x376; 275 probe[1].port[0].ctl_addr = iomap[3];
265 probe[1].port[0].altstatus_addr = 0x376; 276 probe[1].port[0].altstatus_addr = iomap[3];
266 probe[1].port[0].bmdma_addr = pci_resource_start(dev, 2) + 8; 277 probe[1].port[0].bmdma_addr = iomap[4] + 8;
267 278
268 /* Let libata fill in the port details */ 279 /* Let libata fill in the port details */
269 ata_std_ports(&probe[0].port[0]); 280 ata_std_ports(&probe[0].port[0]);
@@ -294,7 +305,7 @@ static void __devexit cs5520_remove_one(struct pci_dev *pdev)
294 struct device *dev = pci_dev_to_dev(pdev); 305 struct device *dev = pci_dev_to_dev(pdev);
295 struct ata_host *host = dev_get_drvdata(dev); 306 struct ata_host *host = dev_get_drvdata(dev);
296 307
297 ata_host_remove(host); 308 ata_host_detach(host);
298 dev_set_drvdata(dev, NULL); 309 dev_set_drvdata(dev, NULL);
299} 310}
300 311
@@ -305,7 +316,7 @@ static void __devexit cs5520_remove_one(struct pci_dev *pdev)
305 * Do any reconfiguration work needed by a resume from RAM. We need 316 * Do any reconfiguration work needed by a resume from RAM. We need
306 * to restore DMA mode support on BIOSen which disabled it 317 * to restore DMA mode support on BIOSen which disabled it
307 */ 318 */
308 319
309static int cs5520_reinit_one(struct pci_dev *pdev) 320static int cs5520_reinit_one(struct pci_dev *pdev)
310{ 321{
311 u8 pcicfg; 322 u8 pcicfg;
diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c
index b1ca207e3545..3d7b7d87ec6f 100644
--- a/drivers/ata/pata_cs5530.c
+++ b/drivers/ata/pata_cs5530.c
@@ -37,6 +37,13 @@
37#define DRV_NAME "pata_cs5530" 37#define DRV_NAME "pata_cs5530"
38#define DRV_VERSION "0.7.1" 38#define DRV_VERSION "0.7.1"
39 39
40static void __iomem *cs5530_port_base(struct ata_port *ap)
41{
42 unsigned long bmdma = (unsigned long)ap->ioaddr.bmdma_addr;
43
44 return (void __iomem *)((bmdma & ~0x0F) + 0x20 + 0x10 * ap->port_no);
45}
46
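For concreteness, the arithmetic in cs5530_port_base(): the BMDMA cookie is masked back to its 16-byte-aligned BAR base, the first 0x20 bytes of that BAR are the bus-master registers, and each channel's timing block is 0x10 bytes wide. The round-trip through unsigned long relies on the ioport cookie preserving the port's low bits, which the generic iomap implementation does. A standalone check of the numbers (hypothetical addresses, plain user-space C):

#include <stdio.h>

/* reproduce cs5530_port_base()'s arithmetic on raw numbers */
int main(void)
{
	unsigned long bmdma = 0xfc08;	/* hypothetical cookie, channel 1 */
	int port_no = 1;
	unsigned long base = (bmdma & ~0x0FUL) + 0x20 + 0x10 * port_no;

	printf("timing base = 0x%lx\n", base);	/* 0xfc00+0x20+0x10 = 0xfc30 */
	return 0;
}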
40/** 47/**
41 * cs5530_set_piomode - PIO setup 48 * cs5530_set_piomode - PIO setup
42 * @ap: ATA interface 49 * @ap: ATA interface
@@ -52,19 +59,19 @@ static void cs5530_set_piomode(struct ata_port *ap, struct ata_device *adev)
52 {0x00009172, 0x00012171, 0x00020080, 0x00032010, 0x00040010}, 59 {0x00009172, 0x00012171, 0x00020080, 0x00032010, 0x00040010},
53 {0xd1329172, 0x71212171, 0x30200080, 0x20102010, 0x00100010} 60 {0xd1329172, 0x71212171, 0x30200080, 0x20102010, 0x00100010}
54 }; 61 };
55 unsigned long base = ( ap->ioaddr.bmdma_addr & ~0x0F) + 0x20 + 0x10 * ap->port_no; 62 void __iomem *base = cs5530_port_base(ap);
56 u32 tuning; 63 u32 tuning;
57 int format; 64 int format;
58 65
59 /* Find out which table to use */ 66 /* Find out which table to use */
60 tuning = inl(base + 0x04); 67 tuning = ioread32(base + 0x04);
61 format = (tuning & 0x80000000UL) ? 1 : 0; 68 format = (tuning & 0x80000000UL) ? 1 : 0;
62 69
63 /* Now load the right timing register */ 70 /* Now load the right timing register */
64 if (adev->devno) 71 if (adev->devno)
65 base += 0x08; 72 base += 0x08;
66 73
67 outl(cs5530_pio_timings[format][adev->pio_mode - XFER_PIO_0], base); 74 iowrite32(cs5530_pio_timings[format][adev->pio_mode - XFER_PIO_0], base);
68} 75}
69 76
70/** 77/**
@@ -79,12 +86,12 @@ static void cs5530_set_piomode(struct ata_port *ap, struct ata_device *adev)
79 86
80static void cs5530_set_dmamode(struct ata_port *ap, struct ata_device *adev) 87static void cs5530_set_dmamode(struct ata_port *ap, struct ata_device *adev)
81{ 88{
82 unsigned long base = ( ap->ioaddr.bmdma_addr & ~0x0F) + 0x20 + 0x10 * ap->port_no; 89 void __iomem *base = cs5530_port_base(ap);
83 u32 tuning, timing = 0; 90 u32 tuning, timing = 0;
84 u8 reg; 91 u8 reg;
85 92
86 /* Find out which table to use */ 93 /* Find out which table to use */
87 tuning = inl(base + 0x04); 94 tuning = ioread32(base + 0x04);
88 95
89 switch(adev->dma_mode) { 96 switch(adev->dma_mode) {
90 case XFER_UDMA_0: 97 case XFER_UDMA_0:
@@ -105,20 +112,20 @@ static void cs5530_set_dmamode(struct ata_port *ap, struct ata_device *adev)
105 /* Merge in the PIO format bit */ 112 /* Merge in the PIO format bit */
106 timing |= (tuning & 0x80000000UL); 113 timing |= (tuning & 0x80000000UL);
107 if (adev->devno == 0) /* Master */ 114 if (adev->devno == 0) /* Master */
108 outl(timing, base + 0x04); 115 iowrite32(timing, base + 0x04);
109 else { 116 else {
110 if (timing & 0x00100000) 117 if (timing & 0x00100000)
111 tuning |= 0x00100000; /* UDMA for both */ 118 tuning |= 0x00100000; /* UDMA for both */
112 else 119 else
113 tuning &= ~0x00100000; /* MWDMA for both */ 120 tuning &= ~0x00100000; /* MWDMA for both */
114 outl(tuning, base + 0x04); 121 iowrite32(tuning, base + 0x04);
115 outl(timing, base + 0x0C); 122 iowrite32(timing, base + 0x0C);
116 } 123 }
117 124
118 /* Set the DMA capable bit in the BMDMA area */ 125 /* Set the DMA capable bit in the BMDMA area */
119 reg = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); 126 reg = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
120 reg |= (1 << (5 + adev->devno)); 127 reg |= (1 << (5 + adev->devno));
121 outb(reg, ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); 128 iowrite8(reg, ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
122 129
123 /* Remember the last DMA setup we did */ 130 /* Remember the last DMA setup we did */
124 131
@@ -210,14 +217,14 @@ static struct ata_port_operations cs5530_port_ops = {
210 .qc_prep = ata_qc_prep, 217 .qc_prep = ata_qc_prep,
211 .qc_issue = cs5530_qc_issue_prot, 218 .qc_issue = cs5530_qc_issue_prot,
212 219
213 .data_xfer = ata_pio_data_xfer, 220 .data_xfer = ata_data_xfer,
214 221
215 .irq_handler = ata_interrupt, 222 .irq_handler = ata_interrupt,
216 .irq_clear = ata_bmdma_irq_clear, 223 .irq_clear = ata_bmdma_irq_clear,
224 .irq_on = ata_irq_on,
225 .irq_ack = ata_irq_ack,
217 226
218 .port_start = ata_port_start, 227 .port_start = ata_port_start,
219 .port_stop = ata_port_stop,
220 .host_stop = ata_host_stop
221}; 228};
222 229
223static struct dmi_system_id palmax_dmi_table[] = { 230static struct dmi_system_id palmax_dmi_table[] = {
@@ -247,7 +254,7 @@ static int cs5530_is_palmax(void)
247 * Perform the chip initialisation work that is shared between both 254 * Perform the chip initialisation work that is shared between both
248 * setup and resume paths 255 * setup and resume paths
249 */ 256 */
250 257
251static int cs5530_init_chip(void) 258static int cs5530_init_chip(void)
252{ 259{
253 struct pci_dev *master_0 = NULL, *cs5530_0 = NULL, *dev = NULL; 260 struct pci_dev *master_0 = NULL, *cs5530_0 = NULL, *dev = NULL;
@@ -357,11 +364,11 @@ static int cs5530_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
357 .port_ops = &cs5530_port_ops 364 .port_ops = &cs5530_port_ops
358 }; 365 };
359 static struct ata_port_info *port_info[2] = { &info, &info }; 366 static struct ata_port_info *port_info[2] = { &info, &info };
360 367
361 /* Chip initialisation */ 368 /* Chip initialisation */
362 if (cs5530_init_chip()) 369 if (cs5530_init_chip())
363 return -ENODEV; 370 return -ENODEV;
364 371
365 if (cs5530_is_palmax()) 372 if (cs5530_is_palmax())
366 port_info[1] = &info_palmax_secondary; 373 port_info[1] = &info_palmax_secondary;
367 374
@@ -376,7 +383,7 @@ static int cs5530_reinit_one(struct pci_dev *pdev)
376 BUG(); 383 BUG();
377 return ata_pci_device_resume(pdev); 384 return ata_pci_device_resume(pdev);
378} 385}
379 386
380static const struct pci_device_id cs5530[] = { 387static const struct pci_device_id cs5530[] = {
381 { PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5530_IDE), }, 388 { PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5530_IDE), },
382 389
diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c
index e3efec4ffc79..17bc693cc514 100644
--- a/drivers/ata/pata_cs5535.c
+++ b/drivers/ata/pata_cs5535.c
@@ -214,14 +214,14 @@ static struct ata_port_operations cs5535_port_ops = {
214 .qc_prep = ata_qc_prep, 214 .qc_prep = ata_qc_prep,
215 .qc_issue = ata_qc_issue_prot, 215 .qc_issue = ata_qc_issue_prot,
216 216
217 .data_xfer = ata_pio_data_xfer, 217 .data_xfer = ata_data_xfer,
218 218
219 .irq_handler = ata_interrupt, 219 .irq_handler = ata_interrupt,
220 .irq_clear = ata_bmdma_irq_clear, 220 .irq_clear = ata_bmdma_irq_clear,
221 .irq_on = ata_irq_on,
222 .irq_ack = ata_irq_ack,
221 223
222 .port_start = ata_port_start, 224 .port_start = ata_port_start,
223 .port_stop = ata_port_stop,
224 .host_stop = ata_host_stop
225}; 225};
226 226
227/** 227/**
diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c
index e2a95699bae7..63f48f08763d 100644
--- a/drivers/ata/pata_cypress.c
+++ b/drivers/ata/pata_cypress.c
@@ -165,14 +165,14 @@ static struct ata_port_operations cy82c693_port_ops = {
165 .qc_prep = ata_qc_prep, 165 .qc_prep = ata_qc_prep,
166 .qc_issue = ata_qc_issue_prot, 166 .qc_issue = ata_qc_issue_prot,
167 167
168 .data_xfer = ata_pio_data_xfer, 168 .data_xfer = ata_data_xfer,
169 169
170 .irq_handler = ata_interrupt, 170 .irq_handler = ata_interrupt,
171 .irq_clear = ata_bmdma_irq_clear, 171 .irq_clear = ata_bmdma_irq_clear,
172 .irq_on = ata_irq_on,
173 .irq_ack = ata_irq_ack,
172 174
173 .port_start = ata_port_start, 175 .port_start = ata_port_start,
174 .port_stop = ata_port_stop,
175 .host_stop = ata_host_stop
176}; 176};
177 177
178static int cy82c693_init_one(struct pci_dev *pdev, const struct pci_device_id *id) 178static int cy82c693_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c
index edf8a63f50af..c19b6a8a7dc6 100644
--- a/drivers/ata/pata_efar.c
+++ b/drivers/ata/pata_efar.c
@@ -261,14 +261,14 @@ static const struct ata_port_operations efar_ops = {
261 .bmdma_status = ata_bmdma_status, 261 .bmdma_status = ata_bmdma_status,
262 .qc_prep = ata_qc_prep, 262 .qc_prep = ata_qc_prep,
263 .qc_issue = ata_qc_issue_prot, 263 .qc_issue = ata_qc_issue_prot,
264 .data_xfer = ata_pio_data_xfer, 264 .data_xfer = ata_data_xfer,
265 265
266 .irq_handler = ata_interrupt, 266 .irq_handler = ata_interrupt,
267 .irq_clear = ata_bmdma_irq_clear, 267 .irq_clear = ata_bmdma_irq_clear,
268 .irq_on = ata_irq_on,
269 .irq_ack = ata_irq_ack,
268 270
269 .port_start = ata_port_start, 271 .port_start = ata_port_start,
270 .port_stop = ata_port_stop,
271 .host_stop = ata_host_stop,
272}; 272};
273 273
274 274
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
index 2663599a7c02..27d724b5eea2 100644
--- a/drivers/ata/pata_hpt366.c
+++ b/drivers/ata/pata_hpt366.c
@@ -151,23 +151,13 @@ static const char *bad_ata66_3[] = {
151 151
152static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr, const char *list[]) 152static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr, const char *list[])
153{ 153{
154 unsigned char model_num[40]; 154 unsigned char model_num[ATA_ID_PROD_LEN + 1];
155 char *s;
156 unsigned int len;
157 int i = 0; 155 int i = 0;
158 156
159 ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS, sizeof(model_num)); 157 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
160 s = &model_num[0];
161 len = strnlen(s, sizeof(model_num));
162 158
163 /* ATAPI specifies that empty space is blank-filled; remove blanks */ 159 while (list[i] != NULL) {
164 while ((len > 0) && (s[len - 1] == ' ')) { 160 if (!strcmp(list[i], model_num)) {
165 len--;
166 s[len] = 0;
167 }
168
169 while(list[i] != NULL) {
170 if (!strncmp(list[i], s, len)) {
171 printk(KERN_WARNING DRV_NAME ": %s is not supported for %s.\n", 161 printk(KERN_WARNING DRV_NAME ": %s is not supported for %s.\n",
172 modestr, list[i]); 162 modestr, list[i]);
173 return 1; 163 return 1;
@@ -232,7 +222,7 @@ static int hpt36x_pre_reset(struct ata_port *ap)
232 222
233 if (!pci_test_config_bits(pdev, &hpt36x_enable_bits[ap->port_no])) 223 if (!pci_test_config_bits(pdev, &hpt36x_enable_bits[ap->port_no]))
234 return -ENOENT; 224 return -ENOENT;
235 225
236 pci_read_config_byte(pdev, 0x5A, &ata66); 226 pci_read_config_byte(pdev, 0x5A, &ata66);
237 if (ata66 & (1 << ap->port_no)) 227 if (ata66 & (1 << ap->port_no))
238 ap->cbl = ATA_CBL_PATA40; 228 ap->cbl = ATA_CBL_PATA40;
@@ -371,14 +361,14 @@ static struct ata_port_operations hpt366_port_ops = {
371 .qc_prep = ata_qc_prep, 361 .qc_prep = ata_qc_prep,
372 .qc_issue = ata_qc_issue_prot, 362 .qc_issue = ata_qc_issue_prot,
373 363
374 .data_xfer = ata_pio_data_xfer, 364 .data_xfer = ata_data_xfer,
375 365
376 .irq_handler = ata_interrupt, 366 .irq_handler = ata_interrupt,
377 .irq_clear = ata_bmdma_irq_clear, 367 .irq_clear = ata_bmdma_irq_clear,
368 .irq_on = ata_irq_on,
369 .irq_ack = ata_irq_ack,
378 370
379 .port_start = ata_port_start, 371 .port_start = ata_port_start,
380 .port_stop = ata_port_stop,
381 .host_stop = ata_host_stop
382}; 372};
383 373
384/** 374/**
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
index dfb306057cf4..4ffc392052c0 100644
--- a/drivers/ata/pata_hpt37x.c
+++ b/drivers/ata/pata_hpt37x.c
@@ -349,24 +349,13 @@ static u32 hpt37x_find_mode(struct ata_port *ap, int speed)
349 349
350static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr, const char *list[]) 350static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr, const char *list[])
351{ 351{
352 unsigned char model_num[40]; 352 unsigned char model_num[ATA_ID_PROD_LEN + 1];
353 char *s;
354 unsigned int len;
355 int i = 0; 353 int i = 0;
356 354
357 ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS, 355 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
358 sizeof(model_num));
359 s = &model_num[0];
360 len = strnlen(s, sizeof(model_num));
361 356
362 /* ATAPI specifies that empty space is blank-filled; remove blanks */ 357 while (list[i] != NULL) {
363 while ((len > 0) && (s[len - 1] == ' ')) { 358 if (!strcmp(list[i], model_num)) {
364 len--;
365 s[len] = 0;
366 }
367
368 while(list[i] != NULL) {
369 if (!strncmp(list[i], s, len)) {
370 printk(KERN_WARNING DRV_NAME ": %s is not supported for %s.\n", 359 printk(KERN_WARNING DRV_NAME ": %s is not supported for %s.\n",
371 modestr, list[i]); 360 modestr, list[i]);
372 return 1; 361 return 1;
@@ -459,7 +448,7 @@ static int hpt37x_pre_reset(struct ata_port *ap)
459 }; 448 };
460 if (!pci_test_config_bits(pdev, &hpt37x_enable_bits[ap->port_no])) 449 if (!pci_test_config_bits(pdev, &hpt37x_enable_bits[ap->port_no]))
461 return -ENOENT; 450 return -ENOENT;
462 451
463 pci_read_config_byte(pdev, 0x5B, &scr2); 452 pci_read_config_byte(pdev, 0x5B, &scr2);
464 pci_write_config_byte(pdev, 0x5B, scr2 & ~0x01); 453 pci_write_config_byte(pdev, 0x5B, scr2 & ~0x01);
465 /* Cable register now active */ 454 /* Cable register now active */
@@ -504,7 +493,7 @@ static int hpt374_pre_reset(struct ata_port *ap)
504 493
505 if (!pci_test_config_bits(pdev, &hpt37x_enable_bits[ap->port_no])) 494 if (!pci_test_config_bits(pdev, &hpt37x_enable_bits[ap->port_no]))
506 return -ENOENT; 495 return -ENOENT;
507 496
508 /* Do the extra channel work */ 497 /* Do the extra channel work */
509 pci_read_config_word(pdev, 0x52, &mcr3); 498 pci_read_config_word(pdev, 0x52, &mcr3);
510 pci_read_config_word(pdev, 0x56, &mcr6); 499 pci_read_config_word(pdev, 0x56, &mcr6);
@@ -645,24 +634,24 @@ static void hpt370_bmdma_stop(struct ata_queued_cmd *qc)
645{ 634{
646 struct ata_port *ap = qc->ap; 635 struct ata_port *ap = qc->ap;
647 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 636 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
648 u8 dma_stat = inb(ap->ioaddr.bmdma_addr + 2); 637 u8 dma_stat = ioread8(ap->ioaddr.bmdma_addr + 2);
649 u8 dma_cmd; 638 u8 dma_cmd;
650 unsigned long bmdma = ap->ioaddr.bmdma_addr; 639 void __iomem *bmdma = ap->ioaddr.bmdma_addr;
651 640
652 if (dma_stat & 0x01) { 641 if (dma_stat & 0x01) {
653 udelay(20); 642 udelay(20);
654 dma_stat = inb(bmdma + 2); 643 dma_stat = ioread8(bmdma + 2);
655 } 644 }
656 if (dma_stat & 0x01) { 645 if (dma_stat & 0x01) {
657 /* Clear the engine */ 646 /* Clear the engine */
658 pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37); 647 pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37);
659 udelay(10); 648 udelay(10);
660 /* Stop DMA */ 649 /* Stop DMA */
661 dma_cmd = inb(bmdma ); 650 dma_cmd = ioread8(bmdma );
662 outb(dma_cmd & 0xFE, bmdma); 651 iowrite8(dma_cmd & 0xFE, bmdma);
663 /* Clear Error */ 652 /* Clear Error */
664 dma_stat = inb(bmdma + 2); 653 dma_stat = ioread8(bmdma + 2);
665 outb(dma_stat | 0x06 , bmdma + 2); 654 iowrite8(dma_stat | 0x06 , bmdma + 2);
666 /* Clear the engine */ 655 /* Clear the engine */
667 pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37); 656 pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37);
668 udelay(10); 657 udelay(10);
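After this conversion ap->ioaddr.bmdma_addr is a void __iomem * cookie, so the same ioread8()/iowrite8() accessors work whether the BAR was mapped as I/O ports or MMIO. The error-clearing core of the sequence above, reduced to a hedged sketch (offsets as in the hunk; not code from the patch):

#include <linux/io.h>
#include <linux/types.h>

/* illustrative: stop the BMDMA engine and clear its sticky bits */
static void bmdma_stop_and_clear(void __iomem *bmdma)
{
	u8 cmd = ioread8(bmdma);		/* command reg at offset 0 */

	iowrite8(cmd & 0xFE, bmdma);		/* drop the start bit */
	/* status reg at offset 2: error/interrupt bits clear on write */
	iowrite8(ioread8(bmdma + 2) | 0x06, bmdma + 2);
}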
@@ -807,14 +796,14 @@ static struct ata_port_operations hpt370_port_ops = {
807 .qc_prep = ata_qc_prep, 796 .qc_prep = ata_qc_prep,
808 .qc_issue = ata_qc_issue_prot, 797 .qc_issue = ata_qc_issue_prot,
809 798
810 .data_xfer = ata_pio_data_xfer, 799 .data_xfer = ata_data_xfer,
811 800
812 .irq_handler = ata_interrupt, 801 .irq_handler = ata_interrupt,
813 .irq_clear = ata_bmdma_irq_clear, 802 .irq_clear = ata_bmdma_irq_clear,
803 .irq_on = ata_irq_on,
804 .irq_ack = ata_irq_ack,
814 805
815 .port_start = ata_port_start, 806 .port_start = ata_port_start,
816 .port_stop = ata_port_stop,
817 .host_stop = ata_host_stop
818}; 807};
819 808
820/* 809/*
@@ -846,14 +835,14 @@ static struct ata_port_operations hpt370a_port_ops = {
846 .qc_prep = ata_qc_prep, 835 .qc_prep = ata_qc_prep,
847 .qc_issue = ata_qc_issue_prot, 836 .qc_issue = ata_qc_issue_prot,
848 837
849 .data_xfer = ata_pio_data_xfer, 838 .data_xfer = ata_data_xfer,
850 839
851 .irq_handler = ata_interrupt, 840 .irq_handler = ata_interrupt,
852 .irq_clear = ata_bmdma_irq_clear, 841 .irq_clear = ata_bmdma_irq_clear,
842 .irq_on = ata_irq_on,
843 .irq_ack = ata_irq_ack,
853 844
854 .port_start = ata_port_start, 845 .port_start = ata_port_start,
855 .port_stop = ata_port_stop,
856 .host_stop = ata_host_stop
857}; 846};
858 847
859/* 848/*
@@ -886,14 +875,14 @@ static struct ata_port_operations hpt372_port_ops = {
886 .qc_prep = ata_qc_prep, 875 .qc_prep = ata_qc_prep,
887 .qc_issue = ata_qc_issue_prot, 876 .qc_issue = ata_qc_issue_prot,
888 877
889 .data_xfer = ata_pio_data_xfer, 878 .data_xfer = ata_data_xfer,
890 879
891 .irq_handler = ata_interrupt, 880 .irq_handler = ata_interrupt,
892 .irq_clear = ata_bmdma_irq_clear, 881 .irq_clear = ata_bmdma_irq_clear,
882 .irq_on = ata_irq_on,
883 .irq_ack = ata_irq_ack,
893 884
894 .port_start = ata_port_start, 885 .port_start = ata_port_start,
895 .port_stop = ata_port_stop,
896 .host_stop = ata_host_stop
897}; 886};
898 887
899/* 888/*
@@ -926,14 +915,14 @@ static struct ata_port_operations hpt374_port_ops = {
926 .qc_prep = ata_qc_prep, 915 .qc_prep = ata_qc_prep,
927 .qc_issue = ata_qc_issue_prot, 916 .qc_issue = ata_qc_issue_prot,
928 917
929 .data_xfer = ata_pio_data_xfer, 918 .data_xfer = ata_data_xfer,
930 919
931 .irq_handler = ata_interrupt, 920 .irq_handler = ata_interrupt,
932 .irq_clear = ata_bmdma_irq_clear, 921 .irq_clear = ata_bmdma_irq_clear,
922 .irq_on = ata_irq_on,
923 .irq_ack = ata_irq_ack,
933 924
934 .port_start = ata_port_start, 925 .port_start = ata_port_start,
935 .port_stop = ata_port_stop,
936 .host_stop = ata_host_stop
937}; 926};
938 927
939/** 928/**
diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
index 886fab9aa62c..65f2e180e7fa 100644
--- a/drivers/ata/pata_hpt3x2n.c
+++ b/drivers/ata/pata_hpt3x2n.c
@@ -263,26 +263,26 @@ static void hpt3x2n_bmdma_stop(struct ata_queued_cmd *qc)
263 263
264static void hpt3x2n_set_clock(struct ata_port *ap, int source) 264static void hpt3x2n_set_clock(struct ata_port *ap, int source)
265{ 265{
266 unsigned long bmdma = ap->ioaddr.bmdma_addr; 266 void __iomem *bmdma = ap->ioaddr.bmdma_addr;
267 267
268 /* Tristate the bus */ 268 /* Tristate the bus */
269 outb(0x80, bmdma+0x73); 269 iowrite8(0x80, bmdma+0x73);
270 outb(0x80, bmdma+0x77); 270 iowrite8(0x80, bmdma+0x77);
271 271
272 /* Switch clock and reset channels */ 272 /* Switch clock and reset channels */
273 outb(source, bmdma+0x7B); 273 iowrite8(source, bmdma+0x7B);
274 outb(0xC0, bmdma+0x79); 274 iowrite8(0xC0, bmdma+0x79);
275 275
276 /* Reset state machines */ 276 /* Reset state machines */
277 outb(0x37, bmdma+0x70); 277 iowrite8(0x37, bmdma+0x70);
278 outb(0x37, bmdma+0x74); 278 iowrite8(0x37, bmdma+0x74);
279 279
280 /* Complete reset */ 280 /* Complete reset */
281 outb(0x00, bmdma+0x79); 281 iowrite8(0x00, bmdma+0x79);
282 282
283 /* Reconnect channels to bus */ 283 /* Reconnect channels to bus */
284 outb(0x00, bmdma+0x73); 284 iowrite8(0x00, bmdma+0x73);
285 outb(0x00, bmdma+0x77); 285 iowrite8(0x00, bmdma+0x77);
286} 286}
287 287
288/* Check if our partner interface is busy */ 288/* Check if our partner interface is busy */
@@ -373,14 +373,14 @@ static struct ata_port_operations hpt3x2n_port_ops = {
373 .qc_prep = ata_qc_prep, 373 .qc_prep = ata_qc_prep,
374 .qc_issue = hpt3x2n_qc_issue_prot, 374 .qc_issue = hpt3x2n_qc_issue_prot,
375 375
376 .data_xfer = ata_pio_data_xfer, 376 .data_xfer = ata_data_xfer,
377 377
378 .irq_handler = ata_interrupt, 378 .irq_handler = ata_interrupt,
379 .irq_clear = ata_bmdma_irq_clear, 379 .irq_clear = ata_bmdma_irq_clear,
380 .irq_on = ata_irq_on,
381 .irq_ack = ata_irq_ack,
380 382
381 .port_start = ata_port_start, 383 .port_start = ata_port_start,
382 .port_stop = ata_port_stop,
383 .host_stop = ata_host_stop
384}; 384};
385 385
386/** 386/**
diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c
index 5f1d385eb592..483ce7c12c9a 100644
--- a/drivers/ata/pata_hpt3x3.c
+++ b/drivers/ata/pata_hpt3x3.c
@@ -148,14 +148,14 @@ static struct ata_port_operations hpt3x3_port_ops = {
148 .qc_prep = ata_qc_prep, 148 .qc_prep = ata_qc_prep,
149 .qc_issue = ata_qc_issue_prot, 149 .qc_issue = ata_qc_issue_prot,
150 150
151 .data_xfer = ata_pio_data_xfer, 151 .data_xfer = ata_data_xfer,
152 152
153 .irq_handler = ata_interrupt, 153 .irq_handler = ata_interrupt,
154 .irq_clear = ata_bmdma_irq_clear, 154 .irq_clear = ata_bmdma_irq_clear,
155 .irq_on = ata_irq_on,
156 .irq_ack = ata_irq_ack,
155 157
156 .port_start = ata_port_start, 158 .port_start = ata_port_start,
157 .port_stop = ata_port_stop,
158 .host_stop = ata_host_stop
159}; 159};
160 160
161/** 161/**
@@ -164,7 +164,7 @@ static struct ata_port_operations hpt3x3_port_ops = {
164 * 164 *
165 * Perform the setup required at boot and on resume. 165 * Perform the setup required at boot and on resume.
166 */ 166 */
167 167
168static void hpt3x3_init_chipset(struct pci_dev *dev) 168static void hpt3x3_init_chipset(struct pci_dev *dev)
169{ 169{
170 u16 cmd; 170 u16 cmd;
diff --git a/drivers/ata/pata_isapnp.c b/drivers/ata/pata_isapnp.c
index a97d55ae95c9..1bf5ec18b2e3 100644
--- a/drivers/ata/pata_isapnp.c
+++ b/drivers/ata/pata_isapnp.c
@@ -53,14 +53,14 @@ static struct ata_port_operations isapnp_port_ops = {
53 .qc_prep = ata_qc_prep, 53 .qc_prep = ata_qc_prep,
54 .qc_issue = ata_qc_issue_prot, 54 .qc_issue = ata_qc_issue_prot,
55 55
56 .data_xfer = ata_pio_data_xfer, 56 .data_xfer = ata_data_xfer,
57 57
58 .irq_handler = ata_interrupt, 58 .irq_handler = ata_interrupt,
59 .irq_clear = ata_bmdma_irq_clear, 59 .irq_clear = ata_bmdma_irq_clear,
60 .irq_on = ata_irq_on,
61 .irq_ack = ata_irq_ack,
60 62
61 .port_start = ata_port_start, 63 .port_start = ata_port_start,
62 .port_stop = ata_port_stop,
63 .host_stop = ata_host_stop
64}; 64};
65 65
66/** 66/**
@@ -75,6 +75,7 @@ static struct ata_port_operations isapnp_port_ops = {
75static int isapnp_init_one(struct pnp_dev *idev, const struct pnp_device_id *dev_id) 75static int isapnp_init_one(struct pnp_dev *idev, const struct pnp_device_id *dev_id)
76{ 76{
77 struct ata_probe_ent ae; 77 struct ata_probe_ent ae;
78 void __iomem *cmd_addr, *ctl_addr;
78 79
79 if (pnp_port_valid(idev, 0) == 0) 80 if (pnp_port_valid(idev, 0) == 0)
80 return -ENODEV; 81 return -ENODEV;
@@ -83,6 +84,10 @@ static int isapnp_init_one(struct pnp_dev *idev, const struct pnp_device_id *dev
83 if (pnp_irq_valid(idev, 0) == 0) 84 if (pnp_irq_valid(idev, 0) == 0)
84 return -ENODEV; 85 return -ENODEV;
85 86
87 cmd_addr = devm_ioport_map(&idev->dev, pnp_port_start(idev, 0), 8);
88 if (!cmd_addr)
89 return -ENOMEM;
90
86 memset(&ae, 0, sizeof(struct ata_probe_ent)); 91 memset(&ae, 0, sizeof(struct ata_probe_ent));
87 INIT_LIST_HEAD(&ae.node); 92 INIT_LIST_HEAD(&ae.node);
88 ae.dev = &idev->dev; 93 ae.dev = &idev->dev;
@@ -93,11 +98,13 @@ static int isapnp_init_one(struct pnp_dev *idev, const struct pnp_device_id *dev
93 ae.irq = pnp_irq(idev, 0); 98 ae.irq = pnp_irq(idev, 0);
94 ae.irq_flags = 0; 99 ae.irq_flags = 0;
95 ae.port_flags = ATA_FLAG_SLAVE_POSS; 100 ae.port_flags = ATA_FLAG_SLAVE_POSS;
96 ae.port[0].cmd_addr = pnp_port_start(idev, 0); 101 ae.port[0].cmd_addr = cmd_addr;
97 102
98 if (pnp_port_valid(idev, 1) == 0) { 103 if (pnp_port_valid(idev, 1) == 0) {
99 ae.port[0].altstatus_addr = pnp_port_start(idev, 1); 104 ctl_addr = devm_ioport_map(&idev->dev,
100 ae.port[0].ctl_addr = pnp_port_start(idev, 1); 105 pnp_port_start(idev, 1), 1);
106 ae.port[0].altstatus_addr = ctl_addr;
107 ae.port[0].ctl_addr = ctl_addr;
101 ae.port_flags |= ATA_FLAG_SRST; 108 ae.port_flags |= ATA_FLAG_SRST;
102 } 109 }
103 ata_std_ports(&ae.port[0]); 110 ata_std_ports(&ae.port[0]);
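ISAPnP devices have no PCI BARs to pcim_iomap(), so the fixed port ranges reported by the PnP core are wrapped with devm_ioport_map(), which is likewise device-managed and needs no explicit unmap in the remove path. A condensed sketch of the mapping step (helper name invented; the PnP accessors are the ones used above):

#include <linux/io.h>
#include <linux/pnp.h>

/* sketch: map a PnP IDE device's command/control port ranges */
static int map_pnp_ports(struct pnp_dev *idev,
			 void __iomem **cmd, void __iomem **ctl)
{
	*cmd = devm_ioport_map(&idev->dev, pnp_port_start(idev, 0), 8);
	if (!*cmd)
		return -ENOMEM;

	*ctl = NULL;
	if (pnp_port_valid(idev, 1))
		*ctl = devm_ioport_map(&idev->dev, pnp_port_start(idev, 1), 1);
	return 0;
}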
@@ -120,7 +127,7 @@ static void isapnp_remove_one(struct pnp_dev *idev)
120 struct device *dev = &idev->dev; 127 struct device *dev = &idev->dev;
121 struct ata_host *host = dev_get_drvdata(dev); 128 struct ata_host *host = dev_get_drvdata(dev);
122 129
123 ata_host_remove(host); 130 ata_host_detach(host);
124 dev_set_drvdata(dev, NULL); 131 dev_set_drvdata(dev, NULL);
125} 132}
126 133
diff --git a/drivers/ata/pata_it8213.c b/drivers/ata/pata_it8213.c
new file mode 100644
index 000000000000..7eac869dfcd3
--- /dev/null
+++ b/drivers/ata/pata_it8213.c
@@ -0,0 +1,354 @@
1/*
2 * pata_it8213.c - iTE Tech. Inc. IT8213 PATA driver
3 *
4 * The IT8213 is a very Intel ICH like device for timing purposes, having
5 * a similar register layout and the same split clock arrangement. Cable
6 * detection is different, and it does not have slave channels or all the
7 * clutter of later ICH/SATA setups.
8 */
9
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/pci.h>
13#include <linux/init.h>
14#include <linux/blkdev.h>
15#include <linux/delay.h>
16#include <linux/device.h>
17#include <scsi/scsi_host.h>
18#include <linux/libata.h>
19#include <linux/ata.h>
20
21#define DRV_NAME "pata_it8213"
22#define DRV_VERSION "0.0.2"
23
24/**
25 * it8213_pre_reset - check for 40/80 pin
26 * @ap: Port
27 *
28 * Perform cable detection for the 8213 ATA interface. This is
29 * different to the PIIX arrangement
30 */
31
32static int it8213_pre_reset(struct ata_port *ap)
33{
34 static const struct pci_bits it8213_enable_bits[] = {
35 { 0x41U, 1U, 0x80UL, 0x80UL }, /* port 0 */
36 };
37
38 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
39 u8 tmp;
40
41 if (!pci_test_config_bits(pdev, &it8213_enable_bits[ap->port_no]))
42 return -ENOENT;
43
44 pci_read_config_byte(pdev, 0x42, &tmp);
45 if (tmp & 2) /* The initial docs are incorrect */
46 ap->cbl = ATA_CBL_PATA40;
47 else
48 ap->cbl = ATA_CBL_PATA80;
49 return ata_std_prereset(ap);
50}
51
52/**
 53 * it8213_error_handler - Handle error recovery for the port
 54 * @ap: Port on which the error occurred
55 *
56 * LOCKING:
57 * None (inherited from caller).
58 */
59
60static void it8213_error_handler(struct ata_port *ap)
61{
62 ata_bmdma_drive_eh(ap, it8213_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
63}
64
65/**
66 * it8213_set_piomode - Initialize host controller PATA PIO timings
67 * @ap: Port whose timings we are configuring
 68 * @adev: Device whose timings we are configuring
69 *
70 * Set PIO mode for device, in host controller PCI config space.
71 *
72 * LOCKING:
73 * None (inherited from caller).
74 */
75
76static void it8213_set_piomode (struct ata_port *ap, struct ata_device *adev)
77{
78 unsigned int pio = adev->pio_mode - XFER_PIO_0;
79 struct pci_dev *dev = to_pci_dev(ap->host->dev);
 80 unsigned int idetm_port = ap->port_no ? 0x42 : 0x40;
81 u16 idetm_data;
82 int control = 0;
83
84 /*
 85 * See Intel Document 298600-004 for the timing programming rules
 86 * for PIIX/ICH. The 8213 is a clone, so the rules are very similar.
87 */
88
89 static const /* ISP RTC */
90 u8 timings[][2] = { { 0, 0 },
91 { 0, 0 },
92 { 1, 0 },
93 { 2, 1 },
94 { 2, 3 }, };
95
96 if (pio > 2)
97 control |= 1; /* TIME1 enable */
98 if (ata_pio_need_iordy(adev)) /* PIO 3/4 require IORDY */
99 control |= 2; /* IORDY enable */
100 /* Bit 2 is set for ATAPI on the IT8213 - reverse of ICH/PIIX */
101 if (adev->class != ATA_DEV_ATA)
102 control |= 4;
103
104 pci_read_config_word(dev, idetm_port, &idetm_data);
105
106 /* Enable PPE, IE and TIME as appropriate */
107
108 if (adev->devno == 0) {
109 idetm_data &= 0xCCF0;
110 idetm_data |= control;
111 idetm_data |= (timings[pio][0] << 12) |
112 (timings[pio][1] << 8);
113 } else {
114 u8 slave_data;
115
116 idetm_data &= 0xCC0F;
117 idetm_data |= (control << 4);
118
 119 /* Slave timing lives in a separate register */
120 pci_read_config_byte(dev, 0x44, &slave_data);
121 slave_data &= 0xF0;
122 slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << 4;
123 pci_write_config_byte(dev, 0x44, slave_data);
124 }
125
126 idetm_data |= 0x4000; /* Ensure SITRE is enabled */
127 pci_write_config_word(dev, idetm_port, idetm_data);
128}
129
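As a concrete check of the packing above, this stand-alone user-space sketch reproduces the master-device IDETIM word for PIO 4 (ISP = 2, RTC = 3, control = IORDY | TIME1). The helper name is invented, and unlike the real code it ignores the pre-existing register contents rather than doing a read-modify-write.

#include <stdio.h>

/* Hypothetical mirror of the master-device branch above. */
static unsigned short it8213_pack_master(unsigned int isp, unsigned int rtc,
                                         unsigned int control)
{
        unsigned short v = 0;

        v |= control;           /* TIME1/IORDY/ATAPI bits, 3:0 */
        v |= isp << 12;         /* ISP field */
        v |= rtc << 8;          /* RTC field */
        v |= 0x4000;            /* SITRE, as forced at the end */
        return v;
}

int main(void)
{
        /* PIO 4 -> ISP=2, RTC=3, control=3: expect 0x6303 */
        printf("IDETIM = 0x%04X\n", it8213_pack_master(2, 3, 3));
        return 0;
}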
130/**
131 * it8213_set_dmamode - Initialize host controller PATA DMA timings
132 * @ap: Port whose timings we are configuring
133 * @adev: Device to program
134 *
135 * Set UDMA/MWDMA mode for device, in host controller PCI config space.
136 * This device is basically an ICH alike.
137 *
138 * LOCKING:
139 * None (inherited from caller).
140 */
141
142static void it8213_set_dmamode (struct ata_port *ap, struct ata_device *adev)
143{
144 struct pci_dev *dev = to_pci_dev(ap->host->dev);
145 u16 master_data;
146 u8 speed = adev->dma_mode;
147 int devid = adev->devno;
148 u8 udma_enable;
149
150 static const /* ISP RTC */
151 u8 timings[][2] = { { 0, 0 },
152 { 0, 0 },
153 { 1, 0 },
154 { 2, 1 },
155 { 2, 3 }, };
156
157 pci_read_config_word(dev, 0x40, &master_data);
158 pci_read_config_byte(dev, 0x48, &udma_enable);
159
160 if (speed >= XFER_UDMA_0) {
161 unsigned int udma = adev->dma_mode - XFER_UDMA_0;
162 u16 udma_timing;
163 u16 ideconf;
164 int u_clock, u_speed;
165
166 /* Clocks follow the PIIX style */
167 u_speed = min(2 - (udma & 1), udma);
 168 if (udma == 5)
 169 u_clock = 0x1000; /* 100MHz */
 170 else if (udma > 2)
 171 u_clock = 1; /* 66MHz */
 172 else
 173 u_clock = 0; /* 33MHz */
174
175 udma_enable |= (1 << devid);
176
177 /* Load the UDMA mode number */
178 pci_read_config_word(dev, 0x4A, &udma_timing);
179 udma_timing &= ~(3 << (4 * devid));
180 udma_timing |= (udma & 3) << (4 * devid);
181 pci_write_config_word(dev, 0x4A, udma_timing);
182
183 /* Load the clock selection */
184 pci_read_config_word(dev, 0x54, &ideconf);
185 ideconf &= ~(0x1001 << devid);
186 ideconf |= u_clock << devid;
187 pci_write_config_word(dev, 0x54, ideconf);
188 } else {
189 /*
190 * MWDMA is driven by the PIO timings. We must also enable
191 * IORDY unconditionally along with TIME1. PPE has already
192 * been set when the PIO timing was set.
193 */
194 unsigned int mwdma = adev->dma_mode - XFER_MW_DMA_0;
195 unsigned int control;
196 u8 slave_data;
197 static const unsigned int needed_pio[3] = {
198 XFER_PIO_0, XFER_PIO_3, XFER_PIO_4
199 };
200 int pio = needed_pio[mwdma] - XFER_PIO_0;
201
202 control = 3; /* IORDY|TIME1 */
203
 204 /* If the drive's MWDMA mode is faster than its best PIO mode
 205 then we must force the PIO timings into PIO0 */
206
207 if (adev->pio_mode < needed_pio[mwdma])
208 /* Enable DMA timing only */
209 control |= 8; /* PIO cycles in PIO0 */
210
211 if (devid) { /* Slave */
212 master_data &= 0xFF4F; /* Mask out IORDY|TIME1|DMAONLY */
213 master_data |= control << 4;
214 pci_read_config_byte(dev, 0x44, &slave_data);
215 slave_data &= (0x0F + 0xE1 * ap->port_no);
216 /* Load the matching timing */
217 slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << (ap->port_no ? 4 : 0);
218 pci_write_config_byte(dev, 0x44, slave_data);
219 } else { /* Master */
220 master_data &= 0xCCF4; /* Mask out IORDY|TIME1|DMAONLY
221 and master timing bits */
222 master_data |= control;
223 master_data |=
224 (timings[pio][0] << 12) |
225 (timings[pio][1] << 8);
226 }
227 udma_enable &= ~(1 << devid);
228 pci_write_config_word(dev, 0x40, master_data);
229 }
230 pci_write_config_byte(dev, 0x48, udma_enable);
231}
232
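The UDMA clock selection above is compact, so the hedged sketch below just prints the mode-to-clock mapping it implies (note u_speed is computed the same way as in the driver, although the hunk itself only stores udma & 3 in the timing register).

#include <stdio.h>

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
        /* Reproduce the PIIX-style mode -> cycle/base-clock mapping above. */
        for (int udma = 0; udma <= 5; udma++) {
                int u_speed = min_int(2 - (udma & 1), udma);
                int mhz = (udma == 5) ? 100 : (udma > 2) ? 66 : 33;

                printf("UDMA%d: u_speed=%d, base clock %d MHz\n",
                       udma, u_speed, mhz);
        }
        return 0;
}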
233static struct scsi_host_template it8213_sht = {
234 .module = THIS_MODULE,
235 .name = DRV_NAME,
236 .ioctl = ata_scsi_ioctl,
237 .queuecommand = ata_scsi_queuecmd,
238 .can_queue = ATA_DEF_QUEUE,
239 .this_id = ATA_SHT_THIS_ID,
240 .sg_tablesize = LIBATA_MAX_PRD,
241 .max_sectors = ATA_MAX_SECTORS,
242 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
243 .emulated = ATA_SHT_EMULATED,
244 .use_clustering = ATA_SHT_USE_CLUSTERING,
245 .proc_name = DRV_NAME,
246 .dma_boundary = ATA_DMA_BOUNDARY,
247 .slave_configure = ata_scsi_slave_config,
248 .bios_param = ata_std_bios_param,
249 .resume = ata_scsi_device_resume,
250 .suspend = ata_scsi_device_suspend,
251};
252
253static const struct ata_port_operations it8213_ops = {
254 .port_disable = ata_port_disable,
255 .set_piomode = it8213_set_piomode,
256 .set_dmamode = it8213_set_dmamode,
257 .mode_filter = ata_pci_default_filter,
258
259 .tf_load = ata_tf_load,
260 .tf_read = ata_tf_read,
261 .check_status = ata_check_status,
262 .exec_command = ata_exec_command,
263 .dev_select = ata_std_dev_select,
264
265 .freeze = ata_bmdma_freeze,
266 .thaw = ata_bmdma_thaw,
267 .error_handler = it8213_error_handler,
268 .post_internal_cmd = ata_bmdma_post_internal_cmd,
269
270 .bmdma_setup = ata_bmdma_setup,
271 .bmdma_start = ata_bmdma_start,
272 .bmdma_stop = ata_bmdma_stop,
273 .bmdma_status = ata_bmdma_status,
274 .qc_prep = ata_qc_prep,
275 .qc_issue = ata_qc_issue_prot,
276 .data_xfer = ata_data_xfer,
277
278 .irq_handler = ata_interrupt,
279 .irq_clear = ata_bmdma_irq_clear,
280 .irq_on = ata_irq_on,
281 .irq_ack = ata_irq_ack,
282
283 .port_start = ata_port_start,
284};
285
286
287/**
288 * it8213_init_one - Register 8213 ATA PCI device with kernel services
289 * @pdev: PCI device to register
290 * @ent: Entry in it8213_pci_tbl matching with @pdev
291 *
292 * Called from kernel PCI layer.
293 *
294 * LOCKING:
295 * Inherited from PCI layer (may sleep).
296 *
297 * RETURNS:
298 * Zero on success, or -ERRNO value.
299 */
300
301static int it8213_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
302{
303 static int printed_version;
304 static struct ata_port_info info = {
305 .sht = &it8213_sht,
306 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
307 .pio_mask = 0x1f, /* pio0-4 */
308 .mwdma_mask = 0x07, /* mwdma0-2 */
309 .udma_mask = 0x1f, /* UDMA 100 */
310 .port_ops = &it8213_ops,
311 };
312 static struct ata_port_info *port_info[2] = { &info, &info };
313
314 if (!printed_version++)
315 dev_printk(KERN_DEBUG, &pdev->dev,
316 "version " DRV_VERSION "\n");
317
318 /* Current IT8213 stuff is single port */
319 return ata_pci_init_one(pdev, port_info, 1);
320}
321
322static const struct pci_device_id it8213_pci_tbl[] = {
323 { PCI_VDEVICE(ITE, PCI_DEVICE_ID_ITE_8213), },
324
325 { } /* terminate list */
326};
327
328static struct pci_driver it8213_pci_driver = {
329 .name = DRV_NAME,
330 .id_table = it8213_pci_tbl,
331 .probe = it8213_init_one,
332 .remove = ata_pci_remove_one,
333 .suspend = ata_pci_device_suspend,
334 .resume = ata_pci_device_resume,
335};
336
337static int __init it8213_init(void)
338{
339 return pci_register_driver(&it8213_pci_driver);
340}
341
342static void __exit it8213_exit(void)
343{
344 pci_unregister_driver(&it8213_pci_driver);
345}
346
347module_init(it8213_init);
348module_exit(it8213_exit);
349
350MODULE_AUTHOR("Alan Cox");
351MODULE_DESCRIPTION("SCSI low-level driver for the ITE 8213");
352MODULE_LICENSE("GPL");
353MODULE_DEVICE_TABLE(pci, it8213_pci_tbl);
354MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
index e8afd486434a..73394c75be42 100644
--- a/drivers/ata/pata_it821x.c
+++ b/drivers/ata/pata_it821x.c
@@ -492,7 +492,7 @@ static int it821x_smart_set_mode(struct ata_port *ap, struct ata_device **unused
492 /* Bits 5 and 6 indicate if DMA is active on master/slave */ 492 /* Bits 5 and 6 indicate if DMA is active on master/slave */
493 /* It is possible that BMDMA isn't allocated */ 493 /* It is possible that BMDMA isn't allocated */
494 if (ap->ioaddr.bmdma_addr) 494 if (ap->ioaddr.bmdma_addr)
495 dma_enabled = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); 495 dma_enabled = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
496 496
497 for (i = 0; i < ATA_MAX_DEVICES; i++) { 497 for (i = 0; i < ATA_MAX_DEVICES; i++) {
498 struct ata_device *dev = &ap->device[i]; 498 struct ata_device *dev = &ap->device[i];
@@ -531,23 +531,9 @@ static int it821x_smart_set_mode(struct ata_port *ap, struct ata_device **unused
531 531
532static void it821x_dev_config(struct ata_port *ap, struct ata_device *adev) 532static void it821x_dev_config(struct ata_port *ap, struct ata_device *adev)
533{ 533{
534 unsigned char model_num[40]; 534 unsigned char model_num[ATA_ID_PROD_LEN + 1];
535 char *s; 535
536 unsigned int len; 536 ata_id_c_string(adev->id, model_num, ATA_ID_PROD, sizeof(model_num));
537
538 /* This block ought to be a library routine as it is in several
539 drivers now */
540
541 ata_id_string(adev->id, model_num, ATA_ID_PROD_OFS,
542 sizeof(model_num));
543 s = &model_num[0];
544 len = strnlen(s, sizeof(model_num));
545
546 /* ATAPI specifies that empty space is blank-filled; remove blanks */
547 while ((len > 0) && (s[len - 1] == ' ')) {
548 len--;
549 s[len] = 0;
550 }
551 537
552 if (adev->max_sectors > 255) 538 if (adev->max_sectors > 255)
553 adev->max_sectors = 255; 539 adev->max_sectors = 255;
@@ -608,14 +594,10 @@ static int it821x_port_start(struct ata_port *ap)
608 if (ret < 0) 594 if (ret < 0)
609 return ret; 595 return ret;
610 596
611 ap->private_data = kmalloc(sizeof(struct it821x_dev), GFP_KERNEL); 597 itdev = devm_kzalloc(&pdev->dev, sizeof(struct it821x_dev), GFP_KERNEL);
612 if (ap->private_data == NULL) { 598 if (itdev == NULL)
613 ata_port_stop(ap);
614 return -ENOMEM; 599 return -ENOMEM;
615 } 600 ap->private_data = itdev;
616
617 itdev = ap->private_data;
618 memset(itdev, 0, sizeof(struct it821x_dev));
619 601
620 pci_read_config_byte(pdev, 0x50, &conf); 602 pci_read_config_byte(pdev, 0x50, &conf);
621 603
@@ -646,20 +628,6 @@ static int it821x_port_start(struct ata_port *ap)
646 return 0; 628 return 0;
647} 629}
648 630
649/**
650 * it821x_port_stop - port shutdown
651 * @ap: ATA port being removed
652 *
653 * Release the private objects we added in it821x_port_start
654 */
655
656static void it821x_port_stop(struct ata_port *ap) {
657 kfree(ap->private_data);
658 ap->private_data = NULL; /* We want an OOPS if we reuse this
659 too late! */
660 ata_port_stop(ap);
661}
662
663static struct scsi_host_template it821x_sht = { 631static struct scsi_host_template it821x_sht = {
664 .module = THIS_MODULE, 632 .module = THIS_MODULE,
665 .name = DRV_NAME, 633 .name = DRV_NAME,
@@ -706,14 +674,14 @@ static struct ata_port_operations it821x_smart_port_ops = {
706 .qc_prep = ata_qc_prep, 674 .qc_prep = ata_qc_prep,
707 .qc_issue = it821x_smart_qc_issue_prot, 675 .qc_issue = it821x_smart_qc_issue_prot,
708 676
709 .data_xfer = ata_pio_data_xfer, 677 .data_xfer = ata_data_xfer,
710 678
711 .irq_handler = ata_interrupt, 679 .irq_handler = ata_interrupt,
712 .irq_clear = ata_bmdma_irq_clear, 680 .irq_clear = ata_bmdma_irq_clear,
681 .irq_on = ata_irq_on,
682 .irq_ack = ata_irq_ack,
713 683
714 .port_start = it821x_port_start, 684 .port_start = it821x_port_start,
715 .port_stop = it821x_port_stop,
716 .host_stop = ata_host_stop
717}; 685};
718 686
719static struct ata_port_operations it821x_passthru_port_ops = { 687static struct ata_port_operations it821x_passthru_port_ops = {
@@ -742,14 +710,14 @@ static struct ata_port_operations it821x_passthru_port_ops = {
742 .qc_prep = ata_qc_prep, 710 .qc_prep = ata_qc_prep,
743 .qc_issue = it821x_passthru_qc_issue_prot, 711 .qc_issue = it821x_passthru_qc_issue_prot,
744 712
745 .data_xfer = ata_pio_data_xfer, 713 .data_xfer = ata_data_xfer,
746 714
747 .irq_clear = ata_bmdma_irq_clear, 715 .irq_clear = ata_bmdma_irq_clear,
748 .irq_handler = ata_interrupt, 716 .irq_handler = ata_interrupt,
717 .irq_on = ata_irq_on,
718 .irq_ack = ata_irq_ack,
749 719
750 .port_start = it821x_port_start, 720 .port_start = it821x_port_start,
751 .port_stop = it821x_port_stop,
752 .host_stop = ata_host_stop
753}; 721};
754 722
755static void __devinit it821x_disable_raid(struct pci_dev *pdev) 723static void __devinit it821x_disable_raid(struct pci_dev *pdev)
diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
index 23b8aab3ebd8..3222ac7b945d 100644
--- a/drivers/ata/pata_ixp4xx_cf.c
+++ b/drivers/ata/pata_ixp4xx_cf.c
@@ -95,14 +95,6 @@ static void ixp4xx_irq_clear(struct ata_port *ap)
95{ 95{
96} 96}
97 97
98static void ixp4xx_host_stop (struct ata_host *host)
99{
100 struct ixp4xx_pata_data *data = host->dev->platform_data;
101
102 iounmap(data->cs0);
103 iounmap(data->cs1);
104}
105
106static struct scsi_host_template ixp4xx_sht = { 98static struct scsi_host_template ixp4xx_sht = {
107 .module = THIS_MODULE, 99 .module = THIS_MODULE,
108 .name = DRV_NAME, 100 .name = DRV_NAME,
@@ -139,10 +131,10 @@ static struct ata_port_operations ixp4xx_port_ops = {
139 131
140 .irq_handler = ata_interrupt, 132 .irq_handler = ata_interrupt,
141 .irq_clear = ixp4xx_irq_clear, 133 .irq_clear = ixp4xx_irq_clear,
134 .irq_on = ata_irq_on,
135 .irq_ack = ata_irq_ack,
142 136
143 .port_start = ata_port_start, 137 .port_start = ata_port_start,
144 .port_stop = ata_port_stop,
145 .host_stop = ixp4xx_host_stop,
146 138
147 .phy_reset = ixp4xx_phy_reset, 139 .phy_reset = ixp4xx_phy_reset,
148}; 140};
@@ -150,9 +142,9 @@ static struct ata_port_operations ixp4xx_port_ops = {
150static void ixp4xx_setup_port(struct ata_ioports *ioaddr, 142static void ixp4xx_setup_port(struct ata_ioports *ioaddr,
151 struct ixp4xx_pata_data *data) 143 struct ixp4xx_pata_data *data)
152{ 144{
153 ioaddr->cmd_addr = (unsigned long) data->cs0; 145 ioaddr->cmd_addr = data->cs0;
154 ioaddr->altstatus_addr = (unsigned long) data->cs1 + 0x06; 146 ioaddr->altstatus_addr = data->cs1 + 0x06;
155 ioaddr->ctl_addr = (unsigned long) data->cs1 + 0x06; 147 ioaddr->ctl_addr = data->cs1 + 0x06;
156 148
157 ata_std_ports(ioaddr); 149 ata_std_ports(ioaddr);
158 150
@@ -162,19 +154,19 @@ static void ixp4xx_setup_port(struct ata_ioports *ioaddr,
162 * ixp4xx in little endian mode. 154 * ixp4xx in little endian mode.
163 */ 155 */
164 156
165 ioaddr->data_addr ^= 0x02; 157 *(unsigned long *)&ioaddr->data_addr ^= 0x02;
166 ioaddr->cmd_addr ^= 0x03; 158 *(unsigned long *)&ioaddr->cmd_addr ^= 0x03;
167 ioaddr->altstatus_addr ^= 0x03; 159 *(unsigned long *)&ioaddr->altstatus_addr ^= 0x03;
168 ioaddr->ctl_addr ^= 0x03; 160 *(unsigned long *)&ioaddr->ctl_addr ^= 0x03;
169 ioaddr->error_addr ^= 0x03; 161 *(unsigned long *)&ioaddr->error_addr ^= 0x03;
170 ioaddr->feature_addr ^= 0x03; 162 *(unsigned long *)&ioaddr->feature_addr ^= 0x03;
171 ioaddr->nsect_addr ^= 0x03; 163 *(unsigned long *)&ioaddr->nsect_addr ^= 0x03;
172 ioaddr->lbal_addr ^= 0x03; 164 *(unsigned long *)&ioaddr->lbal_addr ^= 0x03;
173 ioaddr->lbam_addr ^= 0x03; 165 *(unsigned long *)&ioaddr->lbam_addr ^= 0x03;
174 ioaddr->lbah_addr ^= 0x03; 166 *(unsigned long *)&ioaddr->lbah_addr ^= 0x03;
175 ioaddr->device_addr ^= 0x03; 167 *(unsigned long *)&ioaddr->device_addr ^= 0x03;
176 ioaddr->status_addr ^= 0x03; 168 *(unsigned long *)&ioaddr->status_addr ^= 0x03;
177 ioaddr->command_addr ^= 0x03; 169 *(unsigned long *)&ioaddr->command_addr ^= 0x03;
178#endif 170#endif
179} 171}
180 172
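The address rewriting in ixp4xx_setup_port() above can look opaque. The user-space sketch below (the base address is invented) shows the effect: on this big-endian expansion bus, each 8-bit task-file register at byte offset N is actually reached at N ^ 3, while the 16-bit data register sits at its offset ^ 2.

#include <stdio.h>

int main(void)
{
        unsigned long cs0 = 0x50000000UL;       /* invented CS0 base */

        for (int reg = 1; reg < 8; reg++)       /* 8-bit task-file regs */
                printf("byte reg %d -> %#lx\n", reg, (cs0 + reg) ^ 3);

        printf("16-bit data  -> %#lx\n", cs0 ^ 2);
        return 0;
}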
@@ -195,8 +187,8 @@ static __devinit int ixp4xx_pata_probe(struct platform_device *pdev)
195 187
196 pdev->dev.coherent_dma_mask = DMA_32BIT_MASK; 188 pdev->dev.coherent_dma_mask = DMA_32BIT_MASK;
197 189
198 data->cs0 = ioremap(cs0->start, 0x1000); 190 data->cs0 = devm_ioremap(&pdev->dev, cs0->start, 0x1000);
199 data->cs1 = ioremap(cs1->start, 0x1000); 191 data->cs1 = devm_ioremap(&pdev->dev, cs1->start, 0x1000);
200 192
201 irq = platform_get_irq(pdev, 0); 193 irq = platform_get_irq(pdev, 0);
202 if (irq) 194 if (irq)
@@ -238,7 +230,7 @@ static __devexit int ixp4xx_pata_remove(struct platform_device *dev)
238{ 230{
239 struct ata_host *host = platform_get_drvdata(dev); 231 struct ata_host *host = platform_get_drvdata(dev);
240 232
241 ata_host_remove(host); 233 ata_host_detach(host);
242 platform_set_drvdata(dev, NULL); 234 platform_set_drvdata(dev, NULL);
243 235
244 return 0; 236 return 0;
diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c
index 2d661cb4df3c..7a635dd326f8 100644
--- a/drivers/ata/pata_jmicron.c
+++ b/drivers/ata/pata_jmicron.c
@@ -161,16 +161,16 @@ static const struct ata_port_operations jmicron_ops = {
161 .bmdma_status = ata_bmdma_status, 161 .bmdma_status = ata_bmdma_status,
162 .qc_prep = ata_qc_prep, 162 .qc_prep = ata_qc_prep,
163 .qc_issue = ata_qc_issue_prot, 163 .qc_issue = ata_qc_issue_prot,
164 .data_xfer = ata_pio_data_xfer, 164 .data_xfer = ata_data_xfer,
165 165
166 /* IRQ-related hooks */ 166 /* IRQ-related hooks */
167 .irq_handler = ata_interrupt, 167 .irq_handler = ata_interrupt,
168 .irq_clear = ata_bmdma_irq_clear, 168 .irq_clear = ata_bmdma_irq_clear,
169 .irq_on = ata_irq_on,
170 .irq_ack = ata_irq_ack,
169 171
170 /* Generic PATA PCI ATA helpers */ 172 /* Generic PATA PCI ATA helpers */
171 .port_start = ata_port_start, 173 .port_start = ata_port_start,
172 .port_stop = ata_port_stop,
173 .host_stop = ata_host_stop,
174}; 174};
175 175
176 176
@@ -204,20 +204,12 @@ static int jmicron_init_one (struct pci_dev *pdev, const struct pci_device_id *i
204 204
205 u32 reg; 205 u32 reg;
206 206
207 if (id->driver_data != 368) { 207 /* PATA controller is fn 1, AHCI is fn 0 */
208 /* Put the controller into AHCI mode in case the AHCI driver 208 if (id->driver_data != 368 && PCI_FUNC(pdev->devfn) != 1)
209 has not yet been loaded. This can be done with either 209 return -ENODEV;
210 function present */
211 210
212 /* FIXME: We may want a way to override this in future */ 211 /* The 365/66 have two PATA channels, redirect the second */
213 pci_write_config_byte(pdev, 0x41, 0xa1); 212 if (id->driver_data == 365 || id->driver_data == 366) {
214
215 /* PATA controller is fn 1, AHCI is fn 0 */
216 if (PCI_FUNC(pdev->devfn) != 1)
217 return -ENODEV;
218 }
219 if ( id->driver_data == 365 || id->driver_data == 366) {
220 /* The 365/66 have two PATA channels, redirect the second */
221 pci_read_config_dword(pdev, 0x80, &reg); 213 pci_read_config_dword(pdev, 0x80, &reg);
222 reg |= (1 << 24); /* IDE1 to PATA IDE secondary */ 214 reg |= (1 << 24); /* IDE1 to PATA IDE secondary */
223 pci_write_config_dword(pdev, 0x80, reg); 215 pci_write_config_dword(pdev, 0x80, reg);
@@ -229,7 +221,7 @@ static int jmicron_init_one (struct pci_dev *pdev, const struct pci_device_id *i
229static int jmicron_reinit_one(struct pci_dev *pdev) 221static int jmicron_reinit_one(struct pci_dev *pdev)
230{ 222{
231 u32 reg; 223 u32 reg;
232 224
233 switch(pdev->device) { 225 switch(pdev->device) {
234 case PCI_DEVICE_ID_JMICRON_JMB368: 226 case PCI_DEVICE_ID_JMICRON_JMB368:
235 break; 227 break;
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
index 581cb33c6f45..4223e10de6a0 100644
--- a/drivers/ata/pata_legacy.c
+++ b/drivers/ata/pata_legacy.c
@@ -164,14 +164,14 @@ static struct ata_port_operations simple_port_ops = {
164 .qc_prep = ata_qc_prep, 164 .qc_prep = ata_qc_prep,
165 .qc_issue = ata_qc_issue_prot, 165 .qc_issue = ata_qc_issue_prot,
166 166
167 .data_xfer = ata_pio_data_xfer_noirq, 167 .data_xfer = ata_data_xfer_noirq,
168 168
169 .irq_handler = ata_interrupt, 169 .irq_handler = ata_interrupt,
170 .irq_clear = ata_bmdma_irq_clear, 170 .irq_clear = ata_bmdma_irq_clear,
171 .irq_on = ata_irq_on,
172 .irq_ack = ata_irq_ack,
171 173
172 .port_start = ata_port_start, 174 .port_start = ata_port_start,
173 .port_stop = ata_port_stop,
174 .host_stop = ata_host_stop
175}; 175};
176 176
177static struct ata_port_operations legacy_port_ops = { 177static struct ata_port_operations legacy_port_ops = {
@@ -189,14 +189,14 @@ static struct ata_port_operations legacy_port_ops = {
189 .qc_prep = ata_qc_prep, 189 .qc_prep = ata_qc_prep,
190 .qc_issue = ata_qc_issue_prot, 190 .qc_issue = ata_qc_issue_prot,
191 191
192 .data_xfer = ata_pio_data_xfer_noirq, 192 .data_xfer = ata_data_xfer_noirq,
193 193
194 .irq_handler = ata_interrupt, 194 .irq_handler = ata_interrupt,
195 .irq_clear = ata_bmdma_irq_clear, 195 .irq_clear = ata_bmdma_irq_clear,
196 .irq_on = ata_irq_on,
197 .irq_ack = ata_irq_ack,
196 198
197 .port_start = ata_port_start, 199 .port_start = ata_port_start,
198 .port_stop = ata_port_stop,
199 .host_stop = ata_host_stop
200}; 200};
201 201
202/* 202/*
@@ -257,31 +257,33 @@ static void pdc_data_xfer_vlb(struct ata_device *adev, unsigned char *buf, unsig
257 local_irq_save(flags); 257 local_irq_save(flags);
258 258
259 /* Perform the 32bit I/O synchronization sequence */ 259 /* Perform the 32bit I/O synchronization sequence */
260 inb(ap->ioaddr.nsect_addr); 260 ioread8(ap->ioaddr.nsect_addr);
261 inb(ap->ioaddr.nsect_addr); 261 ioread8(ap->ioaddr.nsect_addr);
262 inb(ap->ioaddr.nsect_addr); 262 ioread8(ap->ioaddr.nsect_addr);
263 263
264 /* Now the data */ 264 /* Now the data */
265 265
266 if (write_data) 266 if (write_data)
267 outsl(ap->ioaddr.data_addr, buf, buflen >> 2); 267 iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
268 else 268 else
269 insl(ap->ioaddr.data_addr, buf, buflen >> 2); 269 ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
270 270
271 if (unlikely(slop)) { 271 if (unlikely(slop)) {
272 u32 pad; 272 u32 pad;
273 if (write_data) { 273 if (write_data) {
274 memcpy(&pad, buf + buflen - slop, slop); 274 memcpy(&pad, buf + buflen - slop, slop);
275 outl(le32_to_cpu(pad), ap->ioaddr.data_addr); 275 pad = le32_to_cpu(pad);
276 iowrite32(pad, ap->ioaddr.data_addr);
276 } else { 277 } else {
277 pad = cpu_to_le16(inl(ap->ioaddr.data_addr)); 278 pad = ioread32(ap->ioaddr.data_addr);
279 pad = cpu_to_le16(pad);
278 memcpy(buf + buflen - slop, &pad, slop); 280 memcpy(buf + buflen - slop, &pad, slop);
279 } 281 }
280 } 282 }
281 local_irq_restore(flags); 283 local_irq_restore(flags);
282 } 284 }
283 else 285 else
284 ata_pio_data_xfer_noirq(adev, buf, buflen, write_data); 286 ata_data_xfer_noirq(adev, buf, buflen, write_data);
285} 287}
286 288
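The slop handling in pdc_data_xfer_vlb() above is easy to get wrong, so here is a stand-alone sketch of the same idea (user space, names invented): move whole 32-bit words, then funnel the 1-3 leftover bytes through a padded scratch word so the device always sees full-width accesses.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void xfer_with_slop(const uint8_t *buf, unsigned int buflen)
{
        unsigned int slop = buflen & 3;         /* leftover tail bytes */
        uint32_t pad = 0;

        printf("%u full 32-bit transfers\n", buflen >> 2);
        if (slop) {
                /* Only 'slop' bytes of 'pad' carry real data */
                memcpy(&pad, buf + buflen - slop, slop);
                printf("padded tail word 0x%08X (%u byte(s))\n",
                       (unsigned)pad, slop);
        }
}

int main(void)
{
        uint8_t data[7] = { 1, 2, 3, 4, 5, 6, 7 };

        xfer_with_slop(data, sizeof(data));
        return 0;
}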
287static struct ata_port_operations pdc20230_port_ops = { 289static struct ata_port_operations pdc20230_port_ops = {
@@ -303,10 +305,10 @@ static struct ata_port_operations pdc20230_port_ops = {
303 305
304 .irq_handler = ata_interrupt, 306 .irq_handler = ata_interrupt,
305 .irq_clear = ata_bmdma_irq_clear, 307 .irq_clear = ata_bmdma_irq_clear,
308 .irq_on = ata_irq_on,
309 .irq_ack = ata_irq_ack,
306 310
307 .port_start = ata_port_start, 311 .port_start = ata_port_start,
308 .port_stop = ata_port_stop,
309 .host_stop = ata_host_stop
310}; 312};
311 313
312/* 314/*
@@ -332,8 +334,8 @@ static void ht6560a_set_piomode(struct ata_port *ap, struct ata_device *adev)
332 inb(0x3E6); 334 inb(0x3E6);
333 inb(0x3E6); 335 inb(0x3E6);
334 336
335 outb(recover << 4 | active, ap->ioaddr.device_addr); 337 iowrite8(recover << 4 | active, ap->ioaddr.device_addr);
336 inb(ap->ioaddr.status_addr); 338 ioread8(ap->ioaddr.status_addr);
337} 339}
338 340
339static struct ata_port_operations ht6560a_port_ops = { 341static struct ata_port_operations ht6560a_port_ops = {
@@ -351,14 +353,14 @@ static struct ata_port_operations ht6560a_port_ops = {
351 .qc_prep = ata_qc_prep, 353 .qc_prep = ata_qc_prep,
352 .qc_issue = ata_qc_issue_prot, 354 .qc_issue = ata_qc_issue_prot,
353 355
354 .data_xfer = ata_pio_data_xfer, /* Check vlb/noirq */ 356 .data_xfer = ata_data_xfer, /* Check vlb/noirq */
355 357
356 .irq_handler = ata_interrupt, 358 .irq_handler = ata_interrupt,
357 .irq_clear = ata_bmdma_irq_clear, 359 .irq_clear = ata_bmdma_irq_clear,
360 .irq_on = ata_irq_on,
361 .irq_ack = ata_irq_ack,
358 362
359 .port_start = ata_port_start, 363 .port_start = ata_port_start,
360 .port_stop = ata_port_stop,
361 .host_stop = ata_host_stop
362}; 364};
363 365
364/* 366/*
@@ -387,7 +389,7 @@ static void ht6560b_set_piomode(struct ata_port *ap, struct ata_device *adev)
387 inb(0x3E6); 389 inb(0x3E6);
388 inb(0x3E6); 390 inb(0x3E6);
389 391
390 outb(recover << 4 | active, ap->ioaddr.device_addr); 392 iowrite8(recover << 4 | active, ap->ioaddr.device_addr);
391 393
392 if (adev->class != ATA_DEV_ATA) { 394 if (adev->class != ATA_DEV_ATA) {
393 u8 rconf = inb(0x3E6); 395 u8 rconf = inb(0x3E6);
@@ -396,7 +398,7 @@ static void ht6560b_set_piomode(struct ata_port *ap, struct ata_device *adev)
396 outb(rconf, 0x3E6); 398 outb(rconf, 0x3E6);
397 } 399 }
398 } 400 }
399 inb(ap->ioaddr.status_addr); 401 ioread8(ap->ioaddr.status_addr);
400} 402}
401 403
402static struct ata_port_operations ht6560b_port_ops = { 404static struct ata_port_operations ht6560b_port_ops = {
@@ -414,14 +416,14 @@ static struct ata_port_operations ht6560b_port_ops = {
414 .qc_prep = ata_qc_prep, 416 .qc_prep = ata_qc_prep,
415 .qc_issue = ata_qc_issue_prot, 417 .qc_issue = ata_qc_issue_prot,
416 418
417 .data_xfer = ata_pio_data_xfer, /* FIXME: Check 32bit and noirq */ 419 .data_xfer = ata_data_xfer, /* FIXME: Check 32bit and noirq */
418 420
419 .irq_handler = ata_interrupt, 421 .irq_handler = ata_interrupt,
420 .irq_clear = ata_bmdma_irq_clear, 422 .irq_clear = ata_bmdma_irq_clear,
423 .irq_on = ata_irq_on,
424 .irq_ack = ata_irq_ack,
421 425
422 .port_start = ata_port_start, 426 .port_start = ata_port_start,
423 .port_stop = ata_port_stop,
424 .host_stop = ata_host_stop
425}; 427};
426 428
427/* 429/*
@@ -464,12 +466,12 @@ static void opti82c611a_set_piomode(struct ata_port *ap, struct ata_device *adev
464 u8 rc; 466 u8 rc;
465 467
466 /* Enter configuration mode */ 468 /* Enter configuration mode */
467 inw(ap->ioaddr.error_addr); 469 ioread16(ap->ioaddr.error_addr);
468 inw(ap->ioaddr.error_addr); 470 ioread16(ap->ioaddr.error_addr);
469 outb(3, ap->ioaddr.nsect_addr); 471 iowrite8(3, ap->ioaddr.nsect_addr);
470 472
471 /* Read VLB clock strapping */ 473 /* Read VLB clock strapping */
472 clock = 1000000000 / khz[inb(ap->ioaddr.lbah_addr) & 0x03]; 474 clock = 1000000000 / khz[ioread8(ap->ioaddr.lbah_addr) & 0x03];
473 475
474 /* Get the timing data in cycles */ 476 /* Get the timing data in cycles */
475 ata_timing_compute(adev, adev->pio_mode, &t, clock, 1000); 477 ata_timing_compute(adev, adev->pio_mode, &t, clock, 1000);
@@ -487,33 +489,33 @@ static void opti82c611a_set_piomode(struct ata_port *ap, struct ata_device *adev
487 setup = FIT(t.setup, 1, 4) - 1; 489 setup = FIT(t.setup, 1, 4) - 1;
488 490
489 /* Select the right timing bank for write timing */ 491 /* Select the right timing bank for write timing */
490 rc = inb(ap->ioaddr.lbal_addr); 492 rc = ioread8(ap->ioaddr.lbal_addr);
491 rc &= 0x7F; 493 rc &= 0x7F;
492 rc |= (adev->devno << 7); 494 rc |= (adev->devno << 7);
493 outb(rc, ap->ioaddr.lbal_addr); 495 iowrite8(rc, ap->ioaddr.lbal_addr);
494 496
495 /* Write the timings */ 497 /* Write the timings */
496 outb(active << 4 | recover, ap->ioaddr.error_addr); 498 iowrite8(active << 4 | recover, ap->ioaddr.error_addr);
497 499
498 /* Select the right bank for read timings, also 500 /* Select the right bank for read timings, also
499 load the shared timings for address */ 501 load the shared timings for address */
500 rc = inb(ap->ioaddr.device_addr); 502 rc = ioread8(ap->ioaddr.device_addr);
501 rc &= 0xC0; 503 rc &= 0xC0;
502 rc |= adev->devno; /* Index select */ 504 rc |= adev->devno; /* Index select */
503 rc |= (setup << 4) | 0x04; 505 rc |= (setup << 4) | 0x04;
504 outb(rc, ap->ioaddr.device_addr); 506 iowrite8(rc, ap->ioaddr.device_addr);
505 507
506 /* Load the read timings */ 508 /* Load the read timings */
507 outb(active << 4 | recover, ap->ioaddr.data_addr); 509 iowrite8(active << 4 | recover, ap->ioaddr.data_addr);
508 510
509 /* Ensure the timing register mode is right */ 511 /* Ensure the timing register mode is right */
510 rc = inb (ap->ioaddr.lbal_addr); 512 rc = ioread8(ap->ioaddr.lbal_addr);
511 rc &= 0x73; 513 rc &= 0x73;
512 rc |= 0x84; 514 rc |= 0x84;
513 outb(rc, ap->ioaddr.lbal_addr); 515 iowrite8(rc, ap->ioaddr.lbal_addr);
514 516
515 /* Exit command mode */ 517 /* Exit command mode */
516 outb(0x83, ap->ioaddr.nsect_addr); 518 iowrite8(0x83, ap->ioaddr.nsect_addr);
517} 519}
518 520
519 521
@@ -532,14 +534,14 @@ static struct ata_port_operations opti82c611a_port_ops = {
532 .qc_prep = ata_qc_prep, 534 .qc_prep = ata_qc_prep,
533 .qc_issue = ata_qc_issue_prot, 535 .qc_issue = ata_qc_issue_prot,
534 536
535 .data_xfer = ata_pio_data_xfer, 537 .data_xfer = ata_data_xfer,
536 538
537 .irq_handler = ata_interrupt, 539 .irq_handler = ata_interrupt,
538 .irq_clear = ata_bmdma_irq_clear, 540 .irq_clear = ata_bmdma_irq_clear,
541 .irq_on = ata_irq_on,
542 .irq_ack = ata_irq_ack,
539 543
540 .port_start = ata_port_start, 544 .port_start = ata_port_start,
541 .port_stop = ata_port_stop,
542 .host_stop = ata_host_stop
543}; 545};
544 546
545/* 547/*
@@ -563,9 +565,9 @@ static void opti82c46x_set_piomode(struct ata_port *ap, struct ata_device *adev)
563 sysclk = opti_syscfg(0xAC) & 0xC0; /* BIOS set */ 565 sysclk = opti_syscfg(0xAC) & 0xC0; /* BIOS set */
564 566
565 /* Enter configuration mode */ 567 /* Enter configuration mode */
566 inw(ap->ioaddr.error_addr); 568 ioread16(ap->ioaddr.error_addr);
567 inw(ap->ioaddr.error_addr); 569 ioread16(ap->ioaddr.error_addr);
568 outb(3, ap->ioaddr.nsect_addr); 570 iowrite8(3, ap->ioaddr.nsect_addr);
569 571
570 /* Read VLB clock strapping */ 572 /* Read VLB clock strapping */
571 clock = 1000000000 / khz[sysclk]; 573 clock = 1000000000 / khz[sysclk];
@@ -586,33 +588,33 @@ static void opti82c46x_set_piomode(struct ata_port *ap, struct ata_device *adev)
586 setup = FIT(t.setup, 1, 4) - 1; 588 setup = FIT(t.setup, 1, 4) - 1;
587 589
588 /* Select the right timing bank for write timing */ 590 /* Select the right timing bank for write timing */
589 rc = inb(ap->ioaddr.lbal_addr); 591 rc = ioread8(ap->ioaddr.lbal_addr);
590 rc &= 0x7F; 592 rc &= 0x7F;
591 rc |= (adev->devno << 7); 593 rc |= (adev->devno << 7);
592 outb(rc, ap->ioaddr.lbal_addr); 594 iowrite8(rc, ap->ioaddr.lbal_addr);
593 595
594 /* Write the timings */ 596 /* Write the timings */
595 outb(active << 4 | recover, ap->ioaddr.error_addr); 597 iowrite8(active << 4 | recover, ap->ioaddr.error_addr);
596 598
597 /* Select the right bank for read timings, also 599 /* Select the right bank for read timings, also
598 load the shared timings for address */ 600 load the shared timings for address */
599 rc = inb(ap->ioaddr.device_addr); 601 rc = ioread8(ap->ioaddr.device_addr);
600 rc &= 0xC0; 602 rc &= 0xC0;
601 rc |= adev->devno; /* Index select */ 603 rc |= adev->devno; /* Index select */
602 rc |= (setup << 4) | 0x04; 604 rc |= (setup << 4) | 0x04;
603 outb(rc, ap->ioaddr.device_addr); 605 iowrite8(rc, ap->ioaddr.device_addr);
604 606
605 /* Load the read timings */ 607 /* Load the read timings */
606 outb(active << 4 | recover, ap->ioaddr.data_addr); 608 iowrite8(active << 4 | recover, ap->ioaddr.data_addr);
607 609
608 /* Ensure the timing register mode is right */ 610 /* Ensure the timing register mode is right */
609 rc = inb (ap->ioaddr.lbal_addr); 611 rc = ioread8(ap->ioaddr.lbal_addr);
610 rc &= 0x73; 612 rc &= 0x73;
611 rc |= 0x84; 613 rc |= 0x84;
612 outb(rc, ap->ioaddr.lbal_addr); 614 iowrite8(rc, ap->ioaddr.lbal_addr);
613 615
614 /* Exit command mode */ 616 /* Exit command mode */
615 outb(0x83, ap->ioaddr.nsect_addr); 617 iowrite8(0x83, ap->ioaddr.nsect_addr);
616 618
617 /* We need to know this for quad device on the MVB */ 619 /* We need to know this for quad device on the MVB */
618 ap->host->private_data = ap; 620 ap->host->private_data = ap;
@@ -662,14 +664,14 @@ static struct ata_port_operations opti82c46x_port_ops = {
662 .qc_prep = ata_qc_prep, 664 .qc_prep = ata_qc_prep,
663 .qc_issue = opti82c46x_qc_issue_prot, 665 .qc_issue = opti82c46x_qc_issue_prot,
664 666
665 .data_xfer = ata_pio_data_xfer, 667 .data_xfer = ata_data_xfer,
666 668
667 .irq_handler = ata_interrupt, 669 .irq_handler = ata_interrupt,
668 .irq_clear = ata_bmdma_irq_clear, 670 .irq_clear = ata_bmdma_irq_clear,
671 .irq_on = ata_irq_on,
672 .irq_ack = ata_irq_ack,
669 673
670 .port_start = ata_port_start, 674 .port_start = ata_port_start,
671 .port_stop = ata_port_stop,
672 .host_stop = ata_host_stop
673}; 675};
674 676
675 677
@@ -689,21 +691,26 @@ static __init int legacy_init_one(int port, unsigned long io, unsigned long ctrl
689 struct legacy_data *ld = &legacy_data[nr_legacy_host]; 691 struct legacy_data *ld = &legacy_data[nr_legacy_host];
690 struct ata_probe_ent ae; 692 struct ata_probe_ent ae;
691 struct platform_device *pdev; 693 struct platform_device *pdev;
692 int ret = -EBUSY;
693 struct ata_port_operations *ops = &legacy_port_ops; 694 struct ata_port_operations *ops = &legacy_port_ops;
695 void __iomem *io_addr, *ctrl_addr;
694 int pio_modes = pio_mask; 696 int pio_modes = pio_mask;
695 u32 mask = (1 << port); 697 u32 mask = (1 << port);
696 698 int ret;
697 if (request_region(io, 8, "pata_legacy") == NULL)
698 return -EBUSY;
699 if (request_region(ctrl, 1, "pata_legacy") == NULL)
700 goto fail_io;
701 699
702 pdev = platform_device_register_simple(DRV_NAME, nr_legacy_host, NULL, 0); 700 pdev = platform_device_register_simple(DRV_NAME, nr_legacy_host, NULL, 0);
703 if (IS_ERR(pdev)) { 701 if (IS_ERR(pdev))
704 ret = PTR_ERR(pdev); 702 return PTR_ERR(pdev);
705 goto fail_dev; 703
706 } 704 ret = -EBUSY;
705 if (devm_request_region(&pdev->dev, io, 8, "pata_legacy") == NULL ||
706 devm_request_region(&pdev->dev, ctrl, 1, "pata_legacy") == NULL)
707 goto fail;
708
709 ret = -ENOMEM;
710 io_addr = devm_ioport_map(&pdev->dev, io, 8);
711 ctrl_addr = devm_ioport_map(&pdev->dev, ctrl, 1);
712 if (!io_addr || !ctrl_addr)
713 goto fail;
707 714
708 if (ht6560a & mask) { 715 if (ht6560a & mask) {
709 ops = &ht6560a_port_ops; 716 ops = &ht6560a_port_ops;
@@ -770,27 +777,22 @@ static __init int legacy_init_one(int port, unsigned long io, unsigned long ctrl
770 ae.irq = irq; 777 ae.irq = irq;
771 ae.irq_flags = 0; 778 ae.irq_flags = 0;
772 ae.port_flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST; 779 ae.port_flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST;
773 ae.port[0].cmd_addr = io; 780 ae.port[0].cmd_addr = io_addr;
774 ae.port[0].altstatus_addr = ctrl; 781 ae.port[0].altstatus_addr = ctrl_addr;
775 ae.port[0].ctl_addr = ctrl; 782 ae.port[0].ctl_addr = ctrl_addr;
776 ata_std_ports(&ae.port[0]); 783 ata_std_ports(&ae.port[0]);
777 ae.private_data = ld; 784 ae.private_data = ld;
778 785
779 ret = ata_device_add(&ae); 786 ret = -ENODEV;
780 if (ret == 0) { 787 if (!ata_device_add(&ae))
781 ret = -ENODEV;
782 goto fail; 788 goto fail;
783 } 789
784 legacy_host[nr_legacy_host++] = dev_get_drvdata(&pdev->dev); 790 legacy_host[nr_legacy_host++] = dev_get_drvdata(&pdev->dev);
785 ld->platform_dev = pdev; 791 ld->platform_dev = pdev;
786 return 0; 792 return 0;
787 793
788fail: 794fail:
789 platform_device_unregister(pdev); 795 platform_device_unregister(pdev);
790fail_dev:
791 release_region(ctrl, 1);
792fail_io:
793 release_region(io, 8);
794 return ret; 796 return ret;
795} 797}
796 798
@@ -923,15 +925,11 @@ static __exit void legacy_exit(void)
923 925
924 for (i = 0; i < nr_legacy_host; i++) { 926 for (i = 0; i < nr_legacy_host; i++) {
925 struct legacy_data *ld = &legacy_data[i]; 927 struct legacy_data *ld = &legacy_data[i];
926 struct ata_port *ap =legacy_host[i]->ports[0]; 928
927 unsigned long io = ap->ioaddr.cmd_addr; 929 ata_host_detach(legacy_host[i]);
928 unsigned long ctrl = ap->ioaddr.ctl_addr;
929 ata_host_remove(legacy_host[i]);
930 platform_device_unregister(ld->platform_dev); 930 platform_device_unregister(ld->platform_dev);
931 if (ld->timing) 931 if (ld->timing)
932 release_region(ld->timing, 2); 932 release_region(ld->timing, 2);
933 release_region(io, 8);
934 release_region(ctrl, 1);
935 } 933 }
936} 934}
937 935
diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
index 1c810ea00253..13a70ac6f1dc 100644
--- a/drivers/ata/pata_marvell.c
+++ b/drivers/ata/pata_marvell.c
@@ -45,10 +45,10 @@ static int marvell_pre_reset(struct ata_port *ap)
45 for(i = 0; i <= 0x0F; i++) 45 for(i = 0; i <= 0x0F; i++)
46 printk("%02X:%02X ", i, readb(barp + i)); 46 printk("%02X:%02X ", i, readb(barp + i));
47 printk("\n"); 47 printk("\n");
48 48
49 devices = readl(barp + 0x0C); 49 devices = readl(barp + 0x0C);
50 pci_iounmap(pdev, barp); 50 pci_iounmap(pdev, barp);
51 51
52 if ((pdev->device == 0x6145) && (ap->port_no == 0) && 52 if ((pdev->device == 0x6145) && (ap->port_no == 0) &&
53 (!(devices & 0x10))) /* PATA enable ? */ 53 (!(devices & 0x10))) /* PATA enable ? */
54 return -ENOENT; 54 return -ENOENT;
@@ -57,7 +57,7 @@ static int marvell_pre_reset(struct ata_port *ap)
57 switch(ap->port_no) 57 switch(ap->port_no)
58 { 58 {
59 case 0: 59 case 0:
60 if (inb(ap->ioaddr.bmdma_addr + 1) & 1) 60 if (ioread8(ap->ioaddr.bmdma_addr + 1) & 1)
61 ap->cbl = ATA_CBL_PATA40; 61 ap->cbl = ATA_CBL_PATA40;
62 else 62 else
63 ap->cbl = ATA_CBL_PATA80; 63 ap->cbl = ATA_CBL_PATA80;
@@ -129,16 +129,16 @@ static const struct ata_port_operations marvell_ops = {
129 .bmdma_status = ata_bmdma_status, 129 .bmdma_status = ata_bmdma_status,
130 .qc_prep = ata_qc_prep, 130 .qc_prep = ata_qc_prep,
131 .qc_issue = ata_qc_issue_prot, 131 .qc_issue = ata_qc_issue_prot,
132 .data_xfer = ata_pio_data_xfer, 132 .data_xfer = ata_data_xfer,
133 133
134 /* Timeout handling */ 134 /* Timeout handling */
135 .irq_handler = ata_interrupt, 135 .irq_handler = ata_interrupt,
136 .irq_clear = ata_bmdma_irq_clear, 136 .irq_clear = ata_bmdma_irq_clear,
137 .irq_on = ata_irq_on,
138 .irq_ack = ata_irq_ack,
137 139
138 /* Generic PATA PCI ATA helpers */ 140 /* Generic PATA PCI ATA helpers */
139 .port_start = ata_port_start, 141 .port_start = ata_port_start,
140 .port_stop = ata_port_stop,
141 .host_stop = ata_host_stop,
142}; 142};
143 143
144 144
diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
new file mode 100644
index 000000000000..d7378df44970
--- /dev/null
+++ b/drivers/ata/pata_mpc52xx.c
@@ -0,0 +1,538 @@
1/*
2 * drivers/ata/pata_mpc52xx.c
3 *
4 * libata driver for the Freescale MPC52xx on-chip IDE interface
5 *
6 * Copyright (C) 2006 Sylvain Munaut <tnt@246tNt.com>
7 * Copyright (C) 2003 Mipsys - Benjamin Herrenschmidt
8 *
9 * This file is licensed under the terms of the GNU General Public License
10 * version 2. This program is licensed "as is" without any warranty of any
11 * kind, whether express or implied.
12 */
13
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/slab.h>
17#include <linux/delay.h>
18#include <linux/libata.h>
19
20#include <asm/types.h>
21#include <asm/prom.h>
22#include <asm/of_platform.h>
23#include <asm/mpc52xx.h>
24
25
26#define DRV_NAME "mpc52xx_ata"
27#define DRV_VERSION "0.1.0"
28
29
30/* Private structures used by the driver */
31struct mpc52xx_ata_timings {
32 u32 pio1;
33 u32 pio2;
34};
35
36struct mpc52xx_ata_priv {
37 unsigned int ipb_period;
 38 struct mpc52xx_ata __iomem *ata_regs;
39 int ata_irq;
40 struct mpc52xx_ata_timings timings[2];
41 int csel;
42};
43
44
45/* ATAPI-4 PIO specs (in ns) */
46static const int ataspec_t0[5] = {600, 383, 240, 180, 120};
47static const int ataspec_t1[5] = { 70, 50, 30, 30, 25};
48static const int ataspec_t2_8[5] = {290, 290, 290, 80, 70};
49static const int ataspec_t2_16[5] = {165, 125, 100, 80, 70};
50static const int ataspec_t2i[5] = { 0, 0, 0, 70, 25};
51static const int ataspec_t4[5] = { 30, 20, 15, 10, 10};
52static const int ataspec_ta[5] = { 35, 35, 35, 35, 35};
53
54#define CALC_CLKCYC(c,v) ((((v)+(c)-1)/(c)))
55
56
57/* Bit definitions inside the registers */
58#define MPC52xx_ATA_HOSTCONF_SMR 0x80000000UL /* State machine reset */
59#define MPC52xx_ATA_HOSTCONF_FR 0x40000000UL /* FIFO Reset */
60#define MPC52xx_ATA_HOSTCONF_IE 0x02000000UL /* Enable interrupt in PIO */
61#define MPC52xx_ATA_HOSTCONF_IORDY 0x01000000UL /* Drive supports IORDY protocol */
62
63#define MPC52xx_ATA_HOSTSTAT_TIP 0x80000000UL /* Transaction in progress */
64#define MPC52xx_ATA_HOSTSTAT_UREP 0x40000000UL /* UDMA Read Extended Pause */
65#define MPC52xx_ATA_HOSTSTAT_RERR 0x02000000UL /* Read Error */
66#define MPC52xx_ATA_HOSTSTAT_WERR 0x01000000UL /* Write Error */
67
68#define MPC52xx_ATA_FIFOSTAT_EMPTY 0x01 /* FIFO Empty */
69
70#define MPC52xx_ATA_DMAMODE_WRITE 0x01 /* Write DMA */
71#define MPC52xx_ATA_DMAMODE_READ 0x02 /* Read DMA */
72#define MPC52xx_ATA_DMAMODE_UDMA 0x04 /* UDMA enabled */
73#define MPC52xx_ATA_DMAMODE_IE 0x08 /* Enable drive interrupt to CPU in DMA mode */
74#define MPC52xx_ATA_DMAMODE_FE 0x10 /* FIFO Flush enable in Rx mode */
75#define MPC52xx_ATA_DMAMODE_FR 0x20 /* FIFO Reset */
76#define MPC52xx_ATA_DMAMODE_HUT 0x40 /* Host UDMA burst terminate */
77
78
79/* Structure of the hardware registers */
80struct mpc52xx_ata {
81
82 /* Host interface registers */
83 u32 config; /* ATA + 0x00 Host configuration */
84 u32 host_status; /* ATA + 0x04 Host controller status */
85 u32 pio1; /* ATA + 0x08 PIO Timing 1 */
86 u32 pio2; /* ATA + 0x0c PIO Timing 2 */
87 u32 mdma1; /* ATA + 0x10 MDMA Timing 1 */
88 u32 mdma2; /* ATA + 0x14 MDMA Timing 2 */
89 u32 udma1; /* ATA + 0x18 UDMA Timing 1 */
90 u32 udma2; /* ATA + 0x1c UDMA Timing 2 */
91 u32 udma3; /* ATA + 0x20 UDMA Timing 3 */
92 u32 udma4; /* ATA + 0x24 UDMA Timing 4 */
93 u32 udma5; /* ATA + 0x28 UDMA Timing 5 */
94 u32 share_cnt; /* ATA + 0x2c ATA share counter */
95 u32 reserved0[3];
96
97 /* FIFO registers */
98 u32 fifo_data; /* ATA + 0x3c */
99 u8 fifo_status_frame; /* ATA + 0x40 */
100 u8 fifo_status; /* ATA + 0x41 */
101 u16 reserved7[1];
102 u8 fifo_control; /* ATA + 0x44 */
103 u8 reserved8[5];
104 u16 fifo_alarm; /* ATA + 0x4a */
105 u16 reserved9;
106 u16 fifo_rdp; /* ATA + 0x4e */
107 u16 reserved10;
108 u16 fifo_wrp; /* ATA + 0x52 */
109 u16 reserved11;
110 u16 fifo_lfrdp; /* ATA + 0x56 */
111 u16 reserved12;
112 u16 fifo_lfwrp; /* ATA + 0x5a */
113
114 /* Drive TaskFile registers */
115 u8 tf_control; /* ATA + 0x5c TASKFILE Control/Alt Status */
116 u8 reserved13[3];
117 u16 tf_data; /* ATA + 0x60 TASKFILE Data */
118 u16 reserved14;
119 u8 tf_features; /* ATA + 0x64 TASKFILE Features/Error */
120 u8 reserved15[3];
121 u8 tf_sec_count; /* ATA + 0x68 TASKFILE Sector Count */
122 u8 reserved16[3];
123 u8 tf_sec_num; /* ATA + 0x6c TASKFILE Sector Number */
124 u8 reserved17[3];
125 u8 tf_cyl_low; /* ATA + 0x70 TASKFILE Cylinder Low */
126 u8 reserved18[3];
127 u8 tf_cyl_high; /* ATA + 0x74 TASKFILE Cylinder High */
128 u8 reserved19[3];
129 u8 tf_dev_head; /* ATA + 0x78 TASKFILE Device/Head */
130 u8 reserved20[3];
131 u8 tf_command; /* ATA + 0x7c TASKFILE Command/Status */
132 u8 dma_mode; /* ATA + 0x7d ATA Host DMA Mode configuration */
133 u8 reserved21[2];
134};
135
136
137/* ======================================================================== */
138/* Aux fns */
139/* ======================================================================== */
140
141
142/* MPC52xx low level hw control */
143
144static int
145mpc52xx_ata_compute_pio_timings(struct mpc52xx_ata_priv *priv, int dev, int pio)
146{
147 struct mpc52xx_ata_timings *timing = &priv->timings[dev];
148 unsigned int ipb_period = priv->ipb_period;
149 unsigned int t0, t1, t2_8, t2_16, t2i, t4, ta;
150
 151 if ((pio < 0) || (pio > 4))
152 return -EINVAL;
153
154 t0 = CALC_CLKCYC(ipb_period, 1000 * ataspec_t0[pio]);
155 t1 = CALC_CLKCYC(ipb_period, 1000 * ataspec_t1[pio]);
156 t2_8 = CALC_CLKCYC(ipb_period, 1000 * ataspec_t2_8[pio]);
157 t2_16 = CALC_CLKCYC(ipb_period, 1000 * ataspec_t2_16[pio]);
158 t2i = CALC_CLKCYC(ipb_period, 1000 * ataspec_t2i[pio]);
159 t4 = CALC_CLKCYC(ipb_period, 1000 * ataspec_t4[pio]);
160 ta = CALC_CLKCYC(ipb_period, 1000 * ataspec_ta[pio]);
161
162 timing->pio1 = (t0 << 24) | (t2_8 << 16) | (t2_16 << 8) | (t2i);
163 timing->pio2 = (t4 << 24) | (t1 << 16) | (ta << 8);
164
165 return 0;
166}
167
168static void
169mpc52xx_ata_apply_timings(struct mpc52xx_ata_priv *priv, int device)
170{
171 struct mpc52xx_ata __iomem *regs = priv->ata_regs;
172 struct mpc52xx_ata_timings *timing = &priv->timings[device];
173
174 out_be32(&regs->pio1, timing->pio1);
175 out_be32(&regs->pio2, timing->pio2);
176 out_be32(&regs->mdma1, 0);
177 out_be32(&regs->mdma2, 0);
178 out_be32(&regs->udma1, 0);
179 out_be32(&regs->udma2, 0);
180 out_be32(&regs->udma3, 0);
181 out_be32(&regs->udma4, 0);
182 out_be32(&regs->udma5, 0);
183
184 priv->csel = device;
185}
186
187static int
188mpc52xx_ata_hw_init(struct mpc52xx_ata_priv *priv)
189{
190 struct mpc52xx_ata __iomem *regs = priv->ata_regs;
191 int tslot;
192
 193 /* Clear share_cnt (all the sample code does this ...) */
194 out_be32(&regs->share_cnt, 0);
195
196 /* Configure and reset host */
197 out_be32(&regs->config,
198 MPC52xx_ATA_HOSTCONF_IE |
199 MPC52xx_ATA_HOSTCONF_IORDY |
200 MPC52xx_ATA_HOSTCONF_SMR |
201 MPC52xx_ATA_HOSTCONF_FR);
202
203 udelay(10);
204
205 out_be32(&regs->config,
206 MPC52xx_ATA_HOSTCONF_IE |
207 MPC52xx_ATA_HOSTCONF_IORDY);
208
209 /* Set the time slot to 1us */
210 tslot = CALC_CLKCYC(priv->ipb_period, 1000000);
 211 out_be32(&regs->share_cnt, tslot << 16);
212
213 /* Init timings to PIO0 */
214 memset(priv->timings, 0x00, 2*sizeof(struct mpc52xx_ata_timings));
215
216 mpc52xx_ata_compute_pio_timings(priv, 0, 0);
217 mpc52xx_ata_compute_pio_timings(priv, 1, 0);
218
219 mpc52xx_ata_apply_timings(priv, 0);
220
221 return 0;
222}
223
224
225/* ======================================================================== */
226/* libata driver */
227/* ======================================================================== */
228
229static void
230mpc52xx_ata_set_piomode(struct ata_port *ap, struct ata_device *adev)
231{
232 struct mpc52xx_ata_priv *priv = ap->host->private_data;
233 int pio, rv;
234
235 pio = adev->pio_mode - XFER_PIO_0;
236
237 rv = mpc52xx_ata_compute_pio_timings(priv, adev->devno, pio);
238
239 if (rv) {
240 printk(KERN_ERR DRV_NAME
241 ": Trying to select invalid PIO mode %d\n", pio);
242 return;
243 }
244
245 mpc52xx_ata_apply_timings(priv, adev->devno);
246}
247static void
248mpc52xx_ata_dev_select(struct ata_port *ap, unsigned int device)
249{
250 struct mpc52xx_ata_priv *priv = ap->host->private_data;
251
252 if (device != priv->csel)
253 mpc52xx_ata_apply_timings(priv, device);
254
 255 ata_std_dev_select(ap, device);
256}
257
258static void
259mpc52xx_ata_error_handler(struct ata_port *ap)
260{
261 ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset, NULL,
262 ata_std_postreset);
263}
264
265
266
267static struct scsi_host_template mpc52xx_ata_sht = {
268 .module = THIS_MODULE,
269 .name = DRV_NAME,
270 .ioctl = ata_scsi_ioctl,
271 .queuecommand = ata_scsi_queuecmd,
272 .can_queue = ATA_DEF_QUEUE,
273 .this_id = ATA_SHT_THIS_ID,
274 .sg_tablesize = LIBATA_MAX_PRD,
275 .max_sectors = ATA_MAX_SECTORS,
276 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
277 .emulated = ATA_SHT_EMULATED,
278 .use_clustering = ATA_SHT_USE_CLUSTERING,
279 .proc_name = DRV_NAME,
280 .dma_boundary = ATA_DMA_BOUNDARY,
281 .slave_configure = ata_scsi_slave_config,
282 .bios_param = ata_std_bios_param,
283};
284
285static struct ata_port_operations mpc52xx_ata_port_ops = {
286 .port_disable = ata_port_disable,
287 .set_piomode = mpc52xx_ata_set_piomode,
288 .dev_select = mpc52xx_ata_dev_select,
289 .tf_load = ata_tf_load,
290 .tf_read = ata_tf_read,
291 .check_status = ata_check_status,
292 .exec_command = ata_exec_command,
293 .freeze = ata_bmdma_freeze,
294 .thaw = ata_bmdma_thaw,
295 .error_handler = mpc52xx_ata_error_handler,
296 .qc_prep = ata_qc_prep,
297 .qc_issue = ata_qc_issue_prot,
298 .data_xfer = ata_data_xfer,
299 .irq_handler = ata_interrupt,
300 .irq_clear = ata_bmdma_irq_clear,
301 .irq_on = ata_irq_on,
302 .irq_ack = ata_irq_ack,
303 .port_start = ata_port_start,
304};
305
306static struct ata_probe_ent mpc52xx_ata_probe_ent = {
307 .port_ops = &mpc52xx_ata_port_ops,
308 .sht = &mpc52xx_ata_sht,
309 .n_ports = 1,
310 .pio_mask = 0x1f, /* Up to PIO4 */
311 .mwdma_mask = 0x00, /* No MWDMA */
312 .udma_mask = 0x00, /* No UDMA */
313 .port_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
314 .irq_flags = 0,
315};
316
317static int __devinit
318mpc52xx_ata_init_one(struct device *dev, struct mpc52xx_ata_priv *priv)
319{
320 struct ata_probe_ent *ae = &mpc52xx_ata_probe_ent;
321 struct ata_ioports *aio = &ae->port[0];
322 int rv;
323
324 INIT_LIST_HEAD(&ae->node);
325 ae->dev = dev;
326 ae->irq = priv->ata_irq;
327
328 aio->cmd_addr = 0; /* Don't have a classic reg block */
329 aio->altstatus_addr = &priv->ata_regs->tf_control;
330 aio->ctl_addr = &priv->ata_regs->tf_control;
331 aio->data_addr = &priv->ata_regs->tf_data;
332 aio->error_addr = &priv->ata_regs->tf_features;
333 aio->feature_addr = &priv->ata_regs->tf_features;
334 aio->nsect_addr = &priv->ata_regs->tf_sec_count;
335 aio->lbal_addr = &priv->ata_regs->tf_sec_num;
336 aio->lbam_addr = &priv->ata_regs->tf_cyl_low;
337 aio->lbah_addr = &priv->ata_regs->tf_cyl_high;
338 aio->device_addr = &priv->ata_regs->tf_dev_head;
339 aio->status_addr = &priv->ata_regs->tf_command;
340 aio->command_addr = &priv->ata_regs->tf_command;
341
342 ae->private_data = priv;
343
344 rv = ata_device_add(ae);
345
346 return rv ? 0 : -EINVAL;
347}
348
349static struct mpc52xx_ata_priv *
350mpc52xx_ata_remove_one(struct device *dev)
351{
352 struct ata_host *host = dev_get_drvdata(dev);
353 struct mpc52xx_ata_priv *priv = host->private_data;
354
355 ata_host_detach(host);
356
357 return priv;
358}
359
360
361/* ======================================================================== */
362/* OF Platform driver */
363/* ======================================================================== */
364
365static int __devinit
366mpc52xx_ata_probe(struct of_device *op, const struct of_device_id *match)
367{
368 unsigned int ipb_freq;
369 struct resource res_mem;
370 int ata_irq = NO_IRQ;
371 struct mpc52xx_ata __iomem *ata_regs;
372 struct mpc52xx_ata_priv *priv;
373 int rv;
374
375 /* Get ipb frequency */
376 ipb_freq = mpc52xx_find_ipb_freq(op->node);
377 if (!ipb_freq) {
378 printk(KERN_ERR DRV_NAME ": "
379 "Unable to find IPB Bus frequency\n" );
380 return -ENODEV;
381 }
382
383 /* Get IRQ and register */
384 rv = of_address_to_resource(op->node, 0, &res_mem);
385 if (rv) {
386 printk(KERN_ERR DRV_NAME ": "
387 "Error while parsing device node resource\n" );
388 return rv;
389 }
390
391 ata_irq = irq_of_parse_and_map(op->node, 0);
392 if (ata_irq == NO_IRQ) {
393 printk(KERN_ERR DRV_NAME ": "
394 "Error while mapping the irq\n");
395 return -EINVAL;
396 }
397
398 /* Request mem region */
399 if (!devm_request_mem_region(&op->dev, res_mem.start,
400 sizeof(struct mpc52xx_ata), DRV_NAME)) {
401 printk(KERN_ERR DRV_NAME ": "
402 "Error while requesting mem region\n");
403 rv = -EBUSY;
404 goto err;
405 }
406
407 /* Remap registers */
408 ata_regs = devm_ioremap(&op->dev, res_mem.start,
409 sizeof(struct mpc52xx_ata));
410 if (!ata_regs) {
411 printk(KERN_ERR DRV_NAME ": "
412 "Error while mapping register set\n");
413 rv = -ENOMEM;
414 goto err;
415 }
416
417 /* Prepare our private structure */
418 priv = devm_kzalloc(&op->dev, sizeof(struct mpc52xx_ata_priv),
419 GFP_ATOMIC);
420 if (!priv) {
421 printk(KERN_ERR DRV_NAME ": "
422 "Error while allocating private structure\n");
423 rv = -ENOMEM;
424 goto err;
425 }
426
427 priv->ipb_period = 1000000000 / (ipb_freq / 1000);
428 priv->ata_regs = ata_regs;
429 priv->ata_irq = ata_irq;
430 priv->csel = -1;
431
432 /* Init the hw */
433 rv = mpc52xx_ata_hw_init(priv);
434 if (rv) {
435 printk(KERN_ERR DRV_NAME ": Error during HW init\n");
436 goto err;
437 }
438
439 /* Register ourselves to libata */
440 rv = mpc52xx_ata_init_one(&op->dev, priv);
441 if (rv) {
442 printk(KERN_ERR DRV_NAME ": "
443 "Error while registering to ATA layer\n");
444 return rv;
445 }
446
447 /* Done */
448 return 0;
449
450 /* Error path */
451err:
452 irq_dispose_mapping(ata_irq);
453 return rv;
454}
455
456static int
457mpc52xx_ata_remove(struct of_device *op)
458{
459 struct mpc52xx_ata_priv *priv;
460
461 priv = mpc52xx_ata_remove_one(&op->dev);
462 irq_dispose_mapping(priv->ata_irq);
463
464 return 0;
465}
466
467
468#ifdef CONFIG_PM
469
470static int
471mpc52xx_ata_suspend(struct of_device *op, pm_message_t state)
472{
 473 return 0; /* FIXME: What to do here? */
474}
475
476static int
477mpc52xx_ata_resume(struct of_device *op)
478{
 479 return 0; /* FIXME: What to do here? */
480}
481
482#endif
483
484
485static struct of_device_id mpc52xx_ata_of_match[] = {
486 {
487 .compatible = "mpc5200-ata",
488 },
489 {
490 .compatible = "mpc52xx-ata",
491 },
492 {},
493};
494
495
496static struct of_platform_driver mpc52xx_ata_of_platform_driver = {
497 .owner = THIS_MODULE,
498 .name = DRV_NAME,
499 .match_table = mpc52xx_ata_of_match,
500 .probe = mpc52xx_ata_probe,
501 .remove = mpc52xx_ata_remove,
502#ifdef CONFIG_PM
503 .suspend = mpc52xx_ata_suspend,
504 .resume = mpc52xx_ata_resume,
505#endif
506 .driver = {
507 .name = DRV_NAME,
508 .owner = THIS_MODULE,
509 },
510};
511
512
513/* ======================================================================== */
514/* Module */
515/* ======================================================================== */
516
517static int __init
518mpc52xx_ata_init(void)
519{
520 printk(KERN_INFO "ata: MPC52xx IDE/ATA libata driver\n");
521 return of_register_platform_driver(&mpc52xx_ata_of_platform_driver);
522}
523
524static void __exit
525mpc52xx_ata_exit(void)
526{
527 of_unregister_platform_driver(&mpc52xx_ata_of_platform_driver);
528}
529
530module_init(mpc52xx_ata_init);
531module_exit(mpc52xx_ata_exit);
532
533MODULE_AUTHOR("Sylvain Munaut <tnt@246tNt.com>");
534MODULE_DESCRIPTION("Freescale MPC52xx IDE/ATA libata driver");
535MODULE_LICENSE("GPL");
536MODULE_DEVICE_TABLE(of, mpc52xx_ata_of_match);
537MODULE_VERSION(DRV_VERSION);
538
diff --git a/drivers/ata/pata_mpiix.c b/drivers/ata/pata_mpiix.c
index 4ccca938675e..ca8c965179b1 100644
--- a/drivers/ata/pata_mpiix.c
+++ b/drivers/ata/pata_mpiix.c
@@ -35,7 +35,7 @@
35#include <linux/libata.h> 35#include <linux/libata.h>
36 36
37#define DRV_NAME "pata_mpiix" 37#define DRV_NAME "pata_mpiix"
38#define DRV_VERSION "0.7.3" 38#define DRV_VERSION "0.7.5"
39 39
40enum { 40enum {
41 IDETIM = 0x6C, /* IDE control register */ 41 IDETIM = 0x6C, /* IDE control register */
@@ -49,12 +49,9 @@ enum {
49static int mpiix_pre_reset(struct ata_port *ap) 49static int mpiix_pre_reset(struct ata_port *ap)
50{ 50{
51 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 51 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
52 static const struct pci_bits mpiix_enable_bits[] = { 52 static const struct pci_bits mpiix_enable_bits = { 0x6D, 1, 0x80, 0x80 };
53 { 0x6D, 1, 0x80, 0x80 },
54 { 0x6F, 1, 0x80, 0x80 }
55 };
56 53
57 if (!pci_test_config_bits(pdev, &mpiix_enable_bits[ap->port_no])) 54 if (!pci_test_config_bits(pdev, &mpiix_enable_bits))
58 return -ENOENT; 55 return -ENOENT;
59 ap->cbl = ATA_CBL_PATA40; 56 ap->cbl = ATA_CBL_PATA40;
60 return ata_std_prereset(ap); 57 return ata_std_prereset(ap);
@@ -80,8 +77,8 @@ static void mpiix_error_handler(struct ata_port *ap)
80 * @adev: ATA device 77 * @adev: ATA device
81 * 78 *
82 * Called to do the PIO mode setup. The MPIIX allows us to program the 79 * Called to do the PIO mode setup. The MPIIX allows us to program the
83 * IORDY sample point (2-5 clocks), recovery 1-4 clocks and whether 80 * IORDY sample point (2-5 clocks), recovery (1-4 clocks) and whether
84 * prefetching or iordy are used. 81 * prefetching or IORDY are used.
85 * 82 *
86 * This would get very ugly because we can only program timing for one 83 * This would get very ugly because we can only program timing for one
87 * device at a time, the other gets PIO0. Fortunately libata calls 84 * device at a time, the other gets PIO0. Fortunately libata calls
@@ -103,18 +100,19 @@ static void mpiix_set_piomode(struct ata_port *ap, struct ata_device *adev)
103 { 2, 3 }, }; 100 { 2, 3 }, };
104 101
105 pci_read_config_word(pdev, IDETIM, &idetim); 102 pci_read_config_word(pdev, IDETIM, &idetim);
106 /* Mask the IORDY/TIME/PPE0 bank for this device */ 103
104 /* Mask the IORDY/TIME/PPE for this device */
107 if (adev->class == ATA_DEV_ATA) 105 if (adev->class == ATA_DEV_ATA)
108 control |= PPE; /* PPE enable for disk */ 106 control |= PPE; /* Enable prefetch/posting for disk */
109 if (ata_pio_need_iordy(adev)) 107 if (ata_pio_need_iordy(adev))
110 control |= IORDY; /* IORDY */ 108 control |= IORDY;
111 if (pio > 0) 109 if (pio > 1)
112 control |= FTIM; /* This drive is on the fast timing bank */ 110 control |= FTIM; /* This drive is on the fast timing bank */
113 111
114 /* Mask out timing and clear both TIME bank selects */ 112 /* Mask out timing and clear both TIME bank selects */
115 idetim &= 0xCCEE; 113 idetim &= 0xCCEE;
116 idetim &= ~(0x07 << (2 * adev->devno)); 114 idetim &= ~(0x07 << (4 * adev->devno));
117 idetim |= (control << (2 * adev->devno)); 115 idetim |= control << (4 * adev->devno);
118 116
119 idetim |= (timings[pio][0] << 12) | (timings[pio][1] << 8); 117 idetim |= (timings[pio][0] << 12) | (timings[pio][1] << 8);
120 pci_write_config_word(pdev, IDETIM, idetim); 118 pci_write_config_word(pdev, IDETIM, idetim);
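On the timing math above: IDETIM is one 16-bit register per channel; the upper bits carry the shared IORDY sample and recovery counts ((timings[pio][0] << 12) | (timings[pio][1] << 8)) while each drive owns a control nibble selected by 4 * devno, so the change from 2 * devno puts drive 1's bits in their own nibble instead of overlapping drive 0's. A condensed sketch of the composition; is_disk, needs_iordy, sample and recovery are placeholders:

        u16 idetim;
        u8 control = 0;

        pci_read_config_word(pdev, IDETIM, &idetim);

        if (is_disk)
                control |= PPE;         /* prefetch/posting, disks only */
        if (needs_iordy)
                control |= IORDY;
        if (pio > 1)
                control |= FTIM;        /* fast bank; PIO0/1 use the slow one */

        idetim &= 0xCCEE;                       /* clear timing + bank selects */
        idetim &= ~(0x07 << (4 * adev->devno)); /* clear this drive's nibble */
        idetim |= control << (4 * adev->devno);
        idetim |= (sample << 12) | (recovery << 8);

        pci_write_config_word(pdev, IDETIM, idetim);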
@@ -188,23 +186,24 @@ static struct ata_port_operations mpiix_port_ops = {
188 186
189 .qc_prep = ata_qc_prep, 187 .qc_prep = ata_qc_prep,
190 .qc_issue = mpiix_qc_issue_prot, 188 .qc_issue = mpiix_qc_issue_prot,
191 .data_xfer = ata_pio_data_xfer, 189 .data_xfer = ata_data_xfer,
192 190
193 .irq_handler = ata_interrupt, 191 .irq_handler = ata_interrupt,
194 .irq_clear = ata_bmdma_irq_clear, 192 .irq_clear = ata_bmdma_irq_clear,
193 .irq_on = ata_irq_on,
194 .irq_ack = ata_irq_ack,
195 195
196 .port_start = ata_port_start, 196 .port_start = ata_port_start,
197 .port_stop = ata_port_stop,
198 .host_stop = ata_host_stop
199}; 197};
200 198
201static int mpiix_init_one(struct pci_dev *dev, const struct pci_device_id *id) 199static int mpiix_init_one(struct pci_dev *dev, const struct pci_device_id *id)
202{ 200{
203 /* Single threaded by the PCI probe logic */ 201 /* Single threaded by the PCI probe logic */
204 static struct ata_probe_ent probe[2]; 202 static struct ata_probe_ent probe;
205 static int printed_version; 203 static int printed_version;
204 void __iomem *cmd_addr, *ctl_addr;
206 u16 idetim; 205 u16 idetim;
207 int enabled; 206 int irq;
208 207
209 if (!printed_version++) 208 if (!printed_version++)
210 dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n"); 209 dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n");
@@ -217,65 +216,49 @@ static int mpiix_init_one(struct pci_dev *dev, const struct pci_device_id *id)
217 if (!(idetim & ENABLED)) 216 if (!(idetim & ENABLED))
218 return -ENODEV; 217 return -ENODEV;
219 218
219 /* See whether it's the primary or secondary channel... */
220 if (!(idetim & SECONDARY)) {
221 irq = 14;
222 cmd_addr = devm_ioport_map(&dev->dev, 0x1F0, 8);
223 ctl_addr = devm_ioport_map(&dev->dev, 0x3F6, 1);
224 } else {
225 irq = 15;
226 cmd_addr = devm_ioport_map(&dev->dev, 0x170, 8);
227 ctl_addr = devm_ioport_map(&dev->dev, 0x376, 1);
228 }
229
230 if (!cmd_addr || !ctl_addr)
231 return -ENOMEM;
232
220 /* We do our own plumbing to avoid leaking special cases for whacko 233 /* We do our own plumbing to avoid leaking special cases for whacko
221 ancient hardware into the core code. There are two issues to 234 ancient hardware into the core code. There are two issues to
222 worry about. #1 The chip is a bridge, so in legacy mode with 235 worry about. #1 The chip is a bridge, so in legacy mode with
223 no BARs set it fools the setup. #2 If you pci_disable_device 236 no BARs set it fools the setup. #2 If you pci_disable_device
224 the MPIIX your box goes castors up */ 237 the MPIIX your box goes castors up */
225 238
226 INIT_LIST_HEAD(&probe[0].node); 239 INIT_LIST_HEAD(&probe.node);
227 probe[0].dev = pci_dev_to_dev(dev); 240 probe.dev = pci_dev_to_dev(dev);
228 probe[0].port_ops = &mpiix_port_ops; 241 probe.port_ops = &mpiix_port_ops;
229 probe[0].sht = &mpiix_sht; 242 probe.sht = &mpiix_sht;
230 probe[0].pio_mask = 0x1F; 243 probe.pio_mask = 0x1F;
231 probe[0].irq = 14; 244 probe.irq_flags = SA_SHIRQ;
232 probe[0].irq_flags = SA_SHIRQ; 245 probe.port_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST;
233 probe[0].port_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST; 246 probe.n_ports = 1;
234 probe[0].n_ports = 1; 247
235 probe[0].port[0].cmd_addr = 0x1F0; 248 probe.irq = irq;
236 probe[0].port[0].ctl_addr = 0x3F6; 249 probe.port[0].cmd_addr = cmd_addr;
237 probe[0].port[0].altstatus_addr = 0x3F6; 250 probe.port[0].ctl_addr = ctl_addr;
238 251 probe.port[0].altstatus_addr = ctl_addr;
239 /* The secondary lurks at different addresses but is otherwise
240 the same beastie */
241
242 INIT_LIST_HEAD(&probe[1].node);
243 probe[1] = probe[0];
244 probe[1].irq = 15;
245 probe[1].port[0].cmd_addr = 0x170;
246 probe[1].port[0].ctl_addr = 0x376;
247 probe[1].port[0].altstatus_addr = 0x376;
248 252
249 /* Let libata fill in the port details */ 253 /* Let libata fill in the port details */
250 ata_std_ports(&probe[0].port[0]); 254 ata_std_ports(&probe.port[0]);
251 ata_std_ports(&probe[1].port[0]);
252 255
253 /* Now add the port that is active */ 256 /* Now add the port that is active */
254 enabled = (idetim & SECONDARY) ? 1 : 0; 257 if (ata_device_add(&probe))
255
256 if (ata_device_add(&probe[enabled]))
257 return 0; 258 return 0;
258 return -ENODEV; 259 return -ENODEV;
259} 260}
260 261
261/**
262 * mpiix_remove_one - device unload
263 * @pdev: PCI device being removed
264 *
265 * Handle an unplug/unload event for a PCI device. Unload the
266 * PCI driver but do not use the default handler as we *MUST NOT*
267 * disable the device as it has other functions.
268 */
269
270static void __devexit mpiix_remove_one(struct pci_dev *pdev)
271{
272 struct device *dev = pci_dev_to_dev(pdev);
273 struct ata_host *host = dev_get_drvdata(dev);
274
275 ata_host_remove(host);
276 dev_set_drvdata(dev, NULL);
277}
278
279static const struct pci_device_id mpiix[] = { 262static const struct pci_device_id mpiix[] = {
280 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371MX), }, 263 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371MX), },
281 264
@@ -286,7 +269,7 @@ static struct pci_driver mpiix_pci_driver = {
286 .name = DRV_NAME, 269 .name = DRV_NAME,
287 .id_table = mpiix, 270 .id_table = mpiix,
288 .probe = mpiix_init_one, 271 .probe = mpiix_init_one,
289 .remove = mpiix_remove_one, 272 .remove = ata_pci_remove_one,
290 .suspend = ata_pci_device_suspend, 273 .suspend = ata_pci_device_suspend,
291 .resume = ata_pci_device_resume, 274 .resume = ata_pci_device_resume,
292}; 275};
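The recurring pattern in this file, and in the rest of this series, is replacing raw port numbers with iomapped cookies so that one set of accessors (ioread8()/iowrite8() and friends) works whether the port block sits in I/O-port or MMIO space, with the mappings owned by devres. The legacy-channel setup then reduces to a sketch like:

        void __iomem *cmd, *ctl;

        cmd = devm_ioport_map(&pdev->dev, 0x1F0, 8);    /* command block */
        ctl = devm_ioport_map(&pdev->dev, 0x3F6, 1);    /* control/altstatus */
        if (!cmd || !ctl)
                return -ENOMEM;
        /* no unmap on the error path: devres releases both automatically */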
diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c
index cf7fe037471c..e8393e19be47 100644
--- a/drivers/ata/pata_netcell.c
+++ b/drivers/ata/pata_netcell.c
@@ -89,16 +89,16 @@ static const struct ata_port_operations netcell_ops = {
89 .bmdma_status = ata_bmdma_status, 89 .bmdma_status = ata_bmdma_status,
90 .qc_prep = ata_qc_prep, 90 .qc_prep = ata_qc_prep,
91 .qc_issue = ata_qc_issue_prot, 91 .qc_issue = ata_qc_issue_prot,
92 .data_xfer = ata_pio_data_xfer, 92 .data_xfer = ata_data_xfer,
93 93
94 /* IRQ-related hooks */ 94 /* IRQ-related hooks */
95 .irq_handler = ata_interrupt, 95 .irq_handler = ata_interrupt,
96 .irq_clear = ata_bmdma_irq_clear, 96 .irq_clear = ata_bmdma_irq_clear,
97 .irq_on = ata_irq_on,
98 .irq_ack = ata_irq_ack,
97 99
98 /* Generic PATA PCI ATA helpers */ 100 /* Generic PATA PCI ATA helpers */
99 .port_start = ata_port_start, 101 .port_start = ata_port_start,
100 .port_stop = ata_port_stop,
101 .host_stop = ata_host_stop,
102}; 102};
103 103
104 104
diff --git a/drivers/ata/pata_ns87410.c b/drivers/ata/pata_ns87410.c
index c3032eb9010d..3d1fa487c486 100644
--- a/drivers/ata/pata_ns87410.c
+++ b/drivers/ata/pata_ns87410.c
@@ -179,14 +179,14 @@ static struct ata_port_operations ns87410_port_ops = {
179 .qc_prep = ata_qc_prep, 179 .qc_prep = ata_qc_prep,
180 .qc_issue = ns87410_qc_issue_prot, 180 .qc_issue = ns87410_qc_issue_prot,
181 181
182 .data_xfer = ata_pio_data_xfer, 182 .data_xfer = ata_data_xfer,
183 183
184 .irq_handler = ata_interrupt, 184 .irq_handler = ata_interrupt,
185 .irq_clear = ata_bmdma_irq_clear, 185 .irq_clear = ata_bmdma_irq_clear,
186 .irq_on = ata_irq_on,
187 .irq_ack = ata_irq_ack,
186 188
187 .port_start = ata_port_start, 189 .port_start = ata_port_start,
188 .port_stop = ata_port_stop,
189 .host_stop = ata_host_stop
190}; 190};
191 191
192static int ns87410_init_one(struct pci_dev *dev, const struct pci_device_id *id) 192static int ns87410_init_one(struct pci_dev *dev, const struct pci_device_id *id)
diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c
index 10ac3cc10181..45215aa05e72 100644
--- a/drivers/ata/pata_oldpiix.c
+++ b/drivers/ata/pata_oldpiix.c
@@ -25,7 +25,7 @@
25#include <linux/ata.h> 25#include <linux/ata.h>
26 26
27#define DRV_NAME "pata_oldpiix" 27#define DRV_NAME "pata_oldpiix"
28#define DRV_VERSION "0.5.2" 28#define DRV_VERSION "0.5.3"
29 29
30/** 30/**
31 * oldpiix_pre_reset - probe begin 31 * oldpiix_pre_reset - probe begin
@@ -94,19 +94,21 @@ static void oldpiix_set_piomode (struct ata_port *ap, struct ata_device *adev)
94 { 2, 1 }, 94 { 2, 1 },
95 { 2, 3 }, }; 95 { 2, 3 }, };
96 96
97 if (pio > 2) 97 if (pio > 1)
98 control |= 1; /* TIME1 enable */ 98 control |= 1; /* TIME */
99 if (ata_pio_need_iordy(adev)) 99 if (ata_pio_need_iordy(adev))
100 control |= 2; /* IE IORDY */ 100 control |= 2; /* IE */
101 101
102 /* Intel specifies that the PPE functionality is for disk only */ 102 /* Intel specifies that the prefetch/posting is for disk only */
103 if (adev->class == ATA_DEV_ATA) 103 if (adev->class == ATA_DEV_ATA)
104 control |= 4; /* PPE enable */ 104 control |= 4; /* PPE */
105 105
106 pci_read_config_word(dev, idetm_port, &idetm_data); 106 pci_read_config_word(dev, idetm_port, &idetm_data);
107 107
108 /* Enable PPE, IE and TIME as appropriate. Clear the other 108 /*
109 drive timing bits */ 109 * Set PPE, IE and TIME as appropriate.
110 * Clear the other drive's timing bits.
111 */
110 if (adev->devno == 0) { 112 if (adev->devno == 0) {
111 idetm_data &= 0xCCE0; 113 idetm_data &= 0xCCE0;
112 idetm_data |= control; 114 idetm_data |= control;
@@ -259,14 +261,14 @@ static const struct ata_port_operations oldpiix_pata_ops = {
259 .bmdma_status = ata_bmdma_status, 261 .bmdma_status = ata_bmdma_status,
260 .qc_prep = ata_qc_prep, 262 .qc_prep = ata_qc_prep,
261 .qc_issue = oldpiix_qc_issue_prot, 263 .qc_issue = oldpiix_qc_issue_prot,
262 .data_xfer = ata_pio_data_xfer, 264 .data_xfer = ata_data_xfer,
263 265
264 .irq_handler = ata_interrupt, 266 .irq_handler = ata_interrupt,
265 .irq_clear = ata_bmdma_irq_clear, 267 .irq_clear = ata_bmdma_irq_clear,
268 .irq_on = ata_irq_on,
269 .irq_ack = ata_irq_ack,
266 270
267 .port_start = ata_port_start, 271 .port_start = ata_port_start,
268 .port_stop = ata_port_stop,
269 .host_stop = ata_host_stop,
270}; 272};
271 273
272 274
diff --git a/drivers/ata/pata_opti.c b/drivers/ata/pata_opti.c
index c2988b0aa8ea..da1aa148b37d 100644
--- a/drivers/ata/pata_opti.c
+++ b/drivers/ata/pata_opti.c
@@ -95,18 +95,18 @@ static void opti_error_handler(struct ata_port *ap)
95 95
96static void opti_write_reg(struct ata_port *ap, u8 val, int reg) 96static void opti_write_reg(struct ata_port *ap, u8 val, int reg)
97{ 97{
98 unsigned long regio = ap->ioaddr.cmd_addr; 98 void __iomem *regio = ap->ioaddr.cmd_addr;
99 99
100 /* These 3 unlock the control register access */ 100 /* These 3 unlock the control register access */
101 inw(regio + 1); 101 ioread16(regio + 1);
102 inw(regio + 1); 102 ioread16(regio + 1);
103 outb(3, regio + 2); 103 iowrite8(3, regio + 2);
104 104
105 /* Do the I/O */ 105 /* Do the I/O */
106 outb(val, regio + reg); 106 iowrite8(val, regio + reg);
107 107
108 /* Relock */ 108 /* Relock */
109 outb(0x83, regio + 2); 109 iowrite8(0x83, regio + 2);
110} 110}
111 111
112/** 112/**
@@ -124,7 +124,7 @@ static void opti_set_piomode(struct ata_port *ap, struct ata_device *adev)
124 struct ata_device *pair = ata_dev_pair(adev); 124 struct ata_device *pair = ata_dev_pair(adev);
125 int clock; 125 int clock;
126 int pio = adev->pio_mode - XFER_PIO_0; 126 int pio = adev->pio_mode - XFER_PIO_0;
127 unsigned long regio = ap->ioaddr.cmd_addr; 127 void __iomem *regio = ap->ioaddr.cmd_addr;
128 u8 addr; 128 u8 addr;
129 129
130 /* Address table precomputed with prefetch off and a DCLK of 2 */ 130 /* Address table precomputed with prefetch off and a DCLK of 2 */
@@ -137,8 +137,8 @@ static void opti_set_piomode(struct ata_port *ap, struct ata_device *adev)
137 { 0x58, 0x44, 0x32, 0x22, 0x21 } 137 { 0x58, 0x44, 0x32, 0x22, 0x21 }
138 }; 138 };
139 139
140 outb(0xff, regio + 5); 140 iowrite8(0xff, regio + 5);
141 clock = inw(regio + 5) & 1; 141 clock = ioread16(regio + 5) & 1;
142 142
143 /* 143 /*
144 * As with many controllers the address setup time is shared 144 * As with many controllers the address setup time is shared
@@ -205,14 +205,14 @@ static struct ata_port_operations opti_port_ops = {
205 .qc_prep = ata_qc_prep, 205 .qc_prep = ata_qc_prep,
206 .qc_issue = ata_qc_issue_prot, 206 .qc_issue = ata_qc_issue_prot,
207 207
208 .data_xfer = ata_pio_data_xfer, 208 .data_xfer = ata_data_xfer,
209 209
210 .irq_handler = ata_interrupt, 210 .irq_handler = ata_interrupt,
211 .irq_clear = ata_bmdma_irq_clear, 211 .irq_clear = ata_bmdma_irq_clear,
212 .irq_on = ata_irq_on,
213 .irq_ack = ata_irq_ack,
212 214
213 .port_start = ata_port_start, 215 .port_start = ata_port_start,
214 .port_stop = ata_port_stop,
215 .host_stop = ata_host_stop
216}; 216};
217 217
218static int opti_init_one(struct pci_dev *dev, const struct pci_device_id *id) 218static int opti_init_one(struct pci_dev *dev, const struct pci_device_id *id)
diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c
index 80d111c569dc..d80b36e209cc 100644
--- a/drivers/ata/pata_optidma.c
+++ b/drivers/ata/pata_optidma.c
@@ -91,12 +91,12 @@ static void optidma_error_handler(struct ata_port *ap)
91 91
92static void optidma_unlock(struct ata_port *ap) 92static void optidma_unlock(struct ata_port *ap)
93{ 93{
94 unsigned long regio = ap->ioaddr.cmd_addr; 94 void __iomem *regio = ap->ioaddr.cmd_addr;
95 95
96 /* These 3 unlock the control register access */ 96 /* These 3 unlock the control register access */
97 inw(regio + 1); 97 ioread16(regio + 1);
98 inw(regio + 1); 98 ioread16(regio + 1);
99 outb(3, regio + 2); 99 iowrite8(3, regio + 2);
100} 100}
101 101
102/** 102/**
@@ -108,10 +108,10 @@ static void optidma_unlock(struct ata_port *ap)
108 108
109static void optidma_lock(struct ata_port *ap) 109static void optidma_lock(struct ata_port *ap)
110{ 110{
111 unsigned long regio = ap->ioaddr.cmd_addr; 111 void __iomem *regio = ap->ioaddr.cmd_addr;
112 112
113 /* Relock */ 113 /* Relock */
114 outb(0x83, regio + 2); 114 iowrite8(0x83, regio + 2);
115} 115}
116 116
117/** 117/**
@@ -133,7 +133,7 @@ static void optidma_set_mode(struct ata_port *ap, struct ata_device *adev, u8 mo
133 struct ata_device *pair = ata_dev_pair(adev); 133 struct ata_device *pair = ata_dev_pair(adev);
134 int pio = adev->pio_mode - XFER_PIO_0; 134 int pio = adev->pio_mode - XFER_PIO_0;
135 int dma = adev->dma_mode - XFER_MW_DMA_0; 135 int dma = adev->dma_mode - XFER_MW_DMA_0;
136 unsigned long regio = ap->ioaddr.cmd_addr; 136 void __iomem *regio = ap->ioaddr.cmd_addr;
137 u8 addr; 137 u8 addr;
138 138
139 /* Address table precomputed with a DCLK of 2 */ 139 /* Address table precomputed with a DCLK of 2 */
@@ -178,20 +178,20 @@ static void optidma_set_mode(struct ata_port *ap, struct ata_device *adev, u8 mo
178 178
179 /* Commence primary programming sequence */ 179 /* Commence primary programming sequence */
180 /* First we load the device number into the timing select */ 180 /* First we load the device number into the timing select */
181 outb(adev->devno, regio + MISC_REG); 181 iowrite8(adev->devno, regio + MISC_REG);
182 /* Now we load the data timings into read data/write data */ 182 /* Now we load the data timings into read data/write data */
183 if (mode < XFER_MW_DMA_0) { 183 if (mode < XFER_MW_DMA_0) {
184 outb(data_rec_timing[pci_clock][pio], regio + READ_REG); 184 iowrite8(data_rec_timing[pci_clock][pio], regio + READ_REG);
185 outb(data_rec_timing[pci_clock][pio], regio + WRITE_REG); 185 iowrite8(data_rec_timing[pci_clock][pio], regio + WRITE_REG);
186 } else if (mode < XFER_UDMA_0) { 186 } else if (mode < XFER_UDMA_0) {
187 outb(dma_data_rec_timing[pci_clock][dma], regio + READ_REG); 187 iowrite8(dma_data_rec_timing[pci_clock][dma], regio + READ_REG);
188 outb(dma_data_rec_timing[pci_clock][dma], regio + WRITE_REG); 188 iowrite8(dma_data_rec_timing[pci_clock][dma], regio + WRITE_REG);
189 } 189 }
190 /* Finally we load the address setup into the misc register */ 190 /* Finally we load the address setup into the misc register */
191 outb(addr | adev->devno, regio + MISC_REG); 191 iowrite8(addr | adev->devno, regio + MISC_REG);
192 192
193 /* Programming sequence complete, timing 0 dev 0, timing 1 dev 1 */ 193 /* Programming sequence complete, timing 0 dev 0, timing 1 dev 1 */
194 outb(0x85, regio + CNTRL_REG); 194 iowrite8(0x85, regio + CNTRL_REG);
195 195
196 /* Switch back to IDE mode */ 196 /* Switch back to IDE mode */
197 optidma_lock(ap); 197 optidma_lock(ap);
@@ -389,14 +389,14 @@ static struct ata_port_operations optidma_port_ops = {
389 .qc_prep = ata_qc_prep, 389 .qc_prep = ata_qc_prep,
390 .qc_issue = ata_qc_issue_prot, 390 .qc_issue = ata_qc_issue_prot,
391 391
392 .data_xfer = ata_pio_data_xfer, 392 .data_xfer = ata_data_xfer,
393 393
394 .irq_handler = ata_interrupt, 394 .irq_handler = ata_interrupt,
395 .irq_clear = ata_bmdma_irq_clear, 395 .irq_clear = ata_bmdma_irq_clear,
396 .irq_on = ata_irq_on,
397 .irq_ack = ata_irq_ack,
396 398
397 .port_start = ata_port_start, 399 .port_start = ata_port_start,
398 .port_stop = ata_port_stop,
399 .host_stop = ata_host_stop
400}; 400};
401 401
402static struct ata_port_operations optiplus_port_ops = { 402static struct ata_port_operations optiplus_port_ops = {
@@ -424,14 +424,14 @@ static struct ata_port_operations optiplus_port_ops = {
424 .qc_prep = ata_qc_prep, 424 .qc_prep = ata_qc_prep,
425 .qc_issue = ata_qc_issue_prot, 425 .qc_issue = ata_qc_issue_prot,
426 426
427 .data_xfer = ata_pio_data_xfer, 427 .data_xfer = ata_data_xfer,
428 428
429 .irq_handler = ata_interrupt, 429 .irq_handler = ata_interrupt,
430 .irq_clear = ata_bmdma_irq_clear, 430 .irq_clear = ata_bmdma_irq_clear,
431 .irq_on = ata_irq_on,
432 .irq_ack = ata_irq_ack,
431 433
432 .port_start = ata_port_start, 434 .port_start = ata_port_start,
433 .port_stop = ata_port_stop,
434 .host_stop = ata_host_stop
435}; 435};
436 436
437/** 437/**
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
index 9ed7f58424a3..acfc09f9abd9 100644
--- a/drivers/ata/pata_pcmcia.c
+++ b/drivers/ata/pata_pcmcia.c
@@ -88,14 +88,14 @@ static struct ata_port_operations pcmcia_port_ops = {
88 .qc_prep = ata_qc_prep, 88 .qc_prep = ata_qc_prep,
89 .qc_issue = ata_qc_issue_prot, 89 .qc_issue = ata_qc_issue_prot,
90 90
91 .data_xfer = ata_pio_data_xfer_noirq, 91 .data_xfer = ata_data_xfer_noirq,
92 92
93 .irq_handler = ata_interrupt, 93 .irq_handler = ata_interrupt,
94 .irq_clear = ata_bmdma_irq_clear, 94 .irq_clear = ata_bmdma_irq_clear,
95 .irq_on = ata_irq_on,
96 .irq_ack = ata_irq_ack,
95 97
96 .port_start = ata_port_start, 98 .port_start = ata_port_start,
97 .port_stop = ata_port_stop,
98 .host_stop = ata_host_stop
99}; 99};
100 100
101#define CS_CHECK(fn, ret) \ 101#define CS_CHECK(fn, ret) \
@@ -123,6 +123,7 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
123 cistpl_cftable_entry_t *cfg; 123 cistpl_cftable_entry_t *cfg;
124 int pass, last_ret = 0, last_fn = 0, is_kme = 0, ret = -ENOMEM; 124 int pass, last_ret = 0, last_fn = 0, is_kme = 0, ret = -ENOMEM;
125 unsigned long io_base, ctl_base; 125 unsigned long io_base, ctl_base;
126 void __iomem *io_addr, *ctl_addr;
126 127
127 info = kzalloc(sizeof(*info), GFP_KERNEL); 128 info = kzalloc(sizeof(*info), GFP_KERNEL);
128 if (info == NULL) 129 if (info == NULL)
@@ -233,10 +234,17 @@ next_entry:
233 CS_CHECK(RequestIRQ, pcmcia_request_irq(pdev, &pdev->irq)); 234 CS_CHECK(RequestIRQ, pcmcia_request_irq(pdev, &pdev->irq));
234 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(pdev, &pdev->conf)); 235 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(pdev, &pdev->conf));
235 236
237 /* iomap */
238 ret = -ENOMEM;
239 io_addr = devm_ioport_map(&pdev->dev, io_base, 8);
240 ctl_addr = devm_ioport_map(&pdev->dev, ctl_base, 1);
241 if (!io_addr || !ctl_addr)
242 goto failed;
243
236 /* Success. Disable the IRQ nIEN line, do quirks */ 244 /* Success. Disable the IRQ nIEN line, do quirks */
237 outb(0x02, ctl_base); 245 iowrite8(0x02, ctl_addr);
238 if (is_kme) 246 if (is_kme)
239 outb(0x81, ctl_base + 0x01); 247 iowrite8(0x81, ctl_addr + 0x01);
240 248
241 /* FIXME: Could be more ports at base + 0x10 but we only deal with 249 /* FIXME: Could be more ports at base + 0x10 but we only deal with
242 one right now */ 250 one right now */
@@ -258,11 +266,12 @@ next_entry:
258 ae.irq = pdev->irq.AssignedIRQ; 266 ae.irq = pdev->irq.AssignedIRQ;
259 ae.irq_flags = SA_SHIRQ; 267 ae.irq_flags = SA_SHIRQ;
260 ae.port_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST; 268 ae.port_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST;
261 ae.port[0].cmd_addr = io_base; 269 ae.port[0].cmd_addr = io_addr;
262 ae.port[0].altstatus_addr = ctl_base; 270 ae.port[0].altstatus_addr = ctl_addr;
263 ae.port[0].ctl_addr = ctl_base; 271 ae.port[0].ctl_addr = ctl_addr;
264 ata_std_ports(&ae.port[0]); 272 ata_std_ports(&ae.port[0]);
265 273
274 ret = -ENODEV;
266 if (ata_device_add(&ae) == 0) 275 if (ata_device_add(&ae) == 0)
267 goto failed; 276 goto failed;
268 277
@@ -298,7 +307,7 @@ static void pcmcia_remove_one(struct pcmcia_device *pdev)
298 /* If we have attached the device to the ATA layer, detach it */ 307 /* If we have attached the device to the ATA layer, detach it */
299 if (info->ndev) { 308 if (info->ndev) {
300 struct ata_host *host = dev_get_drvdata(dev); 309 struct ata_host *host = dev_get_drvdata(dev);
301 ata_host_remove(host); 310 ata_host_detach(host);
302 dev_set_drvdata(dev, NULL); 311 dev_set_drvdata(dev, NULL);
303 } 312 }
304 info->ndev = 0; 313 info->ndev = 0;
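On the quirk writes above: 0x02 in the device control register is the standard ATA nIEN bit, masking the card's INTRQ until libata takes over, and the 0x81 write at ctl_addr + 1 is the KME-specific enable that the old outb() version also performed. With the bit name from <linux/ata.h>, the sequence reads roughly:

        iowrite8(ATA_NIEN, ctl_addr);            /* bit 1: mask drive interrupts */
        if (is_kme)
                iowrite8(0x81, ctl_addr + 0x01); /* KME vendor quirk */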
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
index 76dd1c935dbd..ffa7f47fbb20 100644
--- a/drivers/ata/pata_pdc2027x.c
+++ b/drivers/ata/pata_pdc2027x.c
@@ -33,7 +33,6 @@
33#include <scsi/scsi_host.h> 33#include <scsi/scsi_host.h>
34#include <scsi/scsi_cmnd.h> 34#include <scsi/scsi_cmnd.h>
35#include <linux/libata.h> 35#include <linux/libata.h>
36#include <asm/io.h>
37 36
38#define DRV_NAME "pata_pdc2027x" 37#define DRV_NAME "pata_pdc2027x"
39#define DRV_VERSION "0.74-ac5" 38#define DRV_VERSION "0.74-ac5"
@@ -46,6 +45,8 @@
46#endif 45#endif
47 46
48enum { 47enum {
48 PDC_MMIO_BAR = 5,
49
49 PDC_UDMA_100 = 0, 50 PDC_UDMA_100 = 0,
50 PDC_UDMA_133 = 1, 51 PDC_UDMA_133 = 1,
51 52
@@ -62,7 +63,6 @@ enum {
62}; 63};
63 64
64static int pdc2027x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); 65static int pdc2027x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
65static void pdc2027x_remove_one(struct pci_dev *pdev);
66static void pdc2027x_error_handler(struct ata_port *ap); 66static void pdc2027x_error_handler(struct ata_port *ap);
67static void pdc2027x_set_piomode(struct ata_port *ap, struct ata_device *adev); 67static void pdc2027x_set_piomode(struct ata_port *ap, struct ata_device *adev);
68static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev); 68static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev);
@@ -123,7 +123,7 @@ static struct pci_driver pdc2027x_pci_driver = {
123 .name = DRV_NAME, 123 .name = DRV_NAME,
124 .id_table = pdc2027x_pci_tbl, 124 .id_table = pdc2027x_pci_tbl,
125 .probe = pdc2027x_init_one, 125 .probe = pdc2027x_init_one,
126 .remove = __devexit_p(pdc2027x_remove_one), 126 .remove = ata_pci_remove_one,
127}; 127};
128 128
129static struct scsi_host_template pdc2027x_sht = { 129static struct scsi_host_template pdc2027x_sht = {
@@ -160,7 +160,7 @@ static struct ata_port_operations pdc2027x_pata100_ops = {
160 .bmdma_status = ata_bmdma_status, 160 .bmdma_status = ata_bmdma_status,
161 .qc_prep = ata_qc_prep, 161 .qc_prep = ata_qc_prep,
162 .qc_issue = ata_qc_issue_prot, 162 .qc_issue = ata_qc_issue_prot,
163 .data_xfer = ata_mmio_data_xfer, 163 .data_xfer = ata_data_xfer,
164 164
165 .freeze = ata_bmdma_freeze, 165 .freeze = ata_bmdma_freeze,
166 .thaw = ata_bmdma_thaw, 166 .thaw = ata_bmdma_thaw,
@@ -169,10 +169,10 @@ static struct ata_port_operations pdc2027x_pata100_ops = {
169 169
170 .irq_handler = ata_interrupt, 170 .irq_handler = ata_interrupt,
171 .irq_clear = ata_bmdma_irq_clear, 171 .irq_clear = ata_bmdma_irq_clear,
172 .irq_on = ata_irq_on,
173 .irq_ack = ata_irq_ack,
172 174
173 .port_start = ata_port_start, 175 .port_start = ata_port_start,
174 .port_stop = ata_port_stop,
175 .host_stop = ata_pci_host_stop,
176}; 176};
177 177
178static struct ata_port_operations pdc2027x_pata133_ops = { 178static struct ata_port_operations pdc2027x_pata133_ops = {
@@ -194,7 +194,7 @@ static struct ata_port_operations pdc2027x_pata133_ops = {
194 .bmdma_status = ata_bmdma_status, 194 .bmdma_status = ata_bmdma_status,
195 .qc_prep = ata_qc_prep, 195 .qc_prep = ata_qc_prep,
196 .qc_issue = ata_qc_issue_prot, 196 .qc_issue = ata_qc_issue_prot,
197 .data_xfer = ata_mmio_data_xfer, 197 .data_xfer = ata_data_xfer,
198 198
199 .freeze = ata_bmdma_freeze, 199 .freeze = ata_bmdma_freeze,
200 .thaw = ata_bmdma_thaw, 200 .thaw = ata_bmdma_thaw,
@@ -203,10 +203,10 @@ static struct ata_port_operations pdc2027x_pata133_ops = {
203 203
204 .irq_handler = ata_interrupt, 204 .irq_handler = ata_interrupt,
205 .irq_clear = ata_bmdma_irq_clear, 205 .irq_clear = ata_bmdma_irq_clear,
206 .irq_on = ata_irq_on,
207 .irq_ack = ata_irq_ack,
206 208
207 .port_start = ata_port_start, 209 .port_start = ata_port_start,
208 .port_stop = ata_port_stop,
209 .host_stop = ata_pci_host_stop,
210}; 210};
211 211
212static struct ata_port_info pdc2027x_port_info[] = { 212static struct ata_port_info pdc2027x_port_info[] = {
@@ -245,7 +245,7 @@ MODULE_DEVICE_TABLE(pci, pdc2027x_pci_tbl);
245 */ 245 */
246static inline void __iomem *port_mmio(struct ata_port *ap, unsigned int offset) 246static inline void __iomem *port_mmio(struct ata_port *ap, unsigned int offset)
247{ 247{
248 return ap->host->mmio_base + ap->port_no * 0x100 + offset; 248 return ap->host->iomap[PDC_MMIO_BAR] + ap->port_no * 0x100 + offset;
249} 249}
250 250
251/** 251/**
@@ -526,18 +526,19 @@ static int pdc2027x_check_atapi_dma(struct ata_queued_cmd *qc)
526 526
527static long pdc_read_counter(struct ata_probe_ent *probe_ent) 527static long pdc_read_counter(struct ata_probe_ent *probe_ent)
528{ 528{
529 void __iomem *mmio_base = probe_ent->iomap[PDC_MMIO_BAR];
529 long counter; 530 long counter;
530 int retry = 1; 531 int retry = 1;
531 u32 bccrl, bccrh, bccrlv, bccrhv; 532 u32 bccrl, bccrh, bccrlv, bccrhv;
532 533
533retry: 534retry:
534 bccrl = readl(probe_ent->mmio_base + PDC_BYTE_COUNT) & 0xffff; 535 bccrl = readl(mmio_base + PDC_BYTE_COUNT) & 0xffff;
535 bccrh = readl(probe_ent->mmio_base + PDC_BYTE_COUNT + 0x100) & 0xffff; 536 bccrh = readl(mmio_base + PDC_BYTE_COUNT + 0x100) & 0xffff;
536 rmb(); 537 rmb();
537 538
538 /* Read the counter values again for verification */ 539 /* Read the counter values again for verification */
539 bccrlv = readl(probe_ent->mmio_base + PDC_BYTE_COUNT) & 0xffff; 540 bccrlv = readl(mmio_base + PDC_BYTE_COUNT) & 0xffff;
540 bccrhv = readl(probe_ent->mmio_base + PDC_BYTE_COUNT + 0x100) & 0xffff; 541 bccrhv = readl(mmio_base + PDC_BYTE_COUNT + 0x100) & 0xffff;
541 rmb(); 542 rmb();
542 543
543 counter = (bccrh << 15) | bccrl; 544 counter = (bccrh << 15) | bccrl;
@@ -568,7 +569,7 @@ retry:
568 */ 569 */
569static void pdc_adjust_pll(struct ata_probe_ent *probe_ent, long pll_clock, unsigned int board_idx) 570static void pdc_adjust_pll(struct ata_probe_ent *probe_ent, long pll_clock, unsigned int board_idx)
570{ 571{
571 572 void __iomem *mmio_base = probe_ent->iomap[PDC_MMIO_BAR];
572 u16 pll_ctl; 573 u16 pll_ctl;
573 long pll_clock_khz = pll_clock / 1000; 574 long pll_clock_khz = pll_clock / 1000;
574 long pout_required = board_idx? PDC_133_MHZ:PDC_100_MHZ; 575 long pout_required = board_idx? PDC_133_MHZ:PDC_100_MHZ;
@@ -587,7 +588,7 @@ static void pdc_adjust_pll(struct ata_probe_ent *probe_ent, long pll_clock, unsi
587 /* Show the current clock value of PLL control register 588 /* Show the current clock value of PLL control register
588 * (maybe already configured by the firmware) 589 * (maybe already configured by the firmware)
589 */ 590 */
590 pll_ctl = readw(probe_ent->mmio_base + PDC_PLL_CTL); 591 pll_ctl = readw(mmio_base + PDC_PLL_CTL);
591 592
592 PDPRINTK("pll_ctl[%X]\n", pll_ctl); 593 PDPRINTK("pll_ctl[%X]\n", pll_ctl);
593#endif 594#endif
@@ -627,8 +628,8 @@ static void pdc_adjust_pll(struct ata_probe_ent *probe_ent, long pll_clock, unsi
627 628
628 PDPRINTK("Writing pll_ctl[%X]\n", pll_ctl); 629 PDPRINTK("Writing pll_ctl[%X]\n", pll_ctl);
629 630
630 writew(pll_ctl, probe_ent->mmio_base + PDC_PLL_CTL); 631 writew(pll_ctl, mmio_base + PDC_PLL_CTL);
631 readw(probe_ent->mmio_base + PDC_PLL_CTL); /* flush */ 632 readw(mmio_base + PDC_PLL_CTL); /* flush */
632 633
633 /* Wait for the PLL circuit to stabilize */ 634 /* Wait for the PLL circuit to stabilize */
634 mdelay(30); 635 mdelay(30);
@@ -638,7 +639,7 @@ static void pdc_adjust_pll(struct ata_probe_ent *probe_ent, long pll_clock, unsi
638 * Show the current clock value of PLL control register 639 * Show the current clock value of PLL control register
639 * (maybe configured by the firmware) 640 * (maybe configured by the firmware)
640 */ 641 */
641 pll_ctl = readw(probe_ent->mmio_base + PDC_PLL_CTL); 642 pll_ctl = readw(mmio_base + PDC_PLL_CTL);
642 643
643 PDPRINTK("pll_ctl[%X]\n", pll_ctl); 644 PDPRINTK("pll_ctl[%X]\n", pll_ctl);
644#endif 645#endif
@@ -654,6 +655,7 @@ static void pdc_adjust_pll(struct ata_probe_ent *probe_ent, long pll_clock, unsi
654 */ 655 */
655static long pdc_detect_pll_input_clock(struct ata_probe_ent *probe_ent) 656static long pdc_detect_pll_input_clock(struct ata_probe_ent *probe_ent)
656{ 657{
658 void __iomem *mmio_base = probe_ent->iomap[PDC_MMIO_BAR];
657 u32 scr; 659 u32 scr;
658 long start_count, end_count; 660 long start_count, end_count;
659 long pll_clock; 661 long pll_clock;
@@ -662,10 +664,10 @@ static long pdc_detect_pll_input_clock(struct ata_probe_ent *probe_ent)
662 start_count = pdc_read_counter(probe_ent); 664 start_count = pdc_read_counter(probe_ent);
663 665
664 /* Start the test mode */ 666 /* Start the test mode */
665 scr = readl(probe_ent->mmio_base + PDC_SYS_CTL); 667 scr = readl(mmio_base + PDC_SYS_CTL);
666 PDPRINTK("scr[%X]\n", scr); 668 PDPRINTK("scr[%X]\n", scr);
667 writel(scr | (0x01 << 14), probe_ent->mmio_base + PDC_SYS_CTL); 669 writel(scr | (0x01 << 14), mmio_base + PDC_SYS_CTL);
668 readl(probe_ent->mmio_base + PDC_SYS_CTL); /* flush */ 670 readl(mmio_base + PDC_SYS_CTL); /* flush */
669 671
670 /* Let the counter run for 100 ms. */ 672 /* Let the counter run for 100 ms. */
671 mdelay(100); 673 mdelay(100);
@@ -674,10 +676,10 @@ static long pdc_detect_pll_input_clock(struct ata_probe_ent *probe_ent)
674 end_count = pdc_read_counter(probe_ent); 676 end_count = pdc_read_counter(probe_ent);
675 677
676 /* Stop the test mode */ 678 /* Stop the test mode */
677 scr = readl(probe_ent->mmio_base + PDC_SYS_CTL); 679 scr = readl(mmio_base + PDC_SYS_CTL);
678 PDPRINTK("scr[%X]\n", scr); 680 PDPRINTK("scr[%X]\n", scr);
679 writel(scr & ~(0x01 << 14), probe_ent->mmio_base + PDC_SYS_CTL); 681 writel(scr & ~(0x01 << 14), mmio_base + PDC_SYS_CTL);
680 readl(probe_ent->mmio_base + PDC_SYS_CTL); /* flush */ 682 readl(mmio_base + PDC_SYS_CTL); /* flush */
681 683
682 /* calculate the input clock in Hz */ 684 /* calculate the input clock in Hz */
683 pll_clock = (start_count - end_count) * 10; 685 pll_clock = (start_count - end_count) * 10;
@@ -722,7 +724,7 @@ static int pdc_hardware_init(struct pci_dev *pdev, struct ata_probe_ent *pe, uns
722 * @port: ata ioports to setup 724 * @port: ata ioports to setup
723 * @base: base address 725 * @base: base address
724 */ 726 */
725static void pdc_ata_setup_port(struct ata_ioports *port, unsigned long base) 727static void pdc_ata_setup_port(struct ata_ioports *port, void __iomem *base)
726{ 728{
727 port->cmd_addr = 729 port->cmd_addr =
728 port->data_addr = base; 730 port->data_addr = base;
@@ -755,48 +757,37 @@ static int __devinit pdc2027x_init_one(struct pci_dev *pdev, const struct pci_de
755 static int printed_version; 757 static int printed_version;
756 unsigned int board_idx = (unsigned int) ent->driver_data; 758 unsigned int board_idx = (unsigned int) ent->driver_data;
757 759
758 struct ata_probe_ent *probe_ent = NULL; 760 struct ata_probe_ent *probe_ent;
759 unsigned long base;
760 void __iomem *mmio_base; 761 void __iomem *mmio_base;
761 int rc; 762 int rc;
762 763
763 if (!printed_version++) 764 if (!printed_version++)
764 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); 765 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
765 766
766 rc = pci_enable_device(pdev); 767 rc = pcim_enable_device(pdev);
767 if (rc) 768 if (rc)
768 return rc; 769 return rc;
769 770
770 rc = pci_request_regions(pdev, DRV_NAME); 771 rc = pcim_iomap_regions(pdev, 1 << PDC_MMIO_BAR, DRV_NAME);
771 if (rc) 772 if (rc)
772 goto err_out; 773 return rc;
773 774
774 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); 775 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
775 if (rc) 776 if (rc)
776 goto err_out_regions; 777 return rc;
777 778
778 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); 779 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
779 if (rc) 780 if (rc)
780 goto err_out_regions; 781 return rc;
781 782
782 /* Prepare the probe entry */ 783 /* Prepare the probe entry */
783 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL); 784 probe_ent = devm_kzalloc(&pdev->dev, sizeof(*probe_ent), GFP_KERNEL);
784 if (probe_ent == NULL) { 785 if (probe_ent == NULL)
785 rc = -ENOMEM; 786 return -ENOMEM;
786 goto err_out_regions;
787 }
788 787
789 probe_ent->dev = pci_dev_to_dev(pdev); 788 probe_ent->dev = pci_dev_to_dev(pdev);
790 INIT_LIST_HEAD(&probe_ent->node); 789 INIT_LIST_HEAD(&probe_ent->node);
791 790
792 mmio_base = pci_iomap(pdev, 5, 0);
793 if (!mmio_base) {
794 rc = -ENOMEM;
795 goto err_out_free_ent;
796 }
797
798 base = (unsigned long) mmio_base;
799
800 probe_ent->sht = pdc2027x_port_info[board_idx].sht; 791 probe_ent->sht = pdc2027x_port_info[board_idx].sht;
801 probe_ent->port_flags = pdc2027x_port_info[board_idx].flags; 792 probe_ent->port_flags = pdc2027x_port_info[board_idx].flags;
802 probe_ent->pio_mask = pdc2027x_port_info[board_idx].pio_mask; 793 probe_ent->pio_mask = pdc2027x_port_info[board_idx].pio_mask;
@@ -806,12 +797,14 @@ static int __devinit pdc2027x_init_one(struct pci_dev *pdev, const struct pci_de
806 797
807 probe_ent->irq = pdev->irq; 798 probe_ent->irq = pdev->irq;
808 probe_ent->irq_flags = SA_SHIRQ; 799 probe_ent->irq_flags = SA_SHIRQ;
809 probe_ent->mmio_base = mmio_base; 800 probe_ent->iomap = pcim_iomap_table(pdev);
810 801
811 pdc_ata_setup_port(&probe_ent->port[0], base + 0x17c0); 802 mmio_base = probe_ent->iomap[PDC_MMIO_BAR];
812 probe_ent->port[0].bmdma_addr = base + 0x1000; 803
813 pdc_ata_setup_port(&probe_ent->port[1], base + 0x15c0); 804 pdc_ata_setup_port(&probe_ent->port[0], mmio_base + 0x17c0);
814 probe_ent->port[1].bmdma_addr = base + 0x1008; 805 probe_ent->port[0].bmdma_addr = mmio_base + 0x1000;
806 pdc_ata_setup_port(&probe_ent->port[1], mmio_base + 0x15c0);
807 probe_ent->port[1].bmdma_addr = mmio_base + 0x1008;
815 808
816 probe_ent->n_ports = 2; 809 probe_ent->n_ports = 2;
817 810
@@ -820,32 +813,13 @@ static int __devinit pdc2027x_init_one(struct pci_dev *pdev, const struct pci_de
820 813
821 /* initialize adapter */ 814 /* initialize adapter */
822 if (pdc_hardware_init(pdev, probe_ent, board_idx) != 0) 815 if (pdc_hardware_init(pdev, probe_ent, board_idx) != 0)
823 goto err_out_free_ent; 816 return -EIO;
824 817
825 ata_device_add(probe_ent); 818 if (!ata_device_add(probe_ent))
826 kfree(probe_ent); 819 return -ENODEV;
827 820
821 devm_kfree(&pdev->dev, probe_ent);
828 return 0; 822 return 0;
829
830err_out_free_ent:
831 kfree(probe_ent);
832err_out_regions:
833 pci_release_regions(pdev);
834err_out:
835 pci_disable_device(pdev);
836 return rc;
837}
838
839/**
840 * pdc2027x_remove_one - Called to remove a single instance of the
841 * adapter.
842 *
843 * @dev: The PCI device to remove.
844 * FIXME: module load/unload not working yet
845 */
846static void __devexit pdc2027x_remove_one(struct pci_dev *pdev)
847{
848 ata_pci_remove_one(pdev);
849} 823}
850 824
851/** 825/**
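The probe rewrite above is the canonical conversion to managed (devres) resources: everything acquired through pcim_*/devm_* helpers is released automatically when probing fails or the device goes away, which is what allowed the whole chain of err_out_* labels to be deleted. The resulting skeleton, roughly:

        int rc;
        void __iomem *mmio_base;

        rc = pcim_enable_device(pdev);
        if (rc)
                return rc;              /* nothing to undo by hand */

        rc = pcim_iomap_regions(pdev, 1 << PDC_MMIO_BAR, DRV_NAME);
        if (rc)
                return rc;              /* region and mapping are managed too */

        mmio_base = pcim_iomap_table(pdev)[PDC_MMIO_BAR];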
diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c
index ad691b9e7743..6dd63413a523 100644
--- a/drivers/ata/pata_pdc202xx_old.c
+++ b/drivers/ata/pata_pdc202xx_old.c
@@ -170,17 +170,17 @@ static void pdc2026x_bmdma_start(struct ata_queued_cmd *qc)
170 struct ata_taskfile *tf = &qc->tf; 170 struct ata_taskfile *tf = &qc->tf;
171 int sel66 = ap->port_no ? 0x08: 0x02; 171 int sel66 = ap->port_no ? 0x08: 0x02;
172 172
173 unsigned long master = ap->host->ports[0]->ioaddr.bmdma_addr; 173 void __iomem *master = ap->host->ports[0]->ioaddr.bmdma_addr;
174 unsigned long clock = master + 0x11; 174 void __iomem *clock = master + 0x11;
175 unsigned long atapi_reg = master + 0x20 + (4 * ap->port_no); 175 void __iomem *atapi_reg = master + 0x20 + (4 * ap->port_no);
176 176
177 u32 len; 177 u32 len;
178 178
179 /* Check we keep host level locking here */ 179 /* Check we keep host level locking here */
180 if (adev->dma_mode >= XFER_UDMA_2) 180 if (adev->dma_mode >= XFER_UDMA_2)
181 outb(inb(clock) | sel66, clock); 181 iowrite8(ioread8(clock) | sel66, clock);
182 else 182 else
183 outb(inb(clock) & ~sel66, clock); 183 iowrite8(ioread8(clock) & ~sel66, clock);
184 184
185 /* The DMA clocks may have been trashed by a reset. FIXME: make conditional 185 /* The DMA clocks may have been trashed by a reset. FIXME: make conditional
186 and move to qc_issue? */ 186 and move to qc_issue? */
@@ -189,17 +189,14 @@ static void pdc2026x_bmdma_start(struct ata_queued_cmd *qc)
189 /* Cases the state machine will not complete correctly without help */ 189 /* Cases the state machine will not complete correctly without help */
190 if ((tf->flags & ATA_TFLAG_LBA48) || tf->protocol == ATA_PROT_ATAPI_DMA) 190 if ((tf->flags & ATA_TFLAG_LBA48) || tf->protocol == ATA_PROT_ATAPI_DMA)
191 { 191 {
192 if (tf->flags & ATA_TFLAG_LBA48) 192 len = qc->nbytes;
193 len = qc->nsect * 512;
194 else
195 len = qc->nbytes;
196 193
197 if (tf->flags & ATA_TFLAG_WRITE) 194 if (tf->flags & ATA_TFLAG_WRITE)
198 len |= 0x06000000; 195 len |= 0x06000000;
199 else 196 else
200 len |= 0x05000000; 197 len |= 0x05000000;
201 198
202 outl(len, atapi_reg); 199 iowrite32(len, atapi_reg);
203 } 200 }
204 201
205 /* Activate DMA */ 202 /* Activate DMA */
@@ -222,19 +219,19 @@ static void pdc2026x_bmdma_stop(struct ata_queued_cmd *qc)
222 219
223 int sel66 = ap->port_no ? 0x08: 0x02; 220 int sel66 = ap->port_no ? 0x08: 0x02;
224 /* The clock bits are in the same register for both channels */ 221 /* The clock bits are in the same register for both channels */
225 unsigned long master = ap->host->ports[0]->ioaddr.bmdma_addr; 222 void __iomem *master = ap->host->ports[0]->ioaddr.bmdma_addr;
226 unsigned long clock = master + 0x11; 223 void __iomem *clock = master + 0x11;
227 unsigned long atapi_reg = master + 0x20 + (4 * ap->port_no); 224 void __iomem *atapi_reg = master + 0x20 + (4 * ap->port_no);
228 225
229 /* Cases the state machine will not complete correctly */ 226 /* Cases the state machine will not complete correctly */
230 if (tf->protocol == ATA_PROT_ATAPI_DMA || ( tf->flags & ATA_TFLAG_LBA48)) { 227 if (tf->protocol == ATA_PROT_ATAPI_DMA || ( tf->flags & ATA_TFLAG_LBA48)) {
231 outl(0, atapi_reg); 228 iowrite32(0, atapi_reg);
232 outb(inb(clock) & ~sel66, clock); 229 iowrite8(ioread8(clock) & ~sel66, clock);
233 } 230 }
234 /* Check we keep host level locking here */ 231 /* Check we keep host level locking here */
235 /* Flip back to 33MHz for PIO */ 232 /* Flip back to 33MHz for PIO */
236 if (adev->dma_mode >= XFER_UDMA_2) 233 if (adev->dma_mode >= XFER_UDMA_2)
237 outb(inb(clock) & ~sel66, clock); 234 iowrite8(ioread8(clock) & ~sel66, clock);
238 235
239 ata_bmdma_stop(qc); 236 ata_bmdma_stop(qc);
240} 237}
@@ -297,14 +294,14 @@ static struct ata_port_operations pdc2024x_port_ops = {
297 294
298 .qc_prep = ata_qc_prep, 295 .qc_prep = ata_qc_prep,
299 .qc_issue = ata_qc_issue_prot, 296 .qc_issue = ata_qc_issue_prot,
300 .data_xfer = ata_pio_data_xfer, 297 .data_xfer = ata_data_xfer,
301 298
302 .irq_handler = ata_interrupt, 299 .irq_handler = ata_interrupt,
303 .irq_clear = ata_bmdma_irq_clear, 300 .irq_clear = ata_bmdma_irq_clear,
301 .irq_on = ata_irq_on,
302 .irq_ack = ata_irq_ack,
304 303
305 .port_start = ata_port_start, 304 .port_start = ata_port_start,
306 .port_stop = ata_port_stop,
307 .host_stop = ata_host_stop
308}; 305};
309 306
310static struct ata_port_operations pdc2026x_port_ops = { 307static struct ata_port_operations pdc2026x_port_ops = {
@@ -331,14 +328,14 @@ static struct ata_port_operations pdc2026x_port_ops = {
331 328
332 .qc_prep = ata_qc_prep, 329 .qc_prep = ata_qc_prep,
333 .qc_issue = ata_qc_issue_prot, 330 .qc_issue = ata_qc_issue_prot,
334 .data_xfer = ata_pio_data_xfer, 331 .data_xfer = ata_data_xfer,
335 332
336 .irq_handler = ata_interrupt, 333 .irq_handler = ata_interrupt,
337 .irq_clear = ata_bmdma_irq_clear, 334 .irq_clear = ata_bmdma_irq_clear,
335 .irq_on = ata_irq_on,
336 .irq_ack = ata_irq_ack,
338 337
339 .port_start = ata_port_start, 338 .port_start = ata_port_start,
340 .port_stop = ata_port_stop,
341 .host_stop = ata_host_stop
342}; 339};
343 340
344static int pdc202xx_init_one(struct pci_dev *dev, const struct pci_device_id *id) 341static int pdc202xx_init_one(struct pci_dev *dev, const struct pci_device_id *id)
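The bmdma hooks above keep the controller's read-modify-write of the shared clock register, now through iomap cookies; both channels' 66MHz select bits live in the primary port's BMDMA window. The toggle, in the driver's own sel66 convention (udma2_or_faster is a placeholder):

        void __iomem *clock = master + 0x11;    /* shared clock register */
        int sel66 = ap->port_no ? 0x08 : 0x02;  /* this channel's select bit */

        if (udma2_or_faster)
                iowrite8(ioread8(clock) | sel66, clock);  /* 66MHz base clock */
        else
                iowrite8(ioread8(clock) & ~sel66, clock); /* back to 33MHz */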
diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
index 40ae11cbfda4..479a326114e0 100644
--- a/drivers/ata/pata_platform.c
+++ b/drivers/ata/pata_platform.c
@@ -47,23 +47,6 @@ static int pata_platform_set_mode(struct ata_port *ap, struct ata_device **unuse
47 return 0; 47 return 0;
48} 48}
49 49
50static void pata_platform_host_stop(struct ata_host *host)
51{
52 int i;
53
54 /*
55 * Unmap the bases for MMIO
56 */
57 for (i = 0; i < host->n_ports; i++) {
58 struct ata_port *ap = host->ports[i];
59
60 if (ap->flags & ATA_FLAG_MMIO) {
61 iounmap((void __iomem *)ap->ioaddr.ctl_addr);
62 iounmap((void __iomem *)ap->ioaddr.cmd_addr);
63 }
64 }
65}
66
67static struct scsi_host_template pata_platform_sht = { 50static struct scsi_host_template pata_platform_sht = {
68 .module = THIS_MODULE, 51 .module = THIS_MODULE,
69 .name = DRV_NAME, 52 .name = DRV_NAME,
@@ -100,14 +83,14 @@ static struct ata_port_operations pata_platform_port_ops = {
100 .qc_prep = ata_qc_prep, 83 .qc_prep = ata_qc_prep,
101 .qc_issue = ata_qc_issue_prot, 84 .qc_issue = ata_qc_issue_prot,
102 85
103 .data_xfer = ata_pio_data_xfer_noirq, 86 .data_xfer = ata_data_xfer_noirq,
104 87
105 .irq_handler = ata_interrupt, 88 .irq_handler = ata_interrupt,
106 .irq_clear = ata_bmdma_irq_clear, 89 .irq_clear = ata_bmdma_irq_clear,
90 .irq_on = ata_irq_on,
91 .irq_ack = ata_irq_ack,
107 92
108 .port_start = ata_port_start, 93 .port_start = ata_port_start,
109 .port_stop = ata_port_stop,
110 .host_stop = pata_platform_host_stop
111}; 94};
112 95
113static void pata_platform_setup_port(struct ata_ioports *ioaddr, 96static void pata_platform_setup_port(struct ata_ioports *ioaddr,
@@ -153,7 +136,6 @@ static int __devinit pata_platform_probe(struct platform_device *pdev)
153 struct resource *io_res, *ctl_res; 136 struct resource *io_res, *ctl_res;
154 struct ata_probe_ent ae; 137 struct ata_probe_ent ae;
155 unsigned int mmio; 138 unsigned int mmio;
156 int ret;
157 139
158 /* 140 /*
159 * Simple resource validation ... 141 * Simple resource validation ...
@@ -207,46 +189,29 @@ static int __devinit pata_platform_probe(struct platform_device *pdev)
207 * Handle the MMIO case 189 * Handle the MMIO case
208 */ 190 */
209 if (mmio) { 191 if (mmio) {
210 ae.port_flags |= ATA_FLAG_MMIO; 192 ae.port[0].cmd_addr = devm_ioremap(&pdev->dev, io_res->start,
211
212 ae.port[0].cmd_addr = (unsigned long)ioremap(io_res->start,
213 io_res->end - io_res->start + 1); 193 io_res->end - io_res->start + 1);
214 if (unlikely(!ae.port[0].cmd_addr)) { 194 ae.port[0].ctl_addr = devm_ioremap(&pdev->dev, ctl_res->start,
215 dev_err(&pdev->dev, "failed to remap IO base\n");
216 return -ENXIO;
217 }
218
219 ae.port[0].ctl_addr = (unsigned long)ioremap(ctl_res->start,
220 ctl_res->end - ctl_res->start + 1); 195 ctl_res->end - ctl_res->start + 1);
221 if (unlikely(!ae.port[0].ctl_addr)) {
222 dev_err(&pdev->dev, "failed to remap CTL base\n");
223 ret = -ENXIO;
224 goto bad_remap;
225 }
226 } else { 196 } else {
227 ae.port[0].cmd_addr = io_res->start; 197 ae.port[0].cmd_addr = devm_ioport_map(&pdev->dev, io_res->start,
228 ae.port[0].ctl_addr = ctl_res->start; 198 io_res->end - io_res->start + 1);
199 ae.port[0].ctl_addr = devm_ioport_map(&pdev->dev, ctl_res->start,
200 ctl_res->end - ctl_res->start + 1);
201 }
202 if (!ae.port[0].cmd_addr || !ae.port[0].ctl_addr) {
203 dev_err(&pdev->dev, "failed to map IO/CTL base\n");
204 return -ENOMEM;
229 } 205 }
230 206
231 ae.port[0].altstatus_addr = ae.port[0].ctl_addr; 207 ae.port[0].altstatus_addr = ae.port[0].ctl_addr;
232 208
233 pata_platform_setup_port(&ae.port[0], pdev->dev.platform_data); 209 pata_platform_setup_port(&ae.port[0], pdev->dev.platform_data);
234 210
235 if (unlikely(ata_device_add(&ae) == 0)) { 211 if (unlikely(ata_device_add(&ae) == 0))
236 ret = -ENODEV; 212 return -ENODEV;
237 goto add_failed;
238 }
239 213
240 return 0; 214 return 0;
241
242add_failed:
243 if (ae.port[0].ctl_addr && mmio)
244 iounmap((void __iomem *)ae.port[0].ctl_addr);
245bad_remap:
246 if (ae.port[0].cmd_addr && mmio)
247 iounmap((void __iomem *)ae.port[0].cmd_addr);
248
249 return ret;
250} 215}
251 216
252/** 217/**
@@ -261,7 +226,7 @@ static int __devexit pata_platform_remove(struct platform_device *pdev)
261 struct device *dev = &pdev->dev; 226 struct device *dev = &pdev->dev;
262 struct ata_host *host = dev_get_drvdata(dev); 227 struct ata_host *host = dev_get_drvdata(dev);
263 228
264 ata_host_remove(host); 229 ata_host_detach(host);
265 dev_set_drvdata(dev, NULL); 230 dev_set_drvdata(dev, NULL);
266 231
267 return 0; 232 return 0;
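Since both branches above now produce a void __iomem * cookie, a single NULL check replaces the staged unwind that the deleted labels implemented. Condensed, assuming a struct resource *res whose flags pick the flavor:

        void __iomem *base;
        unsigned long len = res->end - res->start + 1;

        if (res->flags & IORESOURCE_MEM)
                base = devm_ioremap(&pdev->dev, res->start, len);
        else
                base = devm_ioport_map(&pdev->dev, res->start, len);
        if (!base)
                return -ENOMEM; /* devres unmaps whatever did succeed */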
diff --git a/drivers/ata/pata_qdi.c b/drivers/ata/pata_qdi.c
index afc0d990e7d6..1b3b4ed8eb19 100644
--- a/drivers/ata/pata_qdi.c
+++ b/drivers/ata/pata_qdi.c
@@ -131,22 +131,24 @@ static void qdi_data_xfer(struct ata_device *adev, unsigned char *buf, unsigned
131 131
132 if (ata_id_has_dword_io(adev->id)) { 132 if (ata_id_has_dword_io(adev->id)) {
133 if (write_data) 133 if (write_data)
134 outsl(ap->ioaddr.data_addr, buf, buflen >> 2); 134 iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
135 else 135 else
136 insl(ap->ioaddr.data_addr, buf, buflen >> 2); 136 ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
137 137
138 if (unlikely(slop)) { 138 if (unlikely(slop)) {
139 u32 pad; 139 u32 pad;
140 if (write_data) { 140 if (write_data) {
141 memcpy(&pad, buf + buflen - slop, slop); 141 memcpy(&pad, buf + buflen - slop, slop);
142 outl(le32_to_cpu(pad), ap->ioaddr.data_addr); 142 pad = le32_to_cpu(pad);
143 iowrite32(pad, ap->ioaddr.data_addr);
143 } else { 144 } else {
144 pad = cpu_to_le32(inl(ap->ioaddr.data_addr)); 145 pad = ioread32(ap->ioaddr.data_addr);
146 pad = cpu_to_le32(pad);
145 memcpy(buf + buflen - slop, &pad, slop); 147 memcpy(buf + buflen - slop, &pad, slop);
146 } 148 }
147 } 149 }
148 } else 150 } else
149 ata_pio_data_xfer(adev, buf, buflen, write_data); 151 ata_data_xfer(adev, buf, buflen, write_data);
150} 152}
151 153
152static struct scsi_host_template qdi_sht = { 154static struct scsi_host_template qdi_sht = {
@@ -189,10 +191,10 @@ static struct ata_port_operations qdi6500_port_ops = {
189 191
190 .irq_handler = ata_interrupt, 192 .irq_handler = ata_interrupt,
191 .irq_clear = ata_bmdma_irq_clear, 193 .irq_clear = ata_bmdma_irq_clear,
194 .irq_on = ata_irq_on,
195 .irq_ack = ata_irq_ack,
192 196
193 .port_start = ata_port_start, 197 .port_start = ata_port_start,
194 .port_stop = ata_port_stop,
195 .host_stop = ata_host_stop
196}; 198};
197 199
198static struct ata_port_operations qdi6580_port_ops = { 200static struct ata_port_operations qdi6580_port_ops = {
@@ -217,10 +219,10 @@ static struct ata_port_operations qdi6580_port_ops = {
217 219
218 .irq_handler = ata_interrupt, 220 .irq_handler = ata_interrupt,
219 .irq_clear = ata_bmdma_irq_clear, 221 .irq_clear = ata_bmdma_irq_clear,
222 .irq_on = ata_irq_on,
223 .irq_ack = ata_irq_ack,
220 224
221 .port_start = ata_port_start, 225 .port_start = ata_port_start,
222 .port_stop = ata_port_stop,
223 .host_stop = ata_host_stop
224}; 226};
225 227
226/** 228/**
@@ -238,10 +240,9 @@ static __init int qdi_init_one(unsigned long port, int type, unsigned long io, i
238{ 240{
239 struct ata_probe_ent ae; 241 struct ata_probe_ent ae;
240 struct platform_device *pdev; 242 struct platform_device *pdev;
243 void __iomem *io_addr, *ctl_addr;
241 int ret; 244 int ret;
242 245
243 unsigned long ctrl = io + 0x206;
244
245 /* 246 /*
246 * Fill in a probe structure first of all 247 * Fill in a probe structure first of all
247 */ 248 */
@@ -250,6 +251,12 @@ static __init int qdi_init_one(unsigned long port, int type, unsigned long io, i
250 if (IS_ERR(pdev)) 251 if (IS_ERR(pdev))
251 return PTR_ERR(pdev); 252 return PTR_ERR(pdev);
252 253
254 ret = -ENOMEM;
255 io_addr = devm_ioport_map(&pdev->dev, io, 8);
256 ctl_addr = devm_ioport_map(&pdev->dev, io + 0x206, 1);
257 if (!io_addr || !ctl_addr)
258 goto fail;
259
253 memset(&ae, 0, sizeof(struct ata_probe_ent)); 260 memset(&ae, 0, sizeof(struct ata_probe_ent));
254 INIT_LIST_HEAD(&ae.node); 261 INIT_LIST_HEAD(&ae.node);
255 ae.dev = &pdev->dev; 262 ae.dev = &pdev->dev;
@@ -267,9 +274,9 @@ static __init int qdi_init_one(unsigned long port, int type, unsigned long io, i
267 ae.irq = irq; 274 ae.irq = irq;
268 ae.irq_flags = 0; 275 ae.irq_flags = 0;
269 ae.port_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST; 276 ae.port_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST;
270 ae.port[0].cmd_addr = io; 277 ae.port[0].cmd_addr = io_addr;
271 ae.port[0].altstatus_addr = ctrl; 278 ae.port[0].altstatus_addr = ctl_addr;
272 ae.port[0].ctl_addr = ctrl; 279 ae.port[0].ctl_addr = ctl_addr;
273 ata_std_ports(&ae.port[0]); 280 ata_std_ports(&ae.port[0]);
274 281
275 /* 282 /*
@@ -282,14 +289,17 @@ static __init int qdi_init_one(unsigned long port, int type, unsigned long io, i
282 qdi_data[nr_qdi_host].platform_dev = pdev; 289 qdi_data[nr_qdi_host].platform_dev = pdev;
283 290
284 printk(KERN_INFO DRV_NAME": qd%d at 0x%lx.\n", type, io); 291 printk(KERN_INFO DRV_NAME": qd%d at 0x%lx.\n", type, io);
285 ret = ata_device_add(&ae); 292
286 if (ret == 0) { 293 ret = -ENODEV;
287 platform_device_unregister(pdev); 294 if (!ata_device_add(&ae))
288 return -ENODEV; 295 goto fail;
289 }
290 296
291 qdi_host[nr_qdi_host++] = dev_get_drvdata(&pdev->dev); 297 qdi_host[nr_qdi_host++] = dev_get_drvdata(&pdev->dev);
292 return 0; 298 return 0;
299
300 fail:
301 platform_device_unregister(pdev);
302 return ret;
293} 303}
294 304
295/** 305/**
@@ -382,7 +392,7 @@ static __exit void qdi_exit(void)
382 int i; 392 int i;
383 393
384 for (i = 0; i < nr_qdi_host; i++) { 394 for (i = 0; i < nr_qdi_host; i++) {
385 ata_host_remove(qdi_host[i]); 395 ata_host_detach(qdi_host[i]);
386 /* Free the control resource. The 6580 dual channel has the resources 396 /* Free the control resource. The 6580 dual channel has the resources
387 * claimed as a pair of 2 byte resources so we need no special cases... 397 * claimed as a pair of 2 byte resources so we need no special cases...
388 */ 398 */
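Besides moving to ioread32_rep()/iowrite32_rep(), the qdi_data_xfer() hunk above carries over the byte-order handling for the one to three trailing bytes ("slop") of an odd-length dword transfer: the tail is staged through a u32 and passed through the le32 helpers so it lands correctly on big-endian hosts as well. The read side, as a sketch:

        unsigned int slop = buflen & 3;
        u32 pad;

        ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2); /* bulk dwords */
        if (slop) {
                pad = ioread32(ap->ioaddr.data_addr);   /* one last full dword */
                pad = cpu_to_le32(pad);                 /* keep bus byte order */
                memcpy(buf + buflen - slop, &pad, slop);
        }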
diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c
index 065541d034ad..0d1e571ef633 100644
--- a/drivers/ata/pata_radisys.c
+++ b/drivers/ata/pata_radisys.c
@@ -255,14 +255,14 @@ static const struct ata_port_operations radisys_pata_ops = {
255 .bmdma_status = ata_bmdma_status, 255 .bmdma_status = ata_bmdma_status,
256 .qc_prep = ata_qc_prep, 256 .qc_prep = ata_qc_prep,
257 .qc_issue = radisys_qc_issue_prot, 257 .qc_issue = radisys_qc_issue_prot,
258 .data_xfer = ata_pio_data_xfer, 258 .data_xfer = ata_data_xfer,
259 259
260 .irq_handler = ata_interrupt, 260 .irq_handler = ata_interrupt,
261 .irq_clear = ata_bmdma_irq_clear, 261 .irq_clear = ata_bmdma_irq_clear,
262 .irq_on = ata_irq_on,
263 .irq_ack = ata_irq_ack,
262 264
263 .port_start = ata_port_start, 265 .port_start = ata_port_start,
264 .port_stop = ata_port_stop,
265 .host_stop = ata_host_stop,
266}; 266};
267 267
268 268
diff --git a/drivers/ata/pata_rz1000.c b/drivers/ata/pata_rz1000.c
index cec0729225e1..71a2bac09e0d 100644
--- a/drivers/ata/pata_rz1000.c
+++ b/drivers/ata/pata_rz1000.c
@@ -115,7 +115,7 @@ static struct ata_port_operations rz1000_port_ops = {
115 .qc_prep = ata_qc_prep, 115 .qc_prep = ata_qc_prep,
116 .qc_issue = ata_qc_issue_prot, 116 .qc_issue = ata_qc_issue_prot,
117 117
118 .data_xfer = ata_pio_data_xfer, 118 .data_xfer = ata_data_xfer,
119 119
120 .freeze = ata_bmdma_freeze, 120 .freeze = ata_bmdma_freeze,
121 .thaw = ata_bmdma_thaw, 121 .thaw = ata_bmdma_thaw,
@@ -124,10 +124,10 @@ static struct ata_port_operations rz1000_port_ops = {
124 124
125 .irq_handler = ata_interrupt, 125 .irq_handler = ata_interrupt,
126 .irq_clear = ata_bmdma_irq_clear, 126 .irq_clear = ata_bmdma_irq_clear,
127 .irq_on = ata_irq_on,
128 .irq_ack = ata_irq_ack,
127 129
128 .port_start = ata_port_start, 130 .port_start = ata_port_start,
129 .port_stop = ata_port_stop,
130 .host_stop = ata_host_stop
131}; 131};
132 132
133static int rz1000_fifo_disable(struct pci_dev *pdev) 133static int rz1000_fifo_disable(struct pci_dev *pdev)
diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c
index a3b35bc50394..58e42fbd14f9 100644
--- a/drivers/ata/pata_sc1200.c
+++ b/drivers/ata/pata_sc1200.c
@@ -220,14 +220,14 @@ static struct ata_port_operations sc1200_port_ops = {
220 .qc_prep = ata_qc_prep, 220 .qc_prep = ata_qc_prep,
221 .qc_issue = sc1200_qc_issue_prot, 221 .qc_issue = sc1200_qc_issue_prot,
222 222
223 .data_xfer = ata_pio_data_xfer, 223 .data_xfer = ata_data_xfer,
224 224
225 .irq_handler = ata_interrupt, 225 .irq_handler = ata_interrupt,
226 .irq_clear = ata_bmdma_irq_clear, 226 .irq_clear = ata_bmdma_irq_clear,
227 .irq_on = ata_irq_on,
228 .irq_ack = ata_irq_ack,
227 229
228 .port_start = ata_port_start, 230 .port_start = ata_port_start,
229 .port_stop = ata_port_stop,
230 .host_stop = ata_host_stop
231}; 231};
232 232
233/** 233/**
diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
index f02b6a3b0f10..ad5b43fef3d1 100644
--- a/drivers/ata/pata_serverworks.c
+++ b/drivers/ata/pata_serverworks.c
@@ -218,25 +218,18 @@ static unsigned long serverworks_osb4_filter(const struct ata_port *ap, struct a
218static unsigned long serverworks_csb_filter(const struct ata_port *ap, struct ata_device *adev, unsigned long mask) 218static unsigned long serverworks_csb_filter(const struct ata_port *ap, struct ata_device *adev, unsigned long mask)
219{ 219{
220 const char *p; 220 const char *p;
221 char model_num[40]; 221 char model_num[ATA_ID_PROD_LEN + 1];
222 int len, i; 222 int i;
223 223
224 /* Disk, UDMA */ 224 /* Disk, UDMA */
225 if (adev->class != ATA_DEV_ATA) 225 if (adev->class != ATA_DEV_ATA)
226 return ata_pci_default_filter(ap, adev, mask); 226 return ata_pci_default_filter(ap, adev, mask);
227 227
228 /* Actually do need to check */ 228 /* Actually do need to check */
229 ata_id_string(adev->id, model_num, ATA_ID_PROD_OFS, sizeof(model_num)); 229 ata_id_c_string(adev->id, model_num, ATA_ID_PROD, sizeof(model_num));
230 /* Precuationary - why not do this in the libata core ?? */
231 230
232 len = strlen(model_num); 231 for (i = 0; (p = csb_bad_ata100[i]) != NULL; i++) {
233 while ((len > 0) && (model_num[len - 1] == ' ')) { 232 if (!strcmp(p, model_num))
234 len--;
235 model_num[len] = 0;
236 }
237
238 for(i = 0; (p = csb_bad_ata100[i]) != NULL; i++) {
239 if (!strncmp(p, model_num, len))
240 mask &= ~(0x1F << ATA_SHIFT_UDMA); 233 mask &= ~(0x1F << ATA_SHIFT_UDMA);
241 } 234 }
242 return ata_pci_default_filter(ap, adev, mask); 235 return ata_pci_default_filter(ap, adev, mask);
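ata_id_c_string() byte-swaps the IDENTIFY string, NUL-terminates it and strips trailing spaces itself, which is why the hand-rolled trimming loop and the strncmp() length bookkeeping above could be dropped in favour of a plain strcmp(). The loop expects a NULL-terminated table of full model strings along these lines (a sketch; the entries are illustrative, not copied from the patch):

	static const char *csb_bad_ata100[] = {
		"ST320011A",	/* models with broken UDMA100, for illustration */
		"ST340016A",
		NULL		/* terminator the for-loop tests for */
	};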
@@ -355,14 +348,14 @@ static struct ata_port_operations serverworks_osb4_port_ops = {
355 .qc_prep = ata_qc_prep, 348 .qc_prep = ata_qc_prep,
356 .qc_issue = ata_qc_issue_prot, 349 .qc_issue = ata_qc_issue_prot,
357 350
358 .data_xfer = ata_pio_data_xfer, 351 .data_xfer = ata_data_xfer,
359 352
360 .irq_handler = ata_interrupt, 353 .irq_handler = ata_interrupt,
361 .irq_clear = ata_bmdma_irq_clear, 354 .irq_clear = ata_bmdma_irq_clear,
355 .irq_on = ata_irq_on,
356 .irq_ack = ata_irq_ack,
362 357
363 .port_start = ata_port_start, 358 .port_start = ata_port_start,
364 .port_stop = ata_port_stop,
365 .host_stop = ata_host_stop
366}; 359};
367 360
368static struct ata_port_operations serverworks_csb_port_ops = { 361static struct ata_port_operations serverworks_csb_port_ops = {
@@ -390,14 +383,14 @@ static struct ata_port_operations serverworks_csb_port_ops = {
390 .qc_prep = ata_qc_prep, 383 .qc_prep = ata_qc_prep,
391 .qc_issue = ata_qc_issue_prot, 384 .qc_issue = ata_qc_issue_prot,
392 385
393 .data_xfer = ata_pio_data_xfer, 386 .data_xfer = ata_data_xfer,
394 387
395 .irq_handler = ata_interrupt, 388 .irq_handler = ata_interrupt,
396 .irq_clear = ata_bmdma_irq_clear, 389 .irq_clear = ata_bmdma_irq_clear,
390 .irq_on = ata_irq_on,
391 .irq_ack = ata_irq_ack,
397 392
398 .port_start = ata_port_start, 393 .port_start = ata_port_start,
399 .port_stop = ata_port_stop,
400 .host_stop = ata_host_stop
401}; 394};
402 395
403static int serverworks_fixup_osb4(struct pci_dev *pdev) 396static int serverworks_fixup_osb4(struct pci_dev *pdev)
@@ -559,7 +552,7 @@ static int serverworks_reinit_one(struct pci_dev *pdev)
559{ 552{
560 /* Force master latency timer to 64 PCI clocks */ 553 /* Force master latency timer to 64 PCI clocks */
561 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x40); 554 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x40);
562 555
563 switch (pdev->device) 556 switch (pdev->device)
564 { 557 {
565 case PCI_DEVICE_ID_SERVERWORKS_OSB4IDE: 558 case PCI_DEVICE_ID_SERVERWORKS_OSB4IDE:
diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
index e8dfd8fc3ff7..ed79fabe025c 100644
--- a/drivers/ata/pata_sil680.c
+++ b/drivers/ata/pata_sil680.c
@@ -252,14 +252,14 @@ static struct ata_port_operations sil680_port_ops = {
252 .qc_prep = ata_qc_prep, 252 .qc_prep = ata_qc_prep,
253 .qc_issue = ata_qc_issue_prot, 253 .qc_issue = ata_qc_issue_prot,
254 254
255 .data_xfer = ata_pio_data_xfer, 255 .data_xfer = ata_data_xfer,
256 256
257 .irq_handler = ata_interrupt, 257 .irq_handler = ata_interrupt,
258 .irq_clear = ata_bmdma_irq_clear, 258 .irq_clear = ata_bmdma_irq_clear,
259 .irq_on = ata_irq_on,
260 .irq_ack = ata_irq_ack,
259 261
260 .port_start = ata_port_start, 262 .port_start = ata_port_start,
261 .port_stop = ata_port_stop,
262 .host_stop = ata_host_stop
263}; 263};
264 264
265/** 265/**
@@ -270,7 +270,7 @@ static struct ata_port_operations sil680_port_ops = {
270 * is powered up on boot and when we resume in case we resumed from RAM. 270 * is powered up on boot and when we resume in case we resumed from RAM.
271 * Returns the final clock settings. 271 * Returns the final clock settings.
272 */ 272 */
273 273
274static u8 sil680_init_chip(struct pci_dev *pdev) 274static u8 sil680_init_chip(struct pci_dev *pdev)
275{ 275{
276 u32 class_rev = 0; 276 u32 class_rev = 0;
diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
index 916cedb3d755..560103d55b2e 100644
--- a/drivers/ata/pata_sis.c
+++ b/drivers/ata/pata_sis.c
@@ -32,7 +32,9 @@
32#include <scsi/scsi_host.h> 32#include <scsi/scsi_host.h>
33#include <linux/libata.h> 33#include <linux/libata.h>
34#include <linux/ata.h> 34#include <linux/ata.h>
35#include "libata.h"
35 36
37#undef DRV_NAME /* already defined in libata.h, for libata-core */
36#define DRV_NAME "pata_sis" 38#define DRV_NAME "pata_sis"
37#define DRV_VERSION "0.4.5" 39#define DRV_VERSION "0.4.5"
38 40
@@ -43,6 +45,34 @@ struct sis_chipset {
43 up code later */ 45 up code later */
44}; 46};
45 47
48struct sis_laptop {
49 u16 device;
50 u16 subvendor;
51 u16 subdevice;
52};
53
54static const struct sis_laptop sis_laptop[] = {
55 /* devid, subvendor, subdev */
56 { 0x5513, 0x1043, 0x1107 }, /* ASUS A6K */
57 /* end marker */
58 { 0, }
59};
60
61static int sis_short_ata40(struct pci_dev *dev)
62{
63 const struct sis_laptop *lap = &sis_laptop[0];
64
65 while (lap->device) {
66 if (lap->device == dev->device &&
67 lap->subvendor == dev->subsystem_vendor &&
68 lap->subdevice == dev->subsystem_device)
69 return 1;
70 lap++;
71 }
72
73 return 0;
74}
75
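sis_short_ata40() walks the sentinel-terminated sis_laptop[] table and reports a match only when the PCI device ID, subsystem vendor and subsystem device all line up, letting known machines override the register-based 40-wire detection below. Adding another short-cable laptop is one more row before the terminator; a sketch (the second entry's IDs are hypothetical):

	static const struct sis_laptop sis_laptop[] = {
		/* devid, subvendor, subdev */
		{ 0x5513, 0x1043, 0x1107 },	/* ASUS A6K */
		{ 0x5513, 0x1025, 0x0080 },	/* hypothetical further entry */
		/* end marker */
		{ 0, }
	};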
46/** 76/**
47 * sis_port_base - return PCI configuration base for dev 77 * sis_port_base - return PCI configuration base for dev
48 * @adev: device 78 * @adev: device
@@ -79,7 +109,7 @@ static int sis_133_pre_reset(struct ata_port *ap)
79 109
80 /* The top bit of this register is the cable detect bit */ 110 /* The top bit of this register is the cable detect bit */
81 pci_read_config_word(pdev, 0x50 + 2 * ap->port_no, &tmp); 111 pci_read_config_word(pdev, 0x50 + 2 * ap->port_no, &tmp);
82 if (tmp & 0x8000) 112 if ((tmp & 0x8000) && !sis_short_ata40(pdev))
83 ap->cbl = ATA_CBL_PATA40; 113 ap->cbl = ATA_CBL_PATA40;
84 else 114 else
85 ap->cbl = ATA_CBL_PATA80; 115 ap->cbl = ATA_CBL_PATA80;
@@ -127,7 +157,7 @@ static int sis_66_pre_reset(struct ata_port *ap)
127 /* Older chips keep cable detect in bits 4/5 of reg 0x48 */ 157 /* Older chips keep cable detect in bits 4/5 of reg 0x48 */
128 pci_read_config_byte(pdev, 0x48, &tmp); 158 pci_read_config_byte(pdev, 0x48, &tmp);
129 tmp >>= ap->port_no; 159 tmp >>= ap->port_no;
130 if (tmp & 0x10) 160 if ((tmp & 0x10) && !sis_short_ata40(pdev))
131 ap->cbl = ATA_CBL_PATA40; 161 ap->cbl = ATA_CBL_PATA40;
132 else 162 else
133 ap->cbl = ATA_CBL_PATA80; 163 ap->cbl = ATA_CBL_PATA80;
@@ -573,14 +603,14 @@ static const struct ata_port_operations sis_133_ops = {
573 .bmdma_status = ata_bmdma_status, 603 .bmdma_status = ata_bmdma_status,
574 .qc_prep = ata_qc_prep, 604 .qc_prep = ata_qc_prep,
575 .qc_issue = ata_qc_issue_prot, 605 .qc_issue = ata_qc_issue_prot,
576 .data_xfer = ata_pio_data_xfer, 606 .data_xfer = ata_data_xfer,
577 607
578 .irq_handler = ata_interrupt, 608 .irq_handler = ata_interrupt,
579 .irq_clear = ata_bmdma_irq_clear, 609 .irq_clear = ata_bmdma_irq_clear,
610 .irq_on = ata_irq_on,
611 .irq_ack = ata_irq_ack,
580 612
581 .port_start = ata_port_start, 613 .port_start = ata_port_start,
582 .port_stop = ata_port_stop,
583 .host_stop = ata_host_stop,
584}; 614};
585 615
586static const struct ata_port_operations sis_133_early_ops = { 616static const struct ata_port_operations sis_133_early_ops = {
@@ -606,14 +636,14 @@ static const struct ata_port_operations sis_133_early_ops = {
606 .bmdma_status = ata_bmdma_status, 636 .bmdma_status = ata_bmdma_status,
607 .qc_prep = ata_qc_prep, 637 .qc_prep = ata_qc_prep,
608 .qc_issue = ata_qc_issue_prot, 638 .qc_issue = ata_qc_issue_prot,
609 .data_xfer = ata_pio_data_xfer, 639 .data_xfer = ata_data_xfer,
610 640
611 .irq_handler = ata_interrupt, 641 .irq_handler = ata_interrupt,
612 .irq_clear = ata_bmdma_irq_clear, 642 .irq_clear = ata_bmdma_irq_clear,
643 .irq_on = ata_irq_on,
644 .irq_ack = ata_irq_ack,
613 645
614 .port_start = ata_port_start, 646 .port_start = ata_port_start,
615 .port_stop = ata_port_stop,
616 .host_stop = ata_host_stop,
617}; 647};
618 648
619static const struct ata_port_operations sis_100_ops = { 649static const struct ata_port_operations sis_100_ops = {
@@ -640,14 +670,14 @@ static const struct ata_port_operations sis_100_ops = {
640 .bmdma_status = ata_bmdma_status, 670 .bmdma_status = ata_bmdma_status,
641 .qc_prep = ata_qc_prep, 671 .qc_prep = ata_qc_prep,
642 .qc_issue = ata_qc_issue_prot, 672 .qc_issue = ata_qc_issue_prot,
643 .data_xfer = ata_pio_data_xfer, 673 .data_xfer = ata_data_xfer,
644 674
645 .irq_handler = ata_interrupt, 675 .irq_handler = ata_interrupt,
646 .irq_clear = ata_bmdma_irq_clear, 676 .irq_clear = ata_bmdma_irq_clear,
677 .irq_on = ata_irq_on,
678 .irq_ack = ata_irq_ack,
647 679
648 .port_start = ata_port_start, 680 .port_start = ata_port_start,
649 .port_stop = ata_port_stop,
650 .host_stop = ata_host_stop,
651}; 681};
652 682
653static const struct ata_port_operations sis_66_ops = { 683static const struct ata_port_operations sis_66_ops = {
@@ -673,14 +703,14 @@ static const struct ata_port_operations sis_66_ops = {
673 .bmdma_status = ata_bmdma_status, 703 .bmdma_status = ata_bmdma_status,
674 .qc_prep = ata_qc_prep, 704 .qc_prep = ata_qc_prep,
675 .qc_issue = ata_qc_issue_prot, 705 .qc_issue = ata_qc_issue_prot,
676 .data_xfer = ata_pio_data_xfer, 706 .data_xfer = ata_data_xfer,
677 707
678 .irq_handler = ata_interrupt, 708 .irq_handler = ata_interrupt,
679 .irq_clear = ata_bmdma_irq_clear, 709 .irq_clear = ata_bmdma_irq_clear,
710 .irq_on = ata_irq_on,
711 .irq_ack = ata_irq_ack,
680 712
681 .port_start = ata_port_start, 713 .port_start = ata_port_start,
682 .port_stop = ata_port_stop,
683 .host_stop = ata_host_stop,
684}; 714};
685 715
686static const struct ata_port_operations sis_old_ops = { 716static const struct ata_port_operations sis_old_ops = {
@@ -706,14 +736,14 @@ static const struct ata_port_operations sis_old_ops = {
706 .bmdma_status = ata_bmdma_status, 736 .bmdma_status = ata_bmdma_status,
707 .qc_prep = ata_qc_prep, 737 .qc_prep = ata_qc_prep,
708 .qc_issue = ata_qc_issue_prot, 738 .qc_issue = ata_qc_issue_prot,
709 .data_xfer = ata_pio_data_xfer, 739 .data_xfer = ata_data_xfer,
710 740
711 .irq_handler = ata_interrupt, 741 .irq_handler = ata_interrupt,
712 .irq_clear = ata_bmdma_irq_clear, 742 .irq_clear = ata_bmdma_irq_clear,
743 .irq_on = ata_irq_on,
744 .irq_ack = ata_irq_ack,
713 745
714 .port_start = ata_port_start, 746 .port_start = ata_port_start,
715 .port_stop = ata_port_stop,
716 .host_stop = ata_host_stop,
717}; 747};
718 748
719static struct ata_port_info sis_info = { 749static struct ata_port_info sis_info = {
@@ -753,7 +783,7 @@ static struct ata_port_info sis_info100_early = {
753 .pio_mask = 0x1f, /* pio0-4 */ 783 .pio_mask = 0x1f, /* pio0-4 */
754 .port_ops = &sis_66_ops, 784 .port_ops = &sis_66_ops,
755}; 785};
756static struct ata_port_info sis_info133 = { 786struct ata_port_info sis_info133 = {
757 .sht = &sis_sht, 787 .sht = &sis_sht,
758 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, 788 .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
759 .pio_mask = 0x1f, /* pio0-4 */ 789 .pio_mask = 0x1f, /* pio0-4 */
@@ -768,6 +798,8 @@ static struct ata_port_info sis_info133_early = {
768 .port_ops = &sis_133_early_ops, 798 .port_ops = &sis_133_early_ops,
769}; 799};
770 800
801/* Privately shared with the SiS180 SATA driver, not for use elsewhere */
802EXPORT_SYMBOL_GPL(sis_info133);
771 803
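With the static qualifier dropped and the GPL-only export added, the SATA driver for the same chips can reuse this port info; the consumer side amounts to an extern declaration (a sketch of what sata_sis is assumed to do, not part of this patch):

	/* in the SiS SATA driver (sketch) */
	extern struct ata_port_info sis_info133;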
772static void sis_fixup(struct pci_dev *pdev, struct sis_chipset *sis) 804static void sis_fixup(struct pci_dev *pdev, struct sis_chipset *sis)
773{ 805{
@@ -847,7 +879,7 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
847 struct sis_chipset *chipset = NULL; 879 struct sis_chipset *chipset = NULL;
848 880
849 static struct sis_chipset sis_chipsets[] = { 881 static struct sis_chipset sis_chipsets[] = {
850 882
851 { 0x0968, &sis_info133 }, 883 { 0x0968, &sis_info133 },
852 { 0x0966, &sis_info133 }, 884 { 0x0966, &sis_info133 },
853 { 0x0965, &sis_info133 }, 885 { 0x0965, &sis_info133 },
diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
index e94f515ef54b..f2fa158d07ca 100644
--- a/drivers/ata/pata_sl82c105.c
+++ b/drivers/ata/pata_sl82c105.c
@@ -139,13 +139,13 @@ static void sl82c105_set_dmamode(struct ata_port *ap, struct ata_device *adev)
139{ 139{
140 switch(adev->dma_mode) { 140 switch(adev->dma_mode) {
141 case XFER_MW_DMA_0: 141 case XFER_MW_DMA_0:
142 sl82c105_configure_piomode(ap, adev, 1); 142 sl82c105_configure_piomode(ap, adev, 0);
143 break; 143 break;
144 case XFER_MW_DMA_1: 144 case XFER_MW_DMA_1:
145 sl82c105_configure_piomode(ap, adev, 3); 145 sl82c105_configure_piomode(ap, adev, 3);
146 break; 146 break;
147 case XFER_MW_DMA_2: 147 case XFER_MW_DMA_2:
148 sl82c105_configure_piomode(ap, adev, 3); 148 sl82c105_configure_piomode(ap, adev, 4);
149 break; 149 break;
150 default: 150 default:
151 BUG(); 151 BUG();
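The corrected switch pairs each MWDMA mode with the PIO mode of comparable cycle time rather than the old 1/3/3 mapping. The pairing assumed here (standard ATA cycle times, noted for context):

	/* MWDMA -> PIO timing pairing used above (sketch):
	 *   XFER_MW_DMA_0 (480 ns cycle) -> PIO0 timings
	 *   XFER_MW_DMA_1 (150 ns cycle) -> PIO3 timings
	 *   XFER_MW_DMA_2 (120 ns cycle) -> PIO4 timings
	 */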
@@ -262,14 +262,14 @@ static struct ata_port_operations sl82c105_port_ops = {
262 .qc_prep = ata_qc_prep, 262 .qc_prep = ata_qc_prep,
263 .qc_issue = ata_qc_issue_prot, 263 .qc_issue = ata_qc_issue_prot,
264 264
265 .data_xfer = ata_pio_data_xfer, 265 .data_xfer = ata_data_xfer,
266 266
267 .irq_handler = ata_interrupt, 267 .irq_handler = ata_interrupt,
268 .irq_clear = ata_bmdma_irq_clear, 268 .irq_clear = ata_bmdma_irq_clear,
269 .irq_on = ata_irq_on,
270 .irq_ack = ata_irq_ack,
269 271
270 .port_start = ata_port_start, 272 .port_start = ata_port_start,
271 .port_stop = ata_port_stop,
272 .host_stop = ata_host_stop
273}; 273};
274 274
275/** 275/**
diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c
index a142971f1307..453ab90b721e 100644
--- a/drivers/ata/pata_triflex.c
+++ b/drivers/ata/pata_triflex.c
@@ -221,14 +221,14 @@ static struct ata_port_operations triflex_port_ops = {
221 .qc_prep = ata_qc_prep, 221 .qc_prep = ata_qc_prep,
222 .qc_issue = ata_qc_issue_prot, 222 .qc_issue = ata_qc_issue_prot,
223 223
224 .data_xfer = ata_pio_data_xfer, 224 .data_xfer = ata_data_xfer,
225 225
226 .irq_handler = ata_interrupt, 226 .irq_handler = ata_interrupt,
227 .irq_clear = ata_bmdma_irq_clear, 227 .irq_clear = ata_bmdma_irq_clear,
228 .irq_on = ata_irq_on,
229 .irq_ack = ata_irq_ack,
228 230
229 .port_start = ata_port_start, 231 .port_start = ata_port_start,
230 .port_stop = ata_port_stop,
231 .host_stop = ata_host_stop
232}; 232};
233 233
234static int triflex_init_one(struct pci_dev *dev, const struct pci_device_id *id) 234static int triflex_init_one(struct pci_dev *dev, const struct pci_device_id *id)
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index 0219419cae97..220fcd6c5492 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -23,6 +23,7 @@
23 * VIA VT8233c - UDMA100 23 * VIA VT8233c - UDMA100
24 * VIA VT8235 - UDMA133 24 * VIA VT8235 - UDMA133
25 * VIA VT8237 - UDMA133 25 * VIA VT8237 - UDMA133
26 * VIA VT8237S - UDMA133
26 * VIA VT8251 - UDMA133 27 * VIA VT8251 - UDMA133
27 * 28 *
28 * Most registers remain compatible across chips. Others start reserved 29 * Most registers remain compatible across chips. Others start reserved
@@ -61,7 +62,7 @@
61#include <linux/libata.h> 62#include <linux/libata.h>
62 63
63#define DRV_NAME "pata_via" 64#define DRV_NAME "pata_via"
64#define DRV_VERSION "0.2.0" 65#define DRV_VERSION "0.2.1"
65 66
66/* 67/*
67 * The following comes directly from Vojtech Pavlik's ide/pci/via82cxxx 68 * The following comes directly from Vojtech Pavlik's ide/pci/via82cxxx
@@ -333,14 +334,14 @@ static struct ata_port_operations via_port_ops = {
333 .qc_prep = ata_qc_prep, 334 .qc_prep = ata_qc_prep,
334 .qc_issue = ata_qc_issue_prot, 335 .qc_issue = ata_qc_issue_prot,
335 336
336 .data_xfer = ata_pio_data_xfer, 337 .data_xfer = ata_data_xfer,
337 338
338 .irq_handler = ata_interrupt, 339 .irq_handler = ata_interrupt,
339 .irq_clear = ata_bmdma_irq_clear, 340 .irq_clear = ata_bmdma_irq_clear,
341 .irq_on = ata_irq_on,
342 .irq_ack = ata_irq_ack,
340 343
341 .port_start = ata_port_start, 344 .port_start = ata_port_start,
342 .port_stop = ata_port_stop,
343 .host_stop = ata_host_stop
344}; 345};
345 346
346static struct ata_port_operations via_port_ops_noirq = { 347static struct ata_port_operations via_port_ops_noirq = {
@@ -368,14 +369,14 @@ static struct ata_port_operations via_port_ops_noirq = {
368 .qc_prep = ata_qc_prep, 369 .qc_prep = ata_qc_prep,
369 .qc_issue = ata_qc_issue_prot, 370 .qc_issue = ata_qc_issue_prot,
370 371
371 .data_xfer = ata_pio_data_xfer_noirq, 372 .data_xfer = ata_data_xfer_noirq,
372 373
373 .irq_handler = ata_interrupt, 374 .irq_handler = ata_interrupt,
374 .irq_clear = ata_bmdma_irq_clear, 375 .irq_clear = ata_bmdma_irq_clear,
376 .irq_on = ata_irq_on,
377 .irq_ack = ata_irq_ack,
375 378
376 .port_start = ata_port_start, 379 .port_start = ata_port_start,
377 .port_stop = ata_port_stop,
378 .host_stop = ata_host_stop
379}; 380};
380 381
381/** 382/**
@@ -390,11 +391,11 @@ static struct ata_port_operations via_port_ops_noirq = {
390static void via_config_fifo(struct pci_dev *pdev, unsigned int flags) 391static void via_config_fifo(struct pci_dev *pdev, unsigned int flags)
391{ 392{
392 u8 enable; 393 u8 enable;
393 394
394 /* 0x40 low bits indicate enabled channels */ 395 /* 0x40 low bits indicate enabled channels */
395 pci_read_config_byte(pdev, 0x40 , &enable); 396 pci_read_config_byte(pdev, 0x40 , &enable);
396 enable &= 3; 397 enable &= 3;
397 398
398 if (flags & VIA_SET_FIFO) { 399 if (flags & VIA_SET_FIFO) {
399 static const u8 fifo_setting[4] = {0x00, 0x60, 0x00, 0x20}; 400 static const u8 fifo_setting[4] = {0x00, 0x60, 0x00, 0x20};
400 u8 fifo; 401 u8 fifo;
@@ -515,7 +516,7 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
515 516
516 /* Initialise the FIFO for the enabled channels. */ 517 /* Initialise the FIFO for the enabled channels. */
517 via_config_fifo(pdev, config->flags); 518 via_config_fifo(pdev, config->flags);
518 519
519 /* Clock set up */ 520 /* Clock set up */
520 switch(config->flags & VIA_UDMA) { 521 switch(config->flags & VIA_UDMA) {
521 case VIA_UDMA_NONE: 522 case VIA_UDMA_NONE:
@@ -574,7 +575,7 @@ static int via_reinit_one(struct pci_dev *pdev)
574 u32 timing; 575 u32 timing;
575 struct ata_host *host = dev_get_drvdata(&pdev->dev); 576 struct ata_host *host = dev_get_drvdata(&pdev->dev);
576 const struct via_isa_bridge *config = host->private_data; 577 const struct via_isa_bridge *config = host->private_data;
577 578
578 via_config_fifo(pdev, config->flags); 579 via_config_fifo(pdev, config->flags);
579 580
580 if ((config->flags & VIA_UDMA) == VIA_UDMA_66) { 581 if ((config->flags & VIA_UDMA) == VIA_UDMA_66) {
@@ -589,7 +590,7 @@ static int via_reinit_one(struct pci_dev *pdev)
589 timing &= ~0x80008; 590 timing &= ~0x80008;
590 pci_write_config_dword(pdev, 0x50, timing); 591 pci_write_config_dword(pdev, 0x50, timing);
591 } 592 }
592 return ata_pci_device_resume(pdev); 593 return ata_pci_device_resume(pdev);
593} 594}
594 595
595static const struct pci_device_id via[] = { 596static const struct pci_device_id via[] = {
diff --git a/drivers/ata/pata_winbond.c b/drivers/ata/pata_winbond.c
index 5d1f518e1cc7..0888b4f19f4c 100644
--- a/drivers/ata/pata_winbond.c
+++ b/drivers/ata/pata_winbond.c
@@ -5,7 +5,7 @@
5 * Support for the Winbond 83759A when operating in advanced mode. 5 * Support for the Winbond 83759A when operating in advanced mode.
6 * Multichip mode is not currently supported. 6 * Multichip mode is not currently supported.
7 */ 7 */
8 8
9#include <linux/kernel.h> 9#include <linux/kernel.h>
10#include <linux/module.h> 10#include <linux/module.h>
11#include <linux/pci.h> 11#include <linux/pci.h>
@@ -69,7 +69,7 @@ static void winbond_set_piomode(struct ata_port *ap, struct ata_device *adev)
69 int timing = 0x88 + (ap->port_no * 4) + (adev->devno * 2); 69 int timing = 0x88 + (ap->port_no * 4) + (adev->devno * 2);
70 70
71 reg = winbond_readcfg(winbond->config, 0x81); 71 reg = winbond_readcfg(winbond->config, 0x81);
72 72
73 /* Get the timing data in cycles */ 73 /* Get the timing data in cycles */
74 if (reg & 0x40) /* Fast VLB bus, assume 50MHz */ 74 if (reg & 0x40) /* Fast VLB bus, assume 50MHz */
75 ata_timing_compute(adev, adev->pio_mode, &t, 20000, 1000); 75 ata_timing_compute(adev, adev->pio_mode, &t, 20000, 1000);
@@ -80,9 +80,9 @@ static void winbond_set_piomode(struct ata_port *ap, struct ata_device *adev)
80 recovery = (FIT(t.recover, 1, 15) + 1) & 0x0F; 80 recovery = (FIT(t.recover, 1, 15) + 1) & 0x0F;
81 timing = (active << 4) | recovery; 81 timing = (active << 4) | recovery;
82 winbond_writecfg(winbond->config, timing, reg); 82 winbond_writecfg(winbond->config, timing, reg);
83 83
84 /* Load the setup timing */ 84 /* Load the setup timing */
85 85
86 reg = 0x35; 86 reg = 0x35;
87 if (adev->class != ATA_DEV_ATA) 87 if (adev->class != ATA_DEV_ATA)
88 reg |= 0x08; /* FIFO off */ 88 reg |= 0x08; /* FIFO off */
@@ -100,22 +100,24 @@ static void winbond_data_xfer(struct ata_device *adev, unsigned char *buf, unsig
100 100
101 if (ata_id_has_dword_io(adev->id)) { 101 if (ata_id_has_dword_io(adev->id)) {
102 if (write_data) 102 if (write_data)
103 outsl(ap->ioaddr.data_addr, buf, buflen >> 2); 103 iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
104 else 104 else
105 insl(ap->ioaddr.data_addr, buf, buflen >> 2); 105 ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
106 106
107 if (unlikely(slop)) { 107 if (unlikely(slop)) {
108 u32 pad; 108 u32 pad;
109 if (write_data) { 109 if (write_data) {
110 memcpy(&pad, buf + buflen - slop, slop); 110 memcpy(&pad, buf + buflen - slop, slop);
111 outl(le32_to_cpu(pad), ap->ioaddr.data_addr); 111 pad = le32_to_cpu(pad);
112 iowrite32(pad, ap->ioaddr.data_addr);
112 } else { 113 } else {
113 pad = cpu_to_le16(inl(ap->ioaddr.data_addr)); 114 pad = ioread32(ap->ioaddr.data_addr);
 115 pad = cpu_to_le32(pad);
114 memcpy(buf + buflen - slop, &pad, slop); 116 memcpy(buf + buflen - slop, &pad, slop);
115 } 117 }
116 } 118 }
117 } else 119 } else
118 ata_pio_data_xfer(adev, buf, buflen, write_data); 120 ata_data_xfer(adev, buf, buflen, write_data);
119} 121}
120 122
121static struct scsi_host_template winbond_sht = { 123static struct scsi_host_template winbond_sht = {
@@ -158,10 +160,10 @@ static struct ata_port_operations winbond_port_ops = {
158 160
159 .irq_handler = ata_interrupt, 161 .irq_handler = ata_interrupt,
160 .irq_clear = ata_bmdma_irq_clear, 162 .irq_clear = ata_bmdma_irq_clear,
163 .irq_on = ata_irq_on,
164 .irq_ack = ata_irq_ack,
161 165
162 .port_start = ata_port_start, 166 .port_start = ata_port_start,
163 .port_stop = ata_port_stop,
164 .host_stop = ata_host_stop
165}; 167};
166 168
167/** 169/**
@@ -194,13 +196,15 @@ static __init int winbond_init_one(unsigned long port)
194 winbond_writecfg(port, 0x85, reg); 196 winbond_writecfg(port, 0x85, reg);
195 197
196 reg = winbond_readcfg(port, 0x81); 198 reg = winbond_readcfg(port, 0x81);
197 199
198 if (!(reg & 0x03)) /* Disabled */ 200 if (!(reg & 0x03)) /* Disabled */
199 return 0; 201 return 0;
200 202
201 for (i = 0; i < 2 ; i ++) { 203 for (i = 0; i < 2 ; i ++) {
204 unsigned long cmd_port = 0x1F0 - (0x80 * i);
205 void __iomem *cmd_addr, *ctl_addr;
202 206
203 if (reg & (1 << i)) { 207 if (reg & (1 << i)) {
204 /* 208 /*
205 * Fill in a probe structure first of all 209 * Fill in a probe structure first of all
206 */ 210 */
@@ -209,6 +213,13 @@ static __init int winbond_init_one(unsigned long port)
209 if (IS_ERR(pdev)) 213 if (IS_ERR(pdev))
210 return PTR_ERR(pdev); 214 return PTR_ERR(pdev);
211 215
216 cmd_addr = devm_ioport_map(&pdev->dev, cmd_port, 8);
217 ctl_addr = devm_ioport_map(&pdev->dev, cmd_port + 0x0206, 1);
218 if (!cmd_addr || !ctl_addr) {
219 platform_device_unregister(pdev);
220 return -ENOMEM;
221 }
222
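devm_ioport_map() wraps a legacy I/O port range in a void __iomem * cookie so the same ioread*/iowrite* accessors serve both MMIO and port I/O, and the mapping is released automatically with the device; only the platform device still needs explicit unwinding. The channel math being mapped, worked out:

	/* i == 0: cmd_port = 0x1F0, ctl port = 0x1F0 + 0x206 = 0x3F6 (primary)
	 * i == 1: cmd_port = 0x170, ctl port = 0x170 + 0x206 = 0x376 (secondary)
	 */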
212 memset(&ae, 0, sizeof(struct ata_probe_ent)); 223 memset(&ae, 0, sizeof(struct ata_probe_ent));
213 INIT_LIST_HEAD(&ae.node); 224 INIT_LIST_HEAD(&ae.node);
214 ae.dev = &pdev->dev; 225 ae.dev = &pdev->dev;
@@ -217,14 +228,14 @@ static __init int winbond_init_one(unsigned long port)
217 ae.pio_mask = 0x1F; 228 ae.pio_mask = 0x1F;
218 229
219 ae.sht = &winbond_sht; 230 ae.sht = &winbond_sht;
220 231
221 ae.n_ports = 1; 232 ae.n_ports = 1;
222 ae.irq = 14 + i; 233 ae.irq = 14 + i;
223 ae.irq_flags = 0; 234 ae.irq_flags = 0;
224 ae.port_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST; 235 ae.port_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST;
225 ae.port[0].cmd_addr = 0x1F0 - (0x80 * i); 236 ae.port[0].cmd_addr = cmd_addr;
226 ae.port[0].altstatus_addr = ae.port[0].cmd_addr + 0x0206; 237 ae.port[0].altstatus_addr = ctl_addr;
227 ae.port[0].ctl_addr = ae.port[0].altstatus_addr; 238 ae.port[0].ctl_addr = ctl_addr;
228 ata_std_ports(&ae.port[0]); 239 ata_std_ports(&ae.port[0]);
229 /* 240 /*
230 * Hook in a private data structure per channel 241 * Hook in a private data structure per channel
@@ -257,7 +268,7 @@ static __init int winbond_init(void)
257 268
258 int ct = 0; 269 int ct = 0;
259 int i; 270 int i;
260 271
261 if (probe_winbond == 0) 272 if (probe_winbond == 0)
262 return -ENODEV; 273 return -ENODEV;
263 274
@@ -288,7 +299,7 @@ static __exit void winbond_exit(void)
288 int i; 299 int i;
289 300
290 for (i = 0; i < nr_winbond_host; i++) { 301 for (i = 0; i < nr_winbond_host; i++) {
291 ata_host_remove(winbond_host[i]); 302 ata_host_detach(winbond_host[i]);
292 release_region(winbond_data[i].config, 2); 303 release_region(winbond_data[i].config, 2);
293 platform_device_unregister(winbond_data[i].platform_dev); 304 platform_device_unregister(winbond_data[i].platform_dev);
294 } 305 }
diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
index 90786d7a20bb..b4ed8ce553e6 100644
--- a/drivers/ata/pdc_adma.c
+++ b/drivers/ata/pdc_adma.c
@@ -42,7 +42,6 @@
42#include <linux/sched.h> 42#include <linux/sched.h>
43#include <linux/device.h> 43#include <linux/device.h>
44#include <scsi/scsi_host.h> 44#include <scsi/scsi_host.h>
45#include <asm/io.h>
46#include <linux/libata.h> 45#include <linux/libata.h>
47 46
48#define DRV_NAME "pdc_adma" 47#define DRV_NAME "pdc_adma"
@@ -52,9 +51,15 @@
52#define ADMA_ATA_REGS(base,port_no) ((base) + ((port_no) * 0x40)) 51#define ADMA_ATA_REGS(base,port_no) ((base) + ((port_no) * 0x40))
53 52
54/* macro to calculate base address for ADMA regs */ 53/* macro to calculate base address for ADMA regs */
55#define ADMA_REGS(base,port_no) ((base) + 0x80 + ((port_no) * 0x20)) 54#define ADMA_REGS(base,port_no) ((base) + 0x80 + ((port_no) * 0x20))
55
56/* macro to obtain addresses from ata_host */
57#define ADMA_HOST_REGS(host,port_no) \
58 ADMA_REGS((host)->iomap[ADMA_MMIO_BAR], port_no)
56 59
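ADMA_HOST_REGS() resolves a channel's register window purely from the host's iomap table, using the BAR index defined in the enum just below; an equivalent open-coded form (a sketch, adma_chan_base() is a hypothetical helper):

	/* equivalent to ADMA_HOST_REGS(host, port_no) */
	static void __iomem *adma_chan_base(struct ata_host *host, unsigned int port_no)
	{
		return host->iomap[ADMA_MMIO_BAR] + 0x80 + port_no * 0x20;
	}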
57enum { 60enum {
61 ADMA_MMIO_BAR = 4,
62
58 ADMA_PORTS = 2, 63 ADMA_PORTS = 2,
59 ADMA_CPB_BYTES = 40, 64 ADMA_CPB_BYTES = 40,
60 ADMA_PRD_BYTES = LIBATA_MAX_PRD * 16, 65 ADMA_PRD_BYTES = LIBATA_MAX_PRD * 16,
@@ -167,9 +172,11 @@ static const struct ata_port_operations adma_ata_ops = {
167 .qc_prep = adma_qc_prep, 172 .qc_prep = adma_qc_prep,
168 .qc_issue = adma_qc_issue, 173 .qc_issue = adma_qc_issue,
169 .eng_timeout = adma_eng_timeout, 174 .eng_timeout = adma_eng_timeout,
170 .data_xfer = ata_mmio_data_xfer, 175 .data_xfer = ata_data_xfer,
171 .irq_handler = adma_intr, 176 .irq_handler = adma_intr,
172 .irq_clear = adma_irq_clear, 177 .irq_clear = adma_irq_clear,
178 .irq_on = ata_irq_on,
179 .irq_ack = ata_irq_ack,
173 .port_start = adma_port_start, 180 .port_start = adma_port_start,
174 .port_stop = adma_port_stop, 181 .port_stop = adma_port_stop,
175 .host_stop = adma_host_stop, 182 .host_stop = adma_host_stop,
@@ -235,11 +242,10 @@ static void adma_reset_engine(void __iomem *chan)
235static void adma_reinit_engine(struct ata_port *ap) 242static void adma_reinit_engine(struct ata_port *ap)
236{ 243{
237 struct adma_port_priv *pp = ap->private_data; 244 struct adma_port_priv *pp = ap->private_data;
238 void __iomem *mmio_base = ap->host->mmio_base; 245 void __iomem *chan = ADMA_HOST_REGS(ap->host, ap->port_no);
239 void __iomem *chan = ADMA_REGS(mmio_base, ap->port_no);
240 246
241 /* mask/clear ATA interrupts */ 247 /* mask/clear ATA interrupts */
242 writeb(ATA_NIEN, (void __iomem *)ap->ioaddr.ctl_addr); 248 writeb(ATA_NIEN, ap->ioaddr.ctl_addr);
243 ata_check_status(ap); 249 ata_check_status(ap);
244 250
245 /* reset the ADMA engine */ 251 /* reset the ADMA engine */
@@ -263,7 +269,7 @@ static void adma_reinit_engine(struct ata_port *ap)
263 269
264static inline void adma_enter_reg_mode(struct ata_port *ap) 270static inline void adma_enter_reg_mode(struct ata_port *ap)
265{ 271{
266 void __iomem *chan = ADMA_REGS(ap->host->mmio_base, ap->port_no); 272 void __iomem *chan = ADMA_HOST_REGS(ap->host, ap->port_no);
267 273
268 writew(aPIOMD4, chan + ADMA_CONTROL); 274 writew(aPIOMD4, chan + ADMA_CONTROL);
269 readb(chan + ADMA_STATUS); /* flush */ 275 readb(chan + ADMA_STATUS); /* flush */
@@ -410,7 +416,7 @@ static void adma_qc_prep(struct ata_queued_cmd *qc)
410static inline void adma_packet_start(struct ata_queued_cmd *qc) 416static inline void adma_packet_start(struct ata_queued_cmd *qc)
411{ 417{
412 struct ata_port *ap = qc->ap; 418 struct ata_port *ap = qc->ap;
413 void __iomem *chan = ADMA_REGS(ap->host->mmio_base, ap->port_no); 419 void __iomem *chan = ADMA_HOST_REGS(ap->host, ap->port_no);
414 420
415 VPRINTK("ENTER, ap %p\n", ap); 421 VPRINTK("ENTER, ap %p\n", ap);
416 422
@@ -443,13 +449,12 @@ static unsigned int adma_qc_issue(struct ata_queued_cmd *qc)
443static inline unsigned int adma_intr_pkt(struct ata_host *host) 449static inline unsigned int adma_intr_pkt(struct ata_host *host)
444{ 450{
445 unsigned int handled = 0, port_no; 451 unsigned int handled = 0, port_no;
446 u8 __iomem *mmio_base = host->mmio_base;
447 452
448 for (port_no = 0; port_no < host->n_ports; ++port_no) { 453 for (port_no = 0; port_no < host->n_ports; ++port_no) {
449 struct ata_port *ap = host->ports[port_no]; 454 struct ata_port *ap = host->ports[port_no];
450 struct adma_port_priv *pp; 455 struct adma_port_priv *pp;
451 struct ata_queued_cmd *qc; 456 struct ata_queued_cmd *qc;
452 void __iomem *chan = ADMA_REGS(mmio_base, port_no); 457 void __iomem *chan = ADMA_HOST_REGS(host, port_no);
453 u8 status = readb(chan + ADMA_STATUS); 458 u8 status = readb(chan + ADMA_STATUS);
454 459
455 if (status == 0) 460 if (status == 0)
@@ -523,7 +528,7 @@ static irqreturn_t adma_intr(int irq, void *dev_instance)
523 return IRQ_RETVAL(handled); 528 return IRQ_RETVAL(handled);
524} 529}
525 530
526static void adma_ata_setup_port(struct ata_ioports *port, unsigned long base) 531static void adma_ata_setup_port(struct ata_ioports *port, void __iomem *base)
527{ 532{
528 port->cmd_addr = 533 port->cmd_addr =
529 port->data_addr = base + 0x000; 534 port->data_addr = base + 0x000;
@@ -550,48 +555,28 @@ static int adma_port_start(struct ata_port *ap)
550 if (rc) 555 if (rc)
551 return rc; 556 return rc;
552 adma_enter_reg_mode(ap); 557 adma_enter_reg_mode(ap);
553 rc = -ENOMEM; 558 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
554 pp = kzalloc(sizeof(*pp), GFP_KERNEL);
555 if (!pp) 559 if (!pp)
556 goto err_out; 560 return -ENOMEM;
557 pp->pkt = dma_alloc_coherent(dev, ADMA_PKT_BYTES, &pp->pkt_dma, 561 pp->pkt = dmam_alloc_coherent(dev, ADMA_PKT_BYTES, &pp->pkt_dma,
558 GFP_KERNEL); 562 GFP_KERNEL);
559 if (!pp->pkt) 563 if (!pp->pkt)
560 goto err_out_kfree; 564 return -ENOMEM;
561 /* paranoia? */ 565 /* paranoia? */
562 if ((pp->pkt_dma & 7) != 0) { 566 if ((pp->pkt_dma & 7) != 0) {
563 printk("bad alignment for pp->pkt_dma: %08x\n", 567 printk("bad alignment for pp->pkt_dma: %08x\n",
564 (u32)pp->pkt_dma); 568 (u32)pp->pkt_dma);
565 dma_free_coherent(dev, ADMA_PKT_BYTES, 569 return -ENOMEM;
566 pp->pkt, pp->pkt_dma);
567 goto err_out_kfree;
568 } 570 }
569 memset(pp->pkt, 0, ADMA_PKT_BYTES); 571 memset(pp->pkt, 0, ADMA_PKT_BYTES);
570 ap->private_data = pp; 572 ap->private_data = pp;
571 adma_reinit_engine(ap); 573 adma_reinit_engine(ap);
572 return 0; 574 return 0;
573
574err_out_kfree:
575 kfree(pp);
576err_out:
577 ata_port_stop(ap);
578 return rc;
579} 575}
580 576
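With devm_kzalloc() and dmam_alloc_coherent() every allocation is owned by the device, so each early return just propagates the error and the old err_out unwind labels disappear: devres frees everything when the device is detached. The general shape of the pattern (a sketch under those assumptions; example_priv and PKT_BYTES are placeholders):

	static int example_port_start(struct ata_port *ap)
	{
		struct device *dev = ap->host->dev;
		struct example_priv *pp;

		pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
		if (!pp)
			return -ENOMEM;

		pp->pkt = dmam_alloc_coherent(dev, PKT_BYTES, &pp->pkt_dma,
					      GFP_KERNEL);
		if (!pp->pkt)
			return -ENOMEM;	/* pp freed by devres, no unwind needed */

		ap->private_data = pp;
		return 0;
	}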
581static void adma_port_stop(struct ata_port *ap) 577static void adma_port_stop(struct ata_port *ap)
582{ 578{
583 struct device *dev = ap->host->dev; 579 adma_reset_engine(ADMA_HOST_REGS(ap->host, ap->port_no));
584 struct adma_port_priv *pp = ap->private_data;
585
586 adma_reset_engine(ADMA_REGS(ap->host->mmio_base, ap->port_no));
587 if (pp != NULL) {
588 ap->private_data = NULL;
589 if (pp->pkt != NULL)
590 dma_free_coherent(dev, ADMA_PKT_BYTES,
591 pp->pkt, pp->pkt_dma);
592 kfree(pp);
593 }
594 ata_port_stop(ap);
595} 580}
596 581
597static void adma_host_stop(struct ata_host *host) 582static void adma_host_stop(struct ata_host *host)
@@ -599,16 +584,14 @@ static void adma_host_stop(struct ata_host *host)
599 unsigned int port_no; 584 unsigned int port_no;
600 585
601 for (port_no = 0; port_no < ADMA_PORTS; ++port_no) 586 for (port_no = 0; port_no < ADMA_PORTS; ++port_no)
602 adma_reset_engine(ADMA_REGS(host->mmio_base, port_no)); 587 adma_reset_engine(ADMA_HOST_REGS(host, port_no));
603
604 ata_pci_host_stop(host);
605} 588}
606 589
607static void adma_host_init(unsigned int chip_id, 590static void adma_host_init(unsigned int chip_id,
608 struct ata_probe_ent *probe_ent) 591 struct ata_probe_ent *probe_ent)
609{ 592{
610 unsigned int port_no; 593 unsigned int port_no;
611 void __iomem *mmio_base = probe_ent->mmio_base; 594 void __iomem *mmio_base = probe_ent->iomap[ADMA_MMIO_BAR];
612 595
613 /* enable/lock aGO operation */ 596 /* enable/lock aGO operation */
614 writeb(7, mmio_base + ADMA_MODE_LOCK); 597 writeb(7, mmio_base + ADMA_MODE_LOCK);
@@ -638,7 +621,7 @@ static int adma_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base)
638} 621}
639 622
640static int adma_ata_init_one(struct pci_dev *pdev, 623static int adma_ata_init_one(struct pci_dev *pdev,
641 const struct pci_device_id *ent) 624 const struct pci_device_id *ent)
642{ 625{
643 static int printed_version; 626 static int printed_version;
644 struct ata_probe_ent *probe_ent = NULL; 627 struct ata_probe_ent *probe_ent = NULL;
@@ -649,34 +632,25 @@ static int adma_ata_init_one(struct pci_dev *pdev,
649 if (!printed_version++) 632 if (!printed_version++)
650 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); 633 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
651 634
652 rc = pci_enable_device(pdev); 635 rc = pcim_enable_device(pdev);
653 if (rc) 636 if (rc)
654 return rc; 637 return rc;
655 638
656 rc = pci_request_regions(pdev, DRV_NAME); 639 if ((pci_resource_flags(pdev, 4) & IORESOURCE_MEM) == 0)
657 if (rc) 640 return -ENODEV;
658 goto err_out;
659
660 if ((pci_resource_flags(pdev, 4) & IORESOURCE_MEM) == 0) {
661 rc = -ENODEV;
662 goto err_out_regions;
663 }
664 641
665 mmio_base = pci_iomap(pdev, 4, 0); 642 rc = pcim_iomap_regions(pdev, 1 << ADMA_MMIO_BAR, DRV_NAME);
666 if (mmio_base == NULL) { 643 if (rc)
667 rc = -ENOMEM; 644 return rc;
668 goto err_out_regions; 645 mmio_base = pcim_iomap_table(pdev)[ADMA_MMIO_BAR];
669 }
670 646
671 rc = adma_set_dma_masks(pdev, mmio_base); 647 rc = adma_set_dma_masks(pdev, mmio_base);
672 if (rc) 648 if (rc)
673 goto err_out_iounmap; 649 return rc;
674 650
675 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL); 651 probe_ent = devm_kzalloc(&pdev->dev, sizeof(*probe_ent), GFP_KERNEL);
676 if (probe_ent == NULL) { 652 if (probe_ent == NULL)
677 rc = -ENOMEM; 653 return -ENOMEM;
678 goto err_out_iounmap;
679 }
680 654
681 probe_ent->dev = pci_dev_to_dev(pdev); 655 probe_ent->dev = pci_dev_to_dev(pdev);
682 INIT_LIST_HEAD(&probe_ent->node); 656 INIT_LIST_HEAD(&probe_ent->node);
@@ -690,12 +664,12 @@ static int adma_ata_init_one(struct pci_dev *pdev,
690 664
691 probe_ent->irq = pdev->irq; 665 probe_ent->irq = pdev->irq;
692 probe_ent->irq_flags = IRQF_SHARED; 666 probe_ent->irq_flags = IRQF_SHARED;
693 probe_ent->mmio_base = mmio_base;
694 probe_ent->n_ports = ADMA_PORTS; 667 probe_ent->n_ports = ADMA_PORTS;
668 probe_ent->iomap = pcim_iomap_table(pdev);
695 669
696 for (port_no = 0; port_no < probe_ent->n_ports; ++port_no) { 670 for (port_no = 0; port_no < probe_ent->n_ports; ++port_no) {
697 adma_ata_setup_port(&probe_ent->port[port_no], 671 adma_ata_setup_port(&probe_ent->port[port_no],
698 ADMA_ATA_REGS((unsigned long)mmio_base, port_no)); 672 ADMA_ATA_REGS(mmio_base, port_no));
699 } 673 }
700 674
701 pci_set_master(pdev); 675 pci_set_master(pdev);
@@ -703,19 +677,11 @@ static int adma_ata_init_one(struct pci_dev *pdev,
703 /* initialize adapter */ 677 /* initialize adapter */
704 adma_host_init(board_idx, probe_ent); 678 adma_host_init(board_idx, probe_ent);
705 679
706 rc = ata_device_add(probe_ent); 680 if (!ata_device_add(probe_ent))
707 kfree(probe_ent); 681 return -ENODEV;
708 if (rc != ADMA_PORTS)
709 goto err_out_iounmap;
710 return 0;
711 682
712err_out_iounmap: 683 devm_kfree(&pdev->dev, probe_ent);
713 pci_iounmap(pdev, mmio_base); 684 return 0;
714err_out_regions:
715 pci_release_regions(pdev);
716err_out:
717 pci_disable_device(pdev);
718 return rc;
719} 685}
720 686
721static int __init adma_ata_init(void) 687static int __init adma_ata_init(void)
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
new file mode 100644
index 000000000000..c5335f422801
--- /dev/null
+++ b/drivers/ata/sata_inic162x.c
@@ -0,0 +1,781 @@
1/*
2 * sata_inic162x.c - Driver for Initio 162x SATA controllers
3 *
4 * Copyright 2006 SUSE Linux Products GmbH
5 * Copyright 2006 Tejun Heo <teheo@novell.com>
6 *
7 * This file is released under GPL v2.
8 *
9 * This controller is eccentric and easily locks up if something isn't
 10 * right. Documentation is available at Initio's website but it only
 11 * documents registers (not the programming model).
12 *
13 * - ATA disks work.
14 * - Hotplug works.
15 * - ATAPI read works but burning doesn't. This thing is really
16 * peculiar about ATAPI and I couldn't figure out how ATAPI PIO and
17 * ATAPI DMA WRITE should be programmed. If you've got a clue, be
18 * my guest.
19 * - Both STR and STD work.
20 */
21
22#include <linux/kernel.h>
23#include <linux/module.h>
24#include <linux/pci.h>
25#include <scsi/scsi_host.h>
26#include <linux/libata.h>
27#include <linux/blkdev.h>
28#include <scsi/scsi_device.h>
29
30#define DRV_NAME "sata_inic162x"
31#define DRV_VERSION "0.1"
32
33enum {
34 MMIO_BAR = 5,
35
36 NR_PORTS = 2,
37
38 HOST_CTL = 0x7c,
39 HOST_STAT = 0x7e,
40 HOST_IRQ_STAT = 0xbc,
41 HOST_IRQ_MASK = 0xbe,
42
43 PORT_SIZE = 0x40,
44
45 /* registers for ATA TF operation */
46 PORT_TF = 0x00,
47 PORT_ALT_STAT = 0x08,
48 PORT_IRQ_STAT = 0x09,
49 PORT_IRQ_MASK = 0x0a,
50 PORT_PRD_CTL = 0x0b,
51 PORT_PRD_ADDR = 0x0c,
52 PORT_PRD_XFERLEN = 0x10,
53
54 /* IDMA register */
55 PORT_IDMA_CTL = 0x14,
56
57 PORT_SCR = 0x20,
58
59 /* HOST_CTL bits */
60 HCTL_IRQOFF = (1 << 8), /* global IRQ off */
61 HCTL_PWRDWN = (1 << 13), /* power down PHYs */
62 HCTL_SOFTRST = (1 << 13), /* global reset (no phy reset) */
63 HCTL_RPGSEL = (1 << 15), /* register page select */
64
65 HCTL_KNOWN_BITS = HCTL_IRQOFF | HCTL_PWRDWN | HCTL_SOFTRST |
66 HCTL_RPGSEL,
67
68 /* HOST_IRQ_(STAT|MASK) bits */
69 HIRQ_PORT0 = (1 << 0),
70 HIRQ_PORT1 = (1 << 1),
71 HIRQ_SOFT = (1 << 14),
72 HIRQ_GLOBAL = (1 << 15), /* STAT only */
73
74 /* PORT_IRQ_(STAT|MASK) bits */
75 PIRQ_OFFLINE = (1 << 0), /* device unplugged */
76 PIRQ_ONLINE = (1 << 1), /* device plugged */
77 PIRQ_COMPLETE = (1 << 2), /* completion interrupt */
78 PIRQ_FATAL = (1 << 3), /* fatal error */
79 PIRQ_ATA = (1 << 4), /* ATA interrupt */
80 PIRQ_REPLY = (1 << 5), /* reply FIFO not empty */
81 PIRQ_PENDING = (1 << 7), /* port IRQ pending (STAT only) */
82
83 PIRQ_ERR = PIRQ_OFFLINE | PIRQ_ONLINE | PIRQ_FATAL,
84
85 PIRQ_MASK_DMA_READ = PIRQ_REPLY | PIRQ_ATA,
86 PIRQ_MASK_OTHER = PIRQ_REPLY | PIRQ_COMPLETE,
87 PIRQ_MASK_FREEZE = 0xff,
88
89 /* PORT_PRD_CTL bits */
90 PRD_CTL_START = (1 << 0),
91 PRD_CTL_WR = (1 << 3),
92 PRD_CTL_DMAEN = (1 << 7), /* DMA enable */
93
94 /* PORT_IDMA_CTL bits */
95 IDMA_CTL_RST_ATA = (1 << 2), /* hardreset ATA bus */
 96 IDMA_CTL_RST_IDMA = (1 << 5), /* reset IDMA machinery */
97 IDMA_CTL_GO = (1 << 7), /* IDMA mode go */
98 IDMA_CTL_ATA_NIEN = (1 << 8), /* ATA IRQ disable */
99};
100
101struct inic_host_priv {
102 u16 cached_hctl;
103};
104
105struct inic_port_priv {
106 u8 dfl_prdctl;
107 u8 cached_prdctl;
108 u8 cached_pirq_mask;
109};
110
111static int inic_slave_config(struct scsi_device *sdev)
112{
113 /* This controller is braindamaged. dma_boundary is 0xffff
114 * like others but it will lock up the whole machine HARD if
 115 * a 65536 byte PRD entry is fed. Reduce the maximum segment size.
116 */
117 blk_queue_max_segment_size(sdev->request_queue, 65536 - 512);
118
119 return ata_scsi_slave_config(sdev);
120}
121
122static struct scsi_host_template inic_sht = {
123 .module = THIS_MODULE,
124 .name = DRV_NAME,
125 .ioctl = ata_scsi_ioctl,
126 .queuecommand = ata_scsi_queuecmd,
127 .can_queue = ATA_DEF_QUEUE,
128 .this_id = ATA_SHT_THIS_ID,
129 .sg_tablesize = LIBATA_MAX_PRD,
130 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
131 .emulated = ATA_SHT_EMULATED,
132 .use_clustering = ATA_SHT_USE_CLUSTERING,
133 .proc_name = DRV_NAME,
134 .dma_boundary = ATA_DMA_BOUNDARY,
135 .slave_configure = inic_slave_config,
136 .slave_destroy = ata_scsi_slave_destroy,
137 .bios_param = ata_std_bios_param,
138 .suspend = ata_scsi_device_suspend,
139 .resume = ata_scsi_device_resume,
140};
141
142static const int scr_map[] = {
143 [SCR_STATUS] = 0,
144 [SCR_ERROR] = 1,
145 [SCR_CONTROL] = 2,
146};
147
148static void __iomem * inic_port_base(struct ata_port *ap)
149{
150 return ap->host->iomap[MMIO_BAR] + ap->port_no * PORT_SIZE;
151}
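Each port owns a PORT_SIZE (0x40) byte window inside BAR 5, so the per-port register offsets in the enum above are all relative to that window; worked out for the two ports:

	/* port 0 registers: iomap[MMIO_BAR] + 0x00 .. 0x3f
	 * port 1 registers: iomap[MMIO_BAR] + 0x40 .. 0x7f
	 *   e.g. port 1's PORT_IRQ_STAT is at iomap[MMIO_BAR] + 0x49
	 */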
152
153static void __inic_set_pirq_mask(struct ata_port *ap, u8 mask)
154{
155 void __iomem *port_base = inic_port_base(ap);
156 struct inic_port_priv *pp = ap->private_data;
157
158 writeb(mask, port_base + PORT_IRQ_MASK);
159 pp->cached_pirq_mask = mask;
160}
161
162static void inic_set_pirq_mask(struct ata_port *ap, u8 mask)
163{
164 struct inic_port_priv *pp = ap->private_data;
165
166 if (pp->cached_pirq_mask != mask)
167 __inic_set_pirq_mask(ap, mask);
168}
169
170static void inic_reset_port(void __iomem *port_base)
171{
172 void __iomem *idma_ctl = port_base + PORT_IDMA_CTL;
173 u16 ctl;
174
175 ctl = readw(idma_ctl);
176 ctl &= ~(IDMA_CTL_RST_IDMA | IDMA_CTL_ATA_NIEN | IDMA_CTL_GO);
177
178 /* mask IRQ and assert reset */
179 writew(ctl | IDMA_CTL_RST_IDMA | IDMA_CTL_ATA_NIEN, idma_ctl);
180 readw(idma_ctl); /* flush */
181
182 /* give it some time */
183 msleep(1);
184
185 /* release reset */
186 writew(ctl | IDMA_CTL_ATA_NIEN, idma_ctl);
187
188 /* clear irq */
189 writeb(0xff, port_base + PORT_IRQ_STAT);
190
191 /* reenable ATA IRQ, turn off IDMA mode */
192 writew(ctl, idma_ctl);
193}
194
195static u32 inic_scr_read(struct ata_port *ap, unsigned sc_reg)
196{
197 void __iomem *scr_addr = (void __iomem *)ap->ioaddr.scr_addr;
198 void __iomem *addr;
199 u32 val;
200
201 if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
202 return 0xffffffffU;
203
 204 addr = scr_addr + scr_map[sc_reg] * 4;
 205 val = readl(addr);
206
207 /* this controller has stuck DIAG.N, ignore it */
208 if (sc_reg == SCR_ERROR)
209 val &= ~SERR_PHYRDY_CHG;
210 return val;
211}
212
213static void inic_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val)
214{
215 void __iomem *scr_addr = (void __iomem *)ap->ioaddr.scr_addr;
216 void __iomem *addr;
217
218 if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
219 return;
220
 221 addr = scr_addr + scr_map[sc_reg] * 4;
 222 writel(val, addr);
223}
224
225/*
 226 * In TF mode, the inic162x is very similar to an SFF device. TF registers
 227 * function the same. The DMA engine behaves similarly, using the same PRD
 228 * format as BMDMA, but different command register, interrupt and event
 229 * notification methods are used. The following inic_bmdma_*()
230 * functions do the impedance matching.
231 */
232static void inic_bmdma_setup(struct ata_queued_cmd *qc)
233{
234 struct ata_port *ap = qc->ap;
235 struct inic_port_priv *pp = ap->private_data;
236 void __iomem *port_base = inic_port_base(ap);
237 int rw = qc->tf.flags & ATA_TFLAG_WRITE;
238
239 /* make sure device sees PRD table writes */
240 wmb();
241
242 /* load transfer length */
243 writel(qc->nbytes, port_base + PORT_PRD_XFERLEN);
244
245 /* turn on DMA and specify data direction */
246 pp->cached_prdctl = pp->dfl_prdctl | PRD_CTL_DMAEN;
247 if (!rw)
248 pp->cached_prdctl |= PRD_CTL_WR;
249 writeb(pp->cached_prdctl, port_base + PORT_PRD_CTL);
250
251 /* issue r/w command */
252 ap->ops->exec_command(ap, &qc->tf);
253}
254
255static void inic_bmdma_start(struct ata_queued_cmd *qc)
256{
257 struct ata_port *ap = qc->ap;
258 struct inic_port_priv *pp = ap->private_data;
259 void __iomem *port_base = inic_port_base(ap);
260
261 /* start host DMA transaction */
262 pp->cached_prdctl |= PRD_CTL_START;
263 writeb(pp->cached_prdctl, port_base + PORT_PRD_CTL);
264}
265
266static void inic_bmdma_stop(struct ata_queued_cmd *qc)
267{
268 struct ata_port *ap = qc->ap;
269 struct inic_port_priv *pp = ap->private_data;
270 void __iomem *port_base = inic_port_base(ap);
271
272 /* stop DMA engine */
273 writeb(pp->dfl_prdctl, port_base + PORT_PRD_CTL);
274}
275
276static u8 inic_bmdma_status(struct ata_port *ap)
277{
278 /* event is already verified by the interrupt handler */
279 return ATA_DMA_INTR;
280}
281
282static void inic_irq_clear(struct ata_port *ap)
283{
284 /* noop */
285}
286
287static void inic_host_intr(struct ata_port *ap)
288{
289 void __iomem *port_base = inic_port_base(ap);
290 struct ata_eh_info *ehi = &ap->eh_info;
291 u8 irq_stat;
292
293 /* fetch and clear irq */
294 irq_stat = readb(port_base + PORT_IRQ_STAT);
295 writeb(irq_stat, port_base + PORT_IRQ_STAT);
296
297 if (likely(!(irq_stat & PIRQ_ERR))) {
298 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
299
300 if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
301 ata_chk_status(ap); /* clear ATA interrupt */
302 return;
303 }
304
305 if (likely(ata_host_intr(ap, qc)))
306 return;
307
308 ata_chk_status(ap); /* clear ATA interrupt */
309 ata_port_printk(ap, KERN_WARNING, "unhandled "
310 "interrupt, irq_stat=%x\n", irq_stat);
311 return;
312 }
313
314 /* error */
315 ata_ehi_push_desc(ehi, "irq_stat=0x%x", irq_stat);
316
317 if (irq_stat & (PIRQ_OFFLINE | PIRQ_ONLINE)) {
318 ata_ehi_hotplugged(ehi);
319 ata_port_freeze(ap);
320 } else
321 ata_port_abort(ap);
322}
323
324static irqreturn_t inic_interrupt(int irq, void *dev_instance)
325{
326 struct ata_host *host = dev_instance;
327 void __iomem *mmio_base = host->iomap[MMIO_BAR];
328 u16 host_irq_stat;
 329 int i, handled = 0;
330
331 host_irq_stat = readw(mmio_base + HOST_IRQ_STAT);
332
333 if (unlikely(!(host_irq_stat & HIRQ_GLOBAL)))
334 goto out;
335
336 spin_lock(&host->lock);
337
338 for (i = 0; i < NR_PORTS; i++) {
339 struct ata_port *ap = host->ports[i];
340
341 if (!(host_irq_stat & (HIRQ_PORT0 << i)))
342 continue;
343
344 if (likely(ap && !(ap->flags & ATA_FLAG_DISABLED))) {
345 inic_host_intr(ap);
346 handled++;
347 } else {
348 if (ata_ratelimit())
349 dev_printk(KERN_ERR, host->dev, "interrupt "
350 "from disabled port %d (0x%x)\n",
351 i, host_irq_stat);
352 }
353 }
354
355 spin_unlock(&host->lock);
356
357 out:
358 return IRQ_RETVAL(handled);
359}
360
361static unsigned int inic_qc_issue(struct ata_queued_cmd *qc)
362{
363 struct ata_port *ap = qc->ap;
364
365 /* ATA IRQ doesn't wait for DMA transfer completion and vice
366 * versa. Mask IRQ selectively to detect command completion.
367 * Without it, ATA DMA read command can cause data corruption.
368 *
369 * Something similar might be needed for ATAPI writes. I
370 * tried a lot of combinations but couldn't find the solution.
371 */
372 if (qc->tf.protocol == ATA_PROT_DMA &&
373 !(qc->tf.flags & ATA_TFLAG_WRITE))
374 inic_set_pirq_mask(ap, PIRQ_MASK_DMA_READ);
375 else
376 inic_set_pirq_mask(ap, PIRQ_MASK_OTHER);
377
 378 /* Issuing a command to a not-yet-initialized port locks up the
 379 * controller. Most of the time, this happens with the first
 380 * commands after reset, the ATA and ATAPI IDENTIFYs.
 381 * Fast-fail if stat is 0x7f or 0xff for those commands.
382 */
383 if (unlikely(qc->tf.command == ATA_CMD_ID_ATA ||
384 qc->tf.command == ATA_CMD_ID_ATAPI)) {
385 u8 stat = ata_chk_status(ap);
386 if (stat == 0x7f || stat == 0xff)
387 return AC_ERR_HSM;
388 }
389
390 return ata_qc_issue_prot(qc);
391}
392
393static void inic_freeze(struct ata_port *ap)
394{
395 void __iomem *port_base = inic_port_base(ap);
396
397 __inic_set_pirq_mask(ap, PIRQ_MASK_FREEZE);
398
399 ata_chk_status(ap);
400 writeb(0xff, port_base + PORT_IRQ_STAT);
401
402 readb(port_base + PORT_IRQ_STAT); /* flush */
403}
404
405static void inic_thaw(struct ata_port *ap)
406{
407 void __iomem *port_base = inic_port_base(ap);
408
409 ata_chk_status(ap);
410 writeb(0xff, port_base + PORT_IRQ_STAT);
411
412 __inic_set_pirq_mask(ap, PIRQ_MASK_OTHER);
413
414 readb(port_base + PORT_IRQ_STAT); /* flush */
415}
416
417/*
418 * SRST and SControl hardreset don't give valid signature on this
419 * controller. Only controller specific hardreset mechanism works.
420 */
421static int inic_hardreset(struct ata_port *ap, unsigned int *class)
422{
423 void __iomem *port_base = inic_port_base(ap);
424 void __iomem *idma_ctl = port_base + PORT_IDMA_CTL;
425 const unsigned long *timing = sata_ehc_deb_timing(&ap->eh_context);
426 u16 val;
427 int rc;
428
429 /* hammer it into sane state */
430 inic_reset_port(port_base);
431
432 val = readw(idma_ctl);
433 writew(val | IDMA_CTL_RST_ATA, idma_ctl);
434 readw(idma_ctl); /* flush */
435 msleep(1);
436 writew(val & ~IDMA_CTL_RST_ATA, idma_ctl);
437
438 rc = sata_phy_resume(ap, timing);
439 if (rc) {
440 ata_port_printk(ap, KERN_WARNING, "failed to resume "
441 "link after reset (errno=%d)\n", rc);
442 return rc;
443 }
444
445 *class = ATA_DEV_NONE;
446 if (ata_port_online(ap)) {
447 struct ata_taskfile tf;
448
449 /* wait a while before checking status */
450 msleep(150);
451
452 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
453 ata_port_printk(ap, KERN_WARNING,
454 "device busy after hardreset\n");
455 return -EIO;
456 }
457
458 ata_tf_read(ap, &tf);
459 *class = ata_dev_classify(&tf);
460 if (*class == ATA_DEV_UNKNOWN)
461 *class = ATA_DEV_NONE;
462 }
463
464 return 0;
465}
466
467static void inic_error_handler(struct ata_port *ap)
468{
469 void __iomem *port_base = inic_port_base(ap);
470 struct inic_port_priv *pp = ap->private_data;
471 unsigned long flags;
472
473 /* reset PIO HSM and stop DMA engine */
474 inic_reset_port(port_base);
475
476 spin_lock_irqsave(ap->lock, flags);
477 ap->hsm_task_state = HSM_ST_IDLE;
478 writeb(pp->dfl_prdctl, port_base + PORT_PRD_CTL);
479 spin_unlock_irqrestore(ap->lock, flags);
480
481 /* PIO and DMA engines have been stopped, perform recovery */
482 ata_do_eh(ap, ata_std_prereset, NULL, inic_hardreset,
483 ata_std_postreset);
484}
485
486static void inic_post_internal_cmd(struct ata_queued_cmd *qc)
487{
488 /* make DMA engine forget about the failed command */
489 if (qc->err_mask)
490 inic_reset_port(inic_port_base(qc->ap));
491}
492
493static void inic_dev_config(struct ata_port *ap, struct ata_device *dev)
494{
 495 /* inic can only handle up to the LBA28 max sector count */
496 if (dev->max_sectors > ATA_MAX_SECTORS)
497 dev->max_sectors = ATA_MAX_SECTORS;
498}
499
500static void init_port(struct ata_port *ap)
501{
502 void __iomem *port_base = inic_port_base(ap);
503
504 /* Setup PRD address */
505 writel(ap->prd_dma, port_base + PORT_PRD_ADDR);
506}
507
508static int inic_port_resume(struct ata_port *ap)
509{
510 init_port(ap);
511 return 0;
512}
513
514static int inic_port_start(struct ata_port *ap)
515{
516 void __iomem *port_base = inic_port_base(ap);
517 struct inic_port_priv *pp;
518 u8 tmp;
519 int rc;
520
521 /* alloc and initialize private data */
522 pp = devm_kzalloc(ap->host->dev, sizeof(*pp), GFP_KERNEL);
523 if (!pp)
524 return -ENOMEM;
525 ap->private_data = pp;
526
527 /* default PRD_CTL value, DMAEN, WR and START off */
528 tmp = readb(port_base + PORT_PRD_CTL);
529 tmp &= ~(PRD_CTL_DMAEN | PRD_CTL_WR | PRD_CTL_START);
530 pp->dfl_prdctl = tmp;
531
532 /* Alloc resources */
533 rc = ata_port_start(ap);
534 if (rc) {
 535 devm_kfree(ap->host->dev, pp);
536 return rc;
537 }
538
539 init_port(ap);
540
541 return 0;
542}
543
544static struct ata_port_operations inic_port_ops = {
545 .port_disable = ata_port_disable,
546 .tf_load = ata_tf_load,
547 .tf_read = ata_tf_read,
548 .check_status = ata_check_status,
549 .exec_command = ata_exec_command,
550 .dev_select = ata_std_dev_select,
551
552 .scr_read = inic_scr_read,
553 .scr_write = inic_scr_write,
554
555 .bmdma_setup = inic_bmdma_setup,
556 .bmdma_start = inic_bmdma_start,
557 .bmdma_stop = inic_bmdma_stop,
558 .bmdma_status = inic_bmdma_status,
559
560 .irq_handler = inic_interrupt,
561 .irq_clear = inic_irq_clear,
562 .irq_on = ata_irq_on,
563 .irq_ack = ata_irq_ack,
564
565 .qc_prep = ata_qc_prep,
566 .qc_issue = inic_qc_issue,
567 .data_xfer = ata_data_xfer,
568
569 .freeze = inic_freeze,
570 .thaw = inic_thaw,
571 .error_handler = inic_error_handler,
572 .post_internal_cmd = inic_post_internal_cmd,
573 .dev_config = inic_dev_config,
574
575 .port_resume = inic_port_resume,
576
577 .port_start = inic_port_start,
578};
579
580static struct ata_port_info inic_port_info = {
581 .sht = &inic_sht,
582 /* For some reason, ATA_PROT_ATAPI is broken on this
 583 * controller, and no, PIO_POLLING doesn't fix it. It somehow
 584 * manages to report the wrong ireason, and ignoring ireason
 585 * results in a machine lockup. Tell libata to always prefer
586 * DMA.
587 */
588 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
589 .pio_mask = 0x1f, /* pio0-4 */
590 .mwdma_mask = 0x07, /* mwdma0-2 */
591 .udma_mask = 0x7f, /* udma0-6 */
592 .port_ops = &inic_port_ops
593};
594
595static int init_controller(void __iomem *mmio_base, u16 hctl)
596{
597 int i;
598 u16 val;
599
600 hctl &= ~HCTL_KNOWN_BITS;
601
602 /* Soft reset whole controller. Spec says reset duration is 3
603 * PCI clocks, be generous and give it 10ms.
604 */
605 writew(hctl | HCTL_SOFTRST, mmio_base + HOST_CTL);
606 readw(mmio_base + HOST_CTL); /* flush */
607
608 for (i = 0; i < 10; i++) {
609 msleep(1);
610 val = readw(mmio_base + HOST_CTL);
611 if (!(val & HCTL_SOFTRST))
612 break;
613 }
614
615 if (val & HCTL_SOFTRST)
616 return -EIO;
617
618 /* mask all interrupts and reset ports */
619 for (i = 0; i < NR_PORTS; i++) {
620 void __iomem *port_base = mmio_base + i * PORT_SIZE;
621
622 writeb(0xff, port_base + PORT_IRQ_MASK);
623 inic_reset_port(port_base);
624 }
625
626 /* port IRQ is masked now, unmask global IRQ */
627 writew(hctl & ~HCTL_IRQOFF, mmio_base + HOST_CTL);
628 val = readw(mmio_base + HOST_IRQ_MASK);
629 val &= ~(HIRQ_PORT0 | HIRQ_PORT1);
630 writew(val, mmio_base + HOST_IRQ_MASK);
631
632 return 0;
633}
634
635static int inic_pci_device_resume(struct pci_dev *pdev)
636{
637 struct ata_host *host = dev_get_drvdata(&pdev->dev);
638 struct inic_host_priv *hpriv = host->private_data;
639 void __iomem *mmio_base = host->iomap[MMIO_BAR];
640 int rc;
641
642 ata_pci_device_do_resume(pdev);
643
644 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
646 rc = init_controller(mmio_base, hpriv->cached_hctl);
647 if (rc)
648 return rc;
649 }
650
651 ata_host_resume(host);
652
653 return 0;
654}
655
656static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
657{
658 static int printed_version;
659 struct ata_port_info *pinfo = &inic_port_info;
660 struct ata_probe_ent *probe_ent;
661 struct inic_host_priv *hpriv;
662 void __iomem * const *iomap;
663 int i, rc;
664
665 if (!printed_version++)
666 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
667
668 rc = pcim_enable_device(pdev);
669 if (rc)
670 return rc;
671
672 rc = pci_request_regions(pdev, DRV_NAME);
673 if (rc)
674 return rc;
675
676 rc = pcim_iomap_regions(pdev, 0x3f, DRV_NAME);
677 if (rc)
678 return rc;
679 iomap = pcim_iomap_table(pdev);
680
 681 /* Set dma_mask. This device doesn't support 64-bit addressing. */
682 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
683 if (rc) {
684 dev_printk(KERN_ERR, &pdev->dev,
685 "32-bit DMA enable failed\n");
686 return rc;
687 }
688
689 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
690 if (rc) {
691 dev_printk(KERN_ERR, &pdev->dev,
692 "32-bit consistent DMA enable failed\n");
693 return rc;
694 }
695
696 probe_ent = devm_kzalloc(&pdev->dev, sizeof(*probe_ent), GFP_KERNEL);
697 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
698 if (!probe_ent || !hpriv)
699 return -ENOMEM;
700
701 probe_ent->dev = &pdev->dev;
702 INIT_LIST_HEAD(&probe_ent->node);
703
704 probe_ent->sht = pinfo->sht;
705 probe_ent->port_flags = pinfo->flags;
706 probe_ent->pio_mask = pinfo->pio_mask;
707 probe_ent->mwdma_mask = pinfo->mwdma_mask;
708 probe_ent->udma_mask = pinfo->udma_mask;
709 probe_ent->port_ops = pinfo->port_ops;
710 probe_ent->n_ports = NR_PORTS;
711
712 probe_ent->irq = pdev->irq;
 713	probe_ent->irq_flags = IRQF_SHARED;
714
715 probe_ent->iomap = iomap;
716
717 for (i = 0; i < NR_PORTS; i++) {
718 struct ata_ioports *port = &probe_ent->port[i];
719 void __iomem *port_base = iomap[MMIO_BAR] + i * PORT_SIZE;
720
721 port->cmd_addr = iomap[2 * i];
722 port->altstatus_addr =
723 port->ctl_addr = (void __iomem *)
724 ((unsigned long)iomap[2 * i + 1] | ATA_PCI_CTL_OFS);
725 port->scr_addr = port_base + PORT_SCR;
726
727 ata_std_ports(port);
728 }
729
730 probe_ent->private_data = hpriv;
731 hpriv->cached_hctl = readw(iomap[MMIO_BAR] + HOST_CTL);
732
733 rc = init_controller(iomap[MMIO_BAR], hpriv->cached_hctl);
734 if (rc) {
735 dev_printk(KERN_ERR, &pdev->dev,
736 "failed to initialize controller\n");
737 return rc;
738 }
739
740 pci_set_master(pdev);
741
742 if (!ata_device_add(probe_ent))
743 return -ENODEV;
744
745 devm_kfree(&pdev->dev, probe_ent);
746
747 return 0;
748}
749
750static const struct pci_device_id inic_pci_tbl[] = {
751 { PCI_VDEVICE(INIT, 0x1622), },
752 { },
753};
754
755static struct pci_driver inic_pci_driver = {
756 .name = DRV_NAME,
757 .id_table = inic_pci_tbl,
758 .suspend = ata_pci_device_suspend,
759 .resume = inic_pci_device_resume,
760 .probe = inic_init_one,
761 .remove = ata_pci_remove_one,
762};
763
764static int __init inic_init(void)
765{
766 return pci_register_driver(&inic_pci_driver);
767}
768
769static void __exit inic_exit(void)
770{
771 pci_unregister_driver(&inic_pci_driver);
772}
773
774MODULE_AUTHOR("Tejun Heo");
775MODULE_DESCRIPTION("low-level driver for Initio 162x SATA");
776MODULE_LICENSE("GPL v2");
777MODULE_DEVICE_TABLE(pci, inic_pci_tbl);
778MODULE_VERSION(DRV_VERSION);
779
780module_init(inic_init);
781module_exit(inic_exit);
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index aae0b5201c1e..769eca52442c 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -34,7 +34,6 @@
34#include <scsi/scsi_host.h> 34#include <scsi/scsi_host.h>
35#include <scsi/scsi_cmnd.h> 35#include <scsi/scsi_cmnd.h>
36#include <linux/libata.h> 36#include <linux/libata.h>
37#include <asm/io.h>
38 37
39#define DRV_NAME "sata_mv" 38#define DRV_NAME "sata_mv"
40#define DRV_VERSION "0.7" 39#define DRV_VERSION "0.7"
@@ -342,7 +341,6 @@ static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
342static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val); 341static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
343static void mv_phy_reset(struct ata_port *ap); 342static void mv_phy_reset(struct ata_port *ap);
344static void __mv_phy_reset(struct ata_port *ap, int can_sleep); 343static void __mv_phy_reset(struct ata_port *ap, int can_sleep);
345static void mv_host_stop(struct ata_host *host);
346static int mv_port_start(struct ata_port *ap); 344static int mv_port_start(struct ata_port *ap);
347static void mv_port_stop(struct ata_port *ap); 345static void mv_port_stop(struct ata_port *ap);
348static void mv_qc_prep(struct ata_queued_cmd *qc); 346static void mv_qc_prep(struct ata_queued_cmd *qc);
@@ -406,19 +404,20 @@ static const struct ata_port_operations mv5_ops = {
406 404
407 .qc_prep = mv_qc_prep, 405 .qc_prep = mv_qc_prep,
408 .qc_issue = mv_qc_issue, 406 .qc_issue = mv_qc_issue,
409 .data_xfer = ata_mmio_data_xfer, 407 .data_xfer = ata_data_xfer,
410 408
411 .eng_timeout = mv_eng_timeout, 409 .eng_timeout = mv_eng_timeout,
412 410
413 .irq_handler = mv_interrupt, 411 .irq_handler = mv_interrupt,
414 .irq_clear = mv_irq_clear, 412 .irq_clear = mv_irq_clear,
413 .irq_on = ata_irq_on,
414 .irq_ack = ata_irq_ack,
415 415
416 .scr_read = mv5_scr_read, 416 .scr_read = mv5_scr_read,
417 .scr_write = mv5_scr_write, 417 .scr_write = mv5_scr_write,
418 418
419 .port_start = mv_port_start, 419 .port_start = mv_port_start,
420 .port_stop = mv_port_stop, 420 .port_stop = mv_port_stop,
421 .host_stop = mv_host_stop,
422}; 421};
423 422
424static const struct ata_port_operations mv6_ops = { 423static const struct ata_port_operations mv6_ops = {
@@ -434,19 +433,20 @@ static const struct ata_port_operations mv6_ops = {
434 433
435 .qc_prep = mv_qc_prep, 434 .qc_prep = mv_qc_prep,
436 .qc_issue = mv_qc_issue, 435 .qc_issue = mv_qc_issue,
437 .data_xfer = ata_mmio_data_xfer, 436 .data_xfer = ata_data_xfer,
438 437
439 .eng_timeout = mv_eng_timeout, 438 .eng_timeout = mv_eng_timeout,
440 439
441 .irq_handler = mv_interrupt, 440 .irq_handler = mv_interrupt,
442 .irq_clear = mv_irq_clear, 441 .irq_clear = mv_irq_clear,
442 .irq_on = ata_irq_on,
443 .irq_ack = ata_irq_ack,
443 444
444 .scr_read = mv_scr_read, 445 .scr_read = mv_scr_read,
445 .scr_write = mv_scr_write, 446 .scr_write = mv_scr_write,
446 447
447 .port_start = mv_port_start, 448 .port_start = mv_port_start,
448 .port_stop = mv_port_stop, 449 .port_stop = mv_port_stop,
449 .host_stop = mv_host_stop,
450}; 450};
451 451
452static const struct ata_port_operations mv_iie_ops = { 452static const struct ata_port_operations mv_iie_ops = {
@@ -462,19 +462,20 @@ static const struct ata_port_operations mv_iie_ops = {
462 462
463 .qc_prep = mv_qc_prep_iie, 463 .qc_prep = mv_qc_prep_iie,
464 .qc_issue = mv_qc_issue, 464 .qc_issue = mv_qc_issue,
465 .data_xfer = ata_mmio_data_xfer, 465 .data_xfer = ata_data_xfer,
466 466
467 .eng_timeout = mv_eng_timeout, 467 .eng_timeout = mv_eng_timeout,
468 468
469 .irq_handler = mv_interrupt, 469 .irq_handler = mv_interrupt,
470 .irq_clear = mv_irq_clear, 470 .irq_clear = mv_irq_clear,
471 .irq_on = ata_irq_on,
472 .irq_ack = ata_irq_ack,
471 473
472 .scr_read = mv_scr_read, 474 .scr_read = mv_scr_read,
473 .scr_write = mv_scr_write, 475 .scr_write = mv_scr_write,
474 476
475 .port_start = mv_port_start, 477 .port_start = mv_port_start,
476 .port_stop = mv_port_stop, 478 .port_stop = mv_port_stop,
477 .host_stop = mv_host_stop,
478}; 479};
479 480
480static const struct ata_port_info mv_port_info[] = { 481static const struct ata_port_info mv_port_info[] = {
@@ -620,7 +621,7 @@ static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
620 621
621static inline void __iomem *mv_ap_base(struct ata_port *ap) 622static inline void __iomem *mv_ap_base(struct ata_port *ap)
622{ 623{
623 return mv_port_base(ap->host->mmio_base, ap->port_no); 624 return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
624} 625}
625 626
626static inline int mv_get_hc_count(unsigned long port_flags) 627static inline int mv_get_hc_count(unsigned long port_flags)
@@ -809,35 +810,6 @@ static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
809 } 810 }
810} 811}
811 812
812/**
813 * mv_host_stop - Host specific cleanup/stop routine.
814 * @host: host data structure
815 *
816 * Disable ints, cleanup host memory, call general purpose
817 * host_stop.
818 *
819 * LOCKING:
820 * Inherited from caller.
821 */
822static void mv_host_stop(struct ata_host *host)
823{
824 struct mv_host_priv *hpriv = host->private_data;
825 struct pci_dev *pdev = to_pci_dev(host->dev);
826
827 if (hpriv->hp_flags & MV_HP_FLAG_MSI) {
828 pci_disable_msi(pdev);
829 } else {
830 pci_intx(pdev, 0);
831 }
832 kfree(hpriv);
833 ata_host_stop(host);
834}
835
836static inline void mv_priv_free(struct mv_port_priv *pp, struct device *dev)
837{
838 dma_free_coherent(dev, MV_PORT_PRIV_DMA_SZ, pp->crpb, pp->crpb_dma);
839}
840
841static void mv_edma_cfg(struct mv_host_priv *hpriv, void __iomem *port_mmio) 813static void mv_edma_cfg(struct mv_host_priv *hpriv, void __iomem *port_mmio)
842{ 814{
843 u32 cfg = readl(port_mmio + EDMA_CFG_OFS); 815 u32 cfg = readl(port_mmio + EDMA_CFG_OFS);
@@ -883,22 +855,21 @@ static int mv_port_start(struct ata_port *ap)
883 void __iomem *port_mmio = mv_ap_base(ap); 855 void __iomem *port_mmio = mv_ap_base(ap);
884 void *mem; 856 void *mem;
885 dma_addr_t mem_dma; 857 dma_addr_t mem_dma;
886 int rc = -ENOMEM; 858 int rc;
887 859
888 pp = kmalloc(sizeof(*pp), GFP_KERNEL); 860 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
889 if (!pp) 861 if (!pp)
890 goto err_out; 862 return -ENOMEM;
891 memset(pp, 0, sizeof(*pp));
892 863
893 mem = dma_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma, 864 mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
894 GFP_KERNEL); 865 GFP_KERNEL);
895 if (!mem) 866 if (!mem)
896 goto err_out_pp; 867 return -ENOMEM;
897 memset(mem, 0, MV_PORT_PRIV_DMA_SZ); 868 memset(mem, 0, MV_PORT_PRIV_DMA_SZ);
898 869
899 rc = ata_pad_alloc(ap, dev); 870 rc = ata_pad_alloc(ap, dev);
900 if (rc) 871 if (rc)
901 goto err_out_priv; 872 return rc;
902 873
903 /* First item in chunk of DMA memory: 874 /* First item in chunk of DMA memory:
904 * 32-slot command request table (CRQB), 32 bytes each in size 875 * 32-slot command request table (CRQB), 32 bytes each in size
@@ -951,13 +922,6 @@ static int mv_port_start(struct ata_port *ap)
951 */ 922 */
952 ap->private_data = pp; 923 ap->private_data = pp;
953 return 0; 924 return 0;
954
955err_out_priv:
956 mv_priv_free(pp, dev);
957err_out_pp:
958 kfree(pp);
959err_out:
960 return rc;
961} 925}
962 926
963/** 927/**
@@ -971,18 +935,11 @@ err_out:
971 */ 935 */
972static void mv_port_stop(struct ata_port *ap) 936static void mv_port_stop(struct ata_port *ap)
973{ 937{
974 struct device *dev = ap->host->dev;
975 struct mv_port_priv *pp = ap->private_data;
976 unsigned long flags; 938 unsigned long flags;
977 939
978 spin_lock_irqsave(&ap->host->lock, flags); 940 spin_lock_irqsave(&ap->host->lock, flags);
979 mv_stop_dma(ap); 941 mv_stop_dma(ap);
980 spin_unlock_irqrestore(&ap->host->lock, flags); 942 spin_unlock_irqrestore(&ap->host->lock, flags);
981
982 ap->private_data = NULL;
983 ata_pad_free(ap, dev);
984 mv_priv_free(pp, dev);
985 kfree(pp);
986} 943}
987 944
988/** 945/**
@@ -1348,7 +1305,7 @@ static void mv_err_intr(struct ata_port *ap, int reset_allowed)
1348 */ 1305 */
1349static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc) 1306static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
1350{ 1307{
1351 void __iomem *mmio = host->mmio_base; 1308 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
1352 void __iomem *hc_mmio = mv_hc_base(mmio, hc); 1309 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1353 struct ata_queued_cmd *qc; 1310 struct ata_queued_cmd *qc;
1354 u32 hc_irq_cause; 1311 u32 hc_irq_cause;
@@ -1391,8 +1348,7 @@ static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
1391 } else { 1348 } else {
1392 /* PIO: check for device (drive) interrupt */ 1349 /* PIO: check for device (drive) interrupt */
1393 if ((DEV_IRQ << hard_port) & hc_irq_cause) { 1350 if ((DEV_IRQ << hard_port) & hc_irq_cause) {
1394 ata_status = readb((void __iomem *) 1351 ata_status = readb(ap->ioaddr.status_addr);
1395 ap->ioaddr.status_addr);
1396 handled = 1; 1352 handled = 1;
1397 /* ignore spurious intr if drive still BUSY */ 1353 /* ignore spurious intr if drive still BUSY */
1398 if (ata_status & ATA_BUSY) { 1354 if (ata_status & ATA_BUSY) {
@@ -1452,7 +1408,7 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance)
1452{ 1408{
1453 struct ata_host *host = dev_instance; 1409 struct ata_host *host = dev_instance;
1454 unsigned int hc, handled = 0, n_hcs; 1410 unsigned int hc, handled = 0, n_hcs;
1455 void __iomem *mmio = host->mmio_base; 1411 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
1456 struct mv_host_priv *hpriv; 1412 struct mv_host_priv *hpriv;
1457 u32 irq_stat; 1413 u32 irq_stat;
1458 1414
@@ -1528,22 +1484,24 @@ static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1528 1484
1529static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in) 1485static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
1530{ 1486{
1531 void __iomem *mmio = mv5_phy_base(ap->host->mmio_base, ap->port_no); 1487 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1488 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1532 unsigned int ofs = mv5_scr_offset(sc_reg_in); 1489 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1533 1490
1534 if (ofs != 0xffffffffU) 1491 if (ofs != 0xffffffffU)
1535 return readl(mmio + ofs); 1492 return readl(addr + ofs);
1536 else 1493 else
1537 return (u32) ofs; 1494 return (u32) ofs;
1538} 1495}
1539 1496
1540static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val) 1497static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1541{ 1498{
1542 void __iomem *mmio = mv5_phy_base(ap->host->mmio_base, ap->port_no); 1499 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1500 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1543 unsigned int ofs = mv5_scr_offset(sc_reg_in); 1501 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1544 1502
1545 if (ofs != 0xffffffffU) 1503 if (ofs != 0xffffffffU)
1546 writelfl(val, mmio + ofs); 1504 writelfl(val, addr + ofs);
1547} 1505}
1548 1506
1549static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio) 1507static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
@@ -1905,7 +1863,7 @@ static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
1905static void mv_stop_and_reset(struct ata_port *ap) 1863static void mv_stop_and_reset(struct ata_port *ap)
1906{ 1864{
1907 struct mv_host_priv *hpriv = ap->host->private_data; 1865 struct mv_host_priv *hpriv = ap->host->private_data;
1908 void __iomem *mmio = ap->host->mmio_base; 1866 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1909 1867
1910 mv_stop_dma(ap); 1868 mv_stop_dma(ap);
1911 1869
@@ -2003,10 +1961,10 @@ comreset_retry:
2003 break; 1961 break;
2004 } 1962 }
2005 1963
2006 tf.lbah = readb((void __iomem *) ap->ioaddr.lbah_addr); 1964 tf.lbah = readb(ap->ioaddr.lbah_addr);
2007 tf.lbam = readb((void __iomem *) ap->ioaddr.lbam_addr); 1965 tf.lbam = readb(ap->ioaddr.lbam_addr);
2008 tf.lbal = readb((void __iomem *) ap->ioaddr.lbal_addr); 1966 tf.lbal = readb(ap->ioaddr.lbal_addr);
2009 tf.nsect = readb((void __iomem *) ap->ioaddr.nsect_addr); 1967 tf.nsect = readb(ap->ioaddr.nsect_addr);
2010 1968
2011 dev->class = ata_dev_classify(&tf); 1969 dev->class = ata_dev_classify(&tf);
2012 if (!ata_dev_enabled(dev)) { 1970 if (!ata_dev_enabled(dev)) {
@@ -2038,17 +1996,17 @@ static void mv_phy_reset(struct ata_port *ap)
2038 */ 1996 */
2039static void mv_eng_timeout(struct ata_port *ap) 1997static void mv_eng_timeout(struct ata_port *ap)
2040{ 1998{
1999 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2041 struct ata_queued_cmd *qc; 2000 struct ata_queued_cmd *qc;
2042 unsigned long flags; 2001 unsigned long flags;
2043 2002
2044 ata_port_printk(ap, KERN_ERR, "Entering mv_eng_timeout\n"); 2003 ata_port_printk(ap, KERN_ERR, "Entering mv_eng_timeout\n");
2045 DPRINTK("All regs @ start of eng_timeout\n"); 2004 DPRINTK("All regs @ start of eng_timeout\n");
2046 mv_dump_all_regs(ap->host->mmio_base, ap->port_no, 2005 mv_dump_all_regs(mmio, ap->port_no, to_pci_dev(ap->host->dev));
2047 to_pci_dev(ap->host->dev));
2048 2006
2049 qc = ata_qc_from_tag(ap, ap->active_tag); 2007 qc = ata_qc_from_tag(ap, ap->active_tag);
2050 printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n", 2008 printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n",
2051 ap->host->mmio_base, ap, qc, qc->scsicmd, &qc->scsicmd->cmnd); 2009 mmio, ap, qc, qc->scsicmd, &qc->scsicmd->cmnd);
2052 2010
2053 spin_lock_irqsave(&ap->host->lock, flags); 2011 spin_lock_irqsave(&ap->host->lock, flags);
2054 mv_err_intr(ap, 0); 2012 mv_err_intr(ap, 0);
@@ -2076,7 +2034,7 @@ static void mv_eng_timeout(struct ata_port *ap)
2076 */ 2034 */
2077static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio) 2035static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
2078{ 2036{
2079 unsigned long shd_base = (unsigned long) port_mmio + SHD_BLK_OFS; 2037 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
2080 unsigned serr_ofs; 2038 unsigned serr_ofs;
2081 2039
2082 /* PIO related setup 2040 /* PIO related setup
@@ -2224,7 +2182,7 @@ static int mv_init_host(struct pci_dev *pdev, struct ata_probe_ent *probe_ent,
2224 unsigned int board_idx) 2182 unsigned int board_idx)
2225{ 2183{
2226 int rc = 0, n_hc, port, hc; 2184 int rc = 0, n_hc, port, hc;
2227 void __iomem *mmio = probe_ent->mmio_base; 2185 void __iomem *mmio = probe_ent->iomap[MV_PRIMARY_BAR];
2228 struct mv_host_priv *hpriv = probe_ent->private_data; 2186 struct mv_host_priv *hpriv = probe_ent->private_data;
2229 2187
2230 /* global interrupt mask */ 2188 /* global interrupt mask */
@@ -2342,49 +2300,36 @@ static void mv_print_info(struct ata_probe_ent *probe_ent)
2342static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 2300static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2343{ 2301{
2344 static int printed_version = 0; 2302 static int printed_version = 0;
2345 struct ata_probe_ent *probe_ent = NULL; 2303 struct device *dev = &pdev->dev;
2304 struct ata_probe_ent *probe_ent;
2346 struct mv_host_priv *hpriv; 2305 struct mv_host_priv *hpriv;
2347 unsigned int board_idx = (unsigned int)ent->driver_data; 2306 unsigned int board_idx = (unsigned int)ent->driver_data;
2348 void __iomem *mmio_base; 2307 int rc;
2349 int pci_dev_busy = 0, rc;
2350 2308
2351 if (!printed_version++) 2309 if (!printed_version++)
2352 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n"); 2310 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2353 2311
2354 rc = pci_enable_device(pdev); 2312 rc = pcim_enable_device(pdev);
2355 if (rc) { 2313 if (rc)
2356 return rc; 2314 return rc;
2357 }
2358 pci_set_master(pdev); 2315 pci_set_master(pdev);
2359 2316
2360 rc = pci_request_regions(pdev, DRV_NAME); 2317 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
2361 if (rc) { 2318 if (rc == -EBUSY)
2362 pci_dev_busy = 1; 2319 pcim_pin_device(pdev);
2363 goto err_out; 2320 if (rc)
2364 } 2321 return rc;
2365 2322
2366 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL); 2323 probe_ent = devm_kzalloc(dev, sizeof(*probe_ent), GFP_KERNEL);
2367 if (probe_ent == NULL) { 2324 if (probe_ent == NULL)
2368 rc = -ENOMEM; 2325 return -ENOMEM;
2369 goto err_out_regions;
2370 }
2371 2326
2372 memset(probe_ent, 0, sizeof(*probe_ent));
2373 probe_ent->dev = pci_dev_to_dev(pdev); 2327 probe_ent->dev = pci_dev_to_dev(pdev);
2374 INIT_LIST_HEAD(&probe_ent->node); 2328 INIT_LIST_HEAD(&probe_ent->node);
2375 2329
2376 mmio_base = pci_iomap(pdev, MV_PRIMARY_BAR, 0); 2330 hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
2377 if (mmio_base == NULL) { 2331 if (!hpriv)
2378 rc = -ENOMEM; 2332 return -ENOMEM;
2379 goto err_out_free_ent;
2380 }
2381
2382 hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL);
2383 if (!hpriv) {
2384 rc = -ENOMEM;
2385 goto err_out_iounmap;
2386 }
2387 memset(hpriv, 0, sizeof(*hpriv));
2388 2333
2389 probe_ent->sht = mv_port_info[board_idx].sht; 2334 probe_ent->sht = mv_port_info[board_idx].sht;
2390 probe_ent->port_flags = mv_port_info[board_idx].flags; 2335 probe_ent->port_flags = mv_port_info[board_idx].flags;
@@ -2394,53 +2339,26 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2394 2339
2395 probe_ent->irq = pdev->irq; 2340 probe_ent->irq = pdev->irq;
2396 probe_ent->irq_flags = IRQF_SHARED; 2341 probe_ent->irq_flags = IRQF_SHARED;
2397 probe_ent->mmio_base = mmio_base; 2342 probe_ent->iomap = pcim_iomap_table(pdev);
2398 probe_ent->private_data = hpriv; 2343 probe_ent->private_data = hpriv;
2399 2344
2400 /* initialize adapter */ 2345 /* initialize adapter */
2401 rc = mv_init_host(pdev, probe_ent, board_idx); 2346 rc = mv_init_host(pdev, probe_ent, board_idx);
2402 if (rc) { 2347 if (rc)
2403 goto err_out_hpriv; 2348 return rc;
2404 }
2405 2349
2406 /* Enable interrupts */ 2350 /* Enable interrupts */
2407 if (msi && pci_enable_msi(pdev) == 0) { 2351 if (msi && !pci_enable_msi(pdev))
2408 hpriv->hp_flags |= MV_HP_FLAG_MSI;
2409 } else {
2410 pci_intx(pdev, 1); 2352 pci_intx(pdev, 1);
2411 }
2412 2353
2413 mv_dump_pci_cfg(pdev, 0x68); 2354 mv_dump_pci_cfg(pdev, 0x68);
2414 mv_print_info(probe_ent); 2355 mv_print_info(probe_ent);
2415 2356
2416 if (ata_device_add(probe_ent) == 0) { 2357 if (ata_device_add(probe_ent) == 0)
2417 rc = -ENODEV; /* No devices discovered */ 2358 return -ENODEV;
2418 goto err_out_dev_add;
2419 }
2420 2359
2421 kfree(probe_ent); 2360 devm_kfree(dev, probe_ent);
2422 return 0; 2361 return 0;
2423
2424err_out_dev_add:
2425 if (MV_HP_FLAG_MSI & hpriv->hp_flags) {
2426 pci_disable_msi(pdev);
2427 } else {
2428 pci_intx(pdev, 0);
2429 }
2430err_out_hpriv:
2431 kfree(hpriv);
2432err_out_iounmap:
2433 pci_iounmap(pdev, mmio_base);
2434err_out_free_ent:
2435 kfree(probe_ent);
2436err_out_regions:
2437 pci_release_regions(pdev);
2438err_out:
2439 if (!pci_dev_busy) {
2440 pci_disable_device(pdev);
2441 }
2442
2443 return rc;
2444} 2362}
2445 2363
2446static int __init mv_init(void) 2364static int __init mv_init(void)
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index f7a963eb1f02..095ef1b2cd0e 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -49,11 +49,13 @@
49#include <linux/libata.h> 49#include <linux/libata.h>
50 50
51#define DRV_NAME "sata_nv" 51#define DRV_NAME "sata_nv"
52#define DRV_VERSION "3.2" 52#define DRV_VERSION "3.3"
53 53
54#define NV_ADMA_DMA_BOUNDARY 0xffffffffUL 54#define NV_ADMA_DMA_BOUNDARY 0xffffffffUL
55 55
56enum { 56enum {
57 NV_MMIO_BAR = 5,
58
57 NV_PORTS = 2, 59 NV_PORTS = 2,
58 NV_PIO_MASK = 0x1f, 60 NV_PIO_MASK = 0x1f,
59 NV_MWDMA_MASK = 0x07, 61 NV_MWDMA_MASK = 0x07,
@@ -213,12 +215,21 @@ struct nv_adma_port_priv {
213 dma_addr_t cpb_dma; 215 dma_addr_t cpb_dma;
214 struct nv_adma_prd *aprd; 216 struct nv_adma_prd *aprd;
215 dma_addr_t aprd_dma; 217 dma_addr_t aprd_dma;
218 void __iomem * ctl_block;
219 void __iomem * gen_block;
220 void __iomem * notifier_clear_block;
216 u8 flags; 221 u8 flags;
217}; 222};
218 223
224struct nv_host_priv {
225 unsigned long type;
226};
227
219#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & ( 1 << (19 + (12 * (PORT))))) 228#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & ( 1 << (19 + (12 * (PORT)))))
220 229
221static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); 230static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
231static void nv_remove_one (struct pci_dev *pdev);
232static int nv_pci_device_resume(struct pci_dev *pdev);
222static void nv_ck804_host_stop(struct ata_host *host); 233static void nv_ck804_host_stop(struct ata_host *host);
223static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance); 234static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
224static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance); 235static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
@@ -239,6 +250,8 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
239static void nv_adma_irq_clear(struct ata_port *ap); 250static void nv_adma_irq_clear(struct ata_port *ap);
240static int nv_adma_port_start(struct ata_port *ap); 251static int nv_adma_port_start(struct ata_port *ap);
241static void nv_adma_port_stop(struct ata_port *ap); 252static void nv_adma_port_stop(struct ata_port *ap);
253static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
254static int nv_adma_port_resume(struct ata_port *ap);
242static void nv_adma_error_handler(struct ata_port *ap); 255static void nv_adma_error_handler(struct ata_port *ap);
243static void nv_adma_host_stop(struct ata_host *host); 256static void nv_adma_host_stop(struct ata_host *host);
244static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc); 257static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc);
@@ -284,7 +297,9 @@ static struct pci_driver nv_pci_driver = {
284 .name = DRV_NAME, 297 .name = DRV_NAME,
285 .id_table = nv_pci_tbl, 298 .id_table = nv_pci_tbl,
286 .probe = nv_init_one, 299 .probe = nv_init_one,
287 .remove = ata_pci_remove_one, 300 .suspend = ata_pci_device_suspend,
301 .resume = nv_pci_device_resume,
302 .remove = nv_remove_one,
288}; 303};
289 304
290static struct scsi_host_template nv_sht = { 305static struct scsi_host_template nv_sht = {
@@ -303,6 +318,8 @@ static struct scsi_host_template nv_sht = {
303 .slave_configure = ata_scsi_slave_config, 318 .slave_configure = ata_scsi_slave_config,
304 .slave_destroy = ata_scsi_slave_destroy, 319 .slave_destroy = ata_scsi_slave_destroy,
305 .bios_param = ata_std_bios_param, 320 .bios_param = ata_std_bios_param,
321 .suspend = ata_scsi_device_suspend,
322 .resume = ata_scsi_device_resume,
306}; 323};
307 324
308static struct scsi_host_template nv_adma_sht = { 325static struct scsi_host_template nv_adma_sht = {
@@ -321,6 +338,8 @@ static struct scsi_host_template nv_adma_sht = {
321 .slave_configure = nv_adma_slave_config, 338 .slave_configure = nv_adma_slave_config,
322 .slave_destroy = ata_scsi_slave_destroy, 339 .slave_destroy = ata_scsi_slave_destroy,
323 .bios_param = ata_std_bios_param, 340 .bios_param = ata_std_bios_param,
341 .suspend = ata_scsi_device_suspend,
342 .resume = ata_scsi_device_resume,
324}; 343};
325 344
326static const struct ata_port_operations nv_generic_ops = { 345static const struct ata_port_operations nv_generic_ops = {
@@ -340,14 +359,14 @@ static const struct ata_port_operations nv_generic_ops = {
340 .thaw = ata_bmdma_thaw, 359 .thaw = ata_bmdma_thaw,
341 .error_handler = nv_error_handler, 360 .error_handler = nv_error_handler,
342 .post_internal_cmd = ata_bmdma_post_internal_cmd, 361 .post_internal_cmd = ata_bmdma_post_internal_cmd,
343 .data_xfer = ata_pio_data_xfer, 362 .data_xfer = ata_data_xfer,
344 .irq_handler = nv_generic_interrupt, 363 .irq_handler = nv_generic_interrupt,
345 .irq_clear = ata_bmdma_irq_clear, 364 .irq_clear = ata_bmdma_irq_clear,
365 .irq_on = ata_irq_on,
366 .irq_ack = ata_irq_ack,
346 .scr_read = nv_scr_read, 367 .scr_read = nv_scr_read,
347 .scr_write = nv_scr_write, 368 .scr_write = nv_scr_write,
348 .port_start = ata_port_start, 369 .port_start = ata_port_start,
349 .port_stop = ata_port_stop,
350 .host_stop = ata_pci_host_stop,
351}; 370};
352 371
353static const struct ata_port_operations nv_nf2_ops = { 372static const struct ata_port_operations nv_nf2_ops = {
@@ -367,14 +386,14 @@ static const struct ata_port_operations nv_nf2_ops = {
367 .thaw = nv_nf2_thaw, 386 .thaw = nv_nf2_thaw,
368 .error_handler = nv_error_handler, 387 .error_handler = nv_error_handler,
369 .post_internal_cmd = ata_bmdma_post_internal_cmd, 388 .post_internal_cmd = ata_bmdma_post_internal_cmd,
370 .data_xfer = ata_pio_data_xfer, 389 .data_xfer = ata_data_xfer,
371 .irq_handler = nv_nf2_interrupt, 390 .irq_handler = nv_nf2_interrupt,
372 .irq_clear = ata_bmdma_irq_clear, 391 .irq_clear = ata_bmdma_irq_clear,
392 .irq_on = ata_irq_on,
393 .irq_ack = ata_irq_ack,
373 .scr_read = nv_scr_read, 394 .scr_read = nv_scr_read,
374 .scr_write = nv_scr_write, 395 .scr_write = nv_scr_write,
375 .port_start = ata_port_start, 396 .port_start = ata_port_start,
376 .port_stop = ata_port_stop,
377 .host_stop = ata_pci_host_stop,
378}; 397};
379 398
380static const struct ata_port_operations nv_ck804_ops = { 399static const struct ata_port_operations nv_ck804_ops = {
@@ -394,13 +413,14 @@ static const struct ata_port_operations nv_ck804_ops = {
394 .thaw = nv_ck804_thaw, 413 .thaw = nv_ck804_thaw,
395 .error_handler = nv_error_handler, 414 .error_handler = nv_error_handler,
396 .post_internal_cmd = ata_bmdma_post_internal_cmd, 415 .post_internal_cmd = ata_bmdma_post_internal_cmd,
397 .data_xfer = ata_pio_data_xfer, 416 .data_xfer = ata_data_xfer,
398 .irq_handler = nv_ck804_interrupt, 417 .irq_handler = nv_ck804_interrupt,
399 .irq_clear = ata_bmdma_irq_clear, 418 .irq_clear = ata_bmdma_irq_clear,
419 .irq_on = ata_irq_on,
420 .irq_ack = ata_irq_ack,
400 .scr_read = nv_scr_read, 421 .scr_read = nv_scr_read,
401 .scr_write = nv_scr_write, 422 .scr_write = nv_scr_write,
402 .port_start = ata_port_start, 423 .port_start = ata_port_start,
403 .port_stop = ata_port_stop,
404 .host_stop = nv_ck804_host_stop, 424 .host_stop = nv_ck804_host_stop,
405}; 425};
406 426
@@ -422,13 +442,17 @@ static const struct ata_port_operations nv_adma_ops = {
422 .thaw = nv_ck804_thaw, 442 .thaw = nv_ck804_thaw,
423 .error_handler = nv_adma_error_handler, 443 .error_handler = nv_adma_error_handler,
424 .post_internal_cmd = nv_adma_bmdma_stop, 444 .post_internal_cmd = nv_adma_bmdma_stop,
425 .data_xfer = ata_mmio_data_xfer, 445 .data_xfer = ata_data_xfer,
426 .irq_handler = nv_adma_interrupt, 446 .irq_handler = nv_adma_interrupt,
427 .irq_clear = nv_adma_irq_clear, 447 .irq_clear = nv_adma_irq_clear,
448 .irq_on = ata_irq_on,
449 .irq_ack = ata_irq_ack,
428 .scr_read = nv_scr_read, 450 .scr_read = nv_scr_read,
429 .scr_write = nv_scr_write, 451 .scr_write = nv_scr_write,
430 .port_start = nv_adma_port_start, 452 .port_start = nv_adma_port_start,
431 .port_stop = nv_adma_port_stop, 453 .port_stop = nv_adma_port_stop,
454 .port_suspend = nv_adma_port_suspend,
455 .port_resume = nv_adma_port_resume,
432 .host_stop = nv_adma_host_stop, 456 .host_stop = nv_adma_host_stop,
433}; 457};
434 458
@@ -467,6 +491,7 @@ static struct ata_port_info nv_port_info[] = {
467 { 491 {
468 .sht = &nv_adma_sht, 492 .sht = &nv_adma_sht,
469 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 493 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
494 ATA_FLAG_HRST_TO_RESUME |
470 ATA_FLAG_MMIO | ATA_FLAG_NCQ, 495 ATA_FLAG_MMIO | ATA_FLAG_NCQ,
471 .pio_mask = NV_PIO_MASK, 496 .pio_mask = NV_PIO_MASK,
472 .mwdma_mask = NV_MWDMA_MASK, 497 .mwdma_mask = NV_MWDMA_MASK,
@@ -483,57 +508,72 @@ MODULE_VERSION(DRV_VERSION);
483 508
484static int adma_enabled = 1; 509static int adma_enabled = 1;
485 510
486static inline void __iomem *__nv_adma_ctl_block(void __iomem *mmio,
487 unsigned int port_no)
488{
489 mmio += NV_ADMA_PORT + port_no * NV_ADMA_PORT_SIZE;
490 return mmio;
491}
492
493static inline void __iomem *nv_adma_ctl_block(struct ata_port *ap)
494{
495 return __nv_adma_ctl_block(ap->host->mmio_base, ap->port_no);
496}
497
498static inline void __iomem *nv_adma_gen_block(struct ata_port *ap)
499{
500 return (ap->host->mmio_base + NV_ADMA_GEN);
501}
502
503static inline void __iomem *nv_adma_notifier_clear_block(struct ata_port *ap)
504{
505 return (nv_adma_gen_block(ap) + NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no));
506}
507
508static void nv_adma_register_mode(struct ata_port *ap) 511static void nv_adma_register_mode(struct ata_port *ap)
509{ 512{
510 void __iomem *mmio = nv_adma_ctl_block(ap);
511 struct nv_adma_port_priv *pp = ap->private_data; 513 struct nv_adma_port_priv *pp = ap->private_data;
512 u16 tmp; 514 void __iomem *mmio = pp->ctl_block;
515 u16 tmp, status;
516 int count = 0;
513 517
514 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) 518 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
515 return; 519 return;
516 520
521 status = readw(mmio + NV_ADMA_STAT);
522 while(!(status & NV_ADMA_STAT_IDLE) && count < 20) {
523 ndelay(50);
524 status = readw(mmio + NV_ADMA_STAT);
525 count++;
526 }
527 if(count == 20)
528 ata_port_printk(ap, KERN_WARNING,
529 "timeout waiting for ADMA IDLE, stat=0x%hx\n",
530 status);
531
517 tmp = readw(mmio + NV_ADMA_CTL); 532 tmp = readw(mmio + NV_ADMA_CTL);
518 writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL); 533 writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
519 534
535 count = 0;
536 status = readw(mmio + NV_ADMA_STAT);
537 while(!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
538 ndelay(50);
539 status = readw(mmio + NV_ADMA_STAT);
540 count++;
541 }
542 if(count == 20)
543 ata_port_printk(ap, KERN_WARNING,
544 "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
545 status);
546
520 pp->flags |= NV_ADMA_PORT_REGISTER_MODE; 547 pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
521} 548}
522 549
523static void nv_adma_mode(struct ata_port *ap) 550static void nv_adma_mode(struct ata_port *ap)
524{ 551{
525 void __iomem *mmio = nv_adma_ctl_block(ap);
526 struct nv_adma_port_priv *pp = ap->private_data; 552 struct nv_adma_port_priv *pp = ap->private_data;
527 u16 tmp; 553 void __iomem *mmio = pp->ctl_block;
554 u16 tmp, status;
555 int count = 0;
528 556
529 if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) 557 if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
530 return; 558 return;
531 559
532 WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE); 560 WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
533 561
534 tmp = readw(mmio + NV_ADMA_CTL); 562 tmp = readw(mmio + NV_ADMA_CTL);
535 writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL); 563 writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
536 564
565 status = readw(mmio + NV_ADMA_STAT);
566 while(((status & NV_ADMA_STAT_LEGACY) ||
567 !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
568 ndelay(50);
569 status = readw(mmio + NV_ADMA_STAT);
570 count++;
571 }
572 if(count == 20)
573 ata_port_printk(ap, KERN_WARNING,
574 "timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
575 status);
576
537 pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE; 577 pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
538} 578}
539 579
@@ -568,7 +608,7 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
568 /* Subtract 1 since an extra entry may be needed for padding, see 608 /* Subtract 1 since an extra entry may be needed for padding, see
569 libata-scsi.c */ 609 libata-scsi.c */
570 sg_tablesize = LIBATA_MAX_PRD - 1; 610 sg_tablesize = LIBATA_MAX_PRD - 1;
571 611
572 /* Since the legacy DMA engine is in use, we need to disable ADMA 612 /* Since the legacy DMA engine is in use, we need to disable ADMA
573 on the port. */ 613 on the port. */
574 adma_enable = 0; 614 adma_enable = 0;
@@ -580,7 +620,7 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
580 sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN; 620 sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
581 adma_enable = 1; 621 adma_enable = 1;
582 } 622 }
583 623
584 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg); 624 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);
585 625
586 if(ap->port_no == 1) 626 if(ap->port_no == 1)
@@ -589,7 +629,7 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
589 else 629 else
590 config_mask = NV_MCP_SATA_CFG_20_PORT0_EN | 630 config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
591 NV_MCP_SATA_CFG_20_PORT0_PWB_EN; 631 NV_MCP_SATA_CFG_20_PORT0_PWB_EN;
592 632
593 if(adma_enable) { 633 if(adma_enable) {
594 new_reg = current_reg | config_mask; 634 new_reg = current_reg | config_mask;
595 pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE; 635 pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
@@ -598,10 +638,10 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
598 new_reg = current_reg & ~config_mask; 638 new_reg = current_reg & ~config_mask;
599 pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE; 639 pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
600 } 640 }
601 641
602 if(current_reg != new_reg) 642 if(current_reg != new_reg)
603 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg); 643 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);
604 644
605 blk_queue_bounce_limit(sdev->request_queue, bounce_limit); 645 blk_queue_bounce_limit(sdev->request_queue, bounce_limit);
606 blk_queue_segment_boundary(sdev->request_queue, segment_boundary); 646 blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
607 blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize); 647 blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
@@ -648,53 +688,62 @@ static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
648 return idx; 688 return idx;
649} 689}
650 690
651static void nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err) 691static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
652{ 692{
653 struct nv_adma_port_priv *pp = ap->private_data; 693 struct nv_adma_port_priv *pp = ap->private_data;
654 int complete = 0, have_err = 0;
655 u8 flags = pp->cpb[cpb_num].resp_flags; 694 u8 flags = pp->cpb[cpb_num].resp_flags;
656 695
657 VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags); 696 VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);
658 697
659 if (flags & NV_CPB_RESP_DONE) { 698 if (unlikely((force_err ||
660 VPRINTK("CPB flags done, flags=0x%x\n", flags); 699 flags & (NV_CPB_RESP_ATA_ERR |
661 complete = 1; 700 NV_CPB_RESP_CMD_ERR |
662 } 701 NV_CPB_RESP_CPB_ERR)))) {
663 if (flags & NV_CPB_RESP_ATA_ERR) { 702 struct ata_eh_info *ehi = &ap->eh_info;
664 ata_port_printk(ap, KERN_ERR, "CPB flags ATA err, flags=0x%x\n", flags); 703 int freeze = 0;
665 have_err = 1; 704
666 complete = 1; 705 ata_ehi_clear_desc(ehi);
667 } 706 ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x", flags );
668 if (flags & NV_CPB_RESP_CMD_ERR) { 707 if (flags & NV_CPB_RESP_ATA_ERR) {
669 ata_port_printk(ap, KERN_ERR, "CPB flags CMD err, flags=0x%x\n", flags); 708 ata_ehi_push_desc(ehi, ": ATA error");
670 have_err = 1; 709 ehi->err_mask |= AC_ERR_DEV;
671 complete = 1; 710 } else if (flags & NV_CPB_RESP_CMD_ERR) {
672 } 711 ata_ehi_push_desc(ehi, ": CMD error");
673 if (flags & NV_CPB_RESP_CPB_ERR) { 712 ehi->err_mask |= AC_ERR_DEV;
674 ata_port_printk(ap, KERN_ERR, "CPB flags CPB err, flags=0x%x\n", flags); 713 } else if (flags & NV_CPB_RESP_CPB_ERR) {
675 have_err = 1; 714 ata_ehi_push_desc(ehi, ": CPB error");
676 complete = 1; 715 ehi->err_mask |= AC_ERR_SYSTEM;
716 freeze = 1;
717 } else {
718 /* notifier error, but no error in CPB flags? */
719 ehi->err_mask |= AC_ERR_OTHER;
720 freeze = 1;
721 }
722 /* Kill all commands. EH will determine what actually failed. */
723 if (freeze)
724 ata_port_freeze(ap);
725 else
726 ata_port_abort(ap);
727 return 1;
677 } 728 }
678 if(complete || force_err) 729
679 { 730 if (flags & NV_CPB_RESP_DONE) {
680 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num); 731 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
681 if(likely(qc)) { 732 VPRINTK("CPB flags done, flags=0x%x\n", flags);
682 u8 ata_status = 0; 733 if (likely(qc)) {
683 /* Only use the ATA port status for non-NCQ commands. 734 /* Grab the ATA port status for non-NCQ commands.
684 For NCQ commands the current status may have nothing to do with 735 For NCQ commands the current status may have nothing to do with
685 the command just completed. */ 736 the command just completed. */
686 if(qc->tf.protocol != ATA_PROT_NCQ) 737 if (qc->tf.protocol != ATA_PROT_NCQ) {
687 ata_status = readb(nv_adma_ctl_block(ap) + (ATA_REG_STATUS * 4)); 738 u8 ata_status = readb(pp->ctl_block + (ATA_REG_STATUS * 4));
688 739 qc->err_mask |= ac_err_mask(ata_status);
689 if(have_err || force_err) 740 }
690 ata_status |= ATA_ERR;
691
692 qc->err_mask |= ac_err_mask(ata_status);
693 DPRINTK("Completing qc from tag %d with err_mask %u\n",cpb_num, 741 DPRINTK("Completing qc from tag %d with err_mask %u\n",cpb_num,
694 qc->err_mask); 742 qc->err_mask);
695 ata_qc_complete(qc); 743 ata_qc_complete(qc);
696 } 744 }
697 } 745 }
746 return 0;
698} 747}
699 748
700static int nv_host_intr(struct ata_port *ap, u8 irq_stat) 749static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
@@ -735,15 +784,14 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
735 784
736 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) { 785 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
737 struct nv_adma_port_priv *pp = ap->private_data; 786 struct nv_adma_port_priv *pp = ap->private_data;
738 void __iomem *mmio = nv_adma_ctl_block(ap); 787 void __iomem *mmio = pp->ctl_block;
739 u16 status; 788 u16 status;
740 u32 gen_ctl; 789 u32 gen_ctl;
741 int have_global_err = 0;
742 u32 notifier, notifier_error; 790 u32 notifier, notifier_error;
743 791
744 /* if in ATA register mode, use standard ata interrupt handler */ 792 /* if in ATA register mode, use standard ata interrupt handler */
745 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) { 793 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
746 u8 irq_stat = readb(host->mmio_base + NV_INT_STATUS_CK804) 794 u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
747 >> (NV_INT_PORT_SHIFT * i); 795 >> (NV_INT_PORT_SHIFT * i);
748 if(ata_tag_valid(ap->active_tag)) 796 if(ata_tag_valid(ap->active_tag))
749 /** NV_INT_DEV indication seems unreliable at times 797 /** NV_INT_DEV indication seems unreliable at times
@@ -758,7 +806,7 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
758 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR); 806 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
759 notifier_clears[i] = notifier | notifier_error; 807 notifier_clears[i] = notifier | notifier_error;
760 808
761 gen_ctl = readl(nv_adma_gen_block(ap) + NV_ADMA_GEN_CTL); 809 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
762 810
763 if( !NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier && 811 if( !NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
764 !notifier_error) 812 !notifier_error)
@@ -774,52 +822,60 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
774 readw(mmio + NV_ADMA_STAT); /* flush posted write */ 822 readw(mmio + NV_ADMA_STAT); /* flush posted write */
775 rmb(); 823 rmb();
776 824
777 /* freeze if hotplugged */ 825 handled++; /* irq handled if we got here */
778 if (unlikely(status & (NV_ADMA_STAT_HOTPLUG | NV_ADMA_STAT_HOTUNPLUG))) { 826
779 ata_port_printk(ap, KERN_NOTICE, "Hotplug event, freezing\n"); 827 /* freeze if hotplugged or controller error */
828 if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
829 NV_ADMA_STAT_HOTUNPLUG |
830 NV_ADMA_STAT_TIMEOUT))) {
831 struct ata_eh_info *ehi = &ap->eh_info;
832
833 ata_ehi_clear_desc(ehi);
834 ata_ehi_push_desc(ehi, "ADMA status 0x%08x", status );
835 if (status & NV_ADMA_STAT_TIMEOUT) {
836 ehi->err_mask |= AC_ERR_SYSTEM;
837 ata_ehi_push_desc(ehi, ": timeout");
838 } else if (status & NV_ADMA_STAT_HOTPLUG) {
839 ata_ehi_hotplugged(ehi);
840 ata_ehi_push_desc(ehi, ": hotplug");
841 } else if (status & NV_ADMA_STAT_HOTUNPLUG) {
842 ata_ehi_hotplugged(ehi);
843 ata_ehi_push_desc(ehi, ": hot unplug");
844 }
780 ata_port_freeze(ap); 845 ata_port_freeze(ap);
781 handled++;
782 continue; 846 continue;
783 } 847 }
784 848
785 if (status & NV_ADMA_STAT_TIMEOUT) { 849 if (status & (NV_ADMA_STAT_DONE |
786 ata_port_printk(ap, KERN_ERR, "timeout, stat=0x%x\n", status); 850 NV_ADMA_STAT_CPBERR)) {
787 have_global_err = 1;
788 }
789 if (status & NV_ADMA_STAT_CPBERR) {
790 ata_port_printk(ap, KERN_ERR, "CPB error, stat=0x%x\n", status);
791 have_global_err = 1;
792 }
793 if ((status & NV_ADMA_STAT_DONE) || have_global_err) {
794 /** Check CPBs for completed commands */ 851 /** Check CPBs for completed commands */
795 852
796 if(ata_tag_valid(ap->active_tag)) 853 if (ata_tag_valid(ap->active_tag)) {
797 /* Non-NCQ command */ 854 /* Non-NCQ command */
798 nv_adma_check_cpb(ap, ap->active_tag, have_global_err || 855 nv_adma_check_cpb(ap, ap->active_tag,
799 (notifier_error & (1 << ap->active_tag))); 856 notifier_error & (1 << ap->active_tag));
800 else { 857 } else {
801 int pos; 858 int pos, error = 0;
802 u32 active = ap->sactive; 859 u32 active = ap->sactive;
803 while( (pos = ffs(active)) ) { 860
861 while ((pos = ffs(active)) && !error) {
804 pos--; 862 pos--;
805 nv_adma_check_cpb(ap, pos, have_global_err || 863 error = nv_adma_check_cpb(ap, pos,
806 (notifier_error & (1 << pos)) ); 864 notifier_error & (1 << pos) );
807 active &= ~(1 << pos ); 865 active &= ~(1 << pos );
808 } 866 }
809 } 867 }
810 } 868 }
811
812 handled++; /* irq handled if we got here */
813 } 869 }
814 } 870 }
815 871
816 if(notifier_clears[0] || notifier_clears[1]) { 872 if(notifier_clears[0] || notifier_clears[1]) {
817 /* Note: Both notifier clear registers must be written 873 /* Note: Both notifier clear registers must be written
818 if either is set, even if one is zero, according to NVIDIA. */ 874 if either is set, even if one is zero, according to NVIDIA. */
819 writel(notifier_clears[0], 875 struct nv_adma_port_priv *pp = host->ports[0]->private_data;
820 nv_adma_notifier_clear_block(host->ports[0])); 876 writel(notifier_clears[0], pp->notifier_clear_block);
821 writel(notifier_clears[1], 877 pp = host->ports[1]->private_data;
822 nv_adma_notifier_clear_block(host->ports[1])); 878 writel(notifier_clears[1], pp->notifier_clear_block);
823 } 879 }
824 880
825 spin_unlock(&host->lock); 881 spin_unlock(&host->lock);
@@ -829,19 +885,20 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
829 885
830static void nv_adma_irq_clear(struct ata_port *ap) 886static void nv_adma_irq_clear(struct ata_port *ap)
831{ 887{
832 void __iomem *mmio = nv_adma_ctl_block(ap); 888 struct nv_adma_port_priv *pp = ap->private_data;
889 void __iomem *mmio = pp->ctl_block;
833 u16 status = readw(mmio + NV_ADMA_STAT); 890 u16 status = readw(mmio + NV_ADMA_STAT);
834 u32 notifier = readl(mmio + NV_ADMA_NOTIFIER); 891 u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
835 u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR); 892 u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
836 unsigned long dma_stat_addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS; 893 void __iomem *dma_stat_addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;
837 894
838 /* clear ADMA status */ 895 /* clear ADMA status */
839 writew(status, mmio + NV_ADMA_STAT); 896 writew(status, mmio + NV_ADMA_STAT);
840 writel(notifier | notifier_error, 897 writel(notifier | notifier_error,
841 nv_adma_notifier_clear_block(ap)); 898 pp->notifier_clear_block);
842 899
843 /** clear legacy status */ 900 /** clear legacy status */
844 outb(inb(dma_stat_addr), dma_stat_addr); 901 iowrite8(ioread8(dma_stat_addr), dma_stat_addr);
845} 902}
846 903
847static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc) 904static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc)
@@ -857,15 +914,15 @@ static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc)
857 } 914 }
858 915
859 /* load PRD table addr. */ 916 /* load PRD table addr. */
860 outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS); 917 iowrite32(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
861 918
862 /* specify data direction, triple-check start bit is clear */ 919 /* specify data direction, triple-check start bit is clear */
863 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); 920 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
864 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START); 921 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
865 if (!rw) 922 if (!rw)
866 dmactl |= ATA_DMA_WR; 923 dmactl |= ATA_DMA_WR;
867 924
868 outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD); 925 iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
869 926
870 /* issue r/w command */ 927 /* issue r/w command */
871 ata_exec_command(ap, &qc->tf); 928 ata_exec_command(ap, &qc->tf);
@@ -883,9 +940,9 @@ static void nv_adma_bmdma_start(struct ata_queued_cmd *qc)
883 } 940 }
884 941
885 /* start host DMA transaction */ 942 /* start host DMA transaction */
886 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); 943 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
887 outb(dmactl | ATA_DMA_START, 944 iowrite8(dmactl | ATA_DMA_START,
888 ap->ioaddr.bmdma_addr + ATA_DMA_CMD); 945 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
889} 946}
890 947
891static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc) 948static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc)
@@ -897,8 +954,8 @@ static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc)
897 return; 954 return;
898 955
899 /* clear start/stop bit */ 956 /* clear start/stop bit */
900 outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START, 957 iowrite8(ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
901 ap->ioaddr.bmdma_addr + ATA_DMA_CMD); 958 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
902 959
903 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */ 960 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
904 ata_altstatus(ap); /* dummy read */ 961 ata_altstatus(ap); /* dummy read */
@@ -910,7 +967,7 @@ static u8 nv_adma_bmdma_status(struct ata_port *ap)
910 967
911 WARN_ON(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)); 968 WARN_ON(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE));
912 969
913 return inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); 970 return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
914} 971}
915 972
916static int nv_adma_port_start(struct ata_port *ap) 973static int nv_adma_port_start(struct ata_port *ap)
@@ -920,7 +977,7 @@ static int nv_adma_port_start(struct ata_port *ap)
920 int rc; 977 int rc;
921 void *mem; 978 void *mem;
922 dma_addr_t mem_dma; 979 dma_addr_t mem_dma;
923 void __iomem *mmio = nv_adma_ctl_block(ap); 980 void __iomem *mmio;
924 u16 tmp; 981 u16 tmp;
925 982
926 VPRINTK("ENTER\n"); 983 VPRINTK("ENTER\n");
@@ -929,19 +986,21 @@ static int nv_adma_port_start(struct ata_port *ap)
929 if (rc) 986 if (rc)
930 return rc; 987 return rc;
931 988
932 pp = kzalloc(sizeof(*pp), GFP_KERNEL); 989 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
933 if (!pp) { 990 if (!pp)
934 rc = -ENOMEM; 991 return -ENOMEM;
935 goto err_out; 992
936 } 993 mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
937 994 ap->port_no * NV_ADMA_PORT_SIZE;
938 mem = dma_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ, 995 pp->ctl_block = mmio;
939 &mem_dma, GFP_KERNEL); 996 pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
940 997 pp->notifier_clear_block = pp->gen_block +
941 if (!mem) { 998 NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);
942 rc = -ENOMEM; 999
943 goto err_out_kfree; 1000 mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
944 } 1001 &mem_dma, GFP_KERNEL);
1002 if (!mem)
1003 return -ENOMEM;
945 memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ); 1004 memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);
946 1005
947 /* 1006 /*
@@ -975,9 +1034,9 @@ static int nv_adma_port_start(struct ata_port *ap)
975 /* clear CPB fetch count */ 1034 /* clear CPB fetch count */
976 writew(0, mmio + NV_ADMA_CPB_COUNT); 1035 writew(0, mmio + NV_ADMA_CPB_COUNT);
977 1036
978 /* clear GO for register mode */ 1037 /* clear GO for register mode, enable interrupt */
979 tmp = readw(mmio + NV_ADMA_CTL); 1038 tmp = readw(mmio + NV_ADMA_CTL);
980 writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL); 1039 writew( (tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL);
981 1040
982 tmp = readw(mmio + NV_ADMA_CTL); 1041 tmp = readw(mmio + NV_ADMA_CTL);
983 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL); 1042 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
@@ -987,53 +1046,89 @@ static int nv_adma_port_start(struct ata_port *ap)
987 readl( mmio + NV_ADMA_CTL ); /* flush posted write */ 1046 readl( mmio + NV_ADMA_CTL ); /* flush posted write */
988 1047
989 return 0; 1048 return 0;
990
991err_out_kfree:
992 kfree(pp);
993err_out:
994 ata_port_stop(ap);
995 return rc;
996} 1049}
997 1050
998static void nv_adma_port_stop(struct ata_port *ap) 1051static void nv_adma_port_stop(struct ata_port *ap)
999{ 1052{
1000 struct device *dev = ap->host->dev;
1001 struct nv_adma_port_priv *pp = ap->private_data; 1053 struct nv_adma_port_priv *pp = ap->private_data;
1002 void __iomem *mmio = nv_adma_ctl_block(ap); 1054 void __iomem *mmio = pp->ctl_block;
1003 1055
1004 VPRINTK("ENTER\n"); 1056 VPRINTK("ENTER\n");
1057 writew(0, mmio + NV_ADMA_CTL);
1058}
1059
1060static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
1061{
1062 struct nv_adma_port_priv *pp = ap->private_data;
1063 void __iomem *mmio = pp->ctl_block;
1005 1064
1065 /* Go to register mode - clears GO */
1066 nv_adma_register_mode(ap);
1067
1068 /* clear CPB fetch count */
1069 writew(0, mmio + NV_ADMA_CPB_COUNT);
1070
1071 /* disable interrupt, shut down port */
1006 writew(0, mmio + NV_ADMA_CTL); 1072 writew(0, mmio + NV_ADMA_CTL);
1007 1073
1008 ap->private_data = NULL; 1074 return 0;
1009 dma_free_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ, pp->cpb, pp->cpb_dma);
1010 kfree(pp);
1011 ata_port_stop(ap);
1012} 1075}
1013 1076
1077static int nv_adma_port_resume(struct ata_port *ap)
1078{
1079 struct nv_adma_port_priv *pp = ap->private_data;
1080 void __iomem *mmio = pp->ctl_block;
1081 u16 tmp;
1082
1083 /* set CPB block location */
1084 writel(pp->cpb_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
1085 writel((pp->cpb_dma >> 16 ) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);
1086
1087 /* clear any outstanding interrupt conditions */
1088 writew(0xffff, mmio + NV_ADMA_STAT);
1089
1090 /* initialize port variables */
1091 pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
1092
1093 /* clear CPB fetch count */
1094 writew(0, mmio + NV_ADMA_CPB_COUNT);
1095
1096 /* clear GO for register mode, enable interrupt */
1097 tmp = readw(mmio + NV_ADMA_CTL);
1098 writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL);
1099
1100 tmp = readw(mmio + NV_ADMA_CTL);
1101 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1102 readl( mmio + NV_ADMA_CTL ); /* flush posted write */
1103 udelay(1);
1104 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1105 readl( mmio + NV_ADMA_CTL ); /* flush posted write */
1106
1107 return 0;
1108}
1014 1109
1015static void nv_adma_setup_port(struct ata_probe_ent *probe_ent, unsigned int port) 1110static void nv_adma_setup_port(struct ata_probe_ent *probe_ent, unsigned int port)
1016{ 1111{
1017 void __iomem *mmio = probe_ent->mmio_base; 1112 void __iomem *mmio = probe_ent->iomap[NV_MMIO_BAR];
1018 struct ata_ioports *ioport = &probe_ent->port[port]; 1113 struct ata_ioports *ioport = &probe_ent->port[port];
1019 1114
1020 VPRINTK("ENTER\n"); 1115 VPRINTK("ENTER\n");
1021 1116
1022 mmio += NV_ADMA_PORT + port * NV_ADMA_PORT_SIZE; 1117 mmio += NV_ADMA_PORT + port * NV_ADMA_PORT_SIZE;
1023 1118
1024 ioport->cmd_addr = (unsigned long) mmio; 1119 ioport->cmd_addr = mmio;
1025 ioport->data_addr = (unsigned long) mmio + (ATA_REG_DATA * 4); 1120 ioport->data_addr = mmio + (ATA_REG_DATA * 4);
1026 ioport->error_addr = 1121 ioport->error_addr =
1027 ioport->feature_addr = (unsigned long) mmio + (ATA_REG_ERR * 4); 1122 ioport->feature_addr = mmio + (ATA_REG_ERR * 4);
1028 ioport->nsect_addr = (unsigned long) mmio + (ATA_REG_NSECT * 4); 1123 ioport->nsect_addr = mmio + (ATA_REG_NSECT * 4);
1029 ioport->lbal_addr = (unsigned long) mmio + (ATA_REG_LBAL * 4); 1124 ioport->lbal_addr = mmio + (ATA_REG_LBAL * 4);
1030 ioport->lbam_addr = (unsigned long) mmio + (ATA_REG_LBAM * 4); 1125 ioport->lbam_addr = mmio + (ATA_REG_LBAM * 4);
1031 ioport->lbah_addr = (unsigned long) mmio + (ATA_REG_LBAH * 4); 1126 ioport->lbah_addr = mmio + (ATA_REG_LBAH * 4);
1032 ioport->device_addr = (unsigned long) mmio + (ATA_REG_DEVICE * 4); 1127 ioport->device_addr = mmio + (ATA_REG_DEVICE * 4);
1033 ioport->status_addr = 1128 ioport->status_addr =
1034 ioport->command_addr = (unsigned long) mmio + (ATA_REG_STATUS * 4); 1129 ioport->command_addr = mmio + (ATA_REG_STATUS * 4);
1035 ioport->altstatus_addr = 1130 ioport->altstatus_addr =
1036 ioport->ctl_addr = (unsigned long) mmio + 0x20; 1131 ioport->ctl_addr = mmio + 0x20;
1037} 1132}
1038 1133
1039static int nv_adma_host_init(struct ata_probe_ent *probe_ent) 1134static int nv_adma_host_init(struct ata_probe_ent *probe_ent)
@@ -1056,15 +1151,6 @@ static int nv_adma_host_init(struct ata_probe_ent *probe_ent)
1056 for (i = 0; i < probe_ent->n_ports; i++) 1151 for (i = 0; i < probe_ent->n_ports; i++)
1057 nv_adma_setup_port(probe_ent, i); 1152 nv_adma_setup_port(probe_ent, i);
1058 1153
1059 for (i = 0; i < probe_ent->n_ports; i++) {
1060 void __iomem *mmio = __nv_adma_ctl_block(probe_ent->mmio_base, i);
1061 u16 tmp;
1062
1063 /* enable interrupt, clear reset if not already clear */
1064 tmp = readw(mmio + NV_ADMA_CTL);
1065 writew(tmp | NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL);
1066 }
1067
1068 return 0; 1154 return 0;
1069} 1155}
1070 1156
@@ -1110,18 +1196,31 @@ static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
1110 cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag))); 1196 cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
1111} 1197}
1112 1198
1199static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
1200{
1201 struct nv_adma_port_priv *pp = qc->ap->private_data;
1202
1203 /* ADMA engine can only be used for non-ATAPI DMA commands,
1204 or interrupt-driven no-data commands. */
1205 if((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
1206 (qc->tf.flags & ATA_TFLAG_POLLING))
1207 return 1;
1208
1209 if((qc->flags & ATA_QCFLAG_DMAMAP) ||
1210 (qc->tf.protocol == ATA_PROT_NODATA))
1211 return 0;
1212
1213 return 1;
1214}
1215
1113static void nv_adma_qc_prep(struct ata_queued_cmd *qc) 1216static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
1114{ 1217{
1115 struct nv_adma_port_priv *pp = qc->ap->private_data; 1218 struct nv_adma_port_priv *pp = qc->ap->private_data;
1116 struct nv_adma_cpb *cpb = &pp->cpb[qc->tag]; 1219 struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
1117 u8 ctl_flags = NV_CPB_CTL_CPB_VALID | 1220 u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
1118 NV_CPB_CTL_APRD_VALID |
1119 NV_CPB_CTL_IEN; 1221 NV_CPB_CTL_IEN;
1120 1222
1121 VPRINTK("qc->flags = 0x%lx\n", qc->flags); 1223 if (nv_adma_use_reg_mode(qc)) {
1122
1123 if (!(qc->flags & ATA_QCFLAG_DMAMAP) ||
1124 (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
1125 nv_adma_register_mode(qc->ap); 1224 nv_adma_register_mode(qc->ap);
1126 ata_qc_prep(qc); 1225 ata_qc_prep(qc);
1127 return; 1226 return;
@@ -1137,9 +1236,15 @@ static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
1137 if (qc->tf.protocol == ATA_PROT_NCQ) 1236 if (qc->tf.protocol == ATA_PROT_NCQ)
1138 ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA; 1237 ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;
1139 1238
1239 VPRINTK("qc->flags = 0x%lx\n", qc->flags);
1240
1140 nv_adma_tf_to_cpb(&qc->tf, cpb->tf); 1241 nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
1141 1242
1142 nv_adma_fill_sg(qc, cpb); 1243 if(qc->flags & ATA_QCFLAG_DMAMAP) {
1244 nv_adma_fill_sg(qc, cpb);
1245 ctl_flags |= NV_CPB_CTL_APRD_VALID;
1246 } else
1247 memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);
1143 1248
1144 /* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID until we are 1249 /* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID until we are
1145 finished filling in all of the contents */ 1250 finished filling in all of the contents */
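
With nv_adma_use_reg_mode() deciding the fallback, the CPB advertises NV_CPB_CTL_APRD_VALID only when the command really mapped a scatter/gather table; otherwise the five inline APRDs are wiped so stale entries cannot be fetched. The "be paranoid" comment describes ordinary producer/consumer ordering, which presumably ends like this (a sketch assuming a wmb() barrier, since the actual write sits outside this hunk):

        /* taskfile bytes and APRDs are in place; publish the CPB last */
        wmb();                          /* order payload before the valid bit */
        cpb->ctl_flags = ctl_flags;     /* NV_CPB_CTL_CPB_VALID now visible */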
@@ -1150,14 +1255,13 @@ static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
1150static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc) 1255static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
1151{ 1256{
1152 struct nv_adma_port_priv *pp = qc->ap->private_data; 1257 struct nv_adma_port_priv *pp = qc->ap->private_data;
1153 void __iomem *mmio = nv_adma_ctl_block(qc->ap); 1258 void __iomem *mmio = pp->ctl_block;
1154 1259
1155 VPRINTK("ENTER\n"); 1260 VPRINTK("ENTER\n");
1156 1261
1157 if (!(qc->flags & ATA_QCFLAG_DMAMAP) || 1262 if (nv_adma_use_reg_mode(qc)) {
1158 (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
1159 /* use ATA register mode */ 1263 /* use ATA register mode */
1160 VPRINTK("no dmamap or ATAPI, using ATA register mode: 0x%lx\n", qc->flags); 1264 VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
1161 nv_adma_register_mode(qc->ap); 1265 nv_adma_register_mode(qc->ap);
1162 return ata_qc_issue_prot(qc); 1266 return ata_qc_issue_prot(qc);
1163 } else 1267 } else
@@ -1229,7 +1333,7 @@ static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
1229 irqreturn_t ret; 1333 irqreturn_t ret;
1230 1334
1231 spin_lock(&host->lock); 1335 spin_lock(&host->lock);
1232 irq_stat = inb(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS); 1336 irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
1233 ret = nv_do_interrupt(host, irq_stat); 1337 ret = nv_do_interrupt(host, irq_stat);
1234 spin_unlock(&host->lock); 1338 spin_unlock(&host->lock);
1235 1339
@@ -1243,7 +1347,7 @@ static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
1243 irqreturn_t ret; 1347 irqreturn_t ret;
1244 1348
1245 spin_lock(&host->lock); 1349 spin_lock(&host->lock);
1246 irq_stat = readb(host->mmio_base + NV_INT_STATUS_CK804); 1350 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1247 ret = nv_do_interrupt(host, irq_stat); 1351 ret = nv_do_interrupt(host, irq_stat);
1248 spin_unlock(&host->lock); 1352 spin_unlock(&host->lock);
1249 1353
@@ -1255,7 +1359,7 @@ static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg)
1255 if (sc_reg > SCR_CONTROL) 1359 if (sc_reg > SCR_CONTROL)
1256 return 0xffffffffU; 1360 return 0xffffffffU;
1257 1361
1258 return ioread32((void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4)); 1362 return ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));
1259} 1363}
1260 1364
1261static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val) 1365static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
@@ -1263,36 +1367,36 @@ static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
1263 if (sc_reg > SCR_CONTROL) 1367 if (sc_reg > SCR_CONTROL)
1264 return; 1368 return;
1265 1369
1266 iowrite32(val, (void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4)); 1370 iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4));
1267} 1371}
1268 1372
1269static void nv_nf2_freeze(struct ata_port *ap) 1373static void nv_nf2_freeze(struct ata_port *ap)
1270{ 1374{
1271 unsigned long scr_addr = ap->host->ports[0]->ioaddr.scr_addr; 1375 void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1272 int shift = ap->port_no * NV_INT_PORT_SHIFT; 1376 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1273 u8 mask; 1377 u8 mask;
1274 1378
1275 mask = inb(scr_addr + NV_INT_ENABLE); 1379 mask = ioread8(scr_addr + NV_INT_ENABLE);
1276 mask &= ~(NV_INT_ALL << shift); 1380 mask &= ~(NV_INT_ALL << shift);
1277 outb(mask, scr_addr + NV_INT_ENABLE); 1381 iowrite8(mask, scr_addr + NV_INT_ENABLE);
1278} 1382}
1279 1383
1280static void nv_nf2_thaw(struct ata_port *ap) 1384static void nv_nf2_thaw(struct ata_port *ap)
1281{ 1385{
1282 unsigned long scr_addr = ap->host->ports[0]->ioaddr.scr_addr; 1386 void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1283 int shift = ap->port_no * NV_INT_PORT_SHIFT; 1387 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1284 u8 mask; 1388 u8 mask;
1285 1389
1286 outb(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS); 1390 iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
1287 1391
1288 mask = inb(scr_addr + NV_INT_ENABLE); 1392 mask = ioread8(scr_addr + NV_INT_ENABLE);
1289 mask |= (NV_INT_MASK << shift); 1393 mask |= (NV_INT_MASK << shift);
1290 outb(mask, scr_addr + NV_INT_ENABLE); 1394 iowrite8(mask, scr_addr + NV_INT_ENABLE);
1291} 1395}
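
The nForce2 helpers above stop treating scr_addr as a raw port number: ioread8()/iowrite8() take the iomap cookie and dispatch correctly whether the BAR behind it is port I/O or MMIO, which is the point of the conversion. Both freeze and thaw reduce to a read-modify-write of one enable byte; a condensed form (helper name ours):

static void nv_nf2_rmw_int_enable(void __iomem *scr_addr, u8 set, u8 clear)
{
        u8 mask = ioread8(scr_addr + NV_INT_ENABLE);

        mask = (mask & ~clear) | set;
        iowrite8(mask, scr_addr + NV_INT_ENABLE);
}

freeze clears NV_INT_ALL << shift; thaw first acks pending bits in NV_INT_STATUS and then sets NV_INT_MASK << shift.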
1292 1396
1293static void nv_ck804_freeze(struct ata_port *ap) 1397static void nv_ck804_freeze(struct ata_port *ap)
1294{ 1398{
1295 void __iomem *mmio_base = ap->host->mmio_base; 1399 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1296 int shift = ap->port_no * NV_INT_PORT_SHIFT; 1400 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1297 u8 mask; 1401 u8 mask;
1298 1402
@@ -1303,7 +1407,7 @@ static void nv_ck804_freeze(struct ata_port *ap)
1303 1407
1304static void nv_ck804_thaw(struct ata_port *ap) 1408static void nv_ck804_thaw(struct ata_port *ap)
1305{ 1409{
1306 void __iomem *mmio_base = ap->host->mmio_base; 1410 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1307 int shift = ap->port_no * NV_INT_PORT_SHIFT; 1411 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1308 u8 mask; 1412 u8 mask;
1309 1413
@@ -1335,32 +1439,13 @@ static void nv_adma_error_handler(struct ata_port *ap)
1335{ 1439{
1336 struct nv_adma_port_priv *pp = ap->private_data; 1440 struct nv_adma_port_priv *pp = ap->private_data;
1337 if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) { 1441 if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
1338 void __iomem *mmio = nv_adma_ctl_block(ap); 1442 void __iomem *mmio = pp->ctl_block;
1339 int i; 1443 int i;
1340 u16 tmp; 1444 u16 tmp;
1341 1445
1342 u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
1343 u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
1344 u32 gen_ctl = readl(nv_adma_gen_block(ap) + NV_ADMA_GEN_CTL);
1345 u32 status = readw(mmio + NV_ADMA_STAT);
1346
1347 ata_port_printk(ap, KERN_ERR, "EH in ADMA mode, notifier 0x%X "
1348 "notifier_error 0x%X gen_ctl 0x%X status 0x%X\n",
1349 notifier, notifier_error, gen_ctl, status);
1350
1351 for( i=0;i<NV_ADMA_MAX_CPBS;i++) {
1352 struct nv_adma_cpb *cpb = &pp->cpb[i];
1353 if( cpb->ctl_flags || cpb->resp_flags )
1354 ata_port_printk(ap, KERN_ERR,
1355 "CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
1356 i, cpb->ctl_flags, cpb->resp_flags);
1357 }
1358
1359 /* Push us back into port register mode for error handling. */ 1446 /* Push us back into port register mode for error handling. */
1360 nv_adma_register_mode(ap); 1447 nv_adma_register_mode(ap);
1361 1448
1362 ata_port_printk(ap, KERN_ERR, "Resetting port\n");
1363
1364 /* Mark all of the CPBs as invalid to prevent them from being executed */ 1449 /* Mark all of the CPBs as invalid to prevent them from being executed */
1365 for( i=0;i<NV_ADMA_MAX_CPBS;i++) 1450 for( i=0;i<NV_ADMA_MAX_CPBS;i++)
1366 pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID; 1451 pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;
@@ -1386,10 +1471,10 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1386 static int printed_version = 0; 1471 static int printed_version = 0;
1387 struct ata_port_info *ppi[2]; 1472 struct ata_port_info *ppi[2];
1388 struct ata_probe_ent *probe_ent; 1473 struct ata_probe_ent *probe_ent;
1389 int pci_dev_busy = 0; 1474 struct nv_host_priv *hpriv;
1390 int rc; 1475 int rc;
1391 u32 bar; 1476 u32 bar;
1392 unsigned long base; 1477 void __iomem *base;
1393 unsigned long type = ent->driver_data; 1478 unsigned long type = ent->driver_data;
1394 int mask_set = 0; 1479 int mask_set = 0;
1395 1480
@@ -1400,17 +1485,17 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1400 if (pci_resource_start(pdev, bar) == 0) 1485 if (pci_resource_start(pdev, bar) == 0)
1401 return -ENODEV; 1486 return -ENODEV;
1402 1487
1403 if ( !printed_version++) 1488 if (!printed_version++)
1404 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); 1489 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
1405 1490
1406 rc = pci_enable_device(pdev); 1491 rc = pcim_enable_device(pdev);
1407 if (rc) 1492 if (rc)
1408 goto err_out; 1493 return rc;
1409 1494
1410 rc = pci_request_regions(pdev, DRV_NAME); 1495 rc = pci_request_regions(pdev, DRV_NAME);
1411 if (rc) { 1496 if (rc) {
1412 pci_dev_busy = 1; 1497 pcim_pin_device(pdev);
1413 goto err_out_disable; 1498 return rc;
1414 } 1499 }
1415 1500
1416 if(type >= CK804 && adma_enabled) { 1501 if(type >= CK804 && adma_enabled) {
@@ -1424,27 +1509,31 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1424 if(!mask_set) { 1509 if(!mask_set) {
1425 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); 1510 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
1426 if (rc) 1511 if (rc)
1427 goto err_out_regions; 1512 return rc;
1428 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); 1513 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
1429 if (rc) 1514 if (rc)
1430 goto err_out_regions; 1515 return rc;
1431 } 1516 }
1432 1517
1433 rc = -ENOMEM; 1518 rc = -ENOMEM;
1434 1519
1520 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
1521 if (!hpriv)
1522 return -ENOMEM;
1523
1435 ppi[0] = ppi[1] = &nv_port_info[type]; 1524 ppi[0] = ppi[1] = &nv_port_info[type];
1436 probe_ent = ata_pci_init_native_mode(pdev, ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY); 1525 probe_ent = ata_pci_init_native_mode(pdev, ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
1437 if (!probe_ent) 1526 if (!probe_ent)
1438 goto err_out_regions; 1527 return -ENOMEM;
1439 1528
1440 probe_ent->mmio_base = pci_iomap(pdev, 5, 0); 1529 if (!pcim_iomap(pdev, NV_MMIO_BAR, 0))
1441 if (!probe_ent->mmio_base) { 1530 return -EIO;
1442 rc = -EIO; 1531 probe_ent->iomap = pcim_iomap_table(pdev);
1443 goto err_out_free_ent;
1444 }
1445 1532
1446 base = (unsigned long)probe_ent->mmio_base; 1533 probe_ent->private_data = hpriv;
1534 hpriv->type = type;
1447 1535
1536 base = probe_ent->iomap[NV_MMIO_BAR];
1448 probe_ent->port[0].scr_addr = base + NV_PORT0_SCR_REG_OFFSET; 1537 probe_ent->port[0].scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
1449 probe_ent->port[1].scr_addr = base + NV_PORT1_SCR_REG_OFFSET; 1538 probe_ent->port[1].scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
1450 1539
@@ -1462,28 +1551,72 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1462 if (type == ADMA) { 1551 if (type == ADMA) {
1463 rc = nv_adma_host_init(probe_ent); 1552 rc = nv_adma_host_init(probe_ent);
1464 if (rc) 1553 if (rc)
1465 goto err_out_iounmap; 1554 return rc;
1466 } 1555 }
1467 1556
1468 rc = ata_device_add(probe_ent); 1557 rc = ata_device_add(probe_ent);
1469 if (rc != NV_PORTS) 1558 if (rc != NV_PORTS)
1470 goto err_out_iounmap; 1559 return -ENODEV;
1471
1472 kfree(probe_ent);
1473 1560
1561 devm_kfree(&pdev->dev, probe_ent);
1474 return 0; 1562 return 0;
1563}
1475 1564
1476err_out_iounmap: 1565static void nv_remove_one (struct pci_dev *pdev)
1477 pci_iounmap(pdev, probe_ent->mmio_base); 1566{
1478err_out_free_ent: 1567 struct ata_host *host = dev_get_drvdata(&pdev->dev);
1479 kfree(probe_ent); 1568 struct nv_host_priv *hpriv = host->private_data;
1480err_out_regions: 1569
1481 pci_release_regions(pdev); 1570 ata_pci_remove_one(pdev);
1482err_out_disable: 1571 kfree(hpriv);
1483 if (!pci_dev_busy) 1572}
1484 pci_disable_device(pdev); 1573
1485err_out: 1574static int nv_pci_device_resume(struct pci_dev *pdev)
1486 return rc; 1575{
1576 struct ata_host *host = dev_get_drvdata(&pdev->dev);
1577 struct nv_host_priv *hpriv = host->private_data;
1578 int rc;
1579
1580 rc = ata_pci_device_do_resume(pdev);
1581 if(rc)
1582 return rc;
1583
1584 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
1585 if(hpriv->type >= CK804) {
1586 u8 regval;
1587
1588 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
1589 regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
1590 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
1591 }
1592 if(hpriv->type == ADMA) {
1593 u32 tmp32;
1594 struct nv_adma_port_priv *pp;
1595 /* enable/disable ADMA on the ports appropriately */
1596 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1597
1598 pp = host->ports[0]->private_data;
1599 if(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1600 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
1601 NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
1602 else
1603 tmp32 |= (NV_MCP_SATA_CFG_20_PORT0_EN |
1604 NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
1605 pp = host->ports[1]->private_data;
1606 if(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1607 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
1608 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
1609 else
1610 tmp32 |= (NV_MCP_SATA_CFG_20_PORT1_EN |
1611 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
1612
1613 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1614 }
1615 }
1616
1617 ata_host_resume(host);
1618
1619 return 0;
1487} 1620}
1488 1621
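
nv_init_one loses its whole unwind ladder because the probe path is now managed: pcim_enable_device() arms devres, so the enable, the pcim_iomap() mapping and every devm_kzalloc() are released automatically on probe failure or driver detach, while pcim_pin_device() keeps the device enabled when the region request fails and another driver may own it. A stripped-down sketch of the pattern (the private allocation is a placeholder):

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        void *priv;
        int rc;

        rc = pcim_enable_device(pdev);          /* auto-disabled on detach */
        if (rc)
                return rc;

        rc = pci_request_regions(pdev, DRV_NAME);
        if (rc) {
                pcim_pin_device(pdev);          /* stay enabled despite failure */
                return rc;
        }

        if (!pcim_iomap(pdev, NV_MMIO_BAR, 0))  /* auto-unmapped on detach */
                return -EIO;

        priv = devm_kzalloc(&pdev->dev, 64, GFP_KERNEL);
        if (!priv)
                return -ENOMEM;                 /* earlier steps unwind themselves */

        return 0;
}

The resume hook still has real work devres cannot do: after an actual suspend it rewrites NV_MCP_SATA_CFG_20 so the SATA register space and the per-port ADMA enables match each port's ATAPI state before ata_host_resume() restarts things.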
1489static void nv_ck804_host_stop(struct ata_host *host) 1622static void nv_ck804_host_stop(struct ata_host *host)
@@ -1495,25 +1628,13 @@ static void nv_ck804_host_stop(struct ata_host *host)
1495 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval); 1628 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
1496 regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN; 1629 regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
1497 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval); 1630 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
1498
1499 ata_pci_host_stop(host);
1500} 1631}
1501 1632
1502static void nv_adma_host_stop(struct ata_host *host) 1633static void nv_adma_host_stop(struct ata_host *host)
1503{ 1634{
1504 struct pci_dev *pdev = to_pci_dev(host->dev); 1635 struct pci_dev *pdev = to_pci_dev(host->dev);
1505 int i;
1506 u32 tmp32; 1636 u32 tmp32;
1507 1637
1508 for (i = 0; i < host->n_ports; i++) {
1509 void __iomem *mmio = __nv_adma_ctl_block(host->mmio_base, i);
1510 u16 tmp;
1511
1512 /* disable interrupt */
1513 tmp = readw(mmio + NV_ADMA_CTL);
1514 writew(tmp & ~NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL);
1515 }
1516
1517 /* disable ADMA on the ports */ 1638 /* disable ADMA on the ports */
1518 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32); 1639 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1519 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN | 1640 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index f055874a6ec5..3be4cc338d7b 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -39,10 +39,10 @@
39#include <linux/interrupt.h> 39#include <linux/interrupt.h>
40#include <linux/sched.h> 40#include <linux/sched.h>
41#include <linux/device.h> 41#include <linux/device.h>
42#include <scsi/scsi.h>
42#include <scsi/scsi_host.h> 43#include <scsi/scsi_host.h>
43#include <scsi/scsi_cmnd.h> 44#include <scsi/scsi_cmnd.h>
44#include <linux/libata.h> 45#include <linux/libata.h>
45#include <asm/io.h>
46#include "sata_promise.h" 46#include "sata_promise.h"
47 47
48#define DRV_NAME "sata_promise" 48#define DRV_NAME "sata_promise"
@@ -50,6 +50,17 @@
50 50
51 51
52enum { 52enum {
53 PDC_MMIO_BAR = 3,
54
55 /* register offsets */
56 PDC_FEATURE = 0x04, /* Feature/Error reg (per port) */
57 PDC_SECTOR_COUNT = 0x08, /* Sector count reg (per port) */
58 PDC_SECTOR_NUMBER = 0x0C, /* Sector number reg (per port) */
59 PDC_CYLINDER_LOW = 0x10, /* Cylinder low reg (per port) */
60 PDC_CYLINDER_HIGH = 0x14, /* Cylinder high reg (per port) */
61 PDC_DEVICE = 0x18, /* Device/Head reg (per port) */
62 PDC_COMMAND = 0x1C, /* Command/status reg (per port) */
63 PDC_ALTSTATUS = 0x38, /* Alternate-status/device-control reg (per port) */
53 PDC_PKT_SUBMIT = 0x40, /* Command packet pointer addr */ 64 PDC_PKT_SUBMIT = 0x40, /* Command packet pointer addr */
54 PDC_INT_SEQMASK = 0x40, /* Mask of asserted SEQ INTs */ 65 PDC_INT_SEQMASK = 0x40, /* Mask of asserted SEQ INTs */
55 PDC_FLASH_CTL = 0x44, /* Flash control register */ 66 PDC_FLASH_CTL = 0x44, /* Flash control register */
@@ -71,13 +82,23 @@ enum {
71 82
72 PDC_HAS_PATA = (1 << 1), /* PDC20375/20575 has PATA */ 83 PDC_HAS_PATA = (1 << 1), /* PDC20375/20575 has PATA */
73 84
85 /* Sequence counter control registers bit definitions */
86 PDC_SEQCNTRL_INT_MASK = (1 << 5), /* Sequence Interrupt Mask */
87
88 /* Feature register values */
89 PDC_FEATURE_ATAPI_PIO = 0x00, /* ATAPI data xfer by PIO */
90 PDC_FEATURE_ATAPI_DMA = 0x01, /* ATAPI data xfer by DMA */
91
92 /* Device/Head register values */
93 PDC_DEVICE_SATA = 0xE0, /* Device/Head value for SATA devices */
94
74 /* PDC_CTLSTAT bit definitions */ 95 /* PDC_CTLSTAT bit definitions */
75 PDC_DMA_ENABLE = (1 << 7), 96 PDC_DMA_ENABLE = (1 << 7),
76 PDC_IRQ_DISABLE = (1 << 10), 97 PDC_IRQ_DISABLE = (1 << 10),
77 PDC_RESET = (1 << 11), /* HDMA reset */ 98 PDC_RESET = (1 << 11), /* HDMA reset */
78 99
79 PDC_COMMON_FLAGS = ATA_FLAG_NO_LEGACY | 100 PDC_COMMON_FLAGS = ATA_FLAG_NO_LEGACY |
80 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI | 101 ATA_FLAG_MMIO |
81 ATA_FLAG_PIO_POLLING, 102 ATA_FLAG_PIO_POLLING,
82 103
83 /* hp->flags bits */ 104 /* hp->flags bits */
@@ -92,6 +113,7 @@ struct pdc_port_priv {
92 113
93struct pdc_host_priv { 114struct pdc_host_priv {
94 unsigned long flags; 115 unsigned long flags;
116 unsigned long port_flags[ATA_MAX_PORTS];
95}; 117};
96 118
97static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg); 119static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg);
@@ -100,14 +122,14 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
100static irqreturn_t pdc_interrupt (int irq, void *dev_instance); 122static irqreturn_t pdc_interrupt (int irq, void *dev_instance);
101static void pdc_eng_timeout(struct ata_port *ap); 123static void pdc_eng_timeout(struct ata_port *ap);
102static int pdc_port_start(struct ata_port *ap); 124static int pdc_port_start(struct ata_port *ap);
103static void pdc_port_stop(struct ata_port *ap);
104static void pdc_pata_phy_reset(struct ata_port *ap); 125static void pdc_pata_phy_reset(struct ata_port *ap);
105static void pdc_qc_prep(struct ata_queued_cmd *qc); 126static void pdc_qc_prep(struct ata_queued_cmd *qc);
106static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf); 127static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
107static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf); 128static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
129static int pdc_check_atapi_dma(struct ata_queued_cmd *qc);
130static int pdc_old_check_atapi_dma(struct ata_queued_cmd *qc);
108static void pdc_irq_clear(struct ata_port *ap); 131static void pdc_irq_clear(struct ata_port *ap);
109static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc); 132static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc);
110static void pdc_host_stop(struct ata_host *host);
111static void pdc_freeze(struct ata_port *ap); 133static void pdc_freeze(struct ata_port *ap);
112static void pdc_thaw(struct ata_port *ap); 134static void pdc_thaw(struct ata_port *ap);
113static void pdc_error_handler(struct ata_port *ap); 135static void pdc_error_handler(struct ata_port *ap);
@@ -139,6 +161,34 @@ static const struct ata_port_operations pdc_sata_ops = {
139 .check_status = ata_check_status, 161 .check_status = ata_check_status,
140 .exec_command = pdc_exec_command_mmio, 162 .exec_command = pdc_exec_command_mmio,
141 .dev_select = ata_std_dev_select, 163 .dev_select = ata_std_dev_select,
164 .check_atapi_dma = pdc_check_atapi_dma,
165
166 .qc_prep = pdc_qc_prep,
167 .qc_issue = pdc_qc_issue_prot,
168 .freeze = pdc_freeze,
169 .thaw = pdc_thaw,
170 .error_handler = pdc_error_handler,
171 .post_internal_cmd = pdc_post_internal_cmd,
172 .data_xfer = ata_data_xfer,
173 .irq_handler = pdc_interrupt,
174 .irq_clear = pdc_irq_clear,
175 .irq_on = ata_irq_on,
176 .irq_ack = ata_irq_ack,
177
178 .scr_read = pdc_sata_scr_read,
179 .scr_write = pdc_sata_scr_write,
180 .port_start = pdc_port_start,
181};
182
183/* First-generation chips need a more restrictive ->check_atapi_dma op */
184static const struct ata_port_operations pdc_old_sata_ops = {
185 .port_disable = ata_port_disable,
186 .tf_load = pdc_tf_load_mmio,
187 .tf_read = ata_tf_read,
188 .check_status = ata_check_status,
189 .exec_command = pdc_exec_command_mmio,
190 .dev_select = ata_std_dev_select,
191 .check_atapi_dma = pdc_old_check_atapi_dma,
142 192
143 .qc_prep = pdc_qc_prep, 193 .qc_prep = pdc_qc_prep,
144 .qc_issue = pdc_qc_issue_prot, 194 .qc_issue = pdc_qc_issue_prot,
@@ -146,15 +196,15 @@ static const struct ata_port_operations pdc_sata_ops = {
146 .thaw = pdc_thaw, 196 .thaw = pdc_thaw,
147 .error_handler = pdc_error_handler, 197 .error_handler = pdc_error_handler,
148 .post_internal_cmd = pdc_post_internal_cmd, 198 .post_internal_cmd = pdc_post_internal_cmd,
149 .data_xfer = ata_mmio_data_xfer, 199 .data_xfer = ata_data_xfer,
150 .irq_handler = pdc_interrupt, 200 .irq_handler = pdc_interrupt,
151 .irq_clear = pdc_irq_clear, 201 .irq_clear = pdc_irq_clear,
202 .irq_on = ata_irq_on,
203 .irq_ack = ata_irq_ack,
152 204
153 .scr_read = pdc_sata_scr_read, 205 .scr_read = pdc_sata_scr_read,
154 .scr_write = pdc_sata_scr_write, 206 .scr_write = pdc_sata_scr_write,
155 .port_start = pdc_port_start, 207 .port_start = pdc_port_start,
156 .port_stop = pdc_port_stop,
157 .host_stop = pdc_host_stop,
158}; 208};
159 209
160static const struct ata_port_operations pdc_pata_ops = { 210static const struct ata_port_operations pdc_pata_ops = {
@@ -164,30 +214,31 @@ static const struct ata_port_operations pdc_pata_ops = {
164 .check_status = ata_check_status, 214 .check_status = ata_check_status,
165 .exec_command = pdc_exec_command_mmio, 215 .exec_command = pdc_exec_command_mmio,
166 .dev_select = ata_std_dev_select, 216 .dev_select = ata_std_dev_select,
217 .check_atapi_dma = pdc_check_atapi_dma,
167 218
168 .phy_reset = pdc_pata_phy_reset, 219 .phy_reset = pdc_pata_phy_reset,
169 220
170 .qc_prep = pdc_qc_prep, 221 .qc_prep = pdc_qc_prep,
171 .qc_issue = pdc_qc_issue_prot, 222 .qc_issue = pdc_qc_issue_prot,
172 .data_xfer = ata_mmio_data_xfer, 223 .data_xfer = ata_data_xfer,
173 .eng_timeout = pdc_eng_timeout, 224 .eng_timeout = pdc_eng_timeout,
174 .irq_handler = pdc_interrupt, 225 .irq_handler = pdc_interrupt,
175 .irq_clear = pdc_irq_clear, 226 .irq_clear = pdc_irq_clear,
227 .irq_on = ata_irq_on,
228 .irq_ack = ata_irq_ack,
176 229
177 .port_start = pdc_port_start, 230 .port_start = pdc_port_start,
178 .port_stop = pdc_port_stop,
179 .host_stop = pdc_host_stop,
180}; 231};
181 232
182static const struct ata_port_info pdc_port_info[] = { 233static const struct ata_port_info pdc_port_info[] = {
183 /* board_2037x */ 234 /* board_2037x */
184 { 235 {
185 .sht = &pdc_ata_sht, 236 .sht = &pdc_ata_sht,
186 .flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA, 237 .flags = PDC_COMMON_FLAGS,
187 .pio_mask = 0x1f, /* pio0-4 */ 238 .pio_mask = 0x1f, /* pio0-4 */
188 .mwdma_mask = 0x07, /* mwdma0-2 */ 239 .mwdma_mask = 0x07, /* mwdma0-2 */
189 .udma_mask = 0x7f, /* udma0-6 ; FIXME */ 240 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
190 .port_ops = &pdc_sata_ops, 241 .port_ops = &pdc_old_sata_ops,
191 }, 242 },
192 243
193 /* board_20319 */ 244 /* board_20319 */
@@ -197,7 +248,7 @@ static const struct ata_port_info pdc_port_info[] = {
197 .pio_mask = 0x1f, /* pio0-4 */ 248 .pio_mask = 0x1f, /* pio0-4 */
198 .mwdma_mask = 0x07, /* mwdma0-2 */ 249 .mwdma_mask = 0x07, /* mwdma0-2 */
199 .udma_mask = 0x7f, /* udma0-6 ; FIXME */ 250 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
200 .port_ops = &pdc_sata_ops, 251 .port_ops = &pdc_old_sata_ops,
201 }, 252 },
202 253
203 /* board_20619 */ 254 /* board_20619 */
@@ -213,7 +264,7 @@ static const struct ata_port_info pdc_port_info[] = {
213 /* board_2057x */ 264 /* board_2057x */
214 { 265 {
215 .sht = &pdc_ata_sht, 266 .sht = &pdc_ata_sht,
216 .flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA, 267 .flags = PDC_COMMON_FLAGS,
217 .pio_mask = 0x1f, /* pio0-4 */ 268 .pio_mask = 0x1f, /* pio0-4 */
218 .mwdma_mask = 0x07, /* mwdma0-2 */ 269 .mwdma_mask = 0x07, /* mwdma0-2 */
219 .udma_mask = 0x7f, /* udma0-6 ; FIXME */ 270 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
@@ -271,21 +322,22 @@ static int pdc_port_start(struct ata_port *ap)
271 struct pdc_port_priv *pp; 322 struct pdc_port_priv *pp;
272 int rc; 323 int rc;
273 324
325 /* fix up port flags and cable type for SATA+PATA chips */
326 ap->flags |= hp->port_flags[ap->port_no];
327 if (ap->flags & ATA_FLAG_SATA)
328 ap->cbl = ATA_CBL_SATA;
329
274 rc = ata_port_start(ap); 330 rc = ata_port_start(ap);
275 if (rc) 331 if (rc)
276 return rc; 332 return rc;
277 333
278 pp = kzalloc(sizeof(*pp), GFP_KERNEL); 334 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
279 if (!pp) { 335 if (!pp)
280 rc = -ENOMEM; 336 return -ENOMEM;
281 goto err_out;
282 }
283 337
284 pp->pkt = dma_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL); 338 pp->pkt = dmam_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL);
285 if (!pp->pkt) { 339 if (!pp->pkt)
286 rc = -ENOMEM; 340 return -ENOMEM;
287 goto err_out_kfree;
288 }
289 341
290 ap->private_data = pp; 342 ap->private_data = pp;
291 343
@@ -300,40 +352,11 @@ static int pdc_port_start(struct ata_port *ap)
300 } 352 }
301 353
302 return 0; 354 return 0;
303
304err_out_kfree:
305 kfree(pp);
306err_out:
307 ata_port_stop(ap);
308 return rc;
309}
310
311
312static void pdc_port_stop(struct ata_port *ap)
313{
314 struct device *dev = ap->host->dev;
315 struct pdc_port_priv *pp = ap->private_data;
316
317 ap->private_data = NULL;
318 dma_free_coherent(dev, 128, pp->pkt, pp->pkt_dma);
319 kfree(pp);
320 ata_port_stop(ap);
321} 355}
322 356
323
324static void pdc_host_stop(struct ata_host *host)
325{
326 struct pdc_host_priv *hp = host->private_data;
327
328 ata_pci_host_stop(host);
329
330 kfree(hp);
331}
332
333
334static void pdc_reset_port(struct ata_port *ap) 357static void pdc_reset_port(struct ata_port *ap)
335{ 358{
336 void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr + PDC_CTLSTAT; 359 void __iomem *mmio = ap->ioaddr.cmd_addr + PDC_CTLSTAT;
337 unsigned int i; 360 unsigned int i;
338 u32 tmp; 361 u32 tmp;
339 362
@@ -377,18 +400,102 @@ static void pdc_pata_phy_reset(struct ata_port *ap)
377 400
378static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg) 401static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg)
379{ 402{
380 if (sc_reg > SCR_CONTROL) 403 if (sc_reg > SCR_CONTROL || ap->cbl != ATA_CBL_SATA)
381 return 0xffffffffU; 404 return 0xffffffffU;
382 return readl((void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4)); 405 return readl(ap->ioaddr.scr_addr + (sc_reg * 4));
383} 406}
384 407
385 408
386static void pdc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg, 409static void pdc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg,
387 u32 val) 410 u32 val)
388{ 411{
389 if (sc_reg > SCR_CONTROL) 412 if (sc_reg > SCR_CONTROL || ap->cbl != ATA_CBL_SATA)
390 return; 413 return;
391 writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4)); 414 writel(val, ap->ioaddr.scr_addr + (sc_reg * 4));
415}
416
417static void pdc_atapi_pkt(struct ata_queued_cmd *qc)
418{
419 struct ata_port *ap = qc->ap;
420 dma_addr_t sg_table = ap->prd_dma;
421 unsigned int cdb_len = qc->dev->cdb_len;
422 u8 *cdb = qc->cdb;
423 struct pdc_port_priv *pp = ap->private_data;
424 u8 *buf = pp->pkt;
425 u32 *buf32 = (u32 *) buf;
426 unsigned int dev_sel, feature, nbytes;
427
428 /* set control bits (byte 0), zero delay seq id (byte 3),
429 * and seq id (byte 2)
430 */
431 switch (qc->tf.protocol) {
432 case ATA_PROT_ATAPI_DMA:
433 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
434 buf32[0] = cpu_to_le32(PDC_PKT_READ);
435 else
436 buf32[0] = 0;
437 break;
438 case ATA_PROT_ATAPI_NODATA:
439 buf32[0] = cpu_to_le32(PDC_PKT_NODATA);
440 break;
441 default:
442 BUG();
443 break;
444 }
445 buf32[1] = cpu_to_le32(sg_table); /* S/G table addr */
446 buf32[2] = 0; /* no next-packet */
447
448 /* select drive */
449 if (sata_scr_valid(ap)) {
450 dev_sel = PDC_DEVICE_SATA;
451 } else {
452 dev_sel = ATA_DEVICE_OBS;
453 if (qc->dev->devno != 0)
454 dev_sel |= ATA_DEV1;
455 }
456 buf[12] = (1 << 5) | ATA_REG_DEVICE;
457 buf[13] = dev_sel;
458 buf[14] = (1 << 5) | ATA_REG_DEVICE | PDC_PKT_CLEAR_BSY;
459 buf[15] = dev_sel; /* once more, waiting for BSY to clear */
460
461 buf[16] = (1 << 5) | ATA_REG_NSECT;
462 buf[17] = 0x00;
463 buf[18] = (1 << 5) | ATA_REG_LBAL;
464 buf[19] = 0x00;
465
466 /* set feature and byte counter registers */
467 if (qc->tf.protocol != ATA_PROT_ATAPI_DMA) {
468 feature = PDC_FEATURE_ATAPI_PIO;
469 /* set byte counter register to real transfer byte count */
470 nbytes = qc->nbytes;
471 if (nbytes > 0xffff)
472 nbytes = 0xffff;
473 } else {
474 feature = PDC_FEATURE_ATAPI_DMA;
475 /* set byte counter register to 0 */
476 nbytes = 0;
477 }
478 buf[20] = (1 << 5) | ATA_REG_FEATURE;
479 buf[21] = feature;
480 buf[22] = (1 << 5) | ATA_REG_BYTEL;
481 buf[23] = nbytes & 0xFF;
482 buf[24] = (1 << 5) | ATA_REG_BYTEH;
483 buf[25] = (nbytes >> 8) & 0xFF;
484
485 /* send ATAPI packet command 0xA0 */
486 buf[26] = (1 << 5) | ATA_REG_CMD;
487 buf[27] = ATA_CMD_PACKET;
488
489 /* select drive and check DRQ */
490 buf[28] = (1 << 5) | ATA_REG_DEVICE | PDC_PKT_WAIT_DRDY;
491 buf[29] = dev_sel;
492
493 /* we can represent cdb lengths 2/4/6/8/10/12/14/16 */
494 BUG_ON(cdb_len & ~0x1E);
495
496 /* append the CDB as the final part */
497 buf[30] = (((cdb_len >> 1) & 7) << 5) | ATA_REG_DATA | PDC_LAST_REG;
498 memcpy(buf+31, cdb, cdb_len);
392} 499}
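
pdc_atapi_pkt() hand-builds a packet for the Promise sequencer: after three header words (control bits, S/G table address, no chained packet) the body is a stream of register-select/value byte pairs, with bit 5 set in every select byte, PDC_PKT_CLEAR_BSY/PDC_PKT_WAIT_DRDY OR'd in as side conditions, PDC_LAST_REG marking the final pair, and the raw CDB appended after it. A tiny helper capturing the pair encoding (the factoring is ours; the driver writes the bytes inline):

static unsigned int pdc_pkt_add_reg(u8 *buf, unsigned int i, u8 reg, u8 val)
{
        buf[i++] = (1 << 5) | reg;      /* select a shadow taskfile register */
        buf[i++] = val;                 /* value the sequencer loads into it */
        return i;
}

In those terms the fixed prologue above is pdc_pkt_add_reg(buf, 16, ATA_REG_NSECT, 0), then ATA_REG_LBAL, the feature/byte-count trio, and finally the ATA_CMD_PACKET write at bytes 26-27.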
393 500
394static void pdc_qc_prep(struct ata_queued_cmd *qc) 501static void pdc_qc_prep(struct ata_queued_cmd *qc)
@@ -415,6 +522,17 @@ static void pdc_qc_prep(struct ata_queued_cmd *qc)
415 pdc_pkt_footer(&qc->tf, pp->pkt, i); 522 pdc_pkt_footer(&qc->tf, pp->pkt, i);
416 break; 523 break;
417 524
525 case ATA_PROT_ATAPI:
526 ata_qc_prep(qc);
527 break;
528
529 case ATA_PROT_ATAPI_DMA:
530 ata_qc_prep(qc);
531 /*FALLTHROUGH*/
532 case ATA_PROT_ATAPI_NODATA:
533 pdc_atapi_pkt(qc);
534 break;
535
418 default: 536 default:
419 break; 537 break;
420 } 538 }
@@ -517,7 +635,7 @@ static inline unsigned int pdc_host_intr( struct ata_port *ap,
517{ 635{
518 unsigned int handled = 0; 636 unsigned int handled = 0;
519 u32 tmp; 637 u32 tmp;
520 void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr + PDC_GLOBAL_CTL; 638 void __iomem *mmio = ap->ioaddr.cmd_addr + PDC_GLOBAL_CTL;
521 639
522 tmp = readl(mmio); 640 tmp = readl(mmio);
523 if (tmp & PDC_ERR_MASK) { 641 if (tmp & PDC_ERR_MASK) {
@@ -528,6 +646,8 @@ static inline unsigned int pdc_host_intr( struct ata_port *ap,
528 switch (qc->tf.protocol) { 646 switch (qc->tf.protocol) {
529 case ATA_PROT_DMA: 647 case ATA_PROT_DMA:
530 case ATA_PROT_NODATA: 648 case ATA_PROT_NODATA:
649 case ATA_PROT_ATAPI_DMA:
650 case ATA_PROT_ATAPI_NODATA:
531 qc->err_mask |= ac_err_mask(ata_wait_idle(ap)); 651 qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
532 ata_qc_complete(qc); 652 ata_qc_complete(qc);
533 handled = 1; 653 handled = 1;
@@ -544,7 +664,7 @@ static inline unsigned int pdc_host_intr( struct ata_port *ap,
544static void pdc_irq_clear(struct ata_port *ap) 664static void pdc_irq_clear(struct ata_port *ap)
545{ 665{
546 struct ata_host *host = ap->host; 666 struct ata_host *host = ap->host;
547 void __iomem *mmio = host->mmio_base; 667 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
548 668
549 readl(mmio + PDC_INT_SEQMASK); 669 readl(mmio + PDC_INT_SEQMASK);
550} 670}
@@ -560,12 +680,12 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance)
560 680
561 VPRINTK("ENTER\n"); 681 VPRINTK("ENTER\n");
562 682
563 if (!host || !host->mmio_base) { 683 if (!host || !host->iomap[PDC_MMIO_BAR]) {
564 VPRINTK("QUICK EXIT\n"); 684 VPRINTK("QUICK EXIT\n");
565 return IRQ_NONE; 685 return IRQ_NONE;
566 } 686 }
567 687
568 mmio_base = host->mmio_base; 688 mmio_base = host->iomap[PDC_MMIO_BAR];
569 689
570 /* reading should also clear interrupts */ 690 /* reading should also clear interrupts */
571 mask = readl(mmio_base + PDC_INT_SEQMASK); 691 mask = readl(mmio_base + PDC_INT_SEQMASK);
@@ -610,32 +730,34 @@ static inline void pdc_packet_start(struct ata_queued_cmd *qc)
610{ 730{
611 struct ata_port *ap = qc->ap; 731 struct ata_port *ap = qc->ap;
612 struct pdc_port_priv *pp = ap->private_data; 732 struct pdc_port_priv *pp = ap->private_data;
733 void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
613 unsigned int port_no = ap->port_no; 734 unsigned int port_no = ap->port_no;
614 u8 seq = (u8) (port_no + 1); 735 u8 seq = (u8) (port_no + 1);
615 736
616 VPRINTK("ENTER, ap %p\n", ap); 737 VPRINTK("ENTER, ap %p\n", ap);
617 738
618 writel(0x00000001, ap->host->mmio_base + (seq * 4)); 739 writel(0x00000001, mmio + (seq * 4));
619 readl(ap->host->mmio_base + (seq * 4)); /* flush */ 740 readl(mmio + (seq * 4)); /* flush */
620 741
621 pp->pkt[2] = seq; 742 pp->pkt[2] = seq;
622 wmb(); /* flush PRD, pkt writes */ 743 wmb(); /* flush PRD, pkt writes */
623 writel(pp->pkt_dma, (void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); 744 writel(pp->pkt_dma, ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
624 readl((void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); /* flush */ 745 readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); /* flush */
625} 746}
626 747
627static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc) 748static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc)
628{ 749{
629 switch (qc->tf.protocol) { 750 switch (qc->tf.protocol) {
751 case ATA_PROT_ATAPI_NODATA:
752 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
753 break;
754 /*FALLTHROUGH*/
755 case ATA_PROT_ATAPI_DMA:
630 case ATA_PROT_DMA: 756 case ATA_PROT_DMA:
631 case ATA_PROT_NODATA: 757 case ATA_PROT_NODATA:
632 pdc_packet_start(qc); 758 pdc_packet_start(qc);
633 return 0; 759 return 0;
634 760
635 case ATA_PROT_ATAPI_DMA:
636 BUG();
637 break;
638
639 default: 761 default:
640 break; 762 break;
641 } 763 }
@@ -658,8 +780,44 @@ static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile
658 ata_exec_command(ap, tf); 780 ata_exec_command(ap, tf);
659} 781}
660 782
783static int pdc_check_atapi_dma(struct ata_queued_cmd *qc)
784{
785 u8 *scsicmd = qc->scsicmd->cmnd;
786 int pio = 1; /* atapi dma off by default */
787
788 /* Whitelist commands that may use DMA. */
789 switch (scsicmd[0]) {
790 case WRITE_12:
791 case WRITE_10:
792 case WRITE_6:
793 case READ_12:
794 case READ_10:
795 case READ_6:
796 case 0xad: /* READ_DVD_STRUCTURE */
797 case 0xbe: /* READ_CD */
798 pio = 0;
799 }
800 /* -45150 (FFFF4FA2) to -1 (FFFFFFFF) shall use PIO mode */
801 if (scsicmd[0] == WRITE_10) {
802 unsigned int lba;
803 lba = (scsicmd[2] << 24) | (scsicmd[3] << 16) | (scsicmd[4] << 8) | scsicmd[5];
804 if (lba >= 0xFFFF4FA2)
805 pio = 1;
806 }
807 return pio;
808}
661 809
662static void pdc_ata_setup_port(struct ata_ioports *port, unsigned long base) 810static int pdc_old_check_atapi_dma(struct ata_queued_cmd *qc)
811{
812 struct ata_port *ap = qc->ap;
813
814 /* First generation chips cannot use ATAPI DMA on SATA ports */
815 if (sata_scr_valid(ap))
816 return 1;
817 return pdc_check_atapi_dma(qc);
818}
819
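The whitelist is easiest to read with a worked example: bulk READ/WRITE opcodes plus READ CD and READ DVD-STRUCTURE clear pio, everything else stays PIO, and WRITE_10 is forced back to PIO inside the last 45150 LBAs of the 32-bit space, apparently sidestepping a controller problem near the top of the range. With hypothetical CDB bytes:

        u8 cdb[10] = { WRITE_10, 0, 0xFF, 0xFF, 0x60, 0x00, 0, 0, 8, 0 };
        u32 lba = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
        /* WRITE_10 cleared pio, but lba == 0xFFFF6000 >= 0xFFFF4FA2,
         * so the guard re-forces pio = 1 and the command goes out PIO */

pdc_old_check_atapi_dma() then adds the generation-1 restriction: on a SATA link (sata_scr_valid()) ATAPI DMA is refused outright.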
820static void pdc_ata_setup_port(struct ata_ioports *port, void __iomem *base)
663{ 821{
664 port->cmd_addr = base; 822 port->cmd_addr = base;
665 port->data_addr = base; 823 port->data_addr = base;
@@ -679,7 +837,7 @@ static void pdc_ata_setup_port(struct ata_ioports *port, unsigned long base)
679 837
680static void pdc_host_init(unsigned int chip_id, struct ata_probe_ent *pe) 838static void pdc_host_init(unsigned int chip_id, struct ata_probe_ent *pe)
681{ 839{
682 void __iomem *mmio = pe->mmio_base; 840 void __iomem *mmio = pe->iomap[PDC_MMIO_BAR];
683 struct pdc_host_priv *hp = pe->private_data; 841 struct pdc_host_priv *hp = pe->private_data;
684 int hotplug_offset; 842 int hotplug_offset;
685 u32 tmp; 843 u32 tmp;
@@ -733,55 +891,43 @@ static void pdc_host_init(unsigned int chip_id, struct ata_probe_ent *pe)
733static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) 891static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
734{ 892{
735 static int printed_version; 893 static int printed_version;
736 struct ata_probe_ent *probe_ent = NULL; 894 struct ata_probe_ent *probe_ent;
737 struct pdc_host_priv *hp; 895 struct pdc_host_priv *hp;
738 unsigned long base; 896 void __iomem *base;
739 void __iomem *mmio_base;
740 unsigned int board_idx = (unsigned int) ent->driver_data; 897 unsigned int board_idx = (unsigned int) ent->driver_data;
741 int pci_dev_busy = 0;
742 int rc; 898 int rc;
899 u8 tmp;
743 900
744 if (!printed_version++) 901 if (!printed_version++)
745 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); 902 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
746 903
747 rc = pci_enable_device(pdev); 904 rc = pcim_enable_device(pdev);
748 if (rc) 905 if (rc)
749 return rc; 906 return rc;
750 907
751 rc = pci_request_regions(pdev, DRV_NAME); 908 rc = pcim_iomap_regions(pdev, 1 << PDC_MMIO_BAR, DRV_NAME);
752 if (rc) { 909 if (rc == -EBUSY)
753 pci_dev_busy = 1; 910 pcim_pin_device(pdev);
754 goto err_out; 911 if (rc)
755 } 912 return rc;
756 913
757 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); 914 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
758 if (rc) 915 if (rc)
759 goto err_out_regions; 916 return rc;
760 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); 917 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
761 if (rc) 918 if (rc)
762 goto err_out_regions; 919 return rc;
763 920
764 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL); 921 probe_ent = devm_kzalloc(&pdev->dev, sizeof(*probe_ent), GFP_KERNEL);
765 if (probe_ent == NULL) { 922 if (probe_ent == NULL)
766 rc = -ENOMEM; 923 return -ENOMEM;
767 goto err_out_regions;
768 }
769 924
770 probe_ent->dev = pci_dev_to_dev(pdev); 925 probe_ent->dev = pci_dev_to_dev(pdev);
771 INIT_LIST_HEAD(&probe_ent->node); 926 INIT_LIST_HEAD(&probe_ent->node);
772 927
773 mmio_base = pci_iomap(pdev, 3, 0); 928 hp = devm_kzalloc(&pdev->dev, sizeof(*hp), GFP_KERNEL);
774 if (mmio_base == NULL) { 929 if (hp == NULL)
775 rc = -ENOMEM; 930 return -ENOMEM;
776 goto err_out_free_ent;
777 }
778 base = (unsigned long) mmio_base;
779
780 hp = kzalloc(sizeof(*hp), GFP_KERNEL);
781 if (hp == NULL) {
782 rc = -ENOMEM;
783 goto err_out_free_ent;
784 }
785 931
786 probe_ent->private_data = hp; 932 probe_ent->private_data = hp;
787 933
@@ -794,7 +940,9 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
794 940
795 probe_ent->irq = pdev->irq; 941 probe_ent->irq = pdev->irq;
796 probe_ent->irq_flags = IRQF_SHARED; 942 probe_ent->irq_flags = IRQF_SHARED;
797 probe_ent->mmio_base = mmio_base; 943 probe_ent->iomap = pcim_iomap_table(pdev);
944
945 base = probe_ent->iomap[PDC_MMIO_BAR];
798 946
799 pdc_ata_setup_port(&probe_ent->port[0], base + 0x200); 947 pdc_ata_setup_port(&probe_ent->port[0], base + 0x200);
800 pdc_ata_setup_port(&probe_ent->port[1], base + 0x280); 948 pdc_ata_setup_port(&probe_ent->port[1], base + 0x280);
@@ -820,7 +968,17 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
820 hp->flags |= PDC_FLAG_GEN_II; 968 hp->flags |= PDC_FLAG_GEN_II;
821 /* Fall through */ 969 /* Fall through */
822 case board_2037x: 970 case board_2037x:
823 probe_ent->n_ports = 2; 971 /* TX2plus boards also have a PATA port */
972 tmp = readb(base + PDC_FLASH_CTL+1);
973 if (!(tmp & 0x80)) {
974 probe_ent->n_ports = 3;
975 pdc_ata_setup_port(&probe_ent->port[2], base + 0x300);
976 hp->port_flags[2] = ATA_FLAG_SLAVE_POSS;
977 printk(KERN_INFO DRV_NAME " PATA port found\n");
978 } else
979 probe_ent->n_ports = 2;
980 hp->port_flags[0] = ATA_FLAG_SATA;
981 hp->port_flags[1] = ATA_FLAG_SATA;
824 break; 982 break;
825 case board_20619: 983 case board_20619:
826 probe_ent->n_ports = 4; 984 probe_ent->n_ports = 4;
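
The board_2037x/board_2057x case now probes for the extra TX2plus PATA port at runtime: if bit 7 of the byte after PDC_FLASH_CTL reads clear, port 2 is registered at base + 0x300 with ATA_FLAG_SLAVE_POSS, while ports 0 and 1 are tagged ATA_FLAG_SATA via the new hp->port_flags[]; pdc_port_start() copies those flags onto each ata_port and sets the cable type accordingly. As a predicate (the bit's meaning is inferred from this patch, not from a datasheet):

static int pdc_tx2plus_has_pata(void __iomem *base)
{
        return !(readb(base + PDC_FLASH_CTL + 1) & 0x80);
}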
@@ -841,22 +999,11 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
841 /* initialize adapter */ 999 /* initialize adapter */
842 pdc_host_init(board_idx, probe_ent); 1000 pdc_host_init(board_idx, probe_ent);
843 1001
844 /* FIXME: Need any other frees than hp? */
845 if (!ata_device_add(probe_ent)) 1002 if (!ata_device_add(probe_ent))
846 kfree(hp); 1003 return -ENODEV;
847
848 kfree(probe_ent);
849 1004
1005 devm_kfree(&pdev->dev, probe_ent);
850 return 0; 1006 return 0;
851
852err_out_free_ent:
853 kfree(probe_ent);
854err_out_regions:
855 pci_release_regions(pdev);
856err_out:
857 if (!pci_dev_busy)
858 pci_disable_device(pdev);
859 return rc;
860} 1007}
861 1008
862 1009
diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
index 710909df4eaf..bfa35ede6551 100644
--- a/drivers/ata/sata_qstor.c
+++ b/drivers/ata/sata_qstor.c
@@ -37,13 +37,14 @@
37#include <linux/sched.h> 37#include <linux/sched.h>
38#include <linux/device.h> 38#include <linux/device.h>
39#include <scsi/scsi_host.h> 39#include <scsi/scsi_host.h>
40#include <asm/io.h>
41#include <linux/libata.h> 40#include <linux/libata.h>
42 41
43#define DRV_NAME "sata_qstor" 42#define DRV_NAME "sata_qstor"
44#define DRV_VERSION "0.06" 43#define DRV_VERSION "0.06"
45 44
46enum { 45enum {
46 QS_MMIO_BAR = 4,
47
47 QS_PORTS = 4, 48 QS_PORTS = 4,
48 QS_MAX_PRD = LIBATA_MAX_PRD, 49 QS_MAX_PRD = LIBATA_MAX_PRD,
49 QS_CPB_ORDER = 6, 50 QS_CPB_ORDER = 6,
@@ -117,7 +118,6 @@ static int qs_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *en
117static irqreturn_t qs_intr (int irq, void *dev_instance); 118static irqreturn_t qs_intr (int irq, void *dev_instance);
118static int qs_port_start(struct ata_port *ap); 119static int qs_port_start(struct ata_port *ap);
119static void qs_host_stop(struct ata_host *host); 120static void qs_host_stop(struct ata_host *host);
120static void qs_port_stop(struct ata_port *ap);
121static void qs_phy_reset(struct ata_port *ap); 121static void qs_phy_reset(struct ata_port *ap);
122static void qs_qc_prep(struct ata_queued_cmd *qc); 122static void qs_qc_prep(struct ata_queued_cmd *qc);
123static unsigned int qs_qc_issue(struct ata_queued_cmd *qc); 123static unsigned int qs_qc_issue(struct ata_queued_cmd *qc);
@@ -157,14 +157,15 @@ static const struct ata_port_operations qs_ata_ops = {
157 .phy_reset = qs_phy_reset, 157 .phy_reset = qs_phy_reset,
158 .qc_prep = qs_qc_prep, 158 .qc_prep = qs_qc_prep,
159 .qc_issue = qs_qc_issue, 159 .qc_issue = qs_qc_issue,
160 .data_xfer = ata_mmio_data_xfer, 160 .data_xfer = ata_data_xfer,
161 .eng_timeout = qs_eng_timeout, 161 .eng_timeout = qs_eng_timeout,
162 .irq_handler = qs_intr, 162 .irq_handler = qs_intr,
163 .irq_clear = qs_irq_clear, 163 .irq_clear = qs_irq_clear,
164 .irq_on = ata_irq_on,
165 .irq_ack = ata_irq_ack,
164 .scr_read = qs_scr_read, 166 .scr_read = qs_scr_read,
165 .scr_write = qs_scr_write, 167 .scr_write = qs_scr_write,
166 .port_start = qs_port_start, 168 .port_start = qs_port_start,
167 .port_stop = qs_port_stop,
168 .host_stop = qs_host_stop, 169 .host_stop = qs_host_stop,
169 .bmdma_stop = qs_bmdma_stop, 170 .bmdma_stop = qs_bmdma_stop,
170 .bmdma_status = qs_bmdma_status, 171 .bmdma_status = qs_bmdma_status,
@@ -197,6 +198,11 @@ static struct pci_driver qs_ata_pci_driver = {
197 .remove = ata_pci_remove_one, 198 .remove = ata_pci_remove_one,
198}; 199};
199 200
201static void __iomem *qs_mmio_base(struct ata_host *host)
202{
203 return host->iomap[QS_MMIO_BAR];
204}
205
200static int qs_check_atapi_dma(struct ata_queued_cmd *qc) 206static int qs_check_atapi_dma(struct ata_queued_cmd *qc)
201{ 207{
202 return 1; /* ATAPI DMA not supported */ 208 return 1; /* ATAPI DMA not supported */
@@ -219,7 +225,7 @@ static void qs_irq_clear(struct ata_port *ap)
219 225
220static inline void qs_enter_reg_mode(struct ata_port *ap) 226static inline void qs_enter_reg_mode(struct ata_port *ap)
221{ 227{
222 u8 __iomem *chan = ap->host->mmio_base + (ap->port_no * 0x4000); 228 u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000);
223 229
224 writeb(QS_CTR0_REG, chan + QS_CCT_CTR0); 230 writeb(QS_CTR0_REG, chan + QS_CCT_CTR0);
225 readb(chan + QS_CCT_CTR0); /* flush */ 231 readb(chan + QS_CCT_CTR0); /* flush */
@@ -227,7 +233,7 @@ static inline void qs_enter_reg_mode(struct ata_port *ap)
227 233
228static inline void qs_reset_channel_logic(struct ata_port *ap) 234static inline void qs_reset_channel_logic(struct ata_port *ap)
229{ 235{
230 u8 __iomem *chan = ap->host->mmio_base + (ap->port_no * 0x4000); 236 u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000);
231 237
232 writeb(QS_CTR1_RCHN, chan + QS_CCT_CTR1); 238 writeb(QS_CTR1_RCHN, chan + QS_CCT_CTR1);
233 readb(chan + QS_CCT_CTR0); /* flush */ 239 readb(chan + QS_CCT_CTR0); /* flush */
@@ -257,14 +263,14 @@ static u32 qs_scr_read (struct ata_port *ap, unsigned int sc_reg)
257{ 263{
258 if (sc_reg > SCR_CONTROL) 264 if (sc_reg > SCR_CONTROL)
259 return ~0U; 265 return ~0U;
260 return readl((void __iomem *)(ap->ioaddr.scr_addr + (sc_reg * 8))); 266 return readl(ap->ioaddr.scr_addr + (sc_reg * 8));
261} 267}
262 268
263static void qs_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val) 269static void qs_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
264{ 270{
265 if (sc_reg > SCR_CONTROL) 271 if (sc_reg > SCR_CONTROL)
266 return; 272 return;
267 writel(val, (void __iomem *)(ap->ioaddr.scr_addr + (sc_reg * 8))); 273 writel(val, ap->ioaddr.scr_addr + (sc_reg * 8));
268} 274}
269 275
270static unsigned int qs_fill_sg(struct ata_queued_cmd *qc) 276static unsigned int qs_fill_sg(struct ata_queued_cmd *qc)
@@ -325,7 +331,7 @@ static void qs_qc_prep(struct ata_queued_cmd *qc)
325 /* host control block (HCB) */ 331 /* host control block (HCB) */
326 buf[ 0] = QS_HCB_HDR; 332 buf[ 0] = QS_HCB_HDR;
327 buf[ 1] = hflags; 333 buf[ 1] = hflags;
328 *(__le32 *)(&buf[ 4]) = cpu_to_le32(qc->nsect * ATA_SECT_SIZE); 334 *(__le32 *)(&buf[ 4]) = cpu_to_le32(qc->nbytes);
329 *(__le32 *)(&buf[ 8]) = cpu_to_le32(nelem); 335 *(__le32 *)(&buf[ 8]) = cpu_to_le32(nelem);
330 addr = ((u64)pp->pkt_dma) + QS_CPB_BYTES; 336 addr = ((u64)pp->pkt_dma) + QS_CPB_BYTES;
331 *(__le64 *)(&buf[16]) = cpu_to_le64(addr); 337 *(__le64 *)(&buf[16]) = cpu_to_le64(addr);
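
The one functional change in qs_qc_prep() is the byte count written into the host control block: qc->nbytes is the length libata already computed for the command, so it also covers transfers not expressed as whole 512-byte sectors, where the old qc->nsect * ATA_SECT_SIZE product was only valid for sector-aligned I/O. Illustrative comparison:

        /* 8-sector read: both forms give the same length */
        u32 old_len = 8 * ATA_SECT_SIZE;        /* 4096 */
        u32 new_len = qc->nbytes;               /* 4096 here; the general form */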
@@ -341,7 +347,7 @@ static void qs_qc_prep(struct ata_queued_cmd *qc)
341static inline void qs_packet_start(struct ata_queued_cmd *qc) 347static inline void qs_packet_start(struct ata_queued_cmd *qc)
342{ 348{
343 struct ata_port *ap = qc->ap; 349 struct ata_port *ap = qc->ap;
344 u8 __iomem *chan = ap->host->mmio_base + (ap->port_no * 0x4000); 350 u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000);
345 351
346 VPRINTK("ENTER, ap %p\n", ap); 352 VPRINTK("ENTER, ap %p\n", ap);
347 353
@@ -378,7 +384,7 @@ static inline unsigned int qs_intr_pkt(struct ata_host *host)
378{ 384{
379 unsigned int handled = 0; 385 unsigned int handled = 0;
380 u8 sFFE; 386 u8 sFFE;
381 u8 __iomem *mmio_base = host->mmio_base; 387 u8 __iomem *mmio_base = qs_mmio_base(host);
382 388
383 do { 389 do {
384 u32 sff0 = readl(mmio_base + QS_HST_SFF); 390 u32 sff0 = readl(mmio_base + QS_HST_SFF);
@@ -470,7 +476,7 @@ static irqreturn_t qs_intr(int irq, void *dev_instance)
470 return IRQ_RETVAL(handled); 476 return IRQ_RETVAL(handled);
471} 477}
472 478
473static void qs_ata_setup_port(struct ata_ioports *port, unsigned long base) 479static void qs_ata_setup_port(struct ata_ioports *port, void __iomem *base)
474{ 480{
475 port->cmd_addr = 481 port->cmd_addr =
476 port->data_addr = base + 0x400; 482 port->data_addr = base + 0x400;
@@ -492,7 +498,7 @@ static int qs_port_start(struct ata_port *ap)
492{ 498{
493 struct device *dev = ap->host->dev; 499 struct device *dev = ap->host->dev;
494 struct qs_port_priv *pp; 500 struct qs_port_priv *pp;
495 void __iomem *mmio_base = ap->host->mmio_base; 501 void __iomem *mmio_base = qs_mmio_base(ap->host);
496 void __iomem *chan = mmio_base + (ap->port_no * 0x4000); 502 void __iomem *chan = mmio_base + (ap->port_no * 0x4000);
497 u64 addr; 503 u64 addr;
498 int rc; 504 int rc;
@@ -501,17 +507,13 @@ static int qs_port_start(struct ata_port *ap)
501 if (rc) 507 if (rc)
502 return rc; 508 return rc;
503 qs_enter_reg_mode(ap); 509 qs_enter_reg_mode(ap);
504 pp = kzalloc(sizeof(*pp), GFP_KERNEL); 510 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
505 if (!pp) { 511 if (!pp)
506 rc = -ENOMEM; 512 return -ENOMEM;
507 goto err_out; 513 pp->pkt = dmam_alloc_coherent(dev, QS_PKT_BYTES, &pp->pkt_dma,
508 } 514 GFP_KERNEL);
509 pp->pkt = dma_alloc_coherent(dev, QS_PKT_BYTES, &pp->pkt_dma, 515 if (!pp->pkt)
510 GFP_KERNEL); 516 return -ENOMEM;
511 if (!pp->pkt) {
512 rc = -ENOMEM;
513 goto err_out_kfree;
514 }
515 memset(pp->pkt, 0, QS_PKT_BYTES); 517 memset(pp->pkt, 0, QS_PKT_BYTES);
516 ap->private_data = pp; 518 ap->private_data = pp;
517 519
@@ -519,43 +521,19 @@ static int qs_port_start(struct ata_port *ap)
519 writel((u32) addr, chan + QS_CCF_CPBA); 521 writel((u32) addr, chan + QS_CCF_CPBA);
520 writel((u32)(addr >> 32), chan + QS_CCF_CPBA + 4); 522 writel((u32)(addr >> 32), chan + QS_CCF_CPBA + 4);
521 return 0; 523 return 0;
522
523err_out_kfree:
524 kfree(pp);
525err_out:
526 ata_port_stop(ap);
527 return rc;
528}
529
530static void qs_port_stop(struct ata_port *ap)
531{
532 struct device *dev = ap->host->dev;
533 struct qs_port_priv *pp = ap->private_data;
534
535 if (pp != NULL) {
536 ap->private_data = NULL;
537 if (pp->pkt != NULL)
538 dma_free_coherent(dev, QS_PKT_BYTES, pp->pkt,
539 pp->pkt_dma);
540 kfree(pp);
541 }
542 ata_port_stop(ap);
543} 524}
544 525
545static void qs_host_stop(struct ata_host *host) 526static void qs_host_stop(struct ata_host *host)
546{ 527{
547 void __iomem *mmio_base = host->mmio_base; 528 void __iomem *mmio_base = qs_mmio_base(host);
548 struct pci_dev *pdev = to_pci_dev(host->dev);
549 529
550 writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */ 530 writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */
551 writeb(QS_CNFG3_GSRST, mmio_base + QS_HCF_CNFG3); /* global reset */ 531 writeb(QS_CNFG3_GSRST, mmio_base + QS_HCF_CNFG3); /* global reset */
552
553 pci_iounmap(pdev, mmio_base);
554} 532}
555 533
556static void qs_host_init(unsigned int chip_id, struct ata_probe_ent *pe) 534static void qs_host_init(unsigned int chip_id, struct ata_probe_ent *pe)
557{ 535{
558 void __iomem *mmio_base = pe->mmio_base; 536 void __iomem *mmio_base = pe->iomap[QS_MMIO_BAR];
559 unsigned int port_no; 537 unsigned int port_no;
560 538
561 writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */ 539 writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */
@@ -630,44 +608,34 @@ static int qs_ata_init_one(struct pci_dev *pdev,
630 const struct pci_device_id *ent) 608 const struct pci_device_id *ent)
631{ 609{
632 static int printed_version; 610 static int printed_version;
633 struct ata_probe_ent *probe_ent = NULL; 611 struct ata_probe_ent *probe_ent;
634 void __iomem *mmio_base; 612 void __iomem * const *iomap;
635 unsigned int board_idx = (unsigned int) ent->driver_data; 613 unsigned int board_idx = (unsigned int) ent->driver_data;
636 int rc, port_no; 614 int rc, port_no;
637 615
638 if (!printed_version++) 616 if (!printed_version++)
639 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); 617 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
640 618
641 rc = pci_enable_device(pdev); 619 rc = pcim_enable_device(pdev);
642 if (rc) 620 if (rc)
643 return rc; 621 return rc;
644 622
645 rc = pci_request_regions(pdev, DRV_NAME); 623 if ((pci_resource_flags(pdev, QS_MMIO_BAR) & IORESOURCE_MEM) == 0)
646 if (rc) 624 return -ENODEV;
647 goto err_out;
648
649 if ((pci_resource_flags(pdev, 4) & IORESOURCE_MEM) == 0) {
650 rc = -ENODEV;
651 goto err_out_regions;
652 }
653 625
654 mmio_base = pci_iomap(pdev, 4, 0); 626 rc = pcim_iomap_regions(pdev, 1 << QS_MMIO_BAR, DRV_NAME);
655 if (mmio_base == NULL) { 627 if (rc)
656 rc = -ENOMEM; 628 return rc;
657 goto err_out_regions; 629 iomap = pcim_iomap_table(pdev);
658 }
659 630
660 rc = qs_set_dma_masks(pdev, mmio_base); 631 rc = qs_set_dma_masks(pdev, iomap[QS_MMIO_BAR]);
661 if (rc) 632 if (rc)
662 goto err_out_iounmap; 633 return rc;
663 634
664 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL); 635 probe_ent = devm_kzalloc(&pdev->dev, sizeof(*probe_ent), GFP_KERNEL);
665 if (probe_ent == NULL) { 636 if (probe_ent == NULL)
666 rc = -ENOMEM; 637 return -ENOMEM;
667 goto err_out_iounmap;
668 }
669 638
670 memset(probe_ent, 0, sizeof(*probe_ent));
671 probe_ent->dev = pci_dev_to_dev(pdev); 639 probe_ent->dev = pci_dev_to_dev(pdev);
672 INIT_LIST_HEAD(&probe_ent->node); 640 INIT_LIST_HEAD(&probe_ent->node);
673 641
@@ -680,12 +648,12 @@ static int qs_ata_init_one(struct pci_dev *pdev,
680 648
681 probe_ent->irq = pdev->irq; 649 probe_ent->irq = pdev->irq;
682 probe_ent->irq_flags = IRQF_SHARED; 650 probe_ent->irq_flags = IRQF_SHARED;
683 probe_ent->mmio_base = mmio_base; 651 probe_ent->iomap = iomap;
684 probe_ent->n_ports = QS_PORTS; 652 probe_ent->n_ports = QS_PORTS;
685 653
686 for (port_no = 0; port_no < probe_ent->n_ports; ++port_no) { 654 for (port_no = 0; port_no < probe_ent->n_ports; ++port_no) {
687 unsigned long chan = (unsigned long)mmio_base + 655 void __iomem *chan =
688 (port_no * 0x4000); 656 probe_ent->iomap[QS_MMIO_BAR] + (port_no * 0x4000);
689 qs_ata_setup_port(&probe_ent->port[port_no], chan); 657 qs_ata_setup_port(&probe_ent->port[port_no], chan);
690 } 658 }
691 659
@@ -694,19 +662,11 @@ static int qs_ata_init_one(struct pci_dev *pdev,
694 /* initialize adapter */ 662 /* initialize adapter */
695 qs_host_init(board_idx, probe_ent); 663 qs_host_init(board_idx, probe_ent);
696 664
697 rc = ata_device_add(probe_ent); 665 if (ata_device_add(probe_ent) != QS_PORTS)
698 kfree(probe_ent); 666 return -EIO;
699 if (rc != QS_PORTS)
700 goto err_out_iounmap;
701 return 0;
702 667
703err_out_iounmap: 668 devm_kfree(&pdev->dev, probe_ent);
704 pci_iounmap(pdev, mmio_base); 669 return 0;
705err_out_regions:
706 pci_release_regions(pdev);
707err_out:
708 pci_disable_device(pdev);
709 return rc;
710} 670}
711 671
712static int __init qs_ata_init(void) 672static int __init qs_ata_init(void)
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index 7808d0369d91..dca3d3749f06 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -49,6 +49,8 @@
49#define DRV_VERSION "2.0" 49#define DRV_VERSION "2.0"
50 50
51enum { 51enum {
52 SIL_MMIO_BAR = 5,
53
52 /* 54 /*
53 * host flags 55 * host flags
54 */ 56 */
@@ -200,18 +202,18 @@ static const struct ata_port_operations sil_ops = {
200 .bmdma_status = ata_bmdma_status, 202 .bmdma_status = ata_bmdma_status,
201 .qc_prep = ata_qc_prep, 203 .qc_prep = ata_qc_prep,
202 .qc_issue = ata_qc_issue_prot, 204 .qc_issue = ata_qc_issue_prot,
203 .data_xfer = ata_mmio_data_xfer, 205 .data_xfer = ata_data_xfer,
204 .freeze = sil_freeze, 206 .freeze = sil_freeze,
205 .thaw = sil_thaw, 207 .thaw = sil_thaw,
206 .error_handler = ata_bmdma_error_handler, 208 .error_handler = ata_bmdma_error_handler,
207 .post_internal_cmd = ata_bmdma_post_internal_cmd, 209 .post_internal_cmd = ata_bmdma_post_internal_cmd,
208 .irq_handler = sil_interrupt, 210 .irq_handler = sil_interrupt,
209 .irq_clear = ata_bmdma_irq_clear, 211 .irq_clear = ata_bmdma_irq_clear,
212 .irq_on = ata_irq_on,
213 .irq_ack = ata_irq_ack,
210 .scr_read = sil_scr_read, 214 .scr_read = sil_scr_read,
211 .scr_write = sil_scr_write, 215 .scr_write = sil_scr_write,
212 .port_start = ata_port_start, 216 .port_start = ata_port_start,
213 .port_stop = ata_port_stop,
214 .host_stop = ata_pci_host_stop,
215}; 217};
216 218
217static const struct ata_port_info sil_port_info[] = { 219static const struct ata_port_info sil_port_info[] = {
@@ -297,7 +299,8 @@ static void sil_post_set_mode (struct ata_port *ap)
297{ 299{
298 struct ata_host *host = ap->host; 300 struct ata_host *host = ap->host;
299 struct ata_device *dev; 301 struct ata_device *dev;
300 void __iomem *addr = host->mmio_base + sil_port[ap->port_no].xfer_mode; 302 void __iomem *mmio_base = host->iomap[SIL_MMIO_BAR];
303 void __iomem *addr = mmio_base + sil_port[ap->port_no].xfer_mode;
301 u32 tmp, dev_mode[2]; 304 u32 tmp, dev_mode[2];
302 unsigned int i; 305 unsigned int i;
303 306
@@ -320,9 +323,9 @@ static void sil_post_set_mode (struct ata_port *ap)
320 readl(addr); /* flush */ 323 readl(addr); /* flush */
321} 324}
322 325
323static inline unsigned long sil_scr_addr(struct ata_port *ap, unsigned int sc_reg) 326static inline void __iomem *sil_scr_addr(struct ata_port *ap, unsigned int sc_reg)
324{ 327{
325 unsigned long offset = ap->ioaddr.scr_addr; 328 void __iomem *offset = ap->ioaddr.scr_addr;
326 329
327 switch (sc_reg) { 330 switch (sc_reg) {
328 case SCR_STATUS: 331 case SCR_STATUS:
@@ -341,7 +344,7 @@ static inline unsigned long sil_scr_addr(struct ata_port *ap, unsigned int sc_re
341 344
342static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg) 345static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg)
343{ 346{
344 void __iomem *mmio = (void __iomem *) sil_scr_addr(ap, sc_reg); 347 void __iomem *mmio = sil_scr_addr(ap, sc_reg);
345 if (mmio) 348 if (mmio)
346 return readl(mmio); 349 return readl(mmio);
347 return 0xffffffffU; 350 return 0xffffffffU;
@@ -349,7 +352,7 @@ static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg)
349 352
350static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val) 353static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
351{ 354{
352 void __iomem *mmio = (void __iomem *) sil_scr_addr(ap, sc_reg); 355 void __iomem *mmio = sil_scr_addr(ap, sc_reg);
353 if (mmio) 356 if (mmio)
354 writel(val, mmio); 357 writel(val, mmio);
355} 358}
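
Changing sil_scr_addr() to return void __iomem * instead of unsigned long is what lets the casts in sil_scr_read()/sil_scr_write() above go away; pointer arithmetic keeps the sparse address-space annotation intact where an integer round-trip loses it. The idea in isolation (both helpers hypothetical):

#include <linux/io.h>

/* old style: the cookie degrades to a plain integer, cast back at every use */
static unsigned long scr_addr_as_ulong(unsigned long base, unsigned int reg)
{
	return base + reg * 4;
}

/* new style: arithmetic on void __iomem * stays checkable by sparse */
static void __iomem *scr_addr_as_iomem(void __iomem *base, unsigned int reg)
{
	return base + reg * 4;
}
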
@@ -444,7 +447,7 @@ static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
444static irqreturn_t sil_interrupt(int irq, void *dev_instance) 447static irqreturn_t sil_interrupt(int irq, void *dev_instance)
445{ 448{
446 struct ata_host *host = dev_instance; 449 struct ata_host *host = dev_instance;
447 void __iomem *mmio_base = host->mmio_base; 450 void __iomem *mmio_base = host->iomap[SIL_MMIO_BAR];
448 int handled = 0; 451 int handled = 0;
449 int i; 452 int i;
450 453
@@ -476,7 +479,7 @@ static irqreturn_t sil_interrupt(int irq, void *dev_instance)
476 479
477static void sil_freeze(struct ata_port *ap) 480static void sil_freeze(struct ata_port *ap)
478{ 481{
479 void __iomem *mmio_base = ap->host->mmio_base; 482 void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
480 u32 tmp; 483 u32 tmp;
481 484
482 /* global IRQ mask doesn't block SATA IRQ, turn off explicitly */ 485 /* global IRQ mask doesn't block SATA IRQ, turn off explicitly */
@@ -491,7 +494,7 @@ static void sil_freeze(struct ata_port *ap)
491 494
492static void sil_thaw(struct ata_port *ap) 495static void sil_thaw(struct ata_port *ap)
493{ 496{
494 void __iomem *mmio_base = ap->host->mmio_base; 497 void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
495 u32 tmp; 498 u32 tmp;
496 499
497 /* clear IRQ */ 500 /* clear IRQ */
@@ -541,9 +544,9 @@ static void sil_dev_config(struct ata_port *ap, struct ata_device *dev)
541{ 544{
542 int print_info = ap->eh_context.i.flags & ATA_EHI_PRINTINFO; 545 int print_info = ap->eh_context.i.flags & ATA_EHI_PRINTINFO;
543 unsigned int n, quirks = 0; 546 unsigned int n, quirks = 0;
544 unsigned char model_num[41]; 547 unsigned char model_num[ATA_ID_PROD_LEN + 1];
545 548
546 ata_id_c_string(dev->id, model_num, ATA_ID_PROD_OFS, sizeof(model_num)); 549 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
547 550
548 for (n = 0; sil_blacklist[n].product; n++) 551 for (n = 0; sil_blacklist[n].product; n++)
549 if (!strcmp(sil_blacklist[n].product, model_num)) { 552 if (!strcmp(sil_blacklist[n].product, model_num)) {
@@ -621,38 +624,35 @@ static void sil_init_controller(struct pci_dev *pdev,
621static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) 624static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
622{ 625{
623 static int printed_version; 626 static int printed_version;
624 struct ata_probe_ent *probe_ent = NULL; 627 struct device *dev = &pdev->dev;
625 unsigned long base; 628 struct ata_probe_ent *probe_ent;
626 void __iomem *mmio_base; 629 void __iomem *mmio_base;
627 int rc; 630 int rc;
628 unsigned int i; 631 unsigned int i;
629 int pci_dev_busy = 0;
630 632
631 if (!printed_version++) 633 if (!printed_version++)
632 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); 634 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
633 635
634 rc = pci_enable_device(pdev); 636 rc = pcim_enable_device(pdev);
635 if (rc) 637 if (rc)
636 return rc; 638 return rc;
637 639
638 rc = pci_request_regions(pdev, DRV_NAME); 640 rc = pcim_iomap_regions(pdev, 1 << SIL_MMIO_BAR, DRV_NAME);
639 if (rc) { 641 if (rc == -EBUSY)
640 pci_dev_busy = 1; 642 pcim_pin_device(pdev);
641 goto err_out; 643 if (rc)
642 } 644 return rc;
643 645
644 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); 646 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
645 if (rc) 647 if (rc)
646 goto err_out_regions; 648 return rc;
647 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); 649 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
648 if (rc) 650 if (rc)
649 goto err_out_regions; 651 return rc;
650 652
651 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL); 653 probe_ent = devm_kzalloc(dev, sizeof(*probe_ent), GFP_KERNEL);
652 if (probe_ent == NULL) { 654 if (probe_ent == NULL)
653 rc = -ENOMEM; 655 return -ENOMEM;
654 goto err_out_regions;
655 }
656 656
657 INIT_LIST_HEAD(&probe_ent->node); 657 INIT_LIST_HEAD(&probe_ent->node);
658 probe_ent->dev = pci_dev_to_dev(pdev); 658 probe_ent->dev = pci_dev_to_dev(pdev);
@@ -666,22 +666,16 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
666 probe_ent->irq_flags = IRQF_SHARED; 666 probe_ent->irq_flags = IRQF_SHARED;
667 probe_ent->port_flags = sil_port_info[ent->driver_data].flags; 667 probe_ent->port_flags = sil_port_info[ent->driver_data].flags;
668 668
669 mmio_base = pci_iomap(pdev, 5, 0); 669 probe_ent->iomap = pcim_iomap_table(pdev);
670 if (mmio_base == NULL) {
671 rc = -ENOMEM;
672 goto err_out_free_ent;
673 }
674
675 probe_ent->mmio_base = mmio_base;
676 670
677 base = (unsigned long) mmio_base; 671 mmio_base = probe_ent->iomap[SIL_MMIO_BAR];
678 672
679 for (i = 0; i < probe_ent->n_ports; i++) { 673 for (i = 0; i < probe_ent->n_ports; i++) {
680 probe_ent->port[i].cmd_addr = base + sil_port[i].tf; 674 probe_ent->port[i].cmd_addr = mmio_base + sil_port[i].tf;
681 probe_ent->port[i].altstatus_addr = 675 probe_ent->port[i].altstatus_addr =
682 probe_ent->port[i].ctl_addr = base + sil_port[i].ctl; 676 probe_ent->port[i].ctl_addr = mmio_base + sil_port[i].ctl;
683 probe_ent->port[i].bmdma_addr = base + sil_port[i].bmdma; 677 probe_ent->port[i].bmdma_addr = mmio_base + sil_port[i].bmdma;
684 probe_ent->port[i].scr_addr = base + sil_port[i].scr; 678 probe_ent->port[i].scr_addr = mmio_base + sil_port[i].scr;
685 ata_std_ports(&probe_ent->port[i]); 679 ata_std_ports(&probe_ent->port[i]);
686 } 680 }
687 681
@@ -690,30 +684,25 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
690 684
691 pci_set_master(pdev); 685 pci_set_master(pdev);
692 686
693 /* FIXME: check ata_device_add return value */ 687 if (!ata_device_add(probe_ent))
694 ata_device_add(probe_ent); 688 return -ENODEV;
695 kfree(probe_ent);
696 689
690 devm_kfree(dev, probe_ent);
697 return 0; 691 return 0;
698
699err_out_free_ent:
700 kfree(probe_ent);
701err_out_regions:
702 pci_release_regions(pdev);
703err_out:
704 if (!pci_dev_busy)
705 pci_disable_device(pdev);
706 return rc;
707} 692}
708 693
709#ifdef CONFIG_PM 694#ifdef CONFIG_PM
710static int sil_pci_device_resume(struct pci_dev *pdev) 695static int sil_pci_device_resume(struct pci_dev *pdev)
711{ 696{
712 struct ata_host *host = dev_get_drvdata(&pdev->dev); 697 struct ata_host *host = dev_get_drvdata(&pdev->dev);
698 int rc;
699
700 rc = ata_pci_device_do_resume(pdev);
701 if (rc)
702 return rc;
713 703
714 ata_pci_device_do_resume(pdev);
715 sil_init_controller(pdev, host->n_ports, host->ports[0]->flags, 704 sil_init_controller(pdev, host->n_ports, host->ports[0]->flags,
716 host->mmio_base); 705 host->iomap[SIL_MMIO_BAR]);
717 ata_host_resume(host); 706 ata_host_resume(host);
718 707
719 return 0; 708 return 0;
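
The resume fix is independent of the iomap work: ata_pci_device_do_resume() can fail to re-enable the device, and the old code ignored that. The corrected shape, as the patch applies it (my_resume is an illustrative name):

#include <linux/pci.h>
#include <linux/libata.h>

static int my_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc;

	rc = ata_pci_device_do_resume(pdev);	/* may fail; don't ignore it */
	if (rc)
		return rc;

	/* ... reprogram controller registers here ... */

	ata_host_resume(host);
	return 0;
}
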
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index 5aa288d2fb86..e65e8d55da3e 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -28,7 +28,6 @@
28#include <scsi/scsi_host.h> 28#include <scsi/scsi_host.h>
29#include <scsi/scsi_cmnd.h> 29#include <scsi/scsi_cmnd.h>
30#include <linux/libata.h> 30#include <linux/libata.h>
31#include <asm/io.h>
32 31
33#define DRV_NAME "sata_sil24" 32#define DRV_NAME "sata_sil24"
34#define DRV_VERSION "0.3" 33#define DRV_VERSION "0.3"
@@ -61,6 +60,9 @@ struct sil24_port_multiplier {
61}; 60};
62 61
63enum { 62enum {
63 SIL24_HOST_BAR = 0,
64 SIL24_PORT_BAR = 2,
65
64 /* 66 /*
65 * Global controller registers (128 bytes @ BAR0) 67 * Global controller registers (128 bytes @ BAR0)
66 */ 68 */
@@ -321,12 +323,6 @@ struct sil24_port_priv {
321 struct ata_taskfile tf; /* Cached taskfile registers */ 323 struct ata_taskfile tf; /* Cached taskfile registers */
322}; 324};
323 325
324/* ap->host->private_data */
325struct sil24_host_priv {
326 void __iomem *host_base; /* global controller control (128 bytes @BAR0) */
327 void __iomem *port_base; /* port registers (4 * 8192 bytes @BAR2) */
328};
329
330static void sil24_dev_config(struct ata_port *ap, struct ata_device *dev); 326static void sil24_dev_config(struct ata_port *ap, struct ata_device *dev);
331static u8 sil24_check_status(struct ata_port *ap); 327static u8 sil24_check_status(struct ata_port *ap);
332static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg); 328static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg);
@@ -341,8 +337,6 @@ static void sil24_thaw(struct ata_port *ap);
341static void sil24_error_handler(struct ata_port *ap); 337static void sil24_error_handler(struct ata_port *ap);
342static void sil24_post_internal_cmd(struct ata_queued_cmd *qc); 338static void sil24_post_internal_cmd(struct ata_queued_cmd *qc);
343static int sil24_port_start(struct ata_port *ap); 339static int sil24_port_start(struct ata_port *ap);
344static void sil24_port_stop(struct ata_port *ap);
345static void sil24_host_stop(struct ata_host *host);
346static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); 340static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
347#ifdef CONFIG_PM 341#ifdef CONFIG_PM
348static int sil24_pci_device_resume(struct pci_dev *pdev); 342static int sil24_pci_device_resume(struct pci_dev *pdev);
@@ -362,7 +356,7 @@ static struct pci_driver sil24_pci_driver = {
362 .name = DRV_NAME, 356 .name = DRV_NAME,
363 .id_table = sil24_pci_tbl, 357 .id_table = sil24_pci_tbl,
364 .probe = sil24_init_one, 358 .probe = sil24_init_one,
365 .remove = ata_pci_remove_one, /* safe? */ 359 .remove = ata_pci_remove_one,
366#ifdef CONFIG_PM 360#ifdef CONFIG_PM
367 .suspend = ata_pci_device_suspend, 361 .suspend = ata_pci_device_suspend,
368 .resume = sil24_pci_device_resume, 362 .resume = sil24_pci_device_resume,
@@ -406,6 +400,8 @@ static const struct ata_port_operations sil24_ops = {
406 400
407 .irq_handler = sil24_interrupt, 401 .irq_handler = sil24_interrupt,
408 .irq_clear = sil24_irq_clear, 402 .irq_clear = sil24_irq_clear,
403 .irq_on = ata_dummy_irq_on,
404 .irq_ack = ata_dummy_irq_ack,
409 405
410 .scr_read = sil24_scr_read, 406 .scr_read = sil24_scr_read,
411 .scr_write = sil24_scr_write, 407 .scr_write = sil24_scr_write,
@@ -416,8 +412,6 @@ static const struct ata_port_operations sil24_ops = {
416 .post_internal_cmd = sil24_post_internal_cmd, 412 .post_internal_cmd = sil24_post_internal_cmd,
417 413
418 .port_start = sil24_port_start, 414 .port_start = sil24_port_start,
419 .port_stop = sil24_port_stop,
420 .host_stop = sil24_host_stop,
421}; 415};
422 416
423/* 417/*
@@ -467,7 +461,7 @@ static int sil24_tag(int tag)
467 461
468static void sil24_dev_config(struct ata_port *ap, struct ata_device *dev) 462static void sil24_dev_config(struct ata_port *ap, struct ata_device *dev)
469{ 463{
470 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr; 464 void __iomem *port = ap->ioaddr.cmd_addr;
471 465
472 if (dev->cdb_len == 16) 466 if (dev->cdb_len == 16)
473 writel(PORT_CS_CDB16, port + PORT_CTRL_STAT); 467 writel(PORT_CS_CDB16, port + PORT_CTRL_STAT);
@@ -478,7 +472,7 @@ static void sil24_dev_config(struct ata_port *ap, struct ata_device *dev)
478static inline void sil24_update_tf(struct ata_port *ap) 472static inline void sil24_update_tf(struct ata_port *ap)
479{ 473{
480 struct sil24_port_priv *pp = ap->private_data; 474 struct sil24_port_priv *pp = ap->private_data;
481 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr; 475 void __iomem *port = ap->ioaddr.cmd_addr;
482 struct sil24_prb __iomem *prb = port; 476 struct sil24_prb __iomem *prb = port;
483 u8 fis[6 * 4]; 477 u8 fis[6 * 4];
484 478
@@ -501,7 +495,7 @@ static int sil24_scr_map[] = {
501 495
502static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg) 496static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg)
503{ 497{
504 void __iomem *scr_addr = (void __iomem *)ap->ioaddr.scr_addr; 498 void __iomem *scr_addr = ap->ioaddr.scr_addr;
505 if (sc_reg < ARRAY_SIZE(sil24_scr_map)) { 499 if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
506 void __iomem *addr; 500 void __iomem *addr;
507 addr = scr_addr + sil24_scr_map[sc_reg] * 4; 501 addr = scr_addr + sil24_scr_map[sc_reg] * 4;
@@ -512,7 +506,7 @@ static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg)
512 506
513static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val) 507static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val)
514{ 508{
515 void __iomem *scr_addr = (void __iomem *)ap->ioaddr.scr_addr; 509 void __iomem *scr_addr = ap->ioaddr.scr_addr;
516 if (sc_reg < ARRAY_SIZE(sil24_scr_map)) { 510 if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
517 void __iomem *addr; 511 void __iomem *addr;
518 addr = scr_addr + sil24_scr_map[sc_reg] * 4; 512 addr = scr_addr + sil24_scr_map[sc_reg] * 4;
@@ -528,7 +522,7 @@ static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
528 522
529static int sil24_init_port(struct ata_port *ap) 523static int sil24_init_port(struct ata_port *ap)
530{ 524{
531 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr; 525 void __iomem *port = ap->ioaddr.cmd_addr;
532 u32 tmp; 526 u32 tmp;
533 527
534 writel(PORT_CS_INIT, port + PORT_CTRL_STAT); 528 writel(PORT_CS_INIT, port + PORT_CTRL_STAT);
@@ -544,7 +538,7 @@ static int sil24_init_port(struct ata_port *ap)
544 538
545static int sil24_softreset(struct ata_port *ap, unsigned int *class) 539static int sil24_softreset(struct ata_port *ap, unsigned int *class)
546{ 540{
547 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr; 541 void __iomem *port = ap->ioaddr.cmd_addr;
548 struct sil24_port_priv *pp = ap->private_data; 542 struct sil24_port_priv *pp = ap->private_data;
549 struct sil24_prb *prb = &pp->cmd_block[0].ata.prb; 543 struct sil24_prb *prb = &pp->cmd_block[0].ata.prb;
550 dma_addr_t paddr = pp->cmd_block_dma; 544 dma_addr_t paddr = pp->cmd_block_dma;
@@ -604,7 +598,7 @@ static int sil24_softreset(struct ata_port *ap, unsigned int *class)
604 598
605static int sil24_hardreset(struct ata_port *ap, unsigned int *class) 599static int sil24_hardreset(struct ata_port *ap, unsigned int *class)
606{ 600{
607 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr; 601 void __iomem *port = ap->ioaddr.cmd_addr;
608 const char *reason; 602 const char *reason;
609 int tout_msec, rc; 603 int tout_msec, rc;
610 u32 tmp; 604 u32 tmp;
@@ -721,7 +715,7 @@ static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc)
721{ 715{
722 struct ata_port *ap = qc->ap; 716 struct ata_port *ap = qc->ap;
723 struct sil24_port_priv *pp = ap->private_data; 717 struct sil24_port_priv *pp = ap->private_data;
724 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr; 718 void __iomem *port = ap->ioaddr.cmd_addr;
725 unsigned int tag = sil24_tag(qc->tag); 719 unsigned int tag = sil24_tag(qc->tag);
726 dma_addr_t paddr; 720 dma_addr_t paddr;
727 void __iomem *activate; 721 void __iomem *activate;
@@ -742,7 +736,7 @@ static void sil24_irq_clear(struct ata_port *ap)
742 736
743static void sil24_freeze(struct ata_port *ap) 737static void sil24_freeze(struct ata_port *ap)
744{ 738{
745 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr; 739 void __iomem *port = ap->ioaddr.cmd_addr;
746 740
747 /* Port-wide IRQ mask in HOST_CTRL doesn't really work, clear 741 /* Port-wide IRQ mask in HOST_CTRL doesn't really work, clear
748 * PORT_IRQ_ENABLE instead. 742 * PORT_IRQ_ENABLE instead.
@@ -752,7 +746,7 @@ static void sil24_freeze(struct ata_port *ap)
752 746
753static void sil24_thaw(struct ata_port *ap) 747static void sil24_thaw(struct ata_port *ap)
754{ 748{
755 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr; 749 void __iomem *port = ap->ioaddr.cmd_addr;
756 u32 tmp; 750 u32 tmp;
757 751
758 /* clear IRQ */ 752 /* clear IRQ */
@@ -765,7 +759,7 @@ static void sil24_thaw(struct ata_port *ap)
765 759
766static void sil24_error_intr(struct ata_port *ap) 760static void sil24_error_intr(struct ata_port *ap)
767{ 761{
768 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr; 762 void __iomem *port = ap->ioaddr.cmd_addr;
769 struct ata_eh_info *ehi = &ap->eh_info; 763 struct ata_eh_info *ehi = &ap->eh_info;
770 int freeze = 0; 764 int freeze = 0;
771 u32 irq_stat; 765 u32 irq_stat;
@@ -843,7 +837,7 @@ static void sil24_finish_qc(struct ata_queued_cmd *qc)
843 837
844static inline void sil24_host_intr(struct ata_port *ap) 838static inline void sil24_host_intr(struct ata_port *ap)
845{ 839{
846 void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr; 840 void __iomem *port = ap->ioaddr.cmd_addr;
847 u32 slot_stat, qc_active; 841 u32 slot_stat, qc_active;
848 int rc; 842 int rc;
849 843
@@ -878,12 +872,12 @@ static inline void sil24_host_intr(struct ata_port *ap)
878static irqreturn_t sil24_interrupt(int irq, void *dev_instance) 872static irqreturn_t sil24_interrupt(int irq, void *dev_instance)
879{ 873{
880 struct ata_host *host = dev_instance; 874 struct ata_host *host = dev_instance;
881 struct sil24_host_priv *hpriv = host->private_data; 875 void __iomem *host_base = host->iomap[SIL24_HOST_BAR];
882 unsigned handled = 0; 876 unsigned handled = 0;
883 u32 status; 877 u32 status;
884 int i; 878 int i;
885 879
886 status = readl(hpriv->host_base + HOST_IRQ_STAT); 880 status = readl(host_base + HOST_IRQ_STAT);
887 881
888 if (status == 0xffffffff) { 882 if (status == 0xffffffff) {
889 printk(KERN_ERR DRV_NAME ": IRQ status == 0xffffffff, " 883 printk(KERN_ERR DRV_NAME ": IRQ status == 0xffffffff, "
@@ -938,13 +932,6 @@ static void sil24_post_internal_cmd(struct ata_queued_cmd *qc)
938 sil24_init_port(ap); 932 sil24_init_port(ap);
939} 933}
940 934
941static inline void sil24_cblk_free(struct sil24_port_priv *pp, struct device *dev)
942{
943 const size_t cb_size = sizeof(*pp->cmd_block) * SIL24_MAX_CMDS;
944
945 dma_free_coherent(dev, cb_size, pp->cmd_block, pp->cmd_block_dma);
946}
947
948static int sil24_port_start(struct ata_port *ap) 935static int sil24_port_start(struct ata_port *ap)
949{ 936{
950 struct device *dev = ap->host->dev; 937 struct device *dev = ap->host->dev;
@@ -952,22 +939,22 @@ static int sil24_port_start(struct ata_port *ap)
952 union sil24_cmd_block *cb; 939 union sil24_cmd_block *cb;
953 size_t cb_size = sizeof(*cb) * SIL24_MAX_CMDS; 940 size_t cb_size = sizeof(*cb) * SIL24_MAX_CMDS;
954 dma_addr_t cb_dma; 941 dma_addr_t cb_dma;
955 int rc = -ENOMEM; 942 int rc;
956 943
957 pp = kzalloc(sizeof(*pp), GFP_KERNEL); 944 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
958 if (!pp) 945 if (!pp)
959 goto err_out; 946 return -ENOMEM;
960 947
961 pp->tf.command = ATA_DRDY; 948 pp->tf.command = ATA_DRDY;
962 949
963 cb = dma_alloc_coherent(dev, cb_size, &cb_dma, GFP_KERNEL); 950 cb = dmam_alloc_coherent(dev, cb_size, &cb_dma, GFP_KERNEL);
964 if (!cb) 951 if (!cb)
965 goto err_out_pp; 952 return -ENOMEM;
966 memset(cb, 0, cb_size); 953 memset(cb, 0, cb_size);
967 954
968 rc = ata_pad_alloc(ap, dev); 955 rc = ata_pad_alloc(ap, dev);
969 if (rc) 956 if (rc)
970 goto err_out_pad; 957 return rc;
971 958
972 pp->cmd_block = cb; 959 pp->cmd_block = cb;
973 pp->cmd_block_dma = cb_dma; 960 pp->cmd_block_dma = cb_dma;
@@ -975,33 +962,6 @@ static int sil24_port_start(struct ata_port *ap)
975 ap->private_data = pp; 962 ap->private_data = pp;
976 963
977 return 0; 964 return 0;
978
979err_out_pad:
980 sil24_cblk_free(pp, dev);
981err_out_pp:
982 kfree(pp);
983err_out:
984 return rc;
985}
986
987static void sil24_port_stop(struct ata_port *ap)
988{
989 struct device *dev = ap->host->dev;
990 struct sil24_port_priv *pp = ap->private_data;
991
992 sil24_cblk_free(pp, dev);
993 ata_pad_free(ap, dev);
994 kfree(pp);
995}
996
997static void sil24_host_stop(struct ata_host *host)
998{
999 struct sil24_host_priv *hpriv = host->private_data;
1000 struct pci_dev *pdev = to_pci_dev(host->dev);
1001
1002 pci_iounmap(pdev, hpriv->host_base);
1003 pci_iounmap(pdev, hpriv->port_base);
1004 kfree(hpriv);
1005} 965}
1006 966
1007static void sil24_init_controller(struct pci_dev *pdev, int n_ports, 967static void sil24_init_controller(struct pci_dev *pdev, int n_ports,
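
With port_stop() and host_stop() deleted outright, all teardown for sil24 rides on devres: devm_kzalloc() and dmam_alloc_coherent() free themselves when the device is detached, so port_start() needs no unwind labels. A reduced sketch under those assumptions (struct my_port_priv and MY_CB_SIZE are hypothetical stand-ins):

#include <linux/device.h>
#include <linux/dma-mapping.h>

#define MY_CB_SIZE 4096			/* illustrative command-block size */

struct my_port_priv {			/* hypothetical per-port state */
	void *cmd_block;
	dma_addr_t cmd_block_dma;
};

static int my_port_start(struct device *dev, struct my_port_priv **out)
{
	struct my_port_priv *pp;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;		/* nothing to unwind */

	pp->cmd_block = dmam_alloc_coherent(dev, MY_CB_SIZE,
					    &pp->cmd_block_dma, GFP_KERNEL);
	if (!pp->cmd_block)
		return -ENOMEM;		/* pp is freed by devres later */

	*out = pp;
	return 0;
}
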
@@ -1066,43 +1026,32 @@ static void sil24_init_controller(struct pci_dev *pdev, int n_ports,
1066static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 1026static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1067{ 1027{
1068 static int printed_version = 0; 1028 static int printed_version = 0;
1029 struct device *dev = &pdev->dev;
1069 unsigned int board_id = (unsigned int)ent->driver_data; 1030 unsigned int board_id = (unsigned int)ent->driver_data;
1070 struct ata_port_info *pinfo = &sil24_port_info[board_id]; 1031 struct ata_port_info *pinfo = &sil24_port_info[board_id];
1071 struct ata_probe_ent *probe_ent = NULL; 1032 struct ata_probe_ent *probe_ent;
1072 struct sil24_host_priv *hpriv = NULL; 1033 void __iomem *host_base;
1073 void __iomem *host_base = NULL; 1034 void __iomem *port_base;
1074 void __iomem *port_base = NULL;
1075 int i, rc; 1035 int i, rc;
1076 u32 tmp; 1036 u32 tmp;
1077 1037
1078 if (!printed_version++) 1038 if (!printed_version++)
1079 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); 1039 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
1080 1040
1081 rc = pci_enable_device(pdev); 1041 rc = pcim_enable_device(pdev);
1082 if (rc) 1042 if (rc)
1083 return rc; 1043 return rc;
1084 1044
1085 rc = pci_request_regions(pdev, DRV_NAME); 1045 rc = pcim_iomap_regions(pdev,
1046 (1 << SIL24_HOST_BAR) | (1 << SIL24_PORT_BAR),
1047 DRV_NAME);
1086 if (rc) 1048 if (rc)
1087 goto out_disable; 1049 return rc;
1088
1089 rc = -ENOMEM;
1090 /* map mmio registers */
1091 host_base = pci_iomap(pdev, 0, 0);
1092 if (!host_base)
1093 goto out_free;
1094 port_base = pci_iomap(pdev, 2, 0);
1095 if (!port_base)
1096 goto out_free;
1097
1098 /* allocate & init probe_ent and hpriv */
1099 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
1100 if (!probe_ent)
1101 goto out_free;
1102 1050
1103 hpriv = kzalloc(sizeof(*hpriv), GFP_KERNEL); 1051 /* allocate & init probe_ent */
1104 if (!hpriv) 1052 probe_ent = devm_kzalloc(dev, sizeof(*probe_ent), GFP_KERNEL);
1105 goto out_free; 1053 if (!probe_ent)
1054 return -ENOMEM;
1106 1055
1107 probe_ent->dev = pci_dev_to_dev(pdev); 1056 probe_ent->dev = pci_dev_to_dev(pdev);
1108 INIT_LIST_HEAD(&probe_ent->node); 1057 INIT_LIST_HEAD(&probe_ent->node);
@@ -1117,10 +1066,10 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1117 1066
1118 probe_ent->irq = pdev->irq; 1067 probe_ent->irq = pdev->irq;
1119 probe_ent->irq_flags = IRQF_SHARED; 1068 probe_ent->irq_flags = IRQF_SHARED;
1120 probe_ent->private_data = hpriv; 1069 probe_ent->iomap = pcim_iomap_table(pdev);
1121 1070
1122 hpriv->host_base = host_base; 1071 host_base = probe_ent->iomap[SIL24_HOST_BAR];
1123 hpriv->port_base = port_base; 1072 port_base = probe_ent->iomap[SIL24_PORT_BAR];
1124 1073
1125 /* 1074 /*
1126 * Configure the device 1075 * Configure the device
@@ -1132,7 +1081,7 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1132 if (rc) { 1081 if (rc) {
1133 dev_printk(KERN_ERR, &pdev->dev, 1082 dev_printk(KERN_ERR, &pdev->dev,
1134 "64-bit DMA enable failed\n"); 1083 "64-bit DMA enable failed\n");
1135 goto out_free; 1084 return rc;
1136 } 1085 }
1137 } 1086 }
1138 } else { 1087 } else {
@@ -1140,13 +1089,13 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1140 if (rc) { 1089 if (rc) {
1141 dev_printk(KERN_ERR, &pdev->dev, 1090 dev_printk(KERN_ERR, &pdev->dev,
1142 "32-bit DMA enable failed\n"); 1091 "32-bit DMA enable failed\n");
1143 goto out_free; 1092 return rc;
1144 } 1093 }
1145 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); 1094 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
1146 if (rc) { 1095 if (rc) {
1147 dev_printk(KERN_ERR, &pdev->dev, 1096 dev_printk(KERN_ERR, &pdev->dev,
1148 "32-bit consistent DMA enable failed\n"); 1097 "32-bit consistent DMA enable failed\n");
1149 goto out_free; 1098 return rc;
1150 } 1099 }
1151 } 1100 }
1152 1101
@@ -1162,11 +1111,10 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1162 } 1111 }
1163 1112
1164 for (i = 0; i < probe_ent->n_ports; i++) { 1113 for (i = 0; i < probe_ent->n_ports; i++) {
1165 unsigned long portu = 1114 void __iomem *port = port_base + i * PORT_REGS_SIZE;
1166 (unsigned long)port_base + i * PORT_REGS_SIZE;
1167 1115
1168 probe_ent->port[i].cmd_addr = portu; 1116 probe_ent->port[i].cmd_addr = port;
1169 probe_ent->port[i].scr_addr = portu + PORT_SCONTROL; 1117 probe_ent->port[i].scr_addr = port + PORT_SCONTROL;
1170 1118
1171 ata_std_ports(&probe_ent->port[i]); 1119 ata_std_ports(&probe_ent->port[i]);
1172 } 1120 }
@@ -1176,38 +1124,30 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1176 1124
1177 pci_set_master(pdev); 1125 pci_set_master(pdev);
1178 1126
1179 /* FIXME: check ata_device_add return value */ 1127 if (!ata_device_add(probe_ent))
1180 ata_device_add(probe_ent); 1128 return -ENODEV;
1181 1129
1182 kfree(probe_ent); 1130 devm_kfree(dev, probe_ent);
1183 return 0; 1131 return 0;
1184
1185 out_free:
1186 if (host_base)
1187 pci_iounmap(pdev, host_base);
1188 if (port_base)
1189 pci_iounmap(pdev, port_base);
1190 kfree(probe_ent);
1191 kfree(hpriv);
1192 pci_release_regions(pdev);
1193 out_disable:
1194 pci_disable_device(pdev);
1195 return rc;
1196} 1132}
1197 1133
1198#ifdef CONFIG_PM 1134#ifdef CONFIG_PM
1199static int sil24_pci_device_resume(struct pci_dev *pdev) 1135static int sil24_pci_device_resume(struct pci_dev *pdev)
1200{ 1136{
1201 struct ata_host *host = dev_get_drvdata(&pdev->dev); 1137 struct ata_host *host = dev_get_drvdata(&pdev->dev);
1202 struct sil24_host_priv *hpriv = host->private_data; 1138 void __iomem *host_base = host->iomap[SIL24_HOST_BAR];
1139 void __iomem *port_base = host->iomap[SIL24_PORT_BAR];
1140 int rc;
1203 1141
1204 ata_pci_device_do_resume(pdev); 1142 rc = ata_pci_device_do_resume(pdev);
1143 if (rc)
1144 return rc;
1205 1145
1206 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) 1146 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND)
1207 writel(HOST_CTRL_GLOBAL_RST, hpriv->host_base + HOST_CTRL); 1147 writel(HOST_CTRL_GLOBAL_RST, host_base + HOST_CTRL);
1208 1148
1209 sil24_init_controller(pdev, host->n_ports, host->ports[0]->flags, 1149 sil24_init_controller(pdev, host->n_ports, host->ports[0]->flags,
1210 hpriv->host_base, hpriv->port_base); 1150 host_base, port_base);
1211 1151
1212 ata_host_resume(host); 1152 ata_host_resume(host);
1213 1153
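
sil24 needs two BARs, and the managed API handles that with a bit-mask: one pcim_iomap_regions() call requests and maps both, and pcim_iomap_table() hands back one pointer per BAR, which also made the sil24_host_priv wrapper redundant. Sketch (BAR numbers mirror SIL24_HOST_BAR/SIL24_PORT_BAR; the function name is illustrative):

#include <linux/pci.h>

static int my_map_two_bars(struct pci_dev *pdev,
			   void __iomem **host_base, void __iomem **port_base)
{
	void __iomem * const *iomap;
	int rc;

	/* request + map BAR0 and BAR2 in one devres-managed call */
	rc = pcim_iomap_regions(pdev, (1 << 0) | (1 << 2), "my_drv");
	if (rc)
		return rc;

	iomap = pcim_iomap_table(pdev);
	*host_base = iomap[0];		/* global controller registers */
	*port_base = iomap[2];		/* per-port register file */
	return 0;
}
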
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
index 9c25a1e91730..49c9e2bd706f 100644
--- a/drivers/ata/sata_sis.c
+++ b/drivers/ata/sata_sis.c
@@ -40,9 +40,11 @@
40#include <linux/device.h> 40#include <linux/device.h>
41#include <scsi/scsi_host.h> 41#include <scsi/scsi_host.h>
42#include <linux/libata.h> 42#include <linux/libata.h>
43#include "libata.h"
43 44
45#undef DRV_NAME /* already defined in libata.h, for libata-core */
44#define DRV_NAME "sata_sis" 46#define DRV_NAME "sata_sis"
45#define DRV_VERSION "0.6" 47#define DRV_VERSION "0.7"
46 48
47enum { 49enum {
48 sis_180 = 0, 50 sis_180 = 0,
@@ -67,9 +69,12 @@ static u32 sis_scr_read (struct ata_port *ap, unsigned int sc_reg);
67static void sis_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); 69static void sis_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
68 70
69static const struct pci_device_id sis_pci_tbl[] = { 71static const struct pci_device_id sis_pci_tbl[] = {
70 { PCI_VDEVICE(SI, 0x180), sis_180 }, 72 { PCI_VDEVICE(SI, 0x0180), sis_180 }, /* SiS 964/180 */
71 { PCI_VDEVICE(SI, 0x181), sis_180 }, 73 { PCI_VDEVICE(SI, 0x0181), sis_180 }, /* SiS 964/180 */
72 { PCI_VDEVICE(SI, 0x182), sis_180 }, 74 { PCI_VDEVICE(SI, 0x0182), sis_180 }, /* SiS 965/965L */
75 { PCI_VDEVICE(SI, 0x0183), sis_180 }, /* SiS 965/965L */
76 { PCI_VDEVICE(SI, 0x1182), sis_180 }, /* SiS 966/966L */
77 { PCI_VDEVICE(SI, 0x1183), sis_180 }, /* SiS 966/966L */
73 78
74 { } /* terminate list */ 79 { } /* terminate list */
75}; 80};
@@ -112,18 +117,18 @@ static const struct ata_port_operations sis_ops = {
112 .bmdma_status = ata_bmdma_status, 117 .bmdma_status = ata_bmdma_status,
113 .qc_prep = ata_qc_prep, 118 .qc_prep = ata_qc_prep,
114 .qc_issue = ata_qc_issue_prot, 119 .qc_issue = ata_qc_issue_prot,
115 .data_xfer = ata_pio_data_xfer, 120 .data_xfer = ata_data_xfer,
116 .freeze = ata_bmdma_freeze, 121 .freeze = ata_bmdma_freeze,
117 .thaw = ata_bmdma_thaw, 122 .thaw = ata_bmdma_thaw,
118 .error_handler = ata_bmdma_error_handler, 123 .error_handler = ata_bmdma_error_handler,
119 .post_internal_cmd = ata_bmdma_post_internal_cmd, 124 .post_internal_cmd = ata_bmdma_post_internal_cmd,
120 .irq_handler = ata_interrupt, 125 .irq_handler = ata_interrupt,
121 .irq_clear = ata_bmdma_irq_clear, 126 .irq_clear = ata_bmdma_irq_clear,
127 .irq_on = ata_irq_on,
128 .irq_ack = ata_irq_ack,
122 .scr_read = sis_scr_read, 129 .scr_read = sis_scr_read,
123 .scr_write = sis_scr_write, 130 .scr_write = sis_scr_write,
124 .port_start = ata_port_start, 131 .port_start = ata_port_start,
125 .port_stop = ata_port_stop,
126 .host_stop = ata_host_stop,
127}; 132};
128 133
129static struct ata_port_info sis_port_info = { 134static struct ata_port_info sis_port_info = {
@@ -135,31 +140,42 @@ static struct ata_port_info sis_port_info = {
135 .port_ops = &sis_ops, 140 .port_ops = &sis_ops,
136}; 141};
137 142
138
139MODULE_AUTHOR("Uwe Koziolek"); 143MODULE_AUTHOR("Uwe Koziolek");
140MODULE_DESCRIPTION("low-level driver for Silicon Integrated Systems SATA controller"); 144MODULE_DESCRIPTION("low-level driver for Silicon Integrated Systems SATA controller");
141MODULE_LICENSE("GPL"); 145MODULE_LICENSE("GPL");
142MODULE_DEVICE_TABLE(pci, sis_pci_tbl); 146MODULE_DEVICE_TABLE(pci, sis_pci_tbl);
143MODULE_VERSION(DRV_VERSION); 147MODULE_VERSION(DRV_VERSION);
144 148
145static unsigned int get_scr_cfg_addr(unsigned int port_no, unsigned int sc_reg, int device) 149static unsigned int get_scr_cfg_addr(struct ata_port *ap, unsigned int sc_reg)
146{ 150{
151 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
147 unsigned int addr = SIS_SCR_BASE + (4 * sc_reg); 152 unsigned int addr = SIS_SCR_BASE + (4 * sc_reg);
153 u8 pmr;
148 154
149 if (port_no) { 155 if (ap->port_no) {
150 if (device == 0x182) 156 switch (pdev->device) {
151 addr += SIS182_SATA1_OFS; 157 case 0x0180:
152 else 158 case 0x0181:
153 addr += SIS180_SATA1_OFS; 159 pci_read_config_byte(pdev, SIS_PMR, &pmr);
160 if ((pmr & SIS_PMR_COMBINED) == 0)
161 addr += SIS180_SATA1_OFS;
162 break;
163
164 case 0x0182:
165 case 0x0183:
166 case 0x1182:
167 case 0x1183:
168 addr += SIS182_SATA1_OFS;
169 break;
170 }
154 } 171 }
155
156 return addr; 172 return addr;
157} 173}
158 174
159static u32 sis_scr_cfg_read (struct ata_port *ap, unsigned int sc_reg) 175static u32 sis_scr_cfg_read (struct ata_port *ap, unsigned int sc_reg)
160{ 176{
161 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 177 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
162 unsigned int cfg_addr = get_scr_cfg_addr(ap->port_no, sc_reg, pdev->device); 178 unsigned int cfg_addr = get_scr_cfg_addr(ap, sc_reg);
163 u32 val, val2 = 0; 179 u32 val, val2 = 0;
164 u8 pmr; 180 u8 pmr;
165 181
@@ -170,26 +186,28 @@ static u32 sis_scr_cfg_read (struct ata_port *ap, unsigned int sc_reg)
170 186
171 pci_read_config_dword(pdev, cfg_addr, &val); 187 pci_read_config_dword(pdev, cfg_addr, &val);
172 188
173 if ((pdev->device == 0x182) || (pmr & SIS_PMR_COMBINED)) 189 if ((pdev->device == 0x0182) || (pdev->device == 0x0183) || (pdev->device == 0x1182) ||
190 (pdev->device == 0x1183) || (pmr & SIS_PMR_COMBINED))
174 pci_read_config_dword(pdev, cfg_addr+0x10, &val2); 191 pci_read_config_dword(pdev, cfg_addr+0x10, &val2);
175 192
176 return (val|val2) & 0xfffffffb; /* avoid problems with powerdowned ports */ 193 return (val|val2) & 0xfffffffb; /* avoid problems with powerdowned ports */
177} 194}
178 195
179static void sis_scr_cfg_write (struct ata_port *ap, unsigned int scr, u32 val) 196static void sis_scr_cfg_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
180{ 197{
181 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 198 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
182 unsigned int cfg_addr = get_scr_cfg_addr(ap->port_no, scr, pdev->device); 199 unsigned int cfg_addr = get_scr_cfg_addr(ap, sc_reg);
183 u8 pmr; 200 u8 pmr;
184 201
185 if (scr == SCR_ERROR) /* doesn't exist in PCI cfg space */ 202 if (sc_reg == SCR_ERROR) /* doesn't exist in PCI cfg space */
186 return; 203 return;
187 204
188 pci_read_config_byte(pdev, SIS_PMR, &pmr); 205 pci_read_config_byte(pdev, SIS_PMR, &pmr);
189 206
190 pci_write_config_dword(pdev, cfg_addr, val); 207 pci_write_config_dword(pdev, cfg_addr, val);
191 208
192 if ((pdev->device == 0x182) || (pmr & SIS_PMR_COMBINED)) 209 if ((pdev->device == 0x0182) || (pdev->device == 0x0183) || (pdev->device == 0x1182) ||
210 (pdev->device == 0x1183) || (pmr & SIS_PMR_COMBINED))
193 pci_write_config_dword(pdev, cfg_addr+0x10, val); 211 pci_write_config_dword(pdev, cfg_addr+0x10, val);
194} 212}
195 213
@@ -207,10 +225,11 @@ static u32 sis_scr_read (struct ata_port *ap, unsigned int sc_reg)
207 225
208 pci_read_config_byte(pdev, SIS_PMR, &pmr); 226 pci_read_config_byte(pdev, SIS_PMR, &pmr);
209 227
210 val = inl(ap->ioaddr.scr_addr + (sc_reg * 4)); 228 val = ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));
211 229
212 if ((pdev->device == 0x182) || (pmr & SIS_PMR_COMBINED)) 230 if ((pdev->device == 0x0182) || (pdev->device == 0x0183) || (pdev->device == 0x1182) ||
213 val2 = inl(ap->ioaddr.scr_addr + (sc_reg * 4) + 0x10); 231 (pdev->device == 0x1183) || (pmr & SIS_PMR_COMBINED))
232 val2 = ioread32(ap->ioaddr.scr_addr + (sc_reg * 4) + 0x10);
214 233
215 return (val | val2) & 0xfffffffb; 234 return (val | val2) & 0xfffffffb;
216} 235}
@@ -228,9 +247,10 @@ static void sis_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
228 if (ap->flags & SIS_FLAG_CFGSCR) 247 if (ap->flags & SIS_FLAG_CFGSCR)
229 sis_scr_cfg_write(ap, sc_reg, val); 248 sis_scr_cfg_write(ap, sc_reg, val);
230 else { 249 else {
231 outl(val, ap->ioaddr.scr_addr + (sc_reg * 4)); 250 iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4));
232 if ((pdev->device == 0x182) || (pmr & SIS_PMR_COMBINED)) 251 if ((pdev->device == 0x0182) || (pdev->device == 0x0183) || (pdev->device == 0x1182) ||
233 outl(val, ap->ioaddr.scr_addr + (sc_reg * 4)+0x10); 252 (pdev->device == 0x1183) || (pmr & SIS_PMR_COMBINED))
253 iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4)+0x10);
234 } 254 }
235} 255}
236 256
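
The inl()/outl() calls went away because scr_addr is now an iomap cookie from pcim_iomap(), not a raw port number; ioread32()/iowrite32() accept such a cookie whether the BAR is I/O-port or MMIO backed. In isolation (helper names hypothetical):

#include <linux/io.h>
#include <linux/types.h>

static u32 my_scr_read(void __iomem *scr_base, unsigned int reg)
{
	return ioread32(scr_base + reg * 4);	/* works for PIO and MMIO BARs */
}

static void my_scr_write(void __iomem *scr_base, unsigned int reg, u32 val)
{
	iowrite32(val, scr_base + reg * 4);
}
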
@@ -241,29 +261,28 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
241 int rc; 261 int rc;
242 u32 genctl, val; 262 u32 genctl, val;
243 struct ata_port_info pi = sis_port_info, *ppi[2] = { &pi, &pi }; 263 struct ata_port_info pi = sis_port_info, *ppi[2] = { &pi, &pi };
244 int pci_dev_busy = 0;
245 u8 pmr; 264 u8 pmr;
246 u8 port2_start; 265 u8 port2_start = 0x20;
247 266
248 if (!printed_version++) 267 if (!printed_version++)
249 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n"); 268 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
250 269
251 rc = pci_enable_device(pdev); 270 rc = pcim_enable_device(pdev);
252 if (rc) 271 if (rc)
253 return rc; 272 return rc;
254 273
255 rc = pci_request_regions(pdev, DRV_NAME); 274 rc = pci_request_regions(pdev, DRV_NAME);
256 if (rc) { 275 if (rc) {
257 pci_dev_busy = 1; 276 pcim_pin_device(pdev);
258 goto err_out; 277 return rc;
259 } 278 }
260 279
261 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); 280 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
262 if (rc) 281 if (rc)
263 goto err_out_regions; 282 return rc;
264 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); 283 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
265 if (rc) 284 if (rc)
266 goto err_out_regions; 285 return rc;
267 286
268 /* check and see if the SCRs are in IO space or PCI cfg space */ 287 /* check and see if the SCRs are in IO space or PCI cfg space */
269 pci_read_config_dword(pdev, SIS_GENCTL, &genctl); 288 pci_read_config_dword(pdev, SIS_GENCTL, &genctl);
@@ -282,60 +301,79 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
282 } 301 }
283 302
284 pci_read_config_byte(pdev, SIS_PMR, &pmr); 303 pci_read_config_byte(pdev, SIS_PMR, &pmr);
285 if (ent->device != 0x182) { 304 switch (ent->device) {
305 case 0x0180:
306 case 0x0181:
307
308 /* The PATA-handling is provided by pata_sis */
309 switch (pmr & 0x30) {
310 case 0x10:
311 ppi[1] = &sis_info133;
312 break;
313
314 case 0x30:
315 ppi[0] = &sis_info133;
316 break;
317 }
286 if ((pmr & SIS_PMR_COMBINED) == 0) { 318 if ((pmr & SIS_PMR_COMBINED) == 0) {
287 dev_printk(KERN_INFO, &pdev->dev, 319 dev_printk(KERN_INFO, &pdev->dev,
288 "Detected SiS 180/181/964 chipset in SATA mode\n"); 320 "Detected SiS 180/181/964 chipset in SATA mode\n");
289 port2_start = 64; 321 port2_start = 64;
290 } 322 } else {
291 else {
292 dev_printk(KERN_INFO, &pdev->dev, 323 dev_printk(KERN_INFO, &pdev->dev,
293 "Detected SiS 180/181 chipset in combined mode\n"); 324 "Detected SiS 180/181 chipset in combined mode\n");
294 port2_start=0; 325 port2_start=0;
295 pi.flags |= ATA_FLAG_SLAVE_POSS; 326 pi.flags |= ATA_FLAG_SLAVE_POSS;
296 } 327 }
297 } 328 break;
298 else { 329
330 case 0x0182:
331 case 0x0183:
299 pci_read_config_dword ( pdev, 0x6C, &val); 332 pci_read_config_dword ( pdev, 0x6C, &val);
300 if (val & (1L << 31)) { 333 if (val & (1L << 31)) {
301 dev_printk(KERN_INFO, &pdev->dev, "Detected SiS 182/965 chipset\n"); 334 dev_printk(KERN_INFO, &pdev->dev, "Detected SiS 182/965 chipset\n");
302 pi.flags |= ATA_FLAG_SLAVE_POSS; 335 pi.flags |= ATA_FLAG_SLAVE_POSS;
303 } 336 } else {
304 else
305 dev_printk(KERN_INFO, &pdev->dev, "Detected SiS 182/965L chipset\n"); 337 dev_printk(KERN_INFO, &pdev->dev, "Detected SiS 182/965L chipset\n");
306 port2_start = 0x20; 338 }
339 break;
340
341 case 0x1182:
342 case 0x1183:
343 pci_read_config_dword(pdev, 0x64, &val);
344 if (val & 0x10000000) {
345 dev_printk(KERN_INFO, &pdev->dev, "Detected SiS 1182/1183/966L SATA controller\n");
346 } else {
347 dev_printk(KERN_INFO, &pdev->dev, "Detected SiS 1182/1183/966 SATA controller\n");
348 pi.flags |= ATA_FLAG_SLAVE_POSS;
349 }
350 break;
307 } 351 }
308 352
309 probe_ent = ata_pci_init_native_mode(pdev, ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY); 353 probe_ent = ata_pci_init_native_mode(pdev, ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
310 if (!probe_ent) { 354 if (!probe_ent)
311 rc = -ENOMEM; 355 return -ENOMEM;
312 goto err_out_regions;
313 }
314 356
315 if (!(probe_ent->port_flags & SIS_FLAG_CFGSCR)) { 357 if (!(probe_ent->port_flags & SIS_FLAG_CFGSCR)) {
316 probe_ent->port[0].scr_addr = 358 void *mmio;
317 pci_resource_start(pdev, SIS_SCR_PCI_BAR); 359
318 probe_ent->port[1].scr_addr = 360 mmio = pcim_iomap(pdev, SIS_SCR_PCI_BAR, 0);
319 pci_resource_start(pdev, SIS_SCR_PCI_BAR) + port2_start; 361 if (!mmio)
362 return -ENOMEM;
363
364 probe_ent->port[0].scr_addr = mmio;
365 probe_ent->port[1].scr_addr = mmio + port2_start;
320 } 366 }
321 367
322 pci_set_master(pdev); 368 pci_set_master(pdev);
323 pci_intx(pdev, 1); 369 pci_intx(pdev, 1);
324 370
325 /* FIXME: check ata_device_add return value */ 371 if (!ata_device_add(probe_ent))
326 ata_device_add(probe_ent); 372 return -EIO;
327 kfree(probe_ent);
328 373
374 devm_kfree(&pdev->dev, probe_ent);
329 return 0; 375 return 0;
330 376
331err_out_regions:
332 pci_release_regions(pdev);
333
334err_out:
335 if (!pci_dev_busy)
336 pci_disable_device(pdev);
337 return rc;
338
339} 377}
340 378
341static int __init sis_init(void) 379static int __init sis_init(void)
@@ -350,4 +388,3 @@ static void __exit sis_exit(void)
350 388
351module_init(sis_init); 389module_init(sis_init);
352module_exit(sis_exit); 390module_exit(sis_exit);
353
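
One subtlety in the sis conversion: on -EBUSY the driver calls pcim_pin_device() before failing, so devres will not disable a device whose regions are legitimately held by another driver (here, pata_sis may own half the chip). The idiom as an illustrative wrapper:

#include <linux/pci.h>

static int my_request_regions(struct pci_dev *pdev)
{
	int rc;

	rc = pci_request_regions(pdev, "my_drv");
	if (rc == -EBUSY)
		pcim_pin_device(pdev);	/* leave it enabled for the other owner */
	return rc;
}
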
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
index 46d8a94669b4..4e4289994204 100644
--- a/drivers/ata/sata_svw.c
+++ b/drivers/ata/sata_svw.c
@@ -116,7 +116,7 @@ static u32 k2_sata_scr_read (struct ata_port *ap, unsigned int sc_reg)
116{ 116{
117 if (sc_reg > SCR_CONTROL) 117 if (sc_reg > SCR_CONTROL)
118 return 0xffffffffU; 118 return 0xffffffffU;
119 return readl((void *) ap->ioaddr.scr_addr + (sc_reg * 4)); 119 return readl((void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
120} 120}
121 121
122 122
@@ -125,7 +125,7 @@ static void k2_sata_scr_write (struct ata_port *ap, unsigned int sc_reg,
125{ 125{
126 if (sc_reg > SCR_CONTROL) 126 if (sc_reg > SCR_CONTROL)
127 return; 127 return;
128 writel(val, (void *) ap->ioaddr.scr_addr + (sc_reg * 4)); 128 writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
129} 129}
130 130
131 131
@@ -135,31 +135,31 @@ static void k2_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
135 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR; 135 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
136 136
137 if (tf->ctl != ap->last_ctl) { 137 if (tf->ctl != ap->last_ctl) {
138 writeb(tf->ctl, (void __iomem *) ioaddr->ctl_addr); 138 writeb(tf->ctl, ioaddr->ctl_addr);
139 ap->last_ctl = tf->ctl; 139 ap->last_ctl = tf->ctl;
140 ata_wait_idle(ap); 140 ata_wait_idle(ap);
141 } 141 }
142 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { 142 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
143 writew(tf->feature | (((u16)tf->hob_feature) << 8), 143 writew(tf->feature | (((u16)tf->hob_feature) << 8),
144 (void __iomem *) ioaddr->feature_addr); 144 ioaddr->feature_addr);
145 writew(tf->nsect | (((u16)tf->hob_nsect) << 8), 145 writew(tf->nsect | (((u16)tf->hob_nsect) << 8),
146 (void __iomem *) ioaddr->nsect_addr); 146 ioaddr->nsect_addr);
147 writew(tf->lbal | (((u16)tf->hob_lbal) << 8), 147 writew(tf->lbal | (((u16)tf->hob_lbal) << 8),
148 (void __iomem *) ioaddr->lbal_addr); 148 ioaddr->lbal_addr);
149 writew(tf->lbam | (((u16)tf->hob_lbam) << 8), 149 writew(tf->lbam | (((u16)tf->hob_lbam) << 8),
150 (void __iomem *) ioaddr->lbam_addr); 150 ioaddr->lbam_addr);
151 writew(tf->lbah | (((u16)tf->hob_lbah) << 8), 151 writew(tf->lbah | (((u16)tf->hob_lbah) << 8),
152 (void __iomem *) ioaddr->lbah_addr); 152 ioaddr->lbah_addr);
153 } else if (is_addr) { 153 } else if (is_addr) {
154 writew(tf->feature, (void __iomem *) ioaddr->feature_addr); 154 writew(tf->feature, ioaddr->feature_addr);
155 writew(tf->nsect, (void __iomem *) ioaddr->nsect_addr); 155 writew(tf->nsect, ioaddr->nsect_addr);
156 writew(tf->lbal, (void __iomem *) ioaddr->lbal_addr); 156 writew(tf->lbal, ioaddr->lbal_addr);
157 writew(tf->lbam, (void __iomem *) ioaddr->lbam_addr); 157 writew(tf->lbam, ioaddr->lbam_addr);
158 writew(tf->lbah, (void __iomem *) ioaddr->lbah_addr); 158 writew(tf->lbah, ioaddr->lbah_addr);
159 } 159 }
160 160
161 if (tf->flags & ATA_TFLAG_DEVICE) 161 if (tf->flags & ATA_TFLAG_DEVICE)
162 writeb(tf->device, (void __iomem *) ioaddr->device_addr); 162 writeb(tf->device, ioaddr->device_addr);
163 163
164 ata_wait_idle(ap); 164 ata_wait_idle(ap);
165} 165}
@@ -171,12 +171,12 @@ static void k2_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
171 u16 nsect, lbal, lbam, lbah, feature; 171 u16 nsect, lbal, lbam, lbah, feature;
172 172
173 tf->command = k2_stat_check_status(ap); 173 tf->command = k2_stat_check_status(ap);
174 tf->device = readw((void __iomem *)ioaddr->device_addr); 174 tf->device = readw(ioaddr->device_addr);
175 feature = readw((void __iomem *)ioaddr->error_addr); 175 feature = readw(ioaddr->error_addr);
176 nsect = readw((void __iomem *)ioaddr->nsect_addr); 176 nsect = readw(ioaddr->nsect_addr);
177 lbal = readw((void __iomem *)ioaddr->lbal_addr); 177 lbal = readw(ioaddr->lbal_addr);
178 lbam = readw((void __iomem *)ioaddr->lbam_addr); 178 lbam = readw(ioaddr->lbam_addr);
179 lbah = readw((void __iomem *)ioaddr->lbah_addr); 179 lbah = readw(ioaddr->lbah_addr);
180 180
181 tf->feature = feature; 181 tf->feature = feature;
182 tf->nsect = nsect; 182 tf->nsect = nsect;
@@ -262,7 +262,7 @@ static void k2_bmdma_start_mmio (struct ata_queued_cmd *qc)
262 262
263static u8 k2_stat_check_status(struct ata_port *ap) 263static u8 k2_stat_check_status(struct ata_port *ap)
264{ 264{
265 return readl((void *) ap->ioaddr.status_addr); 265 return readl((void __iomem *) ap->ioaddr.status_addr);
266} 266}
267 267
268#ifdef CONFIG_PPC_OF 268#ifdef CONFIG_PPC_OF
@@ -349,21 +349,21 @@ static const struct ata_port_operations k2_sata_ops = {
349 .bmdma_status = ata_bmdma_status, 349 .bmdma_status = ata_bmdma_status,
350 .qc_prep = ata_qc_prep, 350 .qc_prep = ata_qc_prep,
351 .qc_issue = ata_qc_issue_prot, 351 .qc_issue = ata_qc_issue_prot,
352 .data_xfer = ata_mmio_data_xfer, 352 .data_xfer = ata_data_xfer,
353 .freeze = ata_bmdma_freeze, 353 .freeze = ata_bmdma_freeze,
354 .thaw = ata_bmdma_thaw, 354 .thaw = ata_bmdma_thaw,
355 .error_handler = ata_bmdma_error_handler, 355 .error_handler = ata_bmdma_error_handler,
356 .post_internal_cmd = ata_bmdma_post_internal_cmd, 356 .post_internal_cmd = ata_bmdma_post_internal_cmd,
357 .irq_handler = ata_interrupt, 357 .irq_handler = ata_interrupt,
358 .irq_clear = ata_bmdma_irq_clear, 358 .irq_clear = ata_bmdma_irq_clear,
359 .irq_on = ata_irq_on,
360 .irq_ack = ata_irq_ack,
359 .scr_read = k2_sata_scr_read, 361 .scr_read = k2_sata_scr_read,
360 .scr_write = k2_sata_scr_write, 362 .scr_write = k2_sata_scr_write,
361 .port_start = ata_port_start, 363 .port_start = ata_port_start,
362 .port_stop = ata_port_stop,
363 .host_stop = ata_pci_host_stop,
364}; 364};
365 365
366static void k2_sata_setup_port(struct ata_ioports *port, unsigned long base) 366static void k2_sata_setup_port(struct ata_ioports *port, void __iomem *base)
367{ 367{
368 port->cmd_addr = base + K2_SATA_TF_CMD_OFFSET; 368 port->cmd_addr = base + K2_SATA_TF_CMD_OFFSET;
369 port->data_addr = base + K2_SATA_TF_DATA_OFFSET; 369 port->data_addr = base + K2_SATA_TF_DATA_OFFSET;
@@ -386,12 +386,11 @@ static void k2_sata_setup_port(struct ata_ioports *port, unsigned long base)
386static int k2_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) 386static int k2_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
387{ 387{
388 static int printed_version; 388 static int printed_version;
389 struct ata_probe_ent *probe_ent = NULL; 389 struct device *dev = &pdev->dev;
390 unsigned long base; 390 struct ata_probe_ent *probe_ent;
391 void __iomem *mmio_base; 391 void __iomem *mmio_base;
392 const struct k2_board_info *board_info = 392 const struct k2_board_info *board_info =
393 &k2_board_info[ent->driver_data]; 393 &k2_board_info[ent->driver_data];
394 int pci_dev_busy = 0;
395 int rc; 394 int rc;
396 int i; 395 int i;
397 396
@@ -402,7 +401,7 @@ static int k2_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
402 * If this driver happens to only be useful on Apple's K2, then 401 * If this driver happens to only be useful on Apple's K2, then
403 * we should check that here as it has a normal Serverworks ID 402 * we should check that here as it has a normal Serverworks ID
404 */ 403 */
405 rc = pci_enable_device(pdev); 404 rc = pcim_enable_device(pdev);
406 if (rc) 405 if (rc)
407 return rc; 406 return rc;
408 /* 407 /*
@@ -412,48 +411,27 @@ static int k2_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
412 if (pci_resource_len(pdev, 5) == 0) 411 if (pci_resource_len(pdev, 5) == 0)
413 return -ENODEV; 412 return -ENODEV;
414 413
415 /* Request PCI regions */ 414 /* Request and iomap PCI regions */
416 rc = pci_request_regions(pdev, DRV_NAME); 415 rc = pcim_iomap_regions(pdev, 1 << 5, DRV_NAME);
417 if (rc) { 416 if (rc == -EBUSY)
418 pci_dev_busy = 1; 417 pcim_pin_device(pdev);
419 goto err_out; 418 if (rc)
420 } 419 return rc;
421 420
422 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); 421 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
423 if (rc) 422 if (rc)
424 goto err_out_regions; 423 return rc;
425 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); 424 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
426 if (rc) 425 if (rc)
427 goto err_out_regions; 426 return rc;
428 427
429 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL); 428 probe_ent = devm_kzalloc(dev, sizeof(*probe_ent), GFP_KERNEL);
430 if (probe_ent == NULL) { 429 if (probe_ent == NULL)
431 rc = -ENOMEM; 430 return -ENOMEM;
432 goto err_out_regions;
433 }
434 431
435 memset(probe_ent, 0, sizeof(*probe_ent));
436 probe_ent->dev = pci_dev_to_dev(pdev); 432 probe_ent->dev = pci_dev_to_dev(pdev);
437 INIT_LIST_HEAD(&probe_ent->node); 433 INIT_LIST_HEAD(&probe_ent->node);
438 434
439 mmio_base = pci_iomap(pdev, 5, 0);
440 if (mmio_base == NULL) {
441 rc = -ENOMEM;
442 goto err_out_free_ent;
443 }
444 base = (unsigned long) mmio_base;
445
446 /* Clear a magic bit in SCR1 according to Darwin, those help
447 * some funky seagate drives (though so far, those were already
448 * set by the firmware on the machines I had access to)
449 */
450 writel(readl(mmio_base + K2_SATA_SICR1_OFFSET) & ~0x00040000,
451 mmio_base + K2_SATA_SICR1_OFFSET);
452
453 /* Clear SATA error & interrupts we don't use */
454 writel(0xffffffff, mmio_base + K2_SATA_SCR_ERROR_OFFSET);
455 writel(0x0, mmio_base + K2_SATA_SIM_OFFSET);
456
457 probe_ent->sht = &k2_sata_sht; 435 probe_ent->sht = &k2_sata_sht;
458 probe_ent->port_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 436 probe_ent->port_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
459 ATA_FLAG_MMIO | board_info->port_flags; 437 ATA_FLAG_MMIO | board_info->port_flags;
@@ -461,7 +439,7 @@ static int k2_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
461 probe_ent->n_ports = 4; 439 probe_ent->n_ports = 4;
462 probe_ent->irq = pdev->irq; 440 probe_ent->irq = pdev->irq;
463 probe_ent->irq_flags = IRQF_SHARED; 441 probe_ent->irq_flags = IRQF_SHARED;
464 probe_ent->mmio_base = mmio_base; 442 probe_ent->iomap = pcim_iomap_table(pdev);
465 443
466 /* We don't care much about the PIO/UDMA masks, but the core won't like us 444 /* We don't care much about the PIO/UDMA masks, but the core won't like us
467 * if we don't fill these 445 * if we don't fill these
@@ -470,28 +448,33 @@ static int k2_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
470 probe_ent->mwdma_mask = 0x7; 448 probe_ent->mwdma_mask = 0x7;
471 probe_ent->udma_mask = 0x7f; 449 probe_ent->udma_mask = 0x7f;
472 450
451 mmio_base = probe_ent->iomap[5];
452
473 /* different controllers have different number of ports - currently 4 or 8 */ 453 /* different controllers have different number of ports - currently 4 or 8 */
474 /* All ports are on the same function. Multi-function device is no 454 /* All ports are on the same function. Multi-function device is no
475 * longer available. This should not be seen in any system. */ 455 * longer available. This should not be seen in any system. */
476 for (i = 0; i < board_info->n_ports; i++) 456 for (i = 0; i < board_info->n_ports; i++)
477 k2_sata_setup_port(&probe_ent->port[i], base + i * K2_SATA_PORT_OFFSET); 457 k2_sata_setup_port(&probe_ent->port[i],
458 mmio_base + i * K2_SATA_PORT_OFFSET);
459
460 /* Clear a magic bit in SCR1 according to Darwin, those help
461 * some funky seagate drives (though so far, those were already
462 * set by the firmware on the machines I had access to)
463 */
464 writel(readl(mmio_base + K2_SATA_SICR1_OFFSET) & ~0x00040000,
465 mmio_base + K2_SATA_SICR1_OFFSET);
466
467 /* Clear SATA error & interrupts we don't use */
468 writel(0xffffffff, mmio_base + K2_SATA_SCR_ERROR_OFFSET);
469 writel(0x0, mmio_base + K2_SATA_SIM_OFFSET);
478 470
479 pci_set_master(pdev); 471 pci_set_master(pdev);
480 472
481 /* FIXME: check ata_device_add return value */ 473 if (!ata_device_add(probe_ent))
482 ata_device_add(probe_ent); 474 return -ENODEV;
483 kfree(probe_ent);
484 475
476 devm_kfree(dev, probe_ent);
485 return 0; 477 return 0;
486
487err_out_free_ent:
488 kfree(probe_ent);
489err_out_regions:
490 pci_release_regions(pdev);
491err_out:
492 if (!pci_dev_busy)
493 pci_disable_device(pdev);
494 return rc;
495} 478}
496 479
497/* 0x240 is device ID for Apple K2 device 480/* 0x240 is device ID for Apple K2 device
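
Passing void __iomem * into k2_sata_setup_port() removes the unsigned long base shuffle entirely; the taskfile addresses stay typed from the iomap table down to each writew(). Reduced to its essentials (offsets and names hypothetical):

#include <linux/libata.h>

static void my_setup_port(struct ata_ioports *port, void __iomem *base)
{
	/* offsets are illustrative; the real ones are the K2_SATA_*_OFFSET constants */
	port->cmd_addr = base + 0x00;
	port->ctl_addr = base + 0x20;
	port->scr_addr = base + 0x40;
}
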
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index ae7992de4b08..06e87a377382 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -42,7 +42,6 @@
42#include <scsi/scsi_host.h> 42#include <scsi/scsi_host.h>
43#include <scsi/scsi_cmnd.h> 43#include <scsi/scsi_cmnd.h>
44#include <linux/libata.h> 44#include <linux/libata.h>
45#include <asm/io.h>
46#include "sata_promise.h" 45#include "sata_promise.h"
47 46
48#define DRV_NAME "sata_sx4" 47#define DRV_NAME "sata_sx4"
@@ -50,6 +49,9 @@
50 49
51 50
52enum { 51enum {
52 PDC_MMIO_BAR = 3,
53 PDC_DIMM_BAR = 4,
54
53 PDC_PRD_TBL = 0x44, /* Direct command DMA table addr */ 55 PDC_PRD_TBL = 0x44, /* Direct command DMA table addr */
54 56
55 PDC_PKT_SUBMIT = 0x40, /* Command packet pointer addr */ 57 PDC_PKT_SUBMIT = 0x40, /* Command packet pointer addr */
@@ -138,8 +140,6 @@ struct pdc_port_priv {
138}; 140};
139 141
140struct pdc_host_priv { 142struct pdc_host_priv {
141 void __iomem *dimm_mmio;
142
143 unsigned int doing_hdma; 143 unsigned int doing_hdma;
144 unsigned int hdma_prod; 144 unsigned int hdma_prod;
145 unsigned int hdma_cons; 145 unsigned int hdma_cons;
@@ -156,11 +156,9 @@ static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance);
156static void pdc_eng_timeout(struct ata_port *ap); 156static void pdc_eng_timeout(struct ata_port *ap);
157static void pdc_20621_phy_reset (struct ata_port *ap); 157static void pdc_20621_phy_reset (struct ata_port *ap);
158static int pdc_port_start(struct ata_port *ap); 158static int pdc_port_start(struct ata_port *ap);
159static void pdc_port_stop(struct ata_port *ap);
160static void pdc20621_qc_prep(struct ata_queued_cmd *qc); 159static void pdc20621_qc_prep(struct ata_queued_cmd *qc);
161static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf); 160static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
162static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf); 161static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
163static void pdc20621_host_stop(struct ata_host *host);
164static unsigned int pdc20621_dimm_init(struct ata_probe_ent *pe); 162static unsigned int pdc20621_dimm_init(struct ata_probe_ent *pe);
165static int pdc20621_detect_dimm(struct ata_probe_ent *pe); 163static int pdc20621_detect_dimm(struct ata_probe_ent *pe);
166static unsigned int pdc20621_i2c_read(struct ata_probe_ent *pe, 164static unsigned int pdc20621_i2c_read(struct ata_probe_ent *pe,
@@ -205,13 +203,13 @@ static const struct ata_port_operations pdc_20621_ops = {
205 .phy_reset = pdc_20621_phy_reset, 203 .phy_reset = pdc_20621_phy_reset,
206 .qc_prep = pdc20621_qc_prep, 204 .qc_prep = pdc20621_qc_prep,
207 .qc_issue = pdc20621_qc_issue_prot, 205 .qc_issue = pdc20621_qc_issue_prot,
208 .data_xfer = ata_mmio_data_xfer, 206 .data_xfer = ata_data_xfer,
209 .eng_timeout = pdc_eng_timeout, 207 .eng_timeout = pdc_eng_timeout,
210 .irq_handler = pdc20621_interrupt, 208 .irq_handler = pdc20621_interrupt,
211 .irq_clear = pdc20621_irq_clear, 209 .irq_clear = pdc20621_irq_clear,
210 .irq_on = ata_irq_on,
211 .irq_ack = ata_irq_ack,
212 .port_start = pdc_port_start, 212 .port_start = pdc_port_start,
213 .port_stop = pdc_port_stop,
214 .host_stop = pdc20621_host_stop,
215}; 213};
216 214
217static const struct ata_port_info pdc_port_info[] = { 215static const struct ata_port_info pdc_port_info[] = {
@@ -243,18 +241,6 @@ static struct pci_driver pdc_sata_pci_driver = {
243}; 241};
244 242
245 243
246static void pdc20621_host_stop(struct ata_host *host)
247{
248 struct pci_dev *pdev = to_pci_dev(host->dev);
249 struct pdc_host_priv *hpriv = host->private_data;
250 void __iomem *dimm_mmio = hpriv->dimm_mmio;
251
252 pci_iounmap(pdev, dimm_mmio);
253 kfree(hpriv);
254
255 pci_iounmap(pdev, host->mmio_base);
256}
257
258static int pdc_port_start(struct ata_port *ap) 244static int pdc_port_start(struct ata_port *ap)
259{ 245{
260 struct device *dev = ap->host->dev; 246 struct device *dev = ap->host->dev;
@@ -265,43 +251,19 @@ static int pdc_port_start(struct ata_port *ap)
265 if (rc) 251 if (rc)
266 return rc; 252 return rc;
267 253
268 pp = kmalloc(sizeof(*pp), GFP_KERNEL); 254 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
269 if (!pp) { 255 if (!pp)
270 rc = -ENOMEM; 256 return -ENOMEM;
271 goto err_out;
272 }
273 memset(pp, 0, sizeof(*pp));
274 257
275 pp->pkt = dma_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL); 258 pp->pkt = dmam_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL);
276 if (!pp->pkt) { 259 if (!pp->pkt)
277 rc = -ENOMEM; 260 return -ENOMEM;
278 goto err_out_kfree;
279 }
280 261
281 ap->private_data = pp; 262 ap->private_data = pp;
282 263
283 return 0; 264 return 0;
284
285err_out_kfree:
286 kfree(pp);
287err_out:
288 ata_port_stop(ap);
289 return rc;
290}
291
292
293static void pdc_port_stop(struct ata_port *ap)
294{
295 struct device *dev = ap->host->dev;
296 struct pdc_port_priv *pp = ap->private_data;
297
298 ap->private_data = NULL;
299 dma_free_coherent(dev, 128, pp->pkt, pp->pkt_dma);
300 kfree(pp);
301 ata_port_stop(ap);
302} 265}
303 266
304
305static void pdc_20621_phy_reset (struct ata_port *ap) 267static void pdc_20621_phy_reset (struct ata_port *ap)
306{ 268{
307 VPRINTK("ENTER\n"); 269 VPRINTK("ENTER\n");
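Deleting pdc_port_stop() and pdc20621_host_stop() is the payoff of the same conversion: once the per-port packet buffer comes from dmam_alloc_coherent(), its teardown is registered with the device, so no stop callback has to free it. A sketch restating the new pdc_port_start() shape with comments (not the literal patch text):

static int pdc_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct pdc_port_priv *pp;
	int rc;

	rc = ata_port_start(ap);	/* generic PRD setup, also managed */
	if (rc)
		return rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);  /* freed at detach */
	if (!pp)
		return -ENOMEM;

	/* managed coherent DMA buffer: no matching dma_free_coherent() */
	pp->pkt = dmam_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL);
	if (!pp->pkt)
		return -ENOMEM;		/* pp is reclaimed by devres too */

	ap->private_data = pp;
	return 0;
}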
@@ -452,9 +414,8 @@ static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
452 struct scatterlist *sg; 414 struct scatterlist *sg;
453 struct ata_port *ap = qc->ap; 415 struct ata_port *ap = qc->ap;
454 struct pdc_port_priv *pp = ap->private_data; 416 struct pdc_port_priv *pp = ap->private_data;
455 void __iomem *mmio = ap->host->mmio_base; 417 void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
456 struct pdc_host_priv *hpriv = ap->host->private_data; 418 void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
457 void __iomem *dimm_mmio = hpriv->dimm_mmio;
458 unsigned int portno = ap->port_no; 419 unsigned int portno = ap->port_no;
459 unsigned int i, idx, total_len = 0, sgt_len; 420 unsigned int i, idx, total_len = 0, sgt_len;
460 u32 *buf = (u32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ]; 421 u32 *buf = (u32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ];
@@ -513,9 +474,8 @@ static void pdc20621_nodata_prep(struct ata_queued_cmd *qc)
513{ 474{
514 struct ata_port *ap = qc->ap; 475 struct ata_port *ap = qc->ap;
515 struct pdc_port_priv *pp = ap->private_data; 476 struct pdc_port_priv *pp = ap->private_data;
516 void __iomem *mmio = ap->host->mmio_base; 477 void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
517 struct pdc_host_priv *hpriv = ap->host->private_data; 478 void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
518 void __iomem *dimm_mmio = hpriv->dimm_mmio;
519 unsigned int portno = ap->port_no; 479 unsigned int portno = ap->port_no;
520 unsigned int i; 480 unsigned int i;
521 481
@@ -565,7 +525,7 @@ static void __pdc20621_push_hdma(struct ata_queued_cmd *qc,
565{ 525{
566 struct ata_port *ap = qc->ap; 526 struct ata_port *ap = qc->ap;
567 struct ata_host *host = ap->host; 527 struct ata_host *host = ap->host;
568 void __iomem *mmio = host->mmio_base; 528 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
569 529
570 /* hard-code chip #0 */ 530 /* hard-code chip #0 */
571 mmio += PDC_CHIP0_OFS; 531 mmio += PDC_CHIP0_OFS;
@@ -619,8 +579,7 @@ static void pdc20621_dump_hdma(struct ata_queued_cmd *qc)
619{ 579{
620 struct ata_port *ap = qc->ap; 580 struct ata_port *ap = qc->ap;
621 unsigned int port_no = ap->port_no; 581 unsigned int port_no = ap->port_no;
622 struct pdc_host_priv *hpriv = ap->host->private_data; 582 void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
623 void *dimm_mmio = hpriv->dimm_mmio;
624 583
625 dimm_mmio += (port_no * PDC_DIMM_WINDOW_STEP); 584 dimm_mmio += (port_no * PDC_DIMM_WINDOW_STEP);
626 dimm_mmio += PDC_DIMM_HOST_PKT; 585 dimm_mmio += PDC_DIMM_HOST_PKT;
@@ -639,7 +598,7 @@ static void pdc20621_packet_start(struct ata_queued_cmd *qc)
639 struct ata_port *ap = qc->ap; 598 struct ata_port *ap = qc->ap;
640 struct ata_host *host = ap->host; 599 struct ata_host *host = ap->host;
641 unsigned int port_no = ap->port_no; 600 unsigned int port_no = ap->port_no;
642 void __iomem *mmio = host->mmio_base; 601 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
643 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE); 602 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
644 u8 seq = (u8) (port_no + 1); 603 u8 seq = (u8) (port_no + 1);
645 unsigned int port_ofs; 604 unsigned int port_ofs;
@@ -668,8 +627,8 @@ static void pdc20621_packet_start(struct ata_queued_cmd *qc)
668 readl(mmio + PDC_20621_SEQCTL + (seq * 4)); /* flush */ 627 readl(mmio + PDC_20621_SEQCTL + (seq * 4)); /* flush */
669 628
670 writel(port_ofs + PDC_DIMM_ATA_PKT, 629 writel(port_ofs + PDC_DIMM_ATA_PKT,
671 (void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); 630 ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
672 readl((void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); 631 readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
673 VPRINTK("submitted ofs 0x%x (%u), seq %u\n", 632 VPRINTK("submitted ofs 0x%x (%u), seq %u\n",
674 port_ofs + PDC_DIMM_ATA_PKT, 633 port_ofs + PDC_DIMM_ATA_PKT,
675 port_ofs + PDC_DIMM_ATA_PKT, 634 port_ofs + PDC_DIMM_ATA_PKT,
@@ -747,8 +706,8 @@ static inline unsigned int pdc20621_host_intr( struct ata_port *ap,
747 writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4)); 706 writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
748 readl(mmio + PDC_20621_SEQCTL + (seq * 4)); 707 readl(mmio + PDC_20621_SEQCTL + (seq * 4));
749 writel(port_ofs + PDC_DIMM_ATA_PKT, 708 writel(port_ofs + PDC_DIMM_ATA_PKT,
750 (void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); 709 ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
751 readl((void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); 710 readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
752 } 711 }
753 712
754 /* step two - execute ATA command */ 713 /* step two - execute ATA command */
@@ -781,7 +740,7 @@ static inline unsigned int pdc20621_host_intr( struct ata_port *ap,
781static void pdc20621_irq_clear(struct ata_port *ap) 740static void pdc20621_irq_clear(struct ata_port *ap)
782{ 741{
783 struct ata_host *host = ap->host; 742 struct ata_host *host = ap->host;
784 void __iomem *mmio = host->mmio_base; 743 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
785 744
786 mmio += PDC_CHIP0_OFS; 745 mmio += PDC_CHIP0_OFS;
787 746
@@ -799,12 +758,12 @@ static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance)
799 758
800 VPRINTK("ENTER\n"); 759 VPRINTK("ENTER\n");
801 760
802 if (!host || !host->mmio_base) { 761 if (!host || !host->iomap[PDC_MMIO_BAR]) {
803 VPRINTK("QUICK EXIT\n"); 762 VPRINTK("QUICK EXIT\n");
804 return IRQ_NONE; 763 return IRQ_NONE;
805 } 764 }
806 765
807 mmio_base = host->mmio_base; 766 mmio_base = host->iomap[PDC_MMIO_BAR];
808 767
809 /* reading should also clear interrupts */ 768 /* reading should also clear interrupts */
810 mmio_base += PDC_CHIP0_OFS; 769 mmio_base += PDC_CHIP0_OFS;
@@ -905,7 +864,7 @@ static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile
905} 864}
906 865
907 866
908static void pdc_sata_setup_port(struct ata_ioports *port, unsigned long base) 867static void pdc_sata_setup_port(struct ata_ioports *port, void __iomem *base)
909{ 868{
910 port->cmd_addr = base; 869 port->cmd_addr = base;
911 port->data_addr = base; 870 port->data_addr = base;
@@ -931,9 +890,8 @@ static void pdc20621_get_from_dimm(struct ata_probe_ent *pe, void *psource,
931 u16 idx; 890 u16 idx;
932 u8 page_mask; 891 u8 page_mask;
933 long dist; 892 long dist;
934 void __iomem *mmio = pe->mmio_base; 893 void __iomem *mmio = pe->iomap[PDC_MMIO_BAR];
935 struct pdc_host_priv *hpriv = pe->private_data; 894 void __iomem *dimm_mmio = pe->iomap[PDC_DIMM_BAR];
936 void __iomem *dimm_mmio = hpriv->dimm_mmio;
937 895
938 /* hard-code chip #0 */ 896 /* hard-code chip #0 */
939 mmio += PDC_CHIP0_OFS; 897 mmio += PDC_CHIP0_OFS;
@@ -987,9 +945,8 @@ static void pdc20621_put_to_dimm(struct ata_probe_ent *pe, void *psource,
987 u16 idx; 945 u16 idx;
988 u8 page_mask; 946 u8 page_mask;
989 long dist; 947 long dist;
990 void __iomem *mmio = pe->mmio_base; 948 void __iomem *mmio = pe->iomap[PDC_MMIO_BAR];
991 struct pdc_host_priv *hpriv = pe->private_data; 949 void __iomem *dimm_mmio = pe->iomap[PDC_DIMM_BAR];
992 void __iomem *dimm_mmio = hpriv->dimm_mmio;
993 950
994 /* hard-code chip #0 */ 951 /* hard-code chip #0 */
995 mmio += PDC_CHIP0_OFS; 952 mmio += PDC_CHIP0_OFS;
@@ -1034,7 +991,7 @@ static void pdc20621_put_to_dimm(struct ata_probe_ent *pe, void *psource,
1034static unsigned int pdc20621_i2c_read(struct ata_probe_ent *pe, u32 device, 991static unsigned int pdc20621_i2c_read(struct ata_probe_ent *pe, u32 device,
1035 u32 subaddr, u32 *pdata) 992 u32 subaddr, u32 *pdata)
1036{ 993{
1037 void __iomem *mmio = pe->mmio_base; 994 void __iomem *mmio = pe->iomap[PDC_MMIO_BAR];
1038 u32 i2creg = 0; 995 u32 i2creg = 0;
1039 u32 status; 996 u32 status;
1040 u32 count =0; 997 u32 count =0;
@@ -1093,7 +1050,7 @@ static int pdc20621_prog_dimm0(struct ata_probe_ent *pe)
1093 u32 data = 0; 1050 u32 data = 0;
1094 int size, i; 1051 int size, i;
1095 u8 bdimmsize; 1052 u8 bdimmsize;
1096 void __iomem *mmio = pe->mmio_base; 1053 void __iomem *mmio = pe->iomap[PDC_MMIO_BAR];
1097 static const struct { 1054 static const struct {
1098 unsigned int reg; 1055 unsigned int reg;
1099 unsigned int ofs; 1056 unsigned int ofs;
@@ -1155,8 +1112,8 @@ static int pdc20621_prog_dimm0(struct ata_probe_ent *pe)
1155static unsigned int pdc20621_prog_dimm_global(struct ata_probe_ent *pe) 1112static unsigned int pdc20621_prog_dimm_global(struct ata_probe_ent *pe)
1156{ 1113{
1157 u32 data, spd0; 1114 u32 data, spd0;
1158 int error, i; 1115 int error, i;
1159 void __iomem *mmio = pe->mmio_base; 1116 void __iomem *mmio = pe->iomap[PDC_MMIO_BAR];
1160 1117
1161 /* hard-code chip #0 */ 1118 /* hard-code chip #0 */
1162 mmio += PDC_CHIP0_OFS; 1119 mmio += PDC_CHIP0_OFS;
@@ -1210,7 +1167,7 @@ static unsigned int pdc20621_dimm_init(struct ata_probe_ent *pe)
1210 u32 ticks=0; 1167 u32 ticks=0;
1211 u32 clock=0; 1168 u32 clock=0;
1212 u32 fparam=0; 1169 u32 fparam=0;
1213 void __iomem *mmio = pe->mmio_base; 1170 void __iomem *mmio = pe->iomap[PDC_MMIO_BAR];
1214 1171
1215 /* hard-code chip #0 */ 1172 /* hard-code chip #0 */
1216 mmio += PDC_CHIP0_OFS; 1173 mmio += PDC_CHIP0_OFS;
@@ -1334,7 +1291,7 @@ static unsigned int pdc20621_dimm_init(struct ata_probe_ent *pe)
1334static void pdc_20621_init(struct ata_probe_ent *pe) 1291static void pdc_20621_init(struct ata_probe_ent *pe)
1335{ 1292{
1336 u32 tmp; 1293 u32 tmp;
1337 void __iomem *mmio = pe->mmio_base; 1294 void __iomem *mmio = pe->iomap[PDC_MMIO_BAR];
1338 1295
1339 /* hard-code chip #0 */ 1296 /* hard-code chip #0 */
1340 mmio += PDC_CHIP0_OFS; 1297 mmio += PDC_CHIP0_OFS;
@@ -1365,67 +1322,43 @@ static void pdc_20621_init(struct ata_probe_ent *pe)
1365static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) 1322static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1366{ 1323{
1367 static int printed_version; 1324 static int printed_version;
1368 struct ata_probe_ent *probe_ent = NULL; 1325 struct ata_probe_ent *probe_ent;
1369 unsigned long base; 1326 void __iomem *base;
1370 void __iomem *mmio_base; 1327 struct pdc_host_priv *hpriv;
1371 void __iomem *dimm_mmio = NULL;
1372 struct pdc_host_priv *hpriv = NULL;
1373 unsigned int board_idx = (unsigned int) ent->driver_data; 1328 unsigned int board_idx = (unsigned int) ent->driver_data;
1374 int pci_dev_busy = 0;
1375 int rc; 1329 int rc;
1376 1330
1377 if (!printed_version++) 1331 if (!printed_version++)
1378 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); 1332 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
1379 1333
1380 rc = pci_enable_device(pdev); 1334 rc = pcim_enable_device(pdev);
1381 if (rc) 1335 if (rc)
1382 return rc; 1336 return rc;
1383 1337
1384 rc = pci_request_regions(pdev, DRV_NAME); 1338 rc = pcim_iomap_regions(pdev, (1 << PDC_MMIO_BAR) | (1 << PDC_DIMM_BAR),
1385 if (rc) { 1339 DRV_NAME);
1386 pci_dev_busy = 1; 1340 if (rc == -EBUSY)
1387 goto err_out; 1341 pcim_pin_device(pdev);
1388 } 1342 if (rc)
1343 return rc;
1389 1344
1390 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); 1345 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
1391 if (rc) 1346 if (rc)
1392 goto err_out_regions; 1347 return rc;
1393 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); 1348 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
1394 if (rc) 1349 if (rc)
1395 goto err_out_regions; 1350 return rc;
1396 1351
1397 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL); 1352 probe_ent = devm_kzalloc(&pdev->dev, sizeof(*probe_ent), GFP_KERNEL);
1398 if (probe_ent == NULL) { 1353 if (probe_ent == NULL)
1399 rc = -ENOMEM; 1354 return -ENOMEM;
1400 goto err_out_regions;
1401 }
1402 1355
1403 memset(probe_ent, 0, sizeof(*probe_ent));
1404 probe_ent->dev = pci_dev_to_dev(pdev); 1356 probe_ent->dev = pci_dev_to_dev(pdev);
1405 INIT_LIST_HEAD(&probe_ent->node); 1357 INIT_LIST_HEAD(&probe_ent->node);
1406 1358
1407 mmio_base = pci_iomap(pdev, 3, 0); 1359 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
1408 if (mmio_base == NULL) { 1360 if (!hpriv)
1409 rc = -ENOMEM; 1361 return -ENOMEM;
1410 goto err_out_free_ent;
1411 }
1412 base = (unsigned long) mmio_base;
1413
1414 hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL);
1415 if (!hpriv) {
1416 rc = -ENOMEM;
1417 goto err_out_iounmap;
1418 }
1419 memset(hpriv, 0, sizeof(*hpriv));
1420
1421 dimm_mmio = pci_iomap(pdev, 4, 0);
1422 if (!dimm_mmio) {
1423 kfree(hpriv);
1424 rc = -ENOMEM;
1425 goto err_out_iounmap;
1426 }
1427
1428 hpriv->dimm_mmio = dimm_mmio;
1429 1362
1430 probe_ent->sht = pdc_port_info[board_idx].sht; 1363 probe_ent->sht = pdc_port_info[board_idx].sht;
1431 probe_ent->port_flags = pdc_port_info[board_idx].flags; 1364 probe_ent->port_flags = pdc_port_info[board_idx].flags;
@@ -1436,10 +1369,10 @@ static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *
1436 1369
1437 probe_ent->irq = pdev->irq; 1370 probe_ent->irq = pdev->irq;
1438 probe_ent->irq_flags = IRQF_SHARED; 1371 probe_ent->irq_flags = IRQF_SHARED;
1439 probe_ent->mmio_base = mmio_base; 1372 probe_ent->iomap = pcim_iomap_table(pdev);
1440 1373
1441 probe_ent->private_data = hpriv; 1374 probe_ent->private_data = hpriv;
1442 base += PDC_CHIP0_OFS; 1375 base = probe_ent->iomap[PDC_MMIO_BAR] + PDC_CHIP0_OFS;
1443 1376
1444 probe_ent->n_ports = 4; 1377 probe_ent->n_ports = 4;
1445 pdc_sata_setup_port(&probe_ent->port[0], base + 0x200); 1378 pdc_sata_setup_port(&probe_ent->port[0], base + 0x200);
@@ -1451,31 +1384,15 @@ static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *
1451 1384
1452 /* initialize adapter */ 1385 /* initialize adapter */
1453 /* initialize local dimm */ 1386 /* initialize local dimm */
1454 if (pdc20621_dimm_init(probe_ent)) { 1387 if (pdc20621_dimm_init(probe_ent))
1455 rc = -ENOMEM; 1388 return -ENOMEM;
1456 goto err_out_iounmap_dimm;
1457 }
1458 pdc_20621_init(probe_ent); 1389 pdc_20621_init(probe_ent);
1459 1390
1460 /* FIXME: check ata_device_add return value */ 1391 if (!ata_device_add(probe_ent))
1461 ata_device_add(probe_ent); 1392 return -ENODEV;
1462 kfree(probe_ent);
1463 1393
1394 devm_kfree(&pdev->dev, probe_ent);
1464 return 0; 1395 return 0;
1465
1466err_out_iounmap_dimm: /* only get to this label if 20621 */
1467 kfree(hpriv);
1468 pci_iounmap(pdev, dimm_mmio);
1469err_out_iounmap:
1470 pci_iounmap(pdev, mmio_base);
1471err_out_free_ent:
1472 kfree(probe_ent);
1473err_out_regions:
1474 pci_release_regions(pdev);
1475err_out:
1476 if (!pci_dev_busy)
1477 pci_disable_device(pdev);
1478 return rc;
1479} 1396}
1480 1397
1481 1398
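With both BARs mapped through pcim_iomap_regions(), every hunk that used host->mmio_base or the cached hpriv->dimm_mmio now indexes one shared per-device table instead, which is what lets struct pdc_host_priv shrink. A sketch of the resulting access pattern — pdc_read_seqctl is a made-up helper, but the constants are the ones defined at the top of this file:

static u32 pdc_read_seqctl(struct ata_host *host, unsigned int seq)
{
	/* pcim_iomap_table() pointers, stashed in host->iomap at probe time */
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];		/* BAR 3 */

	mmio += PDC_CHIP0_OFS;					/* chip #0 only */
	return readl(mmio + PDC_20621_SEQCTL + (seq * 4));
}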
diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
index a43aec62d505..80131eec68f4 100644
--- a/drivers/ata/sata_uli.c
+++ b/drivers/ata/sata_uli.c
@@ -108,7 +108,7 @@ static const struct ata_port_operations uli_ops = {
108 .bmdma_status = ata_bmdma_status, 108 .bmdma_status = ata_bmdma_status,
109 .qc_prep = ata_qc_prep, 109 .qc_prep = ata_qc_prep,
110 .qc_issue = ata_qc_issue_prot, 110 .qc_issue = ata_qc_issue_prot,
111 .data_xfer = ata_pio_data_xfer, 111 .data_xfer = ata_data_xfer,
112 112
113 .freeze = ata_bmdma_freeze, 113 .freeze = ata_bmdma_freeze,
114 .thaw = ata_bmdma_thaw, 114 .thaw = ata_bmdma_thaw,
@@ -117,13 +117,13 @@ static const struct ata_port_operations uli_ops = {
117 117
118 .irq_handler = ata_interrupt, 118 .irq_handler = ata_interrupt,
119 .irq_clear = ata_bmdma_irq_clear, 119 .irq_clear = ata_bmdma_irq_clear,
120 .irq_on = ata_irq_on,
121 .irq_ack = ata_irq_ack,
120 122
121 .scr_read = uli_scr_read, 123 .scr_read = uli_scr_read,
122 .scr_write = uli_scr_write, 124 .scr_write = uli_scr_write,
123 125
124 .port_start = ata_port_start, 126 .port_start = ata_port_start,
125 .port_stop = ata_port_stop,
126 .host_stop = ata_host_stop,
127}; 127};
128 128
129static struct ata_port_info uli_port_info = { 129static struct ata_port_info uli_port_info = {
@@ -189,62 +189,60 @@ static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
189 struct ata_port_info *ppi[2]; 189 struct ata_port_info *ppi[2];
190 int rc; 190 int rc;
191 unsigned int board_idx = (unsigned int) ent->driver_data; 191 unsigned int board_idx = (unsigned int) ent->driver_data;
192 int pci_dev_busy = 0;
193 struct uli_priv *hpriv; 192 struct uli_priv *hpriv;
193 void __iomem * const *iomap;
194 194
195 if (!printed_version++) 195 if (!printed_version++)
196 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n"); 196 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
197 197
198 rc = pci_enable_device(pdev); 198 rc = pcim_enable_device(pdev);
199 if (rc) 199 if (rc)
200 return rc; 200 return rc;
201 201
202 rc = pci_request_regions(pdev, DRV_NAME); 202 rc = pci_request_regions(pdev, DRV_NAME);
203 if (rc) { 203 if (rc) {
204 pci_dev_busy = 1; 204 pcim_pin_device(pdev);
205 goto err_out; 205 return rc;
206 } 206 }
207 207
208 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); 208 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
209 if (rc) 209 if (rc)
210 goto err_out_regions; 210 return rc;
211 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); 211 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
212 if (rc) 212 if (rc)
213 goto err_out_regions; 213 return rc;
214 214
215 ppi[0] = ppi[1] = &uli_port_info; 215 ppi[0] = ppi[1] = &uli_port_info;
216 probe_ent = ata_pci_init_native_mode(pdev, ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY); 216 probe_ent = ata_pci_init_native_mode(pdev, ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
217 if (!probe_ent) { 217 if (!probe_ent)
218 rc = -ENOMEM; 218 return -ENOMEM;
219 goto err_out_regions;
220 }
221 219
222 hpriv = kzalloc(sizeof(*hpriv), GFP_KERNEL); 220 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
223 if (!hpriv) { 221 if (!hpriv)
224 rc = -ENOMEM; 222 return -ENOMEM;
225 goto err_out_probe_ent;
226 }
227 223
228 probe_ent->private_data = hpriv; 224 probe_ent->private_data = hpriv;
229 225
226 iomap = pcim_iomap_table(pdev);
227
230 switch (board_idx) { 228 switch (board_idx) {
231 case uli_5287: 229 case uli_5287:
232 hpriv->scr_cfg_addr[0] = ULI5287_BASE; 230 hpriv->scr_cfg_addr[0] = ULI5287_BASE;
233 hpriv->scr_cfg_addr[1] = ULI5287_BASE + ULI5287_OFFS; 231 hpriv->scr_cfg_addr[1] = ULI5287_BASE + ULI5287_OFFS;
234 probe_ent->n_ports = 4; 232 probe_ent->n_ports = 4;
235 233
236 probe_ent->port[2].cmd_addr = pci_resource_start(pdev, 0) + 8; 234 probe_ent->port[2].cmd_addr = iomap[0] + 8;
237 probe_ent->port[2].altstatus_addr = 235 probe_ent->port[2].altstatus_addr =
238 probe_ent->port[2].ctl_addr = 236 probe_ent->port[2].ctl_addr = (void __iomem *)
239 (pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS) + 4; 237 ((unsigned long)iomap[1] | ATA_PCI_CTL_OFS) + 4;
240 probe_ent->port[2].bmdma_addr = pci_resource_start(pdev, 4) + 16; 238 probe_ent->port[2].bmdma_addr = iomap[4] + 16;
241 hpriv->scr_cfg_addr[2] = ULI5287_BASE + ULI5287_OFFS*4; 239 hpriv->scr_cfg_addr[2] = ULI5287_BASE + ULI5287_OFFS*4;
242 240
243 probe_ent->port[3].cmd_addr = pci_resource_start(pdev, 2) + 8; 241 probe_ent->port[3].cmd_addr = iomap[2] + 8;
244 probe_ent->port[3].altstatus_addr = 242 probe_ent->port[3].altstatus_addr =
245 probe_ent->port[3].ctl_addr = 243 probe_ent->port[3].ctl_addr = (void __iomem *)
246 (pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS) + 4; 244 ((unsigned long)iomap[3] | ATA_PCI_CTL_OFS) + 4;
247 probe_ent->port[3].bmdma_addr = pci_resource_start(pdev, 4) + 24; 245 probe_ent->port[3].bmdma_addr = iomap[4] + 24;
248 hpriv->scr_cfg_addr[3] = ULI5287_BASE + ULI5287_OFFS*5; 246 hpriv->scr_cfg_addr[3] = ULI5287_BASE + ULI5287_OFFS*5;
249 247
250 ata_std_ports(&probe_ent->port[2]); 248 ata_std_ports(&probe_ent->port[2]);
@@ -269,21 +267,11 @@ static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
269 pci_set_master(pdev); 267 pci_set_master(pdev);
270 pci_intx(pdev, 1); 268 pci_intx(pdev, 1);
271 269
272 /* FIXME: check ata_device_add return value */ 270 if (!ata_device_add(probe_ent))
273 ata_device_add(probe_ent); 271 return -ENODEV;
274 kfree(probe_ent);
275 272
273 devm_kfree(&pdev->dev, probe_ent);
276 return 0; 274 return 0;
277
278err_out_probe_ent:
279 kfree(probe_ent);
280err_out_regions:
281 pci_release_regions(pdev);
282err_out:
283 if (!pci_dev_busy)
284 pci_disable_device(pdev);
285 return rc;
286
287} 275}
288 276
289static int __init uli_init(void) 277static int __init uli_init(void)
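A recurring detail in these probes: the ata_probe_ent is only needed during probe itself, so after a devm_kzalloc() it is handed back early with devm_kfree() rather than left for automatic release at detach. The pattern reduced to its essentials (example_probe and struct scratch are illustrative names):

#include <linux/device.h>

struct scratch { int cookie; };		/* stand-in for ata_probe_ent */

static int example_probe(struct device *dev)
{
	struct scratch *tmp;

	tmp = devm_kzalloc(dev, sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	/* ... consume tmp during probe only ... */

	/* optional: devres would free it at detach anyway, but
	 * probe-only data may as well go back immediately */
	devm_kfree(dev, tmp);
	return 0;
}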
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index d3d5c0d57032..baca6d79bb0b 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -44,7 +44,6 @@
44#include <linux/device.h> 44#include <linux/device.h>
45#include <scsi/scsi_host.h> 45#include <scsi/scsi_host.h>
46#include <linux/libata.h> 46#include <linux/libata.h>
47#include <asm/io.h>
48 47
49#define DRV_NAME "sata_via" 48#define DRV_NAME "sata_via"
50#define DRV_VERSION "2.0" 49#define DRV_VERSION "2.0"
@@ -59,11 +58,14 @@ enum {
59 SATA_INT_GATE = 0x41, /* SATA interrupt gating */ 58 SATA_INT_GATE = 0x41, /* SATA interrupt gating */
60 SATA_NATIVE_MODE = 0x42, /* Native mode enable */ 59 SATA_NATIVE_MODE = 0x42, /* Native mode enable */
61 SATA_PATA_SHARING = 0x49, /* PATA/SATA sharing func ctrl */ 60 SATA_PATA_SHARING = 0x49, /* PATA/SATA sharing func ctrl */
62 61 PATA_UDMA_TIMING = 0xB3, /* PATA timing for DMA/ cable detect */
62 PATA_PIO_TIMING = 0xAB, /* PATA timing register */
63
63 PORT0 = (1 << 1), 64 PORT0 = (1 << 1),
64 PORT1 = (1 << 0), 65 PORT1 = (1 << 0),
65 ALL_PORTS = PORT0 | PORT1, 66 ALL_PORTS = PORT0 | PORT1,
66 N_PORTS = 2, 67 PATA_PORT = 2, /* PATA is port 2 */
68 N_PORTS = 3,
67 69
68 NATIVE_MODE_ALL = (1 << 7) | (1 << 6) | (1 << 5) | (1 << 4), 70 NATIVE_MODE_ALL = (1 << 7) | (1 << 6) | (1 << 5) | (1 << 4),
69 71
@@ -76,6 +78,11 @@ static u32 svia_scr_read (struct ata_port *ap, unsigned int sc_reg);
76static void svia_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); 78static void svia_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
77static void svia_noop_freeze(struct ata_port *ap); 79static void svia_noop_freeze(struct ata_port *ap);
78static void vt6420_error_handler(struct ata_port *ap); 80static void vt6420_error_handler(struct ata_port *ap);
81static void vt6421_sata_error_handler(struct ata_port *ap);
82static void vt6421_pata_error_handler(struct ata_port *ap);
83static void vt6421_set_pio_mode(struct ata_port *ap, struct ata_device *adev);
84static void vt6421_set_dma_mode(struct ata_port *ap, struct ata_device *adev);
85static int vt6421_port_start(struct ata_port *ap);
79 86
80static const struct pci_device_id svia_pci_tbl[] = { 87static const struct pci_device_id svia_pci_tbl[] = {
81 { PCI_VDEVICE(VIA, 0x5337), vt6420 }, 88 { PCI_VDEVICE(VIA, 0x5337), vt6420 },
@@ -127,7 +134,7 @@ static const struct ata_port_operations vt6420_sata_ops = {
127 134
128 .qc_prep = ata_qc_prep, 135 .qc_prep = ata_qc_prep,
129 .qc_issue = ata_qc_issue_prot, 136 .qc_issue = ata_qc_issue_prot,
130 .data_xfer = ata_pio_data_xfer, 137 .data_xfer = ata_data_xfer,
131 138
132 .freeze = svia_noop_freeze, 139 .freeze = svia_noop_freeze,
133 .thaw = ata_bmdma_thaw, 140 .thaw = ata_bmdma_thaw,
@@ -136,15 +143,49 @@ static const struct ata_port_operations vt6420_sata_ops = {
136 143
137 .irq_handler = ata_interrupt, 144 .irq_handler = ata_interrupt,
138 .irq_clear = ata_bmdma_irq_clear, 145 .irq_clear = ata_bmdma_irq_clear,
146 .irq_on = ata_irq_on,
147 .irq_ack = ata_irq_ack,
139 148
140 .port_start = ata_port_start, 149 .port_start = ata_port_start,
141 .port_stop = ata_port_stop,
142 .host_stop = ata_host_stop,
143}; 150};
144 151
145static const struct ata_port_operations vt6421_sata_ops = { 152static const struct ata_port_operations vt6421_pata_ops = {
146 .port_disable = ata_port_disable, 153 .port_disable = ata_port_disable,
154
155 .set_piomode = vt6421_set_pio_mode,
156 .set_dmamode = vt6421_set_dma_mode,
157
158 .tf_load = ata_tf_load,
159 .tf_read = ata_tf_read,
160 .check_status = ata_check_status,
161 .exec_command = ata_exec_command,
162 .dev_select = ata_std_dev_select,
163
164 .bmdma_setup = ata_bmdma_setup,
165 .bmdma_start = ata_bmdma_start,
166 .bmdma_stop = ata_bmdma_stop,
167 .bmdma_status = ata_bmdma_status,
168
169 .qc_prep = ata_qc_prep,
170 .qc_issue = ata_qc_issue_prot,
171 .data_xfer = ata_data_xfer,
172
173 .freeze = ata_bmdma_freeze,
174 .thaw = ata_bmdma_thaw,
175 .error_handler = vt6421_pata_error_handler,
176 .post_internal_cmd = ata_bmdma_post_internal_cmd,
147 177
178 .irq_handler = ata_interrupt,
179 .irq_clear = ata_bmdma_irq_clear,
180 .irq_on = ata_irq_on,
181 .irq_ack = ata_irq_ack,
182
183 .port_start = vt6421_port_start,
184};
185
186static const struct ata_port_operations vt6421_sata_ops = {
187 .port_disable = ata_port_disable,
188
148 .tf_load = ata_tf_load, 189 .tf_load = ata_tf_load,
149 .tf_read = ata_tf_read, 190 .tf_read = ata_tf_read,
150 .check_status = ata_check_status, 191 .check_status = ata_check_status,
@@ -158,22 +199,22 @@ static const struct ata_port_operations vt6421_sata_ops = {
158 199
159 .qc_prep = ata_qc_prep, 200 .qc_prep = ata_qc_prep,
160 .qc_issue = ata_qc_issue_prot, 201 .qc_issue = ata_qc_issue_prot,
161 .data_xfer = ata_pio_data_xfer, 202 .data_xfer = ata_data_xfer,
162 203
163 .freeze = ata_bmdma_freeze, 204 .freeze = ata_bmdma_freeze,
164 .thaw = ata_bmdma_thaw, 205 .thaw = ata_bmdma_thaw,
165 .error_handler = ata_bmdma_error_handler, 206 .error_handler = vt6421_sata_error_handler,
166 .post_internal_cmd = ata_bmdma_post_internal_cmd, 207 .post_internal_cmd = ata_bmdma_post_internal_cmd,
167 208
168 .irq_handler = ata_interrupt, 209 .irq_handler = ata_interrupt,
169 .irq_clear = ata_bmdma_irq_clear, 210 .irq_clear = ata_bmdma_irq_clear,
211 .irq_on = ata_irq_on,
212 .irq_ack = ata_irq_ack,
170 213
171 .scr_read = svia_scr_read, 214 .scr_read = svia_scr_read,
172 .scr_write = svia_scr_write, 215 .scr_write = svia_scr_write,
173 216
174 .port_start = ata_port_start, 217 .port_start = vt6421_port_start,
175 .port_stop = ata_port_stop,
176 .host_stop = ata_host_stop,
177}; 218};
178 219
179static struct ata_port_info vt6420_port_info = { 220static struct ata_port_info vt6420_port_info = {
@@ -195,14 +236,14 @@ static u32 svia_scr_read (struct ata_port *ap, unsigned int sc_reg)
195{ 236{
196 if (sc_reg > SCR_CONTROL) 237 if (sc_reg > SCR_CONTROL)
197 return 0xffffffffU; 238 return 0xffffffffU;
198 return inl(ap->ioaddr.scr_addr + (4 * sc_reg)); 239 return ioread32(ap->ioaddr.scr_addr + (4 * sc_reg));
199} 240}
200 241
201static void svia_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val) 242static void svia_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
202{ 243{
203 if (sc_reg > SCR_CONTROL) 244 if (sc_reg > SCR_CONTROL)
204 return; 245 return;
205 outl(val, ap->ioaddr.scr_addr + (4 * sc_reg)); 246 iowrite32(val, ap->ioaddr.scr_addr + (4 * sc_reg));
206} 247}
207 248
208static void svia_noop_freeze(struct ata_port *ap) 249static void svia_noop_freeze(struct ata_port *ap)
@@ -289,6 +330,61 @@ static void vt6420_error_handler(struct ata_port *ap)
289 NULL, ata_std_postreset); 330 NULL, ata_std_postreset);
290} 331}
291 332
333static int vt6421_pata_prereset(struct ata_port *ap)
334{
335 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
336 u8 tmp;
337
338 pci_read_config_byte(pdev, PATA_UDMA_TIMING, &tmp);
339 if (tmp & 0x10)
340 ap->cbl = ATA_CBL_PATA40;
341 else
342 ap->cbl = ATA_CBL_PATA80;
343 return 0;
344}
345
346static void vt6421_pata_error_handler(struct ata_port *ap)
347{
348 return ata_bmdma_drive_eh(ap, vt6421_pata_prereset, ata_std_softreset,
349 NULL, ata_std_postreset);
350}
351
352static int vt6421_sata_prereset(struct ata_port *ap)
353{
354 ap->cbl = ATA_CBL_SATA;
355 return 0;
356}
357
358static void vt6421_sata_error_handler(struct ata_port *ap)
359{
360 return ata_bmdma_drive_eh(ap, vt6421_sata_prereset, ata_std_softreset,
361 NULL, ata_std_postreset);
362}
363
364static void vt6421_set_pio_mode(struct ata_port *ap, struct ata_device *adev)
365{
366 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
367 static const u8 pio_bits[] = { 0xA8, 0x65, 0x65, 0x31, 0x20 };
368 pci_write_config_byte(pdev, PATA_PIO_TIMING, pio_bits[adev->pio_mode - XFER_PIO_0]);
369}
370
371static void vt6421_set_dma_mode(struct ata_port *ap, struct ata_device *adev)
372{
373 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
374 static const u8 udma_bits[] = { 0xEE, 0xE8, 0xE6, 0xE4, 0xE2, 0xE1, 0xE0, 0xE0 };
 375 pci_write_config_byte(pdev, PATA_UDMA_TIMING, udma_bits[adev->dma_mode - XFER_UDMA_0]);
376}
377
378static int vt6421_port_start(struct ata_port *ap)
379{
380 if (ap->port_no == PATA_PORT) {
381 ap->ops = &vt6421_pata_ops;
382 ap->mwdma_mask = 0;
383 ap->flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_NO_LEGACY | ATA_FLAG_SRST;
384 }
385 return ata_port_start(ap);
386}
387
292static const unsigned int svia_bar_sizes[] = { 388static const unsigned int svia_bar_sizes[] = {
293 8, 4, 8, 4, 16, 256 389 8, 4, 8, 4, 16, 256
294}; 390};
@@ -297,31 +393,28 @@ static const unsigned int vt6421_bar_sizes[] = {
297 16, 16, 16, 16, 32, 128 393 16, 16, 16, 16, 32, 128
298}; 394};
299 395
300static unsigned long svia_scr_addr(unsigned long addr, unsigned int port) 396static void __iomem * svia_scr_addr(void __iomem *addr, unsigned int port)
301{ 397{
302 return addr + (port * 128); 398 return addr + (port * 128);
303} 399}
304 400
305static unsigned long vt6421_scr_addr(unsigned long addr, unsigned int port) 401static void __iomem * vt6421_scr_addr(void __iomem *addr, unsigned int port)
306{ 402{
307 return addr + (port * 64); 403 return addr + (port * 64);
308} 404}
309 405
310static void vt6421_init_addrs(struct ata_probe_ent *probe_ent, 406static void vt6421_init_addrs(struct ata_probe_ent *probe_ent,
311 struct pci_dev *pdev, 407 void __iomem * const *iomap, unsigned int port)
312 unsigned int port)
313{ 408{
314 unsigned long reg_addr = pci_resource_start(pdev, port); 409 void __iomem *reg_addr = iomap[port];
315 unsigned long bmdma_addr = pci_resource_start(pdev, 4) + (port * 8); 410 void __iomem *bmdma_addr = iomap[4] + (port * 8);
316 unsigned long scr_addr;
317 411
318 probe_ent->port[port].cmd_addr = reg_addr; 412 probe_ent->port[port].cmd_addr = reg_addr;
319 probe_ent->port[port].altstatus_addr = 413 probe_ent->port[port].altstatus_addr =
320 probe_ent->port[port].ctl_addr = (reg_addr + 8) | ATA_PCI_CTL_OFS; 414 probe_ent->port[port].ctl_addr = (void __iomem *)
415 ((unsigned long)(reg_addr + 8) | ATA_PCI_CTL_OFS);
321 probe_ent->port[port].bmdma_addr = bmdma_addr; 416 probe_ent->port[port].bmdma_addr = bmdma_addr;
322 417 probe_ent->port[port].scr_addr = vt6421_scr_addr(iomap[5], port);
323 scr_addr = vt6421_scr_addr(pci_resource_start(pdev, 5), port);
324 probe_ent->port[port].scr_addr = scr_addr;
325 418
326 ata_std_ports(&probe_ent->port[port]); 419 ata_std_ports(&probe_ent->port[port]);
327} 420}
@@ -330,16 +423,16 @@ static struct ata_probe_ent *vt6420_init_probe_ent(struct pci_dev *pdev)
330{ 423{
331 struct ata_probe_ent *probe_ent; 424 struct ata_probe_ent *probe_ent;
332 struct ata_port_info *ppi[2]; 425 struct ata_port_info *ppi[2];
333 426 void __iomem * const *iomap;
427
334 ppi[0] = ppi[1] = &vt6420_port_info; 428 ppi[0] = ppi[1] = &vt6420_port_info;
335 probe_ent = ata_pci_init_native_mode(pdev, ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY); 429 probe_ent = ata_pci_init_native_mode(pdev, ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
336 if (!probe_ent) 430 if (!probe_ent)
337 return NULL; 431 return NULL;
338 432
339 probe_ent->port[0].scr_addr = 433 iomap = pcim_iomap_table(pdev);
340 svia_scr_addr(pci_resource_start(pdev, 5), 0); 434 probe_ent->port[0].scr_addr = svia_scr_addr(iomap[5], 0);
341 probe_ent->port[1].scr_addr = 435 probe_ent->port[1].scr_addr = svia_scr_addr(iomap[5], 1);
342 svia_scr_addr(pci_resource_start(pdev, 5), 1);
343 436
344 return probe_ent; 437 return probe_ent;
345} 438}
@@ -349,7 +442,7 @@ static struct ata_probe_ent *vt6421_init_probe_ent(struct pci_dev *pdev)
349 struct ata_probe_ent *probe_ent; 442 struct ata_probe_ent *probe_ent;
350 unsigned int i; 443 unsigned int i;
351 444
352 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL); 445 probe_ent = devm_kzalloc(&pdev->dev, sizeof(*probe_ent), GFP_KERNEL);
353 if (!probe_ent) 446 if (!probe_ent)
354 return NULL; 447 return NULL;
355 448
@@ -368,7 +461,7 @@ static struct ata_probe_ent *vt6421_init_probe_ent(struct pci_dev *pdev)
368 probe_ent->udma_mask = 0x7f; 461 probe_ent->udma_mask = 0x7f;
369 462
370 for (i = 0; i < N_PORTS; i++) 463 for (i = 0; i < N_PORTS; i++)
371 vt6421_init_addrs(probe_ent, pdev, i); 464 vt6421_init_addrs(probe_ent, pcim_iomap_table(pdev), i);
372 465
373 return probe_ent; 466 return probe_ent;
374} 467}
@@ -420,20 +513,19 @@ static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
420 struct ata_probe_ent *probe_ent; 513 struct ata_probe_ent *probe_ent;
421 int board_id = (int) ent->driver_data; 514 int board_id = (int) ent->driver_data;
422 const int *bar_sizes; 515 const int *bar_sizes;
423 int pci_dev_busy = 0;
424 u8 tmp8; 516 u8 tmp8;
425 517
426 if (!printed_version++) 518 if (!printed_version++)
427 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); 519 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
428 520
429 rc = pci_enable_device(pdev); 521 rc = pcim_enable_device(pdev);
430 if (rc) 522 if (rc)
431 return rc; 523 return rc;
432 524
433 rc = pci_request_regions(pdev, DRV_NAME); 525 rc = pcim_iomap_regions(pdev, 0x1f, DRV_NAME);
434 if (rc) { 526 if (rc) {
435 pci_dev_busy = 1; 527 pcim_pin_device(pdev);
436 goto err_out; 528 return rc;
437 } 529 }
438 530
439 if (board_id == vt6420) { 531 if (board_id == vt6420) {
@@ -442,8 +534,7 @@ static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
442 dev_printk(KERN_ERR, &pdev->dev, 534 dev_printk(KERN_ERR, &pdev->dev,
443 "SATA master/slave not supported (0x%x)\n", 535 "SATA master/slave not supported (0x%x)\n",
444 (int) tmp8); 536 (int) tmp8);
445 rc = -EIO; 537 return -EIO;
446 goto err_out_regions;
447 } 538 }
448 539
449 bar_sizes = &svia_bar_sizes[0]; 540 bar_sizes = &svia_bar_sizes[0];
@@ -459,16 +550,15 @@ static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
459 i, 550 i,
460 (unsigned long long)pci_resource_start(pdev, i), 551 (unsigned long long)pci_resource_start(pdev, i),
461 (unsigned long long)pci_resource_len(pdev, i)); 552 (unsigned long long)pci_resource_len(pdev, i));
462 rc = -ENODEV; 553 return -ENODEV;
463 goto err_out_regions;
464 } 554 }
465 555
466 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); 556 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
467 if (rc) 557 if (rc)
468 goto err_out_regions; 558 return rc;
469 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); 559 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
470 if (rc) 560 if (rc)
471 goto err_out_regions; 561 return rc;
472 562
473 if (board_id == vt6420) 563 if (board_id == vt6420)
474 probe_ent = vt6420_init_probe_ent(pdev); 564 probe_ent = vt6420_init_probe_ent(pdev);
@@ -477,26 +567,18 @@ static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
477 567
478 if (!probe_ent) { 568 if (!probe_ent) {
479 dev_printk(KERN_ERR, &pdev->dev, "out of memory\n"); 569 dev_printk(KERN_ERR, &pdev->dev, "out of memory\n");
480 rc = -ENOMEM; 570 return -ENOMEM;
481 goto err_out_regions;
482 } 571 }
483 572
484 svia_configure(pdev); 573 svia_configure(pdev);
485 574
486 pci_set_master(pdev); 575 pci_set_master(pdev);
487 576
488 /* FIXME: check ata_device_add return value */ 577 if (!ata_device_add(probe_ent))
489 ata_device_add(probe_ent); 578 return -ENODEV;
490 kfree(probe_ent);
491 579
580 devm_kfree(&pdev->dev, probe_ent);
492 return 0; 581 return 0;
493
494err_out_regions:
495 pci_release_regions(pdev);
496err_out:
497 if (!pci_dev_busy)
498 pci_disable_device(pdev);
499 return rc;
500} 582}
501 583
502static int __init svia_init(void) 584static int __init svia_init(void)
@@ -511,4 +593,3 @@ static void __exit svia_exit(void)
511 593
512module_init(svia_init); 594module_init(svia_init);
513module_exit(svia_exit); 595module_exit(svia_exit);
514
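The new vt6421 PATA support hangs off two small hooks: vt6421_port_start() flips port 2 over to the PATA ops, and the prereset callback reads the cable type out of the chip's UDMA timing register before EH runs a reset. The detection logic isolated as a sketch — cable_is_80wire is a made-up name; the register and bit come from the hunk above:

#include <linux/pci.h>

#define PATA_UDMA_TIMING	0xB3	/* PATA timing for DMA / cable detect */

static int cable_is_80wire(struct pci_dev *pdev)
{
	u8 tmp;

	pci_read_config_byte(pdev, PATA_UDMA_TIMING, &tmp);
	return !(tmp & 0x10);	/* bit set => only a 40-wire cable fitted */
}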
diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
index 0fa1b89f76d5..3d9daf231115 100644
--- a/drivers/ata/sata_vsc.c
+++ b/drivers/ata/sata_vsc.c
@@ -50,6 +50,8 @@
50#define DRV_VERSION "2.0" 50#define DRV_VERSION "2.0"
51 51
52enum { 52enum {
53 VSC_MMIO_BAR = 0,
54
53 /* Interrupt register offsets (from chip base address) */ 55 /* Interrupt register offsets (from chip base address) */
54 VSC_SATA_INT_STAT_OFFSET = 0x00, 56 VSC_SATA_INT_STAT_OFFSET = 0x00,
55 VSC_SATA_INT_MASK_OFFSET = 0x04, 57 VSC_SATA_INT_MASK_OFFSET = 0x04,
@@ -96,7 +98,6 @@ enum {
96 VSC_SATA_INT_PHY_CHANGE), 98 VSC_SATA_INT_PHY_CHANGE),
97}; 99};
98 100
99
100#define is_vsc_sata_int_err(port_idx, int_status) \ 101#define is_vsc_sata_int_err(port_idx, int_status) \
101 (int_status & (VSC_SATA_INT_ERROR << (8 * port_idx))) 102 (int_status & (VSC_SATA_INT_ERROR << (8 * port_idx)))
102 103
@@ -105,7 +106,7 @@ static u32 vsc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg)
105{ 106{
106 if (sc_reg > SCR_CONTROL) 107 if (sc_reg > SCR_CONTROL)
107 return 0xffffffffU; 108 return 0xffffffffU;
108 return readl((void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4)); 109 return readl(ap->ioaddr.scr_addr + (sc_reg * 4));
109} 110}
110 111
111 112
@@ -114,7 +115,7 @@ static void vsc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg,
114{ 115{
115 if (sc_reg > SCR_CONTROL) 116 if (sc_reg > SCR_CONTROL)
116 return; 117 return;
117 writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4)); 118 writel(val, ap->ioaddr.scr_addr + (sc_reg * 4));
118} 119}
119 120
120 121
@@ -123,7 +124,7 @@ static void vsc_intr_mask_update(struct ata_port *ap, u8 ctl)
123 void __iomem *mask_addr; 124 void __iomem *mask_addr;
124 u8 mask; 125 u8 mask;
125 126
126 mask_addr = ap->host->mmio_base + 127 mask_addr = ap->host->iomap[VSC_MMIO_BAR] +
127 VSC_SATA_INT_MASK_OFFSET + ap->port_no; 128 VSC_SATA_INT_MASK_OFFSET + ap->port_no;
128 mask = readb(mask_addr); 129 mask = readb(mask_addr);
129 if (ctl & ATA_NIEN) 130 if (ctl & ATA_NIEN)
@@ -150,25 +151,25 @@ static void vsc_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
150 } 151 }
151 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { 152 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
152 writew(tf->feature | (((u16)tf->hob_feature) << 8), 153 writew(tf->feature | (((u16)tf->hob_feature) << 8),
153 (void __iomem *) ioaddr->feature_addr); 154 ioaddr->feature_addr);
154 writew(tf->nsect | (((u16)tf->hob_nsect) << 8), 155 writew(tf->nsect | (((u16)tf->hob_nsect) << 8),
155 (void __iomem *) ioaddr->nsect_addr); 156 ioaddr->nsect_addr);
156 writew(tf->lbal | (((u16)tf->hob_lbal) << 8), 157 writew(tf->lbal | (((u16)tf->hob_lbal) << 8),
157 (void __iomem *) ioaddr->lbal_addr); 158 ioaddr->lbal_addr);
158 writew(tf->lbam | (((u16)tf->hob_lbam) << 8), 159 writew(tf->lbam | (((u16)tf->hob_lbam) << 8),
159 (void __iomem *) ioaddr->lbam_addr); 160 ioaddr->lbam_addr);
160 writew(tf->lbah | (((u16)tf->hob_lbah) << 8), 161 writew(tf->lbah | (((u16)tf->hob_lbah) << 8),
161 (void __iomem *) ioaddr->lbah_addr); 162 ioaddr->lbah_addr);
162 } else if (is_addr) { 163 } else if (is_addr) {
163 writew(tf->feature, (void __iomem *) ioaddr->feature_addr); 164 writew(tf->feature, ioaddr->feature_addr);
164 writew(tf->nsect, (void __iomem *) ioaddr->nsect_addr); 165 writew(tf->nsect, ioaddr->nsect_addr);
165 writew(tf->lbal, (void __iomem *) ioaddr->lbal_addr); 166 writew(tf->lbal, ioaddr->lbal_addr);
166 writew(tf->lbam, (void __iomem *) ioaddr->lbam_addr); 167 writew(tf->lbam, ioaddr->lbam_addr);
167 writew(tf->lbah, (void __iomem *) ioaddr->lbah_addr); 168 writew(tf->lbah, ioaddr->lbah_addr);
168 } 169 }
169 170
170 if (tf->flags & ATA_TFLAG_DEVICE) 171 if (tf->flags & ATA_TFLAG_DEVICE)
171 writeb(tf->device, (void __iomem *) ioaddr->device_addr); 172 writeb(tf->device, ioaddr->device_addr);
172 173
173 ata_wait_idle(ap); 174 ata_wait_idle(ap);
174} 175}
@@ -180,12 +181,12 @@ static void vsc_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
180 u16 nsect, lbal, lbam, lbah, feature; 181 u16 nsect, lbal, lbam, lbah, feature;
181 182
182 tf->command = ata_check_status(ap); 183 tf->command = ata_check_status(ap);
183 tf->device = readw((void __iomem *) ioaddr->device_addr); 184 tf->device = readw(ioaddr->device_addr);
184 feature = readw((void __iomem *) ioaddr->error_addr); 185 feature = readw(ioaddr->error_addr);
185 nsect = readw((void __iomem *) ioaddr->nsect_addr); 186 nsect = readw(ioaddr->nsect_addr);
186 lbal = readw((void __iomem *) ioaddr->lbal_addr); 187 lbal = readw(ioaddr->lbal_addr);
187 lbam = readw((void __iomem *) ioaddr->lbam_addr); 188 lbam = readw(ioaddr->lbam_addr);
188 lbah = readw((void __iomem *) ioaddr->lbah_addr); 189 lbah = readw(ioaddr->lbah_addr);
189 190
190 tf->feature = feature; 191 tf->feature = feature;
191 tf->nsect = nsect; 192 tf->nsect = nsect;
@@ -217,7 +218,8 @@ static irqreturn_t vsc_sata_interrupt (int irq, void *dev_instance)
217 218
218 spin_lock(&host->lock); 219 spin_lock(&host->lock);
219 220
220 int_status = readl(host->mmio_base + VSC_SATA_INT_STAT_OFFSET); 221 int_status = readl(host->iomap[VSC_MMIO_BAR] +
222 VSC_SATA_INT_STAT_OFFSET);
221 223
222 for (i = 0; i < host->n_ports; i++) { 224 for (i = 0; i < host->n_ports; i++) {
223 if (int_status & ((u32) 0xFF << (8 * i))) { 225 if (int_status & ((u32) 0xFF << (8 * i))) {
@@ -301,21 +303,22 @@ static const struct ata_port_operations vsc_sata_ops = {
301 .bmdma_status = ata_bmdma_status, 303 .bmdma_status = ata_bmdma_status,
302 .qc_prep = ata_qc_prep, 304 .qc_prep = ata_qc_prep,
303 .qc_issue = ata_qc_issue_prot, 305 .qc_issue = ata_qc_issue_prot,
304 .data_xfer = ata_mmio_data_xfer, 306 .data_xfer = ata_data_xfer,
305 .freeze = ata_bmdma_freeze, 307 .freeze = ata_bmdma_freeze,
306 .thaw = ata_bmdma_thaw, 308 .thaw = ata_bmdma_thaw,
307 .error_handler = ata_bmdma_error_handler, 309 .error_handler = ata_bmdma_error_handler,
308 .post_internal_cmd = ata_bmdma_post_internal_cmd, 310 .post_internal_cmd = ata_bmdma_post_internal_cmd,
309 .irq_handler = vsc_sata_interrupt, 311 .irq_handler = vsc_sata_interrupt,
310 .irq_clear = ata_bmdma_irq_clear, 312 .irq_clear = ata_bmdma_irq_clear,
313 .irq_on = ata_irq_on,
314 .irq_ack = ata_irq_ack,
311 .scr_read = vsc_sata_scr_read, 315 .scr_read = vsc_sata_scr_read,
312 .scr_write = vsc_sata_scr_write, 316 .scr_write = vsc_sata_scr_write,
313 .port_start = ata_port_start, 317 .port_start = ata_port_start,
314 .port_stop = ata_port_stop,
315 .host_stop = ata_pci_host_stop,
316}; 318};
317 319
318static void __devinit vsc_sata_setup_port(struct ata_ioports *port, unsigned long base) 320static void __devinit vsc_sata_setup_port(struct ata_ioports *port,
321 void __iomem *base)
319{ 322{
320 port->cmd_addr = base + VSC_SATA_TF_CMD_OFFSET; 323 port->cmd_addr = base + VSC_SATA_TF_CMD_OFFSET;
321 port->data_addr = base + VSC_SATA_TF_DATA_OFFSET; 324 port->data_addr = base + VSC_SATA_TF_DATA_OFFSET;
@@ -332,80 +335,70 @@ static void __devinit vsc_sata_setup_port(struct ata_ioports *port, unsigned lon
332 port->ctl_addr = base + VSC_SATA_TF_CTL_OFFSET; 335 port->ctl_addr = base + VSC_SATA_TF_CTL_OFFSET;
333 port->bmdma_addr = base + VSC_SATA_DMA_CMD_OFFSET; 336 port->bmdma_addr = base + VSC_SATA_DMA_CMD_OFFSET;
334 port->scr_addr = base + VSC_SATA_SCR_STATUS_OFFSET; 337 port->scr_addr = base + VSC_SATA_SCR_STATUS_OFFSET;
335 writel(0, (void __iomem *) base + VSC_SATA_UP_DESCRIPTOR_OFFSET); 338 writel(0, base + VSC_SATA_UP_DESCRIPTOR_OFFSET);
336 writel(0, (void __iomem *) base + VSC_SATA_UP_DATA_BUFFER_OFFSET); 339 writel(0, base + VSC_SATA_UP_DATA_BUFFER_OFFSET);
337} 340}
338 341
339 342
340static int __devinit vsc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) 343static int __devinit vsc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
341{ 344{
342 static int printed_version; 345 static int printed_version;
343 struct ata_probe_ent *probe_ent = NULL; 346 struct ata_probe_ent *probe_ent;
344 unsigned long base;
345 int pci_dev_busy = 0;
346 void __iomem *mmio_base; 347 void __iomem *mmio_base;
347 int rc; 348 int rc;
348 349
349 if (!printed_version++) 350 if (!printed_version++)
350 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); 351 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
351 352
352 rc = pci_enable_device(pdev); 353 rc = pcim_enable_device(pdev);
353 if (rc) 354 if (rc)
354 return rc; 355 return rc;
355 356
356 /* 357 /*
 357 * Check that the needed resource is mapped. 358 * Check that the needed resource is mapped.
358 */ 359 */
359 if (pci_resource_len(pdev, 0) == 0) { 360 if (pci_resource_len(pdev, 0) == 0)
360 rc = -ENODEV; 361 return -ENODEV;
361 goto err_out;
362 }
363 362
364 rc = pci_request_regions(pdev, DRV_NAME); 363 rc = pcim_iomap_regions(pdev, 1 << VSC_MMIO_BAR, DRV_NAME);
365 if (rc) { 364 if (rc == -EBUSY)
366 pci_dev_busy = 1; 365 pcim_pin_device(pdev);
367 goto err_out; 366 if (rc)
368 } 367 return rc;
369 368
370 /* 369 /*
371 * Use 32 bit DMA mask, because 64 bit address support is poor. 370 * Use 32 bit DMA mask, because 64 bit address support is poor.
372 */ 371 */
373 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK); 372 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
374 if (rc) 373 if (rc)
375 goto err_out_regions; 374 return rc;
376 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); 375 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
377 if (rc) 376 if (rc)
378 goto err_out_regions; 377 return rc;
379 378
380 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL); 379 probe_ent = devm_kzalloc(&pdev->dev, sizeof(*probe_ent), GFP_KERNEL);
381 if (probe_ent == NULL) { 380 if (probe_ent == NULL)
382 rc = -ENOMEM; 381 return -ENOMEM;
383 goto err_out_regions;
384 }
385 memset(probe_ent, 0, sizeof(*probe_ent));
386 probe_ent->dev = pci_dev_to_dev(pdev); 382 probe_ent->dev = pci_dev_to_dev(pdev);
387 INIT_LIST_HEAD(&probe_ent->node); 383 INIT_LIST_HEAD(&probe_ent->node);
388 384
389 mmio_base = pci_iomap(pdev, 0, 0);
390 if (mmio_base == NULL) {
391 rc = -ENOMEM;
392 goto err_out_free_ent;
393 }
394 base = (unsigned long) mmio_base;
395
396 /* 385 /*
397 * Due to a bug in the chip, the default cache line size can't be used 386 * Due to a bug in the chip, the default cache line size can't be used
398 */ 387 */
399 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x80); 388 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x80);
400 389
390 if (pci_enable_msi(pdev) == 0)
391 pci_intx(pdev, 0);
392 else
393 probe_ent->irq_flags = IRQF_SHARED;
394
401 probe_ent->sht = &vsc_sata_sht; 395 probe_ent->sht = &vsc_sata_sht;
402 probe_ent->port_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 396 probe_ent->port_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
403 ATA_FLAG_MMIO; 397 ATA_FLAG_MMIO;
404 probe_ent->port_ops = &vsc_sata_ops; 398 probe_ent->port_ops = &vsc_sata_ops;
405 probe_ent->n_ports = 4; 399 probe_ent->n_ports = 4;
406 probe_ent->irq = pdev->irq; 400 probe_ent->irq = pdev->irq;
407 probe_ent->irq_flags = IRQF_SHARED; 401 probe_ent->iomap = pcim_iomap_table(pdev);
408 probe_ent->mmio_base = mmio_base;
409 402
410 /* We don't care much about the PIO/UDMA masks, but the core won't like us 403 /* We don't care much about the PIO/UDMA masks, but the core won't like us
411 * if we don't fill these 404 * if we don't fill these
@@ -414,11 +407,13 @@ static int __devinit vsc_sata_init_one (struct pci_dev *pdev, const struct pci_d
414 probe_ent->mwdma_mask = 0x07; 407 probe_ent->mwdma_mask = 0x07;
415 probe_ent->udma_mask = 0x7f; 408 probe_ent->udma_mask = 0x7f;
416 409
410 mmio_base = probe_ent->iomap[VSC_MMIO_BAR];
411
417 /* We have 4 ports per PCI function */ 412 /* We have 4 ports per PCI function */
418 vsc_sata_setup_port(&probe_ent->port[0], base + 1 * VSC_SATA_PORT_OFFSET); 413 vsc_sata_setup_port(&probe_ent->port[0], mmio_base + 1 * VSC_SATA_PORT_OFFSET);
419 vsc_sata_setup_port(&probe_ent->port[1], base + 2 * VSC_SATA_PORT_OFFSET); 414 vsc_sata_setup_port(&probe_ent->port[1], mmio_base + 2 * VSC_SATA_PORT_OFFSET);
420 vsc_sata_setup_port(&probe_ent->port[2], base + 3 * VSC_SATA_PORT_OFFSET); 415 vsc_sata_setup_port(&probe_ent->port[2], mmio_base + 3 * VSC_SATA_PORT_OFFSET);
421 vsc_sata_setup_port(&probe_ent->port[3], base + 4 * VSC_SATA_PORT_OFFSET); 416 vsc_sata_setup_port(&probe_ent->port[3], mmio_base + 4 * VSC_SATA_PORT_OFFSET);
422 417
423 pci_set_master(pdev); 418 pci_set_master(pdev);
424 419
@@ -430,20 +425,11 @@ static int __devinit vsc_sata_init_one (struct pci_dev *pdev, const struct pci_d
430 */ 425 */
431 pci_write_config_dword(pdev, 0x98, 0); 426 pci_write_config_dword(pdev, 0x98, 0);
432 427
433 /* FIXME: check ata_device_add return value */ 428 if (!ata_device_add(probe_ent))
434 ata_device_add(probe_ent); 429 return -ENODEV;
435 kfree(probe_ent);
436 430
431 devm_kfree(&pdev->dev, probe_ent);
437 return 0; 432 return 0;
438
439err_out_free_ent:
440 kfree(probe_ent);
441err_out_regions:
442 pci_release_regions(pdev);
443err_out:
444 if (!pci_dev_busy)
445 pci_disable_device(pdev);
446 return rc;
447} 433}
448 434
449static const struct pci_device_id vsc_sata_pci_tbl[] = { 435static const struct pci_device_id vsc_sata_pci_tbl[] = {
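sata_vsc.c also picks up MSI support in passing: if pci_enable_msi() succeeds, the legacy INTx line is masked and the IRQ needs no sharing flag; otherwise the driver falls back to a shared legacy interrupt. The same fragment as a standalone sketch (setup_irq_flags is an illustrative name):

#include <linux/pci.h>
#include <linux/interrupt.h>

static unsigned long setup_irq_flags(struct pci_dev *pdev)
{
	if (pci_enable_msi(pdev) == 0) {
		pci_intx(pdev, 0);	/* MSI active: disable legacy INTx */
		return 0;		/* MSI vectors are never shared */
	}
	return IRQF_SHARED;		/* legacy line may be shared */
}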
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 1429f3a2629e..5d6312e33490 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -37,6 +37,18 @@ config DEBUG_DRIVER
37 37
38 If you are unsure about this, say N here. 38 If you are unsure about this, say N here.
39 39
40config DEBUG_DEVRES
41 bool "Managed device resources verbose debug messages"
42 depends on DEBUG_KERNEL
43 help
 44 This option enables the kernel parameter devres.log. If set to
45 non-zero, devres debug messages are printed. Select this if
46 you are having a problem with devres or want to debug
47 resource management for a managed device. devres.log can be
 48 switched on and off from its sysfs node.
49
 50 If you are unsure about this, say N here.
51
40config SYS_HYPERVISOR 52config SYS_HYPERVISOR
41 bool 53 bool
42 default n 54 default n
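The help text refers to the devres.log parameter; presumably it is wired up as an ordinary module parameter inside the new devres code, which is what makes it both a boot-time parameter (devres.log=1) and a runtime sysfs toggle. A sketch of that wiring, under the assumption that the real devres.c differs only in detail:

#include <linux/moduleparam.h>

static int log_devres;			/* 0 = quiet, non-zero = verbose */
/* mode 0644 exposes /sys/module/devres/parameters/log for runtime use */
module_param_named(log, log_devres, int, 0644);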
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 7bbb9eeda235..e9eb7382ac3a 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -3,6 +3,7 @@
3obj-y := core.o sys.o bus.o dd.o \ 3obj-y := core.o sys.o bus.o dd.o \
4 driver.o class.o platform.o \ 4 driver.o class.o platform.o \
5 cpu.o firmware.o init.o map.o dmapool.o \ 5 cpu.o firmware.o init.o map.o dmapool.o \
6 dma-mapping.o devres.o \
6 attribute_container.o transport_class.o 7 attribute_container.o transport_class.o
7obj-y += power/ 8obj-y += power/
8obj-$(CONFIG_ISA) += isa.o 9obj-$(CONFIG_ISA) += isa.o
diff --git a/drivers/base/base.h b/drivers/base/base.h
index d26644a59537..de7e1442ce60 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -44,3 +44,4 @@ struct class_device_attribute *to_class_dev_attr(struct attribute *_attr)
44 44
45extern char *make_class_name(const char *name, struct kobject *kobj); 45extern char *make_class_name(const char *name, struct kobject *kobj);
46 46
47extern void devres_release_all(struct device *dev);
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 8bf2ca2e56b5..96def1ddba19 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -364,7 +364,7 @@ char *make_class_name(const char *name, struct kobject *kobj)
364 364
365 class_name = kmalloc(size, GFP_KERNEL); 365 class_name = kmalloc(size, GFP_KERNEL);
366 if (!class_name) 366 if (!class_name)
367 return ERR_PTR(-ENOMEM); 367 return NULL;
368 368
369 strcpy(class_name, name); 369 strcpy(class_name, name);
370 strcat(class_name, ":"); 370 strcat(class_name, ":");
@@ -411,8 +411,11 @@ static int make_deprecated_class_device_links(struct class_device *class_dev)
411 return 0; 411 return 0;
412 412
413 class_name = make_class_name(class_dev->class->name, &class_dev->kobj); 413 class_name = make_class_name(class_dev->class->name, &class_dev->kobj);
414 error = sysfs_create_link(&class_dev->dev->kobj, &class_dev->kobj, 414 if (class_name)
415 class_name); 415 error = sysfs_create_link(&class_dev->dev->kobj,
416 &class_dev->kobj, class_name);
417 else
418 error = -ENOMEM;
416 kfree(class_name); 419 kfree(class_name);
417 return error; 420 return error;
418} 421}
@@ -425,7 +428,8 @@ static void remove_deprecated_class_device_links(struct class_device *class_dev)
425 return; 428 return;
426 429
427 class_name = make_class_name(class_dev->class->name, &class_dev->kobj); 430 class_name = make_class_name(class_dev->class->name, &class_dev->kobj);
428 sysfs_remove_link(&class_dev->dev->kobj, class_name); 431 if (class_name)
432 sysfs_remove_link(&class_dev->dev->kobj, class_name);
429 kfree(class_name); 433 kfree(class_name);
430} 434}
431#else 435#else
@@ -863,9 +867,12 @@ int class_device_rename(struct class_device *class_dev, char *new_name)
863 if (class_dev->dev) { 867 if (class_dev->dev) {
864 new_class_name = make_class_name(class_dev->class->name, 868 new_class_name = make_class_name(class_dev->class->name,
865 &class_dev->kobj); 869 &class_dev->kobj);
866 sysfs_create_link(&class_dev->dev->kobj, &class_dev->kobj, 870 if (new_class_name)
867 new_class_name); 871 sysfs_create_link(&class_dev->dev->kobj,
868 sysfs_remove_link(&class_dev->dev->kobj, old_class_name); 872 &class_dev->kobj, new_class_name);
873 if (old_class_name)
874 sysfs_remove_link(&class_dev->dev->kobj,
875 old_class_name);
869 } 876 }
870#endif 877#endif
871 class_device_put(class_dev); 878 class_device_put(class_dev);
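The class.c hunks all enforce one convention: make_class_name() now reports failure with NULL rather than an ERR_PTR, and every caller checks before use. Because kfree(NULL) is a no-op, the cleanup stays flat. The calling pattern condensed into a sketch (link_one is a made-up wrapper around the calls shown above):

static int link_one(struct device *dev, struct class_device *class_dev)
{
	char *class_name;
	int error = -ENOMEM;

	class_name = make_class_name(class_dev->class->name,
				     &class_dev->kobj);
	if (class_name)
		error = sysfs_create_link(&dev->kobj, &class_dev->kobj,
					  class_name);
	kfree(class_name);	/* kfree(NULL) is safe */
	return error;
}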
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 67b79a7592a9..a8ac34ba6107 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -95,6 +95,8 @@ static void device_release(struct kobject * kobj)
95 95
96 if (dev->release) 96 if (dev->release)
97 dev->release(dev); 97 dev->release(dev);
98 else if (dev->type && dev->type->release)
99 dev->type->release(dev);
98 else if (dev->class && dev->class->dev_release) 100 else if (dev->class && dev->class->dev_release)
99 dev->class->dev_release(dev); 101 dev->class->dev_release(dev);
100 else { 102 else {
@@ -154,25 +156,47 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj, char **envp,
154 "MINOR=%u", MINOR(dev->devt)); 156 "MINOR=%u", MINOR(dev->devt));
155 } 157 }
156 158
157#ifdef CONFIG_SYSFS_DEPRECATED 159 if (dev->driver)
158 /* add bus name (same as SUBSYSTEM, deprecated) */
159 if (dev->bus)
160 add_uevent_var(envp, num_envp, &i,
161 buffer, buffer_size, &length,
162 "PHYSDEVBUS=%s", dev->bus->name);
163#endif
164
165 /* add driver name (PHYSDEV* values are deprecated)*/
166 if (dev->driver) {
167 add_uevent_var(envp, num_envp, &i, 160 add_uevent_var(envp, num_envp, &i,
168 buffer, buffer_size, &length, 161 buffer, buffer_size, &length,
169 "DRIVER=%s", dev->driver->name); 162 "DRIVER=%s", dev->driver->name);
163
170#ifdef CONFIG_SYSFS_DEPRECATED 164#ifdef CONFIG_SYSFS_DEPRECATED
165 if (dev->class) {
166 struct device *parent = dev->parent;
167
168 /* find first bus device in parent chain */
169 while (parent && !parent->bus)
170 parent = parent->parent;
171 if (parent && parent->bus) {
172 const char *path;
173
174 path = kobject_get_path(&parent->kobj, GFP_KERNEL);
175 add_uevent_var(envp, num_envp, &i,
176 buffer, buffer_size, &length,
177 "PHYSDEVPATH=%s", path);
178 kfree(path);
179
180 add_uevent_var(envp, num_envp, &i,
181 buffer, buffer_size, &length,
182 "PHYSDEVBUS=%s", parent->bus->name);
183
184 if (parent->driver)
185 add_uevent_var(envp, num_envp, &i,
186 buffer, buffer_size, &length,
187 "PHYSDEVDRIVER=%s", parent->driver->name);
188 }
189 } else if (dev->bus) {
171 add_uevent_var(envp, num_envp, &i, 190 add_uevent_var(envp, num_envp, &i,
172 buffer, buffer_size, &length, 191 buffer, buffer_size, &length,
173 "PHYSDEVDRIVER=%s", dev->driver->name); 192 "PHYSDEVBUS=%s", dev->bus->name);
174#endif 193
194 if (dev->driver)
195 add_uevent_var(envp, num_envp, &i,
196 buffer, buffer_size, &length,
197 "PHYSDEVDRIVER=%s", dev->driver->name);
175 } 198 }
199#endif
176 200
177 /* terminate, set to next free slot, shrink available space */ 201 /* terminate, set to next free slot, shrink available space */
178 envp[i] = NULL; 202 envp[i] = NULL;
@@ -184,19 +208,25 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj, char **envp,
184 if (dev->bus && dev->bus->uevent) { 208 if (dev->bus && dev->bus->uevent) {
185 /* have the bus specific function add its stuff */ 209 /* have the bus specific function add its stuff */
186 retval = dev->bus->uevent(dev, envp, num_envp, buffer, buffer_size); 210 retval = dev->bus->uevent(dev, envp, num_envp, buffer, buffer_size);
187 if (retval) { 211 if (retval)
188 pr_debug ("%s - uevent() returned %d\n", 212 pr_debug ("%s: bus uevent() returned %d\n",
189 __FUNCTION__, retval); 213 __FUNCTION__, retval);
190 }
191 } 214 }
192 215
193 if (dev->class && dev->class->dev_uevent) { 216 if (dev->class && dev->class->dev_uevent) {
194 /* have the class specific function add its stuff */ 217 /* have the class specific function add its stuff */
195 retval = dev->class->dev_uevent(dev, envp, num_envp, buffer, buffer_size); 218 retval = dev->class->dev_uevent(dev, envp, num_envp, buffer, buffer_size);
196 if (retval) { 219 if (retval)
197 pr_debug("%s - dev_uevent() returned %d\n", 220 pr_debug("%s: class uevent() returned %d\n",
198 __FUNCTION__, retval); 221 __FUNCTION__, retval);
199 } 222 }
223
224 if (dev->type && dev->type->uevent) {
 225 /* have the device type specific function add its stuff */
226 retval = dev->type->uevent(dev, envp, num_envp, buffer, buffer_size);
227 if (retval)
228 pr_debug("%s: dev_type uevent() returned %d\n",
229 __FUNCTION__, retval);
200 } 230 }
201 231
202 return retval; 232 return retval;
@@ -247,37 +277,50 @@ static void device_remove_groups(struct device *dev)
247static int device_add_attrs(struct device *dev) 277static int device_add_attrs(struct device *dev)
248{ 278{
249 struct class *class = dev->class; 279 struct class *class = dev->class;
280 struct device_type *type = dev->type;
250 int error = 0; 281 int error = 0;
251 int i; 282 int i;
252 283
253 if (!class) 284 if (class && class->dev_attrs) {
254 return 0;
255
256 if (class->dev_attrs) {
257 for (i = 0; attr_name(class->dev_attrs[i]); i++) { 285 for (i = 0; attr_name(class->dev_attrs[i]); i++) {
258 error = device_create_file(dev, &class->dev_attrs[i]); 286 error = device_create_file(dev, &class->dev_attrs[i]);
259 if (error) 287 if (error)
260 break; 288 break;
261 } 289 }
290 if (error)
291 while (--i >= 0)
292 device_remove_file(dev, &class->dev_attrs[i]);
262 } 293 }
263 if (error) 294
264 while (--i >= 0) 295 if (type && type->attrs) {
265 device_remove_file(dev, &class->dev_attrs[i]); 296 for (i = 0; attr_name(type->attrs[i]); i++) {
297 error = device_create_file(dev, &type->attrs[i]);
298 if (error)
299 break;
300 }
301 if (error)
302 while (--i >= 0)
303 device_remove_file(dev, &type->attrs[i]);
304 }
305
266 return error; 306 return error;
267} 307}
268 308
269static void device_remove_attrs(struct device *dev) 309static void device_remove_attrs(struct device *dev)
270{ 310{
271 struct class *class = dev->class; 311 struct class *class = dev->class;
312 struct device_type *type = dev->type;
272 int i; 313 int i;
273 314
274 if (!class) 315 if (class && class->dev_attrs) {
275 return;
276
277 if (class->dev_attrs) {
278 for (i = 0; attr_name(class->dev_attrs[i]); i++) 316 for (i = 0; attr_name(class->dev_attrs[i]); i++)
279 device_remove_file(dev, &class->dev_attrs[i]); 317 device_remove_file(dev, &class->dev_attrs[i]);
280 } 318 }
319
320 if (type && type->attrs) {
321 for (i = 0; attr_name(type->attrs[i]); i++)
322 device_remove_file(dev, &type->attrs[i]);
323 }
281} 324}
282 325
283 326
@@ -385,27 +428,30 @@ void device_initialize(struct device *dev)
385 INIT_LIST_HEAD(&dev->dma_pools); 428 INIT_LIST_HEAD(&dev->dma_pools);
386 INIT_LIST_HEAD(&dev->node); 429 INIT_LIST_HEAD(&dev->node);
387 init_MUTEX(&dev->sem); 430 init_MUTEX(&dev->sem);
431 spin_lock_init(&dev->devres_lock);
432 INIT_LIST_HEAD(&dev->devres_head);
388 device_init_wakeup(dev, 0); 433 device_init_wakeup(dev, 0);
389 set_dev_node(dev, -1); 434 set_dev_node(dev, -1);
390} 435}
391 436
392#ifdef CONFIG_SYSFS_DEPRECATED 437#ifdef CONFIG_SYSFS_DEPRECATED
393static int setup_parent(struct device *dev, struct device *parent) 438static struct kobject * get_device_parent(struct device *dev,
439 struct device *parent)
394{ 440{
395 /* Set the parent to the class, not the parent device */ 441 /* Set the parent to the class, not the parent device */
396 /* this keeps sysfs from having a symlink to make old udevs happy */ 442 /* this keeps sysfs from having a symlink to make old udevs happy */
397 if (dev->class) 443 if (dev->class)
398 dev->kobj.parent = &dev->class->subsys.kset.kobj; 444 return &dev->class->subsys.kset.kobj;
399 else if (parent) 445 else if (parent)
400 dev->kobj.parent = &parent->kobj; 446 return &parent->kobj;
401 447
402 return 0; 448 return NULL;
403} 449}
404#else 450#else
405static int virtual_device_parent(struct device *dev) 451static struct kobject * virtual_device_parent(struct device *dev)
406{ 452{
407 if (!dev->class) 453 if (!dev->class)
408 return -ENODEV; 454 return ERR_PTR(-ENODEV);
409 455
410 if (!dev->class->virtual_dir) { 456 if (!dev->class->virtual_dir) {
411 static struct kobject *virtual_dir = NULL; 457 static struct kobject *virtual_dir = NULL;
@@ -415,25 +461,31 @@ static int virtual_device_parent(struct device *dev)
415 dev->class->virtual_dir = kobject_add_dir(virtual_dir, dev->class->name); 461 dev->class->virtual_dir = kobject_add_dir(virtual_dir, dev->class->name);
416 } 462 }
417 463
418 dev->kobj.parent = dev->class->virtual_dir; 464 return dev->class->virtual_dir;
419 return 0;
420} 465}
421 466
422static int setup_parent(struct device *dev, struct device *parent) 467static struct kobject * get_device_parent(struct device *dev,
468 struct device *parent)
423{ 469{
424 int error;
425
426 /* if this is a class device, and has no parent, create one */ 470 /* if this is a class device, and has no parent, create one */
427 if ((dev->class) && (parent == NULL)) { 471 if ((dev->class) && (parent == NULL)) {
428 error = virtual_device_parent(dev); 472 return virtual_device_parent(dev);
429 if (error)
430 return error;
431 } else if (parent) 473 } else if (parent)
432 dev->kobj.parent = &parent->kobj; 474 return &parent->kobj;
475 return NULL;
476}
433 477
478#endif
479static int setup_parent(struct device *dev, struct device *parent)
480{
481 struct kobject *kobj;
482 kobj = get_device_parent(dev, parent);
483 if (IS_ERR(kobj))
484 return PTR_ERR(kobj);
485 if (kobj)
486 dev->kobj.parent = kobj;
434 return 0; 487 return 0;
435} 488}
436#endif
437 489
438/** 490/**
439 * device_add - add device to device hierarchy. 491 * device_add - add device to device hierarchy.
@@ -520,9 +572,13 @@ int device_add(struct device *dev)
520 &dev->kobj, dev->bus_id); 572 &dev->kobj, dev->bus_id);
521#ifdef CONFIG_SYSFS_DEPRECATED 573#ifdef CONFIG_SYSFS_DEPRECATED
522 if (parent) { 574 if (parent) {
523 sysfs_create_link(&dev->kobj, &dev->parent->kobj, "device"); 575 sysfs_create_link(&dev->kobj, &dev->parent->kobj,
524 class_name = make_class_name(dev->class->name, &dev->kobj); 576 "device");
525 sysfs_create_link(&dev->parent->kobj, &dev->kobj, class_name); 577 class_name = make_class_name(dev->class->name,
578 &dev->kobj);
579 if (class_name)
580 sysfs_create_link(&dev->parent->kobj,
581 &dev->kobj, class_name);
526 } 582 }
527#endif 583#endif
528 } 584 }
@@ -535,7 +591,8 @@ int device_add(struct device *dev)
535 goto PMError; 591 goto PMError;
536 if ((error = bus_add_device(dev))) 592 if ((error = bus_add_device(dev)))
537 goto BusError; 593 goto BusError;
538 kobject_uevent(&dev->kobj, KOBJ_ADD); 594 if (!dev->uevent_suppress)
595 kobject_uevent(&dev->kobj, KOBJ_ADD);
539 if ((error = bus_attach_device(dev))) 596 if ((error = bus_attach_device(dev)))
540 goto AttachError; 597 goto AttachError;
541 if (parent) 598 if (parent)
@@ -665,7 +722,9 @@ void device_del(struct device * dev)
665 if (parent) { 722 if (parent) {
666 char *class_name = make_class_name(dev->class->name, 723 char *class_name = make_class_name(dev->class->name,
667 &dev->kobj); 724 &dev->kobj);
668 sysfs_remove_link(&dev->parent->kobj, class_name); 725 if (class_name)
726 sysfs_remove_link(&dev->parent->kobj,
727 class_name);
669 kfree(class_name); 728 kfree(class_name);
670 sysfs_remove_link(&dev->kobj, "device"); 729 sysfs_remove_link(&dev->kobj, "device");
671 } 730 }
@@ -968,20 +1027,25 @@ static int device_move_class_links(struct device *dev,
968 1027
969 class_name = make_class_name(dev->class->name, &dev->kobj); 1028 class_name = make_class_name(dev->class->name, &dev->kobj);
970 if (!class_name) { 1029 if (!class_name) {
971 error = PTR_ERR(class_name); 1030 error = -ENOMEM;
972 class_name = NULL;
973 goto out; 1031 goto out;
974 } 1032 }
975 if (old_parent) { 1033 if (old_parent) {
976 sysfs_remove_link(&dev->kobj, "device"); 1034 sysfs_remove_link(&dev->kobj, "device");
977 sysfs_remove_link(&old_parent->kobj, class_name); 1035 sysfs_remove_link(&old_parent->kobj, class_name);
978 } 1036 }
979 error = sysfs_create_link(&dev->kobj, &new_parent->kobj, "device"); 1037 if (new_parent) {
980 if (error) 1038 error = sysfs_create_link(&dev->kobj, &new_parent->kobj,
981 goto out; 1039 "device");
982 error = sysfs_create_link(&new_parent->kobj, &dev->kobj, class_name); 1040 if (error)
983 if (error) 1041 goto out;
984 sysfs_remove_link(&dev->kobj, "device"); 1042 error = sysfs_create_link(&new_parent->kobj, &dev->kobj,
1043 class_name);
1044 if (error)
1045 sysfs_remove_link(&dev->kobj, "device");
1046 }
1047 else
1048 error = 0;
985out: 1049out:
986 kfree(class_name); 1050 kfree(class_name);
987 return error; 1051 return error;
@@ -993,29 +1057,28 @@ out:
993/** 1057/**
994 * device_move - moves a device to a new parent 1058 * device_move - moves a device to a new parent
995 * @dev: the pointer to the struct device to be moved 1059 * @dev: the pointer to the struct device to be moved
 996 * @new_parent: the new parent of the device 1060 * @new_parent: the new parent of the device (can be NULL)
997 */ 1061 */
998int device_move(struct device *dev, struct device *new_parent) 1062int device_move(struct device *dev, struct device *new_parent)
999{ 1063{
1000 int error; 1064 int error;
1001 struct device *old_parent; 1065 struct device *old_parent;
1066 struct kobject *new_parent_kobj;
1002 1067
1003 dev = get_device(dev); 1068 dev = get_device(dev);
1004 if (!dev) 1069 if (!dev)
1005 return -EINVAL; 1070 return -EINVAL;
1006 1071
1007 if (!device_is_registered(dev)) {
1008 error = -EINVAL;
1009 goto out;
1010 }
1011 new_parent = get_device(new_parent); 1072 new_parent = get_device(new_parent);
1012 if (!new_parent) { 1073 new_parent_kobj = get_device_parent (dev, new_parent);
1013 error = -EINVAL; 1074 if (IS_ERR(new_parent_kobj)) {
1075 error = PTR_ERR(new_parent_kobj);
1076 put_device(new_parent);
1014 goto out; 1077 goto out;
1015 } 1078 }
1016 pr_debug("DEVICE: moving '%s' to '%s'\n", dev->bus_id, 1079 pr_debug("DEVICE: moving '%s' to '%s'\n", dev->bus_id,
1017 new_parent->bus_id); 1080 new_parent ? new_parent->bus_id : "<NULL>");
1018 error = kobject_move(&dev->kobj, &new_parent->kobj); 1081 error = kobject_move(&dev->kobj, new_parent_kobj);
1019 if (error) { 1082 if (error) {
1020 put_device(new_parent); 1083 put_device(new_parent);
1021 goto out; 1084 goto out;
@@ -1024,7 +1087,8 @@ int device_move(struct device *dev, struct device *new_parent)
1024 dev->parent = new_parent; 1087 dev->parent = new_parent;
1025 if (old_parent) 1088 if (old_parent)
1026 klist_remove(&dev->knode_parent); 1089 klist_remove(&dev->knode_parent);
1027 klist_add_tail(&dev->knode_parent, &new_parent->klist_children); 1090 if (new_parent)
1091 klist_add_tail(&dev->knode_parent, &new_parent->klist_children);
1028 if (!dev->class) 1092 if (!dev->class)
1029 goto out_put; 1093 goto out_put;
1030 error = device_move_class_links(dev, old_parent, new_parent); 1094 error = device_move_class_links(dev, old_parent, new_parent);
@@ -1032,7 +1096,8 @@ int device_move(struct device *dev, struct device *new_parent)
1032 /* We ignore errors on cleanup since we're hosed anyway... */ 1096 /* We ignore errors on cleanup since we're hosed anyway... */
1033 device_move_class_links(dev, new_parent, old_parent); 1097 device_move_class_links(dev, new_parent, old_parent);
1034 if (!kobject_move(&dev->kobj, &old_parent->kobj)) { 1098 if (!kobject_move(&dev->kobj, &old_parent->kobj)) {
1035 klist_remove(&dev->knode_parent); 1099 if (new_parent)
1100 klist_remove(&dev->knode_parent);
1036 if (old_parent) 1101 if (old_parent)
1037 klist_add_tail(&dev->knode_parent, 1102 klist_add_tail(&dev->knode_parent,
1038 &old_parent->klist_children); 1103 &old_parent->klist_children);
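
A minimal sketch of the relaxed device_move() contract above (not part of the patch): @new_parent may now be NULL, in which case the device is reparented to the default location chosen by get_device_parent(). The caller example_reparent() is hypothetical.

	static int example_reparent(struct device *dev, struct device *new_parent)
	{
		int err;

		/* new_parent may be NULL: the device then moves to the
		 * default parent chosen by get_device_parent(). */
		err = device_move(dev, new_parent);
		if (err)
			dev_err(dev, "device_move failed: %d\n", err);
		return err;
	}
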
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 510e7884975f..6a48824e43ff 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -86,8 +86,12 @@ static void driver_sysfs_remove(struct device *dev)
86 */ 86 */
87int device_bind_driver(struct device *dev) 87int device_bind_driver(struct device *dev)
88{ 88{
89 driver_bound(dev); 89 int ret;
90 return driver_sysfs_add(dev); 90
91 ret = driver_sysfs_add(dev);
92 if (!ret)
93 driver_bound(dev);
94 return ret;
91} 95}
92 96
93struct stupid_thread_structure { 97struct stupid_thread_structure {
@@ -108,6 +112,7 @@ static int really_probe(void *void_data)
108 atomic_inc(&probe_count); 112 atomic_inc(&probe_count);
109 pr_debug("%s: Probing driver %s with device %s\n", 113 pr_debug("%s: Probing driver %s with device %s\n",
110 drv->bus->name, drv->name, dev->bus_id); 114 drv->bus->name, drv->name, dev->bus_id);
115 WARN_ON(!list_empty(&dev->devres_head));
111 116
112 dev->driver = drv; 117 dev->driver = drv;
113 if (driver_sysfs_add(dev)) { 118 if (driver_sysfs_add(dev)) {
@@ -133,21 +138,21 @@ static int really_probe(void *void_data)
133 goto done; 138 goto done;
134 139
135probe_failed: 140probe_failed:
141 devres_release_all(dev);
136 driver_sysfs_remove(dev); 142 driver_sysfs_remove(dev);
137 dev->driver = NULL; 143 dev->driver = NULL;
138 144
139 if (ret == -ENODEV || ret == -ENXIO) { 145 if (ret != -ENODEV && ret != -ENXIO) {
140 /* Driver matched, but didn't support device
141 * or device not found.
142 * Not an error; keep going.
143 */
144 ret = 0;
145 } else {
146 /* driver matched but the probe failed */ 146 /* driver matched but the probe failed */
147 printk(KERN_WARNING 147 printk(KERN_WARNING
148 "%s: probe of %s failed with error %d\n", 148 "%s: probe of %s failed with error %d\n",
149 drv->name, dev->bus_id, ret); 149 drv->name, dev->bus_id, ret);
150 } 150 }
151 /*
152 * Ignore errors returned by ->probe so that the next driver can try
153 * its luck.
154 */
155 ret = 0;
151done: 156done:
152 kfree(data); 157 kfree(data);
153 atomic_dec(&probe_count); 158 atomic_dec(&probe_count);
@@ -324,6 +329,7 @@ static void __device_release_driver(struct device * dev)
324 dev->bus->remove(dev); 329 dev->bus->remove(dev);
325 else if (drv->remove) 330 else if (drv->remove)
326 drv->remove(dev); 331 drv->remove(dev);
332 devres_release_all(dev);
327 dev->driver = NULL; 333 dev->driver = NULL;
328 put_driver(drv); 334 put_driver(drv);
329 } 335 }
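
The probe-error convention enforced by really_probe() above can be summarized with a hypothetical bus probe routine (a sketch, not part of the patch; example_hw_present() and example_hw_init() are assumed helpers):

	static int example_probe(struct device *dev)
	{
		if (!example_hw_present(dev))	/* assumed hardware check */
			return -ENODEV;		/* device not there: silent */

		if (example_hw_init(dev))	/* assumed init helper */
			return -EIO;		/* logged as a failed probe */

		/* Whatever is returned, really_probe() now reports 0 so
		 * that other matching drivers may still be tried. */
		return 0;
	}
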
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
new file mode 100644
index 000000000000..e177c9533b6c
--- /dev/null
+++ b/drivers/base/devres.c
@@ -0,0 +1,644 @@
1/*
2 * drivers/base/devres.c - device resource management
3 *
4 * Copyright (c) 2006 SUSE Linux Products GmbH
5 * Copyright (c) 2006 Tejun Heo <teheo@suse.de>
6 *
7 * This file is released under the GPLv2.
8 */
9
10#include <linux/device.h>
11#include <linux/module.h>
12
13struct devres_node {
14 struct list_head entry;
15 dr_release_t release;
16#ifdef CONFIG_DEBUG_DEVRES
17 const char *name;
18 size_t size;
19#endif
20};
21
22struct devres {
23 struct devres_node node;
24 /* -- 3 pointers */
25 unsigned long long data[]; /* guarantee ull alignment */
26};
27
28struct devres_group {
29 struct devres_node node[2];
30 void *id;
31 int color;
32 /* -- 8 pointers */
33};
34
35#ifdef CONFIG_DEBUG_DEVRES
36static int log_devres = 0;
37module_param_named(log, log_devres, int, S_IRUGO | S_IWUSR);
38
39static void set_node_dbginfo(struct devres_node *node, const char *name,
40 size_t size)
41{
42 node->name = name;
43 node->size = size;
44}
45
46static void devres_log(struct device *dev, struct devres_node *node,
47 const char *op)
48{
49 if (unlikely(log_devres))
50 dev_printk(KERN_ERR, dev, "DEVRES %3s %p %s (%lu bytes)\n",
51 op, node, node->name, (unsigned long)node->size);
52}
53#else /* CONFIG_DEBUG_DEVRES */
54#define set_node_dbginfo(node, n, s) do {} while (0)
55#define devres_log(dev, node, op) do {} while (0)
56#endif /* CONFIG_DEBUG_DEVRES */
57
58/*
59 * Release functions for devres group. These callbacks are used only
60 * for identification.
61 */
62static void group_open_release(struct device *dev, void *res)
63{
64 /* noop */
65}
66
67static void group_close_release(struct device *dev, void *res)
68{
69 /* noop */
70}
71
72static struct devres_group * node_to_group(struct devres_node *node)
73{
74 if (node->release == &group_open_release)
75 return container_of(node, struct devres_group, node[0]);
76 if (node->release == &group_close_release)
77 return container_of(node, struct devres_group, node[1]);
78 return NULL;
79}
80
81static __always_inline struct devres * alloc_dr(dr_release_t release,
82 size_t size, gfp_t gfp)
83{
84 size_t tot_size = sizeof(struct devres) + size;
85 struct devres *dr;
86
87 dr = kmalloc_track_caller(tot_size, gfp);
88 if (unlikely(!dr))
89 return NULL;
90
91 memset(dr, 0, tot_size);
92 INIT_LIST_HEAD(&dr->node.entry);
93 dr->node.release = release;
94 return dr;
95}
96
97static void add_dr(struct device *dev, struct devres_node *node)
98{
99 devres_log(dev, node, "ADD");
100 BUG_ON(!list_empty(&node->entry));
101 list_add_tail(&node->entry, &dev->devres_head);
102}
103
104/**
105 * devres_alloc - Allocate device resource data
106 * @release: Release function devres will be associated with
107 * @size: Allocation size
108 * @gfp: Allocation flags
109 *
 110 * Allocate devres of @size bytes. The allocated area is zeroed, then
111 * associated with @release. The returned pointer can be passed to
112 * other devres_*() functions.
113 *
114 * RETURNS:
115 * Pointer to allocated devres on success, NULL on failure.
116 */
117#ifdef CONFIG_DEBUG_DEVRES
118void * __devres_alloc(dr_release_t release, size_t size, gfp_t gfp,
119 const char *name)
120{
121 struct devres *dr;
122
123 dr = alloc_dr(release, size, gfp);
124 if (unlikely(!dr))
125 return NULL;
126 set_node_dbginfo(&dr->node, name, size);
127 return dr->data;
128}
129EXPORT_SYMBOL_GPL(__devres_alloc);
130#else
131void * devres_alloc(dr_release_t release, size_t size, gfp_t gfp)
132{
133 struct devres *dr;
134
135 dr = alloc_dr(release, size, gfp);
136 if (unlikely(!dr))
137 return NULL;
138 return dr->data;
139}
140EXPORT_SYMBOL_GPL(devres_alloc);
141#endif
142
143/**
144 * devres_free - Free device resource data
145 * @res: Pointer to devres data to free
146 *
147 * Free devres created with devres_alloc().
148 */
149void devres_free(void *res)
150{
151 if (res) {
152 struct devres *dr = container_of(res, struct devres, data);
153
154 BUG_ON(!list_empty(&dr->node.entry));
155 kfree(dr);
156 }
157}
158EXPORT_SYMBOL_GPL(devres_free);
159
160/**
161 * devres_add - Register device resource
162 * @dev: Device to add resource to
163 * @res: Resource to register
164 *
165 * Register devres @res to @dev. @res should have been allocated
166 * using devres_alloc(). On driver detach, the associated release
167 * function will be invoked and devres will be freed automatically.
168 */
169void devres_add(struct device *dev, void *res)
170{
171 struct devres *dr = container_of(res, struct devres, data);
172 unsigned long flags;
173
174 spin_lock_irqsave(&dev->devres_lock, flags);
175 add_dr(dev, &dr->node);
176 spin_unlock_irqrestore(&dev->devres_lock, flags);
177}
178EXPORT_SYMBOL_GPL(devres_add);
179
180static struct devres *find_dr(struct device *dev, dr_release_t release,
181 dr_match_t match, void *match_data)
182{
183 struct devres_node *node;
184
185 list_for_each_entry_reverse(node, &dev->devres_head, entry) {
186 struct devres *dr = container_of(node, struct devres, node);
187
188 if (node->release != release)
189 continue;
190 if (match && !match(dev, dr->data, match_data))
191 continue;
192 return dr;
193 }
194
195 return NULL;
196}
197
198/**
199 * devres_find - Find device resource
200 * @dev: Device to lookup resource from
201 * @release: Look for resources associated with this release function
202 * @match: Match function (optional)
203 * @match_data: Data for the match function
204 *
205 * Find the latest devres of @dev which is associated with @release
206 * and for which @match returns 1. If @match is NULL, it's considered
207 * to match all.
208 *
209 * RETURNS:
210 * Pointer to found devres, NULL if not found.
211 */
212void * devres_find(struct device *dev, dr_release_t release,
213 dr_match_t match, void *match_data)
214{
215 struct devres *dr;
216 unsigned long flags;
217
218 spin_lock_irqsave(&dev->devres_lock, flags);
219 dr = find_dr(dev, release, match, match_data);
220 spin_unlock_irqrestore(&dev->devres_lock, flags);
221
222 if (dr)
223 return dr->data;
224 return NULL;
225}
226EXPORT_SYMBOL_GPL(devres_find);
227
228/**
229 * devres_get - Find devres, if non-existent, add one atomically
230 * @dev: Device to lookup or add devres for
231 * @new_res: Pointer to new initialized devres to add if not found
232 * @match: Match function (optional)
233 * @match_data: Data for the match function
234 *
235 * Find the latest devres of @dev which has the same release function
 236 * as @new_res and for which @match returns 1. If found, @new_res is
237 * freed; otherwise, @new_res is added atomically.
238 *
239 * RETURNS:
240 * Pointer to found or added devres.
241 */
242void * devres_get(struct device *dev, void *new_res,
243 dr_match_t match, void *match_data)
244{
245 struct devres *new_dr = container_of(new_res, struct devres, data);
246 struct devres *dr;
247 unsigned long flags;
248
249 spin_lock_irqsave(&dev->devres_lock, flags);
250 dr = find_dr(dev, new_dr->node.release, match, match_data);
251 if (!dr) {
252 add_dr(dev, &new_dr->node);
253 dr = new_dr;
254 new_dr = NULL;
255 }
256 spin_unlock_irqrestore(&dev->devres_lock, flags);
257 devres_free(new_dr);
258
259 return dr->data;
260}
261EXPORT_SYMBOL_GPL(devres_get);
262
263/**
264 * devres_remove - Find a device resource and remove it
265 * @dev: Device to find resource from
266 * @release: Look for resources associated with this release function
267 * @match: Match function (optional)
268 * @match_data: Data for the match function
269 *
270 * Find the latest devres of @dev associated with @release and for
271 * which @match returns 1. If @match is NULL, it's considered to
272 * match all. If found, the resource is removed atomically and
273 * returned.
274 *
275 * RETURNS:
276 * Pointer to removed devres on success, NULL if not found.
277 */
278void * devres_remove(struct device *dev, dr_release_t release,
279 dr_match_t match, void *match_data)
280{
281 struct devres *dr;
282 unsigned long flags;
283
284 spin_lock_irqsave(&dev->devres_lock, flags);
285 dr = find_dr(dev, release, match, match_data);
286 if (dr) {
287 list_del_init(&dr->node.entry);
288 devres_log(dev, &dr->node, "REM");
289 }
290 spin_unlock_irqrestore(&dev->devres_lock, flags);
291
292 if (dr)
293 return dr->data;
294 return NULL;
295}
296EXPORT_SYMBOL_GPL(devres_remove);
297
298/**
299 * devres_destroy - Find a device resource and destroy it
300 * @dev: Device to find resource from
301 * @release: Look for resources associated with this release function
302 * @match: Match function (optional)
303 * @match_data: Data for the match function
304 *
305 * Find the latest devres of @dev associated with @release and for
306 * which @match returns 1. If @match is NULL, it's considered to
307 * match all. If found, the resource is removed atomically and freed.
308 *
309 * RETURNS:
310 * 0 if devres is found and freed, -ENOENT if not found.
311 */
312int devres_destroy(struct device *dev, dr_release_t release,
313 dr_match_t match, void *match_data)
314{
315 void *res;
316
317 res = devres_remove(dev, release, match, match_data);
318 if (unlikely(!res))
319 return -ENOENT;
320
321 devres_free(res);
322 return 0;
323}
324EXPORT_SYMBOL_GPL(devres_destroy);
325
326static int remove_nodes(struct device *dev,
327 struct list_head *first, struct list_head *end,
328 struct list_head *todo)
329{
330 int cnt = 0, nr_groups = 0;
331 struct list_head *cur;
332
333 /* First pass - move normal devres entries to @todo and clear
334 * devres_group colors.
335 */
336 cur = first;
337 while (cur != end) {
338 struct devres_node *node;
339 struct devres_group *grp;
340
341 node = list_entry(cur, struct devres_node, entry);
342 cur = cur->next;
343
344 grp = node_to_group(node);
345 if (grp) {
346 /* clear color of group markers in the first pass */
347 grp->color = 0;
348 nr_groups++;
349 } else {
350 /* regular devres entry */
351 if (&node->entry == first)
352 first = first->next;
353 list_move_tail(&node->entry, todo);
354 cnt++;
355 }
356 }
357
358 if (!nr_groups)
359 return cnt;
360
361 /* Second pass - Scan groups and color them. A group gets
362 * color value of two iff the group is wholly contained in
363 * [cur, end). That is, for a closed group, both opening and
364 * closing markers should be in the range, while just the
365 * opening marker is enough for an open group.
366 */
367 cur = first;
368 while (cur != end) {
369 struct devres_node *node;
370 struct devres_group *grp;
371
372 node = list_entry(cur, struct devres_node, entry);
373 cur = cur->next;
374
375 grp = node_to_group(node);
376 BUG_ON(!grp || list_empty(&grp->node[0].entry));
377
378 grp->color++;
379 if (list_empty(&grp->node[1].entry))
380 grp->color++;
381
382 BUG_ON(grp->color <= 0 || grp->color > 2);
383 if (grp->color == 2) {
384 /* No need to update cur or end. The removed
385 * nodes are always before both.
386 */
387 list_move_tail(&grp->node[0].entry, todo);
388 list_del_init(&grp->node[1].entry);
389 }
390 }
391
392 return cnt;
393}
394
395static int release_nodes(struct device *dev, struct list_head *first,
396 struct list_head *end, unsigned long flags)
397{
398 LIST_HEAD(todo);
399 int cnt;
400 struct devres *dr, *tmp;
401
402 cnt = remove_nodes(dev, first, end, &todo);
403
404 spin_unlock_irqrestore(&dev->devres_lock, flags);
405
406 /* Release. Note that both devres and devres_group are
407 * handled as devres in the following loop. This is safe.
408 */
409 list_for_each_entry_safe_reverse(dr, tmp, &todo, node.entry) {
410 devres_log(dev, &dr->node, "REL");
411 dr->node.release(dev, dr->data);
412 kfree(dr);
413 }
414
415 return cnt;
416}
417
418/**
419 * devres_release_all - Release all resources
420 * @dev: Device to release resources for
421 *
422 * Release all resources associated with @dev. This function is
423 * called on driver detach.
424 */
425int devres_release_all(struct device *dev)
426{
427 unsigned long flags;
428
429 spin_lock_irqsave(&dev->devres_lock, flags);
430 return release_nodes(dev, dev->devres_head.next, &dev->devres_head,
431 flags);
432}
433
434/**
435 * devres_open_group - Open a new devres group
436 * @dev: Device to open devres group for
437 * @id: Separator ID
438 * @gfp: Allocation flags
439 *
440 * Open a new devres group for @dev with @id. For @id, using a
441 * pointer to an object which won't be used for another group is
 442 * recommended. If @id is NULL, an address-wise unique ID is created.
443 *
444 * RETURNS:
445 * ID of the new group, NULL on failure.
446 */
447void * devres_open_group(struct device *dev, void *id, gfp_t gfp)
448{
449 struct devres_group *grp;
450 unsigned long flags;
451
452 grp = kmalloc(sizeof(*grp), gfp);
453 if (unlikely(!grp))
454 return NULL;
455
456 grp->node[0].release = &group_open_release;
457 grp->node[1].release = &group_close_release;
458 INIT_LIST_HEAD(&grp->node[0].entry);
459 INIT_LIST_HEAD(&grp->node[1].entry);
460 set_node_dbginfo(&grp->node[0], "grp<", 0);
461 set_node_dbginfo(&grp->node[1], "grp>", 0);
462 grp->id = grp;
463 if (id)
464 grp->id = id;
465
466 spin_lock_irqsave(&dev->devres_lock, flags);
467 add_dr(dev, &grp->node[0]);
468 spin_unlock_irqrestore(&dev->devres_lock, flags);
469 return grp->id;
470}
471EXPORT_SYMBOL_GPL(devres_open_group);
472
473/* Find devres group with ID @id. If @id is NULL, look for the latest. */
474static struct devres_group * find_group(struct device *dev, void *id)
475{
476 struct devres_node *node;
477
478 list_for_each_entry_reverse(node, &dev->devres_head, entry) {
479 struct devres_group *grp;
480
481 if (node->release != &group_open_release)
482 continue;
483
484 grp = container_of(node, struct devres_group, node[0]);
485
486 if (id) {
487 if (grp->id == id)
488 return grp;
489 } else if (list_empty(&grp->node[1].entry))
490 return grp;
491 }
492
493 return NULL;
494}
495
496/**
497 * devres_close_group - Close a devres group
498 * @dev: Device to close devres group for
499 * @id: ID of target group, can be NULL
500 *
501 * Close the group identified by @id. If @id is NULL, the latest open
502 * group is selected.
503 */
504void devres_close_group(struct device *dev, void *id)
505{
506 struct devres_group *grp;
507 unsigned long flags;
508
509 spin_lock_irqsave(&dev->devres_lock, flags);
510
511 grp = find_group(dev, id);
512 if (grp)
513 add_dr(dev, &grp->node[1]);
514 else
515 WARN_ON(1);
516
517 spin_unlock_irqrestore(&dev->devres_lock, flags);
518}
519EXPORT_SYMBOL_GPL(devres_close_group);
520
521/**
522 * devres_remove_group - Remove a devres group
523 * @dev: Device to remove group for
524 * @id: ID of target group, can be NULL
525 *
526 * Remove the group identified by @id. If @id is NULL, the latest
527 * open group is selected. Note that removing a group doesn't affect
528 * any other resources.
529 */
530void devres_remove_group(struct device *dev, void *id)
531{
532 struct devres_group *grp;
533 unsigned long flags;
534
535 spin_lock_irqsave(&dev->devres_lock, flags);
536
537 grp = find_group(dev, id);
538 if (grp) {
539 list_del_init(&grp->node[0].entry);
540 list_del_init(&grp->node[1].entry);
541 devres_log(dev, &grp->node[0], "REM");
542 } else
543 WARN_ON(1);
544
545 spin_unlock_irqrestore(&dev->devres_lock, flags);
546
547 kfree(grp);
548}
549EXPORT_SYMBOL_GPL(devres_remove_group);
550
551/**
552 * devres_release_group - Release resources in a devres group
553 * @dev: Device to release group for
554 * @id: ID of target group, can be NULL
555 *
556 * Release all resources in the group identified by @id. If @id is
557 * NULL, the latest open group is selected. The selected group and
558 * groups properly nested inside the selected group are removed.
559 *
560 * RETURNS:
561 * The number of released non-group resources.
562 */
563int devres_release_group(struct device *dev, void *id)
564{
565 struct devres_group *grp;
566 unsigned long flags;
567 int cnt = 0;
568
569 spin_lock_irqsave(&dev->devres_lock, flags);
570
571 grp = find_group(dev, id);
572 if (grp) {
573 struct list_head *first = &grp->node[0].entry;
574 struct list_head *end = &dev->devres_head;
575
576 if (!list_empty(&grp->node[1].entry))
577 end = grp->node[1].entry.next;
578
579 cnt = release_nodes(dev, first, end, flags);
580 } else {
581 WARN_ON(1);
582 spin_unlock_irqrestore(&dev->devres_lock, flags);
583 }
584
585 return cnt;
586}
587EXPORT_SYMBOL_GPL(devres_release_group);
588
589/*
590 * Managed kzalloc/kfree
591 */
592static void devm_kzalloc_release(struct device *dev, void *res)
593{
594 /* noop */
595}
596
597static int devm_kzalloc_match(struct device *dev, void *res, void *data)
598{
599 return res == data;
600}
601
602/**
603 * devm_kzalloc - Managed kzalloc
604 * @dev: Device to allocate memory for
605 * @size: Allocation size
606 * @gfp: Allocation gfp flags
607 *
608 * Managed kzalloc. Memory allocated with this function is
609 * automatically freed on driver detach. Like all other devres
610 * resources, guaranteed alignment is unsigned long long.
611 *
612 * RETURNS:
613 * Pointer to allocated memory on success, NULL on failure.
614 */
615void * devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
616{
617 struct devres *dr;
618
619 /* use raw alloc_dr for kmalloc caller tracing */
620 dr = alloc_dr(devm_kzalloc_release, size, gfp);
621 if (unlikely(!dr))
622 return NULL;
623
624 set_node_dbginfo(&dr->node, "devm_kzalloc_release", size);
625 devres_add(dev, dr->data);
626 return dr->data;
627}
628EXPORT_SYMBOL_GPL(devm_kzalloc);
629
630/**
631 * devm_kfree - Managed kfree
632 * @dev: Device this memory belongs to
633 * @p: Memory to free
634 *
 635 * Free memory allocated with devm_kzalloc().
636 */
637void devm_kfree(struct device *dev, void *p)
638{
639 int rc;
640
641 rc = devres_destroy(dev, devm_kzalloc_release, devm_kzalloc_match, p);
642 WARN_ON(rc);
643}
644EXPORT_SYMBOL_GPL(devm_kfree);
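
A short usage sketch for the new devres API (illustrative only; struct example_state, example_hw_acquire() and example_hw_drop() are hypothetical, and cleanup relies on the devres_release_all() calls wired into dd.c above):

	struct example_state {		/* hypothetical per-device state */
		int irq;
	};

	static void example_state_release(struct device *dev, void *res)
	{
		example_hw_drop(dev);	/* hypothetical teardown */
	}

	static int example_probe(struct device *dev)
	{
		struct example_state *st;
		void *res;

		/* Managed allocation: freed automatically on detach. */
		st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL);
		if (!st)
			return -ENOMEM;

		/* Custom managed resource: example_state_release() runs
		 * when devres_release_all() is called on driver detach or
		 * on probe failure. */
		res = devres_alloc(example_state_release, 0, GFP_KERNEL);
		if (!res)
			return -ENOMEM;
		if (example_hw_acquire(dev)) {	/* hypothetical setup */
			devres_free(res);	/* not added yet: free by hand */
			return -EIO;
		}
		devres_add(dev, res);		/* now owned by the device */
		return 0;
	}
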
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
new file mode 100644
index 000000000000..ca9186f70a69
--- /dev/null
+++ b/drivers/base/dma-mapping.c
@@ -0,0 +1,218 @@
1/*
2 * drivers/base/dma-mapping.c - arch-independent dma-mapping routines
3 *
4 * Copyright (c) 2006 SUSE Linux Products GmbH
5 * Copyright (c) 2006 Tejun Heo <teheo@suse.de>
6 *
7 * This file is released under the GPLv2.
8 */
9
10#include <linux/dma-mapping.h>
11
12/*
13 * Managed DMA API
14 */
15struct dma_devres {
16 size_t size;
17 void *vaddr;
18 dma_addr_t dma_handle;
19};
20
21static void dmam_coherent_release(struct device *dev, void *res)
22{
23 struct dma_devres *this = res;
24
25 dma_free_coherent(dev, this->size, this->vaddr, this->dma_handle);
26}
27
28static void dmam_noncoherent_release(struct device *dev, void *res)
29{
30 struct dma_devres *this = res;
31
32 dma_free_noncoherent(dev, this->size, this->vaddr, this->dma_handle);
33}
34
35static int dmam_match(struct device *dev, void *res, void *match_data)
36{
37 struct dma_devres *this = res, *match = match_data;
38
39 if (this->vaddr == match->vaddr) {
40 WARN_ON(this->size != match->size ||
41 this->dma_handle != match->dma_handle);
42 return 1;
43 }
44 return 0;
45}
46
47/**
48 * dmam_alloc_coherent - Managed dma_alloc_coherent()
49 * @dev: Device to allocate coherent memory for
50 * @size: Size of allocation
51 * @dma_handle: Out argument for allocated DMA handle
52 * @gfp: Allocation flags
53 *
54 * Managed dma_alloc_coherent(). Memory allocated using this function
55 * will be automatically released on driver detach.
56 *
57 * RETURNS:
58 * Pointer to allocated memory on success, NULL on failure.
59 */
60void * dmam_alloc_coherent(struct device *dev, size_t size,
61 dma_addr_t *dma_handle, gfp_t gfp)
62{
63 struct dma_devres *dr;
64 void *vaddr;
65
66 dr = devres_alloc(dmam_coherent_release, sizeof(*dr), gfp);
67 if (!dr)
68 return NULL;
69
70 vaddr = dma_alloc_coherent(dev, size, dma_handle, gfp);
71 if (!vaddr) {
72 devres_free(dr);
73 return NULL;
74 }
75
76 dr->vaddr = vaddr;
77 dr->dma_handle = *dma_handle;
78 dr->size = size;
79
80 devres_add(dev, dr);
81
82 return vaddr;
83}
84EXPORT_SYMBOL(dmam_alloc_coherent);
85
86/**
87 * dmam_free_coherent - Managed dma_free_coherent()
88 * @dev: Device to free coherent memory for
89 * @size: Size of allocation
90 * @vaddr: Virtual address of the memory to free
91 * @dma_handle: DMA handle of the memory to free
92 *
93 * Managed dma_free_coherent().
94 */
95void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
96 dma_addr_t dma_handle)
97{
98 struct dma_devres match_data = { size, vaddr, dma_handle };
99
100 dma_free_coherent(dev, size, vaddr, dma_handle);
101 WARN_ON(devres_destroy(dev, dmam_coherent_release, dmam_match,
102 &match_data));
103}
104EXPORT_SYMBOL(dmam_free_coherent);
105
106/**
 107 * dmam_alloc_noncoherent - Managed dma_alloc_noncoherent()
 108 * @dev: Device to allocate noncoherent memory for
109 * @size: Size of allocation
110 * @dma_handle: Out argument for allocated DMA handle
111 * @gfp: Allocation flags
112 *
 113 * Managed dma_alloc_noncoherent(). Memory allocated using this
114 * function will be automatically released on driver detach.
115 *
116 * RETURNS:
117 * Pointer to allocated memory on success, NULL on failure.
118 */
119void *dmam_alloc_noncoherent(struct device *dev, size_t size,
120 dma_addr_t *dma_handle, gfp_t gfp)
121{
122 struct dma_devres *dr;
123 void *vaddr;
124
125 dr = devres_alloc(dmam_noncoherent_release, sizeof(*dr), gfp);
126 if (!dr)
127 return NULL;
128
129 vaddr = dma_alloc_noncoherent(dev, size, dma_handle, gfp);
130 if (!vaddr) {
131 devres_free(dr);
132 return NULL;
133 }
134
135 dr->vaddr = vaddr;
136 dr->dma_handle = *dma_handle;
137 dr->size = size;
138
139 devres_add(dev, dr);
140
141 return vaddr;
142}
143EXPORT_SYMBOL(dmam_alloc_noncoherent);
144
145/**
 146 * dmam_free_noncoherent - Managed dma_free_noncoherent()
147 * @dev: Device to free noncoherent memory for
148 * @size: Size of allocation
149 * @vaddr: Virtual address of the memory to free
150 * @dma_handle: DMA handle of the memory to free
151 *
152 * Managed dma_free_noncoherent().
153 */
154void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
155 dma_addr_t dma_handle)
156{
157 struct dma_devres match_data = { size, vaddr, dma_handle };
158
159 dma_free_noncoherent(dev, size, vaddr, dma_handle);
 160 WARN_ON(devres_destroy(dev, dmam_noncoherent_release, dmam_match,
161 &match_data));
162}
163EXPORT_SYMBOL(dmam_free_noncoherent);
164
165#ifdef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
166
167static void dmam_coherent_decl_release(struct device *dev, void *res)
168{
169 dma_release_declared_memory(dev);
170}
171
172/**
173 * dmam_declare_coherent_memory - Managed dma_declare_coherent_memory()
174 * @dev: Device to declare coherent memory for
175 * @bus_addr: Bus address of coherent memory to be declared
176 * @device_addr: Device address of coherent memory to be declared
177 * @size: Size of coherent memory to be declared
178 * @flags: Flags
179 *
180 * Managed dma_declare_coherent_memory().
181 *
182 * RETURNS:
183 * 0 on success, -errno on failure.
184 */
185int dmam_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
186 dma_addr_t device_addr, size_t size, int flags)
187{
188 void *res;
189 int rc;
190
191 res = devres_alloc(dmam_coherent_decl_release, 0, GFP_KERNEL);
192 if (!res)
193 return -ENOMEM;
194
195 rc = dma_declare_coherent_memory(dev, bus_addr, device_addr, size,
196 flags);
197 if (rc == 0)
198 devres_add(dev, res);
199 else
200 devres_free(res);
201
202 return rc;
203}
204EXPORT_SYMBOL(dmam_declare_coherent_memory);
205
206/**
207 * dmam_release_declared_memory - Managed dma_release_declared_memory().
208 * @dev: Device to release declared coherent memory for
209 *
210 * Managed dmam_release_declared_memory().
211 */
212void dmam_release_declared_memory(struct device *dev)
213{
214 WARN_ON(devres_destroy(dev, dmam_coherent_decl_release, NULL, NULL));
215}
216EXPORT_SYMBOL(dmam_release_declared_memory);
217
218#endif
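
A sketch of the managed DMA API in use (not part of the patch; example_hw_set_ring() is a hypothetical helper). The coherent buffer is released by devres on driver detach, so the normal path needs no explicit dma_free_coherent():

	static int example_dma_probe(struct device *dev)
	{
		dma_addr_t handle;
		void *buf;

		buf = dmam_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		example_hw_set_ring(dev, handle);	/* hypothetical */
		return 0;
	}
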
diff --git a/drivers/base/dmapool.c b/drivers/base/dmapool.c
index f95d50277274..cd467c9f33b3 100644
--- a/drivers/base/dmapool.c
+++ b/drivers/base/dmapool.c
@@ -415,8 +415,67 @@ dma_pool_free (struct dma_pool *pool, void *vaddr, dma_addr_t dma)
415 spin_unlock_irqrestore (&pool->lock, flags); 415 spin_unlock_irqrestore (&pool->lock, flags);
416} 416}
417 417
418/*
419 * Managed DMA pool
420 */
421static void dmam_pool_release(struct device *dev, void *res)
422{
423 struct dma_pool *pool = *(struct dma_pool **)res;
424
425 dma_pool_destroy(pool);
426}
427
428static int dmam_pool_match(struct device *dev, void *res, void *match_data)
429{
430 return *(struct dma_pool **)res == match_data;
431}
432
433/**
434 * dmam_pool_create - Managed dma_pool_create()
435 * @name: name of pool, for diagnostics
436 * @dev: device that will be doing the DMA
437 * @size: size of the blocks in this pool.
438 * @align: alignment requirement for blocks; must be a power of two
439 * @allocation: returned blocks won't cross this boundary (or zero)
440 *
441 * Managed dma_pool_create(). DMA pool created with this function is
442 * automatically destroyed on driver detach.
443 */
444struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
445 size_t size, size_t align, size_t allocation)
446{
447 struct dma_pool **ptr, *pool;
448
449 ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
450 if (!ptr)
451 return NULL;
452
453 pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
454 if (pool)
455 devres_add(dev, ptr);
456 else
457 devres_free(ptr);
458
459 return pool;
460}
461
462/**
463 * dmam_pool_destroy - Managed dma_pool_destroy()
464 * @pool: dma pool that will be destroyed
465 *
466 * Managed dma_pool_destroy().
467 */
468void dmam_pool_destroy(struct dma_pool *pool)
469{
470 struct device *dev = pool->dev;
471
472 dma_pool_destroy(pool);
473 WARN_ON(devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool));
474}
418 475
419EXPORT_SYMBOL (dma_pool_create); 476EXPORT_SYMBOL (dma_pool_create);
420EXPORT_SYMBOL (dma_pool_destroy); 477EXPORT_SYMBOL (dma_pool_destroy);
421EXPORT_SYMBOL (dma_pool_alloc); 478EXPORT_SYMBOL (dma_pool_alloc);
422EXPORT_SYMBOL (dma_pool_free); 479EXPORT_SYMBOL (dma_pool_free);
480EXPORT_SYMBOL (dmam_pool_create);
481EXPORT_SYMBOL (dmam_pool_destroy);
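
Likewise for the managed pool (a sketch; example_use_block() is hypothetical). The pool itself is destroyed on driver detach, but individual blocks must still be returned with dma_pool_free() before then:

	static int example_pool_probe(struct device *dev)
	{
		struct dma_pool *pool;
		dma_addr_t handle;
		void *block;

		pool = dmam_pool_create("example", dev, 64, 8, 0);
		if (!pool)
			return -ENOMEM;

		block = dma_pool_alloc(pool, GFP_KERNEL, &handle);
		if (!block)
			return -ENOMEM;	/* pool still freed by devres */

		example_use_block(dev, block, handle);	/* hypothetical */
		dma_pool_free(pool, block, handle);
		return 0;
	}
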
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 64558f45e6bc..c0a979a5074b 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -35,7 +35,7 @@ enum {
35 FW_STATUS_READY_NOHOTPLUG, 35 FW_STATUS_READY_NOHOTPLUG,
36}; 36};
37 37
38static int loading_timeout = 10; /* In seconds */ 38static int loading_timeout = 60; /* In seconds */
39 39
40/* fw_lock could be moved to 'struct firmware_priv' but since it is just 40/* fw_lock could be moved to 'struct firmware_priv' but since it is just
41 * guarding for corner cases a global lock should be OK */ 41 * guarding for corner cases a global lock should be OK */
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index f9c903ba9fcd..30480f6f2af2 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -611,8 +611,15 @@ EXPORT_SYMBOL_GPL(platform_bus_type);
611 611
612int __init platform_bus_init(void) 612int __init platform_bus_init(void)
613{ 613{
614 device_register(&platform_bus); 614 int error;
615 return bus_register(&platform_bus_type); 615
616 error = device_register(&platform_bus);
617 if (error)
618 return error;
619 error = bus_register(&platform_bus_type);
620 if (error)
621 device_unregister(&platform_bus);
622 return error;
616} 623}
617 624
618#ifndef ARCH_HAS_DMA_GET_REQUIRED_MASK 625#ifndef ARCH_HAS_DMA_GET_REQUIRED_MASK
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 9e43e39dc35c..d08bb4ee1307 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -610,6 +610,13 @@ config HVC_RTAS
610 help 610 help
611 IBM Console device driver which makes use of RTAS 611 IBM Console device driver which makes use of RTAS
612 612
613config HVC_BEAT
614 bool "Toshiba's Beat Hypervisor Console support"
615 depends on PPC_CELLEB
616 select HVC_DRIVER
617 help
618 Toshiba's Cell Reference Set Beat Console device driver
619
613config HVCS 620config HVCS
614 tristate "IBM Hypervisor Virtual Console Server support" 621 tristate "IBM Hypervisor Virtual Console Server support"
615 depends on PPC_PSERIES 622 depends on PPC_PSERIES
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index fc110637ced6..ae8567cc529c 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -45,6 +45,7 @@ obj-$(CONFIG_RIO) += rio/ generic_serial.o
45obj-$(CONFIG_HVC_CONSOLE) += hvc_vio.o hvsi.o 45obj-$(CONFIG_HVC_CONSOLE) += hvc_vio.o hvsi.o
46obj-$(CONFIG_HVC_ISERIES) += hvc_iseries.o 46obj-$(CONFIG_HVC_ISERIES) += hvc_iseries.o
47obj-$(CONFIG_HVC_RTAS) += hvc_rtas.o 47obj-$(CONFIG_HVC_RTAS) += hvc_rtas.o
48obj-$(CONFIG_HVC_BEAT) += hvc_beat.o
48obj-$(CONFIG_HVC_DRIVER) += hvc_console.o 49obj-$(CONFIG_HVC_DRIVER) += hvc_console.o
49obj-$(CONFIG_RAW_DRIVER) += raw.o 50obj-$(CONFIG_RAW_DRIVER) += raw.o
50obj-$(CONFIG_SGI_SNSC) += snsc.o snsc_event.o 51obj-$(CONFIG_SGI_SNSC) += snsc.o snsc_event.o
@@ -59,6 +60,8 @@ obj-$(CONFIG_BRIQ_PANEL) += briq_panel.o
59obj-$(CONFIG_PRINTER) += lp.o 60obj-$(CONFIG_PRINTER) += lp.o
60obj-$(CONFIG_TIPAR) += tipar.o 61obj-$(CONFIG_TIPAR) += tipar.o
61 62
63obj-$(CONFIG_APM_EMULATION) += apm-emulation.o
64
62obj-$(CONFIG_DTLK) += dtlk.o 65obj-$(CONFIG_DTLK) += dtlk.o
63obj-$(CONFIG_R3964) += n_r3964.o 66obj-$(CONFIG_R3964) += n_r3964.o
64obj-$(CONFIG_APPLICOM) += applicom.o 67obj-$(CONFIG_APPLICOM) += applicom.o
diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c
new file mode 100644
index 000000000000..179c7a3b6e75
--- /dev/null
+++ b/drivers/char/apm-emulation.c
@@ -0,0 +1,672 @@
1/*
2 * bios-less APM driver for ARM Linux
3 * Jamey Hicks <jamey@crl.dec.com>
4 * adapted from the APM BIOS driver for Linux by Stephen Rothwell (sfr@linuxcare.com)
5 *
6 * APM 1.2 Reference:
7 * Intel Corporation, Microsoft Corporation. Advanced Power Management
8 * (APM) BIOS Interface Specification, Revision 1.2, February 1996.
9 *
10 * [This document is available from Microsoft at:
11 * http://www.microsoft.com/hwdev/busbios/amp_12.htm]
12 */
13#include <linux/module.h>
14#include <linux/poll.h>
15#include <linux/slab.h>
16#include <linux/proc_fs.h>
17#include <linux/miscdevice.h>
18#include <linux/apm_bios.h>
19#include <linux/capability.h>
20#include <linux/sched.h>
21#include <linux/pm.h>
22#include <linux/apm-emulation.h>
23#include <linux/device.h>
24#include <linux/kernel.h>
25#include <linux/list.h>
26#include <linux/init.h>
27#include <linux/completion.h>
28#include <linux/kthread.h>
29#include <linux/delay.h>
30
31#include <asm/system.h>
32
33/*
34 * The apm_bios device is one of the misc char devices.
35 * This is its minor number.
36 */
37#define APM_MINOR_DEV 134
38
39/*
40 * See Documentation/Config.help for the configuration options.
41 *
42 * Various options can be changed at boot time as follows:
43 * (We allow underscores for compatibility with the modules code)
44 * apm=on/off enable/disable APM
45 */
46
47/*
48 * Maximum number of events stored
49 */
50#define APM_MAX_EVENTS 16
51
52struct apm_queue {
53 unsigned int event_head;
54 unsigned int event_tail;
55 apm_event_t events[APM_MAX_EVENTS];
56};
57
58/*
59 * The per-file APM data
60 */
61struct apm_user {
62 struct list_head list;
63
64 unsigned int suser: 1;
65 unsigned int writer: 1;
66 unsigned int reader: 1;
67
68 int suspend_result;
69 unsigned int suspend_state;
70#define SUSPEND_NONE 0 /* no suspend pending */
71#define SUSPEND_PENDING 1 /* suspend pending read */
72#define SUSPEND_READ 2 /* suspend read, pending ack */
73#define SUSPEND_ACKED 3 /* suspend acked */
74#define SUSPEND_WAIT 4 /* waiting for suspend */
75#define SUSPEND_DONE 5 /* suspend completed */
76
77 struct apm_queue queue;
78};
79
80/*
81 * Local variables
82 */
83static int suspends_pending;
84static int apm_disabled;
85static struct task_struct *kapmd_tsk;
86
87static DECLARE_WAIT_QUEUE_HEAD(apm_waitqueue);
88static DECLARE_WAIT_QUEUE_HEAD(apm_suspend_waitqueue);
89
90/*
91 * This is a list of everyone who has opened /dev/apm_bios
92 */
93static DECLARE_RWSEM(user_list_lock);
94static LIST_HEAD(apm_user_list);
95
96/*
97 * kapmd info. kapmd provides us a process context to handle
98 * "APM" events within - specifically necessary if we're going
99 * to be suspending the system.
100 */
101static DECLARE_WAIT_QUEUE_HEAD(kapmd_wait);
102static DEFINE_SPINLOCK(kapmd_queue_lock);
103static struct apm_queue kapmd_queue;
104
105static DEFINE_MUTEX(state_lock);
106
107static const char driver_version[] = "1.13"; /* no spaces */
108
109
110
111/*
112 * Compatibility cruft until the IPAQ people move over to the new
113 * interface.
114 */
115static void __apm_get_power_status(struct apm_power_info *info)
116{
117}
118
119/*
120 * This allows machines to provide their own "apm get power status" function.
121 */
122void (*apm_get_power_status)(struct apm_power_info *) = __apm_get_power_status;
123EXPORT_SYMBOL(apm_get_power_status);
124
125
126/*
127 * APM event queue management.
128 */
129static inline int queue_empty(struct apm_queue *q)
130{
131 return q->event_head == q->event_tail;
132}
133
134static inline apm_event_t queue_get_event(struct apm_queue *q)
135{
136 q->event_tail = (q->event_tail + 1) % APM_MAX_EVENTS;
137 return q->events[q->event_tail];
138}
139
140static void queue_add_event(struct apm_queue *q, apm_event_t event)
141{
142 q->event_head = (q->event_head + 1) % APM_MAX_EVENTS;
143 if (q->event_head == q->event_tail) {
144 static int notified;
145
146 if (notified++ == 0)
147 printk(KERN_ERR "apm: an event queue overflowed\n");
148 q->event_tail = (q->event_tail + 1) % APM_MAX_EVENTS;
149 }
150 q->events[q->event_head] = event;
151}
152
153static void queue_event(apm_event_t event)
154{
155 struct apm_user *as;
156
157 down_read(&user_list_lock);
158 list_for_each_entry(as, &apm_user_list, list) {
159 if (as->reader)
160 queue_add_event(&as->queue, event);
161 }
162 up_read(&user_list_lock);
163 wake_up_interruptible(&apm_waitqueue);
164}
165
166/*
167 * queue_suspend_event - queue an APM suspend event.
168 *
169 * Check that we're in a state where we can suspend. If not,
170 * return -EBUSY. Otherwise, queue an event to all "writer"
171 * users. If there are no "writer" users, return '1' to
172 * indicate that we can immediately suspend.
173 */
174static int queue_suspend_event(apm_event_t event, struct apm_user *sender)
175{
176 struct apm_user *as;
177 int ret = 1;
178
179 mutex_lock(&state_lock);
180 down_read(&user_list_lock);
181
182 /*
183 * If a thread is still processing, we can't suspend, so reject
184 * the request.
185 */
186 list_for_each_entry(as, &apm_user_list, list) {
187 if (as != sender && as->reader && as->writer && as->suser &&
188 as->suspend_state != SUSPEND_NONE) {
189 ret = -EBUSY;
190 goto out;
191 }
192 }
193
194 list_for_each_entry(as, &apm_user_list, list) {
195 if (as != sender && as->reader && as->writer && as->suser) {
196 as->suspend_state = SUSPEND_PENDING;
197 suspends_pending++;
198 queue_add_event(&as->queue, event);
199 ret = 0;
200 }
201 }
202 out:
203 up_read(&user_list_lock);
204 mutex_unlock(&state_lock);
205 wake_up_interruptible(&apm_waitqueue);
206 return ret;
207}
208
209static void apm_suspend(void)
210{
211 struct apm_user *as;
212 int err = pm_suspend(PM_SUSPEND_MEM);
213
214 /*
215 * Anyone on the APM queues will think we're still suspended.
216 * Send a message so everyone knows we're now awake again.
217 */
218 queue_event(APM_NORMAL_RESUME);
219
220 /*
221 * Finally, wake up anyone who is sleeping on the suspend.
222 */
223 mutex_lock(&state_lock);
224 down_read(&user_list_lock);
225 list_for_each_entry(as, &apm_user_list, list) {
226 if (as->suspend_state == SUSPEND_WAIT ||
227 as->suspend_state == SUSPEND_ACKED) {
228 as->suspend_result = err;
229 as->suspend_state = SUSPEND_DONE;
230 }
231 }
232 up_read(&user_list_lock);
233 mutex_unlock(&state_lock);
234
235 wake_up(&apm_suspend_waitqueue);
236}
237
238static ssize_t apm_read(struct file *fp, char __user *buf, size_t count, loff_t *ppos)
239{
240 struct apm_user *as = fp->private_data;
241 apm_event_t event;
242 int i = count, ret = 0;
243
244 if (count < sizeof(apm_event_t))
245 return -EINVAL;
246
247 if (queue_empty(&as->queue) && fp->f_flags & O_NONBLOCK)
248 return -EAGAIN;
249
250 wait_event_interruptible(apm_waitqueue, !queue_empty(&as->queue));
251
252 while ((i >= sizeof(event)) && !queue_empty(&as->queue)) {
253 event = queue_get_event(&as->queue);
254
255 ret = -EFAULT;
256 if (copy_to_user(buf, &event, sizeof(event)))
257 break;
258
259 mutex_lock(&state_lock);
260 if (as->suspend_state == SUSPEND_PENDING &&
261 (event == APM_SYS_SUSPEND || event == APM_USER_SUSPEND))
262 as->suspend_state = SUSPEND_READ;
263 mutex_unlock(&state_lock);
264
265 buf += sizeof(event);
266 i -= sizeof(event);
267 }
268
269 if (i < count)
270 ret = count - i;
271
272 return ret;
273}
274
275static unsigned int apm_poll(struct file *fp, poll_table * wait)
276{
277 struct apm_user *as = fp->private_data;
278
279 poll_wait(fp, &apm_waitqueue, wait);
280 return queue_empty(&as->queue) ? 0 : POLLIN | POLLRDNORM;
281}
282
283/*
284 * apm_ioctl - handle APM ioctl
285 *
286 * APM_IOC_SUSPEND
287 * This IOCTL is overloaded, and performs two functions. It is used to:
288 * - initiate a suspend
289 * - acknowledge a suspend read from /dev/apm_bios.
290 * Only when everyone who has opened /dev/apm_bios with write permission
 291 * has acknowledged does the actual suspend happen.
292 */
293static int
294apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg)
295{
296 struct apm_user *as = filp->private_data;
297 unsigned long flags;
298 int err = -EINVAL;
299
300 if (!as->suser || !as->writer)
301 return -EPERM;
302
303 switch (cmd) {
304 case APM_IOC_SUSPEND:
305 mutex_lock(&state_lock);
306
307 as->suspend_result = -EINTR;
308
309 if (as->suspend_state == SUSPEND_READ) {
310 int pending;
311
312 /*
313 * If we read a suspend command from /dev/apm_bios,
314 * then the corresponding APM_IOC_SUSPEND ioctl is
315 * interpreted as an acknowledge.
316 */
317 as->suspend_state = SUSPEND_ACKED;
318 suspends_pending--;
319 pending = suspends_pending == 0;
320 mutex_unlock(&state_lock);
321
322 /*
323 * If there are no further acknowledges required,
324 * suspend the system.
325 */
326 if (pending)
327 apm_suspend();
328
329 /*
330 * Wait for the suspend/resume to complete. If there
331 * are pending acknowledges, we wait here for them.
332 *
333 * Note: we need to ensure that the PM subsystem does
334 * not kick us out of the wait when it suspends the
335 * threads.
336 */
337 flags = current->flags;
338 current->flags |= PF_NOFREEZE;
339
340 wait_event(apm_suspend_waitqueue,
341 as->suspend_state == SUSPEND_DONE);
342 } else {
343 as->suspend_state = SUSPEND_WAIT;
344 mutex_unlock(&state_lock);
345
346 /*
347 * Otherwise it is a request to suspend the system.
348 * Queue an event for all readers, and expect an
349 * acknowledge from all writers who haven't already
350 * acknowledged.
351 */
352 err = queue_suspend_event(APM_USER_SUSPEND, as);
353 if (err < 0) {
354 /*
355 * Avoid taking the lock here - this
356 * should be fine.
357 */
358 as->suspend_state = SUSPEND_NONE;
359 break;
360 }
361
362 if (err > 0)
363 apm_suspend();
364
365 /*
366 * Wait for the suspend/resume to complete. If there
367 * are pending acknowledges, we wait here for them.
368 *
369 * Note: we need to ensure that the PM subsystem does
370 * not kick us out of the wait when it suspends the
371 * threads.
372 */
373 flags = current->flags;
374 current->flags |= PF_NOFREEZE;
375
376 wait_event_interruptible(apm_suspend_waitqueue,
377 as->suspend_state == SUSPEND_DONE);
378 }
379
380 current->flags = flags;
381
382 mutex_lock(&state_lock);
383 err = as->suspend_result;
384 as->suspend_state = SUSPEND_NONE;
385 mutex_unlock(&state_lock);
386 break;
387 }
388
389 return err;
390}
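
/*
 * A minimal userspace sketch (illustrative assumption, not shipped with
 * this driver) of the acknowledge protocol documented above: a writer
 * reads one event from /dev/apm_bios and issues APM_IOC_SUSPEND to
 * acknowledge it, blocking until the resume completes.
 */
#if 0	/* example only, not compiled with the kernel */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/apm_bios.h>

static int apm_ack_suspend(void)
{
	int fd = open("/dev/apm_bios", O_RDWR);
	apm_event_t ev;

	if (fd < 0)
		return -1;
	if (read(fd, &ev, sizeof(ev)) == sizeof(ev) &&
	    (ev == APM_USER_SUSPEND || ev == APM_SYS_SUSPEND))
		ioctl(fd, APM_IOC_SUSPEND, 0);	/* ack; returns after resume */
	close(fd);
	return 0;
}
#endif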
391
392static int apm_release(struct inode * inode, struct file * filp)
393{
394 struct apm_user *as = filp->private_data;
395 int pending = 0;
396
397 filp->private_data = NULL;
398
399 down_write(&user_list_lock);
400 list_del(&as->list);
401 up_write(&user_list_lock);
402
403 /*
404 * We are now unhooked from the chain. As far as new
405 * events are concerned, we no longer exist. However, we
406 * need to balance suspends_pending, which means the
407 * possibility of sleeping.
408 */
409 mutex_lock(&state_lock);
410 if (as->suspend_state != SUSPEND_NONE) {
411 suspends_pending -= 1;
412 pending = suspends_pending == 0;
413 }
414 mutex_unlock(&state_lock);
415 if (pending)
416 apm_suspend();
417
418 kfree(as);
419 return 0;
420}
421
422static int apm_open(struct inode * inode, struct file * filp)
423{
424 struct apm_user *as;
425
426 as = kzalloc(sizeof(*as), GFP_KERNEL);
427 if (as) {
428 /*
429 * XXX - this is a tiny bit broken, when we consider BSD
430 * process accounting. If the device is opened by root, we
431 * instantly flag that we used superuser privs. Who knows,
432 * we might close the device immediately without doing a
433 * privileged operation -- cevans
434 */
435 as->suser = capable(CAP_SYS_ADMIN);
436 as->writer = (filp->f_mode & FMODE_WRITE) == FMODE_WRITE;
437 as->reader = (filp->f_mode & FMODE_READ) == FMODE_READ;
438
439 down_write(&user_list_lock);
440 list_add(&as->list, &apm_user_list);
441 up_write(&user_list_lock);
442
443 filp->private_data = as;
444 }
445
446 return as ? 0 : -ENOMEM;
447}
448
449static struct file_operations apm_bios_fops = {
450 .owner = THIS_MODULE,
451 .read = apm_read,
452 .poll = apm_poll,
453 .ioctl = apm_ioctl,
454 .open = apm_open,
455 .release = apm_release,
456};
457
458static struct miscdevice apm_device = {
459 .minor = APM_MINOR_DEV,
460 .name = "apm_bios",
461 .fops = &apm_bios_fops
462};
463
464
465#ifdef CONFIG_PROC_FS
466/*
467 * Arguments, with symbols from linux/apm_bios.h.
468 *
469 * 0) Linux driver version (this will change if format changes)
470 * 1) APM BIOS Version. Usually 1.0, 1.1 or 1.2.
471 * 2) APM flags from APM Installation Check (0x00):
472 * bit 0: APM_16_BIT_SUPPORT
473 * bit 1: APM_32_BIT_SUPPORT
474 * bit 2: APM_IDLE_SLOWS_CLOCK
475 * bit 3: APM_BIOS_DISABLED
476 * bit 4: APM_BIOS_DISENGAGED
477 * 3) AC line status
478 * 0x00: Off-line
479 * 0x01: On-line
480 * 0x02: On backup power (BIOS >= 1.1 only)
481 * 0xff: Unknown
482 * 4) Battery status
483 * 0x00: High
484 * 0x01: Low
485 * 0x02: Critical
486 * 0x03: Charging
487 * 0x04: Selected battery not present (BIOS >= 1.2 only)
488 * 0xff: Unknown
489 * 5) Battery flag
490 * bit 0: High
491 * bit 1: Low
492 * bit 2: Critical
493 * bit 3: Charging
494 * bit 7: No system battery
495 * 0xff: Unknown
496 * 6) Remaining battery life (percentage of charge):
497 * 0-100: valid
498 * -1: Unknown
499 * 7) Remaining battery life (time units):
500 * Number of remaining minutes or seconds
501 * -1: Unknown
502 * 8) min = minutes; sec = seconds
503 */
504static int apm_get_info(char *buf, char **start, off_t fpos, int length)
505{
506 struct apm_power_info info;
507 char *units;
508 int ret;
509
510 info.ac_line_status = 0xff;
511 info.battery_status = 0xff;
512 info.battery_flag = 0xff;
513 info.battery_life = -1;
514 info.time = -1;
515 info.units = -1;
516
517 if (apm_get_power_status)
518 apm_get_power_status(&info);
519
520 switch (info.units) {
521 default: units = "?"; break;
522 case 0: units = "min"; break;
523 case 1: units = "sec"; break;
524 }
525
526 ret = sprintf(buf, "%s 1.2 0x%02x 0x%02x 0x%02x 0x%02x %d%% %d %s\n",
527 driver_version, APM_32_BIT_SUPPORT,
528 info.ac_line_status, info.battery_status,
529 info.battery_flag, info.battery_life,
530 info.time, units);
531
532 return ret;
533}
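
/*
 * Illustrative /proc/apm line produced by the format above (all values
 * made up): "1.13 1.2 0x02 0x01 0x03 0x09 87% 173 min" -- i.e. driver
 * version, BIOS version, APM flags, AC on-line, battery charging,
 * battery flags, 87% charge, 173 minutes remaining.
 */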
534#endif
535
536static int kapmd(void *arg)
537{
538 do {
539 apm_event_t event;
540 int ret;
541
542 wait_event_interruptible(kapmd_wait,
543 !queue_empty(&kapmd_queue) || kthread_should_stop());
544
545 if (kthread_should_stop())
546 break;
547
548 spin_lock_irq(&kapmd_queue_lock);
549 event = 0;
550 if (!queue_empty(&kapmd_queue))
551 event = queue_get_event(&kapmd_queue);
552 spin_unlock_irq(&kapmd_queue_lock);
553
554 switch (event) {
555 case 0:
556 break;
557
558 case APM_LOW_BATTERY:
559 case APM_POWER_STATUS_CHANGE:
560 queue_event(event);
561 break;
562
563 case APM_USER_SUSPEND:
564 case APM_SYS_SUSPEND:
565 ret = queue_suspend_event(event, NULL);
566 if (ret < 0) {
567 /*
568 * We were busy. Try again in 50ms.
569 */
570 queue_add_event(&kapmd_queue, event);
571 msleep(50);
572 }
573 if (ret > 0)
574 apm_suspend();
575 break;
576
577 case APM_CRITICAL_SUSPEND:
578 apm_suspend();
579 break;
580 }
581 } while (1);
582
583 return 0;
584}
585
586static int __init apm_init(void)
587{
588 int ret;
589
590 if (apm_disabled) {
591 printk(KERN_NOTICE "apm: disabled on user request.\n");
592 return -ENODEV;
593 }
594
595 kapmd_tsk = kthread_create(kapmd, NULL, "kapmd");
596 if (IS_ERR(kapmd_tsk)) {
597 ret = PTR_ERR(kapmd_tsk);
598 kapmd_tsk = NULL;
599 return ret;
600 }
601 kapmd_tsk->flags |= PF_NOFREEZE;
602 wake_up_process(kapmd_tsk);
603
604#ifdef CONFIG_PROC_FS
605 create_proc_info_entry("apm", 0, NULL, apm_get_info);
606#endif
607
608 ret = misc_register(&apm_device);
609 if (ret != 0) {
610 remove_proc_entry("apm", NULL);
611 kthread_stop(kapmd_tsk);
612 }
613
614 return ret;
615}
616
617static void __exit apm_exit(void)
618{
619 misc_deregister(&apm_device);
620 remove_proc_entry("apm", NULL);
621
622 kthread_stop(kapmd_tsk);
623}
624
625module_init(apm_init);
626module_exit(apm_exit);
627
628MODULE_AUTHOR("Stephen Rothwell");
629MODULE_DESCRIPTION("Advanced Power Management");
630MODULE_LICENSE("GPL");
631
632#ifndef MODULE
633static int __init apm_setup(char *str)
634{
635 while ((str != NULL) && (*str != '\0')) {
636 if (strncmp(str, "off", 3) == 0)
637 apm_disabled = 1;
638 if (strncmp(str, "on", 2) == 0)
639 apm_disabled = 0;
640 str = strchr(str, ',');
641 if (str != NULL)
642 str += strspn(str, ", \t");
643 }
644 return 1;
645}
646
647__setup("apm=", apm_setup);
648#endif
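
/*
 * Example: booting with "apm=off" on the kernel command line sets
 * apm_disabled, so apm_init() bails out early with -ENODEV; a later
 * "apm=on" in the same option string clears it again.
 */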
649
650/**
651 * apm_queue_event - queue an APM event for kapmd
652 * @event: APM event
653 *
654 * Queue an APM event for kapmd to process and ultimately take the
655 * appropriate action. Only a subset of events are handled:
656 * %APM_LOW_BATTERY
657 * %APM_POWER_STATUS_CHANGE
658 * %APM_USER_SUSPEND
659 * %APM_SYS_SUSPEND
660 * %APM_CRITICAL_SUSPEND
661 */
662void apm_queue_event(apm_event_t event)
663{
664 unsigned long flags;
665
666 spin_lock_irqsave(&kapmd_queue_lock, flags);
667 queue_add_event(&kapmd_queue, event);
668 spin_unlock_irqrestore(&kapmd_queue_lock, flags);
669
670 wake_up_interruptible(&kapmd_wait);
671}
672EXPORT_SYMBOL(apm_queue_event);
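
/*
 * A sketch of how machine-specific code might use this export; the
 * interrupt handler below is hypothetical, not part of this file.
 */
#if 0
static irqreturn_t pmu_irq(int irq, void *dev_id)
{
	/* Battery state changed; let kapmd fan the event out to readers. */
	apm_queue_event(APM_POWER_STATUS_CHANGE);
	return IRQ_HANDLED;
}
#endif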
diff --git a/drivers/char/drm/drmP.h b/drivers/char/drm/drmP.h
index 6dcdceb81203..85d99e21e188 100644
--- a/drivers/char/drm/drmP.h
+++ b/drivers/char/drm/drmP.h
@@ -532,11 +532,13 @@ typedef struct drm_mm_node {
532 int free; 532 int free;
533 unsigned long start; 533 unsigned long start;
534 unsigned long size; 534 unsigned long size;
535 struct drm_mm *mm;
535 void *private; 536 void *private;
536} drm_mm_node_t; 537} drm_mm_node_t;
537 538
538typedef struct drm_mm { 539typedef struct drm_mm {
539 drm_mm_node_t root_node; 540 struct list_head fl_entry;
541 struct list_head ml_entry;
540} drm_mm_t; 542} drm_mm_t;
541 543
542/** 544/**
@@ -843,9 +845,6 @@ extern void drm_mem_init(void);
843extern int drm_mem_info(char *buf, char **start, off_t offset, 845extern int drm_mem_info(char *buf, char **start, off_t offset,
844 int request, int *eof, void *data); 846 int request, int *eof, void *data);
845extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area); 847extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area);
846extern void *drm_ioremap(unsigned long offset, unsigned long size,
847 drm_device_t * dev);
848extern void drm_ioremapfree(void *pt, unsigned long size, drm_device_t * dev);
849 848
850extern DRM_AGP_MEM *drm_alloc_agp(drm_device_t * dev, int pages, u32 type); 849extern DRM_AGP_MEM *drm_alloc_agp(drm_device_t * dev, int pages, u32 type);
851extern int drm_free_agp(DRM_AGP_MEM * handle, int pages); 850extern int drm_free_agp(DRM_AGP_MEM * handle, int pages);
@@ -1053,33 +1052,18 @@ extern void drm_sysfs_device_remove(struct class_device *class_dev);
1053extern drm_mm_node_t *drm_mm_get_block(drm_mm_node_t * parent, 1052extern drm_mm_node_t *drm_mm_get_block(drm_mm_node_t * parent,
1054 unsigned long size, 1053 unsigned long size,
1055 unsigned alignment); 1054 unsigned alignment);
1056extern void drm_mm_put_block(drm_mm_t *mm, drm_mm_node_t *cur); 1055void drm_mm_put_block(drm_mm_node_t * cur);
1057extern drm_mm_node_t *drm_mm_search_free(const drm_mm_t *mm, unsigned long size, 1056extern drm_mm_node_t *drm_mm_search_free(const drm_mm_t *mm, unsigned long size,
1058 unsigned alignment, int best_match); 1057 unsigned alignment, int best_match);
1059extern int drm_mm_init(drm_mm_t *mm, unsigned long start, unsigned long size); 1058extern int drm_mm_init(drm_mm_t *mm, unsigned long start, unsigned long size);
1060extern void drm_mm_takedown(drm_mm_t *mm); 1059extern void drm_mm_takedown(drm_mm_t *mm);
1060extern int drm_mm_clean(drm_mm_t *mm);
1061extern unsigned long drm_mm_tail_space(drm_mm_t *mm);
1062extern int drm_mm_remove_space_from_tail(drm_mm_t *mm, unsigned long size);
1063extern int drm_mm_add_space_to_tail(drm_mm_t *mm, unsigned long size);
1061 1064
1062/* Inline replacements for DRM_IOREMAP macros */ 1065extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev);
1063static __inline__ void drm_core_ioremap(struct drm_map *map, 1066extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev);
1064 struct drm_device *dev)
1065{
1066 map->handle = drm_ioremap(map->offset, map->size, dev);
1067}
1068
1069#if 0
1070static __inline__ void drm_core_ioremap_nocache(struct drm_map *map,
1071 struct drm_device *dev)
1072{
1073 map->handle = drm_ioremap_nocache(map->offset, map->size, dev);
1074}
1075#endif /* 0 */
1076
1077static __inline__ void drm_core_ioremapfree(struct drm_map *map,
1078 struct drm_device *dev)
1079{
1080 if (map->handle && map->size)
1081 drm_ioremapfree(map->handle, map->size, dev);
1082}
1083 1067
1084static __inline__ struct drm_map *drm_core_findmap(struct drm_device *dev, 1068static __inline__ struct drm_map *drm_core_findmap(struct drm_device *dev,
1085 unsigned int token) 1069 unsigned int token)
diff --git a/drivers/char/drm/drm_bufs.c b/drivers/char/drm/drm_bufs.c
index 9f65f5697ba8..a6828cc14e58 100644
--- a/drivers/char/drm/drm_bufs.c
+++ b/drivers/char/drm/drm_bufs.c
@@ -79,14 +79,14 @@ static int drm_map_handle(drm_device_t *dev, drm_hash_item_t *hash,
79 79
80 if (!use_hashed_handle) { 80 if (!use_hashed_handle) {
81 int ret; 81 int ret;
82 hash->key = user_token; 82 hash->key = user_token >> PAGE_SHIFT;
83 ret = drm_ht_insert_item(&dev->map_hash, hash); 83 ret = drm_ht_insert_item(&dev->map_hash, hash);
84 if (ret != -EINVAL) 84 if (ret != -EINVAL)
85 return ret; 85 return ret;
86 } 86 }
87 return drm_ht_just_insert_please(&dev->map_hash, hash, 87 return drm_ht_just_insert_please(&dev->map_hash, hash,
88 user_token, 32 - PAGE_SHIFT - 3, 88 user_token, 32 - PAGE_SHIFT - 3,
89 PAGE_SHIFT, DRM_MAP_HASH_OFFSET); 89 0, DRM_MAP_HASH_OFFSET >> PAGE_SHIFT);
90} 90}
91 91
92/** 92/**
@@ -178,11 +178,11 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
178 } 178 }
179 } 179 }
180 if (map->type == _DRM_REGISTERS) 180 if (map->type == _DRM_REGISTERS)
181 map->handle = drm_ioremap(map->offset, map->size, dev); 181 map->handle = ioremap(map->offset, map->size);
182 break; 182 break;
183 183
184 case _DRM_SHM: 184 case _DRM_SHM:
185 map->handle = vmalloc_32(map->size); 185 map->handle = vmalloc_user(map->size);
186 DRM_DEBUG("%lu %d %p\n", 186 DRM_DEBUG("%lu %d %p\n",
187 map->size, drm_order(map->size), map->handle); 187 map->size, drm_order(map->size), map->handle);
188 if (!map->handle) { 188 if (!map->handle) {
@@ -238,7 +238,7 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
238 list = drm_alloc(sizeof(*list), DRM_MEM_MAPS); 238 list = drm_alloc(sizeof(*list), DRM_MEM_MAPS);
239 if (!list) { 239 if (!list) {
240 if (map->type == _DRM_REGISTERS) 240 if (map->type == _DRM_REGISTERS)
241 drm_ioremapfree(map->handle, map->size, dev); 241 iounmap(map->handle);
242 drm_free(map, sizeof(*map), DRM_MEM_MAPS); 242 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
243 return -EINVAL; 243 return -EINVAL;
244 } 244 }
@@ -255,14 +255,14 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
255 ret = drm_map_handle(dev, &list->hash, user_token, 0); 255 ret = drm_map_handle(dev, &list->hash, user_token, 0);
256 if (ret) { 256 if (ret) {
257 if (map->type == _DRM_REGISTERS) 257 if (map->type == _DRM_REGISTERS)
258 drm_ioremapfree(map->handle, map->size, dev); 258 iounmap(map->handle);
259 drm_free(map, sizeof(*map), DRM_MEM_MAPS); 259 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
260 drm_free(list, sizeof(*list), DRM_MEM_MAPS); 260 drm_free(list, sizeof(*list), DRM_MEM_MAPS);
261 mutex_unlock(&dev->struct_mutex); 261 mutex_unlock(&dev->struct_mutex);
262 return ret; 262 return ret;
263 } 263 }
264 264
265 list->user_token = list->hash.key; 265 list->user_token = list->hash.key << PAGE_SHIFT;
266 mutex_unlock(&dev->struct_mutex); 266 mutex_unlock(&dev->struct_mutex);
267 267
268 *maplist = list; 268 *maplist = list;
@@ -347,7 +347,8 @@ int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
347 347
348 if (r_list->map == map) { 348 if (r_list->map == map) {
349 list_del(list); 349 list_del(list);
350 drm_ht_remove_key(&dev->map_hash, r_list->user_token); 350 drm_ht_remove_key(&dev->map_hash,
351 r_list->user_token >> PAGE_SHIFT);
351 drm_free(list, sizeof(*list), DRM_MEM_MAPS); 352 drm_free(list, sizeof(*list), DRM_MEM_MAPS);
352 break; 353 break;
353 } 354 }
@@ -362,7 +363,7 @@ int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
362 363
363 switch (map->type) { 364 switch (map->type) {
364 case _DRM_REGISTERS: 365 case _DRM_REGISTERS:
365 drm_ioremapfree(map->handle, map->size, dev); 366 iounmap(map->handle);
366 /* FALLTHROUGH */ 367 /* FALLTHROUGH */
367 case _DRM_FRAME_BUFFER: 368 case _DRM_FRAME_BUFFER:
368 if (drm_core_has_MTRR(dev) && map->mtrr >= 0) { 369 if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
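
The map-hash keys are now stored page-shifted: insertion divides the byte
token by PAGE_SIZE, mmap-time lookups can use vma->vm_pgoff (already a page
index) directly, and the byte-valued token handed back to user space is
recovered by shifting left again. A sketch of the invariant, using names
from this patch:

	hash->key = user_token >> PAGE_SHIFT;             /* store page index */
	list->user_token = list->hash.key << PAGE_SHIFT;  /* byte token for mmap(2) */
	/* later, in drm_mmap(): vm_pgoff is already in pages */
	drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash);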
diff --git a/drivers/char/drm/drm_memory.c b/drivers/char/drm/drm_memory.c
index 5681cae1d404..92a867082376 100644
--- a/drivers/char/drm/drm_memory.c
+++ b/drivers/char/drm/drm_memory.c
@@ -79,28 +79,6 @@ void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area)
79} 79}
80 80
81#if __OS_HAS_AGP 81#if __OS_HAS_AGP
82/*
83 * Find the drm_map that covers the range [offset, offset+size).
84 */
85static drm_map_t *drm_lookup_map(unsigned long offset,
86 unsigned long size, drm_device_t * dev)
87{
88 struct list_head *list;
89 drm_map_list_t *r_list;
90 drm_map_t *map;
91
92 list_for_each(list, &dev->maplist->head) {
93 r_list = (drm_map_list_t *) list;
94 map = r_list->map;
95 if (!map)
96 continue;
97 if (map->offset <= offset
98 && (offset + size) <= (map->offset + map->size))
99 return map;
100 }
101 return NULL;
102}
103
104static void *agp_remap(unsigned long offset, unsigned long size, 82static void *agp_remap(unsigned long offset, unsigned long size,
105 drm_device_t * dev) 83 drm_device_t * dev)
106{ 84{
@@ -169,13 +147,6 @@ int drm_unbind_agp(DRM_AGP_MEM * handle)
169} 147}
170 148
171#else /* __OS_HAS_AGP */ 149#else /* __OS_HAS_AGP */
172
173static inline drm_map_t *drm_lookup_map(unsigned long offset,
174 unsigned long size, drm_device_t * dev)
175{
176 return NULL;
177}
178
179static inline void *agp_remap(unsigned long offset, unsigned long size, 150static inline void *agp_remap(unsigned long offset, unsigned long size,
180 drm_device_t * dev) 151 drm_device_t * dev)
181{ 152{
@@ -184,57 +155,28 @@ static inline void *agp_remap(unsigned long offset, unsigned long size,
184 155
185#endif /* agp */ 156#endif /* agp */
186 157
187void *drm_ioremap(unsigned long offset, unsigned long size, 158#endif /* debug_memory */
188 drm_device_t * dev)
189{
190 if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture) {
191 drm_map_t *map = drm_lookup_map(offset, size, dev);
192
193 if (map && map->type == _DRM_AGP)
194 return agp_remap(offset, size, dev);
195 }
196 return ioremap(offset, size);
197}
198EXPORT_SYMBOL(drm_ioremap);
199 159
200#if 0 160void drm_core_ioremap(struct drm_map *map, struct drm_device *dev)
201void *drm_ioremap_nocache(unsigned long offset,
202 unsigned long size, drm_device_t * dev)
203{ 161{
204 if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture) { 162 if (drm_core_has_AGP(dev) &&
205 drm_map_t *map = drm_lookup_map(offset, size, dev); 163 dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
206 164 map->handle = agp_remap(map->offset, map->size, dev);
207 if (map && map->type == _DRM_AGP) 165 else
208 return agp_remap(offset, size, dev); 166 map->handle = ioremap(map->offset, map->size);
209 }
210 return ioremap_nocache(offset, size);
211} 167}
212#endif /* 0 */ 168EXPORT_SYMBOL(drm_core_ioremap);
213 169
214void drm_ioremapfree(void *pt, unsigned long size, 170void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev)
215 drm_device_t * dev)
216{ 171{
217 /* 172 if (!map->handle || !map->size)
218 * This is a bit ugly. It would be much cleaner if the DRM API would use separate 173 return;
219 * routines for handling mappings in the AGP space. Hopefully this can be done in 174
220 * a future revision of the interface... 175 if (drm_core_has_AGP(dev) &&
221 */ 176 dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
222 if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture 177 vunmap(map->handle);
223 && ((unsigned long)pt >= VMALLOC_START 178 else
224 && (unsigned long)pt < VMALLOC_END)) { 179 iounmap(map->handle);
225 unsigned long offset;
226 drm_map_t *map;
227
228 offset = drm_follow_page(pt) | ((unsigned long)pt & ~PAGE_MASK);
229 map = drm_lookup_map(offset, size, dev);
230 if (map && map->type == _DRM_AGP) {
231 vunmap(pt);
232 return;
233 }
234 }
235
236 iounmap(pt);
237} 180}
238EXPORT_SYMBOL(drm_ioremapfree); 181EXPORT_SYMBOL(drm_core_ioremapfree);
239 182
240#endif /* debug_memory */
diff --git a/drivers/char/drm/drm_memory.h b/drivers/char/drm/drm_memory.h
index f1b97aff10cf..63e425b5ea82 100644
--- a/drivers/char/drm/drm_memory.h
+++ b/drivers/char/drm/drm_memory.h
@@ -56,26 +56,6 @@
56# endif 56# endif
57#endif 57#endif
58 58
59static inline unsigned long drm_follow_page(void *vaddr)
60{
61 pgd_t *pgd = pgd_offset_k((unsigned long)vaddr);
62 pud_t *pud = pud_offset(pgd, (unsigned long)vaddr);
63 pmd_t *pmd = pmd_offset(pud, (unsigned long)vaddr);
64 pte_t *ptep = pte_offset_kernel(pmd, (unsigned long)vaddr);
65 return pte_pfn(*ptep) << PAGE_SHIFT;
66}
67
68#else /* __OS_HAS_AGP */ 59#else /* __OS_HAS_AGP */
69 60
70static inline unsigned long drm_follow_page(void *vaddr)
71{
72 return 0;
73}
74
75#endif 61#endif
76
77void *drm_ioremap(unsigned long offset, unsigned long size,
78 drm_device_t * dev);
79
80void drm_ioremapfree(void *pt, unsigned long size,
81 drm_device_t * dev);
diff --git a/drivers/char/drm/drm_memory_debug.h b/drivers/char/drm/drm_memory_debug.h
index 74581af806e1..6463271deea8 100644
--- a/drivers/char/drm/drm_memory_debug.h
+++ b/drivers/char/drm/drm_memory_debug.h
@@ -205,76 +205,6 @@ void drm_free (void *pt, size_t size, int area) {
205 } 205 }
206} 206}
207 207
208void *drm_ioremap (unsigned long offset, unsigned long size,
209 drm_device_t * dev) {
210 void *pt;
211
212 if (!size) {
213 DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
214 "Mapping 0 bytes at 0x%08lx\n", offset);
215 return NULL;
216 }
217
218 if (!(pt = drm_ioremap(offset, size, dev))) {
219 spin_lock(&drm_mem_lock);
220 ++drm_mem_stats[DRM_MEM_MAPPINGS].fail_count;
221 spin_unlock(&drm_mem_lock);
222 return NULL;
223 }
224 spin_lock(&drm_mem_lock);
225 ++drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
226 drm_mem_stats[DRM_MEM_MAPPINGS].bytes_allocated += size;
227 spin_unlock(&drm_mem_lock);
228 return pt;
229}
230
231#if 0
232void *drm_ioremap_nocache (unsigned long offset, unsigned long size,
233 drm_device_t * dev) {
234 void *pt;
235
236 if (!size) {
237 DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
238 "Mapping 0 bytes at 0x%08lx\n", offset);
239 return NULL;
240 }
241
242 if (!(pt = drm_ioremap_nocache(offset, size, dev))) {
243 spin_lock(&drm_mem_lock);
244 ++drm_mem_stats[DRM_MEM_MAPPINGS].fail_count;
245 spin_unlock(&drm_mem_lock);
246 return NULL;
247 }
248 spin_lock(&drm_mem_lock);
249 ++drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
250 drm_mem_stats[DRM_MEM_MAPPINGS].bytes_allocated += size;
251 spin_unlock(&drm_mem_lock);
252 return pt;
253}
254#endif /* 0 */
255
256void drm_ioremapfree (void *pt, unsigned long size, drm_device_t * dev) {
257 int alloc_count;
258 int free_count;
259
260 if (!pt)
261 DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
262 "Attempt to free NULL pointer\n");
263 else
264 drm_ioremapfree(pt, size, dev);
265
266 spin_lock(&drm_mem_lock);
267 drm_mem_stats[DRM_MEM_MAPPINGS].bytes_freed += size;
268 free_count = ++drm_mem_stats[DRM_MEM_MAPPINGS].free_count;
269 alloc_count = drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
270 spin_unlock(&drm_mem_lock);
271 if (free_count > alloc_count) {
272 DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
273 "Excess frees: %d frees, %d allocs\n",
274 free_count, alloc_count);
275 }
276}
277
278#if __OS_HAS_AGP 208#if __OS_HAS_AGP
279 209
280DRM_AGP_MEM *drm_alloc_agp (drm_device_t *dev, int pages, u32 type) { 210DRM_AGP_MEM *drm_alloc_agp (drm_device_t *dev, int pages, u32 type) {
diff --git a/drivers/char/drm/drm_mm.c b/drivers/char/drm/drm_mm.c
index 617526bd5b0c..9b46b85027d0 100644
--- a/drivers/char/drm/drm_mm.c
+++ b/drivers/char/drm/drm_mm.c
@@ -42,36 +42,131 @@
42 */ 42 */
43 43
44#include "drmP.h" 44#include "drmP.h"
45#include <linux/slab.h>
46
47unsigned long drm_mm_tail_space(drm_mm_t *mm)
48{
49 struct list_head *tail_node;
50 drm_mm_node_t *entry;
51
52 tail_node = mm->ml_entry.prev;
53 entry = list_entry(tail_node, drm_mm_node_t, ml_entry);
54 if (!entry->free)
55 return 0;
56
57 return entry->size;
58}
59
60int drm_mm_remove_space_from_tail(drm_mm_t *mm, unsigned long size)
61{
62 struct list_head *tail_node;
63 drm_mm_node_t *entry;
64
65 tail_node = mm->ml_entry.prev;
66 entry = list_entry(tail_node, drm_mm_node_t, ml_entry);
67 if (!entry->free)
68 return -ENOMEM;
69
70 if (entry->size <= size)
71 return -ENOMEM;
72
73 entry->size -= size;
74 return 0;
75}
76
77
78static int drm_mm_create_tail_node(drm_mm_t *mm,
79 unsigned long start,
80 unsigned long size)
81{
82 drm_mm_node_t *child;
83
84 child = (drm_mm_node_t *)
85 drm_alloc(sizeof(*child), DRM_MEM_MM);
86 if (!child)
87 return -ENOMEM;
88
89 child->free = 1;
90 child->size = size;
91 child->start = start;
92 child->mm = mm;
93
94 list_add_tail(&child->ml_entry, &mm->ml_entry);
95 list_add_tail(&child->fl_entry, &mm->fl_entry);
96
97 return 0;
98}
99
100
101int drm_mm_add_space_to_tail(drm_mm_t *mm, unsigned long size)
102{
103 struct list_head *tail_node;
104 drm_mm_node_t *entry;
105
106 tail_node = mm->ml_entry.prev;
107 entry = list_entry(tail_node, drm_mm_node_t, ml_entry);
108 if (!entry->free) {
109 return drm_mm_create_tail_node(mm, entry->start + entry->size, size);
110 }
111 entry->size += size;
112 return 0;
113}
114
115static drm_mm_node_t *drm_mm_split_at_start(drm_mm_node_t *parent,
116 unsigned long size)
117{
118 drm_mm_node_t *child;
119
120 child = (drm_mm_node_t *)
121 drm_alloc(sizeof(*child), DRM_MEM_MM);
122 if (!child)
123 return NULL;
124
125 INIT_LIST_HEAD(&child->fl_entry);
126
127 child->free = 0;
128 child->size = size;
129 child->start = parent->start;
130 child->mm = parent->mm;
131
132 list_add_tail(&child->ml_entry, &parent->ml_entry);
133 INIT_LIST_HEAD(&child->fl_entry);
134
135 parent->size -= size;
136 parent->start += size;
137 return child;
138}
139
140
45 141
46drm_mm_node_t *drm_mm_get_block(drm_mm_node_t * parent, 142drm_mm_node_t *drm_mm_get_block(drm_mm_node_t * parent,
47 unsigned long size, unsigned alignment) 143 unsigned long size, unsigned alignment)
48{ 144{
49 145
146 drm_mm_node_t *align_splitoff = NULL;
50 drm_mm_node_t *child; 147 drm_mm_node_t *child;
148 unsigned tmp = 0;
51 149
52 if (alignment) 150 if (alignment)
53 size += alignment - 1; 151 tmp = parent->start % alignment;
152
153 if (tmp) {
154 align_splitoff = drm_mm_split_at_start(parent, alignment - tmp);
155 if (!align_splitoff)
156 return NULL;
157 }
54 158
55 if (parent->size == size) { 159 if (parent->size == size) {
56 list_del_init(&parent->fl_entry); 160 list_del_init(&parent->fl_entry);
57 parent->free = 0; 161 parent->free = 0;
58 return parent; 162 return parent;
59 } else { 163 } else {
60 child = (drm_mm_node_t *) drm_alloc(sizeof(*child), DRM_MEM_MM); 164 child = drm_mm_split_at_start(parent, size);
61 if (!child) 165 }
62 return NULL;
63
64 INIT_LIST_HEAD(&child->ml_entry);
65 INIT_LIST_HEAD(&child->fl_entry);
66 166
67 child->free = 0; 167 if (align_splitoff)
68 child->size = size; 168 drm_mm_put_block(align_splitoff);
69 child->start = parent->start;
70 169
71 list_add_tail(&child->ml_entry, &parent->ml_entry);
72 parent->size -= size;
73 parent->start += size;
74 }
75 return child; 170 return child;
76} 171}
77 172
@@ -80,12 +175,12 @@ drm_mm_node_t *drm_mm_get_block(drm_mm_node_t * parent,
80 * Otherwise add to the free stack. 175 * Otherwise add to the free stack.
81 */ 176 */
82 177
83void drm_mm_put_block(drm_mm_t * mm, drm_mm_node_t * cur) 178void drm_mm_put_block(drm_mm_node_t * cur)
84{ 179{
85 180
86 drm_mm_node_t *list_root = &mm->root_node; 181 drm_mm_t *mm = cur->mm;
87 struct list_head *cur_head = &cur->ml_entry; 182 struct list_head *cur_head = &cur->ml_entry;
88 struct list_head *root_head = &list_root->ml_entry; 183 struct list_head *root_head = &mm->ml_entry;
89 drm_mm_node_t *prev_node = NULL; 184 drm_mm_node_t *prev_node = NULL;
90 drm_mm_node_t *next_node; 185 drm_mm_node_t *next_node;
91 186
@@ -116,7 +211,7 @@ void drm_mm_put_block(drm_mm_t * mm, drm_mm_node_t * cur)
116 } 211 }
117 if (!merged) { 212 if (!merged) {
118 cur->free = 1; 213 cur->free = 1;
119 list_add(&cur->fl_entry, &list_root->fl_entry); 214 list_add(&cur->fl_entry, &mm->fl_entry);
120 } else { 215 } else {
121 list_del(&cur->ml_entry); 216 list_del(&cur->ml_entry);
122 drm_free(cur, sizeof(*cur), DRM_MEM_MM); 217 drm_free(cur, sizeof(*cur), DRM_MEM_MM);
@@ -128,20 +223,30 @@ drm_mm_node_t *drm_mm_search_free(const drm_mm_t * mm,
128 unsigned alignment, int best_match) 223 unsigned alignment, int best_match)
129{ 224{
130 struct list_head *list; 225 struct list_head *list;
131 const struct list_head *free_stack = &mm->root_node.fl_entry; 226 const struct list_head *free_stack = &mm->fl_entry;
132 drm_mm_node_t *entry; 227 drm_mm_node_t *entry;
133 drm_mm_node_t *best; 228 drm_mm_node_t *best;
134 unsigned long best_size; 229 unsigned long best_size;
230 unsigned wasted;
135 231
136 best = NULL; 232 best = NULL;
137 best_size = ~0UL; 233 best_size = ~0UL;
138 234
139 if (alignment)
140 size += alignment - 1;
141
142 list_for_each(list, free_stack) { 235 list_for_each(list, free_stack) {
143 entry = list_entry(list, drm_mm_node_t, fl_entry); 236 entry = list_entry(list, drm_mm_node_t, fl_entry);
144 if (entry->size >= size) { 237 wasted = 0;
238
239 if (entry->size < size)
240 continue;
241
242 if (alignment) {
243 register unsigned tmp = entry->start % alignment;
244 if (tmp)
245 wasted += alignment - tmp;
246 }
247
248
249 if (entry->size >= size + wasted) {
145 if (!best_match) 250 if (!best_match)
146 return entry; 251 return entry;
147 if (size < best_size) { 252 if (size < best_size) {
@@ -154,40 +259,32 @@ drm_mm_node_t *drm_mm_search_free(const drm_mm_t * mm,
154 return best; 259 return best;
155} 260}
156 261
157int drm_mm_init(drm_mm_t * mm, unsigned long start, unsigned long size) 262int drm_mm_clean(drm_mm_t * mm)
158{ 263{
159 drm_mm_node_t *child; 264 struct list_head *head = &mm->ml_entry;
160
161 INIT_LIST_HEAD(&mm->root_node.ml_entry);
162 INIT_LIST_HEAD(&mm->root_node.fl_entry);
163 child = (drm_mm_node_t *) drm_alloc(sizeof(*child), DRM_MEM_MM);
164 if (!child)
165 return -ENOMEM;
166
167 INIT_LIST_HEAD(&child->ml_entry);
168 INIT_LIST_HEAD(&child->fl_entry);
169 265
170 child->start = start; 266 return (head->next->next == head);
171 child->size = size; 267}
172 child->free = 1;
173 268
174 list_add(&child->fl_entry, &mm->root_node.fl_entry); 269int drm_mm_init(drm_mm_t * mm, unsigned long start, unsigned long size)
175 list_add(&child->ml_entry, &mm->root_node.ml_entry); 270{
271 INIT_LIST_HEAD(&mm->ml_entry);
272 INIT_LIST_HEAD(&mm->fl_entry);
176 273
177 return 0; 274 return drm_mm_create_tail_node(mm, start, size);
178} 275}
179 276
180EXPORT_SYMBOL(drm_mm_init); 277EXPORT_SYMBOL(drm_mm_init);
181 278
182void drm_mm_takedown(drm_mm_t * mm) 279void drm_mm_takedown(drm_mm_t * mm)
183{ 280{
184 struct list_head *bnode = mm->root_node.fl_entry.next; 281 struct list_head *bnode = mm->fl_entry.next;
185 drm_mm_node_t *entry; 282 drm_mm_node_t *entry;
186 283
187 entry = list_entry(bnode, drm_mm_node_t, fl_entry); 284 entry = list_entry(bnode, drm_mm_node_t, fl_entry);
188 285
189 if (entry->ml_entry.next != &mm->root_node.ml_entry || 286 if (entry->ml_entry.next != &mm->ml_entry ||
190 entry->fl_entry.next != &mm->root_node.fl_entry) { 287 entry->fl_entry.next != &mm->fl_entry) {
191 DRM_ERROR("Memory manager not clean. Delaying takedown\n"); 288 DRM_ERROR("Memory manager not clean. Delaying takedown\n");
192 return; 289 return;
193 } 290 }
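
A minimal usage sketch of the reworked allocator, assumed from the
signatures in this patch: the manager now keeps its free and memory lists
in the drm_mm_t itself, and drm_mm_put_block() takes only the node because
each node carries a back-pointer to its mm.

	drm_mm_t mm;
	drm_mm_node_t *free_node, *node;

	if (drm_mm_init(&mm, 0, 1024 * 1024))		/* manage 1 MiB at offset 0 */
		return -ENOMEM;

	free_node = drm_mm_search_free(&mm, 4096, 256, 0);
	if (free_node) {
		node = drm_mm_get_block(free_node, 4096, 256);
		drm_mm_put_block(node);			/* no mm argument any more */
	}
	drm_mm_takedown(&mm);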
diff --git a/drivers/char/drm/drm_pciids.h b/drivers/char/drm/drm_pciids.h
index 09398d5fbd3f..ad54b845978b 100644
--- a/drivers/char/drm/drm_pciids.h
+++ b/drivers/char/drm/drm_pciids.h
@@ -226,12 +226,14 @@
226 {0x1106, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 226 {0x1106, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
227 {0x1106, 0x3118, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_PRO_GROUP_A}, \ 227 {0x1106, 0x3118, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_PRO_GROUP_A}, \
228 {0x1106, 0x3122, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 228 {0x1106, 0x3122, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
229 {0x1106, 0x7204, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
229 {0x1106, 0x7205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 230 {0x1106, 0x7205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
230 {0x1106, 0x3108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 231 {0x1106, 0x3108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
231 {0x1106, 0x3304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 232 {0x1106, 0x3304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
232 {0x1106, 0x3157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 233 {0x1106, 0x3157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
233 {0x1106, 0x3344, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 234 {0x1106, 0x3344, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
234 {0x1106, 0x7204, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 235 {0x1106, 0x3343, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
236 {0x1106, 0x3230, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_DX9_0}, \
235 {0, 0, 0} 237 {0, 0, 0}
236 238
237#define i810_PCI_IDS \ 239#define i810_PCI_IDS \
diff --git a/drivers/char/drm/drm_proc.c b/drivers/char/drm/drm_proc.c
index 62d5fe15f046..7fd0da712142 100644
--- a/drivers/char/drm/drm_proc.c
+++ b/drivers/char/drm/drm_proc.c
@@ -500,7 +500,7 @@ static int drm__vma_info(char *buf, char **start, off_t offset, int request,
500 for (pt = dev->vmalist; pt; pt = pt->next) { 500 for (pt = dev->vmalist; pt; pt = pt->next) {
501 if (!(vma = pt->vma)) 501 if (!(vma = pt->vma))
502 continue; 502 continue;
503 DRM_PROC_PRINT("\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx", 503 DRM_PROC_PRINT("\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
504 pt->pid, 504 pt->pid,
505 vma->vm_start, 505 vma->vm_start,
506 vma->vm_end, 506 vma->vm_end,
@@ -510,7 +510,7 @@ static int drm__vma_info(char *buf, char **start, off_t offset, int request,
510 vma->vm_flags & VM_MAYSHARE ? 's' : 'p', 510 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
511 vma->vm_flags & VM_LOCKED ? 'l' : '-', 511 vma->vm_flags & VM_LOCKED ? 'l' : '-',
512 vma->vm_flags & VM_IO ? 'i' : '-', 512 vma->vm_flags & VM_IO ? 'i' : '-',
513 vma->vm_pgoff << PAGE_SHIFT); 513 vma->vm_pgoff);
514 514
515#if defined(__i386__) 515#if defined(__i386__)
516 pgprot = pgprot_val(vma->vm_page_prot); 516 pgprot = pgprot_val(vma->vm_page_prot);
diff --git a/drivers/char/drm/drm_sman.c b/drivers/char/drm/drm_sman.c
index 19c81d2e13d0..e15db6d6bea9 100644
--- a/drivers/char/drm/drm_sman.c
+++ b/drivers/char/drm/drm_sman.c
@@ -101,10 +101,9 @@ static void *drm_sman_mm_allocate(void *private, unsigned long size,
101 101
102static void drm_sman_mm_free(void *private, void *ref) 102static void drm_sman_mm_free(void *private, void *ref)
103{ 103{
104 drm_mm_t *mm = (drm_mm_t *) private;
105 drm_mm_node_t *node = (drm_mm_node_t *) ref; 104 drm_mm_node_t *node = (drm_mm_node_t *) ref;
106 105
107 drm_mm_put_block(mm, node); 106 drm_mm_put_block(node);
108} 107}
109 108
110static void drm_sman_mm_destroy(void *private) 109static void drm_sman_mm_destroy(void *private)
diff --git a/drivers/char/drm/drm_vm.c b/drivers/char/drm/drm_vm.c
index b9cfc077f6bc..54a632848955 100644
--- a/drivers/char/drm/drm_vm.c
+++ b/drivers/char/drm/drm_vm.c
@@ -70,7 +70,7 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
70 if (!dev->agp || !dev->agp->cant_use_aperture) 70 if (!dev->agp || !dev->agp->cant_use_aperture)
71 goto vm_nopage_error; 71 goto vm_nopage_error;
72 72
73 if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff << PAGE_SHIFT, &hash)) 73 if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
74 goto vm_nopage_error; 74 goto vm_nopage_error;
75 75
76 r_list = drm_hash_entry(hash, drm_map_list_t, hash); 76 r_list = drm_hash_entry(hash, drm_map_list_t, hash);
@@ -227,7 +227,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
227 map->size); 227 map->size);
228 DRM_DEBUG("mtrr_del = %d\n", retcode); 228 DRM_DEBUG("mtrr_del = %d\n", retcode);
229 } 229 }
230 drm_ioremapfree(map->handle, map->size, dev); 230 iounmap(map->handle);
231 break; 231 break;
232 case _DRM_SHM: 232 case _DRM_SHM:
233 vfree(map->handle); 233 vfree(map->handle);
@@ -463,8 +463,8 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
463 lock_kernel(); 463 lock_kernel();
464 dev = priv->head->dev; 464 dev = priv->head->dev;
465 dma = dev->dma; 465 dma = dev->dma;
466 DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n", 466 DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
467 vma->vm_start, vma->vm_end, vma->vm_pgoff << PAGE_SHIFT); 467 vma->vm_start, vma->vm_end, vma->vm_pgoff);
468 468
469 /* Length must match exact page count */ 469 /* Length must match exact page count */
470 if (!dma || (length >> PAGE_SHIFT) != dma->page_count) { 470 if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
@@ -537,8 +537,8 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
537 unsigned long offset = 0; 537 unsigned long offset = 0;
538 drm_hash_item_t *hash; 538 drm_hash_item_t *hash;
539 539
540 DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n", 540 DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
541 vma->vm_start, vma->vm_end, vma->vm_pgoff << PAGE_SHIFT); 541 vma->vm_start, vma->vm_end, vma->vm_pgoff);
542 542
543 if (!priv->authenticated) 543 if (!priv->authenticated)
544 return -EACCES; 544 return -EACCES;
@@ -547,7 +547,7 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
547 * the AGP mapped at physical address 0 547 * the AGP mapped at physical address 0
548 * --BenH. 548 * --BenH.
549 */ 549 */
550 if (!(vma->vm_pgoff << PAGE_SHIFT) 550 if (!vma->vm_pgoff
551#if __OS_HAS_AGP 551#if __OS_HAS_AGP
552 && (!dev->agp 552 && (!dev->agp
553 || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE) 553 || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
@@ -555,7 +555,7 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
555 ) 555 )
556 return drm_mmap_dma(filp, vma); 556 return drm_mmap_dma(filp, vma);
557 557
558 if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff << PAGE_SHIFT, &hash)) { 558 if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
559 DRM_ERROR("Could not find map\n"); 559 DRM_ERROR("Could not find map\n");
560 return -EINVAL; 560 return -EINVAL;
561 } 561 }
diff --git a/drivers/char/drm/i810_dma.c b/drivers/char/drm/i810_dma.c
index fa2de70f7401..60cb4e45a75e 100644
--- a/drivers/char/drm/i810_dma.c
+++ b/drivers/char/drm/i810_dma.c
@@ -219,8 +219,7 @@ static int i810_dma_cleanup(drm_device_t * dev)
219 (drm_i810_private_t *) dev->dev_private; 219 (drm_i810_private_t *) dev->dev_private;
220 220
221 if (dev_priv->ring.virtual_start) { 221 if (dev_priv->ring.virtual_start) {
222 drm_ioremapfree((void *)dev_priv->ring.virtual_start, 222 drm_core_ioremapfree(&dev_priv->ring.map, dev);
223 dev_priv->ring.Size, dev);
224 } 223 }
225 if (dev_priv->hw_status_page) { 224 if (dev_priv->hw_status_page) {
226 pci_free_consistent(dev->pdev, PAGE_SIZE, 225 pci_free_consistent(dev->pdev, PAGE_SIZE,
@@ -236,9 +235,9 @@ static int i810_dma_cleanup(drm_device_t * dev)
236 for (i = 0; i < dma->buf_count; i++) { 235 for (i = 0; i < dma->buf_count; i++) {
237 drm_buf_t *buf = dma->buflist[i]; 236 drm_buf_t *buf = dma->buflist[i];
238 drm_i810_buf_priv_t *buf_priv = buf->dev_private; 237 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
238
239 if (buf_priv->kernel_virtual && buf->total) 239 if (buf_priv->kernel_virtual && buf->total)
240 drm_ioremapfree(buf_priv->kernel_virtual, 240 drm_core_ioremapfree(&buf_priv->map, dev);
241 buf->total, dev);
242 } 241 }
243 } 242 }
244 return 0; 243 return 0;
@@ -311,8 +310,15 @@ static int i810_freelist_init(drm_device_t * dev, drm_i810_private_t * dev_priv)
311 310
312 *buf_priv->in_use = I810_BUF_FREE; 311 *buf_priv->in_use = I810_BUF_FREE;
313 312
314 buf_priv->kernel_virtual = drm_ioremap(buf->bus_address, 313 buf_priv->map.offset = buf->bus_address;
315 buf->total, dev); 314 buf_priv->map.size = buf->total;
315 buf_priv->map.type = _DRM_AGP;
316 buf_priv->map.flags = 0;
317 buf_priv->map.mtrr = 0;
318
319 drm_core_ioremap(&buf_priv->map, dev);
320 buf_priv->kernel_virtual = buf_priv->map.handle;
321
316 } 322 }
317 return 0; 323 return 0;
318} 324}
@@ -363,18 +369,24 @@ static int i810_dma_initialize(drm_device_t * dev,
363 dev_priv->ring.End = init->ring_end; 369 dev_priv->ring.End = init->ring_end;
364 dev_priv->ring.Size = init->ring_size; 370 dev_priv->ring.Size = init->ring_size;
365 371
366 dev_priv->ring.virtual_start = drm_ioremap(dev->agp->base + 372 dev_priv->ring.map.offset = dev->agp->base + init->ring_start;
367 init->ring_start, 373 dev_priv->ring.map.size = init->ring_size;
368 init->ring_size, dev); 374 dev_priv->ring.map.type = _DRM_AGP;
375 dev_priv->ring.map.flags = 0;
376 dev_priv->ring.map.mtrr = 0;
369 377
370 if (dev_priv->ring.virtual_start == NULL) { 378 drm_core_ioremap(&dev_priv->ring.map, dev);
379
380 if (dev_priv->ring.map.handle == NULL) {
371 dev->dev_private = (void *)dev_priv; 381 dev->dev_private = (void *)dev_priv;
372 i810_dma_cleanup(dev); 382 i810_dma_cleanup(dev);
373 DRM_ERROR("can not ioremap virtual address for" 383 DRM_ERROR("can not ioremap virtual address for"
374 " ring buffer\n"); 384 " ring buffer\n");
375 return -ENOMEM; 385 return DRM_ERR(ENOMEM);
376 } 386 }
377 387
388 dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
389
378 dev_priv->ring.tail_mask = dev_priv->ring.Size - 1; 390 dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
379 391
380 dev_priv->w = init->w; 392 dev_priv->w = init->w;
diff --git a/drivers/char/drm/i810_drv.h b/drivers/char/drm/i810_drv.h
index e8cf3ff606f0..e6df49f4928a 100644
--- a/drivers/char/drm/i810_drv.h
+++ b/drivers/char/drm/i810_drv.h
@@ -61,6 +61,7 @@ typedef struct drm_i810_buf_priv {
61 int currently_mapped; 61 int currently_mapped;
62 void *virtual; 62 void *virtual;
63 void *kernel_virtual; 63 void *kernel_virtual;
64 drm_local_map_t map;
64} drm_i810_buf_priv_t; 65} drm_i810_buf_priv_t;
65 66
66typedef struct _drm_i810_ring_buffer { 67typedef struct _drm_i810_ring_buffer {
@@ -72,6 +73,7 @@ typedef struct _drm_i810_ring_buffer {
72 int head; 73 int head;
73 int tail; 74 int tail;
74 int space; 75 int space;
76 drm_local_map_t map;
75} drm_i810_ring_buffer_t; 77} drm_i810_ring_buffer_t;
76 78
77typedef struct drm_i810_private { 79typedef struct drm_i810_private {
diff --git a/drivers/char/drm/i830_dma.c b/drivers/char/drm/i830_dma.c
index 4f0e5746ab33..95224455ec0c 100644
--- a/drivers/char/drm/i830_dma.c
+++ b/drivers/char/drm/i830_dma.c
@@ -223,8 +223,7 @@ static int i830_dma_cleanup(drm_device_t * dev)
223 (drm_i830_private_t *) dev->dev_private; 223 (drm_i830_private_t *) dev->dev_private;
224 224
225 if (dev_priv->ring.virtual_start) { 225 if (dev_priv->ring.virtual_start) {
226 drm_ioremapfree((void *)dev_priv->ring.virtual_start, 226 drm_core_ioremapfree(&dev_priv->ring.map, dev);
227 dev_priv->ring.Size, dev);
228 } 227 }
229 if (dev_priv->hw_status_page) { 228 if (dev_priv->hw_status_page) {
230 pci_free_consistent(dev->pdev, PAGE_SIZE, 229 pci_free_consistent(dev->pdev, PAGE_SIZE,
@@ -242,8 +241,7 @@ static int i830_dma_cleanup(drm_device_t * dev)
242 drm_buf_t *buf = dma->buflist[i]; 241 drm_buf_t *buf = dma->buflist[i];
243 drm_i830_buf_priv_t *buf_priv = buf->dev_private; 242 drm_i830_buf_priv_t *buf_priv = buf->dev_private;
244 if (buf_priv->kernel_virtual && buf->total) 243 if (buf_priv->kernel_virtual && buf->total)
245 drm_ioremapfree(buf_priv->kernel_virtual, 244 drm_core_ioremapfree(&buf_priv->map, dev);
246 buf->total, dev);
247 } 245 }
248 } 246 }
249 return 0; 247 return 0;
@@ -320,8 +318,14 @@ static int i830_freelist_init(drm_device_t * dev, drm_i830_private_t * dev_priv)
320 318
321 *buf_priv->in_use = I830_BUF_FREE; 319 *buf_priv->in_use = I830_BUF_FREE;
322 320
323 buf_priv->kernel_virtual = drm_ioremap(buf->bus_address, 321 buf_priv->map.offset = buf->bus_address;
324 buf->total, dev); 322 buf_priv->map.size = buf->total;
323 buf_priv->map.type = _DRM_AGP;
324 buf_priv->map.flags = 0;
325 buf_priv->map.mtrr = 0;
326
327 drm_core_ioremap(&buf_priv->map, dev);
328 buf_priv->kernel_virtual = buf_priv->map.handle;
325 } 329 }
326 return 0; 330 return 0;
327} 331}
@@ -373,18 +377,24 @@ static int i830_dma_initialize(drm_device_t * dev,
373 dev_priv->ring.End = init->ring_end; 377 dev_priv->ring.End = init->ring_end;
374 dev_priv->ring.Size = init->ring_size; 378 dev_priv->ring.Size = init->ring_size;
375 379
376 dev_priv->ring.virtual_start = drm_ioremap(dev->agp->base + 380 dev_priv->ring.map.offset = dev->agp->base + init->ring_start;
377 init->ring_start, 381 dev_priv->ring.map.size = init->ring_size;
378 init->ring_size, dev); 382 dev_priv->ring.map.type = _DRM_AGP;
383 dev_priv->ring.map.flags = 0;
384 dev_priv->ring.map.mtrr = 0;
385
386 drm_core_ioremap(&dev_priv->ring.map, dev);
379 387
380 if (dev_priv->ring.virtual_start == NULL) { 388 if (dev_priv->ring.map.handle == NULL) {
381 dev->dev_private = (void *)dev_priv; 389 dev->dev_private = (void *)dev_priv;
382 i830_dma_cleanup(dev); 390 i830_dma_cleanup(dev);
383 DRM_ERROR("can not ioremap virtual address for" 391 DRM_ERROR("can not ioremap virtual address for"
384 " ring buffer\n"); 392 " ring buffer\n");
385 return -ENOMEM; 393 return DRM_ERR(ENOMEM);
386 } 394 }
387 395
396 dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
397
388 dev_priv->ring.tail_mask = dev_priv->ring.Size - 1; 398 dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
389 399
390 dev_priv->w = init->w; 400 dev_priv->w = init->w;
diff --git a/drivers/char/drm/i830_drv.h b/drivers/char/drm/i830_drv.h
index 85bc5be6f916..e91f94afb4bb 100644
--- a/drivers/char/drm/i830_drv.h
+++ b/drivers/char/drm/i830_drv.h
@@ -68,6 +68,7 @@ typedef struct drm_i830_buf_priv {
68 int currently_mapped; 68 int currently_mapped;
69 void __user *virtual; 69 void __user *virtual;
70 void *kernel_virtual; 70 void *kernel_virtual;
71 drm_local_map_t map;
71} drm_i830_buf_priv_t; 72} drm_i830_buf_priv_t;
72 73
73typedef struct _drm_i830_ring_buffer { 74typedef struct _drm_i830_ring_buffer {
@@ -79,6 +80,7 @@ typedef struct _drm_i830_ring_buffer {
79 int head; 80 int head;
80 int tail; 81 int tail;
81 int space; 82 int space;
83 drm_local_map_t map;
82} drm_i830_ring_buffer_t; 84} drm_i830_ring_buffer_t;
83 85
84typedef struct drm_i830_private { 86typedef struct drm_i830_private {
diff --git a/drivers/char/drm/via_dma.c b/drivers/char/drm/via_dma.c
index a691ae74129d..c0539c6299cf 100644
--- a/drivers/char/drm/via_dma.c
+++ b/drivers/char/drm/via_dma.c
@@ -190,6 +190,11 @@ static int via_initialize(drm_device_t * dev,
190 return DRM_ERR(EFAULT); 190 return DRM_ERR(EFAULT);
191 } 191 }
192 192
193 if (dev_priv->chipset == VIA_DX9_0) {
194 DRM_ERROR("AGP DMA is not supported on this chip\n");
195 return DRM_ERR(EINVAL);
196 }
197
193 dev_priv->ring.map.offset = dev->agp->base + init->offset; 198 dev_priv->ring.map.offset = dev->agp->base + init->offset;
194 dev_priv->ring.map.size = init->size; 199 dev_priv->ring.map.size = init->size;
195 dev_priv->ring.map.type = 0; 200 dev_priv->ring.map.type = 0;
@@ -480,6 +485,7 @@ static int via_hook_segment(drm_via_private_t * dev_priv,
480 VIA_WRITE(VIA_REG_TRANSET, (HC_ParaType_PreCR << 16)); 485 VIA_WRITE(VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
481 VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_hi); 486 VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_hi);
482 VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_lo); 487 VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_lo);
488 VIA_READ(VIA_REG_TRANSPACE);
483 } 489 }
484 } 490 }
485 return paused; 491 return paused;
@@ -557,8 +563,9 @@ static void via_cmdbuf_start(drm_via_private_t * dev_priv)
557 563
558 VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_hi); 564 VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_hi);
559 VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_lo); 565 VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_lo);
560 566 DRM_WRITEMEMORYBARRIER();
561 VIA_WRITE(VIA_REG_TRANSPACE, command | HC_HAGPCMNT_MASK); 567 VIA_WRITE(VIA_REG_TRANSPACE, command | HC_HAGPCMNT_MASK);
568 VIA_READ(VIA_REG_TRANSPACE);
562} 569}
563 570
564static void via_pad_cache(drm_via_private_t * dev_priv, int qwords) 571static void via_pad_cache(drm_via_private_t * dev_priv, int qwords)
diff --git a/drivers/char/drm/via_dmablit.c b/drivers/char/drm/via_dmablit.c
index 806f9ce5f47b..2054d5773717 100644
--- a/drivers/char/drm/via_dmablit.c
+++ b/drivers/char/drm/via_dmablit.c
@@ -218,7 +218,9 @@ via_fire_dmablit(drm_device_t *dev, drm_via_sg_info_t *vsg, int engine)
218 VIA_WRITE(VIA_PCI_DMA_MR0 + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE); 218 VIA_WRITE(VIA_PCI_DMA_MR0 + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);
219 VIA_WRITE(VIA_PCI_DMA_BCR0 + engine*0x10, 0); 219 VIA_WRITE(VIA_PCI_DMA_BCR0 + engine*0x10, 0);
220 VIA_WRITE(VIA_PCI_DMA_DPR0 + engine*0x10, vsg->chain_start); 220 VIA_WRITE(VIA_PCI_DMA_DPR0 + engine*0x10, vsg->chain_start);
221 DRM_WRITEMEMORYBARRIER();
221 VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DE | VIA_DMA_CSR_TS); 222 VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DE | VIA_DMA_CSR_TS);
223 VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04);
222} 224}
223 225
224/* 226/*
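
The added DRM_WRITEMEMORYBARRIER() and trailing VIA_READ() calls follow
the usual PCI write-posting idiom: the barrier orders the CPU's stores,
and reading back a register on the same device forces earlier posted
writes out to the hardware before the engine is kicked. A generic sketch
of the pattern (register names illustrative):

	VIA_WRITE(setup_reg, value);	/* may sit posted in a bridge */
	DRM_WRITEMEMORYBARRIER();	/* order the stores CPU-side */
	VIA_WRITE(kick_reg, go);
	(void)VIA_READ(kick_reg);	/* read-back flushes posted writes */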
diff --git a/drivers/char/drm/via_drv.h b/drivers/char/drm/via_drv.h
index d21b5b75da0f..8b8778d4a423 100644
--- a/drivers/char/drm/via_drv.h
+++ b/drivers/char/drm/via_drv.h
@@ -29,10 +29,10 @@
29 29
30#define DRIVER_NAME "via" 30#define DRIVER_NAME "via"
31#define DRIVER_DESC "VIA Unichrome / Pro" 31#define DRIVER_DESC "VIA Unichrome / Pro"
32#define DRIVER_DATE "20060529" 32#define DRIVER_DATE "20061227"
33 33
34#define DRIVER_MAJOR 2 34#define DRIVER_MAJOR 2
35#define DRIVER_MINOR 10 35#define DRIVER_MINOR 11
36#define DRIVER_PATCHLEVEL 0 36#define DRIVER_PATCHLEVEL 0
37 37
38#include "via_verifier.h" 38#include "via_verifier.h"
@@ -79,7 +79,7 @@ typedef struct drm_via_private {
79 char pci_buf[VIA_PCI_BUF_SIZE]; 79 char pci_buf[VIA_PCI_BUF_SIZE];
80 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE]; 80 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
81 uint32_t num_fire_offsets; 81 uint32_t num_fire_offsets;
82 int pro_group_a; 82 int chipset;
83 drm_via_irq_t via_irqs[VIA_NUM_IRQS]; 83 drm_via_irq_t via_irqs[VIA_NUM_IRQS];
84 unsigned num_irqs; 84 unsigned num_irqs;
85 maskarray_t *irq_masks; 85 maskarray_t *irq_masks;
@@ -96,8 +96,9 @@ typedef struct drm_via_private {
96} drm_via_private_t; 96} drm_via_private_t;
97 97
98enum via_family { 98enum via_family {
99 VIA_OTHER = 0, 99 VIA_OTHER = 0, /* Baseline */
100 VIA_PRO_GROUP_A, 100 VIA_PRO_GROUP_A, /* Another video engine and DMA commands */
101 VIA_DX9_0 /* Same video as pro_group_a, but 3D is unsupported */
101}; 102};
102 103
103/* VIA MMIO register access */ 104/* VIA MMIO register access */
diff --git a/drivers/char/drm/via_irq.c b/drivers/char/drm/via_irq.c
index c33d068cde19..1ac5941ad237 100644
--- a/drivers/char/drm/via_irq.c
+++ b/drivers/char/drm/via_irq.c
@@ -258,12 +258,16 @@ void via_driver_irq_preinstall(drm_device_t * dev)
258 dev_priv->irq_enable_mask = VIA_IRQ_VBLANK_ENABLE; 258 dev_priv->irq_enable_mask = VIA_IRQ_VBLANK_ENABLE;
259 dev_priv->irq_pending_mask = VIA_IRQ_VBLANK_PENDING; 259 dev_priv->irq_pending_mask = VIA_IRQ_VBLANK_PENDING;
260 260
261 dev_priv->irq_masks = (dev_priv->pro_group_a) ? 261 if (dev_priv->chipset == VIA_PRO_GROUP_A ||
262 via_pro_group_a_irqs : via_unichrome_irqs; 262 dev_priv->chipset == VIA_DX9_0) {
263 dev_priv->num_irqs = (dev_priv->pro_group_a) ? 263 dev_priv->irq_masks = via_pro_group_a_irqs;
264 via_num_pro_group_a : via_num_unichrome; 264 dev_priv->num_irqs = via_num_pro_group_a;
265 dev_priv->irq_map = (dev_priv->pro_group_a) ? 265 dev_priv->irq_map = via_irqmap_pro_group_a;
266 via_irqmap_pro_group_a : via_irqmap_unichrome; 266 } else {
267 dev_priv->irq_masks = via_unichrome_irqs;
268 dev_priv->num_irqs = via_num_unichrome;
269 dev_priv->irq_map = via_irqmap_unichrome;
270 }
267 271
268 for (i = 0; i < dev_priv->num_irqs; ++i) { 272 for (i = 0; i < dev_priv->num_irqs; ++i) {
269 atomic_set(&cur_irq->irq_received, 0); 273 atomic_set(&cur_irq->irq_received, 0);
diff --git a/drivers/char/drm/via_map.c b/drivers/char/drm/via_map.c
index 782011e0a58d..4e3fc072aa3b 100644
--- a/drivers/char/drm/via_map.c
+++ b/drivers/char/drm/via_map.c
@@ -106,8 +106,7 @@ int via_driver_load(drm_device_t *dev, unsigned long chipset)
106 106
107 dev->dev_private = (void *)dev_priv; 107 dev->dev_private = (void *)dev_priv;
108 108
109 if (chipset == VIA_PRO_GROUP_A) 109 dev_priv->chipset = chipset;
110 dev_priv->pro_group_a = 1;
111 110
112 ret = drm_sman_init(&dev_priv->sman, 2, 12, 8); 111 ret = drm_sman_init(&dev_priv->sman, 2, 12, 8);
113 if (ret) { 112 if (ret) {
diff --git a/drivers/char/drm/via_verifier.c b/drivers/char/drm/via_verifier.c
index 70c897c88766..2e7e08078287 100644
--- a/drivers/char/drm/via_verifier.c
+++ b/drivers/char/drm/via_verifier.c
@@ -306,6 +306,7 @@ static __inline__ int finish_current_sequence(drm_via_state_t * cur_seq)
306 unsigned long lo = ~0, hi = 0, tmp; 306 unsigned long lo = ~0, hi = 0, tmp;
307 uint32_t *addr, *pitch, *height, tex; 307 uint32_t *addr, *pitch, *height, tex;
308 unsigned i; 308 unsigned i;
309 int npot;
309 310
310 if (end > 9) 311 if (end > 9)
311 end = 9; 312 end = 9;
@@ -316,12 +317,15 @@ static __inline__ int finish_current_sequence(drm_via_state_t * cur_seq)
316 &(cur_seq->t_addr[tex = cur_seq->texture][start]); 317 &(cur_seq->t_addr[tex = cur_seq->texture][start]);
317 pitch = &(cur_seq->pitch[tex][start]); 318 pitch = &(cur_seq->pitch[tex][start]);
318 height = &(cur_seq->height[tex][start]); 319 height = &(cur_seq->height[tex][start]);
319 320 npot = cur_seq->tex_npot[tex];
320 for (i = start; i <= end; ++i) { 321 for (i = start; i <= end; ++i) {
321 tmp = *addr++; 322 tmp = *addr++;
322 if (tmp < lo) 323 if (tmp < lo)
323 lo = tmp; 324 lo = tmp;
324 tmp += (*height++ << *pitch++); 325 if (i == 0 && npot)
326 tmp += (*height++ * *pitch++);
327 else
328 tmp += (*height++ << *pitch++);
325 if (tmp > hi) 329 if (tmp > hi)
326 hi = tmp; 330 hi = tmp;
327 } 331 }
@@ -443,13 +447,21 @@ investigate_hazard(uint32_t cmd, hazard_t hz, drm_via_state_t * cur_seq)
443 return 0; 447 return 0;
444 case check_texture_addr3: 448 case check_texture_addr3:
445 cur_seq->unfinished = tex_address; 449 cur_seq->unfinished = tex_address;
446 tmp = ((cmd >> 24) - 0x2B); 450 tmp = ((cmd >> 24) - HC_SubA_HTXnL0Pit);
447 cur_seq->pitch[cur_seq->texture][tmp] = 451 if (tmp == 0 &&
448 (cmd & 0x00F00000) >> 20; 452 (cmd & HC_HTXnEnPit_MASK)) {
449 if (!tmp && (cmd & 0x000FFFFF)) { 453 cur_seq->pitch[cur_seq->texture][tmp] =
450 DRM_ERROR 454 (cmd & HC_HTXnLnPit_MASK);
451 ("Unimplemented texture level 0 pitch mode.\n"); 455 cur_seq->tex_npot[cur_seq->texture] = 1;
452 return 2; 456 } else {
457 cur_seq->pitch[cur_seq->texture][tmp] =
458 (cmd & HC_HTXnLnPitE_MASK) >> HC_HTXnLnPitE_SHIFT;
459 cur_seq->tex_npot[cur_seq->texture] = 0;
460 if (cmd & 0x000FFFFF) {
461 DRM_ERROR
462 ("Unimplemented texture level 0 pitch mode.\n");
463 return 2;
464 }
453 } 465 }
454 return 0; 466 return 0;
455 case check_texture_addr4: 467 case check_texture_addr4:
@@ -961,7 +973,13 @@ via_verify_command_stream(const uint32_t * buf, unsigned int size,
961 uint32_t cmd; 973 uint32_t cmd;
962 const uint32_t *buf_end = buf + (size >> 2); 974 const uint32_t *buf_end = buf + (size >> 2);
963 verifier_state_t state = state_command; 975 verifier_state_t state = state_command;
964 int pro_group_a = dev_priv->pro_group_a; 976 int cme_video;
977 int supported_3d;
978
979 cme_video = (dev_priv->chipset == VIA_PRO_GROUP_A ||
980 dev_priv->chipset == VIA_DX9_0);
981
982 supported_3d = dev_priv->chipset != VIA_DX9_0;
965 983
966 hc_state->dev = dev; 984 hc_state->dev = dev;
967 hc_state->unfinished = no_sequence; 985 hc_state->unfinished = no_sequence;
@@ -986,17 +1004,21 @@ via_verify_command_stream(const uint32_t * buf, unsigned int size,
986 state = via_check_vheader6(&buf, buf_end); 1004 state = via_check_vheader6(&buf, buf_end);
987 break; 1005 break;
988 case state_command: 1006 case state_command:
989 if (HALCYON_HEADER2 == (cmd = *buf)) 1007 if ((HALCYON_HEADER2 == (cmd = *buf)) &&
1008 supported_3d)
990 state = state_header2; 1009 state = state_header2;
991 else if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1) 1010 else if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
992 state = state_header1; 1011 state = state_header1;
993 else if (pro_group_a 1012 else if (cme_video
994 && (cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5) 1013 && (cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5)
995 state = state_vheader5; 1014 state = state_vheader5;
996 else if (pro_group_a 1015 else if (cme_video
997 && (cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6) 1016 && (cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
998 state = state_vheader6; 1017 state = state_vheader6;
999 else { 1018 else if ((cmd == HALCYON_HEADER2) && !supported_3d) {
1019 DRM_ERROR("Accelerated 3D is not supported on this chipset yet.\n");
1020 state = state_error;
1021 } else {
1000 DRM_ERROR 1022 DRM_ERROR
1001 ("Invalid / Unimplemented DMA HEADER command. 0x%x\n", 1023 ("Invalid / Unimplemented DMA HEADER command. 0x%x\n",
1002 cmd); 1024 cmd);
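
With this change the verifier accepts non-power-of-two textures: when
HC_HTXnEnPit is set, the level-0 pitch is taken as a byte pitch and the
upper bound of the texture is computed with a multiply instead of a
shift. Schematically:

	/* pot:  hi = addr + (height << pitch)  -- pitch stored as log2 */
	/* npot: hi = addr + (height *  pitch)  -- pitch stored in bytes */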
diff --git a/drivers/char/drm/via_verifier.h b/drivers/char/drm/via_verifier.h
index 256590fcc22a..b77f59df0278 100644
--- a/drivers/char/drm/via_verifier.h
+++ b/drivers/char/drm/via_verifier.h
@@ -43,6 +43,7 @@ typedef struct {
43 uint32_t tex_level_lo[2]; 43 uint32_t tex_level_lo[2];
44 uint32_t tex_level_hi[2]; 44 uint32_t tex_level_hi[2];
45 uint32_t tex_palette_size[2]; 45 uint32_t tex_palette_size[2];
46 uint32_t tex_npot[2];
46 drm_via_sequence_t unfinished; 47 drm_via_sequence_t unfinished;
47 int agp_texture; 48 int agp_texture;
48 int multitex; 49 int multitex;
diff --git a/drivers/char/hvc_beat.c b/drivers/char/hvc_beat.c
new file mode 100644
index 000000000000..6f019f19be71
--- /dev/null
+++ b/drivers/char/hvc_beat.c
@@ -0,0 +1,134 @@
1/*
2 * Beat hypervisor console driver
3 *
4 * (C) Copyright 2006 TOSHIBA CORPORATION
5 *
6 * This code is based on drivers/char/hvc_rtas.c:
7 * (C) Copyright IBM Corporation 2001-2005
8 * (C) Copyright Red Hat, Inc. 2005
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License along
21 * with this program; if not, write to the Free Software Foundation, Inc.,
22 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
23 */
24
25#include <linux/module.h>
26#include <linux/init.h>
27#include <linux/err.h>
28#include <linux/string.h>
29#include <linux/console.h>
30#include <asm/prom.h>
31#include <asm/hvconsole.h>
32#include <asm/firmware.h>
33
34#include "hvc_console.h"
35
36extern int64_t beat_get_term_char(uint64_t, uint64_t *, uint64_t *, uint64_t *);
37extern int64_t beat_put_term_char(uint64_t, uint64_t, uint64_t, uint64_t);
38
39struct hvc_struct *hvc_beat_dev = NULL;
40
41/* bug: only one queue is available regardless of vtermno */
42static int hvc_beat_get_chars(uint32_t vtermno, char *buf, int cnt)
43{
44 static unsigned char q[sizeof(unsigned long) * 2]
45 __attribute__((aligned(sizeof(unsigned long))));
46 static int qlen = 0;
47 unsigned long got;
48
49again:
50 if (qlen) {
51 if (qlen > cnt) {
52 memcpy(buf, q, cnt);
53 qlen -= cnt;
54 memmove(q, q + cnt, qlen); /* move the unread remainder to the front */
55 return cnt;
56 } else { /* qlen <= cnt */
57 int r;
58
59 memcpy(buf, q, qlen);
60 r = qlen;
61 qlen = 0;
62 return r;
63 }
64 }
65 if (beat_get_term_char(vtermno, &got,
66 ((unsigned long *)q), ((unsigned long *)q) + 1) == 0) {
67 qlen = got;
68 goto again;
69 }
70 return 0;
71}
72
73static int hvc_beat_put_chars(uint32_t vtermno, const char *buf, int cnt)
74{
75 unsigned long kb[2];
76 int rest, nlen;
77
78 for (rest = cnt; rest > 0; rest -= nlen) {
79 nlen = (rest > 16) ? 16 : rest;
80 memcpy(kb, buf, nlen);
81 beat_put_term_char(vtermno, nlen, kb[0], kb[1]);
82 buf += nlen; /* the loop header already decrements rest */
83 }
84 return cnt;
85}
86
87static struct hv_ops hvc_beat_get_put_ops = {
88 .get_chars = hvc_beat_get_chars,
89 .put_chars = hvc_beat_put_chars,
90};
91
92static int hvc_beat_useit = 1;
93
94static int hvc_beat_config(char *p)
95{
96 hvc_beat_useit = simple_strtoul(p, NULL, 0);
97 return 0;
98}
99
100static int hvc_beat_console_init(void)
101{
102 if (hvc_beat_useit && machine_is_compatible("Beat")) {
103 hvc_instantiate(0, 0, &hvc_beat_get_put_ops);
104 }
105 return 0;
106}
107
108/* temp */
109static int hvc_beat_init(void)
110{
111 struct hvc_struct *hp;
112
113 if (!firmware_has_feature(FW_FEATURE_BEAT))
114 return -ENODEV;
115
116 hp = hvc_alloc(0, NO_IRQ, &hvc_beat_get_put_ops, 16);
117 if (IS_ERR(hp))
118 return PTR_ERR(hp);
119 hvc_beat_dev = hp;
120 return 0;
121}
122
123static void __exit hvc_beat_exit(void)
124{
125 if (hvc_beat_dev)
126 hvc_remove(hvc_beat_dev);
127}
128
129module_init(hvc_beat_init);
130module_exit(hvc_beat_exit);
131
132__setup("hvc_beat=", hvc_beat_config);
133
134console_initcall(hvc_beat_console_init);
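
The stash handling in hvc_beat_get_chars() is the subtle part of this driver: the hypervisor hands back up to 16 bytes per call, and whatever the tty layer does not consume has to slide to the front of the queue for the next call. A self-contained userspace model of the same algorithm (names hypothetical) that compiles and runs as-is:

    #include <stdio.h>
    #include <string.h>

    static unsigned char q[16];     /* stash of not-yet-consumed bytes */
    static int qlen;

    /* Stand-in for beat_get_term_char(): yields <= 16 bytes per call. */
    static int fake_hv_read(unsigned char *dst, int *got)
    {
            static const char msg[] = "hello, hypervisor";
            static size_t pos;
            size_t n = sizeof(msg) - 1 - pos;

            if (n == 0)
                    return -1;
            if (n > 16)
                    n = 16;
            memcpy(dst, msg + pos, n);
            pos += n;
            *got = n;
            return 0;
    }

    static int get_chars(char *buf, int cnt)
    {
            int got;
    again:
            if (qlen) {
                    if (qlen > cnt) {
                            memcpy(buf, q, cnt);
                            qlen -= cnt;
                            memmove(q, q + cnt, qlen); /* remainder to front */
                            return cnt;
                    } else {
                            int r = qlen;

                            memcpy(buf, q, qlen);
                            qlen = 0;
                            return r;
                    }
            }
            if (fake_hv_read(q, &got) == 0) {
                    qlen = got;
                    goto again;
            }
            return 0;
    }

    int main(void)
    {
            char buf[4];
            int n;

            while ((n = get_chars(buf, sizeof(buf))) > 0)
                    printf("%.*s\n", n, buf);
            return 0;
    }
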
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index f1afd26a509f..a7b33d2f5991 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -1802,7 +1802,7 @@ static __devinit int try_init_acpi(struct SPMITable *spmi)
1802 return -ENODEV; 1802 return -ENODEV;
1803 } 1803 }
1804 1804
1805 if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) 1805 if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
1806 addr_space = IPMI_MEM_ADDR_SPACE; 1806 addr_space = IPMI_MEM_ADDR_SPACE;
1807 else 1807 else
1808 addr_space = IPMI_IO_ADDR_SPACE; 1808 addr_space = IPMI_IO_ADDR_SPACE;
@@ -1848,19 +1848,19 @@ static __devinit int try_init_acpi(struct SPMITable *spmi)
1848 info->irq_setup = NULL; 1848 info->irq_setup = NULL;
1849 } 1849 }
1850 1850
1851 if (spmi->addr.register_bit_width) { 1851 if (spmi->addr.bit_width) {
1852 /* A (hopefully) properly formed register bit width. */ 1852 /* A (hopefully) properly formed register bit width. */
1853 info->io.regspacing = spmi->addr.register_bit_width / 8; 1853 info->io.regspacing = spmi->addr.bit_width / 8;
1854 } else { 1854 } else {
1855 info->io.regspacing = DEFAULT_REGSPACING; 1855 info->io.regspacing = DEFAULT_REGSPACING;
1856 } 1856 }
1857 info->io.regsize = info->io.regspacing; 1857 info->io.regsize = info->io.regspacing;
1858 info->io.regshift = spmi->addr.register_bit_offset; 1858 info->io.regshift = spmi->addr.bit_offset;
1859 1859
1860 if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { 1860 if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
1861 info->io_setup = mem_setup; 1861 info->io_setup = mem_setup;
1862 info->io.addr_type = IPMI_MEM_ADDR_SPACE; 1862 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1863 } else if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_IO) { 1863 } else if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
1864 info->io_setup = port_setup; 1864 info->io_setup = port_setup;
1865 info->io.addr_type = IPMI_IO_ADDR_SPACE; 1865 info->io.addr_type = IPMI_IO_ADDR_SPACE;
1866 } else { 1866 } else {
@@ -1888,10 +1888,8 @@ static __devinit void acpi_find_bmc(void)
1888 return; 1888 return;
1889 1889
1890 for (i = 0; ; i++) { 1890 for (i = 0; ; i++) {
1891 status = acpi_get_firmware_table("SPMI", i+1, 1891 status = acpi_get_table(ACPI_SIG_SPMI, i+1,
1892 ACPI_LOGICAL_ADDRESSING, 1892 (struct acpi_table_header **)&spmi);
1893 (struct acpi_table_header **)
1894 &spmi);
1895 if (status != AE_OK) 1893 if (status != AE_OK)
1896 return; 1894 return;
1897 1895
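
This conversion tracks the ACPICA table-manager rework: acpi_get_firmware_table() and its ACPI_LOGICAL_ADDRESSING argument are gone, and callers now pass a 4-character signature constant plus a 1-based instance number. A hedged sketch of the new calling convention (the helper name is hypothetical):

    #include <acpi/acpi.h>

    /* Fetch the Nth SPMI table, or NULL once ACPICA runs out of them. */
    static struct acpi_table_header *get_spmi(u32 instance)
    {
            struct acpi_table_header *tbl;
            acpi_status status;

            status = acpi_get_table(ACPI_SIG_SPMI, instance, &tbl);
            if (ACPI_FAILURE(status))
                    return NULL;
            return tbl;
    }
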
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index 13935235e066..7fd3cd5ddf21 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -215,7 +215,7 @@ static void sysrq_handle_showstate_blocked(int key, struct tty_struct *tty)
215} 215}
216static struct sysrq_key_op sysrq_showstate_blocked_op = { 216static struct sysrq_key_op sysrq_showstate_blocked_op = {
217 .handler = sysrq_handle_showstate_blocked, 217 .handler = sysrq_handle_showstate_blocked,
218 .help_msg = "showBlockedTasks", 218 .help_msg = "shoW-blocked-tasks",
219 .action_msg = "Show Blocked State", 219 .action_msg = "Show Blocked State",
220 .enable_mask = SYSRQ_ENABLE_DUMP, 220 .enable_mask = SYSRQ_ENABLE_DUMP,
221}; 221};
@@ -315,15 +315,16 @@ static struct sysrq_key_op *sysrq_key_table[36] = {
315 &sysrq_loglevel_op, /* 9 */ 315 &sysrq_loglevel_op, /* 9 */
316 316
317 /* 317 /*
318 * Don't use for system provided sysrqs, it is handled specially on 318 * a: Don't use for system provided sysrqs, it is handled specially on
319 * sparc and will never arrive 319 * sparc and will never arrive.
320 */ 320 */
321 NULL, /* a */ 321 NULL, /* a */
322 &sysrq_reboot_op, /* b */ 322 &sysrq_reboot_op, /* b */
323 &sysrq_crashdump_op, /* c */ 323 &sysrq_crashdump_op, /* c & ibm_emac driver debug */
324 &sysrq_showlocks_op, /* d */ 324 &sysrq_showlocks_op, /* d */
325 &sysrq_term_op, /* e */ 325 &sysrq_term_op, /* e */
326 &sysrq_moom_op, /* f */ 326 &sysrq_moom_op, /* f */
327 /* g: May be registered by ppc for kgdb */
327 NULL, /* g */ 328 NULL, /* g */
328 NULL, /* h */ 329 NULL, /* h */
329 &sysrq_kill_op, /* i */ 330 &sysrq_kill_op, /* i */
@@ -332,18 +333,19 @@ static struct sysrq_key_op *sysrq_key_table[36] = {
332 NULL, /* l */ 333 NULL, /* l */
333 &sysrq_showmem_op, /* m */ 334 &sysrq_showmem_op, /* m */
334 &sysrq_unrt_op, /* n */ 335 &sysrq_unrt_op, /* n */
335 /* This will often be registered as 'Off' at init time */ 336 /* o: This will often be registered as 'Off' at init time */
336 NULL, /* o */ 337 NULL, /* o */
337 &sysrq_showregs_op, /* p */ 338 &sysrq_showregs_op, /* p */
338 NULL, /* q */ 339 NULL, /* q */
339 &sysrq_unraw_op, /* r */ 340 &sysrq_unraw_op, /* r */
340 &sysrq_sync_op, /* s */ 341 &sysrq_sync_op, /* s */
341 &sysrq_showstate_op, /* t */ 342 &sysrq_showstate_op, /* t */
342 &sysrq_mountro_op, /* u */ 343 &sysrq_mountro_op, /* u */
343 /* May be assigned at init time by SMP VOYAGER */ 344 /* v: May be registered at init time by SMP VOYAGER */
344 NULL, /* v */ 345 NULL, /* v */
345 NULL, /* w */ 346 &sysrq_showstate_blocked_op, /* w */
346 &sysrq_showstate_blocked_op, /* x */ 347 /* x: May be registered on ppc/powerpc for xmon */
348 NULL, /* x */
347 NULL, /* y */ 349 NULL, /* y */
348 NULL /* z */ 350 NULL /* z */
349}; 351};
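
Slots left NULL in this table can still be claimed at runtime, which is exactly what xmon and kgdb do for 'x' and 'g'. A minimal module sketch of that pattern, using register_sysrq_key() and the handler signature shown above (the key choice and names are hypothetical):

    #include <linux/module.h>
    #include <linux/sysrq.h>

    static void sysrq_handle_demo(int key, struct tty_struct *tty)
    {
            printk(KERN_INFO "sysrq: demo handler fired\n");
    }

    static struct sysrq_key_op sysrq_demo_op = {
            .handler    = sysrq_handle_demo,
            .help_msg   = "demo(y)",
            .action_msg = "Demo",
    };

    static int __init demo_init(void)
    {
            return register_sysrq_key('y', &sysrq_demo_op); /* 'y' is free above */
    }

    static void __exit demo_exit(void)
    {
            unregister_sysrq_key('y', &sysrq_demo_op);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");
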
diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
index a611972024e6..7fca5f470beb 100644
--- a/drivers/char/tpm/tpm_bios.c
+++ b/drivers/char/tpm/tpm_bios.c
@@ -372,10 +372,8 @@ static int read_log(struct tpm_bios_log *log)
372 } 372 }
373 373
374 /* Find TCPA entry in RSDT (ACPI_LOGICAL_ADDRESSING) */ 374 /* Find TCPA entry in RSDT (ACPI_LOGICAL_ADDRESSING) */
375 status = acpi_get_firmware_table(ACPI_TCPA_SIG, 1, 375 status = acpi_get_table(ACPI_SIG_TCPA, 1,
376 ACPI_LOGICAL_ADDRESSING, 376 (struct acpi_table_header **)&buff);
377 (struct acpi_table_header **)
378 &buff);
379 377
380 if (ACPI_FAILURE(status)) { 378 if (ACPI_FAILURE(status)) {
381 printk(KERN_ERR "%s: ERROR - Could not get TCPA table\n", 379 printk(KERN_ERR "%s: ERROR - Could not get TCPA table\n",
@@ -409,7 +407,7 @@ static int read_log(struct tpm_bios_log *log)
409 407
410 log->bios_event_log_end = log->bios_event_log + len; 408 log->bios_event_log_end = log->bios_event_log + len;
411 409
412 acpi_os_map_memory(start, len, (void *) &virt); 410 virt = acpi_os_map_memory(start, len);
413 411
414 memcpy(log->bios_event_log, virt, len); 412 memcpy(log->bios_event_log, virt, len);
415 413
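
The second change in this hunk reflects the new acpi_os_map_memory() convention: the mapping is the return value rather than an output argument, and it is NULL on failure. A short sketch of the call site; the error check is our addition, the patch itself does not add one:

    void __iomem *virt;

    virt = acpi_os_map_memory(start, len);
    if (!virt)
            return -EIO;    /* suggested check, not in the patch */
    memcpy(log->bios_event_log, virt, len);
    acpi_os_unmap_memory(virt, len);
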
diff --git a/drivers/char/watchdog/booke_wdt.c b/drivers/char/watchdog/booke_wdt.c
index 488902231cc2..0e23f29f71ab 100644
--- a/drivers/char/watchdog/booke_wdt.c
+++ b/drivers/char/watchdog/booke_wdt.c
@@ -35,7 +35,7 @@
35#ifdef CONFIG_FSL_BOOKE 35#ifdef CONFIG_FSL_BOOKE
36#define WDT_PERIOD_DEFAULT 63 /* Ex. wdt_period=28 bus=333Mhz , reset=~40sec */ 36#define WDT_PERIOD_DEFAULT 63 /* Ex. wdt_period=28 bus=333Mhz , reset=~40sec */
37#else 37#else
38#define WDT_PERIOD_DEFAULT 4 /* Refer to the PPC40x and PPC4xx manuals */ 38#define WDT_PERIOD_DEFAULT 3 /* Refer to the PPC40x and PPC4xx manuals */
39#endif /* for timing information */ 39#endif /* for timing information */
40 40
41u32 booke_wdt_enabled = 0; 41u32 booke_wdt_enabled = 0;
@@ -48,12 +48,22 @@ u32 booke_wdt_period = WDT_PERIOD_DEFAULT;
48#endif 48#endif
49 49
50/* 50/*
51 * booke_wdt_ping:
52 */
53static __inline__ void booke_wdt_ping(void)
54{
55 mtspr(SPRN_TSR, TSR_ENW|TSR_WIS);
56}
57
58/*
51 * booke_wdt_enable: 59 * booke_wdt_enable:
52 */ 60 */
53static __inline__ void booke_wdt_enable(void) 61static __inline__ void booke_wdt_enable(void)
54{ 62{
55 u32 val; 63 u32 val;
56 64
65 /* clear status before enabling watchdog */
66 booke_wdt_ping();
57 val = mfspr(SPRN_TCR); 67 val = mfspr(SPRN_TCR);
58 val |= (TCR_WIE|TCR_WRC(WRC_CHIP)|WDTP(booke_wdt_period)); 68 val |= (TCR_WIE|TCR_WRC(WRC_CHIP)|WDTP(booke_wdt_period));
59 69
@@ -61,14 +71,6 @@ static __inline__ void booke_wdt_enable(void)
61} 71}
62 72
63/* 73/*
64 * booke_wdt_ping:
65 */
66static __inline__ void booke_wdt_ping(void)
67{
68 mtspr(SPRN_TSR, TSR_ENW|TSR_WIS);
69}
70
71/*
72 * booke_wdt_write: 74 * booke_wdt_write:
73 */ 75 */
74static ssize_t booke_wdt_write (struct file *file, const char __user *buf, 76static ssize_t booke_wdt_write (struct file *file, const char __user *buf,
diff --git a/drivers/char/watchdog/machzwd.c b/drivers/char/watchdog/machzwd.c
index 276577d08fba..4d730fdbd528 100644
--- a/drivers/char/watchdog/machzwd.c
+++ b/drivers/char/watchdog/machzwd.c
@@ -325,7 +325,7 @@ static int zf_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
325 return put_user(0, p); 325 return put_user(0, p);
326 326
327 case WDIOC_KEEPALIVE: 327 case WDIOC_KEEPALIVE:
328 zf_ping(0); 328 zf_ping(NULL);
329 break; 329 break;
330 330
331 default: 331 default:
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 879250d3d069..ff8c4beaace4 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -51,6 +51,8 @@ config CRYPTO_DEV_PADLOCK_SHA
51 If unsure say M. The compiled module will be 51 If unsure say M. The compiled module will be
52 called padlock-sha.ko 52 called padlock-sha.ko
53 53
54source "arch/s390/crypto/Kconfig"
55
54config CRYPTO_DEV_GEODE 56config CRYPTO_DEV_GEODE
55 tristate "Support for the Geode LX AES engine" 57 tristate "Support for the Geode LX AES engine"
56 depends on CRYPTO && X86_32 && PCI 58 depends on CRYPTO && X86_32 && PCI
diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
index 43a68398656f..31ea405f2eeb 100644
--- a/drivers/crypto/geode-aes.c
+++ b/drivers/crypto/geode-aes.c
@@ -457,7 +457,7 @@ static struct pci_driver geode_aes_driver = {
457static int __init 457static int __init
458geode_aes_init(void) 458geode_aes_init(void)
459{ 459{
460 return pci_module_init(&geode_aes_driver); 460 return pci_register_driver(&geode_aes_driver);
461} 461}
462 462
463static void __exit 463static void __exit
diff --git a/drivers/firmware/pcdp.c b/drivers/firmware/pcdp.c
index c2ad72fefd9d..2b4b76e8bd72 100644
--- a/drivers/firmware/pcdp.c
+++ b/drivers/firmware/pcdp.c
@@ -26,7 +26,7 @@ setup_serial_console(struct pcdp_uart *uart)
26 static char options[64], *p = options; 26 static char options[64], *p = options;
27 char parity; 27 char parity;
28 28
29 mmio = (uart->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY); 29 mmio = (uart->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY);
30 p += sprintf(p, "console=uart,%s,0x%lx", 30 p += sprintf(p, "console=uart,%s,0x%lx",
31 mmio ? "mmio" : "io", uart->addr.address); 31 mmio ? "mmio" : "io", uart->addr.address);
32 if (uart->baud) { 32 if (uart->baud) {
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index ec796ad087df..850788f4dd2e 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -22,5 +22,19 @@ config HID
22 22
23 If unsure, say Y 23 If unsure, say Y
24 24
25config HID_DEBUG
26 bool "HID debugging support"
27 depends on HID
28 ---help---
29 This option lets the HID layer output diagnostics about its internal
30 state, resolve HID usages, dump HID fields, and so on. Individual HID
31 drivers use this debugging facility to report details about the
32 devices they handle.
33
34 This feature is useful for those debugging either the HID parser
35 itself or any HID hardware device.
36
37 If unsure, say N
38
25endmenu 39endmenu
26 40
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 6432392110bf..52e97d8f3c95 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -1,15 +1,8 @@
1# 1#
2# Makefile for the HID driver 2# Makefile for the HID driver
3# 3#
4 4hid-objs := hid-core.o hid-input.o
5# Multipart objects.
6hid-objs := hid-core.o hid-input.o
7
8# Optional parts of multipart objects.
9 5
10obj-$(CONFIG_HID) += hid.o 6obj-$(CONFIG_HID) += hid.o
11 7hid-$(CONFIG_HID_DEBUG) += hid-debug.o
12ifeq ($(CONFIG_INPUT_DEBUG),y)
13EXTRA_CFLAGS += -DDEBUG
14endif
15 8
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 49f18f5b2514..8c7d48eff7b7 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -28,11 +28,9 @@
28#include <linux/input.h> 28#include <linux/input.h>
29#include <linux/wait.h> 29#include <linux/wait.h>
30 30
31#undef DEBUG
32#undef DEBUG_DATA
33
34#include <linux/hid.h> 31#include <linux/hid.h>
35#include <linux/hiddev.h> 32#include <linux/hiddev.h>
33#include <linux/hid-debug.h>
36 34
37/* 35/*
38 * Version Information 36 * Version Information
@@ -951,7 +949,7 @@ int hid_input_report(struct hid_device *hid, int type, u8 *data, int size, int i
951 return -1; 949 return -1;
952 } 950 }
953 951
954#ifdef DEBUG_DATA 952#ifdef CONFIG_HID_DEBUG
955 printk(KERN_DEBUG __FILE__ ": report (size %u) (%snumbered)\n", size, report_enum->numbered ? "" : "un"); 953 printk(KERN_DEBUG __FILE__ ": report (size %u) (%snumbered)\n", size, report_enum->numbered ? "" : "un");
956#endif 954#endif
957 955
@@ -961,7 +959,7 @@ int hid_input_report(struct hid_device *hid, int type, u8 *data, int size, int i
961 size--; 959 size--;
962 } 960 }
963 961
964#ifdef DEBUG_DATA 962#ifdef CONFIG_HID_DEBUG
965 { 963 {
966 int i; 964 int i;
967 printk(KERN_DEBUG __FILE__ ": report %d (size %u) = ", n, size); 965 printk(KERN_DEBUG __FILE__ ": report %d (size %u) = ", n, size);
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
new file mode 100644
index 000000000000..89241be4ec9b
--- /dev/null
+++ b/drivers/hid/hid-debug.c
@@ -0,0 +1,764 @@
1/*
2 * $Id: hid-debug.h,v 1.8 2001/09/25 09:37:57 vojtech Exp $
3 *
4 * (c) 1999 Andreas Gal <gal@cs.uni-magdeburg.de>
5 * (c) 2000-2001 Vojtech Pavlik <vojtech@ucw.cz>
6 * (c) 2007 Jiri Kosina
7 *
8 * Some debug stuff for the HID parser.
9 */
10
11/*
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 *
26 * Should you need to contact me, the author, you can do so either by
27 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
28 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
29 */
30
31#include <linux/hid.h>
32
33struct hid_usage_entry {
34 unsigned page;
35 unsigned usage;
36 char *description;
37};
38
39static const struct hid_usage_entry hid_usage_table[] = {
40 { 0, 0, "Undefined" },
41 { 1, 0, "GenericDesktop" },
42 {0, 0x01, "Pointer"},
43 {0, 0x02, "Mouse"},
44 {0, 0x04, "Joystick"},
45 {0, 0x05, "GamePad"},
46 {0, 0x06, "Keyboard"},
47 {0, 0x07, "Keypad"},
48 {0, 0x08, "MultiAxis"},
49 {0, 0x30, "X"},
50 {0, 0x31, "Y"},
51 {0, 0x32, "Z"},
52 {0, 0x33, "Rx"},
53 {0, 0x34, "Ry"},
54 {0, 0x35, "Rz"},
55 {0, 0x36, "Slider"},
56 {0, 0x37, "Dial"},
57 {0, 0x38, "Wheel"},
58 {0, 0x39, "HatSwitch"},
59 {0, 0x3a, "CountedBuffer"},
60 {0, 0x3b, "ByteCount"},
61 {0, 0x3c, "MotionWakeup"},
62 {0, 0x3d, "Start"},
63 {0, 0x3e, "Select"},
64 {0, 0x40, "Vx"},
65 {0, 0x41, "Vy"},
66 {0, 0x42, "Vz"},
67 {0, 0x43, "Vbrx"},
68 {0, 0x44, "Vbry"},
69 {0, 0x45, "Vbrz"},
70 {0, 0x46, "Vno"},
71 {0, 0x80, "SystemControl"},
72 {0, 0x81, "SystemPowerDown"},
73 {0, 0x82, "SystemSleep"},
74 {0, 0x83, "SystemWakeUp"},
75 {0, 0x84, "SystemContextMenu"},
76 {0, 0x85, "SystemMainMenu"},
77 {0, 0x86, "SystemAppMenu"},
78 {0, 0x87, "SystemMenuHelp"},
79 {0, 0x88, "SystemMenuExit"},
80 {0, 0x89, "SystemMenuSelect"},
81 {0, 0x8a, "SystemMenuRight"},
82 {0, 0x8b, "SystemMenuLeft"},
83 {0, 0x8c, "SystemMenuUp"},
84 {0, 0x8d, "SystemMenuDown"},
85 {0, 0x90, "D-PadUp"},
86 {0, 0x91, "D-PadDown"},
87 {0, 0x92, "D-PadRight"},
88 {0, 0x93, "D-PadLeft"},
89 { 2, 0, "Simulation" },
90 {0, 0xb0, "Aileron"},
91 {0, 0xb1, "AileronTrim"},
92 {0, 0xb2, "Anti-Torque"},
93 {0, 0xb3, "Autopilot"},
94 {0, 0xb4, "Chaff"},
95 {0, 0xb5, "Collective"},
96 {0, 0xb6, "DiveBrake"},
97 {0, 0xb7, "ElectronicCountermeasures"},
98 {0, 0xb8, "Elevator"},
99 {0, 0xb9, "ElevatorTrim"},
100 {0, 0xba, "Rudder"},
101 {0, 0xbb, "Throttle"},
102 {0, 0xbc, "FlightCommunications"},
103 {0, 0xbd, "FlareRelease"},
104 {0, 0xbe, "LandingGear"},
105 {0, 0xbf, "ToeBrake"},
106 { 7, 0, "Keyboard" },
107 { 8, 0, "LED" },
108 {0, 0x01, "NumLock"},
109 {0, 0x02, "CapsLock"},
110 {0, 0x03, "ScrollLock"},
111 {0, 0x04, "Compose"},
112 {0, 0x05, "Kana"},
113 {0, 0x4b, "GenericIndicator"},
114 { 9, 0, "Button" },
115 { 10, 0, "Ordinal" },
116 { 12, 0, "Consumer" },
117 {0, 0x238, "HorizontalWheel"},
118 { 13, 0, "Digitizers" },
119 {0, 0x01, "Digitizer"},
120 {0, 0x02, "Pen"},
121 {0, 0x03, "LightPen"},
122 {0, 0x04, "TouchScreen"},
123 {0, 0x05, "TouchPad"},
124 {0, 0x20, "Stylus"},
125 {0, 0x21, "Puck"},
126 {0, 0x22, "Finger"},
127 {0, 0x30, "TipPressure"},
128 {0, 0x31, "BarrelPressure"},
129 {0, 0x32, "InRange"},
130 {0, 0x33, "Touch"},
131 {0, 0x34, "UnTouch"},
132 {0, 0x35, "Tap"},
133 {0, 0x39, "TabletFunctionKey"},
134 {0, 0x3a, "ProgramChangeKey"},
135 {0, 0x3c, "Invert"},
136 {0, 0x42, "TipSwitch"},
137 {0, 0x43, "SecondaryTipSwitch"},
138 {0, 0x44, "BarrelSwitch"},
139 {0, 0x45, "Eraser"},
140 {0, 0x46, "TabletPick"},
141 { 15, 0, "PhysicalInterfaceDevice" },
142 {0, 0x00, "Undefined"},
143 {0, 0x01, "Physical_Interface_Device"},
144 {0, 0x20, "Normal"},
145 {0, 0x21, "Set_Effect_Report"},
146 {0, 0x22, "Effect_Block_Index"},
147 {0, 0x23, "Parameter_Block_Offset"},
148 {0, 0x24, "ROM_Flag"},
149 {0, 0x25, "Effect_Type"},
150 {0, 0x26, "ET_Constant_Force"},
151 {0, 0x27, "ET_Ramp"},
152 {0, 0x28, "ET_Custom_Force_Data"},
153 {0, 0x30, "ET_Square"},
154 {0, 0x31, "ET_Sine"},
155 {0, 0x32, "ET_Triangle"},
156 {0, 0x33, "ET_Sawtooth_Up"},
157 {0, 0x34, "ET_Sawtooth_Down"},
158 {0, 0x40, "ET_Spring"},
159 {0, 0x41, "ET_Damper"},
160 {0, 0x42, "ET_Inertia"},
161 {0, 0x43, "ET_Friction"},
162 {0, 0x50, "Duration"},
163 {0, 0x51, "Sample_Period"},
164 {0, 0x52, "Gain"},
165 {0, 0x53, "Trigger_Button"},
166 {0, 0x54, "Trigger_Repeat_Interval"},
167 {0, 0x55, "Axes_Enable"},
168 {0, 0x56, "Direction_Enable"},
169 {0, 0x57, "Direction"},
170 {0, 0x58, "Type_Specific_Block_Offset"},
171 {0, 0x59, "Block_Type"},
172 {0, 0x5A, "Set_Envelope_Report"},
173 {0, 0x5B, "Attack_Level"},
174 {0, 0x5C, "Attack_Time"},
175 {0, 0x5D, "Fade_Level"},
176 {0, 0x5E, "Fade_Time"},
177 {0, 0x5F, "Set_Condition_Report"},
178 {0, 0x60, "CP_Offset"},
179 {0, 0x61, "Positive_Coefficient"},
180 {0, 0x62, "Negative_Coefficient"},
181 {0, 0x63, "Positive_Saturation"},
182 {0, 0x64, "Negative_Saturation"},
183 {0, 0x65, "Dead_Band"},
184 {0, 0x66, "Download_Force_Sample"},
185 {0, 0x67, "Isoch_Custom_Force_Enable"},
186 {0, 0x68, "Custom_Force_Data_Report"},
187 {0, 0x69, "Custom_Force_Data"},
188 {0, 0x6A, "Custom_Force_Vendor_Defined_Data"},
189 {0, 0x6B, "Set_Custom_Force_Report"},
190 {0, 0x6C, "Custom_Force_Data_Offset"},
191 {0, 0x6D, "Sample_Count"},
192 {0, 0x6E, "Set_Periodic_Report"},
193 {0, 0x6F, "Offset"},
194 {0, 0x70, "Magnitude"},
195 {0, 0x71, "Phase"},
196 {0, 0x72, "Period"},
197 {0, 0x73, "Set_Constant_Force_Report"},
198 {0, 0x74, "Set_Ramp_Force_Report"},
199 {0, 0x75, "Ramp_Start"},
200 {0, 0x76, "Ramp_End"},
201 {0, 0x77, "Effect_Operation_Report"},
202 {0, 0x78, "Effect_Operation"},
203 {0, 0x79, "Op_Effect_Start"},
204 {0, 0x7A, "Op_Effect_Start_Solo"},
205 {0, 0x7B, "Op_Effect_Stop"},
206 {0, 0x7C, "Loop_Count"},
207 {0, 0x7D, "Device_Gain_Report"},
208 {0, 0x7E, "Device_Gain"},
209 {0, 0x7F, "PID_Pool_Report"},
210 {0, 0x80, "RAM_Pool_Size"},
211 {0, 0x81, "ROM_Pool_Size"},
212 {0, 0x82, "ROM_Effect_Block_Count"},
213 {0, 0x83, "Simultaneous_Effects_Max"},
214 {0, 0x84, "Pool_Alignment"},
215 {0, 0x85, "PID_Pool_Move_Report"},
216 {0, 0x86, "Move_Source"},
217 {0, 0x87, "Move_Destination"},
218 {0, 0x88, "Move_Length"},
219 {0, 0x89, "PID_Block_Load_Report"},
220 {0, 0x8B, "Block_Load_Status"},
221 {0, 0x8C, "Block_Load_Success"},
222 {0, 0x8D, "Block_Load_Full"},
223 {0, 0x8E, "Block_Load_Error"},
224 {0, 0x8F, "Block_Handle"},
225 {0, 0x90, "PID_Block_Free_Report"},
226 {0, 0x91, "Type_Specific_Block_Handle"},
227 {0, 0x92, "PID_State_Report"},
228 {0, 0x94, "Effect_Playing"},
229 {0, 0x95, "PID_Device_Control_Report"},
230 {0, 0x96, "PID_Device_Control"},
231 {0, 0x97, "DC_Enable_Actuators"},
232 {0, 0x98, "DC_Disable_Actuators"},
233 {0, 0x99, "DC_Stop_All_Effects"},
234 {0, 0x9A, "DC_Device_Reset"},
235 {0, 0x9B, "DC_Device_Pause"},
236 {0, 0x9C, "DC_Device_Continue"},
237 {0, 0x9F, "Device_Paused"},
238 {0, 0xA0, "Actuators_Enabled"},
239 {0, 0xA4, "Safety_Switch"},
240 {0, 0xA5, "Actuator_Override_Switch"},
241 {0, 0xA6, "Actuator_Power"},
242 {0, 0xA7, "Start_Delay"},
243 {0, 0xA8, "Parameter_Block_Size"},
244 {0, 0xA9, "Device_Managed_Pool"},
245 {0, 0xAA, "Shared_Parameter_Blocks"},
246 {0, 0xAB, "Create_New_Effect_Report"},
247 {0, 0xAC, "RAM_Pool_Available"},
248 { 0x84, 0, "Power Device" },
249 { 0x84, 0x02, "PresentStatus" },
250 { 0x84, 0x03, "ChangeStatus" },
251 { 0x84, 0x04, "UPS" },
252 { 0x84, 0x05, "PowerSupply" },
253 { 0x84, 0x10, "BatterySystem" },
254 { 0x84, 0x11, "BatterySystemID" },
255 { 0x84, 0x12, "Battery" },
256 { 0x84, 0x13, "BatteryID" },
257 { 0x84, 0x14, "Charger" },
258 { 0x84, 0x15, "ChargerID" },
259 { 0x84, 0x16, "PowerConverter" },
260 { 0x84, 0x17, "PowerConverterID" },
261 { 0x84, 0x18, "OutletSystem" },
262 { 0x84, 0x19, "OutletSystemID" },
263 { 0x84, 0x1a, "Input" },
264 { 0x84, 0x1b, "InputID" },
265 { 0x84, 0x1c, "Output" },
266 { 0x84, 0x1d, "OutputID" },
267 { 0x84, 0x1e, "Flow" },
268 { 0x84, 0x1f, "FlowID" },
269 { 0x84, 0x20, "Outlet" },
270 { 0x84, 0x21, "OutletID" },
271 { 0x84, 0x22, "Gang" },
272 { 0x84, 0x24, "PowerSummary" },
273 { 0x84, 0x25, "PowerSummaryID" },
274 { 0x84, 0x30, "Voltage" },
275 { 0x84, 0x31, "Current" },
276 { 0x84, 0x32, "Frequency" },
277 { 0x84, 0x33, "ApparentPower" },
278 { 0x84, 0x35, "PercentLoad" },
279 { 0x84, 0x40, "ConfigVoltage" },
280 { 0x84, 0x41, "ConfigCurrent" },
281 { 0x84, 0x43, "ConfigApparentPower" },
282 { 0x84, 0x53, "LowVoltageTransfer" },
283 { 0x84, 0x54, "HighVoltageTransfer" },
284 { 0x84, 0x56, "DelayBeforeStartup" },
285 { 0x84, 0x57, "DelayBeforeShutdown" },
286 { 0x84, 0x58, "Test" },
287 { 0x84, 0x5a, "AudibleAlarmControl" },
288 { 0x84, 0x60, "Present" },
289 { 0x84, 0x61, "Good" },
290 { 0x84, 0x62, "InternalFailure" },
291 { 0x84, 0x65, "Overload" },
292 { 0x84, 0x66, "OverCharged" },
293 { 0x84, 0x67, "OverTemperature" },
294 { 0x84, 0x68, "ShutdownRequested" },
295 { 0x84, 0x69, "ShutdownImminent" },
296 { 0x84, 0x6b, "SwitchOn/Off" },
297 { 0x84, 0x6c, "Switchable" },
298 { 0x84, 0x6d, "Used" },
299 { 0x84, 0x6e, "Boost" },
300 { 0x84, 0x73, "CommunicationLost" },
301 { 0x84, 0xfd, "iManufacturer" },
302 { 0x84, 0xfe, "iProduct" },
303 { 0x84, 0xff, "iSerialNumber" },
304 { 0x85, 0, "Battery System" },
305 { 0x85, 0x01, "SMBBatteryMode" },
306 { 0x85, 0x02, "SMBBatteryStatus" },
307 { 0x85, 0x03, "SMBAlarmWarning" },
308 { 0x85, 0x04, "SMBChargerMode" },
309 { 0x85, 0x05, "SMBChargerStatus" },
310 { 0x85, 0x06, "SMBChargerSpecInfo" },
311 { 0x85, 0x07, "SMBSelectorState" },
312 { 0x85, 0x08, "SMBSelectorPresets" },
313 { 0x85, 0x09, "SMBSelectorInfo" },
314 { 0x85, 0x29, "RemainingCapacityLimit" },
315 { 0x85, 0x2c, "CapacityMode" },
316 { 0x85, 0x42, "BelowRemainingCapacityLimit" },
317 { 0x85, 0x44, "Charging" },
318 { 0x85, 0x45, "Discharging" },
319 { 0x85, 0x4b, "NeedReplacement" },
320 { 0x85, 0x66, "RemainingCapacity" },
321 { 0x85, 0x68, "RunTimeToEmpty" },
322 { 0x85, 0x6a, "AverageTimeToFull" },
323 { 0x85, 0x83, "DesignCapacity" },
324 { 0x85, 0x85, "ManufacturerDate" },
325 { 0x85, 0x89, "iDeviceChemistry" },
326 { 0x85, 0x8b, "Rechargeable" },
327 { 0x85, 0x8f, "iOEMInformation" },
328 { 0x85, 0x8d, "CapacityGranularity1" },
329 { 0x85, 0xd0, "ACPresent" },
330 /* pages 0xff00 to 0xffff are vendor-specific */
331 { 0xffff, 0, "Vendor-specific-FF" },
332 { 0, 0, NULL }
333};
334
335static void resolv_usage_page(unsigned page) {
336 const struct hid_usage_entry *p;
337
338 for (p = hid_usage_table; p->description; p++)
339 if (p->page == page) {
340 printk("%s", p->description);
341 return;
342 }
343 printk("%04x", page);
344}
345
346void hid_resolv_usage(unsigned usage) {
347 const struct hid_usage_entry *p;
348
349 resolv_usage_page(usage >> 16);
350 printk(".");
351 for (p = hid_usage_table; p->description; p++)
352 if (p->page == (usage >> 16)) {
353 for(++p; p->description && p->usage != 0; p++)
354 if (p->usage == (usage & 0xffff)) {
355 printk("%s", p->description);
356 return;
357 }
358 break;
359 }
360 printk("%04x", usage & 0xffff);
361}
362EXPORT_SYMBOL_GPL(hid_resolv_usage);
363
364__inline__ static void tab(int n) {
365 while (n--) printk(" ");
366}
367
368void hid_dump_field(struct hid_field *field, int n) {
369 int j;
370
371 if (field->physical) {
372 tab(n);
373 printk("Physical(");
374 hid_resolv_usage(field->physical); printk(")\n");
375 }
376 if (field->logical) {
377 tab(n);
378 printk("Logical(");
379 hid_resolv_usage(field->logical); printk(")\n");
380 }
381 tab(n); printk("Usage(%d)\n", field->maxusage);
382 for (j = 0; j < field->maxusage; j++) {
383 tab(n+2); hid_resolv_usage(field->usage[j].hid); printk("\n");
384 }
385 if (field->logical_minimum != field->logical_maximum) {
386 tab(n); printk("Logical Minimum(%d)\n", field->logical_minimum);
387 tab(n); printk("Logical Maximum(%d)\n", field->logical_maximum);
388 }
389 if (field->physical_minimum != field->physical_maximum) {
390 tab(n); printk("Physical Minimum(%d)\n", field->physical_minimum);
391 tab(n); printk("Physical Maximum(%d)\n", field->physical_maximum);
392 }
393 if (field->unit_exponent) {
394 tab(n); printk("Unit Exponent(%d)\n", field->unit_exponent);
395 }
396 if (field->unit) {
397 char *systems[5] = { "None", "SI Linear", "SI Rotation", "English Linear", "English Rotation" };
398 char *units[5][8] = {
399 { "None", "None", "None", "None", "None", "None", "None", "None" },
400 { "None", "Centimeter", "Gram", "Seconds", "Kelvin", "Ampere", "Candela", "None" },
401 { "None", "Radians", "Gram", "Seconds", "Kelvin", "Ampere", "Candela", "None" },
402 { "None", "Inch", "Slug", "Seconds", "Fahrenheit", "Ampere", "Candela", "None" },
403 { "None", "Degrees", "Slug", "Seconds", "Fahrenheit", "Ampere", "Candela", "None" }
404 };
405
406 int i;
407 int sys;
408 __u32 data = field->unit;
409
410 /* First nibble tells us which system we're in. */
411 sys = data & 0xf;
412 data >>= 4;
413
414 if(sys > 4) {
415 tab(n); printk("Unit(Invalid)\n");
416 }
417 else {
418 int earlier_unit = 0;
419
420 tab(n); printk("Unit(%s : ", systems[sys]);
421
422 for (i=1 ; i<sizeof(__u32)*2 ; i++) {
423 char nibble = data & 0xf;
424 data >>= 4;
425 if (nibble != 0) {
426 if(earlier_unit++ > 0)
427 printk("*");
428 printk("%s", units[sys][i]);
429 if(nibble != 1) {
430 /* This is a _signed_ nibble(!) */
431
432 int val = nibble & 0x7;
433 if(nibble & 0x08)
434 val = -((0x7 & ~val) +1);
435 printk("^%d", val);
436 }
437 }
438 }
439 printk(")\n");
440 }
441 }
442 tab(n); printk("Report Size(%u)\n", field->report_size);
443 tab(n); printk("Report Count(%u)\n", field->report_count);
444 tab(n); printk("Report Offset(%u)\n", field->report_offset);
445
446 tab(n); printk("Flags( ");
447 j = field->flags;
448 printk("%s", HID_MAIN_ITEM_CONSTANT & j ? "Constant " : "");
449 printk("%s", HID_MAIN_ITEM_VARIABLE & j ? "Variable " : "Array ");
450 printk("%s", HID_MAIN_ITEM_RELATIVE & j ? "Relative " : "Absolute ");
451 printk("%s", HID_MAIN_ITEM_WRAP & j ? "Wrap " : "");
452 printk("%s", HID_MAIN_ITEM_NONLINEAR & j ? "NonLinear " : "");
453 printk("%s", HID_MAIN_ITEM_NO_PREFERRED & j ? "NoPrefferedState " : "");
454 printk("%s", HID_MAIN_ITEM_NULL_STATE & j ? "NullState " : "");
455 printk("%s", HID_MAIN_ITEM_VOLATILE & j ? "Volatile " : "");
456 printk("%s", HID_MAIN_ITEM_BUFFERED_BYTE & j ? "BufferedByte " : "");
457 printk(")\n");
458}
459EXPORT_SYMBOL_GPL(hid_dump_field);
460
461void hid_dump_device(struct hid_device *device) {
462 struct hid_report_enum *report_enum;
463 struct hid_report *report;
464 struct list_head *list;
465 unsigned i,k;
466 static char *table[] = {"INPUT", "OUTPUT", "FEATURE"};
467
468 for (i = 0; i < HID_REPORT_TYPES; i++) {
469 report_enum = device->report_enum + i;
470 list = report_enum->report_list.next;
471 while (list != &report_enum->report_list) {
472 report = (struct hid_report *) list;
473 tab(2);
474 printk("%s", table[i]);
475 if (report->id)
476 printk("(%d)", report->id);
477 printk("[%s]", table[report->type]);
478 printk("\n");
479 for (k = 0; k < report->maxfield; k++) {
480 tab(4);
481 printk("Field(%d)\n", k);
482 hid_dump_field(report->field[k], 6);
483 }
484 list = list->next;
485 }
486 }
487}
488EXPORT_SYMBOL_GPL(hid_dump_device);
489
490void hid_dump_input(struct hid_usage *usage, __s32 value) {
491 printk("hid-debug: input ");
492 hid_resolv_usage(usage->hid);
493 printk(" = %d\n", value);
494}
495EXPORT_SYMBOL_GPL(hid_dump_input);
496
497static char *events[EV_MAX + 1] = {
498 [EV_SYN] = "Sync", [EV_KEY] = "Key",
499 [EV_REL] = "Relative", [EV_ABS] = "Absolute",
500 [EV_MSC] = "Misc", [EV_LED] = "LED",
501 [EV_SND] = "Sound", [EV_REP] = "Repeat",
502 [EV_FF] = "ForceFeedback", [EV_PWR] = "Power",
503 [EV_FF_STATUS] = "ForceFeedbackStatus",
504};
505
506static char *syncs[2] = {
507 [SYN_REPORT] = "Report", [SYN_CONFIG] = "Config",
508};
509static char *keys[KEY_MAX + 1] = {
510 [KEY_RESERVED] = "Reserved", [KEY_ESC] = "Esc",
511 [KEY_1] = "1", [KEY_2] = "2",
512 [KEY_3] = "3", [KEY_4] = "4",
513 [KEY_5] = "5", [KEY_6] = "6",
514 [KEY_7] = "7", [KEY_8] = "8",
515 [KEY_9] = "9", [KEY_0] = "0",
516 [KEY_MINUS] = "Minus", [KEY_EQUAL] = "Equal",
517 [KEY_BACKSPACE] = "Backspace", [KEY_TAB] = "Tab",
518 [KEY_Q] = "Q", [KEY_W] = "W",
519 [KEY_E] = "E", [KEY_R] = "R",
520 [KEY_T] = "T", [KEY_Y] = "Y",
521 [KEY_U] = "U", [KEY_I] = "I",
522 [KEY_O] = "O", [KEY_P] = "P",
523 [KEY_LEFTBRACE] = "LeftBrace", [KEY_RIGHTBRACE] = "RightBrace",
524 [KEY_ENTER] = "Enter", [KEY_LEFTCTRL] = "LeftControl",
525 [KEY_A] = "A", [KEY_S] = "S",
526 [KEY_D] = "D", [KEY_F] = "F",
527 [KEY_G] = "G", [KEY_H] = "H",
528 [KEY_J] = "J", [KEY_K] = "K",
529 [KEY_L] = "L", [KEY_SEMICOLON] = "Semicolon",
530 [KEY_APOSTROPHE] = "Apostrophe", [KEY_GRAVE] = "Grave",
531 [KEY_LEFTSHIFT] = "LeftShift", [KEY_BACKSLASH] = "BackSlash",
532 [KEY_Z] = "Z", [KEY_X] = "X",
533 [KEY_C] = "C", [KEY_V] = "V",
534 [KEY_B] = "B", [KEY_N] = "N",
535 [KEY_M] = "M", [KEY_COMMA] = "Comma",
536 [KEY_DOT] = "Dot", [KEY_SLASH] = "Slash",
537 [KEY_RIGHTSHIFT] = "RightShift", [KEY_KPASTERISK] = "KPAsterisk",
538 [KEY_LEFTALT] = "LeftAlt", [KEY_SPACE] = "Space",
539 [KEY_CAPSLOCK] = "CapsLock", [KEY_F1] = "F1",
540 [KEY_F2] = "F2", [KEY_F3] = "F3",
541 [KEY_F4] = "F4", [KEY_F5] = "F5",
542 [KEY_F6] = "F6", [KEY_F7] = "F7",
543 [KEY_F8] = "F8", [KEY_F9] = "F9",
544 [KEY_F10] = "F10", [KEY_NUMLOCK] = "NumLock",
545 [KEY_SCROLLLOCK] = "ScrollLock", [KEY_KP7] = "KP7",
546 [KEY_KP8] = "KP8", [KEY_KP9] = "KP9",
547 [KEY_KPMINUS] = "KPMinus", [KEY_KP4] = "KP4",
548 [KEY_KP5] = "KP5", [KEY_KP6] = "KP6",
549 [KEY_KPPLUS] = "KPPlus", [KEY_KP1] = "KP1",
550 [KEY_KP2] = "KP2", [KEY_KP3] = "KP3",
551 [KEY_KP0] = "KP0", [KEY_KPDOT] = "KPDot",
552 [KEY_ZENKAKUHANKAKU] = "Zenkaku/Hankaku", [KEY_102ND] = "102nd",
553 [KEY_F11] = "F11", [KEY_F12] = "F12",
554 [KEY_RO] = "RO", [KEY_KATAKANA] = "Katakana",
555 [KEY_HIRAGANA] = "HIRAGANA", [KEY_HENKAN] = "Henkan",
556 [KEY_KATAKANAHIRAGANA] = "Katakana/Hiragana", [KEY_MUHENKAN] = "Muhenkan",
557 [KEY_KPJPCOMMA] = "KPJpComma", [KEY_KPENTER] = "KPEnter",
558 [KEY_RIGHTCTRL] = "RightCtrl", [KEY_KPSLASH] = "KPSlash",
559 [KEY_SYSRQ] = "SysRq", [KEY_RIGHTALT] = "RightAlt",
560 [KEY_LINEFEED] = "LineFeed", [KEY_HOME] = "Home",
561 [KEY_UP] = "Up", [KEY_PAGEUP] = "PageUp",
562 [KEY_LEFT] = "Left", [KEY_RIGHT] = "Right",
563 [KEY_END] = "End", [KEY_DOWN] = "Down",
564 [KEY_PAGEDOWN] = "PageDown", [KEY_INSERT] = "Insert",
565 [KEY_DELETE] = "Delete", [KEY_MACRO] = "Macro",
566 [KEY_MUTE] = "Mute", [KEY_VOLUMEDOWN] = "VolumeDown",
567 [KEY_VOLUMEUP] = "VolumeUp", [KEY_POWER] = "Power",
568 [KEY_KPEQUAL] = "KPEqual", [KEY_KPPLUSMINUS] = "KPPlusMinus",
569 [KEY_PAUSE] = "Pause", [KEY_KPCOMMA] = "KPComma",
570 [KEY_HANGUEL] = "Hangeul", [KEY_HANJA] = "Hanja",
571 [KEY_YEN] = "Yen", [KEY_LEFTMETA] = "LeftMeta",
572 [KEY_RIGHTMETA] = "RightMeta", [KEY_COMPOSE] = "Compose",
573 [KEY_STOP] = "Stop", [KEY_AGAIN] = "Again",
574 [KEY_PROPS] = "Props", [KEY_UNDO] = "Undo",
575 [KEY_FRONT] = "Front", [KEY_COPY] = "Copy",
576 [KEY_OPEN] = "Open", [KEY_PASTE] = "Paste",
577 [KEY_FIND] = "Find", [KEY_CUT] = "Cut",
578 [KEY_HELP] = "Help", [KEY_MENU] = "Menu",
579 [KEY_CALC] = "Calc", [KEY_SETUP] = "Setup",
580 [KEY_SLEEP] = "Sleep", [KEY_WAKEUP] = "WakeUp",
581 [KEY_FILE] = "File", [KEY_SENDFILE] = "SendFile",
582 [KEY_DELETEFILE] = "DeleteFile", [KEY_XFER] = "X-fer",
583 [KEY_PROG1] = "Prog1", [KEY_PROG2] = "Prog2",
584 [KEY_WWW] = "WWW", [KEY_MSDOS] = "MSDOS",
585 [KEY_COFFEE] = "Coffee", [KEY_DIRECTION] = "Direction",
586 [KEY_CYCLEWINDOWS] = "CycleWindows", [KEY_MAIL] = "Mail",
587 [KEY_BOOKMARKS] = "Bookmarks", [KEY_COMPUTER] = "Computer",
588 [KEY_BACK] = "Back", [KEY_FORWARD] = "Forward",
589 [KEY_CLOSECD] = "CloseCD", [KEY_EJECTCD] = "EjectCD",
590 [KEY_EJECTCLOSECD] = "EjectCloseCD", [KEY_NEXTSONG] = "NextSong",
591 [KEY_PLAYPAUSE] = "PlayPause", [KEY_PREVIOUSSONG] = "PreviousSong",
592 [KEY_STOPCD] = "StopCD", [KEY_RECORD] = "Record",
593 [KEY_REWIND] = "Rewind", [KEY_PHONE] = "Phone",
594 [KEY_ISO] = "ISOKey", [KEY_CONFIG] = "Config",
595 [KEY_HOMEPAGE] = "HomePage", [KEY_REFRESH] = "Refresh",
596 [KEY_EXIT] = "Exit", [KEY_MOVE] = "Move",
597 [KEY_EDIT] = "Edit", [KEY_SCROLLUP] = "ScrollUp",
598 [KEY_SCROLLDOWN] = "ScrollDown", [KEY_KPLEFTPAREN] = "KPLeftParenthesis",
599 [KEY_KPRIGHTPAREN] = "KPRightParenthesis", [KEY_NEW] = "New",
600 [KEY_REDO] = "Redo", [KEY_F13] = "F13",
601 [KEY_F14] = "F14", [KEY_F15] = "F15",
602 [KEY_F16] = "F16", [KEY_F17] = "F17",
603 [KEY_F18] = "F18", [KEY_F19] = "F19",
604 [KEY_F20] = "F20", [KEY_F21] = "F21",
605 [KEY_F22] = "F22", [KEY_F23] = "F23",
606 [KEY_F24] = "F24", [KEY_PLAYCD] = "PlayCD",
607 [KEY_PAUSECD] = "PauseCD", [KEY_PROG3] = "Prog3",
608 [KEY_PROG4] = "Prog4", [KEY_SUSPEND] = "Suspend",
609 [KEY_CLOSE] = "Close", [KEY_PLAY] = "Play",
610 [KEY_FASTFORWARD] = "FastForward", [KEY_BASSBOOST] = "BassBoost",
611 [KEY_PRINT] = "Print", [KEY_HP] = "HP",
612 [KEY_CAMERA] = "Camera", [KEY_SOUND] = "Sound",
613 [KEY_QUESTION] = "Question", [KEY_EMAIL] = "Email",
614 [KEY_CHAT] = "Chat", [KEY_SEARCH] = "Search",
615 [KEY_CONNECT] = "Connect", [KEY_FINANCE] = "Finance",
616 [KEY_SPORT] = "Sport", [KEY_SHOP] = "Shop",
617 [KEY_ALTERASE] = "AlternateErase", [KEY_CANCEL] = "Cancel",
618 [KEY_BRIGHTNESSDOWN] = "BrightnessDown", [KEY_BRIGHTNESSUP] = "BrightnessUp",
619 [KEY_MEDIA] = "Media", [KEY_UNKNOWN] = "Unknown",
620 [BTN_0] = "Btn0", [BTN_1] = "Btn1",
621 [BTN_2] = "Btn2", [BTN_3] = "Btn3",
622 [BTN_4] = "Btn4", [BTN_5] = "Btn5",
623 [BTN_6] = "Btn6", [BTN_7] = "Btn7",
624 [BTN_8] = "Btn8", [BTN_9] = "Btn9",
625 [BTN_LEFT] = "LeftBtn", [BTN_RIGHT] = "RightBtn",
626 [BTN_MIDDLE] = "MiddleBtn", [BTN_SIDE] = "SideBtn",
627 [BTN_EXTRA] = "ExtraBtn", [BTN_FORWARD] = "ForwardBtn",
628 [BTN_BACK] = "BackBtn", [BTN_TASK] = "TaskBtn",
629 [BTN_TRIGGER] = "Trigger", [BTN_THUMB] = "ThumbBtn",
630 [BTN_THUMB2] = "ThumbBtn2", [BTN_TOP] = "TopBtn",
631 [BTN_TOP2] = "TopBtn2", [BTN_PINKIE] = "PinkieBtn",
632 [BTN_BASE] = "BaseBtn", [BTN_BASE2] = "BaseBtn2",
633 [BTN_BASE3] = "BaseBtn3", [BTN_BASE4] = "BaseBtn4",
634 [BTN_BASE5] = "BaseBtn5", [BTN_BASE6] = "BaseBtn6",
635 [BTN_DEAD] = "BtnDead", [BTN_A] = "BtnA",
636 [BTN_B] = "BtnB", [BTN_C] = "BtnC",
637 [BTN_X] = "BtnX", [BTN_Y] = "BtnY",
638 [BTN_Z] = "BtnZ", [BTN_TL] = "BtnTL",
639 [BTN_TR] = "BtnTR", [BTN_TL2] = "BtnTL2",
640 [BTN_TR2] = "BtnTR2", [BTN_SELECT] = "BtnSelect",
641 [BTN_START] = "BtnStart", [BTN_MODE] = "BtnMode",
642 [BTN_THUMBL] = "BtnThumbL", [BTN_THUMBR] = "BtnThumbR",
643 [BTN_TOOL_PEN] = "ToolPen", [BTN_TOOL_RUBBER] = "ToolRubber",
644 [BTN_TOOL_BRUSH] = "ToolBrush", [BTN_TOOL_PENCIL] = "ToolPencil",
645 [BTN_TOOL_AIRBRUSH] = "ToolAirbrush", [BTN_TOOL_FINGER] = "ToolFinger",
646 [BTN_TOOL_MOUSE] = "ToolMouse", [BTN_TOOL_LENS] = "ToolLens",
647 [BTN_TOUCH] = "Touch", [BTN_STYLUS] = "Stylus",
648 [BTN_STYLUS2] = "Stylus2", [BTN_TOOL_DOUBLETAP] = "ToolDoubleTap",
649 [BTN_TOOL_TRIPLETAP] = "ToolTripleTap", [BTN_GEAR_DOWN] = "WheelBtn",
650 [BTN_GEAR_UP] = "Gear up", [KEY_OK] = "Ok",
651 [KEY_SELECT] = "Select", [KEY_GOTO] = "Goto",
652 [KEY_CLEAR] = "Clear", [KEY_POWER2] = "Power2",
653 [KEY_OPTION] = "Option", [KEY_INFO] = "Info",
654 [KEY_TIME] = "Time", [KEY_VENDOR] = "Vendor",
655 [KEY_ARCHIVE] = "Archive", [KEY_PROGRAM] = "Program",
656 [KEY_CHANNEL] = "Channel", [KEY_FAVORITES] = "Favorites",
657 [KEY_EPG] = "EPG", [KEY_PVR] = "PVR",
658 [KEY_MHP] = "MHP", [KEY_LANGUAGE] = "Language",
659 [KEY_TITLE] = "Title", [KEY_SUBTITLE] = "Subtitle",
660 [KEY_ANGLE] = "Angle", [KEY_ZOOM] = "Zoom",
661 [KEY_MODE] = "Mode", [KEY_KEYBOARD] = "Keyboard",
662 [KEY_SCREEN] = "Screen", [KEY_PC] = "PC",
663 [KEY_TV] = "TV", [KEY_TV2] = "TV2",
664 [KEY_VCR] = "VCR", [KEY_VCR2] = "VCR2",
665 [KEY_SAT] = "Sat", [KEY_SAT2] = "Sat2",
666 [KEY_CD] = "CD", [KEY_TAPE] = "Tape",
667 [KEY_RADIO] = "Radio", [KEY_TUNER] = "Tuner",
668 [KEY_PLAYER] = "Player", [KEY_TEXT] = "Text",
669 [KEY_DVD] = "DVD", [KEY_AUX] = "Aux",
670 [KEY_MP3] = "MP3", [KEY_AUDIO] = "Audio",
671 [KEY_VIDEO] = "Video", [KEY_DIRECTORY] = "Directory",
672 [KEY_LIST] = "List", [KEY_MEMO] = "Memo",
673 [KEY_CALENDAR] = "Calendar", [KEY_RED] = "Red",
674 [KEY_GREEN] = "Green", [KEY_YELLOW] = "Yellow",
675 [KEY_BLUE] = "Blue", [KEY_CHANNELUP] = "ChannelUp",
676 [KEY_CHANNELDOWN] = "ChannelDown", [KEY_FIRST] = "First",
677 [KEY_LAST] = "Last", [KEY_AB] = "AB",
678 [KEY_NEXT] = "Next", [KEY_RESTART] = "Restart",
679 [KEY_SLOW] = "Slow", [KEY_SHUFFLE] = "Shuffle",
680 [KEY_BREAK] = "Break", [KEY_PREVIOUS] = "Previous",
681 [KEY_DIGITS] = "Digits", [KEY_TEEN] = "TEEN",
682 [KEY_TWEN] = "TWEN", [KEY_DEL_EOL] = "DeleteEOL",
683 [KEY_DEL_EOS] = "DeleteEOS", [KEY_INS_LINE] = "InsertLine",
684 [KEY_DEL_LINE] = "DeleteLine",
685 [KEY_SEND] = "Send", [KEY_REPLY] = "Reply",
686 [KEY_FORWARDMAIL] = "ForwardMail", [KEY_SAVE] = "Save",
687 [KEY_DOCUMENTS] = "Documents",
688 [KEY_FN] = "Fn", [KEY_FN_ESC] = "Fn+ESC",
689 [KEY_FN_1] = "Fn+1", [KEY_FN_2] = "Fn+2",
690 [KEY_FN_B] = "Fn+B", [KEY_FN_D] = "Fn+D",
691 [KEY_FN_E] = "Fn+E", [KEY_FN_F] = "Fn+F",
692 [KEY_FN_S] = "Fn+S",
693 [KEY_FN_F1] = "Fn+F1", [KEY_FN_F2] = "Fn+F2",
694 [KEY_FN_F3] = "Fn+F3", [KEY_FN_F4] = "Fn+F4",
695 [KEY_FN_F5] = "Fn+F5", [KEY_FN_F6] = "Fn+F6",
696 [KEY_FN_F7] = "Fn+F7", [KEY_FN_F8] = "Fn+F8",
697 [KEY_FN_F9] = "Fn+F9", [KEY_FN_F10] = "Fn+F10",
698 [KEY_FN_F11] = "Fn+F11", [KEY_FN_F12] = "Fn+F12",
699 [KEY_KBDILLUMTOGGLE] = "KbdIlluminationToggle",
700 [KEY_KBDILLUMDOWN] = "KbdIlluminationDown",
701 [KEY_KBDILLUMUP] = "KbdIlluminationUp",
702 [KEY_SWITCHVIDEOMODE] = "SwitchVideoMode",
703};
704
705static char *relatives[REL_MAX + 1] = {
706 [REL_X] = "X", [REL_Y] = "Y",
707 [REL_Z] = "Z", [REL_RX] = "Rx",
708 [REL_RY] = "Ry", [REL_RZ] = "Rz",
709 [REL_HWHEEL] = "HWheel", [REL_DIAL] = "Dial",
710 [REL_WHEEL] = "Wheel", [REL_MISC] = "Misc",
711};
712
713static char *absolutes[ABS_MAX + 1] = {
714 [ABS_X] = "X", [ABS_Y] = "Y",
715 [ABS_Z] = "Z", [ABS_RX] = "Rx",
716 [ABS_RY] = "Ry", [ABS_RZ] = "Rz",
717 [ABS_THROTTLE] = "Throttle", [ABS_RUDDER] = "Rudder",
718 [ABS_WHEEL] = "Wheel", [ABS_GAS] = "Gas",
719 [ABS_BRAKE] = "Brake", [ABS_HAT0X] = "Hat0X",
720 [ABS_HAT0Y] = "Hat0Y", [ABS_HAT1X] = "Hat1X",
721 [ABS_HAT1Y] = "Hat1Y", [ABS_HAT2X] = "Hat2X",
722 [ABS_HAT2Y] = "Hat2Y", [ABS_HAT3X] = "Hat3X",
723 [ABS_HAT3Y] = "Hat3Y", [ABS_PRESSURE] = "Pressure",
724 [ABS_DISTANCE] = "Distance", [ABS_TILT_X] = "XTilt",
725 [ABS_TILT_Y] = "YTilt", [ABS_TOOL_WIDTH] = "Tool Width",
726 [ABS_VOLUME] = "Volume", [ABS_MISC] = "Misc",
727};
728
729static char *misc[MSC_MAX + 1] = {
730 [MSC_SERIAL] = "Serial", [MSC_PULSELED] = "Pulseled",
731 [MSC_GESTURE] = "Gesture", [MSC_RAW] = "RawData"
732};
733
734static char *leds[LED_MAX + 1] = {
735 [LED_NUML] = "NumLock", [LED_CAPSL] = "CapsLock",
736 [LED_SCROLLL] = "ScrollLock", [LED_COMPOSE] = "Compose",
737 [LED_KANA] = "Kana", [LED_SLEEP] = "Sleep",
738 [LED_SUSPEND] = "Suspend", [LED_MUTE] = "Mute",
739 [LED_MISC] = "Misc",
740};
741
742static char *repeats[REP_MAX + 1] = {
743 [REP_DELAY] = "Delay", [REP_PERIOD] = "Period"
744};
745
746static char *sounds[SND_MAX + 1] = {
747 [SND_CLICK] = "Click", [SND_BELL] = "Bell",
748 [SND_TONE] = "Tone"
749};
750
751static char **names[EV_MAX + 1] = {
752 [EV_SYN] = syncs, [EV_KEY] = keys,
753 [EV_REL] = relatives, [EV_ABS] = absolutes,
754 [EV_MSC] = misc, [EV_LED] = leds,
755 [EV_SND] = sounds, [EV_REP] = repeats,
756};
757
758void hid_resolv_event(__u8 type, __u16 code) {
759
760 printk("%s.%s", events[type] ? events[type] : "?",
761 names[type] ? (names[type][code] ? names[type][code] : "?") : "?");
762}
763EXPORT_SYMBOL_GPL(hid_resolv_event);
764
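
hid_usage_table above is one flat array: a row whose usage field is 0 opens a page, and the rows after it belong to that page, which is why hid_resolv_usage() first scans for the page row and then walks forward until the next page row. A tiny standalone model of that two-level lookup, with hypothetical data:

    #include <stdio.h>

    struct entry { unsigned page, usage; const char *desc; };

    /* usage == 0 starts a page section; the rows after it (page kept 0,
     * as in the driver's table) are members of that page. */
    static const struct entry tbl[] = {
            { 1, 0,    "GenericDesktop" },
            { 0, 0x30, "X" },
            { 0, 0x31, "Y" },
            { 7, 0,    "Keyboard" },
            { 0, 0,    NULL }
    };

    static void resolv(unsigned usage)
    {
            const struct entry *p;

            for (p = tbl; p->desc; p++) {
                    if (p->page != (usage >> 16))
                            continue;
                    /* found the page row; scan members until next page */
                    for (++p; p->desc && p->usage != 0; p++)
                            if (p->usage == (usage & 0xffff)) {
                                    printf("%s\n", p->desc);
                                    return;
                            }
                    break;
            }
            printf("%04x\n", usage & 0xffff);
    }

    int main(void)
    {
            resolv(0x00010030);     /* prints "X" */
            resolv(0x00010099);     /* unknown: prints "0099" */
            return 0;
    }
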
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index c7a6833f6821..25d180a24fc4 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -31,9 +31,8 @@
31#include <linux/slab.h> 31#include <linux/slab.h>
32#include <linux/kernel.h> 32#include <linux/kernel.h>
33 33
34#undef DEBUG
35
36#include <linux/hid.h> 34#include <linux/hid.h>
35#include <linux/hid-debug.h>
37 36
38static int hid_pb_fnmode = 1; 37static int hid_pb_fnmode = 1;
39module_param_named(pb_fnmode, hid_pb_fnmode, int, 0644); 38module_param_named(pb_fnmode, hid_pb_fnmode, int, 0644);
@@ -252,9 +251,9 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
252 251
253 field->hidinput = hidinput; 252 field->hidinput = hidinput;
254 253
255#ifdef DEBUG 254#ifdef CONFIG_HID_DEBUG
256 printk(KERN_DEBUG "Mapping: "); 255 printk(KERN_DEBUG "Mapping: ");
257 resolv_usage(usage->hid); 256 hid_resolv_usage(usage->hid);
258 printk(" ---> "); 257 printk(" ---> ");
259#endif 258#endif
260 259
@@ -682,14 +681,14 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
682 field->dpad = usage->code; 681 field->dpad = usage->code;
683 } 682 }
684 683
685#ifdef DEBUG 684 hid_resolv_event(usage->type, usage->code);
686 resolv_event(usage->type, usage->code); 685#ifdef CONFIG_HID_DEBUG
687 printk("\n"); 686 printk("\n");
688#endif 687#endif
689 return; 688 return;
690 689
691ignore: 690ignore:
692#ifdef DEBUG 691#ifdef CONFIG_HID_DEBUG
693 printk("IGNORED\n"); 692 printk("IGNORED\n");
694#endif 693#endif
695 return; 694 return;
@@ -804,6 +803,18 @@ int hidinput_find_field(struct hid_device *hid, unsigned int type, unsigned int
804} 803}
805EXPORT_SYMBOL_GPL(hidinput_find_field); 804EXPORT_SYMBOL_GPL(hidinput_find_field);
806 805
806static int hidinput_open(struct input_dev *dev)
807{
808 struct hid_device *hid = dev->private;
809 return hid->hid_open(hid);
810}
811
812static void hidinput_close(struct input_dev *dev)
813{
814 struct hid_device *hid = dev->private;
815 hid->hid_close(hid);
816}
817
807/* 818/*
808 * Register the input device; print a message. 819 * Register the input device; print a message.
809 * Configure the input layer interface 820 * Configure the input layer interface
@@ -816,6 +827,7 @@ int hidinput_connect(struct hid_device *hid)
816 struct hid_input *hidinput = NULL; 827 struct hid_input *hidinput = NULL;
817 struct input_dev *input_dev; 828 struct input_dev *input_dev;
818 int i, j, k; 829 int i, j, k;
830 int max_report_type = HID_OUTPUT_REPORT;
819 831
820 INIT_LIST_HEAD(&hid->inputs); 832 INIT_LIST_HEAD(&hid->inputs);
821 833
@@ -828,7 +840,10 @@ int hidinput_connect(struct hid_device *hid)
828 if (i == hid->maxcollection) 840 if (i == hid->maxcollection)
829 return -1; 841 return -1;
830 842
831 for (k = HID_INPUT_REPORT; k <= HID_OUTPUT_REPORT; k++) 843 if (hid->quirks & HID_QUIRK_SKIP_OUTPUT_REPORTS)
844 max_report_type = HID_INPUT_REPORT;
845
846 for (k = HID_INPUT_REPORT; k <= max_report_type; k++)
832 list_for_each_entry(report, &hid->report_enum[k].report_list, list) { 847 list_for_each_entry(report, &hid->report_enum[k].report_list, list) {
833 848
834 if (!report->maxfield) 849 if (!report->maxfield)
@@ -846,8 +861,8 @@ int hidinput_connect(struct hid_device *hid)
846 861
847 input_dev->private = hid; 862 input_dev->private = hid;
848 input_dev->event = hid->hidinput_input_event; 863 input_dev->event = hid->hidinput_input_event;
849 input_dev->open = hid->hidinput_open; 864 input_dev->open = hidinput_open;
850 input_dev->close = hid->hidinput_close; 865 input_dev->close = hidinput_close;
851 866
852 input_dev->name = hid->name; 867 input_dev->name = hid->name;
853 input_dev->phys = hid->phys; 868 input_dev->phys = hid->phys;
diff --git a/drivers/hwmon/ams/ams-input.c b/drivers/hwmon/ams/ams-input.c
index f126aa485134..18210164e307 100644
--- a/drivers/hwmon/ams/ams-input.c
+++ b/drivers/hwmon/ams/ams-input.c
@@ -153,7 +153,7 @@ int ams_input_init(void)
153} 153}
154 154
155/* Call with ams_info.lock held! */ 155/* Call with ams_info.lock held! */
156void ams_input_exit() 156void ams_input_exit(void)
157{ 157{
158 ams_input_disable(); 158 ams_input_disable();
159 device_remove_file(&ams_info.of_dev->dev, &dev_attr_joystick); 159 device_remove_file(&ams_info.of_dev->dev, &dev_attr_joystick);
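
The one-character fix above is not cosmetic: in C an empty parameter list () leaves the arguments unspecified, while (void) is a real prototype. A two-line illustration:

    void exit_old();     /* exit_old(42); still compiles silently */
    void exit_new(void); /* exit_new(42); is a compile-time error */
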
diff --git a/drivers/i2c/chips/isp1301_omap.c b/drivers/i2c/chips/isp1301_omap.c
index ccdf3e90862b..9fafadb92510 100644
--- a/drivers/i2c/chips/isp1301_omap.c
+++ b/drivers/i2c/chips/isp1301_omap.c
@@ -27,7 +27,7 @@
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include <linux/interrupt.h> 28#include <linux/interrupt.h>
29#include <linux/platform_device.h> 29#include <linux/platform_device.h>
30#include <linux/usb_ch9.h> 30#include <linux/usb/ch9.h>
31#include <linux/usb_gadget.h> 31#include <linux/usb_gadget.h>
32#include <linux/usb.h> 32#include <linux/usb.h>
33#include <linux/usb/otg.h> 33#include <linux/usb/otg.h>
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 3f828052f8d2..ec03341d2bd8 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -167,6 +167,13 @@ config BLK_DEV_IDECS
167 Support for Compact Flash cards, outboard IDE disks, tape drives, 167 Support for Compact Flash cards, outboard IDE disks, tape drives,
168 and CD-ROM drives connected through a PCMCIA card. 168 and CD-ROM drives connected through a PCMCIA card.
169 169
170config BLK_DEV_DELKIN
171 tristate "Cardbus IDE support (Delkin/ASKA/Workbit)"
172 depends on CARDBUS && PCI
173 help
174 Support for Delkin, ASKA, and Workbit Cardbus CompactFlash
175 Adapters. This may also work for similar SD and XD adapters.
176
170config BLK_DEV_IDECD 177config BLK_DEV_IDECD
171 tristate "Include IDE/ATAPI CDROM support" 178 tristate "Include IDE/ATAPI CDROM support"
172 ---help--- 179 ---help---
@@ -264,6 +271,13 @@ config BLK_DEV_IDESCSI
264 If both this SCSI emulation and native ATAPI support are compiled 271 If both this SCSI emulation and native ATAPI support are compiled
265 into the kernel, the native support will be used. 272 into the kernel, the native support will be used.
266 273
274config BLK_DEV_IDEACPI
275 bool "IDE ACPI support"
276 depends on ACPI
277 ---help---
278 Implement ACPI support for generic IDE devices. On modern
279 machines ACPI support is required to properly handle ACPI S3 states.
280
267config IDE_TASK_IOCTL 281config IDE_TASK_IOCTL
268 bool "IDE Taskfile Access" 282 bool "IDE Taskfile Access"
269 help 283 help
@@ -606,6 +620,11 @@ config BLK_DEV_PIIX
606 the kernel to change PIO, DMA and UDMA speeds and to configure 620 the kernel to change PIO, DMA and UDMA speeds and to configure
607 the chip to optimum performance. 621 the chip to optimum performance.
608 622
623config BLK_DEV_IT8213
624 tristate "IT8213 IDE support"
625 help
626 This driver adds support for the ITE 8213 IDE controller.
627
609config BLK_DEV_IT821X 628config BLK_DEV_IT821X
610 tristate "IT821X IDE support" 629 tristate "IT821X IDE support"
611 help 630 help
@@ -742,6 +761,11 @@ config BLK_DEV_VIA82CXXX
742 This allows the kernel to change PIO, DMA and UDMA speeds and to 761 This allows the kernel to change PIO, DMA and UDMA speeds and to
743 configure the chip to optimum performance. 762 configure the chip to optimum performance.
744 763
764config BLK_DEV_TC86C001
765 tristate "Toshiba TC86C001 support"
766 help
767 This driver adds support for Toshiba TC86C001 GOKU-S chip.
768
745endif 769endif
746 770
747config BLK_DEV_IDE_PMAC 771config BLK_DEV_IDE_PMAC
diff --git a/drivers/ide/Makefile b/drivers/ide/Makefile
index 569fae717503..d9f029e8ff74 100644
--- a/drivers/ide/Makefile
+++ b/drivers/ide/Makefile
@@ -22,6 +22,7 @@ ide-core-$(CONFIG_BLK_DEV_IDEPCI) += setup-pci.o
22ide-core-$(CONFIG_BLK_DEV_IDEDMA) += ide-dma.o 22ide-core-$(CONFIG_BLK_DEV_IDEDMA) += ide-dma.o
23ide-core-$(CONFIG_PROC_FS) += ide-proc.o 23ide-core-$(CONFIG_PROC_FS) += ide-proc.o
24ide-core-$(CONFIG_BLK_DEV_IDEPNP) += ide-pnp.o 24ide-core-$(CONFIG_BLK_DEV_IDEPNP) += ide-pnp.o
25ide-core-$(CONFIG_BLK_DEV_IDEACPI) += ide-acpi.o
25 26
26# built-in only drivers from arm/ 27# built-in only drivers from arm/
27ide-core-$(CONFIG_IDE_ARM) += arm/ide_arm.o 28ide-core-$(CONFIG_IDE_ARM) += arm/ide_arm.o
diff --git a/drivers/ide/ide-acpi.c b/drivers/ide/ide-acpi.c
new file mode 100644
index 000000000000..17aea65d7dd2
--- /dev/null
+++ b/drivers/ide/ide-acpi.c
@@ -0,0 +1,697 @@
1/*
2 * ide-acpi.c
3 * Provides ACPI support for IDE drives.
4 *
5 * Copyright (C) 2005 Intel Corp.
6 * Copyright (C) 2005 Randy Dunlap
7 * Copyright (C) 2006 SUSE Linux Products GmbH
8 * Copyright (C) 2006 Hannes Reinecke
9 */
10
11#include <linux/ata.h>
12#include <linux/delay.h>
13#include <linux/device.h>
14#include <linux/errno.h>
15#include <linux/kernel.h>
16#include <acpi/acpi.h>
17#include <linux/ide.h>
18#include <linux/pci.h>
19
20#include <acpi/acpi_bus.h>
21#include <acpi/acnames.h>
22#include <acpi/acnamesp.h>
23#include <acpi/acparser.h>
24#include <acpi/acexcep.h>
25#include <acpi/acmacros.h>
26#include <acpi/actypes.h>
27
28#define REGS_PER_GTF 7
29struct taskfile_array {
30 u8 tfa[REGS_PER_GTF]; /* regs. 0x1f1 - 0x1f7 */
31};
32
33struct GTM_buffer {
34 u32 PIO_speed0;
35 u32 DMA_speed0;
36 u32 PIO_speed1;
37 u32 DMA_speed1;
38 u32 GTM_flags;
39};
40
41struct ide_acpi_drive_link {
42 ide_drive_t *drive;
43 acpi_handle obj_handle;
44 u8 idbuff[512];
45};
46
47struct ide_acpi_hwif_link {
48 ide_hwif_t *hwif;
49 acpi_handle obj_handle;
50 struct GTM_buffer gtm;
51 struct ide_acpi_drive_link master;
52 struct ide_acpi_drive_link slave;
53};
54
55#undef DEBUGGING
56/* note: adds function name and KERN_DEBUG */
57#ifdef DEBUGGING
58#define DEBPRINT(fmt, args...) \
59 printk(KERN_DEBUG "%s: " fmt, __FUNCTION__, ## args)
60#else
61#define DEBPRINT(fmt, args...) do {} while (0)
62#endif /* DEBUGGING */
63
64extern int ide_noacpi;
65extern int ide_noacpitfs;
66extern int ide_noacpionboot;
67
68/**
69 * ide_get_dev_handle - finds acpi_handle and PCI device.function
70 * @dev: device to locate
71 * @handle: returned acpi_handle for @dev
72 * @pcidevfn: return PCI device.func for @dev
73 *
74 * Returns the ACPI object handle to the corresponding PCI device.
75 *
76 * Returns 0 on success, <0 on error.
77 */
78static int ide_get_dev_handle(struct device *dev, acpi_handle *handle,
79 acpi_integer *pcidevfn)
80{
81 struct pci_dev *pdev = to_pci_dev(dev);
82 unsigned int bus, devnum, func;
83 acpi_integer addr;
84 acpi_handle dev_handle;
85 struct acpi_buffer buffer = {.length = ACPI_ALLOCATE_BUFFER,
86 .pointer = NULL};
87 acpi_status status;
88 struct acpi_device_info *dinfo = NULL;
89 int ret = -ENODEV;
90
91 bus = pdev->bus->number;
92 devnum = PCI_SLOT(pdev->devfn);
93 func = PCI_FUNC(pdev->devfn);
94 /* ACPI _ADR encoding for PCI bus: */
95 addr = (acpi_integer)(devnum << 16 | func);
96
97 DEBPRINT("ENTER: pci %02x:%02x.%01x\n", bus, devnum, func);
98
99 dev_handle = DEVICE_ACPI_HANDLE(dev);
100 if (!dev_handle) {
101 DEBPRINT("no acpi handle for device\n");
102 goto err;
103 }
104
105 status = acpi_get_object_info(dev_handle, &buffer);
106 if (ACPI_FAILURE(status)) {
107 DEBPRINT("get_object_info for device failed\n");
108 goto err;
109 }
110 dinfo = buffer.pointer;
111 if (dinfo && (dinfo->valid & ACPI_VALID_ADR) &&
112 dinfo->address == addr) {
113 *pcidevfn = addr;
114 *handle = dev_handle;
115 } else {
 116 DEBPRINT("get_object_info for device has wrong "
 117 "address: %llu, should be %u\n",
118 dinfo ? (unsigned long long)dinfo->address : -1ULL,
119 (unsigned int)addr);
120 goto err;
121 }
122
123 DEBPRINT("for dev=0x%x.%x, addr=0x%llx, *handle=0x%p\n",
124 devnum, func, (unsigned long long)addr, *handle);
125 ret = 0;
126err:
127 kfree(dinfo);
128 return ret;
129}
130
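/*
 * A minimal sketch of the _ADR encoding checked above (the helper name
 * is illustrative, not part of the patch): for devices on a PCI bus,
 * ACPI stores the device number in the high word of _ADR and the
 * function number in the low word.
 */
static inline acpi_integer pci_acpi_adr(unsigned int devnum,
					unsigned int func)
{
	return (acpi_integer)((devnum << 16) | func); /* 1f.1 -> 0x001f0001 */
}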
131/**
132 * ide_acpi_hwif_get_handle - Get ACPI object handle for a given hwif
133 * @hwif: device to locate
134 *
135 * Retrieves the object handle for a given hwif.
136 *
137 * Returns handle on success, 0 on error.
138 */
139static acpi_handle ide_acpi_hwif_get_handle(ide_hwif_t *hwif)
140{
141 struct device *dev = hwif->gendev.parent;
142 acpi_handle dev_handle;
143 acpi_integer pcidevfn;
144 acpi_handle chan_handle;
145 int err;
146
147 DEBPRINT("ENTER: device %s\n", hwif->name);
148
149 if (!dev) {
150 DEBPRINT("no PCI device for %s\n", hwif->name);
151 return NULL;
152 }
153
154 err = ide_get_dev_handle(dev, &dev_handle, &pcidevfn);
155 if (err < 0) {
156 DEBPRINT("ide_get_dev_handle failed (%d)\n", err);
157 return NULL;
158 }
159
 160 /* The child objects of dev_handle are the channel objects, and
 161 * _their_ children are the drive objects; the child index to
 162 * look up here is hwif->channel. */
163 chan_handle = acpi_get_child(dev_handle, hwif->channel);
164 DEBPRINT("chan adr=%d: handle=0x%p\n",
165 hwif->channel, chan_handle);
166
167 return chan_handle;
168}
169
170/**
171 * ide_acpi_drive_get_handle - Get ACPI object handle for a given drive
172 * @drive: device to locate
173 *
174 * Retrieves the object handle of a given drive. According to the ACPI
175 * spec the drive is a child of the hwif.
176 *
177 * Returns handle on success, 0 on error.
178 */
179static acpi_handle ide_acpi_drive_get_handle(ide_drive_t *drive)
180{
181 ide_hwif_t *hwif = HWIF(drive);
182 int port;
183 acpi_handle drive_handle;
184
185 if (!hwif->acpidata)
186 return NULL;
187
188 if (!hwif->acpidata->obj_handle)
189 return NULL;
190
 191 port = hwif->channel ? drive->dn - 2 : drive->dn;
192
193 DEBPRINT("ENTER: %s at channel#: %d port#: %d\n",
194 drive->name, hwif->channel, port);
195
196
197 /* TBD: could also check ACPI object VALID bits */
198 drive_handle = acpi_get_child(hwif->acpidata->obj_handle, port);
199 DEBPRINT("drive %s handle 0x%p\n", drive->name, drive_handle);
200
201 return drive_handle;
202}
203
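/*
 * Sketch of the dn-to-port mapping used above (illustrative helper, not
 * in the patch): drive->dn numbers drives 0..3 across both channels of
 * an interface, while each ACPI channel object only has child drive
 * objects 0 and 1.
 */
static inline int ide_acpi_drive_port(ide_hwif_t *hwif, ide_drive_t *drive)
{
	return hwif->channel ? drive->dn - 2 : drive->dn; /* dn 3 -> port 1 */
}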
204/**
205 * do_drive_get_GTF - get the drive bootup default taskfile settings
206 * @drive: the drive for which the taskfile settings should be retrieved
207 * @gtf_length: number of bytes of _GTF data returned at @gtf_address
208 * @gtf_address: buffer containing _GTF taskfile arrays
209 *
210 * The _GTF method has no input parameters.
211 * It returns a variable number of register set values (registers
212 * hex 1F1..1F7, taskfiles).
213 * The <variable number> is not known in advance, so have ACPI-CA
214 * allocate the buffer as needed and return it, then free it later.
215 *
216 * The returned @gtf_length and @gtf_address are only valid if the
217 * function return value is 0.
218 */
219static int do_drive_get_GTF(ide_drive_t *drive,
220 unsigned int *gtf_length, unsigned long *gtf_address,
221 unsigned long *obj_loc)
222{
223 acpi_status status;
224 struct acpi_buffer output;
225 union acpi_object *out_obj;
226 ide_hwif_t *hwif = HWIF(drive);
227 struct device *dev = hwif->gendev.parent;
228 int err = -ENODEV;
229 int port;
230
231 *gtf_length = 0;
232 *gtf_address = 0UL;
233 *obj_loc = 0UL;
234
235 if (ide_noacpi)
236 return 0;
237
238 if (!dev) {
239 DEBPRINT("no PCI device for %s\n", hwif->name);
240 goto out;
241 }
242
243 if (!hwif->acpidata) {
244 DEBPRINT("no ACPI data for %s\n", hwif->name);
245 goto out;
246 }
247
 248 port = hwif->channel ? drive->dn - 2 : drive->dn;
249
250 if (!drive->acpidata) {
251 if (port == 0) {
252 drive->acpidata = &hwif->acpidata->master;
253 hwif->acpidata->master.drive = drive;
254 } else {
255 drive->acpidata = &hwif->acpidata->slave;
256 hwif->acpidata->slave.drive = drive;
257 }
258 }
259
260 DEBPRINT("ENTER: %s at %s, port#: %d, hard_port#: %d\n",
261 hwif->name, dev->bus_id, port, hwif->channel);
262
263 if (!drive->present) {
264 DEBPRINT("%s drive %d:%d not present\n",
265 hwif->name, hwif->channel, port);
266 goto out;
267 }
268
 269 /* Get this drive's _ADR info, if not already known. */
270 if (!drive->acpidata->obj_handle) {
271 drive->acpidata->obj_handle = ide_acpi_drive_get_handle(drive);
272 if (!drive->acpidata->obj_handle) {
273 DEBPRINT("No ACPI object found for %s\n",
274 drive->name);
275 goto out;
276 }
277 }
278
279 /* Setting up output buffer */
280 output.length = ACPI_ALLOCATE_BUFFER;
281 output.pointer = NULL; /* ACPI-CA sets this; save/free it later */
282
283 /* _GTF has no input parameters */
284 err = -EIO;
285 status = acpi_evaluate_object(drive->acpidata->obj_handle, "_GTF",
286 NULL, &output);
287 if (ACPI_FAILURE(status)) {
288 printk(KERN_DEBUG
289 "%s: Run _GTF error: status = 0x%x\n",
290 __FUNCTION__, status);
291 goto out;
292 }
293
294 if (!output.length || !output.pointer) {
295 DEBPRINT("Run _GTF: "
296 "length or ptr is NULL (0x%llx, 0x%p)\n",
297 (unsigned long long)output.length,
298 output.pointer);
299 goto out;
300 }
301
302 out_obj = output.pointer;
303 if (out_obj->type != ACPI_TYPE_BUFFER) {
304 DEBPRINT("Run _GTF: error: "
305 "expected object type of ACPI_TYPE_BUFFER, "
306 "got 0x%x\n", out_obj->type);
307 err = -ENOENT;
308 kfree(output.pointer);
309 goto out;
310 }
311
312 if (!out_obj->buffer.length || !out_obj->buffer.pointer ||
313 out_obj->buffer.length % REGS_PER_GTF) {
314 printk(KERN_ERR
315 "%s: unexpected GTF length (%d) or addr (0x%p)\n",
316 __FUNCTION__, out_obj->buffer.length,
317 out_obj->buffer.pointer);
318 err = -ENOENT;
319 kfree(output.pointer);
320 goto out;
321 }
322
323 *gtf_length = out_obj->buffer.length;
324 *gtf_address = (unsigned long)out_obj->buffer.pointer;
325 *obj_loc = (unsigned long)out_obj;
326 DEBPRINT("returning gtf_length=%d, gtf_address=0x%lx, obj_loc=0x%lx\n",
327 *gtf_length, *gtf_address, *obj_loc);
328 err = 0;
329out:
330 return err;
331}
332
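/*
 * The ACPI-CA output-buffer pattern used by _GTF above, reduced to a
 * hedged sketch: ACPI-CA allocates one blob holding the union object
 * and its embedded buffer, so a single kfree() of output.pointer
 * releases both.
 */
static acpi_status eval_acpi_buffer_method(acpi_handle handle, char *method)
{
	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *obj;
	acpi_status status;

	status = acpi_evaluate_object(handle, method, NULL, &output);
	if (ACPI_FAILURE(status))
		return status;

	obj = output.pointer;
	if (obj->type != ACPI_TYPE_BUFFER)
		status = AE_TYPE;	/* caller expected raw buffer data */

	kfree(output.pointer);		/* frees obj and obj->buffer.pointer */
	return status;
}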
333/**
334 * taskfile_load_raw - send taskfile registers to drive
335 * @drive: drive to which output is sent
336 * @gtf: raw ATA taskfile register set (0x1f1 - 0x1f7)
337 *
338 * Outputs IDE taskfile to the drive.
339 */
340static int taskfile_load_raw(ide_drive_t *drive,
341 const struct taskfile_array *gtf)
342{
343 ide_task_t args;
344 int err = 0;
345
346 DEBPRINT("(0x1f1-1f7): hex: "
347 "%02x %02x %02x %02x %02x %02x %02x\n",
348 gtf->tfa[0], gtf->tfa[1], gtf->tfa[2],
349 gtf->tfa[3], gtf->tfa[4], gtf->tfa[5], gtf->tfa[6]);
350
351 memset(&args, 0, sizeof(ide_task_t));
352 args.command_type = IDE_DRIVE_TASK_NO_DATA;
353 args.data_phase = TASKFILE_IN;
354 args.handler = &task_no_data_intr;
355
356 /* convert gtf to IDE Taskfile */
357 args.tfRegister[1] = gtf->tfa[0]; /* 0x1f1 */
358 args.tfRegister[2] = gtf->tfa[1]; /* 0x1f2 */
359 args.tfRegister[3] = gtf->tfa[2]; /* 0x1f3 */
360 args.tfRegister[4] = gtf->tfa[3]; /* 0x1f4 */
361 args.tfRegister[5] = gtf->tfa[4]; /* 0x1f5 */
362 args.tfRegister[6] = gtf->tfa[5]; /* 0x1f6 */
363 args.tfRegister[7] = gtf->tfa[6]; /* 0x1f7 */
364
365 if (ide_noacpitfs) {
366 DEBPRINT("_GTF execution disabled\n");
367 return err;
368 }
369
370 err = ide_raw_taskfile(drive, &args, NULL);
371 if (err)
372 printk(KERN_ERR "%s: ide_raw_taskfile failed: %u\n",
373 __FUNCTION__, err);
374
375 return err;
376}
377
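/*
 * Loop-equivalent of the seven assignments above (sketch only): _GTF
 * bytes 0..6 correspond to task-file registers 0x1f1..0x1f7, i.e.
 * tfRegister[1]..tfRegister[7]; slot 0 (the 0x1f0 data register) is
 * deliberately left untouched.
 */
static void gtf_to_taskfile(ide_task_t *args, const struct taskfile_array *gtf)
{
	int i;

	for (i = 0; i < REGS_PER_GTF; i++)
		args->tfRegister[i + 1] = gtf->tfa[i];
}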
378/**
379 * do_drive_set_taskfiles - write the drive taskfile settings from _GTF
380 * @drive: the drive to which the taskfile command should be sent
381 * @gtf_length: total number of bytes of _GTF taskfiles
382 * @gtf_address: location of _GTF taskfile arrays
383 *
384 * Write {gtf_address, length gtf_length} in groups of
385 * REGS_PER_GTF bytes.
386 */
387static int do_drive_set_taskfiles(ide_drive_t *drive,
388 unsigned int gtf_length,
389 unsigned long gtf_address)
390{
391 int rc = -ENODEV, err;
392 int gtf_count = gtf_length / REGS_PER_GTF;
393 int ix;
394 struct taskfile_array *gtf;
395
396 if (ide_noacpi)
397 return 0;
398
399 DEBPRINT("ENTER: %s, hard_port#: %d\n", drive->name, drive->dn);
400
401 if (!drive->present)
402 goto out;
403 if (!gtf_count) /* shouldn't be here */
404 goto out;
405
406 DEBPRINT("total GTF bytes=%u (0x%x), gtf_count=%d, addr=0x%lx\n",
407 gtf_length, gtf_length, gtf_count, gtf_address);
408
409 if (gtf_length % REGS_PER_GTF) {
410 printk(KERN_ERR "%s: unexpected GTF length (%d)\n",
411 __FUNCTION__, gtf_length);
412 goto out;
413 }
414
415 rc = 0;
416 for (ix = 0; ix < gtf_count; ix++) {
417 gtf = (struct taskfile_array *)
418 (gtf_address + ix * REGS_PER_GTF);
419
420 /* send all TaskFile registers (0x1f1-0x1f7) *in*that*order* */
421 err = taskfile_load_raw(drive, gtf);
422 if (err)
423 rc = err;
424 }
425
426out:
427 return rc;
428}
429
430/**
431 * ide_acpi_exec_tfs - get then write drive taskfile settings
432 * @drive: the drive for which the taskfile settings should be
433 * written.
434 *
435 * According to the ACPI spec this should be called after _STM
436 * has been evaluated for the interface. Some ACPI vendors interpret
437 * that as a hard requirement and modify the taskfile according
438 * to the Identify Drive information passed down with _STM.
439 * So one should really make sure to call this only after _STM has
440 * been executed.
441 */
442int ide_acpi_exec_tfs(ide_drive_t *drive)
443{
444 int ret;
445 unsigned int gtf_length;
446 unsigned long gtf_address;
447 unsigned long obj_loc;
448
449 if (ide_noacpi)
450 return 0;
451
452 DEBPRINT("call get_GTF, drive=%s port=%d\n", drive->name, drive->dn);
453
454 ret = do_drive_get_GTF(drive, &gtf_length, &gtf_address, &obj_loc);
455 if (ret < 0) {
456 DEBPRINT("get_GTF error (%d)\n", ret);
457 return ret;
458 }
459
460 DEBPRINT("call set_taskfiles, drive=%s\n", drive->name);
461
462 ret = do_drive_set_taskfiles(drive, gtf_length, gtf_address);
463 kfree((void *)obj_loc);
464 if (ret < 0) {
465 DEBPRINT("set_taskfiles error (%d)\n", ret);
466 }
467
468 DEBPRINT("ret=%d\n", ret);
469
470 return ret;
471}
472EXPORT_SYMBOL_GPL(ide_acpi_exec_tfs);
473
474/**
475 * ide_acpi_get_timing - get the channel (controller) timings
476 * @hwif: target IDE interface (channel)
477 *
478 * This function executes the _GTM ACPI method for the target channel.
479 *
480 */
481void ide_acpi_get_timing(ide_hwif_t *hwif)
482{
483 acpi_status status;
484 struct acpi_buffer output;
485 union acpi_object *out_obj;
486
487 if (ide_noacpi)
488 return;
489
490 DEBPRINT("ENTER:\n");
491
492 if (!hwif->acpidata) {
493 DEBPRINT("no ACPI data for %s\n", hwif->name);
494 return;
495 }
496
497 /* Setting up output buffer for _GTM */
498 output.length = ACPI_ALLOCATE_BUFFER;
499 output.pointer = NULL; /* ACPI-CA sets this; save/free it later */
500
501 /* _GTM has no input parameters */
502 status = acpi_evaluate_object(hwif->acpidata->obj_handle, "_GTM",
503 NULL, &output);
504
505 DEBPRINT("_GTM status: %d, outptr: 0x%p, outlen: 0x%llx\n",
506 status, output.pointer,
507 (unsigned long long)output.length);
508
509 if (ACPI_FAILURE(status)) {
510 DEBPRINT("Run _GTM error: status = 0x%x\n", status);
511 return;
512 }
513
514 if (!output.length || !output.pointer) {
515 DEBPRINT("Run _GTM: length or ptr is NULL (0x%llx, 0x%p)\n",
516 (unsigned long long)output.length,
517 output.pointer);
518 kfree(output.pointer);
519 return;
520 }
521
522 out_obj = output.pointer;
523 if (out_obj->type != ACPI_TYPE_BUFFER) {
524 kfree(output.pointer);
525 DEBPRINT("Run _GTM: error: "
526 "expected object type of ACPI_TYPE_BUFFER, "
527 "got 0x%x\n", out_obj->type);
528 return;
529 }
530
531 if (!out_obj->buffer.length || !out_obj->buffer.pointer ||
532 out_obj->buffer.length != sizeof(struct GTM_buffer)) {
533 kfree(output.pointer);
534 printk(KERN_ERR
535 "%s: unexpected _GTM length (0x%x)[should be 0x%zx] or "
536 "addr (0x%p)\n",
537 __FUNCTION__, out_obj->buffer.length,
538 sizeof(struct GTM_buffer), out_obj->buffer.pointer);
539 return;
540 }
541
542 memcpy(&hwif->acpidata->gtm, out_obj->buffer.pointer,
543 sizeof(struct GTM_buffer));
544
 545 DEBPRINT("_GTM info: ptr: 0x%p, len: 0x%x, exp.len: 0x%zx\n",
546 out_obj->buffer.pointer, out_obj->buffer.length,
547 sizeof(struct GTM_buffer));
548
549 DEBPRINT("_GTM fields: 0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n",
550 hwif->acpidata->gtm.PIO_speed0,
551 hwif->acpidata->gtm.DMA_speed0,
552 hwif->acpidata->gtm.PIO_speed1,
553 hwif->acpidata->gtm.DMA_speed1,
554 hwif->acpidata->gtm.GTM_flags);
555
556 kfree(output.pointer);
557}
558EXPORT_SYMBOL_GPL(ide_acpi_get_timing);
559
560/**
561 * ide_acpi_push_timing - set the channel (controller) timings
562 * @hwif: target IDE interface (channel)
563 *
564 * This function executes the _STM ACPI method for the target channel.
565 *
 566 * _STM requires Identify Drive data, which has to be passed as an argument.
567 * Unfortunately hd_driveid is a mangled version which we can't readily
568 * use; hence we'll get the information afresh.
569 */
570void ide_acpi_push_timing(ide_hwif_t *hwif)
571{
572 acpi_status status;
573 struct acpi_object_list input;
574 union acpi_object in_params[3];
575 struct ide_acpi_drive_link *master = &hwif->acpidata->master;
576 struct ide_acpi_drive_link *slave = &hwif->acpidata->slave;
577
578 if (ide_noacpi)
579 return;
580
581 DEBPRINT("ENTER:\n");
582
583 if (!hwif->acpidata) {
584 DEBPRINT("no ACPI data for %s\n", hwif->name);
585 return;
586 }
587
588 /* Give the GTM buffer + drive Identify data to the channel via the
589 * _STM method: */
590 /* setup input parameters buffer for _STM */
591 input.count = 3;
592 input.pointer = in_params;
593 in_params[0].type = ACPI_TYPE_BUFFER;
594 in_params[0].buffer.length = sizeof(struct GTM_buffer);
595 in_params[0].buffer.pointer = (u8 *)&hwif->acpidata->gtm;
596 in_params[1].type = ACPI_TYPE_BUFFER;
597 in_params[1].buffer.length = sizeof(struct hd_driveid);
598 in_params[1].buffer.pointer = (u8 *)&master->idbuff;
599 in_params[2].type = ACPI_TYPE_BUFFER;
600 in_params[2].buffer.length = sizeof(struct hd_driveid);
601 in_params[2].buffer.pointer = (u8 *)&slave->idbuff;
602 /* Output buffer: _STM has no output */
603
604 status = acpi_evaluate_object(hwif->acpidata->obj_handle, "_STM",
605 &input, NULL);
606
607 if (ACPI_FAILURE(status)) {
608 DEBPRINT("Run _STM error: status = 0x%x\n", status);
609 }
610 DEBPRINT("_STM status: %d\n", status);
611}
612EXPORT_SYMBOL_GPL(ide_acpi_push_timing);
613
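/*
 * The _STM argument convention used above, condensed into a hedged
 * sketch (helper name illustrative): one acpi_object_list carrying
 * three ACPI buffers -- the 20-byte _GTM image, then one 512-byte
 * IDENTIFY block per drive, in master/slave order.
 */
static void stm_pack_args(union acpi_object in[3],
			  struct ide_acpi_hwif_link *h)
{
	in[0].type	     = ACPI_TYPE_BUFFER;
	in[0].buffer.length  = sizeof(h->gtm);
	in[0].buffer.pointer = (u8 *)&h->gtm;
	in[1].type	     = ACPI_TYPE_BUFFER;
	in[1].buffer.length  = sizeof(h->master.idbuff);
	in[1].buffer.pointer = h->master.idbuff;
	in[2].type	     = ACPI_TYPE_BUFFER;
	in[2].buffer.length  = sizeof(h->slave.idbuff);
	in[2].buffer.pointer = h->slave.idbuff;
}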
614/**
615 * ide_acpi_init - initialize the ACPI link for an IDE interface
616 * @hwif: target IDE interface (channel)
617 *
 618 * The ACPI spec is not quite clear about when the drive identify buffer
 619 * should be obtained. Calling IDENTIFY DEVICE during shutdown
 620 * is not the best of ideas as the drive might already have been put to
 621 * sleep. And obviously we can't call it during resume.
622 * So we get the information during startup; but this means that
623 * any changes during run-time will be lost after resume.
624 */
625void ide_acpi_init(ide_hwif_t *hwif)
626{
627 int unit;
628 int err;
629 struct ide_acpi_drive_link *master;
630 struct ide_acpi_drive_link *slave;
631
632 hwif->acpidata = kzalloc(sizeof(struct ide_acpi_hwif_link), GFP_KERNEL);
633 if (!hwif->acpidata)
634 return;
635
636 hwif->acpidata->obj_handle = ide_acpi_hwif_get_handle(hwif);
637 if (!hwif->acpidata->obj_handle) {
638 DEBPRINT("no ACPI object for %s found\n", hwif->name);
639 kfree(hwif->acpidata);
640 hwif->acpidata = NULL;
641 return;
642 }
643
644 /*
645 * The ACPI spec mandates that we send information
 646 * for both drives, regardless of whether they are connected
647 * or not.
648 */
649 hwif->acpidata->master.drive = &hwif->drives[0];
650 hwif->drives[0].acpidata = &hwif->acpidata->master;
651 master = &hwif->acpidata->master;
652
653 hwif->acpidata->slave.drive = &hwif->drives[1];
654 hwif->drives[1].acpidata = &hwif->acpidata->slave;
655 slave = &hwif->acpidata->slave;
656
657
658 /*
659 * Send IDENTIFY for each drive
660 */
661 if (master->drive->present) {
662 err = taskfile_lib_get_identify(master->drive, master->idbuff);
663 if (err) {
664 DEBPRINT("identify device %s failed (%d)\n",
665 master->drive->name, err);
666 }
667 }
668
669 if (slave->drive->present) {
670 err = taskfile_lib_get_identify(slave->drive, slave->idbuff);
671 if (err) {
672 DEBPRINT("identify device %s failed (%d)\n",
673 slave->drive->name, err);
674 }
675 }
676
677 if (ide_noacpionboot) {
678 DEBPRINT("ACPI methods disabled on boot\n");
679 return;
680 }
681
682 /*
 683 * ACPI requires us to call _GTM and _STM on startup
684 */
685 ide_acpi_get_timing(hwif);
686 ide_acpi_push_timing(hwif);
687
688 for (unit = 0; unit < MAX_DRIVES; ++unit) {
689 ide_drive_t *drive = &hwif->drives[unit];
690
691 if (drive->present) {
692 /* Execute ACPI startup code */
693 ide_acpi_exec_tfs(drive);
694 }
695 }
696}
697EXPORT_SYMBOL_GPL(ide_acpi_init);
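Taken together, the comments above pin down a strict method ordering: _GTM reads the channel timings, _STM pushes them back along with the Identify data, and only then may the per-drive _GTF taskfiles be replayed. A condensed sketch of that sequence, with an illustrative helper name that is not part of the patch:

static void ide_acpi_replay_config(ide_hwif_t *hwif)
{
	int unit;

	ide_acpi_get_timing(hwif);	/* _GTM: read channel timings */
	ide_acpi_push_timing(hwif);	/* _STM: push timings + IDENTIFY */

	for (unit = 0; unit < MAX_DRIVES; ++unit)	/* _GTF per drive */
		if (hwif->drives[unit].present)
			ide_acpi_exec_tfs(&hwif->drives[unit]);
}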
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 5a5c565a32a8..176bbc850d6b 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -1384,6 +1384,9 @@ static int hwif_init(ide_hwif_t *hwif)
1384 1384
1385done: 1385done:
1386 init_gendisk(hwif); 1386 init_gendisk(hwif);
1387
1388 ide_acpi_init(hwif);
1389
1387 hwif->present = 1; /* success */ 1390 hwif->present = 1; /* success */
1388 return 1; 1391 return 1;
1389 1392
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index 3b334af0c7b9..c750f6ce770a 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -187,6 +187,12 @@ int noautodma = 1;
187 187
188EXPORT_SYMBOL(noautodma); 188EXPORT_SYMBOL(noautodma);
189 189
190#ifdef CONFIG_BLK_DEV_IDEACPI
191int ide_noacpi = 0;
192int ide_noacpitfs = 1;
193int ide_noacpionboot = 1;
194#endif
195
190/* 196/*
191 * This is declared extern in ide.h, for access by other IDE modules: 197 * This is declared extern in ide.h, for access by other IDE modules:
192 */ 198 */
@@ -1214,10 +1220,15 @@ EXPORT_SYMBOL(system_bus_clock);
1214static int generic_ide_suspend(struct device *dev, pm_message_t mesg) 1220static int generic_ide_suspend(struct device *dev, pm_message_t mesg)
1215{ 1221{
1216 ide_drive_t *drive = dev->driver_data; 1222 ide_drive_t *drive = dev->driver_data;
1223 ide_hwif_t *hwif = HWIF(drive);
1217 struct request rq; 1224 struct request rq;
1218 struct request_pm_state rqpm; 1225 struct request_pm_state rqpm;
1219 ide_task_t args; 1226 ide_task_t args;
1220 1227
1228 /* Call ACPI _GTM only once */
1229 if (!(drive->dn % 2))
1230 ide_acpi_get_timing(hwif);
1231
1221 memset(&rq, 0, sizeof(rq)); 1232 memset(&rq, 0, sizeof(rq));
1222 memset(&rqpm, 0, sizeof(rqpm)); 1233 memset(&rqpm, 0, sizeof(rqpm));
1223 memset(&args, 0, sizeof(args)); 1234 memset(&args, 0, sizeof(args));
@@ -1235,10 +1246,17 @@ static int generic_ide_suspend(struct device *dev, pm_message_t mesg)
1235static int generic_ide_resume(struct device *dev) 1246static int generic_ide_resume(struct device *dev)
1236{ 1247{
1237 ide_drive_t *drive = dev->driver_data; 1248 ide_drive_t *drive = dev->driver_data;
1249 ide_hwif_t *hwif = HWIF(drive);
1238 struct request rq; 1250 struct request rq;
1239 struct request_pm_state rqpm; 1251 struct request_pm_state rqpm;
1240 ide_task_t args; 1252 ide_task_t args;
1241 1253
1254 /* Call ACPI _STM only once */
1255 if (!(drive->dn % 2))
1256 ide_acpi_push_timing(hwif);
1257
1258 ide_acpi_exec_tfs(drive);
1259
1242 memset(&rq, 0, sizeof(rq)); 1260 memset(&rq, 0, sizeof(rq));
1243 memset(&rqpm, 0, sizeof(rqpm)); 1261 memset(&rqpm, 0, sizeof(rqpm));
1244 memset(&args, 0, sizeof(args)); 1262 memset(&args, 0, sizeof(args));
@@ -1543,6 +1561,24 @@ static int __init ide_setup(char *s)
1543 } 1561 }
1544#endif /* CONFIG_BLK_DEV_IDEPCI */ 1562#endif /* CONFIG_BLK_DEV_IDEPCI */
1545 1563
1564#ifdef CONFIG_BLK_DEV_IDEACPI
1565 if (!strcmp(s, "ide=noacpi")) {
1566 //printk(" : Disable IDE ACPI support.\n");
1567 ide_noacpi = 1;
1568 return 1;
1569 }
1570 if (!strcmp(s, "ide=acpigtf")) {
1571 //printk(" : Enable IDE ACPI _GTF support.\n");
1572 ide_noacpitfs = 0;
1573 return 1;
1574 }
1575 if (!strcmp(s, "ide=acpionboot")) {
1576 //printk(" : Call IDE ACPI methods on boot.\n");
1577 ide_noacpionboot = 0;
1578 return 1;
1579 }
1580#endif /* CONFIG_BLK_DEV_IDEACPI */
1581
1546 /* 1582 /*
1547 * Look for drive options: "hdx=" 1583 * Look for drive options: "hdx="
1548 */ 1584 */
@@ -1781,9 +1817,9 @@ done:
1781 return 1; 1817 return 1;
1782} 1818}
1783 1819
1784extern void pnpide_init(void); 1820extern void __init pnpide_init(void);
1785extern void pnpide_exit(void); 1821extern void __exit pnpide_exit(void);
1786extern void h8300_ide_init(void); 1822extern void __init h8300_ide_init(void);
1787 1823
1788/* 1824/*
1789 * probe_for_hwifs() finds/initializes "known" IDE interfaces 1825 * probe_for_hwifs() finds/initializes "known" IDE interfaces
@@ -2088,7 +2124,7 @@ int __init init_module (void)
2088 return ide_init(); 2124 return ide_init();
2089} 2125}
2090 2126
2091void cleanup_module (void) 2127void __exit cleanup_module (void)
2092{ 2128{
2093 int index; 2129 int index;
2094 2130
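The drive->dn % 2 test in the suspend/resume hunks above is the per-channel gate: _GTM and _STM are channel-wide methods, but the PM callbacks run once per drive, so the calls are made only for the master (even dn). A hedged one-liner capturing the rule, not part of the patch:

static inline int ide_acpi_channel_method_due(ide_drive_t *drive)
{
	return (drive->dn % 2) == 0;	/* dn 0, 2, ... = channel master */
}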
diff --git a/drivers/ide/pci/Makefile b/drivers/ide/pci/Makefile
index fef08960aa4c..6591ff4753cb 100644
--- a/drivers/ide/pci/Makefile
+++ b/drivers/ide/pci/Makefile
@@ -9,9 +9,10 @@ obj-$(CONFIG_BLK_DEV_CS5530) += cs5530.o
9obj-$(CONFIG_BLK_DEV_CS5535) += cs5535.o 9obj-$(CONFIG_BLK_DEV_CS5535) += cs5535.o
10obj-$(CONFIG_BLK_DEV_SC1200) += sc1200.o 10obj-$(CONFIG_BLK_DEV_SC1200) += sc1200.o
11obj-$(CONFIG_BLK_DEV_CY82C693) += cy82c693.o 11obj-$(CONFIG_BLK_DEV_CY82C693) += cy82c693.o
12obj-$(CONFIG_BLK_DEV_DELKIN) += delkin_cb.o
12obj-$(CONFIG_BLK_DEV_HPT34X) += hpt34x.o 13obj-$(CONFIG_BLK_DEV_HPT34X) += hpt34x.o
13obj-$(CONFIG_BLK_DEV_HPT366) += hpt366.o 14obj-$(CONFIG_BLK_DEV_HPT366) += hpt366.o
14#obj-$(CONFIG_BLK_DEV_HPT37X) += hpt37x.o 15obj-$(CONFIG_BLK_DEV_IT8213) += it8213.o
15obj-$(CONFIG_BLK_DEV_IT821X) += it821x.o 16obj-$(CONFIG_BLK_DEV_IT821X) += it821x.o
16obj-$(CONFIG_BLK_DEV_JMICRON) += jmicron.o 17obj-$(CONFIG_BLK_DEV_JMICRON) += jmicron.o
17obj-$(CONFIG_BLK_DEV_NS87415) += ns87415.o 18obj-$(CONFIG_BLK_DEV_NS87415) += ns87415.o
@@ -26,6 +27,7 @@ obj-$(CONFIG_BLK_DEV_SIIMAGE) += siimage.o
26obj-$(CONFIG_BLK_DEV_SIS5513) += sis5513.o 27obj-$(CONFIG_BLK_DEV_SIS5513) += sis5513.o
27obj-$(CONFIG_BLK_DEV_SL82C105) += sl82c105.o 28obj-$(CONFIG_BLK_DEV_SL82C105) += sl82c105.o
28obj-$(CONFIG_BLK_DEV_SLC90E66) += slc90e66.o 29obj-$(CONFIG_BLK_DEV_SLC90E66) += slc90e66.o
30obj-$(CONFIG_BLK_DEV_TC86C001) += tc86c001.o
29obj-$(CONFIG_BLK_DEV_TRIFLEX) += triflex.o 31obj-$(CONFIG_BLK_DEV_TRIFLEX) += triflex.o
30obj-$(CONFIG_BLK_DEV_TRM290) += trm290.o 32obj-$(CONFIG_BLK_DEV_TRM290) += trm290.o
31obj-$(CONFIG_BLK_DEV_VIA82CXXX) += via82cxxx.o 33obj-$(CONFIG_BLK_DEV_VIA82CXXX) += via82cxxx.o
diff --git a/drivers/ide/pci/delkin_cb.c b/drivers/ide/pci/delkin_cb.c
new file mode 100644
index 000000000000..e2672fc65d30
--- /dev/null
+++ b/drivers/ide/pci/delkin_cb.c
@@ -0,0 +1,140 @@
1/*
2 * linux/drivers/ide/pci/delkin_cb.c
3 *
4 * Created 20 Oct 2004 by Mark Lord
5 *
6 * Basic support for Delkin/ASKA/Workbit Cardbus CompactFlash adapter
7 *
8 * Modeled after the 16-bit PCMCIA driver: ide-cs.c
9 *
10 * This is slightly peculiar, in that it is a PCI driver,
11 * but is NOT an IDE PCI driver -- the IDE layer does not directly
12 * support hot insertion/removal of PCI interfaces, so this driver
13 * is unable to use the IDE PCI interfaces. Instead, it uses the
14 * same interfaces as the ide-cs (PCMCIA) driver uses.
15 * On the plus side, the driver is also smaller/simpler this way.
16 *
17 * This file is subject to the terms and conditions of the GNU General Public
18 * License. See the file COPYING in the main directory of this archive for
19 * more details.
20 */
21#include <linux/autoconf.h>
22#include <linux/types.h>
23#include <linux/module.h>
24#include <linux/mm.h>
25#include <linux/blkdev.h>
26#include <linux/hdreg.h>
27#include <linux/ide.h>
28#include <linux/init.h>
29#include <linux/pci.h>
30#include <asm/io.h>
31
32/*
33 * No chip documentation has yet been found,
34 * so these configuration values were pulled from
35 * a running Win98 system using "debug".
36 * This gives around 3MByte/second read performance,
37 * which is about 2/3 of what the chip is capable of.
38 *
39 * There is also a 4KByte mmio region on the card,
40 * but its purpose has yet to be reverse-engineered.
41 */
42static const u8 setup[] = {
43 0x00, 0x05, 0xbe, 0x01, 0x20, 0x8f, 0x00, 0x00,
44 0xa4, 0x1f, 0xb3, 0x1b, 0x00, 0x00, 0x00, 0x80,
45 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
46 0x00, 0x00, 0x00, 0x00, 0xa4, 0x83, 0x02, 0x13,
47};
48
49static int __devinit
50delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id)
51{
52 unsigned long base;
53 hw_regs_t hw;
54 ide_hwif_t *hwif = NULL;
55 ide_drive_t *drive;
56 int i, rc;
57
58 rc = pci_enable_device(dev);
59 if (rc) {
60 printk(KERN_ERR "delkin_cb: pci_enable_device failed (%d)\n", rc);
61 return rc;
62 }
63 rc = pci_request_regions(dev, "delkin_cb");
64 if (rc) {
65 printk(KERN_ERR "delkin_cb: pci_request_regions failed (%d)\n", rc);
66 pci_disable_device(dev);
67 return rc;
68 }
69 base = pci_resource_start(dev, 0);
70 outb(0x02, base + 0x1e); /* set nIEN to block interrupts */
71 inb(base + 0x17); /* read status to clear interrupts */
72 for (i = 0; i < sizeof(setup); ++i) {
73 if (setup[i])
74 outb(setup[i], base + i);
75 }
76 pci_release_regions(dev); /* IDE layer handles regions itself */
77
78 memset(&hw, 0, sizeof(hw));
79 ide_std_init_ports(&hw, base + 0x10, base + 0x1e);
80 hw.irq = dev->irq;
81 hw.chipset = ide_pci; /* this enables IRQ sharing */
82
83 rc = ide_register_hw_with_fixup(&hw, &hwif, ide_undecoded_slave);
84 if (rc < 0) {
85 printk(KERN_ERR "delkin_cb: ide_register_hw failed (%d)\n", rc);
86 pci_disable_device(dev);
87 return -ENODEV;
88 }
89 pci_set_drvdata(dev, hwif);
90 hwif->pci_dev = dev;
91 drive = &hwif->drives[0];
92 if (drive->present) {
93 drive->io_32bit = 1;
94 drive->unmask = 1;
95 }
96 return 0;
97}
98
99static void
100delkin_cb_remove (struct pci_dev *dev)
101{
102 ide_hwif_t *hwif = pci_get_drvdata(dev);
103
104 if (hwif)
105 ide_unregister(hwif->index);
106 pci_disable_device(dev);
107}
108
109static struct pci_device_id delkin_cb_pci_tbl[] __devinitdata = {
110 { 0x1145, 0xf021, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
111 { 0, },
112};
113MODULE_DEVICE_TABLE(pci, delkin_cb_pci_tbl);
114
115static struct pci_driver driver = {
116 .name = "Delkin-ASKA-Workbit Cardbus IDE",
117 .id_table = delkin_cb_pci_tbl,
118 .probe = delkin_cb_probe,
119 .remove = delkin_cb_remove,
120};
121
122static int
123delkin_cb_init (void)
124{
125 return pci_module_init(&driver);
126}
127
128static void
129delkin_cb_exit (void)
130{
131 pci_unregister_driver(&driver);
132}
133
134module_init(delkin_cb_init);
135module_exit(delkin_cb_exit);
136
137MODULE_AUTHOR("Mark Lord");
138MODULE_DESCRIPTION("Basic support for Delkin/ASKA/Workbit Cardbus IDE");
139MODULE_LICENSE("GPL");
140
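For reference, the port layout the probe routine above hands to the IDE core can be restated compactly: ide_std_init_ports() fills the eight consecutive task-file ports starting at BAR0+0x10 plus the control register at BAR0+0x1e, matching the register pokes done just before. A sketch, assuming only what the probe code itself uses (the helper name is illustrative):

static void delkin_fill_hw(hw_regs_t *hw, unsigned long base, int irq)
{
	memset(hw, 0, sizeof(*hw));
	ide_std_init_ports(hw, base + 0x10, base + 0x1e);
	hw->irq = irq;
	hw->chipset = ide_pci;	/* lets the IDE core share the IRQ */
}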
diff --git a/drivers/ide/pci/hpt366.c b/drivers/ide/pci/hpt366.c
index b486442dd5d7..05be8fadda7a 100644
--- a/drivers/ide/pci/hpt366.c
+++ b/drivers/ide/pci/hpt366.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * linux/drivers/ide/pci/hpt366.c Version 0.36 April 25, 2003 2 * linux/drivers/ide/pci/hpt366.c Version 1.01 Dec 23, 2006
3 * 3 *
4 * Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org> 4 * Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org>
5 * Portions Copyright (C) 2001 Sun Microsystems, Inc. 5 * Portions Copyright (C) 2001 Sun Microsystems, Inc.
@@ -60,13 +60,10 @@
60 * channel caused the cached register value to get out of sync with the 60 * channel caused the cached register value to get out of sync with the
61 * actual one, the channels weren't serialized, the turnaround shouldn't 61 * actual one, the channels weren't serialized, the turnaround shouldn't
62 * be done on 66 MHz PCI bus 62 * be done on 66 MHz PCI bus
63 * - avoid calibrating PLL twice as the second time results in a wrong PCI 63 * - disable UltraATA/100 for HPT370 by default as the 33 MHz clock being used
64 * frequency and thus in the wrong timings for the secondary channel 64 * does not allow for this speed anyway
65 * - disable UltraATA/133 for HPT372 by default (50 MHz DPLL clock do not 65 * - avoid touching disabled channels (e.g. HPT371/N are single channel chips,
66 * allow for this speed anyway) 66 * their primary channel is kind of virtual, it isn't tied to any pins)
67 * - add support for HPT302N and HPT371N clocking (the same as for HPT372N)
68 * - HPT371/N are single channel chips, so avoid touching the primary channel
69 * which exists only virtually (there's no pins for it)
70 * - fix/remove bad/unused timing tables and use one set of tables for the whole 67 * - fix/remove bad/unused timing tables and use one set of tables for the whole
71 * HPT37x chip family; save space by introducing the separate transfer mode 68 * HPT37x chip family; save space by introducing the separate transfer mode
72 * table in which the mode lookup is done 69 * table in which the mode lookup is done
@@ -76,11 +73,47 @@
76 * and for HPT36x the obsolete HDIO_TRISTATE_HWIF handler was called instead 73 * and for HPT36x the obsolete HDIO_TRISTATE_HWIF handler was called instead
77 * - pass to init_chipset() handlers a copy of the IDE PCI device structure as 74 * - pass to init_chipset() handlers a copy of the IDE PCI device structure as
78 * they tamper with its fields 75 * they tamper with its fields
79 * <source@mvista.com> 76 * - pass to the init_setup handlers a copy of the ide_pci_device_t structure
80 * 77 * since they may tamper with its fields
78 * - prefix the driver startup messages with the real chip name
79 * - claim the extra 240 bytes of I/O space for all chips
80 * - optimize the rate masking/filtering and the drive list lookup code
81 * - use pci_get_slot() to get to the function 1 of HPT36x/374
82 * - cache offset of the channel's misc. control registers (MCRs) being used
83 * throughout the driver
84 * - only touch the relevant MCR when detecting the cable type on HPT374's
85 * function 1
86 * - rename all the register related variables consistently
87 * - move all the interrupt twiddling code from the speedproc handlers into
88 * init_hwif_hpt366(), also grouping all the DMA related code together there
89 * - merge two HPT37x speedproc handlers, fix the PIO timing register mask and
90 * separate the UltraDMA and MWDMA masks there to avoid changing PIO timings
91 * when setting an UltraDMA mode
92 * - fix hpt3xx_tune_drive() to set the PIO mode requested, not always select
93 * the best possible one
94 * - clean up DMA timeout handling for HPT370
95 * - switch to using the enumeration type to differ between the numerous chip
96 * variants, matching PCI device/revision ID with the chip type early, at the
97 * init_setup stage
98 * - extend the hpt_info structure to hold the DPLL and PCI clock frequencies,
99 * stop duplicating it for each channel by storing the pointer in the pci_dev
100 * structure: first, at the init_setup stage, point it to a static "template"
101 * with only the chip type and its specific base DPLL frequency, the highest
102 * supported DMA mode, and the chip settings table pointer filled, then, at
103 * the init_chipset stage, allocate per-chip instance and fill it with the
104 * rest of the necessary information
105 * - get rid of the constant thresholds in the HPT37x PCI clock detection code,
106 * switch to calculating PCI clock frequency based on the chip's base DPLL
107 * frequency
108 * - switch to using the DPLL clock and enable UltraATA/133 mode by default on
109 * anything newer than HPT370/A
110 * - fold PCI clock detection and DPLL setup code into init_chipset_hpt366(),
111 * also fixing the interchanged 25/40 MHz PCI clock cases for HPT36x chips;
112 * unify HPT36x/37x timing setup code and the speedproc handlers by joining
113 * the register setting lists into the table indexed by the clock selected
114 * Sergei Shtylyov, <sshtylyov@ru.mvista.com> or <source@mvista.com>
81 */ 115 */
82 116
83
84#include <linux/types.h> 117#include <linux/types.h>
85#include <linux/module.h> 118#include <linux/module.h>
86#include <linux/kernel.h> 119#include <linux/kernel.h>
@@ -332,93 +365,159 @@ static u32 sixty_six_base_hpt37x[] = {
332}; 365};
333 366
334#define HPT366_DEBUG_DRIVE_INFO 0 367#define HPT366_DEBUG_DRIVE_INFO 0
335#define HPT374_ALLOW_ATA133_6 0 368#define HPT374_ALLOW_ATA133_6 1
336#define HPT371_ALLOW_ATA133_6 0 369#define HPT371_ALLOW_ATA133_6 1
337#define HPT302_ALLOW_ATA133_6 0 370#define HPT302_ALLOW_ATA133_6 1
338#define HPT372_ALLOW_ATA133_6 0 371#define HPT372_ALLOW_ATA133_6 1
339#define HPT370_ALLOW_ATA100_5 1 372#define HPT370_ALLOW_ATA100_5 0
340#define HPT366_ALLOW_ATA66_4 1 373#define HPT366_ALLOW_ATA66_4 1
341#define HPT366_ALLOW_ATA66_3 1 374#define HPT366_ALLOW_ATA66_3 1
342#define HPT366_MAX_DEVS 8 375#define HPT366_MAX_DEVS 8
343 376
344#define F_LOW_PCI_33 0x23 377/* Supported ATA clock frequencies */
345#define F_LOW_PCI_40 0x29 378enum ata_clock {
346#define F_LOW_PCI_50 0x2d 379 ATA_CLOCK_25MHZ,
347#define F_LOW_PCI_66 0x42 380 ATA_CLOCK_33MHZ,
381 ATA_CLOCK_40MHZ,
382 ATA_CLOCK_50MHZ,
383 ATA_CLOCK_66MHZ,
384 NUM_ATA_CLOCKS
385};
348 386
349/* 387/*
350 * Hold all the highpoint quirks and revision information in one 388 * Hold all the HighPoint chip information in one place.
351 * place.
352 */ 389 */
353 390
354struct hpt_info 391struct hpt_info {
355{ 392 u8 chip_type; /* Chip type */
356 u8 max_mode; /* Speeds allowed */ 393 u8 max_mode; /* Speeds allowed */
357 int revision; /* Chipset revision */ 394 u8 dpll_clk; /* DPLL clock in MHz */
358 int flags; /* Chipset properties */ 395 u8 pci_clk; /* PCI clock in MHz */
359#define PLL_MODE 1 396 u32 **settings; /* Chipset settings table */
360#define IS_3xxN 2
361#define PCI_66MHZ 4
362 /* Speed table */
363 u32 *speed;
364}; 397};
365 398
366/* 399/* Supported HighPoint chips */
367 * This wants fixing so that we do everything not by classrev 400enum {
368 * (which breaks on the newest chips) but by creating an 401 HPT36x,
369 * enumeration of chip variants and using that 402 HPT370,
370 */ 403 HPT370A,
404 HPT374,
405 HPT372,
406 HPT372A,
407 HPT302,
408 HPT371,
409 HPT372N,
410 HPT302N,
411 HPT371N
412};
413
414static u32 *hpt36x_settings[NUM_ATA_CLOCKS] = {
415 twenty_five_base_hpt36x,
416 thirty_three_base_hpt36x,
417 forty_base_hpt36x,
418 NULL,
419 NULL
420};
421
422static u32 *hpt37x_settings[NUM_ATA_CLOCKS] = {
423 NULL,
424 thirty_three_base_hpt37x,
425 NULL,
426 fifty_base_hpt37x,
427 sixty_six_base_hpt37x
428};
429
430static struct hpt_info hpt36x __devinitdata = {
431 .chip_type = HPT36x,
432 .max_mode = (HPT366_ALLOW_ATA66_4 || HPT366_ALLOW_ATA66_3) ? 2 : 1,
433 .dpll_clk = 0, /* no DPLL */
434 .settings = hpt36x_settings
435};
436
437static struct hpt_info hpt370 __devinitdata = {
438 .chip_type = HPT370,
439 .max_mode = HPT370_ALLOW_ATA100_5 ? 3 : 2,
440 .dpll_clk = 48,
441 .settings = hpt37x_settings
442};
443
444static struct hpt_info hpt370a __devinitdata = {
445 .chip_type = HPT370A,
446 .max_mode = HPT370_ALLOW_ATA100_5 ? 3 : 2,
447 .dpll_clk = 48,
448 .settings = hpt37x_settings
449};
450
451static struct hpt_info hpt374 __devinitdata = {
452 .chip_type = HPT374,
453 .max_mode = HPT374_ALLOW_ATA133_6 ? 4 : 3,
454 .dpll_clk = 48,
455 .settings = hpt37x_settings
456};
457
458static struct hpt_info hpt372 __devinitdata = {
459 .chip_type = HPT372,
460 .max_mode = HPT372_ALLOW_ATA133_6 ? 4 : 3,
461 .dpll_clk = 55,
462 .settings = hpt37x_settings
463};
464
465static struct hpt_info hpt372a __devinitdata = {
466 .chip_type = HPT372A,
467 .max_mode = HPT372_ALLOW_ATA133_6 ? 4 : 3,
468 .dpll_clk = 66,
469 .settings = hpt37x_settings
470};
471
472static struct hpt_info hpt302 __devinitdata = {
473 .chip_type = HPT302,
474 .max_mode = HPT302_ALLOW_ATA133_6 ? 4 : 3,
475 .dpll_clk = 66,
476 .settings = hpt37x_settings
477};
478
479static struct hpt_info hpt371 __devinitdata = {
480 .chip_type = HPT371,
481 .max_mode = HPT371_ALLOW_ATA133_6 ? 4 : 3,
482 .dpll_clk = 66,
483 .settings = hpt37x_settings
484};
485
486static struct hpt_info hpt372n __devinitdata = {
487 .chip_type = HPT372N,
488 .max_mode = HPT372_ALLOW_ATA133_6 ? 4 : 3,
489 .dpll_clk = 77,
490 .settings = hpt37x_settings
491};
492
493static struct hpt_info hpt302n __devinitdata = {
494 .chip_type = HPT302N,
495 .max_mode = HPT302_ALLOW_ATA133_6 ? 4 : 3,
496 .dpll_clk = 77,
 .settings = hpt37x_settings
497};
371 498
372static __devinit u32 hpt_revision (struct pci_dev *dev) 499static struct hpt_info hpt371n __devinitdata = {
500 .chip_type = HPT371N,
501 .max_mode = HPT371_ALLOW_ATA133_6 ? 4 : 3,
502 .dpll_clk = 77,
503 .settings = hpt37x_settings
504};
505
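/*
 * The template-then-instance scheme described in the changelog above,
 * as a hedged sketch (helper name illustrative): init_setup publishes
 * one of the static __devinitdata templates via PCI drvdata, and
 * init_chipset later swaps in a writable per-chip copy.
 */
static struct hpt_info *hpt_instantiate(struct pci_dev *dev,
					const struct hpt_info *tmpl)
{
	struct hpt_info *info = kmalloc(sizeof(*info), GFP_KERNEL);

	if (info) {
		*info = *tmpl;			/* start from the template */
		pci_set_drvdata(dev, info);	/* per-chip instance */
	}
	return info;
}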
506static int check_in_drive_list(ide_drive_t *drive, const char **list)
373{ 507{
374 u32 class_rev; 508 struct hd_driveid *id = drive->id;
375 pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev);
376 class_rev &= 0xff;
377
378 switch(dev->device) {
379 /* Remap new 372N onto 372 */
380 case PCI_DEVICE_ID_TTI_HPT372N:
381 class_rev = PCI_DEVICE_ID_TTI_HPT372; break;
382 case PCI_DEVICE_ID_TTI_HPT374:
383 class_rev = PCI_DEVICE_ID_TTI_HPT374; break;
384 case PCI_DEVICE_ID_TTI_HPT371:
385 class_rev = PCI_DEVICE_ID_TTI_HPT371; break;
386 case PCI_DEVICE_ID_TTI_HPT302:
387 class_rev = PCI_DEVICE_ID_TTI_HPT302; break;
388 case PCI_DEVICE_ID_TTI_HPT372:
389 class_rev = PCI_DEVICE_ID_TTI_HPT372; break;
390 default:
391 break;
392 }
393 return class_rev;
394}
395 509
396static int check_in_drive_lists(ide_drive_t *drive, const char **list); 510 while (*list)
511 if (!strcmp(*list++,id->model))
512 return 1;
513 return 0;
514}
397 515
398static u8 hpt3xx_ratemask (ide_drive_t *drive) 516static u8 hpt3xx_ratemask(ide_drive_t *drive)
399{ 517{
400 ide_hwif_t *hwif = drive->hwif; 518 struct hpt_info *info = pci_get_drvdata(HWIF(drive)->pci_dev);
401 struct hpt_info *info = ide_get_hwifdata(hwif); 519 u8 mode = info->max_mode;
402 u8 mode = 0; 520
403
404 /* FIXME: TODO - move this to set info->mode once at boot */
405
406 if (info->revision >= 8) { /* HPT374 */
407 mode = (HPT374_ALLOW_ATA133_6) ? 4 : 3;
408 } else if (info->revision >= 7) { /* HPT371 */
409 mode = (HPT371_ALLOW_ATA133_6) ? 4 : 3;
410 } else if (info->revision >= 6) { /* HPT302 */
411 mode = (HPT302_ALLOW_ATA133_6) ? 4 : 3;
412 } else if (info->revision >= 5) { /* HPT372 */
413 mode = (HPT372_ALLOW_ATA133_6) ? 4 : 3;
414 } else if (info->revision >= 4) { /* HPT370A */
415 mode = (HPT370_ALLOW_ATA100_5) ? 3 : 2;
416 } else if (info->revision >= 3) { /* HPT370 */
417 mode = (HPT370_ALLOW_ATA100_5) ? 3 : 2;
418 mode = (check_in_drive_lists(drive, bad_ata33)) ? 0 : mode;
419 } else { /* HPT366 and HPT368 */
420 mode = (check_in_drive_lists(drive, bad_ata33)) ? 0 : 2;
421 }
422 if (!eighty_ninty_three(drive) && mode) 521 if (!eighty_ninty_three(drive) && mode)
423 mode = min(mode, (u8)1); 522 mode = min(mode, (u8)1);
424 return mode; 523 return mode;
@@ -429,75 +528,61 @@ static u8 hpt3xx_ratemask (ide_drive_t *drive)
429 * either PIO or UDMA modes 0,4,5 528 * either PIO or UDMA modes 0,4,5
430 */ 529 */
431 530
432static u8 hpt3xx_ratefilter (ide_drive_t *drive, u8 speed) 531static u8 hpt3xx_ratefilter(ide_drive_t *drive, u8 speed)
433{ 532{
434 ide_hwif_t *hwif = drive->hwif; 533 struct hpt_info *info = pci_get_drvdata(HWIF(drive)->pci_dev);
435 struct hpt_info *info = ide_get_hwifdata(hwif); 534 u8 chip_type = info->chip_type;
436 u8 mode = hpt3xx_ratemask(drive); 535 u8 mode = hpt3xx_ratemask(drive);
437 536
438 if (drive->media != ide_disk) 537 if (drive->media != ide_disk)
439 return min(speed, (u8)XFER_PIO_4); 538 return min(speed, (u8)XFER_PIO_4);
440 539
441 switch(mode) { 540 switch (mode) {
442 case 0x04: 541 case 0x04:
443 speed = min(speed, (u8)XFER_UDMA_6); 542 speed = min_t(u8, speed, XFER_UDMA_6);
444 break; 543 break;
445 case 0x03: 544 case 0x03:
446 speed = min(speed, (u8)XFER_UDMA_5); 545 speed = min_t(u8, speed, XFER_UDMA_5);
447 if (info->revision >= 5) 546 if (chip_type >= HPT374)
448 break; 547 break;
449 if (check_in_drive_lists(drive, bad_ata100_5)) 548 if (!check_in_drive_list(drive, bad_ata100_5))
450 speed = min(speed, (u8)XFER_UDMA_4); 549 goto check_bad_ata33;
451 break; 550 /* fall thru */
452 case 0x02: 551 case 0x02:
453 speed = min(speed, (u8)XFER_UDMA_4); 552 speed = min_t(u8, speed, XFER_UDMA_4);
454 /* 553
455 * CHECK ME, Does this need to be set to 5 ?? 554 /*
456 */ 555 * CHECK ME, Does this need to be changed to HPT374 ??
457 if (info->revision >= 3) 556 */
458 break; 557 if (chip_type >= HPT370)
459 if ((check_in_drive_lists(drive, bad_ata66_4)) || 558 goto check_bad_ata33;
460 (!(HPT366_ALLOW_ATA66_4))) 559 if (HPT366_ALLOW_ATA66_4 &&
461 speed = min(speed, (u8)XFER_UDMA_3); 560 !check_in_drive_list(drive, bad_ata66_4))
462 if ((check_in_drive_lists(drive, bad_ata66_3)) || 561 goto check_bad_ata33;
463 (!(HPT366_ALLOW_ATA66_3))) 562
464 speed = min(speed, (u8)XFER_UDMA_2); 563 speed = min_t(u8, speed, XFER_UDMA_3);
465 break; 564 if (HPT366_ALLOW_ATA66_3 &&
565 !check_in_drive_list(drive, bad_ata66_3))
566 goto check_bad_ata33;
567 /* fall thru */
466 case 0x01: 568 case 0x01:
467 speed = min(speed, (u8)XFER_UDMA_2); 569 speed = min_t(u8, speed, XFER_UDMA_2);
468 /* 570
469 * CHECK ME, Does this need to be set to 5 ?? 571 check_bad_ata33:
470 */ 572 if (chip_type >= HPT370A)
471 if (info->revision >= 3)
472 break; 573 break;
473 if (check_in_drive_lists(drive, bad_ata33)) 574 if (!check_in_drive_list(drive, bad_ata33))
474 speed = min(speed, (u8)XFER_MW_DMA_2); 575 break;
475 break; 576 /* fall thru */
476 case 0x00: 577 case 0x00:
477 default: 578 default:
478 speed = min(speed, (u8)XFER_MW_DMA_2); 579 speed = min_t(u8, speed, XFER_MW_DMA_2);
479 break; 580 break;
480 } 581 }
481 return speed; 582 return speed;
482} 583}
483 584
484static int check_in_drive_lists (ide_drive_t *drive, const char **list) 585static u32 get_speed_setting(u8 speed, struct hpt_info *info)
485{
486 struct hd_driveid *id = drive->id;
487
488 if (quirk_drives == list) {
489 while (*list)
490 if (strstr(id->model, *list++))
491 return 1;
492 } else {
493 while (*list)
494 if (!strcmp(*list++,id->model))
495 return 1;
496 }
497 return 0;
498}
499
500static u32 pci_bus_clock_list(u8 speed, u32 *chipset_table)
501{ 586{
502 int i; 587 int i;
503 588
@@ -510,228 +595,161 @@ static u32 pci_bus_clock_list(u8 speed, u32 *chipset_table)
510 for (i = 0; i < ARRAY_SIZE(xfer_speeds) - 1; i++) 595 for (i = 0; i < ARRAY_SIZE(xfer_speeds) - 1; i++)
511 if (xfer_speeds[i] == speed) 596 if (xfer_speeds[i] == speed)
512 break; 597 break;
513 return chipset_table[i]; 598 /*
599 * NOTE: info->settings only points to the pointer
600 * to the list of the actual register values
601 */
602 return (*info->settings)[i];
514} 603}
515 604
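/*
 * Why the double indirection above, as a hedged sketch: info->settings
 * is a u32 ** that is presumably left pointing at the slot for the ATA
 * clock selected during chip init, so one dereference yields the
 * register list for the active clock and the mode index then selects
 * the row.
 */
static u32 settings_lookup_example(struct hpt_info *info, int mode_index)
{
	u32 *regs = *info->settings;	/* e.g. thirty_three_base_hpt37x */

	return regs[mode_index];
}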
516static int hpt36x_tune_chipset(ide_drive_t *drive, u8 xferspeed) 605static int hpt36x_tune_chipset(ide_drive_t *drive, u8 xferspeed)
517{ 606{
518 ide_hwif_t *hwif = drive->hwif; 607 ide_hwif_t *hwif = HWIF(drive);
519 struct pci_dev *dev = hwif->pci_dev; 608 struct pci_dev *dev = hwif->pci_dev;
520 struct hpt_info *info = ide_get_hwifdata(hwif); 609 struct hpt_info *info = pci_get_drvdata(dev);
521 u8 speed = hpt3xx_ratefilter(drive, xferspeed); 610 u8 speed = hpt3xx_ratefilter(drive, xferspeed);
522 u8 regtime = (drive->select.b.unit & 0x01) ? 0x44 : 0x40; 611 u8 itr_addr = drive->dn ? 0x44 : 0x40;
523 u8 regfast = (hwif->channel) ? 0x55 : 0x51; 612 u32 itr_mask = speed < XFER_MW_DMA_0 ? 0x30070000 :
524 u8 drive_fast = 0; 613 (speed < XFER_UDMA_0 ? 0xc0070000 : 0xc03800ff);
525 u32 reg1 = 0, reg2 = 0; 614 u32 new_itr = get_speed_setting(speed, info);
526 615 u32 old_itr = 0;
527 /*
528 * Disable the "fast interrupt" prediction.
529 */
530 pci_read_config_byte(dev, regfast, &drive_fast);
531 if (drive_fast & 0x80)
532 pci_write_config_byte(dev, regfast, drive_fast & ~0x80);
533
534 reg2 = pci_bus_clock_list(speed, info->speed);
535 616
536 /* 617 /*
537 * Disable on-chip PIO FIFO/buffer 618 * Disable on-chip PIO FIFO/buffer (and PIO MST mode as well)
538 * (to avoid problems handling I/O errors later) 619 * to avoid problems handling I/O errors later
539 */ 620 */
540 pci_read_config_dword(dev, regtime, &reg1); 621 pci_read_config_dword(dev, itr_addr, &old_itr);
541 if (speed >= XFER_MW_DMA_0) { 622 new_itr = (new_itr & ~itr_mask) | (old_itr & itr_mask);
542 reg2 = (reg2 & ~0xc0000000) | (reg1 & 0xc0000000); 623 new_itr &= ~0xc0000000;
543 } else {
544 reg2 = (reg2 & ~0x30070000) | (reg1 & 0x30070000);
545 }
546 reg2 &= ~0x80000000;
547 624
548 pci_write_config_dword(dev, regtime, reg2); 625 pci_write_config_dword(dev, itr_addr, new_itr);
549 626
550 return ide_config_drive_speed(drive, speed); 627 return ide_config_drive_speed(drive, speed);
551} 628}
552 629
553static int hpt370_tune_chipset(ide_drive_t *drive, u8 xferspeed) 630static int hpt37x_tune_chipset(ide_drive_t *drive, u8 xferspeed)
554{ 631{
555 ide_hwif_t *hwif = drive->hwif; 632 ide_hwif_t *hwif = HWIF(drive);
556 struct pci_dev *dev = hwif->pci_dev; 633 struct pci_dev *dev = hwif->pci_dev;
557 struct hpt_info *info = ide_get_hwifdata(hwif); 634 struct hpt_info *info = pci_get_drvdata(dev);
558 u8 speed = hpt3xx_ratefilter(drive, xferspeed); 635 u8 speed = hpt3xx_ratefilter(drive, xferspeed);
559 u8 regfast = (drive->hwif->channel) ? 0x55 : 0x51; 636 u8 itr_addr = 0x40 + (drive->dn * 4);
560 u8 drive_pci = 0x40 + (drive->dn * 4); 637 u32 itr_mask = speed < XFER_MW_DMA_0 ? 0x303c0000 :
561 u8 new_fast = 0, drive_fast = 0; 638 (speed < XFER_UDMA_0 ? 0xc03c0000 : 0xc1c001ff);
562 u32 list_conf = 0, drive_conf = 0; 639 u32 new_itr = get_speed_setting(speed, info);
563 u32 conf_mask = (speed >= XFER_MW_DMA_0) ? 0xc0000000 : 0x30070000; 640 u32 old_itr = 0;
564 641
565 /* 642 pci_read_config_dword(dev, itr_addr, &old_itr);
566 * Disable the "fast interrupt" prediction. 643 new_itr = (new_itr & ~itr_mask) | (old_itr & itr_mask);
567 * don't holdoff on interrupts. (== 0x01 despite what the docs say)
568 */
569 pci_read_config_byte(dev, regfast, &drive_fast);
570 new_fast = drive_fast;
571 if (new_fast & 0x02)
572 new_fast &= ~0x02;
573
574#ifdef HPT_DELAY_INTERRUPT
575 if (new_fast & 0x01)
576 new_fast &= ~0x01;
577#else
578 if ((new_fast & 0x01) == 0)
579 new_fast |= 0x01;
580#endif
581 if (new_fast != drive_fast)
582 pci_write_config_byte(dev, regfast, new_fast);
583
584 list_conf = pci_bus_clock_list(speed, info->speed);
585
586 pci_read_config_dword(dev, drive_pci, &drive_conf);
587 list_conf = (list_conf & ~conf_mask) | (drive_conf & conf_mask);
588 644
589 if (speed < XFER_MW_DMA_0) 645 if (speed < XFER_MW_DMA_0)
590 list_conf &= ~0x80000000; /* Disable on-chip PIO FIFO/buffer */ 646 new_itr &= ~0x80000000; /* Disable on-chip PIO FIFO/buffer */
591 pci_write_config_dword(dev, drive_pci, list_conf); 647 pci_write_config_dword(dev, itr_addr, new_itr);
592 648
593 return ide_config_drive_speed(drive, speed); 649 return ide_config_drive_speed(drive, speed);
594} 650}
595 651
596static int hpt372_tune_chipset(ide_drive_t *drive, u8 xferspeed) 652static int hpt3xx_tune_chipset(ide_drive_t *drive, u8 speed)
597{ 653{
598 ide_hwif_t *hwif = drive->hwif; 654 ide_hwif_t *hwif = HWIF(drive);
599 struct pci_dev *dev = hwif->pci_dev; 655 struct hpt_info *info = pci_get_drvdata(hwif->pci_dev);
600 struct hpt_info *info = ide_get_hwifdata(hwif);
601 u8 speed = hpt3xx_ratefilter(drive, xferspeed);
602 u8 regfast = (drive->hwif->channel) ? 0x55 : 0x51;
603 u8 drive_fast = 0, drive_pci = 0x40 + (drive->dn * 4);
604 u32 list_conf = 0, drive_conf = 0;
605 u32 conf_mask = (speed >= XFER_MW_DMA_0) ? 0xc0000000 : 0x30070000;
606
607 /*
608 * Disable the "fast interrupt" prediction.
609 * don't holdoff on interrupts. (== 0x01 despite what the docs say)
610 */
611 pci_read_config_byte(dev, regfast, &drive_fast);
612 drive_fast &= ~0x07;
613 pci_write_config_byte(dev, regfast, drive_fast);
614
615 list_conf = pci_bus_clock_list(speed, info->speed);
616 pci_read_config_dword(dev, drive_pci, &drive_conf);
617 list_conf = (list_conf & ~conf_mask) | (drive_conf & conf_mask);
618 if (speed < XFER_MW_DMA_0)
619 list_conf &= ~0x80000000; /* Disable on-chip PIO FIFO/buffer */
620 pci_write_config_dword(dev, drive_pci, list_conf);
621
622 return ide_config_drive_speed(drive, speed);
623}
624 656
625static int hpt3xx_tune_chipset (ide_drive_t *drive, u8 speed) 657 if (info->chip_type >= HPT370)
626{ 658 return hpt37x_tune_chipset(drive, speed);
627 ide_hwif_t *hwif = drive->hwif;
628 struct hpt_info *info = ide_get_hwifdata(hwif);
629
630 if (info->revision >= 8)
631 return hpt372_tune_chipset(drive, speed); /* not a typo */
632 else if (info->revision >= 5)
633 return hpt372_tune_chipset(drive, speed);
634 else if (info->revision >= 3)
635 return hpt370_tune_chipset(drive, speed);
636 else /* hpt368: hpt_minimum_revision(dev, 2) */ 659 else /* hpt368: hpt_minimum_revision(dev, 2) */
637 return hpt36x_tune_chipset(drive, speed); 660 return hpt36x_tune_chipset(drive, speed);
638} 661}
639 662
640static void hpt3xx_tune_drive (ide_drive_t *drive, u8 pio) 663static void hpt3xx_tune_drive(ide_drive_t *drive, u8 pio)
641{ 664{
642 pio = ide_get_best_pio_mode(drive, 255, pio, NULL); 665 pio = ide_get_best_pio_mode(drive, pio, 4, NULL);
643 (void) hpt3xx_tune_chipset(drive, (XFER_PIO_0 + pio)); 666 (void) hpt3xx_tune_chipset (drive, XFER_PIO_0 + pio);
644} 667}
645 668
646/* 669/*
647 * This allows the configuration of ide_pci chipset registers 670 * This allows the configuration of ide_pci chipset registers
648 * for cards that learn about the drive's UDMA, DMA, PIO capabilities 671 * for cards that learn about the drive's UDMA, DMA, PIO capabilities
649 * after the drive is reported by the OS. Initially for designed for 672 * after the drive is reported by the OS. Initially designed for
650 * HPT366 UDMA chipset by HighPoint|Triones Technologies, Inc. 673 * HPT366 UDMA chipset by HighPoint|Triones Technologies, Inc.
651 * 674 *
652 * check_in_drive_lists(drive, bad_ata66_4)
653 * check_in_drive_lists(drive, bad_ata66_3)
654 * check_in_drive_lists(drive, bad_ata33)
655 *
656 */ 675 */
657static int config_chipset_for_dma (ide_drive_t *drive) 676static int config_chipset_for_dma(ide_drive_t *drive)
658{ 677{
659 u8 speed = ide_dma_speed(drive, hpt3xx_ratemask(drive)); 678 u8 speed = ide_dma_speed(drive, hpt3xx_ratemask(drive));
660 ide_hwif_t *hwif = drive->hwif;
661 struct hpt_info *info = ide_get_hwifdata(hwif);
662 679
663 if (!speed) 680 if (!speed)
664 return 0; 681 return 0;
665 682
666 /* If we don't have any timings we can't do a lot */
667 if (info->speed == NULL)
668 return 0;
669
670 (void) hpt3xx_tune_chipset(drive, speed); 683 (void) hpt3xx_tune_chipset(drive, speed);
671 return ide_dma_enable(drive); 684 return ide_dma_enable(drive);
672} 685}
673 686
674static int hpt3xx_quirkproc (ide_drive_t *drive) 687static int hpt3xx_quirkproc(ide_drive_t *drive)
675{ 688{
676 return ((int) check_in_drive_lists(drive, quirk_drives)); 689 struct hd_driveid *id = drive->id;
690 const char **list = quirk_drives;
691
692 while (*list)
693 if (strstr(id->model, *list++))
694 return 1;
695 return 0;
677} 696}
678 697
679static void hpt3xx_intrproc (ide_drive_t *drive) 698static void hpt3xx_intrproc(ide_drive_t *drive)
680{ 699{
681 ide_hwif_t *hwif = drive->hwif; 700 ide_hwif_t *hwif = HWIF(drive);
682 701
683 if (drive->quirk_list) 702 if (drive->quirk_list)
684 return; 703 return;
685 /* drives in the quirk_list may not like intr setups/cleanups */ 704 /* drives in the quirk_list may not like intr setups/cleanups */
686 hwif->OUTB(drive->ctl|2, IDE_CONTROL_REG); 705 hwif->OUTB(drive->ctl | 2, IDE_CONTROL_REG);
687} 706}
688 707
689static void hpt3xx_maskproc (ide_drive_t *drive, int mask) 708static void hpt3xx_maskproc(ide_drive_t *drive, int mask)
690{ 709{
691 ide_hwif_t *hwif = drive->hwif; 710 ide_hwif_t *hwif = HWIF(drive);
692 struct hpt_info *info = ide_get_hwifdata(hwif); 711 struct pci_dev *dev = hwif->pci_dev;
693 struct pci_dev *dev = hwif->pci_dev; 712 struct hpt_info *info = pci_get_drvdata(dev);
694 713
695 if (drive->quirk_list) { 714 if (drive->quirk_list) {
696 if (info->revision >= 3) { 715 if (info->chip_type >= HPT370) {
697 u8 reg5a = 0; 716 u8 scr1 = 0;
698 pci_read_config_byte(dev, 0x5a, &reg5a); 717
699 if (((reg5a & 0x10) >> 4) != mask) 718 pci_read_config_byte(dev, 0x5a, &scr1);
700 pci_write_config_byte(dev, 0x5a, mask ? (reg5a | 0x10) : (reg5a & ~0x10)); 719 if (((scr1 & 0x10) >> 4) != mask) {
720 if (mask)
721 scr1 |= 0x10;
722 else
723 scr1 &= ~0x10;
724 pci_write_config_byte(dev, 0x5a, scr1);
725 }
701 } else { 726 } else {
702 if (mask) { 727 if (mask)
703 disable_irq(hwif->irq); 728 disable_irq(hwif->irq);
704 } else { 729 else
705 enable_irq(hwif->irq); 730 enable_irq (hwif->irq);
706 }
707 } 731 }
708 } else { 732 } else
709 if (IDE_CONTROL_REG) 733 hwif->OUTB(mask ? (drive->ctl | 2) : (drive->ctl & ~2),
710 hwif->OUTB(mask ? (drive->ctl | 2) : 734 IDE_CONTROL_REG);
711 (drive->ctl & ~2),
712 IDE_CONTROL_REG);
713 }
714} 735}
715 736
716static int hpt366_config_drive_xfer_rate (ide_drive_t *drive) 737static int hpt366_config_drive_xfer_rate(ide_drive_t *drive)
717{ 738{
718 ide_hwif_t *hwif = drive->hwif; 739 ide_hwif_t *hwif = HWIF(drive);
719 struct hd_driveid *id = drive->id; 740 struct hd_driveid *id = drive->id;
720 741
721 drive->init_speed = 0; 742 drive->init_speed = 0;
722 743
723 if ((id->capability & 1) && drive->autodma) { 744 if ((id->capability & 1) && drive->autodma) {
724 745 if (ide_use_dma(drive) && config_chipset_for_dma(drive))
725 if (ide_use_dma(drive)) { 746 return hwif->ide_dma_on(drive);
726 if (config_chipset_for_dma(drive))
727 return hwif->ide_dma_on(drive);
728 }
729 747
730 goto fast_ata_pio; 748 goto fast_ata_pio;
731 749
732 } else if ((id->capability & 8) || (id->field_valid & 2)) { 750 } else if ((id->capability & 8) || (id->field_valid & 2)) {
733fast_ata_pio: 751fast_ata_pio:
734 hpt3xx_tune_drive(drive, 5); 752 hpt3xx_tune_drive(drive, 255);
735 return hwif->ide_dma_off_quietly(drive); 753 return hwif->ide_dma_off_quietly(drive);
736 } 754 }
737 /* IORDY not supported */ 755 /* IORDY not supported */
@@ -739,31 +757,48 @@ fast_ata_pio:
739} 757}
740 758
741/* 759/*
742 * This is specific to the HPT366 UDMA bios chipset 760 * This is specific to the HPT366 UDMA chipset
743 * by HighPoint|Triones Technologies, Inc. 761 * by HighPoint|Triones Technologies, Inc.
744 */ 762 */
745static int hpt366_ide_dma_lostirq (ide_drive_t *drive) 763static int hpt366_ide_dma_lostirq(ide_drive_t *drive)
746{ 764{
747 struct pci_dev *dev = HWIF(drive)->pci_dev; 765 struct pci_dev *dev = HWIF(drive)->pci_dev;
748 u8 reg50h = 0, reg52h = 0, reg5ah = 0; 766 u8 mcr1 = 0, mcr3 = 0, scr1 = 0;
749 767
750 pci_read_config_byte(dev, 0x50, &reg50h); 768 pci_read_config_byte(dev, 0x50, &mcr1);
751 pci_read_config_byte(dev, 0x52, &reg52h); 769 pci_read_config_byte(dev, 0x52, &mcr3);
752 pci_read_config_byte(dev, 0x5a, &reg5ah); 770 pci_read_config_byte(dev, 0x5a, &scr1);
753 printk("%s: (%s) reg50h=0x%02x, reg52h=0x%02x, reg5ah=0x%02x\n", 771 printk("%s: (%s) mcr1=0x%02x, mcr3=0x%02x, scr1=0x%02x\n",
754 drive->name, __FUNCTION__, reg50h, reg52h, reg5ah); 772 drive->name, __FUNCTION__, mcr1, mcr3, scr1);
755 if (reg5ah & 0x10) 773 if (scr1 & 0x10)
756 pci_write_config_byte(dev, 0x5a, reg5ah & ~0x10); 774 pci_write_config_byte(dev, 0x5a, scr1 & ~0x10);
757 return __ide_dma_lostirq(drive); 775 return __ide_dma_lostirq(drive);
758} 776}
759 777
760static void hpt370_clear_engine (ide_drive_t *drive) 778static void hpt370_clear_engine(ide_drive_t *drive)
761{ 779{
762 u8 regstate = HWIF(drive)->channel ? 0x54 : 0x50; 780 ide_hwif_t *hwif = HWIF(drive);
763 pci_write_config_byte(HWIF(drive)->pci_dev, regstate, 0x37); 781
782 pci_write_config_byte(hwif->pci_dev, hwif->select_data, 0x37);
764 udelay(10); 783 udelay(10);
765} 784}
766 785
786static void hpt370_irq_timeout(ide_drive_t *drive)
787{
788 ide_hwif_t *hwif = HWIF(drive);
789 u16 bfifo = 0;
790 u8 dma_cmd;
791
792 pci_read_config_word(hwif->pci_dev, hwif->select_data + 2, &bfifo);
793 printk(KERN_DEBUG "%s: %d bytes in FIFO\n", drive->name, bfifo & 0x1ff);
794
795 /* get DMA command mode */
796 dma_cmd = hwif->INB(hwif->dma_command);
797 /* stop DMA */
798 hwif->OUTB(dma_cmd & ~0x1, hwif->dma_command);
799 hpt370_clear_engine(drive);
800}
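Several hunks in this patch replace hard-coded 0x50/0x52/0x54/0x56 offsets with arithmetic on hwif->select_data, which init_hwif_hpt366() below caches per channel. The per-channel register map, as implied by the code itself:

	/*
	 * base = hwif->channel ? 0x54 : 0x50   (cached in hwif->select_data)
	 *
	 * base + 0 : misc. control - writing 0x37 resets the state engine
	 * base + 1 : "fast interrupt" prediction control
	 * base + 2 : BFIFO byte count (word read); also carries TRISTATE_BIT
	 */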
801
767static void hpt370_ide_dma_start(ide_drive_t *drive) 802static void hpt370_ide_dma_start(ide_drive_t *drive)
768{ 803{
769#ifdef HPT_RESET_STATE_ENGINE 804#ifdef HPT_RESET_STATE_ENGINE
@@ -772,64 +807,35 @@ static void hpt370_ide_dma_start(ide_drive_t *drive)
772 ide_dma_start(drive); 807 ide_dma_start(drive);
773} 808}
774 809
775static int hpt370_ide_dma_end (ide_drive_t *drive) 810static int hpt370_ide_dma_end(ide_drive_t *drive)
776{ 811{
777 ide_hwif_t *hwif = HWIF(drive); 812 ide_hwif_t *hwif = HWIF(drive);
778 u8 dma_stat = hwif->INB(hwif->dma_status); 813 u8 dma_stat = hwif->INB(hwif->dma_status);
779 814
780 if (dma_stat & 0x01) { 815 if (dma_stat & 0x01) {
781 /* wait a little */ 816 /* wait a little */
782 udelay(20); 817 udelay(20);
783 dma_stat = hwif->INB(hwif->dma_status); 818 dma_stat = hwif->INB(hwif->dma_status);
819 if (dma_stat & 0x01)
820 hpt370_irq_timeout(drive);
784 } 821 }
785 if ((dma_stat & 0x01) != 0)
786 /* fallthrough */
787 (void) HWIF(drive)->ide_dma_timeout(drive);
788
789 return __ide_dma_end(drive); 822 return __ide_dma_end(drive);
790} 823}
791 824
792static void hpt370_lostirq_timeout (ide_drive_t *drive) 825static int hpt370_ide_dma_timeout(ide_drive_t *drive)
793{ 826{
794 ide_hwif_t *hwif = HWIF(drive); 827 hpt370_irq_timeout(drive);
795 u8 bfifo = 0, reginfo = hwif->channel ? 0x56 : 0x52;
796 u8 dma_stat = 0, dma_cmd = 0;
797
798 pci_read_config_byte(HWIF(drive)->pci_dev, reginfo, &bfifo);
799 printk(KERN_DEBUG "%s: %d bytes in FIFO\n", drive->name, bfifo);
800 hpt370_clear_engine(drive);
801 /* get dma command mode */
802 dma_cmd = hwif->INB(hwif->dma_command);
803 /* stop dma */
804 hwif->OUTB(dma_cmd & ~0x1, hwif->dma_command);
805 dma_stat = hwif->INB(hwif->dma_status);
806 /* clear errors */
807 hwif->OUTB(dma_stat | 0x6, hwif->dma_status);
808}
809
810static int hpt370_ide_dma_timeout (ide_drive_t *drive)
811{
812 hpt370_lostirq_timeout(drive);
813 hpt370_clear_engine(drive);
814 return __ide_dma_timeout(drive); 828 return __ide_dma_timeout(drive);
815} 829}
816 830
817static int hpt370_ide_dma_lostirq (ide_drive_t *drive)
818{
819 hpt370_lostirq_timeout(drive);
820 hpt370_clear_engine(drive);
821 return __ide_dma_lostirq(drive);
822}
823
824/* returns 1 if DMA IRQ issued, 0 otherwise */ 831/* returns 1 if DMA IRQ issued, 0 otherwise */
825static int hpt374_ide_dma_test_irq(ide_drive_t *drive) 832static int hpt374_ide_dma_test_irq(ide_drive_t *drive)
826{ 833{
827 ide_hwif_t *hwif = HWIF(drive); 834 ide_hwif_t *hwif = HWIF(drive);
828 u16 bfifo = 0; 835 u16 bfifo = 0;
829 u8 reginfo = hwif->channel ? 0x56 : 0x52; 836 u8 dma_stat;
830 u8 dma_stat;
831 837
832 pci_read_config_word(hwif->pci_dev, reginfo, &bfifo); 838 pci_read_config_word(hwif->pci_dev, hwif->select_data + 2, &bfifo);
833 if (bfifo & 0x1FF) { 839 if (bfifo & 0x1FF) {
834// printk("%s: %d bytes in FIFO\n", drive->name, bfifo); 840// printk("%s: %d bytes in FIFO\n", drive->name, bfifo);
835 return 0; 841 return 0;
@@ -837,7 +843,7 @@ static int hpt374_ide_dma_test_irq(ide_drive_t *drive)
837 843
838 dma_stat = hwif->INB(hwif->dma_status); 844 dma_stat = hwif->INB(hwif->dma_status);
839 /* return 1 if INTR asserted */ 845 /* return 1 if INTR asserted */
840 if ((dma_stat & 4) == 4) 846 if (dma_stat & 4)
841 return 1; 847 return 1;
842 848
843 if (!drive->waiting_for_dma) 849 if (!drive->waiting_for_dma)
@@ -846,17 +852,17 @@ static int hpt374_ide_dma_test_irq(ide_drive_t *drive)
846 return 0; 852 return 0;
847} 853}
848 854
849static int hpt374_ide_dma_end (ide_drive_t *drive) 855static int hpt374_ide_dma_end(ide_drive_t *drive)
850{ 856{
851 struct pci_dev *dev = HWIF(drive)->pci_dev;
852 ide_hwif_t *hwif = HWIF(drive); 857 ide_hwif_t *hwif = HWIF(drive);
853 u8 msc_stat = 0, mscreg = hwif->channel ? 0x54 : 0x50; 858 struct pci_dev *dev = hwif->pci_dev;
854 u8 bwsr_stat = 0, bwsr_mask = hwif->channel ? 0x02 : 0x01; 859 u8 mcr = 0, mcr_addr = hwif->select_data;
855 860 u8 bwsr = 0, mask = hwif->channel ? 0x02 : 0x01;
856 pci_read_config_byte(dev, 0x6a, &bwsr_stat); 861
857 pci_read_config_byte(dev, mscreg, &msc_stat); 862 pci_read_config_byte(dev, 0x6a, &bwsr);
858 if ((bwsr_stat & bwsr_mask) == bwsr_mask) 863 pci_read_config_byte(dev, mcr_addr, &mcr);
859 pci_write_config_byte(dev, mscreg, msc_stat|0x30); 864 if (bwsr & mask)
865 pci_write_config_byte(dev, mcr_addr, mcr | 0x30);
860 return __ide_dma_end(drive); 866 return __ide_dma_end(drive);
861} 867}
862 868
@@ -866,40 +872,37 @@ static int hpt374_ide_dma_end (ide_drive_t *drive)
866 * @mode: clocking mode (0x21 for write, 0x23 otherwise) 872 * @mode: clocking mode (0x21 for write, 0x23 otherwise)
867 * 873 *
868 * Switch the DPLL clock on the HPT3xxN devices. This is a right mess. 874 * Switch the DPLL clock on the HPT3xxN devices. This is a right mess.
869 * NOTE: avoid touching the disabled primary channel on HPT371N -- it
870 * doesn't physically exist anyway...
871 */ 875 */
872 876
873static void hpt3xxn_set_clock(ide_hwif_t *hwif, u8 mode) 877static void hpt3xxn_set_clock(ide_hwif_t *hwif, u8 mode)
874{ 878{
875 u8 mcr1, scr2 = hwif->INB(hwif->dma_master + 0x7b); 879 u8 scr2 = hwif->INB(hwif->dma_master + 0x7b);
876 880
877 if ((scr2 & 0x7f) == mode) 881 if ((scr2 & 0x7f) == mode)
878 return; 882 return;
879 883
880 /* MISC. control register 1 has the channel enable bit... */
881 mcr1 = hwif->INB(hwif->dma_master + 0x70);
882
883 /* Tristate the bus */ 884 /* Tristate the bus */
884 if (mcr1 & 0x04) 885 hwif->OUTB(0x80, hwif->dma_master + 0x73);
885 hwif->OUTB(0x80, hwif->dma_master + 0x73);
886 hwif->OUTB(0x80, hwif->dma_master + 0x77); 886 hwif->OUTB(0x80, hwif->dma_master + 0x77);
887 887
888 /* Switch clock and reset channels */ 888 /* Switch clock and reset channels */
889 hwif->OUTB(mode, hwif->dma_master + 0x7b); 889 hwif->OUTB(mode, hwif->dma_master + 0x7b);
890 hwif->OUTB(0xc0, hwif->dma_master + 0x79); 890 hwif->OUTB(0xc0, hwif->dma_master + 0x79);
891 891
892 /* Reset state machines */ 892 /*
893 if (mcr1 & 0x04) 893 * Reset the state machines.
894 hwif->OUTB(0x37, hwif->dma_master + 0x70); 894 * NOTE: avoid accidentally enabling the disabled channels.
895 hwif->OUTB(0x37, hwif->dma_master + 0x74); 895 */
896 hwif->OUTB(hwif->INB(hwif->dma_master + 0x70) | 0x32,
897 hwif->dma_master + 0x70);
898 hwif->OUTB(hwif->INB(hwif->dma_master + 0x74) | 0x32,
899 hwif->dma_master + 0x74);
896 900
897 /* Complete reset */ 901 /* Complete reset */
898 hwif->OUTB(0x00, hwif->dma_master + 0x79); 902 hwif->OUTB(0x00, hwif->dma_master + 0x79);
899 903
900 /* Reconnect channels to bus */ 904 /* Reconnect channels to bus */
901 if (mcr1 & 0x04) 905 hwif->OUTB(0x00, hwif->dma_master + 0x73);
902 hwif->OUTB(0x00, hwif->dma_master + 0x73);
903 hwif->OUTB(0x00, hwif->dma_master + 0x77); 906 hwif->OUTB(0x00, hwif->dma_master + 0x77);
904} 907}
905 908
@@ -914,14 +917,12 @@ static void hpt3xxn_set_clock(ide_hwif_t *hwif, u8 mode)
914 917
915static void hpt3xxn_rw_disk(ide_drive_t *drive, struct request *rq) 918static void hpt3xxn_rw_disk(ide_drive_t *drive, struct request *rq)
916{ 919{
917 ide_hwif_t *hwif = HWIF(drive); 920 hpt3xxn_set_clock(HWIF(drive), rq_data_dir(rq) ? 0x23 : 0x21);
918 u8 wantclock = rq_data_dir(rq) ? 0x23 : 0x21;
919
920 hpt3xxn_set_clock(hwif, wantclock);
921} 921}
922 922
923/* 923/*
924 * Set/get power state for a drive. 924 * Set/get power state for a drive.
925 * NOTE: affects both drives on each channel.
925 * 926 *
926 * When we turn the power back on, we need to re-initialize things. 927 * When we turn the power back on, we need to re-initialize things.
927 */ 928 */
@@ -929,26 +930,18 @@ static void hpt3xxn_rw_disk(ide_drive_t *drive, struct request *rq)
929 930
930static int hpt3xx_busproc(ide_drive_t *drive, int state) 931static int hpt3xx_busproc(ide_drive_t *drive, int state)
931{ 932{
932 ide_hwif_t *hwif = drive->hwif; 933 ide_hwif_t *hwif = HWIF(drive);
933 struct pci_dev *dev = hwif->pci_dev; 934 struct pci_dev *dev = hwif->pci_dev;
934 u8 tristate, resetmask, bus_reg = 0; 935 u8 mcr_addr = hwif->select_data + 2;
935 u16 tri_reg = 0; 936 u8 resetmask = hwif->channel ? 0x80 : 0x40;
937 u8 bsr2 = 0;
938 u16 mcr = 0;
936 939
937 hwif->bus_state = state; 940 hwif->bus_state = state;
938 941
939 if (hwif->channel) {
940 /* secondary channel */
941 tristate = 0x56;
942 resetmask = 0x80;
943 } else {
944 /* primary channel */
945 tristate = 0x52;
946 resetmask = 0x40;
947 }
948
949 /* Grab the status. */ 942 /* Grab the status. */
950 pci_read_config_word(dev, tristate, &tri_reg); 943 pci_read_config_word(dev, mcr_addr, &mcr);
951 pci_read_config_byte(dev, 0x59, &bus_reg); 944 pci_read_config_byte(dev, 0x59, &bsr2);
952 945
953 /* 946 /*
954 * Set the state. We don't set it if we don't need to do so. 947 * Set the state. We don't set it if we don't need to do so.
@@ -956,22 +949,22 @@ static int hpt3xx_busproc(ide_drive_t *drive, int state)
956 */ 949 */
957 switch (state) { 950 switch (state) {
958 case BUSSTATE_ON: 951 case BUSSTATE_ON:
959 if (!(bus_reg & resetmask)) 952 if (!(bsr2 & resetmask))
960 return 0; 953 return 0;
961 hwif->drives[0].failures = hwif->drives[1].failures = 0; 954 hwif->drives[0].failures = hwif->drives[1].failures = 0;
962 955
963 pci_write_config_byte(dev, 0x59, bus_reg & ~resetmask); 956 pci_write_config_byte(dev, 0x59, bsr2 & ~resetmask);
964 pci_write_config_word(dev, tristate, tri_reg & ~TRISTATE_BIT); 957 pci_write_config_word(dev, mcr_addr, mcr & ~TRISTATE_BIT);
965 return 0; 958 return 0;
966 case BUSSTATE_OFF: 959 case BUSSTATE_OFF:
967 if ((bus_reg & resetmask) && !(tri_reg & TRISTATE_BIT)) 960 if ((bsr2 & resetmask) && !(mcr & TRISTATE_BIT))
968 return 0; 961 return 0;
969 tri_reg &= ~TRISTATE_BIT; 962 mcr &= ~TRISTATE_BIT;
970 break; 963 break;
971 case BUSSTATE_TRISTATE: 964 case BUSSTATE_TRISTATE:
972 if ((bus_reg & resetmask) && (tri_reg & TRISTATE_BIT)) 965 if ((bsr2 & resetmask) && (mcr & TRISTATE_BIT))
973 return 0; 966 return 0;
974 tri_reg |= TRISTATE_BIT; 967 mcr |= TRISTATE_BIT;
975 break; 968 break;
976 default: 969 default:
977 return -EINVAL; 970 return -EINVAL;
@@ -980,268 +973,320 @@ static int hpt3xx_busproc(ide_drive_t *drive, int state)
980 hwif->drives[0].failures = hwif->drives[0].max_failures + 1; 973 hwif->drives[0].failures = hwif->drives[0].max_failures + 1;
981 hwif->drives[1].failures = hwif->drives[1].max_failures + 1; 974 hwif->drives[1].failures = hwif->drives[1].max_failures + 1;
982 975
983 pci_write_config_word(dev, tristate, tri_reg); 976 pci_write_config_word(dev, mcr_addr, mcr);
984 pci_write_config_byte(dev, 0x59, bus_reg | resetmask); 977 pci_write_config_byte(dev, 0x59, bsr2 | resetmask);
985 return 0; 978 return 0;
986} 979}
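Condensing the switch above: the three bus states are encoded in just two bits, the per-channel reset bit in BSR2 (PCI reg 0x59) and TRISTATE_BIT in the MCR word at select_data + 2:

	/*
	 * state               BSR2 reset bit    MCR TRISTATE_BIT
	 * BUSSTATE_ON         cleared           cleared
	 * BUSSTATE_OFF        set               cleared
	 * BUSSTATE_TRISTATE   set               set
	 */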
987 980
988static void __devinit hpt366_clocking(ide_hwif_t *hwif) 981/**
982 * hpt37x_calibrate_dpll - calibrate the DPLL
983 * @dev: PCI device
984 *
985 * Perform a calibration cycle on the DPLL.
986 * Returns 1 if this succeeds
987 */
988static int __devinit hpt37x_calibrate_dpll(struct pci_dev *dev, u16 f_low, u16 f_high)
989{ 989{
990 u32 reg1 = 0; 990 u32 dpll = (f_high << 16) | f_low | 0x100;
991 struct hpt_info *info = ide_get_hwifdata(hwif); 991 u8 scr2;
992 int i;
992 993
993 pci_read_config_dword(hwif->pci_dev, 0x40, &reg1); 994 pci_write_config_dword(dev, 0x5c, dpll);
994 995
995 /* detect bus speed by looking at control reg timing: */ 996 /* Wait for oscillator ready */
996 switch((reg1 >> 8) & 7) { 997 for(i = 0; i < 0x5000; ++i) {
997 case 5: 998 udelay(50);
998 info->speed = forty_base_hpt36x; 999 pci_read_config_byte(dev, 0x5b, &scr2);
999 break; 1000 if (scr2 & 0x80)
1000 case 9:
1001 info->speed = twenty_five_base_hpt36x;
1002 break;
1003 case 7:
1004 default:
1005 info->speed = thirty_three_base_hpt36x;
1006 break; 1001 break;
1007 } 1002 }
1003 /* See if it stays ready (we'll just bail out if it's not yet) */
1004 for(i = 0; i < 0x1000; ++i) {
1005 pci_read_config_byte(dev, 0x5b, &scr2);
1006 /* DPLL destabilized? */
1007 if(!(scr2 & 0x80))
1008 return 0;
1009 }
1010 /* Turn off tuning, we have the DPLL set */
1011 pci_read_config_dword (dev, 0x5c, &dpll);
1012 pci_write_config_dword(dev, 0x5c, (dpll & ~0x100));
1013 return 1;
1008} 1014}
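The word written to 0x5c packs f_low and f_high into the low and high halves, with bit 8 (0x100) enabling the tuning logic; bit 7 of the register at 0x5b flags the DPLL as ready, and the final write clears 0x100 again once the DPLL has settled. A hedged usage fragment mirroring the init_chipset caller below, where dev is the controller's pci_dev (the 33/66 MHz figures are purely illustrative):

	u16 f_low = (33 * 48) / 66;	/* pci_clk * 48 / dpll_clk = 24 */

	/* delta = 2 because pci_clk < 50 */
	if (!hpt37x_calibrate_dpll(dev, f_low, f_low + 2))
		printk(KERN_WARNING "DPLL would not lock in this window\n");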
1009 1015
1010static void __devinit hpt37x_clocking(ide_hwif_t *hwif) 1016static unsigned int __devinit init_chipset_hpt366(struct pci_dev *dev, const char *name)
1011{ 1017{
1012 struct hpt_info *info = ide_get_hwifdata(hwif); 1018 struct hpt_info *info = kmalloc(sizeof(struct hpt_info), GFP_KERNEL);
1013 struct pci_dev *dev = hwif->pci_dev; 1019 unsigned long io_base = pci_resource_start(dev, 4);
1014 int adjust, i; 1020 u8 pci_clk, dpll_clk = 0; /* PCI and DPLL clock in MHz */
1015 u16 freq = 0; 1021 enum ata_clock clock;
1016 u32 pll, temp = 0; 1022
1017 u8 reg5bh = 0, mcr1 = 0; 1023 if (info == NULL) {
1018 1024 printk(KERN_ERR "%s: out of memory!\n", name);
1025 return -ENOMEM;
1026 }
1027
1019 /* 1028 /*
1020 * default to pci clock. make sure MA15/16 are set to output 1029 * Copy everything from a static "template" structure
1021 * to prevent drives having problems with 40-pin cables. Needed 1030 * to just allocated per-chip hpt_info structure.
1022 * for some drives such as IBM-DTLA which will not enter ready
1023 * state on reset when PDIAG is a input.
1024 *
1025 * ToDo: should we set 0x21 when using PLL mode ?
1026 */ 1031 */
1027 pci_write_config_byte(dev, 0x5b, 0x23); 1032 *info = *(struct hpt_info *)pci_get_drvdata(dev);
1028 1033
1029 /* 1034 /*
1030 * We'll have to read f_CNT value in order to determine 1035 * FIXME: Not portable. Also, why do we enable the ROM in the first place?
1031 * the PCI clock frequency according to the following ratio: 1036 * We don't seem to be using it.
1032 *
1033 * f_CNT = Fpci * 192 / Fdpll
1034 *
1035 * First try reading the register in which the HighPoint BIOS
1036 * saves f_CNT value before reprogramming the DPLL from its
1037 * default setting (which differs for the various chips).
1038 * NOTE: This register is only accessible via I/O space.
1039 *
1040 * In case the signature check fails, we'll have to resort to
1041 * reading the f_CNT register itself in hopes that nobody has
1042 * touched the DPLL yet...
1043 */ 1037 */
1044 temp = inl(pci_resource_start(dev, 4) + 0x90); 1038 if (dev->resource[PCI_ROM_RESOURCE].start)
1045 if ((temp & 0xFFFFF000) != 0xABCDE000) { 1039 pci_write_config_dword(dev, PCI_ROM_ADDRESS,
1046 printk(KERN_WARNING "HPT37X: no clock data saved by BIOS\n"); 1040 dev->resource[PCI_ROM_RESOURCE].start | PCI_ROM_ADDRESS_ENABLE);
1047 1041
1048 /* Calculate the average value of f_CNT */ 1042 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, (L1_CACHE_BYTES / 4));
1049 for (temp = i = 0; i < 128; i++) { 1043 pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x78);
1050 pci_read_config_word(dev, 0x78, &freq); 1044 pci_write_config_byte(dev, PCI_MIN_GNT, 0x08);
1051 temp += freq & 0x1ff; 1045 pci_write_config_byte(dev, PCI_MAX_LAT, 0x08);
1052 mdelay(1);
1053 }
1054 freq = temp / 128;
1055 } else
1056 freq = temp & 0x1ff;
1057 1046
1058 /* 1047 /*
1059 * HPT3xxN chips use different PCI clock information. 1048 * First, try to estimate the PCI clock frequency...
1060 * Currently we always set up the PLL for them.
1061 */ 1049 */
1050 if (info->chip_type >= HPT370) {
1051 u8 scr1 = 0;
1052 u16 f_cnt = 0;
1053 u32 temp = 0;
1062 1054
1063 if (info->flags & IS_3xxN) { 1055 /* Interrupt force enable. */
1064 if(freq < 0x55) 1056 pci_read_config_byte(dev, 0x5a, &scr1);
1065 pll = F_LOW_PCI_33; 1057 if (scr1 & 0x10)
1066 else if(freq < 0x70) 1058 pci_write_config_byte(dev, 0x5a, scr1 & ~0x10);
1067 pll = F_LOW_PCI_40;
1068 else if(freq < 0x7F)
1069 pll = F_LOW_PCI_50;
1070 else
1071 pll = F_LOW_PCI_66;
1072 1059
1073 printk(KERN_INFO "HPT3xxN detected, FREQ: %d, PLL: %d\n", freq, pll); 1060 /*
1074 } 1061 * HighPoint does this for HPT372A.
1075 else 1062 * NOTE: This register is only writeable via I/O space.
1076 { 1063 */
1077 if(freq < 0x9C) 1064 if (info->chip_type == HPT372A)
1078 pll = F_LOW_PCI_33; 1065 outb(0x0e, io_base + 0x9c);
1079 else if(freq < 0xb0) 1066
1080 pll = F_LOW_PCI_40; 1067 /*
1081 else if(freq <0xc8) 1068 * Default to PCI clock. Make sure MA15/16 are set to output
1082 pll = F_LOW_PCI_50; 1069 * to prevent drives having problems with 40-pin cables.
1070 */
1071 pci_write_config_byte(dev, 0x5b, 0x23);
1072
1073 /*
1074 * We'll have to read f_CNT value in order to determine
1075 * the PCI clock frequency according to the following ratio:
1076 *
1077 * f_CNT = Fpci * 192 / Fdpll
1078 *
1079 * First try reading the register in which the HighPoint BIOS
1080 * saves f_CNT value before reprogramming the DPLL from its
1081 * default setting (which differs for the various chips).
1082 * NOTE: This register is only accessible via I/O space.
1083 *
1084 * In case the signature check fails, we'll have to resort to
1085 * reading the f_CNT register itself in hopes that nobody has
1086 * touched the DPLL yet...
1087 */
1088 temp = inl(io_base + 0x90);
1089 if ((temp & 0xFFFFF000) != 0xABCDE000) {
1090 int i;
1091
1092 printk(KERN_WARNING "%s: no clock data saved by BIOS\n",
1093 name);
1094
1095 /* Calculate the average value of f_CNT. */
1096 for (temp = i = 0; i < 128; i++) {
1097 pci_read_config_word(dev, 0x78, &f_cnt);
1098 temp += f_cnt & 0x1ff;
1099 mdelay(1);
1100 }
1101 f_cnt = temp / 128;
1102 } else
1103 f_cnt = temp & 0x1ff;
1104
1105 dpll_clk = info->dpll_clk;
1106 pci_clk = (f_cnt * dpll_clk) / 192;
1107
1108 /* Clamp PCI clock to bands. */
1109 if (pci_clk < 40)
1110 pci_clk = 33;
1111 else if(pci_clk < 45)
1112 pci_clk = 40;
1113 else if(pci_clk < 55)
1114 pci_clk = 50;
1083 else 1115 else
1084 pll = F_LOW_PCI_66; 1116 pci_clk = 66;
1085 1117
1086 if (pll == F_LOW_PCI_33) { 1118 printk(KERN_INFO "%s: DPLL base: %d MHz, f_CNT: %d, "
1087 info->speed = thirty_three_base_hpt37x; 1119 "assuming %d MHz PCI\n", name, dpll_clk, f_cnt, pci_clk);
1088 printk(KERN_DEBUG "HPT37X: using 33MHz PCI clock\n"); 1120 } else {
1089 } else if (pll == F_LOW_PCI_40) { 1121 u32 itr1 = 0;
1090 /* Unsupported */ 1122
1091 } else if (pll == F_LOW_PCI_50) { 1123 pci_read_config_dword(dev, 0x40, &itr1);
1092 info->speed = fifty_base_hpt37x; 1124
1093 printk(KERN_DEBUG "HPT37X: using 50MHz PCI clock\n"); 1125 /* Detect PCI clock by looking at cmd_high_time. */
1094 } else { 1126 switch((itr1 >> 8) & 0x07) {
1095 info->speed = sixty_six_base_hpt37x; 1127 case 0x09:
1096 printk(KERN_DEBUG "HPT37X: using 66MHz PCI clock\n"); 1128 pci_clk = 40;
1129 break;
1130 case 0x05:
1131 pci_clk = 25;
1132 break;
1133 case 0x07:
1134 default:
1135 pci_clk = 33;
1136 break;
1097 } 1137 }
1098 } 1138 }
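A worked instance of the f_CNT ratio quoted above (the per-chip DPLL bases live in the hpt_info templates; 48 MHz is just an example value):

	/*
	 * f_cnt = 132 saved by the BIOS, dpll_clk = 48 MHz:
	 *
	 *	pci_clk = f_cnt * dpll_clk / 192 = 132 * 48 / 192 = 33 MHz
	 *
	 * which falls below the 40 MHz clamp, so the code assumes a 33 MHz
	 * PCI bus and later selects ATA_CLOCK_33MHZ.
	 */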
1099 1139
1100 if (pll == F_LOW_PCI_66) 1140 /* Let's assume we'll use PCI clock for the ATA clock... */
1101 info->flags |= PCI_66MHZ; 1141 switch (pci_clk) {
1142 case 25:
1143 clock = ATA_CLOCK_25MHZ;
1144 break;
1145 case 33:
1146 default:
1147 clock = ATA_CLOCK_33MHZ;
1148 break;
1149 case 40:
1150 clock = ATA_CLOCK_40MHZ;
1151 break;
1152 case 50:
1153 clock = ATA_CLOCK_50MHZ;
1154 break;
1155 case 66:
1156 clock = ATA_CLOCK_66MHZ;
1157 break;
1158 }
1102 1159
1103 /* 1160 /*
1104 * only try the pll if we don't have a table for the clock 1161 * Only try the DPLL if we don't have a table for the PCI clock that
1105 * speed that we're running at. NOTE: the internal PLL will 1162 * we are running at for HPT370/A, always use it for anything newer...
1106 * result in slow reads when using a 33MHz PCI clock. we also
1107 * don't like to use the PLL because it will cause glitches
1108 * on PRST/SRST when the HPT state engine gets reset.
1109 * 1163 *
1110 * ToDo: Use 66MHz PLL when ATA133 devices are present on a 1164 * NOTE: Using the internal DPLL results in slow reads on 33 MHz PCI.
1111 * 372 device so we can get ATA133 support 1165 * We also don't like using the DPLL because this causes glitches
1166 * on PRST-/SRST- when the state engine gets reset...
1112 */ 1167 */
1113 if (info->speed) 1168 if (info->chip_type >= HPT374 || info->settings[clock] == NULL) {
1114 goto init_hpt37X_done; 1169 u16 f_low, delta = pci_clk < 50 ? 2 : 4;
1170 int adjust;
1171
1172 /*
1173 * Select 66 MHz DPLL clock only if UltraATA/133 mode is
1174 * supported/enabled, use 50 MHz DPLL clock otherwise...
1175 */
1176 if (info->max_mode == 0x04) {
1177 dpll_clk = 66;
1178 clock = ATA_CLOCK_66MHZ;
1179 } else if (dpll_clk) { /* HPT36x chips don't have DPLL */
1180 dpll_clk = 50;
1181 clock = ATA_CLOCK_50MHZ;
1182 }
1115 1183
1116 info->flags |= PLL_MODE; 1184 if (info->settings[clock] == NULL) {
1117 1185 printk(KERN_ERR "%s: unknown bus timing!\n", name);
1118 /* 1186 kfree(info);
1119 * Adjust the PLL based upon the PCI clock, enable it, and 1187 return -EIO;
1120 * wait for stabilization...
1121 */
1122 adjust = 0;
1123 freq = (pll < F_LOW_PCI_50) ? 2 : 4;
1124 while (adjust++ < 6) {
1125 pci_write_config_dword(dev, 0x5c, (freq + pll) << 16 |
1126 pll | 0x100);
1127
1128 /* wait for clock stabilization */
1129 for (i = 0; i < 0x50000; i++) {
1130 pci_read_config_byte(dev, 0x5b, &reg5bh);
1131 if (reg5bh & 0x80) {
1132 /* spin looking for the clock to destabilize */
1133 for (i = 0; i < 0x1000; ++i) {
1134 pci_read_config_byte(dev, 0x5b,
1135 &reg5bh);
1136 if ((reg5bh & 0x80) == 0)
1137 goto pll_recal;
1138 }
1139 pci_read_config_dword(dev, 0x5c, &pll);
1140 pci_write_config_dword(dev, 0x5c,
1141 pll & ~0x100);
1142 pci_write_config_byte(dev, 0x5b, 0x21);
1143
1144 info->speed = fifty_base_hpt37x;
1145 printk("HPT37X: using 50MHz internal PLL\n");
1146 goto init_hpt37X_done;
1147 }
1148 } 1188 }
1149pll_recal:
1150 if (adjust & 1)
1151 pll -= (adjust >> 1);
1152 else
1153 pll += (adjust >> 1);
1154 }
1155 1189
1156init_hpt37X_done: 1190 /* Select the DPLL clock. */
1157 if (!info->speed) 1191 pci_write_config_byte(dev, 0x5b, 0x21);
1158 printk(KERN_ERR "HPT37x%s: unknown bus timing [%d %d].\n",
1159 (info->flags & IS_3xxN) ? "N" : "", pll, freq);
1160 /*
1161 * Reset the state engines.
1162 * NOTE: avoid accidentally enabling the primary channel on HPT371N.
1163 */
1164 pci_read_config_byte(dev, 0x50, &mcr1);
1165 if (mcr1 & 0x04)
1166 pci_write_config_byte(dev, 0x50, 0x37);
1167 pci_write_config_byte(dev, 0x54, 0x37);
1168 udelay(100);
1169}
1170 1192
1171static int __devinit init_hpt37x(struct pci_dev *dev) 1193 /*
1172{ 1194 * Adjust the DPLL based upon PCI clock, enable it,
1173 u8 reg5ah; 1195 * and wait for stabilization...
1196 */
1197 f_low = (pci_clk * 48) / dpll_clk;
1174 1198
1175 pci_read_config_byte(dev, 0x5a, &reg5ah); 1199 for (adjust = 0; adjust < 8; adjust++) {
1176 /* interrupt force enable */ 1200 if(hpt37x_calibrate_dpll(dev, f_low, f_low + delta))
1177 pci_write_config_byte(dev, 0x5a, (reg5ah & ~0x10)); 1201 break;
1178 return 0;
1179}
1180 1202
1181static int __devinit init_hpt366(struct pci_dev *dev) 1203 /*
1182{ 1204 * See if it'll settle at a fractionally different clock
1183 u32 reg1 = 0; 1205 */
1184 u8 drive_fast = 0; 1206 if (adjust & 1)
1207 f_low -= adjust >> 1;
1208 else
1209 f_low += adjust >> 1;
1210 }
1211 if (adjust == 8) {
1212 printk(KERN_ERR "%s: DPLL did not stabilize!\n", name);
1213 kfree(info);
1214 return -EIO;
1215 }
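The "(adjust >> 1)" bookkeeping in the loop above is easier to read as the sequence of f_low values actually tried. Tracing the code for eight failed passes (f = the computed f_low):

	/*
	 * pass (adjust):  0  1  2  3    4  5    6  7
	 * f_low tried:    f  f  f  f+1  f  f+2  f  f+3
	 *
	 * Only values at or above the computed f_low are probed; after
	 * eight failures the function gives up with -EIO.
	 */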
1185 1216
1186 /* 1217 printk("%s: using %d MHz DPLL clock\n", name, dpll_clk);
1187 * Disable the "fast interrupt" prediction. 1218 } else {
1188 */ 1219 /* Mark the fact that we're not using the DPLL. */
1189 pci_read_config_byte(dev, 0x51, &drive_fast); 1220 dpll_clk = 0;
1190 if (drive_fast & 0x80)
1191 pci_write_config_byte(dev, 0x51, drive_fast & ~0x80);
1192 pci_read_config_dword(dev, 0x40, &reg1);
1193
1194 return 0;
1195}
1196 1221
1197static unsigned int __devinit init_chipset_hpt366(struct pci_dev *dev, const char *name) 1222 printk("%s: using %d MHz PCI clock\n", name, pci_clk);
1198{ 1223 }
1199 int ret = 0;
1200 1224
1201 /* 1225 /*
1202 * FIXME: Not portable. Also, why do we enable the ROM in the first place? 1226 * Advance the table pointer to a slot which points to the list
1203 * We don't seem to be using it. 1227 * of the register values settings matching the clock being used.
1204 */ 1228 */
1205 if (dev->resource[PCI_ROM_RESOURCE].start) 1229 info->settings += clock;
1206 pci_write_config_dword(dev, PCI_ROM_ADDRESS,
1207 dev->resource[PCI_ROM_RESOURCE].start | PCI_ROM_ADDRESS_ENABLE);
1208 1230
1209 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, (L1_CACHE_BYTES / 4)); 1231 /* Store the clock frequencies. */
1210 pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x78); 1232 info->dpll_clk = dpll_clk;
1211 pci_write_config_byte(dev, PCI_MIN_GNT, 0x08); 1233 info->pci_clk = pci_clk;
1212 pci_write_config_byte(dev, PCI_MAX_LAT, 0x08);
1213 1234
1214 if (hpt_revision(dev) >= 3) 1235 /* Point to this chip's own instance of the hpt_info structure. */
1215 ret = init_hpt37x(dev); 1236 pci_set_drvdata(dev, info);
1216 else
1217 ret = init_hpt366(dev);
1218 1237
1219 if (ret) 1238 if (info->chip_type >= HPT370) {
1220 return ret; 1239 u8 mcr1, mcr4;
1240
1241 /*
1242 * Reset the state engines.
1243 * NOTE: Avoid accidentally enabling the disabled channels.
1244 */
1245 pci_read_config_byte (dev, 0x50, &mcr1);
1246 pci_read_config_byte (dev, 0x54, &mcr4);
1247 pci_write_config_byte(dev, 0x50, (mcr1 | 0x32));
1248 pci_write_config_byte(dev, 0x54, (mcr4 | 0x32));
1249 udelay(100);
1250 }
1251
1252 /*
1253 * On HPT371N, if ATA clock is 66 MHz we must set bit 2 in
1254 * the MISC. register to stretch the UltraDMA Tss timing.
1255 * NOTE: This register is only writeable via I/O space.
1256 */
1257 if (info->chip_type == HPT371N && clock == ATA_CLOCK_66MHZ)
1258
1259 outb(inb(io_base + 0x9c) | 0x04, io_base + 0x9c);
1221 1260
1222 return dev->irq; 1261 return dev->irq;
1223} 1262}
1224 1263
1225static void __devinit init_hwif_hpt366(ide_hwif_t *hwif) 1264static void __devinit init_hwif_hpt366(ide_hwif_t *hwif)
1226{ 1265{
1227 struct pci_dev *dev = hwif->pci_dev; 1266 struct pci_dev *dev = hwif->pci_dev;
1228 struct hpt_info *info = ide_get_hwifdata(hwif); 1267 struct hpt_info *info = pci_get_drvdata(dev);
1229 u8 ata66 = 0, regmask = (hwif->channel) ? 0x01 : 0x02;
1230 int serialize = HPT_SERIALIZE_IO; 1268 int serialize = HPT_SERIALIZE_IO;
1231 1269 u8 scr1 = 0, ata66 = (hwif->channel) ? 0x01 : 0x02;
1270 u8 chip_type = info->chip_type;
1271 u8 new_mcr, old_mcr = 0;
1272
1273 /* Cache the channel's MISC. control registers' offset */
1274 hwif->select_data = hwif->channel ? 0x54 : 0x50;
1275
1232 hwif->tuneproc = &hpt3xx_tune_drive; 1276 hwif->tuneproc = &hpt3xx_tune_drive;
1233 hwif->speedproc = &hpt3xx_tune_chipset; 1277 hwif->speedproc = &hpt3xx_tune_chipset;
1234 hwif->quirkproc = &hpt3xx_quirkproc; 1278 hwif->quirkproc = &hpt3xx_quirkproc;
1235 hwif->intrproc = &hpt3xx_intrproc; 1279 hwif->intrproc = &hpt3xx_intrproc;
1236 hwif->maskproc = &hpt3xx_maskproc; 1280 hwif->maskproc = &hpt3xx_maskproc;
1237 1281 hwif->busproc = &hpt3xx_busproc;
1282
1238 /* 1283 /*
1239 * HPT3xxN chips have some complications: 1284 * HPT3xxN chips have some complications:
1240 * 1285 *
1241 * - on 33 MHz PCI we must clock switch 1286 * - on 33 MHz PCI we must clock switch
1242 * - on 66 MHz PCI we must NOT use the PCI clock 1287 * - on 66 MHz PCI we must NOT use the PCI clock
1243 */ 1288 */
1244 if ((info->flags & (IS_3xxN | PCI_66MHZ)) == IS_3xxN) { 1289 if (chip_type >= HPT372N && info->dpll_clk && info->pci_clk < 66) {
1245 /* 1290 /*
1246 * Clock is shared between the channels, 1291 * Clock is shared between the channels,
1247 * so we'll have to serialize them... :-( 1292 * so we'll have to serialize them... :-(
@@ -1250,200 +1295,171 @@ static void __devinit init_hwif_hpt366(ide_hwif_t *hwif)
1250 hwif->rw_disk = &hpt3xxn_rw_disk; 1295 hwif->rw_disk = &hpt3xxn_rw_disk;
1251 } 1296 }
1252 1297
1298 /* Serialize access to this device if needed */
1299 if (serialize && hwif->mate)
1300 hwif->serialized = hwif->mate->serialized = 1;
1301
1302 /*
1303 * Disable the "fast interrupt" prediction. Don't hold off
1304 * on interrupts. (== 0x01 despite what the docs say)
1305 */
1306 pci_read_config_byte(dev, hwif->select_data + 1, &old_mcr);
1307
1308 if (info->chip_type >= HPT374)
1309 new_mcr = old_mcr & ~0x07;
1310 else if (info->chip_type >= HPT370) {
1311 new_mcr = old_mcr;
1312 new_mcr &= ~0x02;
1313
1314#ifdef HPT_DELAY_INTERRUPT
1315 new_mcr &= ~0x01;
1316#else
1317 new_mcr |= 0x01;
1318#endif
1319 } else /* HPT366 and HPT368 */
1320 new_mcr = old_mcr & ~0x80;
1321
1322 if (new_mcr != old_mcr)
1323 pci_write_config_byte(dev, hwif->select_data + 1, new_mcr);
1324
1325 if (!hwif->dma_base) {
1326 hwif->drives[0].autotune = hwif->drives[1].autotune = 1;
1327 return;
1328 }
1329
1330 hwif->ultra_mask = 0x7f;
1331 hwif->mwdma_mask = 0x07;
1332
1253 /* 1333 /*
1254 * The HPT37x uses the CBLID pins as outputs for MA15/MA16 1334 * The HPT37x uses the CBLID pins as outputs for MA15/MA16
1255 * address lines to access an external eeprom. To read valid 1335 * address lines to access an external EEPROM. To read valid
1256 * cable detect state the pins must be enabled as inputs. 1336 * cable detect state the pins must be enabled as inputs.
1257 */ 1337 */
1258 if (info->revision >= 8 && (PCI_FUNC(dev->devfn) & 1)) { 1338 if (chip_type == HPT374 && (PCI_FUNC(dev->devfn) & 1)) {
1259 /* 1339 /*
1260 * HPT374 PCI function 1 1340 * HPT374 PCI function 1
1261 * - set bit 15 of reg 0x52 to enable TCBLID as input 1341 * - set bit 15 of reg 0x52 to enable TCBLID as input
1262 * - set bit 15 of reg 0x56 to enable FCBLID as input 1342 * - set bit 15 of reg 0x56 to enable FCBLID as input
1263 */ 1343 */
1264 u16 mcr3, mcr6; 1344 u8 mcr_addr = hwif->select_data + 2;
1265 pci_read_config_word(dev, 0x52, &mcr3); 1345 u16 mcr;
1266 pci_read_config_word(dev, 0x56, &mcr6); 1346
1267 pci_write_config_word(dev, 0x52, mcr3 | 0x8000); 1347 pci_read_config_word (dev, mcr_addr, &mcr);
1268 pci_write_config_word(dev, 0x56, mcr6 | 0x8000); 1348 pci_write_config_word(dev, mcr_addr, (mcr | 0x8000));
1269 /* now read cable id register */ 1349 /* now read cable id register */
1270 pci_read_config_byte(dev, 0x5a, &ata66); 1350 pci_read_config_byte (dev, 0x5a, &scr1);
1271 pci_write_config_word(dev, 0x52, mcr3); 1351 pci_write_config_word(dev, mcr_addr, mcr);
1272 pci_write_config_word(dev, 0x56, mcr6); 1352 } else if (chip_type >= HPT370) {
1273 } else if (info->revision >= 3) {
1274 /* 1353 /*
1275 * HPT370/372 and 374 pcifn 0 1354 * HPT370/372 and 374 pcifn 0
1276 * - clear bit 0 of 0x5b to enable P/SCBLID as inputs 1355 * - clear bit 0 of reg 0x5b to enable P/SCBLID as inputs
1277 */ 1356 */
1278 u8 scr2; 1357 u8 scr2 = 0;
1279 pci_read_config_byte(dev, 0x5b, &scr2);
1280 pci_write_config_byte(dev, 0x5b, scr2 & ~1);
1281 /* now read cable id register */
1282 pci_read_config_byte(dev, 0x5a, &ata66);
1283 pci_write_config_byte(dev, 0x5b, scr2);
1284 } else {
1285 pci_read_config_byte(dev, 0x5a, &ata66);
1286 }
1287 1358
1288#ifdef DEBUG 1359 pci_read_config_byte (dev, 0x5b, &scr2);
1289 printk("HPT366: reg5ah=0x%02x ATA-%s Cable Port%d\n", 1360 pci_write_config_byte(dev, 0x5b, (scr2 & ~1));
1290 ata66, (ata66 & regmask) ? "33" : "66", 1361 /* now read cable id register */
1291 PCI_FUNC(hwif->pci_dev->devfn)); 1362 pci_read_config_byte (dev, 0x5a, &scr1);
1292#endif /* DEBUG */ 1363 pci_write_config_byte(dev, 0x5b, scr2);
1293 1364 } else
1294 /* Serialize access to this device */ 1365 pci_read_config_byte (dev, 0x5a, &scr1);
1295 if (serialize && hwif->mate)
1296 hwif->serialized = hwif->mate->serialized = 1;
1297 1366
1298 /* 1367 if (!hwif->udma_four)
1299 * Set up ioctl for power status. 1368 hwif->udma_four = (scr1 & ata66) ? 0 : 1;
1300 * NOTE: power affects both drives on each channel.
1301 */
1302 hwif->busproc = &hpt3xx_busproc;
1303 1369
1304 if (!hwif->dma_base) { 1370 hwif->ide_dma_check = &hpt366_config_drive_xfer_rate;
1305 hwif->drives[0].autotune = 1;
1306 hwif->drives[1].autotune = 1;
1307 return;
1308 }
1309 1371
1310 hwif->ultra_mask = 0x7f; 1372 if (chip_type >= HPT374) {
1311 hwif->mwdma_mask = 0x07; 1373 hwif->ide_dma_test_irq = &hpt374_ide_dma_test_irq;
1312 1374 hwif->ide_dma_end = &hpt374_ide_dma_end;
1313 if (!(hwif->udma_four)) 1375 } else if (chip_type >= HPT370) {
1314 hwif->udma_four = ((ata66 & regmask) ? 0 : 1); 1376 hwif->dma_start = &hpt370_ide_dma_start;
1315 hwif->ide_dma_check = &hpt366_config_drive_xfer_rate; 1377 hwif->ide_dma_end = &hpt370_ide_dma_end;
1316 1378 hwif->ide_dma_timeout = &hpt370_ide_dma_timeout;
1317 if (info->revision >= 8) { 1379 } else
1318 hwif->ide_dma_test_irq = &hpt374_ide_dma_test_irq; 1380 hwif->ide_dma_lostirq = &hpt366_ide_dma_lostirq;
1319 hwif->ide_dma_end = &hpt374_ide_dma_end;
1320 } else if (info->revision >= 5) {
1321 hwif->ide_dma_test_irq = &hpt374_ide_dma_test_irq;
1322 hwif->ide_dma_end = &hpt374_ide_dma_end;
1323 } else if (info->revision >= 3) {
1324 hwif->dma_start = &hpt370_ide_dma_start;
1325 hwif->ide_dma_end = &hpt370_ide_dma_end;
1326 hwif->ide_dma_timeout = &hpt370_ide_dma_timeout;
1327 hwif->ide_dma_lostirq = &hpt370_ide_dma_lostirq;
1328 } else if (info->revision >= 2)
1329 hwif->ide_dma_lostirq = &hpt366_ide_dma_lostirq;
1330 else
1331 hwif->ide_dma_lostirq = &hpt366_ide_dma_lostirq;
1332 1381
1333 if (!noautodma) 1382 if (!noautodma)
1334 hwif->autodma = 1; 1383 hwif->autodma = 1;
1335 hwif->drives[0].autodma = hwif->autodma; 1384 hwif->drives[0].autodma = hwif->drives[1].autodma = hwif->autodma;
1336 hwif->drives[1].autodma = hwif->autodma;
1337} 1385}
1338 1386
1339static void __devinit init_dma_hpt366(ide_hwif_t *hwif, unsigned long dmabase) 1387static void __devinit init_dma_hpt366(ide_hwif_t *hwif, unsigned long dmabase)
1340{ 1388{
1341 struct hpt_info *info = ide_get_hwifdata(hwif); 1389 struct pci_dev *dev = hwif->pci_dev;
1342 u8 masterdma = 0, slavedma = 0; 1390 u8 masterdma = 0, slavedma = 0;
1343 u8 dma_new = 0, dma_old = 0; 1391 u8 dma_new = 0, dma_old = 0;
1344 u8 primary = hwif->channel ? 0x4b : 0x43;
1345 u8 secondary = hwif->channel ? 0x4f : 0x47;
1346 unsigned long flags; 1392 unsigned long flags;
1347 1393
1348 if (!dmabase) 1394 if (!dmabase)
1349 return; 1395 return;
1350 1396
1351 if(info->speed == NULL) { 1397 dma_old = hwif->INB(dmabase + 2);
1352 printk(KERN_WARNING "hpt366: no known IDE timings, disabling DMA.\n");
1353 return;
1354 }
1355
1356 dma_old = hwif->INB(dmabase+2);
1357 1398
1358 local_irq_save(flags); 1399 local_irq_save(flags);
1359 1400
1360 dma_new = dma_old; 1401 dma_new = dma_old;
1361 pci_read_config_byte(hwif->pci_dev, primary, &masterdma); 1402 pci_read_config_byte(dev, hwif->channel ? 0x4b : 0x43, &masterdma);
1362 pci_read_config_byte(hwif->pci_dev, secondary, &slavedma); 1403 pci_read_config_byte(dev, hwif->channel ? 0x4f : 0x47, &slavedma);
1363 1404
1364 if (masterdma & 0x30) dma_new |= 0x20; 1405 if (masterdma & 0x30) dma_new |= 0x20;
1365 if (slavedma & 0x30) dma_new |= 0x40; 1406 if ( slavedma & 0x30) dma_new |= 0x40;
1366 if (dma_new != dma_old) 1407 if (dma_new != dma_old)
1367 hwif->OUTB(dma_new, dmabase+2); 1408 hwif->OUTB(dma_new, dmabase + 2);
1368 1409
1369 local_irq_restore(flags); 1410 local_irq_restore(flags);
1370 1411
1371 ide_setup_dma(hwif, dmabase, 8); 1412 ide_setup_dma(hwif, dmabase, 8);
1372} 1413}
1373 1414
1374/*
1375 * We "borrow" this hook in order to set the data structures
1376 * up early enough before dma or init_hwif calls are made.
1377 */
1378
1379static void __devinit init_iops_hpt366(ide_hwif_t *hwif)
1380{
1381 struct hpt_info *info = kzalloc(sizeof(struct hpt_info), GFP_KERNEL);
1382 struct pci_dev *dev = hwif->pci_dev;
1383 u16 did = dev->device;
1384 u8 rid = 0;
1385
1386 if(info == NULL) {
1387 printk(KERN_WARNING "hpt366: out of memory.\n");
1388 return;
1389 }
1390 ide_set_hwifdata(hwif, info);
1391
1392 /* Avoid doing the same thing twice. */
1393 if (hwif->channel && hwif->mate) {
1394 memcpy(info, ide_get_hwifdata(hwif->mate), sizeof(struct hpt_info));
1395 return;
1396 }
1397
1398 pci_read_config_byte(dev, PCI_CLASS_REVISION, &rid);
1399
1400 if (( did == PCI_DEVICE_ID_TTI_HPT366 && rid == 6) ||
1401 ((did == PCI_DEVICE_ID_TTI_HPT372 ||
1402 did == PCI_DEVICE_ID_TTI_HPT302 ||
1403 did == PCI_DEVICE_ID_TTI_HPT371) && rid > 1) ||
1404 did == PCI_DEVICE_ID_TTI_HPT372N)
1405 info->flags |= IS_3xxN;
1406
1407 info->revision = hpt_revision(dev);
1408
1409 if (info->revision >= 3)
1410 hpt37x_clocking(hwif);
1411 else
1412 hpt366_clocking(hwif);
1413}
1414
1415static int __devinit init_setup_hpt374(struct pci_dev *dev, ide_pci_device_t *d) 1415static int __devinit init_setup_hpt374(struct pci_dev *dev, ide_pci_device_t *d)
1416{ 1416{
1417 struct pci_dev *findev = NULL; 1417 struct pci_dev *dev2;
1418 1418
1419 if (PCI_FUNC(dev->devfn) & 1) 1419 if (PCI_FUNC(dev->devfn) & 1)
1420 return -ENODEV; 1420 return -ENODEV;
1421 1421
1422 while ((findev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, findev)) != NULL) { 1422 pci_set_drvdata(dev, &hpt374);
1423 if ((findev->vendor == dev->vendor) && 1423
1424 (findev->device == dev->device) && 1424 if ((dev2 = pci_get_slot(dev->bus, dev->devfn + 1)) != NULL) {
1425 ((findev->devfn - dev->devfn) == 1) && 1425 int ret;
1426 (PCI_FUNC(findev->devfn) & 1)) { 1426
1427 if (findev->irq != dev->irq) { 1427 pci_set_drvdata(dev2, &hpt374);
1428 /* FIXME: we need a core pci_set_interrupt() */ 1428
1429 findev->irq = dev->irq; 1429 if (dev2->irq != dev->irq) {
1430 printk(KERN_WARNING "%s: pci-config space interrupt " 1430 /* FIXME: we need a core pci_set_interrupt() */
1431 "fixed.\n", d->name); 1431 dev2->irq = dev->irq;
1432 } 1432 printk(KERN_WARNING "%s: PCI config space interrupt "
1433 return ide_setup_pci_devices(dev, findev, d); 1433 "fixed.\n", d->name);
1434 } 1434 }
1435 ret = ide_setup_pci_devices(dev, dev2, d);
1436 if (ret < 0)
1437 pci_dev_put(dev2);
1438 return ret;
1435 } 1439 }
1436 return ide_setup_pci_device(dev, d); 1440 return ide_setup_pci_device(dev, d);
1437} 1441}
1438 1442
1439static int __devinit init_setup_hpt37x(struct pci_dev *dev, ide_pci_device_t *d) 1443static int __devinit init_setup_hpt372n(struct pci_dev *dev, ide_pci_device_t *d)
1440{ 1444{
1445 pci_set_drvdata(dev, &hpt372n);
1446
1441 return ide_setup_pci_device(dev, d); 1447 return ide_setup_pci_device(dev, d);
1442} 1448}
1443 1449
1444static int __devinit init_setup_hpt371(struct pci_dev *dev, ide_pci_device_t *d) 1450static int __devinit init_setup_hpt371(struct pci_dev *dev, ide_pci_device_t *d)
1445{ 1451{
1446 u8 mcr1 = 0; 1452 struct hpt_info *info;
1453 u8 rev = 0, mcr1 = 0;
1454
1455 pci_read_config_byte(dev, PCI_REVISION_ID, &rev);
1456
1457 if (rev > 1) {
1458 d->name = "HPT371N";
1459
1460 info = &hpt371n;
1461 } else
1462 info = &hpt371;
1447 1463
1448 /* 1464 /*
1449 * HPT371 chips physically have only one channel, the secondary one, 1465 * HPT371 chips physically have only one channel, the secondary one,
@@ -1453,59 +1469,94 @@ static int __devinit init_setup_hpt371(struct pci_dev *dev, ide_pci_device_t *d)
1453 */ 1469 */
1454 pci_read_config_byte(dev, 0x50, &mcr1); 1470 pci_read_config_byte(dev, 0x50, &mcr1);
1455 if (mcr1 & 0x04) 1471 if (mcr1 & 0x04)
1456 pci_write_config_byte(dev, 0x50, (mcr1 & ~0x04)); 1472 pci_write_config_byte(dev, 0x50, mcr1 & ~0x04);
1473
1474 pci_set_drvdata(dev, info);
1475
1476 return ide_setup_pci_device(dev, d);
1477}
1478
1479static int __devinit init_setup_hpt372a(struct pci_dev *dev, ide_pci_device_t *d)
1480{
1481 struct hpt_info *info;
1482 u8 rev = 0;
1483
1484 pci_read_config_byte(dev, PCI_REVISION_ID, &rev);
1485
1486 if (rev > 1) {
1487 d->name = "HPT372N";
1488
1489 info = &hpt372n;
1490 } else
1491 info = &hpt372a;
1492 pci_set_drvdata(dev, info);
1493
1494 return ide_setup_pci_device(dev, d);
1495}
1496
1497static int __devinit init_setup_hpt302(struct pci_dev *dev, ide_pci_device_t *d)
1498{
1499 struct hpt_info *info;
1500 u8 rev = 0;
1501
1502 pci_read_config_byte(dev, PCI_REVISION_ID, &rev);
1503
1504 if (rev > 1) {
1505 d->name = "HPT302N";
1506
1507 info = &hpt302n;
1508 } else
1509 info = &hpt302;
1510 pci_set_drvdata(dev, info);
1457 1511
1458 return ide_setup_pci_device(dev, d); 1512 return ide_setup_pci_device(dev, d);
1459} 1513}
1460 1514
1461static int __devinit init_setup_hpt366(struct pci_dev *dev, ide_pci_device_t *d) 1515static int __devinit init_setup_hpt366(struct pci_dev *dev, ide_pci_device_t *d)
1462{ 1516{
1463 struct pci_dev *findev = NULL; 1517 struct pci_dev *dev2;
1464 u8 pin1 = 0, pin2 = 0; 1518 u8 rev = 0;
1465 unsigned int class_rev; 1519 static char *chipset_names[] = { "HPT366", "HPT366", "HPT368",
1466 char *chipset_names[] = {"HPT366", "HPT366", "HPT368", 1520 "HPT370", "HPT370A", "HPT372",
1467 "HPT370", "HPT370A", "HPT372", 1521 "HPT372N" };
1468 "HPT372N" }; 1522 static struct hpt_info *info[] = { &hpt36x, &hpt36x, &hpt36x,
1523 &hpt370, &hpt370a, &hpt372,
1524 &hpt372n };
1469 1525
1470 if (PCI_FUNC(dev->devfn) & 1) 1526 if (PCI_FUNC(dev->devfn) & 1)
1471 return -ENODEV; 1527 return -ENODEV;
1472 1528
1473 pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev); 1529 pci_read_config_byte(dev, PCI_REVISION_ID, &rev);
1474 class_rev &= 0xff;
1475 1530
1476 if(dev->device == PCI_DEVICE_ID_TTI_HPT372N) 1531 if (rev > 6)
1477 class_rev = 6; 1532 rev = 6;
1478 1533
1479 if(class_rev <= 6) 1534 d->name = chipset_names[rev];
1480 d->name = chipset_names[class_rev]; 1535
1481 1536 pci_set_drvdata(dev, info[rev]);
1482 switch(class_rev) { 1537
1483 case 6: 1538 if (rev > 2)
1484 case 5: 1539 goto init_single;
1485 case 4:
1486 case 3:
1487 goto init_single;
1488 default:
1489 break;
1490 }
1491 1540
1492 d->channels = 1; 1541 d->channels = 1;
1493 1542
1494 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin1); 1543 if ((dev2 = pci_get_slot(dev->bus, dev->devfn + 1)) != NULL) {
1495 while ((findev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, findev)) != NULL) { 1544 u8 pin1 = 0, pin2 = 0;
1496 if ((findev->vendor == dev->vendor) && 1545 int ret;
1497 (findev->device == dev->device) && 1546
1498 ((findev->devfn - dev->devfn) == 1) && 1547 pci_set_drvdata(dev2, info[rev]);
1499 (PCI_FUNC(findev->devfn) & 1)) { 1548
1500 pci_read_config_byte(findev, PCI_INTERRUPT_PIN, &pin2); 1549 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin1);
1501 if ((pin1 != pin2) && (dev->irq == findev->irq)) { 1550 pci_read_config_byte(dev2, PCI_INTERRUPT_PIN, &pin2);
1502 d->bootable = ON_BOARD; 1551 if (pin1 != pin2 && dev->irq == dev2->irq) {
1503 printk("%s: onboard version of chipset, " 1552 d->bootable = ON_BOARD;
1504 "pin1=%d pin2=%d\n", d->name, 1553 printk("%s: onboard version of chipset, pin1=%d pin2=%d\n",
1505 pin1, pin2); 1554 d->name, pin1, pin2);
1506 }
1507 return ide_setup_pci_devices(dev, findev, d);
1508 } 1555 }
1556 ret = ide_setup_pci_devices(dev, dev2, d);
1557 if (ret < 0)
1558 pci_dev_put(dev2);
1559 return ret;
1509 } 1560 }
1510init_single: 1561init_single:
1511 return ide_setup_pci_device(dev, d); 1562 return ide_setup_pci_device(dev, d);
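Spelling out the two parallel revision tables above:

	/*
	 * PCI_REVISION_ID -> chip, per chipset_names[]/info[]:
	 *   0-1: HPT366, 2: HPT368, 3: HPT370, 4: HPT370A,
	 *   5: HPT372, >=6: HPT372N (revisions above 6 are clamped to 6)
	 */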
@@ -1516,64 +1567,68 @@ static ide_pci_device_t hpt366_chipsets[] __devinitdata = {
1516 .name = "HPT366", 1567 .name = "HPT366",
1517 .init_setup = init_setup_hpt366, 1568 .init_setup = init_setup_hpt366,
1518 .init_chipset = init_chipset_hpt366, 1569 .init_chipset = init_chipset_hpt366,
1519 .init_iops = init_iops_hpt366,
1520 .init_hwif = init_hwif_hpt366, 1570 .init_hwif = init_hwif_hpt366,
1521 .init_dma = init_dma_hpt366, 1571 .init_dma = init_dma_hpt366,
1522 .channels = 2, 1572 .channels = 2,
1523 .autodma = AUTODMA, 1573 .autodma = AUTODMA,
1574 .enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
1524 .bootable = OFF_BOARD, 1575 .bootable = OFF_BOARD,
1525 .extra = 240 1576 .extra = 240
1526 },{ /* 1 */ 1577 },{ /* 1 */
1527 .name = "HPT372A", 1578 .name = "HPT372A",
1528 .init_setup = init_setup_hpt37x, 1579 .init_setup = init_setup_hpt372a,
1529 .init_chipset = init_chipset_hpt366, 1580 .init_chipset = init_chipset_hpt366,
1530 .init_iops = init_iops_hpt366,
1531 .init_hwif = init_hwif_hpt366, 1581 .init_hwif = init_hwif_hpt366,
1532 .init_dma = init_dma_hpt366, 1582 .init_dma = init_dma_hpt366,
1533 .channels = 2, 1583 .channels = 2,
1534 .autodma = AUTODMA, 1584 .autodma = AUTODMA,
1585 .enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
1535 .bootable = OFF_BOARD, 1586 .bootable = OFF_BOARD,
1587 .extra = 240
1536 },{ /* 2 */ 1588 },{ /* 2 */
1537 .name = "HPT302", 1589 .name = "HPT302",
1538 .init_setup = init_setup_hpt37x, 1590 .init_setup = init_setup_hpt302,
1539 .init_chipset = init_chipset_hpt366, 1591 .init_chipset = init_chipset_hpt366,
1540 .init_iops = init_iops_hpt366,
1541 .init_hwif = init_hwif_hpt366, 1592 .init_hwif = init_hwif_hpt366,
1542 .init_dma = init_dma_hpt366, 1593 .init_dma = init_dma_hpt366,
1543 .channels = 2, 1594 .channels = 2,
1544 .autodma = AUTODMA, 1595 .autodma = AUTODMA,
1596 .enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
1545 .bootable = OFF_BOARD, 1597 .bootable = OFF_BOARD,
1598 .extra = 240
1546 },{ /* 3 */ 1599 },{ /* 3 */
1547 .name = "HPT371", 1600 .name = "HPT371",
1548 .init_setup = init_setup_hpt371, 1601 .init_setup = init_setup_hpt371,
1549 .init_chipset = init_chipset_hpt366, 1602 .init_chipset = init_chipset_hpt366,
1550 .init_iops = init_iops_hpt366,
1551 .init_hwif = init_hwif_hpt366, 1603 .init_hwif = init_hwif_hpt366,
1552 .init_dma = init_dma_hpt366, 1604 .init_dma = init_dma_hpt366,
1553 .channels = 2, 1605 .channels = 2,
1554 .autodma = AUTODMA, 1606 .autodma = AUTODMA,
1555 .enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}}, 1607 .enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
1556 .bootable = OFF_BOARD, 1608 .bootable = OFF_BOARD,
1609 .extra = 240
1557 },{ /* 4 */ 1610 },{ /* 4 */
1558 .name = "HPT374", 1611 .name = "HPT374",
1559 .init_setup = init_setup_hpt374, 1612 .init_setup = init_setup_hpt374,
1560 .init_chipset = init_chipset_hpt366, 1613 .init_chipset = init_chipset_hpt366,
1561 .init_iops = init_iops_hpt366,
1562 .init_hwif = init_hwif_hpt366, 1614 .init_hwif = init_hwif_hpt366,
1563 .init_dma = init_dma_hpt366, 1615 .init_dma = init_dma_hpt366,
1564 .channels = 2, /* 4 */ 1616 .channels = 2, /* 4 */
1565 .autodma = AUTODMA, 1617 .autodma = AUTODMA,
1618 .enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
1566 .bootable = OFF_BOARD, 1619 .bootable = OFF_BOARD,
1620 .extra = 240
1567 },{ /* 5 */ 1621 },{ /* 5 */
1568 .name = "HPT372N", 1622 .name = "HPT372N",
1569 .init_setup = init_setup_hpt37x, 1623 .init_setup = init_setup_hpt372n,
1570 .init_chipset = init_chipset_hpt366, 1624 .init_chipset = init_chipset_hpt366,
1571 .init_iops = init_iops_hpt366,
1572 .init_hwif = init_hwif_hpt366, 1625 .init_hwif = init_hwif_hpt366,
1573 .init_dma = init_dma_hpt366, 1626 .init_dma = init_dma_hpt366,
1574 .channels = 2, /* 4 */ 1627 .channels = 2, /* 4 */
1575 .autodma = AUTODMA, 1628 .autodma = AUTODMA,
1629 .enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
1576 .bootable = OFF_BOARD, 1630 .bootable = OFF_BOARD,
1631 .extra = 240
1577 } 1632 }
1578}; 1633};
1579 1634
diff --git a/drivers/ide/pci/it8213.c b/drivers/ide/pci/it8213.c
new file mode 100644
index 000000000000..63248b6909fa
--- /dev/null
+++ b/drivers/ide/pci/it8213.c
@@ -0,0 +1,362 @@
1/*
2 * ITE 8213 IDE driver
3 *
4 * Copyright (C) 2006 Jack Lee
5 * Copyright (C) 2006 Alan Cox
6 * Copyright (C) 2007 Bartlomiej Zolnierkiewicz
7 */
8
9#include <linux/kernel.h>
10#include <linux/types.h>
11#include <linux/module.h>
12#include <linux/pci.h>
13#include <linux/delay.h>
14#include <linux/hdreg.h>
15#include <linux/ide.h>
16#include <linux/init.h>
17
18#include <asm/io.h>
19
20/*
21 * it8213_ratemask - Compute available modes
22 * @drive: IDE drive
23 *
24 * Compute the available speeds for the devices on the interface. This
25 * is all modes to ATA133 clipped by drive cable setup.
26 */
27
28static u8 it8213_ratemask (ide_drive_t *drive)
29{
30 u8 mode = 4;
31 if (!eighty_ninty_three(drive))
32 mode = min_t(u8, mode, 1);
33 return mode;
34}
35
36/**
37 * it8213_dma_2_pio - return the PIO mode matching DMA
38 * @xfer_rate: transfer speed
39 *
40 * Returns the nearest equivalent PIO timing for the PIO or DMA
41 * mode requested by the controller.
42 */
43
44static u8 it8213_dma_2_pio (u8 xfer_rate) {
45 switch(xfer_rate) {
46 case XFER_UDMA_6:
47 case XFER_UDMA_5:
48 case XFER_UDMA_4:
49 case XFER_UDMA_3:
50 case XFER_UDMA_2:
51 case XFER_UDMA_1:
52 case XFER_UDMA_0:
53 case XFER_MW_DMA_2:
54 case XFER_PIO_4:
55 return 4;
56 case XFER_MW_DMA_1:
57 case XFER_PIO_3:
58 return 3;
59 case XFER_SW_DMA_2:
60 case XFER_PIO_2:
61 return 2;
62 case XFER_MW_DMA_0:
63 case XFER_SW_DMA_1:
64 case XFER_SW_DMA_0:
65 case XFER_PIO_1:
66 case XFER_PIO_0:
67 case XFER_PIO_SLOW:
68 default:
69 return 0;
70 }
71}
72
73/*
74 * it8213_tuneproc - tune a drive
75 * @drive: drive to tune
76 * @pio: desired PIO mode
77 *
78 * Set the interface PIO mode.
79 */
80
81static void it8213_tuneproc (ide_drive_t *drive, u8 pio)
82{
83 ide_hwif_t *hwif = HWIF(drive);
84 struct pci_dev *dev = hwif->pci_dev;
85 int is_slave = drive->dn & 1;
86 int master_port = 0x40;
87 int slave_port = 0x44;
88 unsigned long flags;
89 u16 master_data;
90 u8 slave_data;
91 static DEFINE_SPINLOCK(tune_lock);
92 int control = 0;
93
94 static const u8 timings[][2]= {
95 { 0, 0 },
96 { 0, 0 },
97 { 1, 0 },
98 { 2, 1 },
99 { 2, 3 }, };
100
101 pio = ide_get_best_pio_mode(drive, pio, 4, NULL);
102
103 spin_lock_irqsave(&tune_lock, flags);
104 pci_read_config_word(dev, master_port, &master_data);
105
106 if (pio > 1)
107 control |= 1; /* Programmable timing on */
108 if (drive->media != ide_disk)
109 control |= 4; /* ATAPI */
110 if (pio > 2)
111 control |= 2; /* IORDY */
112 if (is_slave) {
113 master_data |= 0x4000;
114 master_data &= ~0x0070;
115 if (pio > 1)
116 master_data = master_data | (control << 4);
117 pci_read_config_byte(dev, slave_port, &slave_data);
118 slave_data = slave_data & 0xf0;
119 slave_data = slave_data | (timings[pio][0] << 2) | timings[pio][1];
120 } else {
121 master_data &= ~0x3307;
122 if (pio > 1)
123 master_data = master_data | control;
124 master_data = master_data | (timings[pio][0] << 12) | (timings[pio][1] << 8);
125 }
126 pci_write_config_word(dev, master_port, master_data);
127 if (is_slave)
128 pci_write_config_byte(dev, slave_port, slave_data);
129 spin_unlock_irqrestore(&tune_lock, flags);
130}
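The timing word at 0x40 that it8213_tuneproc() programs follows the Intel PIIX IDETIM layout; the field names below come from that lineage and are an assumption, not ITE documentation. The bit positions themselves are fixed by the masks in the code:

	/*
	 * Assumed IDETIM-style layout of the 0x40 word:
	 *
	 * bits 13:12 - ISP, sample point     <- timings[pio][0] (master)
	 * bits  9:8  - RTC, recovery time    <- timings[pio][1] (master)
	 * bit  14    - SITRE: slave timings moved to the 0x44 nibble
	 * bits  6:4  - slave control nibble  <- control << 4
	 * bits  2:0  - control: 1 = timing on, 2 = IORDY, 4 = ATAPI
	 */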
131
132/**
133 * it8213_tune_chipset - set controller timings
134 * @drive: Drive to set up
135 * @xferspeed: speed we want to achieve
136 *
137 * Tune the ITE chipset for the desired mode. If we can't achieve
138 * the desired mode then tune for a lower one, but ultimately
139 * make the thing work.
140 */
141
142static int it8213_tune_chipset (ide_drive_t *drive, u8 xferspeed)
143{
144
145 ide_hwif_t *hwif = HWIF(drive);
146 struct pci_dev *dev = hwif->pci_dev;
147 u8 maslave = 0x40;
148 u8 speed = ide_rate_filter(it8213_ratemask(drive), xferspeed);
149 int a_speed = 3 << (drive->dn * 4);
150 int u_flag = 1 << drive->dn;
151 int v_flag = 0x01 << drive->dn;
152 int w_flag = 0x10 << drive->dn;
153 int u_speed = 0;
154 u16 reg4042, reg4a;
155 u8 reg48, reg54, reg55;
156
157 pci_read_config_word(dev, maslave, &reg4042);
158 pci_read_config_byte(dev, 0x48, &reg48);
159 pci_read_config_word(dev, 0x4a, &reg4a);
160 pci_read_config_byte(dev, 0x54, &reg54);
161 pci_read_config_byte(dev, 0x55, &reg55);
162
163 switch(speed) {
164 case XFER_UDMA_6:
165 case XFER_UDMA_4:
166 case XFER_UDMA_2: u_speed = 2 << (drive->dn * 4); break;
167 case XFER_UDMA_5:
168 case XFER_UDMA_3:
169 case XFER_UDMA_1: u_speed = 1 << (drive->dn * 4); break;
170 case XFER_UDMA_0: u_speed = 0 << (drive->dn * 4); break;
171 break;
172 case XFER_MW_DMA_2:
173 case XFER_MW_DMA_1:
174 case XFER_SW_DMA_2:
175 break;
176 case XFER_PIO_4:
177 case XFER_PIO_3:
178 case XFER_PIO_2:
179 case XFER_PIO_1:
180 case XFER_PIO_0:
181 break;
182 default:
183 return -1;
184 }
185
186 if (speed >= XFER_UDMA_0) {
187 if (!(reg48 & u_flag))
188 pci_write_config_byte(dev, 0x48, reg48 | u_flag);
189 if (speed >= XFER_UDMA_5) {
190 pci_write_config_byte(dev, 0x55, (u8) reg55|w_flag);
191 } else {
192 pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag);
193 }
194
195 if ((reg4a & a_speed) != u_speed)
196 pci_write_config_word(dev, 0x4a, (reg4a & ~a_speed) | u_speed);
197 if (speed > XFER_UDMA_2) {
198 if (!(reg54 & v_flag))
199 pci_write_config_byte(dev, 0x54, reg54 | v_flag);
200 } else
201 pci_write_config_byte(dev, 0x54, reg54 & ~v_flag);
202 } else {
203 if (reg48 & u_flag)
204 pci_write_config_byte(dev, 0x48, reg48 & ~u_flag);
205 if (reg4a & a_speed)
206 pci_write_config_word(dev, 0x4a, reg4a & ~a_speed);
207 if (reg54 & v_flag)
208 pci_write_config_byte(dev, 0x54, reg54 & ~v_flag);
209 if (reg55 & w_flag)
210 pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag);
211 }
212 it8213_tuneproc(drive, it8213_dma_2_pio(speed));
213 return ide_config_drive_speed(drive, speed);
214}
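The per-drive masks above all scale with drive->dn; worked out for the slave device (drive->dn == 1), with register roles inferred from the code paths rather than a datasheet:

	/*
	 * u_flag  = 1    << 1       = 0x02   UDMA enable bit in reg 0x48
	 * a_speed = 3    << (1 * 4) = 0x30   UDMA timing field in reg 0x4a
	 * v_flag  = 0x01 << 1       = 0x02   set in 0x54 above UDMA2
	 * w_flag  = 0x10 << 1       = 0x20   set in 0x55 for UDMA5/6
	 */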
215
216/*
217 * config_chipset_for_dma - configure for DMA
218 * @drive: drive to configure
219 *
220 * Called by the IDE layer when it wants the timings set up.
221 */
222
223static int config_chipset_for_dma (ide_drive_t *drive)
224{
225 u8 speed = ide_dma_speed(drive, it8213_ratemask(drive));
226
227 if (!speed)
228 return 0;
229
230 it8213_tune_chipset(drive, speed);
231
232 return ide_dma_enable(drive);
233}
234
235/**
236 * it8213_configure_drive_for_dma - set up for DMA transfers
237 * @drive: drive we are going to set up
238 *
239 * Set up the drive for DMA, tune the controller and drive as
240 * required. If the drive isn't suitable for DMA or we hit
241 * other problems then we will drop down to PIO and set up
242 * PIO appropriately
243 */
244
245static int it8213_config_drive_for_dma (ide_drive_t *drive)
246{
247 ide_hwif_t *hwif = drive->hwif;
248
249 if (ide_use_dma(drive)) {
250 if (config_chipset_for_dma(drive))
251 return hwif->ide_dma_on(drive);
252 }
253
254 hwif->speedproc(drive, XFER_PIO_0
255 + ide_get_best_pio_mode(drive, 255, 4, NULL));
256
257 return hwif->ide_dma_off_quietly(drive);
258}
259
260/**
261 * init_hwif_it8213 - set up hwif structs
262 * @hwif: interface to set up
263 *
 264 * We do the basic setup of the interface structure. The IT8213
 265 * requires several custom handlers so we override the default
 266 * ide DMA handlers appropriately.
267 */
268
269static void __devinit init_hwif_it8213(ide_hwif_t *hwif)
270{
271 u8 reg42h = 0, ata66 = 0;
272
273 hwif->speedproc = &it8213_tune_chipset;
274 hwif->tuneproc = &it8213_tuneproc;
275
276 hwif->autodma = 0;
277
278 hwif->drives[0].autotune = 1;
279 hwif->drives[1].autotune = 1;
280
281 if (!hwif->dma_base)
282 return;
283
284 hwif->atapi_dma = 1;
285 hwif->ultra_mask = 0x7f;
286 hwif->mwdma_mask = 0x06;
287 hwif->swdma_mask = 0x04;
288
289 pci_read_config_byte(hwif->pci_dev, 0x42, &reg42h);
290 ata66 = (reg42h & 0x02) ? 0 : 1;
291
292 hwif->ide_dma_check = &it8213_config_drive_for_dma;
293 if (!(hwif->udma_four))
294 hwif->udma_four = ata66;
295
296 /*
297 * The BIOS often doesn't set up DMA on this controller
298 * so we always do it.
299 */
300 if (!noautodma)
301 hwif->autodma = 1;
302
303 hwif->drives[0].autodma = hwif->autodma;
304 hwif->drives[1].autodma = hwif->autodma;
305}
306
307
308#define DECLARE_ITE_DEV(name_str) \
309 { \
310 .name = name_str, \
311 .init_hwif = init_hwif_it8213, \
312 .channels = 1, \
313 .autodma = AUTODMA, \
314 .enablebits = {{0x41,0x80,0x80}}, \
315 .bootable = ON_BOARD, \
316 }
317
318static ide_pci_device_t it8213_chipsets[] __devinitdata = {
319 /* 0 */ DECLARE_ITE_DEV("IT8213"),
320};
321
322
323/**
324 * it8213_init_one - pci layer discovery entry
325 * @dev: PCI device
326 * @id: ident table entry
327 *
328 * Called by the PCI code when it finds an ITE8213 controller. As
329 * this device follows the standard interfaces we can use the
330 * standard helper functions to do almost all the work for us.
331 */
332
333static int __devinit it8213_init_one(struct pci_dev *dev, const struct pci_device_id *id)
334{
335 ide_setup_pci_device(dev, &it8213_chipsets[id->driver_data]);
336 return 0;
337}
338
339
340static struct pci_device_id it8213_pci_tbl[] = {
341 { PCI_VENDOR_ID_ITE, 0x8213, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
342 { 0, },
343};
344
345MODULE_DEVICE_TABLE(pci, it8213_pci_tbl);
346
347static struct pci_driver driver = {
348 .name = "ITE8213_IDE",
349 .id_table = it8213_pci_tbl,
350 .probe = it8213_init_one,
351};
352
353static int __init it8213_ide_init(void)
354{
355 return ide_pci_register_driver(&driver);
356}
357
358module_init(it8213_ide_init);
359
360MODULE_AUTHOR("Jack Lee, Alan Cox");
361MODULE_DESCRIPTION("PCI driver module for the ITE 8213");
362MODULE_LICENSE("GPL");
diff --git a/drivers/ide/pci/pdc202xx_new.c b/drivers/ide/pci/pdc202xx_new.c
index 77a9aaa7dab9..236a03144a27 100644
--- a/drivers/ide/pci/pdc202xx_new.c
+++ b/drivers/ide/pci/pdc202xx_new.c
@@ -92,26 +92,6 @@ static u8 pdcnew_ratemask(ide_drive_t *drive)
92 return mode; 92 return mode;
93} 93}
94 94
95static int check_in_drive_lists(ide_drive_t *drive, const char **list)
96{
97 struct hd_driveid *id = drive->id;
98
99 if (pdc_quirk_drives == list) {
100 while (*list) {
101 if (strstr(id->model, *list++)) {
102 return 2;
103 }
104 }
105 } else {
106 while (*list) {
107 if (!strcmp(*list++,id->model)) {
108 return 1;
109 }
110 }
111 }
112 return 0;
113}
114
115/** 95/**
116 * get_indexed_reg - Get indexed register 96 * get_indexed_reg - Get indexed register
117 * @hwif: for the port address 97 * @hwif: for the port address
@@ -249,13 +229,6 @@ static int pdcnew_tune_chipset(ide_drive_t *drive, u8 speed)
249 return err; 229 return err;
250} 230}
251 231
252/* 0 1 2 3 4 5 6 7 8
253 * 960, 480, 390, 300, 240, 180, 120, 90, 60
254 * 180, 150, 120, 90, 60
255 * DMA_Speed
256 * 180, 120, 90, 90, 90, 60, 30
257 * 11, 5, 4, 3, 2, 1, 0
258 */
259static void pdcnew_tune_drive(ide_drive_t *drive, u8 pio) 232static void pdcnew_tune_drive(ide_drive_t *drive, u8 pio)
260{ 233{
261 pio = ide_get_best_pio_mode(drive, pio, 4, NULL); 234 pio = ide_get_best_pio_mode(drive, pio, 4, NULL);
@@ -313,12 +286,10 @@ static int pdcnew_config_drive_xfer_rate(ide_drive_t *drive)
313 286
314 drive->init_speed = 0; 287 drive->init_speed = 0;
315 288
316 if (id && (id->capability & 1) && drive->autodma) { 289 if ((id->capability & 1) && drive->autodma) {
317 290
318 if (ide_use_dma(drive)) { 291 if (ide_use_dma(drive) && config_chipset_for_dma(drive))
319 if (config_chipset_for_dma(drive)) 292 return hwif->ide_dma_on(drive);
320 return hwif->ide_dma_on(drive);
321 }
322 293
323 goto fast_ata_pio; 294 goto fast_ata_pio;
324 295
@@ -333,21 +304,12 @@ fast_ata_pio:
333 304
334static int pdcnew_quirkproc(ide_drive_t *drive) 305static int pdcnew_quirkproc(ide_drive_t *drive)
335{ 306{
336 return check_in_drive_lists(drive, pdc_quirk_drives); 307 const char **list, *model = drive->id->model;
337}
338 308
339static int pdcnew_ide_dma_lostirq(ide_drive_t *drive) 309 for (list = pdc_quirk_drives; *list != NULL; list++)
340{ 310 if (strstr(model, *list) != NULL)
341 if (HWIF(drive)->resetproc != NULL) 311 return 2;
342 HWIF(drive)->resetproc(drive); 312 return 0;
343 return __ide_dma_lostirq(drive);
344}
345
346static int pdcnew_ide_dma_timeout(ide_drive_t *drive)
347{
348 if (HWIF(drive)->resetproc != NULL)
349 HWIF(drive)->resetproc(drive);
350 return __ide_dma_timeout(drive);
351} 313}
352 314
353static void pdcnew_reset(ide_drive_t *drive) 315static void pdcnew_reset(ide_drive_t *drive)
@@ -599,8 +561,6 @@ static void __devinit init_hwif_pdc202new(ide_hwif_t *hwif)
599 hwif->err_stops_fifo = 1; 561 hwif->err_stops_fifo = 1;
600 562
601 hwif->ide_dma_check = &pdcnew_config_drive_xfer_rate; 563 hwif->ide_dma_check = &pdcnew_config_drive_xfer_rate;
602 hwif->ide_dma_lostirq = &pdcnew_ide_dma_lostirq;
603 hwif->ide_dma_timeout = &pdcnew_ide_dma_timeout;
604 564
605 if (!hwif->udma_four) 565 if (!hwif->udma_four)
606 hwif->udma_four = pdcnew_cable_detect(hwif) ? 0 : 1; 566 hwif->udma_four = pdcnew_cable_detect(hwif) ? 0 : 1;
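The pdcnew_quirkproc() rewrite above folds the old two-way check_in_drive_lists() into a single loop: scan a NULL-terminated list of model-name substrings with strstr() and return 2 on the first hit. A standalone sketch of that loop follows; the list entries here are made up for illustration, the real pdc_quirk_drives table lives in the driver.

/*
 * Standalone sketch of the simplified quirkproc above. Returns 2 when the
 * drive's model string contains any entry of a NULL-terminated list.
 */
#include <stdio.h>
#include <string.h>

static const char *pdc_quirk_drives[] = {
        "EXAMPLE DRIVE A",		/* hypothetical entries */
        "EXAMPLE DRIVE B",
        NULL
};

static int quirkproc(const char *model)
{
        const char **list;

        for (list = pdc_quirk_drives; *list != NULL; list++)
                if (strstr(model, *list) != NULL)
                        return 2;
        return 0;
}

int main(void)
{
        printf("%d\n", quirkproc("EXAMPLE DRIVE B 1234"));	/* prints 2 */
        printf("%d\n", quirkproc("SOME OTHER DRIVE"));		/* prints 0 */
        return 0;
}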
diff --git a/drivers/ide/pci/pdc202xx_old.c b/drivers/ide/pci/pdc202xx_old.c
index 143239c093d5..730e8d1ec2f5 100644
--- a/drivers/ide/pci/pdc202xx_old.c
+++ b/drivers/ide/pci/pdc202xx_old.c
@@ -123,26 +123,6 @@ static u8 pdc202xx_ratemask (ide_drive_t *drive)
123 return mode; 123 return mode;
124} 124}
125 125
126static int check_in_drive_lists (ide_drive_t *drive, const char **list)
127{
128 struct hd_driveid *id = drive->id;
129
130 if (pdc_quirk_drives == list) {
131 while (*list) {
132 if (strstr(id->model, *list++)) {
133 return 2;
134 }
135 }
136 } else {
137 while (*list) {
138 if (!strcmp(*list++,id->model)) {
139 return 1;
140 }
141 }
142 }
143 return 0;
144}
145
146static int pdc202xx_tune_chipset (ide_drive_t *drive, u8 xferspeed) 126static int pdc202xx_tune_chipset (ide_drive_t *drive, u8 xferspeed)
147{ 127{
148 ide_hwif_t *hwif = HWIF(drive); 128 ide_hwif_t *hwif = HWIF(drive);
@@ -377,7 +357,12 @@ fast_ata_pio:
377 357
378static int pdc202xx_quirkproc (ide_drive_t *drive) 358static int pdc202xx_quirkproc (ide_drive_t *drive)
379{ 359{
380 return ((int) check_in_drive_lists(drive, pdc_quirk_drives)); 360 const char **list, *model = drive->id->model;
361
362 for (list = pdc_quirk_drives; *list != NULL; list++)
363 if (strstr(model, *list) != NULL)
364 return 2;
365 return 0;
381} 366}
382 367
383static void pdc202xx_old_ide_dma_start(ide_drive_t *drive) 368static void pdc202xx_old_ide_dma_start(ide_drive_t *drive)
diff --git a/drivers/ide/pci/piix.c b/drivers/ide/pci/piix.c
index edb37f3d558d..52cfc2ac22c1 100644
--- a/drivers/ide/pci/piix.c
+++ b/drivers/ide/pci/piix.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * linux/drivers/ide/pci/piix.c Version 0.45 May 12, 2006 2 * linux/drivers/ide/pci/piix.c Version 0.46 December 3, 2006
3 * 3 *
4 * Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer 4 * Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer
5 * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org> 5 * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
@@ -163,7 +163,7 @@ static u8 piix_ratemask (ide_drive_t *drive)
163 * if the drive cannot see an 80pin cable. 163 * if the drive cannot see an 80pin cable.
164 */ 164 */
165 if (!eighty_ninty_three(drive)) 165 if (!eighty_ninty_three(drive))
166 mode = min(mode, (u8)1); 166 mode = min_t(u8, mode, 1);
167 return mode; 167 return mode;
168} 168}
169 169
@@ -216,7 +216,7 @@ static void piix_tune_drive (ide_drive_t *drive, u8 pio)
216{ 216{
217 ide_hwif_t *hwif = HWIF(drive); 217 ide_hwif_t *hwif = HWIF(drive);
218 struct pci_dev *dev = hwif->pci_dev; 218 struct pci_dev *dev = hwif->pci_dev;
219 int is_slave = (&hwif->drives[1] == drive); 219 int is_slave = drive->dn & 1;
220 int master_port = hwif->channel ? 0x42 : 0x40; 220 int master_port = hwif->channel ? 0x42 : 0x40;
221 int slave_port = 0x44; 221 int slave_port = 0x44;
222 unsigned long flags; 222 unsigned long flags;
@@ -225,7 +225,7 @@ static void piix_tune_drive (ide_drive_t *drive, u8 pio)
225 static DEFINE_SPINLOCK(tune_lock); 225 static DEFINE_SPINLOCK(tune_lock);
226 int control = 0; 226 int control = 0;
227 227
228 /* ISP RTC */ 228 /* ISP RTC */
229 static const u8 timings[][2]= { 229 static const u8 timings[][2]= {
230 { 0, 0 }, 230 { 0, 0 },
231 { 0, 0 }, 231 { 0, 0 },
@@ -233,7 +233,7 @@ static void piix_tune_drive (ide_drive_t *drive, u8 pio)
233 { 2, 1 }, 233 { 2, 1 },
234 { 2, 3 }, }; 234 { 2, 3 }, };
235 235
236 pio = ide_get_best_pio_mode(drive, pio, 5, NULL); 236 pio = ide_get_best_pio_mode(drive, pio, 4, NULL);
237 237
238 /* 238 /*
239 * Master vs slave is synchronized above us but the slave register is 239 * Master vs slave is synchronized above us but the slave register is
@@ -243,25 +243,24 @@ static void piix_tune_drive (ide_drive_t *drive, u8 pio)
243 spin_lock_irqsave(&tune_lock, flags); 243 spin_lock_irqsave(&tune_lock, flags);
244 pci_read_config_word(dev, master_port, &master_data); 244 pci_read_config_word(dev, master_port, &master_data);
245 245
246 if (pio >= 2) 246 if (pio > 1)
247 control |= 1; /* Programmable timing on */ 247 control |= 1; /* Programmable timing on */
248 if (drive->media == ide_disk) 248 if (drive->media == ide_disk)
249 control |= 4; /* Prefetch, post write */ 249 control |= 4; /* Prefetch, post write */
250 if (pio >= 3) 250 if (pio > 2)
251 control |= 2; /* IORDY */ 251 control |= 2; /* IORDY */
252 if (is_slave) { 252 if (is_slave) {
253 master_data = master_data | 0x4000; 253 master_data |= 0x4000;
254 master_data &= ~0x0070;
254 if (pio > 1) { 255 if (pio > 1) {
255 /* enable PPE, IE and TIME */ 256 /* enable PPE, IE and TIME */
256 master_data = master_data | (control << 4); 257 master_data = master_data | (control << 4);
257 } else {
258 master_data &= ~0x0070;
259 } 258 }
260 pci_read_config_byte(dev, slave_port, &slave_data); 259 pci_read_config_byte(dev, slave_port, &slave_data);
261 slave_data = slave_data & (hwif->channel ? 0x0f : 0xf0); 260 slave_data = slave_data & (hwif->channel ? 0x0f : 0xf0);
262 slave_data = slave_data | (((timings[pio][0] << 2) | timings[pio][1]) << (hwif->channel ? 4 : 0)); 261 slave_data = slave_data | (((timings[pio][0] << 2) | timings[pio][1]) << (hwif->channel ? 4 : 0));
263 } else { 262 } else {
264 master_data = master_data & 0xccf8; 263 master_data &= ~0x3307;
265 if (pio > 1) { 264 if (pio > 1) {
266 /* enable PPE, IE and TIME */ 265 /* enable PPE, IE and TIME */
267 master_data = master_data | control; 266 master_data = master_data | control;
@@ -539,13 +538,19 @@ static ide_pci_device_t piix_pci_info[] __devinitdata = {
539 /* 0 */ DECLARE_PIIX_DEV("PIIXa"), 538 /* 0 */ DECLARE_PIIX_DEV("PIIXa"),
540 /* 1 */ DECLARE_PIIX_DEV("PIIXb"), 539 /* 1 */ DECLARE_PIIX_DEV("PIIXb"),
541 540
542 { /* 2 */ 541 /* 2 */
542 { /*
543 * MPIIX actually has only a single IDE channel mapped to
544 * the primary or secondary ports depending on the value
545 * of the bit 14 of the IDETIM register at offset 0x6c
546 */
543 .name = "MPIIX", 547 .name = "MPIIX",
544 .init_hwif = init_hwif_piix, 548 .init_hwif = init_hwif_piix,
545 .channels = 2, 549 .channels = 2,
546 .autodma = NODMA, 550 .autodma = NODMA,
547 .enablebits = {{0x6D,0x80,0x80}, {0x6F,0x80,0x80}}, 551 .enablebits = {{0x6d,0xc0,0x80}, {0x6d,0xc0,0xc0}},
548 .bootable = ON_BOARD, 552 .bootable = ON_BOARD,
553 .flags = IDEPCI_FLAG_ISA_PORTS
549 }, 554 },
550 555
551 /* 3 */ DECLARE_PIIX_DEV("PIIX3"), 556 /* 3 */ DECLARE_PIIX_DEV("PIIX3"),
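The piix_tune_drive() hunks above compute a small control nibble (bit 0: programmable timing for PIO > 1, bit 2: prefetch/post-write for disks, bit 1: IORDY for PIO > 2) and fold it, together with the ISP/RTC timing pair, into the master timing register after clearing the old fields with ~0x3307. The register arithmetic can be modeled in a standalone sketch (no PCI access; values only illustrate the bit layout shown in the diff):

/*
 * Standalone sketch of the timing math in the rewritten piix_tune_drive().
 */
#include <stdio.h>

/* ISP (IORDY sample point) / RTC (recovery time) pairs, indexed by PIO mode */
static const unsigned char timings[][2] = {
        { 0, 0 }, { 0, 0 }, { 1, 0 }, { 2, 1 }, { 2, 3 },
};

static unsigned short tune_master(unsigned short master_data, int pio, int is_disk)
{
        int control = 0;

        if (pio > 1)
                control |= 1;		/* programmable timing on */
        if (is_disk)
                control |= 4;		/* prefetch, post write */
        if (pio > 2)
                control |= 2;		/* IORDY */

        master_data &= ~0x3307;		/* clear timing and control fields */
        if (pio > 1)
                master_data |= control;
        master_data |= (timings[pio][0] << 12) | (timings[pio][1] << 8);
        return master_data;
}

int main(void)
{
        /* PIO 4 on a disk, starting from zero: prints 0x2307 */
        printf("0x%04x\n", tune_master(0, 4, 1));
        return 0;
}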
diff --git a/drivers/ide/pci/slc90e66.c b/drivers/ide/pci/slc90e66.c
index 90e79c0844d2..2663ddbd9b67 100644
--- a/drivers/ide/pci/slc90e66.c
+++ b/drivers/ide/pci/slc90e66.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * linux/drivers/ide/pci/slc90e66.c Version 0.12 May 12, 2006 2 * linux/drivers/ide/pci/slc90e66.c Version 0.13 December 30, 2006
3 * 3 *
4 * Copyright (C) 2000-2002 Andre Hedrick <andre@linux-ide.org> 4 * Copyright (C) 2000-2002 Andre Hedrick <andre@linux-ide.org>
5 * Copyright (C) 2006 MontaVista Software, Inc. <source@mvista.com> 5 * Copyright (C) 2006 MontaVista Software, Inc. <source@mvista.com>
@@ -26,7 +26,7 @@ static u8 slc90e66_ratemask (ide_drive_t *drive)
26 u8 mode = 2; 26 u8 mode = 2;
27 27
28 if (!eighty_ninty_three(drive)) 28 if (!eighty_ninty_three(drive))
29 mode = min(mode, (u8)1); 29 mode = min_t(u8, mode, 1);
30 return mode; 30 return mode;
31} 31}
32 32
@@ -65,36 +65,47 @@ static void slc90e66_tune_drive (ide_drive_t *drive, u8 pio)
65{ 65{
66 ide_hwif_t *hwif = HWIF(drive); 66 ide_hwif_t *hwif = HWIF(drive);
67 struct pci_dev *dev = hwif->pci_dev; 67 struct pci_dev *dev = hwif->pci_dev;
68 int is_slave = (&hwif->drives[1] == drive); 68 int is_slave = drive->dn & 1;
69 int master_port = hwif->channel ? 0x42 : 0x40; 69 int master_port = hwif->channel ? 0x42 : 0x40;
70 int slave_port = 0x44; 70 int slave_port = 0x44;
71 unsigned long flags; 71 unsigned long flags;
72 u16 master_data; 72 u16 master_data;
73 u8 slave_data; 73 u8 slave_data;
74 /* ISP RTC */ 74 int control = 0;
75 /* ISP RTC */
75 static const u8 timings[][2]= { 76 static const u8 timings[][2]= {
76 { 0, 0 }, 77 { 0, 0 },
77 { 0, 0 }, 78 { 0, 0 },
78 { 1, 0 }, 79 { 1, 0 },
79 { 2, 1 }, 80 { 2, 1 },
80 { 2, 3 }, }; 81 { 2, 3 }, };
81 82
82 pio = ide_get_best_pio_mode(drive, pio, 5, NULL); 83 pio = ide_get_best_pio_mode(drive, pio, 4, NULL);
83 spin_lock_irqsave(&ide_lock, flags); 84 spin_lock_irqsave(&ide_lock, flags);
84 pci_read_config_word(dev, master_port, &master_data); 85 pci_read_config_word(dev, master_port, &master_data);
86
87 if (pio > 1)
88 control |= 1; /* Programmable timing on */
89 if (drive->media == ide_disk)
90 control |= 4; /* Prefetch, post write */
91 if (pio > 2)
92 control |= 2; /* IORDY */
85 if (is_slave) { 93 if (is_slave) {
86 master_data = master_data | 0x4000; 94 master_data |= 0x4000;
87 if (pio > 1) 95 master_data &= ~0x0070;
96 if (pio > 1) {
88 /* enable PPE, IE and TIME */ 97 /* enable PPE, IE and TIME */
89 master_data = master_data | 0x0070; 98 master_data = master_data | (control << 4);
99 }
90 pci_read_config_byte(dev, slave_port, &slave_data); 100 pci_read_config_byte(dev, slave_port, &slave_data);
91 slave_data = slave_data & (hwif->channel ? 0x0f : 0xf0); 101 slave_data = slave_data & (hwif->channel ? 0x0f : 0xf0);
92 slave_data = slave_data | (((timings[pio][0] << 2) | timings[pio][1]) << (hwif->channel ? 4 : 0)); 102 slave_data = slave_data | (((timings[pio][0] << 2) | timings[pio][1]) << (hwif->channel ? 4 : 0));
93 } else { 103 } else {
94 master_data = master_data & 0xccf8; 104 master_data &= ~0x3307;
95 if (pio > 1) 105 if (pio > 1) {
96 /* enable PPE, IE and TIME */ 106 /* enable PPE, IE and TIME */
97 master_data = master_data | 0x0007; 107 master_data = master_data | control;
108 }
98 master_data = master_data | (timings[pio][0] << 12) | (timings[pio][1] << 8); 109 master_data = master_data | (timings[pio][0] << 12) | (timings[pio][1] << 8);
99 } 110 }
100 pci_write_config_word(dev, master_port, master_data); 111 pci_write_config_word(dev, master_port, master_data);
@@ -173,7 +184,7 @@ static int slc90e66_config_drive_xfer_rate (ide_drive_t *drive)
173 184
174 drive->init_speed = 0; 185 drive->init_speed = 0;
175 186
176 if (id && (id->capability & 1) && drive->autodma) { 187 if ((id->capability & 1) && drive->autodma) {
177 188
178 if (ide_use_dma(drive) && slc90e66_config_drive_for_dma(drive)) 189 if (ide_use_dma(drive) && slc90e66_config_drive_for_dma(drive))
179 return hwif->ide_dma_on(drive); 190 return hwif->ide_dma_on(drive);
@@ -201,7 +212,7 @@ static void __devinit init_hwif_slc90e66 (ide_hwif_t *hwif)
201 hwif->irq = hwif->channel ? 15 : 14; 212 hwif->irq = hwif->channel ? 15 : 14;
202 213
203 hwif->speedproc = &slc90e66_tune_chipset; 214 hwif->speedproc = &slc90e66_tune_chipset;
204 hwif->tuneproc = &slc90e66_tune_drive; 215 hwif->tuneproc = &slc90e66_tune_drive;
205 216
206 pci_read_config_byte(hwif->pci_dev, 0x47, &reg47); 217 pci_read_config_byte(hwif->pci_dev, 0x47, &reg47);
207 218
@@ -213,14 +224,16 @@ static void __devinit init_hwif_slc90e66 (ide_hwif_t *hwif)
213 224
214 hwif->atapi_dma = 1; 225 hwif->atapi_dma = 1;
215 hwif->ultra_mask = 0x1f; 226 hwif->ultra_mask = 0x1f;
216 hwif->mwdma_mask = 0x07; 227 hwif->mwdma_mask = 0x06;
217 hwif->swdma_mask = 0x07; 228 hwif->swdma_mask = 0x04;
218 229
219 if (!(hwif->udma_four)) 230 if (!hwif->udma_four) {
220 /* bit[0(1)]: 0:80, 1:40 */ 231 /* bit[0(1)]: 0:80, 1:40 */
221 hwif->udma_four = (reg47 & mask) ? 0 : 1; 232 hwif->udma_four = (reg47 & mask) ? 0 : 1;
233 }
222 234
223 hwif->ide_dma_check = &slc90e66_config_drive_xfer_rate; 235 hwif->ide_dma_check = &slc90e66_config_drive_xfer_rate;
236
224 if (!noautodma) 237 if (!noautodma)
225 hwif->autodma = 1; 238 hwif->autodma = 1;
226 hwif->drives[0].autodma = hwif->autodma; 239 hwif->drives[0].autodma = hwif->autodma;
diff --git a/drivers/ide/pci/tc86c001.c b/drivers/ide/pci/tc86c001.c
new file mode 100644
index 000000000000..2ad72bbda342
--- /dev/null
+++ b/drivers/ide/pci/tc86c001.c
@@ -0,0 +1,309 @@
1/*
2 * drivers/ide/pci/tc86c001.c Version 1.00 Dec 12, 2006
3 *
4 * Copyright (C) 2002 Toshiba Corporation
5 * Copyright (C) 2005-2006 MontaVista Software, Inc. <source@mvista.com>
6 *
7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any
9 * warranty of any kind, whether express or implied.
10 */
11
12#include <linux/types.h>
13#include <linux/pci.h>
14#include <linux/ide.h>
15
16static inline u8 tc86c001_ratemask(ide_drive_t *drive)
17{
18 return eighty_ninty_three(drive) ? 2 : 1;
19}
20
21static int tc86c001_tune_chipset(ide_drive_t *drive, u8 speed)
22{
23 ide_hwif_t *hwif = HWIF(drive);
24 unsigned long scr_port = hwif->config_data + (drive->dn ? 0x02 : 0x00);
25 u16 mode, scr = hwif->INW(scr_port);
26
27 speed = ide_rate_filter(tc86c001_ratemask(drive), speed);
28
29 switch (speed) {
30 case XFER_UDMA_4: mode = 0x00c0; break;
31 case XFER_UDMA_3: mode = 0x00b0; break;
32 case XFER_UDMA_2: mode = 0x00a0; break;
33 case XFER_UDMA_1: mode = 0x0090; break;
34 case XFER_UDMA_0: mode = 0x0080; break;
35 case XFER_MW_DMA_2: mode = 0x0070; break;
36 case XFER_MW_DMA_1: mode = 0x0060; break;
37 case XFER_MW_DMA_0: mode = 0x0050; break;
38 case XFER_PIO_4: mode = 0x0400; break;
39 case XFER_PIO_3: mode = 0x0300; break;
40 case XFER_PIO_2: mode = 0x0200; break;
41 case XFER_PIO_1: mode = 0x0100; break;
42 case XFER_PIO_0:
43 default: mode = 0x0000; break;
44 }
45
46 scr &= (speed < XFER_MW_DMA_0) ? 0xf8ff : 0xff0f;
47 scr |= mode;
48 hwif->OUTW(scr, scr_port);
49
50 return ide_config_drive_speed(drive, speed);
51}
52
53static void tc86c001_tune_drive(ide_drive_t *drive, u8 pio)
54{
55 pio = ide_get_best_pio_mode(drive, pio, 4, NULL);
56 (void) tc86c001_tune_chipset(drive, XFER_PIO_0 + pio);
57}
58
59/*
60 * HACKITY HACK
61 *
62 * This is a workaround for limitation 5 of the TC86C001 IDE controller:
63 * if a DMA transfer terminates prematurely, the controller leaves the device's
64 * interrupt request (INTRQ) pending and does not generate a PCI interrupt (or
65 * set the interrupt bit in the DMA status register), thus no PCI interrupt
66 * will occur until a DMA transfer has been successfully completed.
67 *
68 * We work around this by initiating a dummy, zero-length DMA transfer when
69 * a DMA timeout expires. I found no better way to do this with the current
70 * IDE core than to temporarily replace a higher level driver's timer expiry
71 * handler with our own, falling back to that handler if our recovery fails.
72 */
73static int tc86c001_timer_expiry(ide_drive_t *drive)
74{
75 ide_hwif_t *hwif = HWIF(drive);
76 ide_expiry_t *expiry = ide_get_hwifdata(hwif);
77 ide_hwgroup_t *hwgroup = HWGROUP(drive);
78 u8 dma_stat = hwif->INB(hwif->dma_status);
79
80 /* Restore a higher level driver's expiry handler first. */
81 hwgroup->expiry = expiry;
82
83 if ((dma_stat & 5) == 1) { /* DMA active and no interrupt */
84 unsigned long sc_base = hwif->config_data;
85 unsigned long twcr_port = sc_base + (drive->dn ? 0x06 : 0x04);
86 u8 dma_cmd = hwif->INB(hwif->dma_command);
87
88 printk(KERN_WARNING "%s: DMA interrupt possibly stuck, "
89 "attempting recovery...\n", drive->name);
90
91 /* Stop DMA */
92 hwif->OUTB(dma_cmd & ~0x01, hwif->dma_command);
93
94 /* Setup the dummy DMA transfer */
95 hwif->OUTW(0, sc_base + 0x0a); /* Sector Count */
96 hwif->OUTW(0, twcr_port); /* Transfer Word Count 1 or 2 */
97
98 /* Start the dummy DMA transfer */
99 hwif->OUTB(0x00, hwif->dma_command); /* clear R_OR_WCTR for write */
100 hwif->OUTB(0x01, hwif->dma_command); /* set START_STOPBM */
101
102 /*
103 * If an interrupt was pending, it should come through shortly.
104 * If not, a higher level driver's expiry handler should
105 * eventually cause some kind of recovery from the DMA stall.
106 */
107 return WAIT_MIN_SLEEP;
108 }
109
110 /* Chain to the restored expiry handler if DMA wasn't active. */
111 if (likely(expiry != NULL))
112 return expiry(drive);
113
114 /* If there was no handler, "emulate" that for ide_timer_expiry()... */
115 return -1;
116}
117
118static void tc86c001_dma_start(ide_drive_t *drive)
119{
120 ide_hwif_t *hwif = HWIF(drive);
121 ide_hwgroup_t *hwgroup = HWGROUP(drive);
122 unsigned long sc_base = hwif->config_data;
123 unsigned long twcr_port = sc_base + (drive->dn ? 0x06 : 0x04);
124 unsigned long nsectors = hwgroup->rq->nr_sectors;
125
126 /*
127 * We have to manually load the sector count and size into
128 * the appropriate system control registers for DMA to work
129 * with LBA48 and ATAPI devices...
130 */
131 hwif->OUTW(nsectors, sc_base + 0x0a); /* Sector Count */
132 hwif->OUTW(SECTOR_SIZE / 2, twcr_port); /* Transfer Word Count 1/2 */
133
134 /* Install our timeout expiry hook, saving the current handler... */
135 ide_set_hwifdata(hwif, hwgroup->expiry);
136 hwgroup->expiry = &tc86c001_timer_expiry;
137
138 ide_dma_start(drive);
139}
140
141static int tc86c001_busproc(ide_drive_t *drive, int state)
142{
143 ide_hwif_t *hwif = HWIF(drive);
144 unsigned long sc_base = hwif->config_data;
145 u16 scr1;
146
147 /* System Control 1 Register bit 11 (ATA Hard Reset) read */
148 scr1 = hwif->INW(sc_base + 0x00);
149
150 switch (state) {
151 case BUSSTATE_ON:
152 if (!(scr1 & 0x0800))
153 return 0;
154 scr1 &= ~0x0800;
155
156 hwif->drives[0].failures = hwif->drives[1].failures = 0;
157 break;
158 case BUSSTATE_OFF:
159 if (scr1 & 0x0800)
160 return 0;
161 scr1 |= 0x0800;
162
163 hwif->drives[0].failures = hwif->drives[0].max_failures + 1;
164 hwif->drives[1].failures = hwif->drives[1].max_failures + 1;
165 break;
166 default:
167 return -EINVAL;
168 }
169
170 /* System Control 1 Register bit 11 (ATA Hard Reset) write */
171 hwif->OUTW(scr1, sc_base + 0x00);
172 return 0;
173}
174
175static int config_chipset_for_dma(ide_drive_t *drive)
176{
177 u8 speed = ide_dma_speed(drive, tc86c001_ratemask(drive));
178
179 if (!speed)
180 return 0;
181
182 (void) tc86c001_tune_chipset(drive, speed);
183 return ide_dma_enable(drive);
184}
185
186static int tc86c001_config_drive_xfer_rate(ide_drive_t *drive)
187{
188 ide_hwif_t *hwif = HWIF(drive);
189 struct hd_driveid *id = drive->id;
190
191 if ((id->capability & 1) && drive->autodma) {
192
193 if (ide_use_dma(drive) && config_chipset_for_dma(drive))
194 return hwif->ide_dma_on(drive);
195
196 goto fast_ata_pio;
197
198 } else if ((id->capability & 8) || (id->field_valid & 2)) {
199fast_ata_pio:
200 tc86c001_tune_drive(drive, 255);
201 return hwif->ide_dma_off_quietly(drive);
202 }
203 /* IORDY not supported */
204 return 0;
205}
206
207static void __devinit init_hwif_tc86c001(ide_hwif_t *hwif)
208{
209 unsigned long sc_base = pci_resource_start(hwif->pci_dev, 5);
210 u16 scr1 = hwif->INW(sc_base + 0x00);
211
212 /* System Control 1 Register bit 15 (Soft Reset) set */
213 hwif->OUTW(scr1 | 0x8000, sc_base + 0x00);
214
215 /* System Control 1 Register bit 14 (FIFO Reset) set */
216 hwif->OUTW(scr1 | 0x4000, sc_base + 0x00);
217
218 /* System Control 1 Register: reset clear */
219 hwif->OUTW(scr1 & ~0xc000, sc_base + 0x00);
220
221 /* Store the system control register base for convenience... */
222 hwif->config_data = sc_base;
223
224 hwif->tuneproc = &tc86c001_tune_drive;
225 hwif->speedproc = &tc86c001_tune_chipset;
226 hwif->busproc = &tc86c001_busproc;
227
228 hwif->drives[0].autotune = hwif->drives[1].autotune = 1;
229
230 if (!hwif->dma_base)
231 return;
232
233 /*
234 * Sector Count Control Register bits 0 and 1 set:
235 * software sets Sector Count Register for master and slave device
236 */
237 hwif->OUTW(0x0003, sc_base + 0x0c);
238
239 /* Sector Count Register limit */
240 hwif->rqsize = 0xffff;
241
242 hwif->atapi_dma = 1;
243 hwif->ultra_mask = 0x1f;
244 hwif->mwdma_mask = 0x07;
245
246 hwif->ide_dma_check = &tc86c001_config_drive_xfer_rate;
247 hwif->dma_start = &tc86c001_dma_start;
248
249 if (!hwif->udma_four) {
250 /*
251 * System Control 1 Register bit 13 (PDIAGN):
252 * 0=80-pin cable, 1=40-pin cable
253 */
254 scr1 = hwif->INW(sc_base + 0x00);
255 hwif->udma_four = (scr1 & 0x2000) ? 0 : 1;
256 }
257
258 if (!noautodma)
259 hwif->autodma = 1;
260 hwif->drives[0].autodma = hwif->drives[1].autodma = hwif->autodma;
261}
262
263static unsigned int __devinit init_chipset_tc86c001(struct pci_dev *dev,
264 const char *name)
265{
266 int err = pci_request_region(dev, 5, name);
267
268 if (err)
269 printk(KERN_ERR "%s: system control regs already in use\n", name);
270 return err;
271}
272
273static ide_pci_device_t tc86c001_chipset __devinitdata = {
274 .name = "TC86C001",
275 .init_chipset = init_chipset_tc86c001,
276 .init_hwif = init_hwif_tc86c001,
277 .channels = 1,
278 .autodma = AUTODMA,
279 .bootable = OFF_BOARD
280};
281
282static int __devinit tc86c001_init_one(struct pci_dev *dev,
283 const struct pci_device_id *id)
284{
285 return ide_setup_pci_device(dev, &tc86c001_chipset);
286}
287
288static struct pci_device_id tc86c001_pci_tbl[] = {
289 { PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC86C001_IDE,
290 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
291 { 0, }
292};
293MODULE_DEVICE_TABLE(pci, tc86c001_pci_tbl);
294
295static struct pci_driver driver = {
296 .name = "TC86C001",
297 .id_table = tc86c001_pci_tbl,
298 .probe = tc86c001_init_one
299};
300
301static int __init tc86c001_ide_init(void)
302{
303 return ide_pci_register_driver(&driver);
304}
305module_init(tc86c001_ide_init);
306
307MODULE_AUTHOR("MontaVista Software, Inc. <source@mvista.com>");
308MODULE_DESCRIPTION("PCI driver module for TC86C001 IDE");
309MODULE_LICENSE("GPL");
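The core trick in tc86c001.c above is the expiry-handler save/restore: tc86c001_dma_start() stashes the current hwgroup expiry handler in per-hwif data and installs its own, and tc86c001_timer_expiry() restores the saved handler first, then chains to it whenever its DMA recovery does not apply. A standalone sketch of that pattern, with plain function pointers standing in for the kernel types:

/*
 * Standalone sketch of the expiry-hook save/restore used above.
 * Names and types are illustrative stand-ins.
 */
#include <stdio.h>

typedef int (*expiry_t)(void);

static expiry_t saved_expiry;	/* models ide_set_hwifdata()/ide_get_hwifdata() */
static expiry_t expiry;		/* models hwgroup->expiry */

static int higher_level_expiry(void)
{
        puts("higher level expiry handler runs");
        return -1;
}

static int our_expiry(void)
{
        expiry = saved_expiry;		/* restore first, as the driver does */
        /* ...recovery attempt would go here; assume DMA was not stuck... */
        if (expiry)
                return expiry();	/* chain to the restored handler */
        return -1;			/* "emulate" no-handler for the core */
}

static void dma_start(void)
{
        saved_expiry = expiry;		/* save whatever was installed */
        expiry = our_expiry;
}

int main(void)
{
        expiry = higher_level_expiry;
        dma_start();
        return expiry() == -1 ? 0 : 1;	/* timeout fires: chains through */
}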
diff --git a/drivers/ide/pci/via82cxxx.c b/drivers/ide/pci/via82cxxx.c
index a98b4d38b9dd..6fb6e50b8231 100644
--- a/drivers/ide/pci/via82cxxx.c
+++ b/drivers/ide/pci/via82cxxx.c
@@ -78,7 +78,7 @@ static struct via_isa_bridge {
78 u8 rev_max; 78 u8 rev_max;
79 u16 flags; 79 u16 flags;
80} via_isa_bridges[] = { 80} via_isa_bridges[] = {
81 { "cx7000", PCI_DEVICE_ID_VIA_CX700, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, 81 { "cx700", PCI_DEVICE_ID_VIA_CX700, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
82 { "vt8237s", PCI_DEVICE_ID_VIA_8237S, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, 82 { "vt8237s", PCI_DEVICE_ID_VIA_8237S, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
83 { "vt6410", PCI_DEVICE_ID_VIA_6410, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, 83 { "vt6410", PCI_DEVICE_ID_VIA_6410, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
84 { "vt8251", PCI_DEVICE_ID_VIA_8251, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, 84 { "vt8251", PCI_DEVICE_ID_VIA_8251, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
index 695e23904d30..a52c80fe7d3e 100644
--- a/drivers/ide/setup-pci.c
+++ b/drivers/ide/setup-pci.c
@@ -783,10 +783,11 @@ static LIST_HEAD(ide_pci_drivers);
783 * Returns are the same as for pci_register_driver 783 * Returns are the same as for pci_register_driver
784 */ 784 */
785 785
786int __ide_pci_register_driver(struct pci_driver *driver, struct module *module) 786int __ide_pci_register_driver(struct pci_driver *driver, struct module *module,
787 const char *mod_name)
787{ 788{
788 if(!pre_init) 789 if(!pre_init)
789 return __pci_register_driver(driver, module); 790 return __pci_register_driver(driver, module, mod_name);
790 driver->driver.owner = module; 791 driver->driver.owner = module;
791 list_add_tail(&driver->node, &ide_pci_drivers); 792 list_add_tail(&driver->node, &ide_pci_drivers);
792 return 0; 793 return 0;
@@ -862,6 +863,6 @@ void __init ide_scan_pcibus (int scan_direction)
862 { 863 {
863 list_del(l); 864 list_del(l);
864 d = list_entry(l, struct pci_driver, node); 865 d = list_entry(l, struct pci_driver, node);
865 __pci_register_driver(d, d->driver.owner); 866 __pci_register_driver(d, d->driver.owner, d->driver.mod_name);
866 } 867 }
867} 868}
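The setup-pci.c hunk threads the new mod_name argument through both paths of __ide_pci_register_driver(): register immediately once the core is initialised, or queue the driver on a list that ide_scan_pcibus() drains later. A standalone sketch of that deferred-registration pattern (the real code keeps FIFO order via list_add_tail; a LIFO stack keeps this sketch short):

/*
 * Standalone sketch of deferred driver registration. The list and the
 * "registration" step are plain userspace stand-ins.
 */
#include <stdio.h>

struct driver {
        const char *name;
        struct driver *next;
};

static int pre_init = 1;
static struct driver *pending;

static int do_register(struct driver *d)
{
        printf("registering %s\n", d->name);
        return 0;
}

static int register_driver(struct driver *d)
{
        if (!pre_init)
                return do_register(d);
        d->next = pending;		/* defer until the core has initialised */
        pending = d;
        return 0;
}

static void scan_bus(void)
{
        pre_init = 0;
        while (pending) {		/* drain the deferred list */
                struct driver *d = pending;
                pending = d->next;
                do_register(d);
        }
}

int main(void)
{
        struct driver a = { "early-driver" }, b = { "late-driver" };

        register_driver(&a);		/* queued */
        scan_bus();			/* registers "early-driver" */
        register_driver(&b);		/* registers immediately */
        return 0;
}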
diff --git a/drivers/ieee1394/.gitignore b/drivers/ieee1394/.gitignore
deleted file mode 100644
index 33da10a25323..000000000000
--- a/drivers/ieee1394/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
1oui.c
diff --git a/drivers/ieee1394/Kconfig b/drivers/ieee1394/Kconfig
index e7d56573fe56..b8a47342cd2c 100644
--- a/drivers/ieee1394/Kconfig
+++ b/drivers/ieee1394/Kconfig
@@ -35,20 +35,6 @@ config IEEE1394_VERBOSEDEBUG
35 Say Y if you really want or need the debugging output; everyone 35 Say Y if you really want or need the debugging output; everyone
36 else says N. 36 else says N.
37 37
38config IEEE1394_OUI_DB
39 bool "OUI Database built-in (deprecated)"
40 depends on IEEE1394
41 help
42 If you say Y here, then an OUI list (vendor unique ID's) will be
43 compiled into the ieee1394 module. This doesn't really do much
44 except being able to display the vendor of a hardware node. The
45 downside is that it adds about 300k to the size of the module,
46 or kernel (depending on whether you compile ieee1394 as a
47 module, or static in the kernel).
48
49 This option is not needed for userspace programs like gscanbus
50 to show this information.
51
52config IEEE1394_EXTRA_CONFIG_ROMS 38config IEEE1394_EXTRA_CONFIG_ROMS
53 bool "Build in extra config rom entries for certain functionality" 39 bool "Build in extra config rom entries for certain functionality"
54 depends on IEEE1394 40 depends on IEEE1394
@@ -66,13 +52,6 @@ config IEEE1394_CONFIG_ROM_IP1394
66 with MacOSX and WinXP IP-over-1394), enable this option and the 52 with MacOSX and WinXP IP-over-1394), enable this option and the
67 eth1394 option below. 53 eth1394 option below.
68 54
69config IEEE1394_EXPORT_FULL_API
70 bool "Export all symbols of ieee1394's API (deprecated)"
71 depends on IEEE1394
72 default n
73 help
74 This option will be removed soon. Don't worry, say N.
75
76comment "Device Drivers" 55comment "Device Drivers"
77 depends on IEEE1394 56 depends on IEEE1394
78 57
diff --git a/drivers/ieee1394/Makefile b/drivers/ieee1394/Makefile
index d9650d3d77a0..489c133664d5 100644
--- a/drivers/ieee1394/Makefile
+++ b/drivers/ieee1394/Makefile
@@ -5,9 +5,6 @@
5ieee1394-objs := ieee1394_core.o ieee1394_transactions.o hosts.o \ 5ieee1394-objs := ieee1394_core.o ieee1394_transactions.o hosts.o \
6 highlevel.o csr.o nodemgr.o dma.o iso.o \ 6 highlevel.o csr.o nodemgr.o dma.o iso.o \
7 csr1212.o config_roms.o 7 csr1212.o config_roms.o
8ifdef CONFIG_IEEE1394_OUI_DB
9ieee1394-objs += oui.o
10endif
11 8
12obj-$(CONFIG_IEEE1394) += ieee1394.o 9obj-$(CONFIG_IEEE1394) += ieee1394.o
13obj-$(CONFIG_IEEE1394_PCILYNX) += pcilynx.o 10obj-$(CONFIG_IEEE1394_PCILYNX) += pcilynx.o
@@ -18,10 +15,3 @@ obj-$(CONFIG_IEEE1394_SBP2) += sbp2.o
18obj-$(CONFIG_IEEE1394_DV1394) += dv1394.o 15obj-$(CONFIG_IEEE1394_DV1394) += dv1394.o
19obj-$(CONFIG_IEEE1394_ETH1394) += eth1394.o 16obj-$(CONFIG_IEEE1394_ETH1394) += eth1394.o
20 17
21quiet_cmd_oui2c = OUI2C $@
22 cmd_oui2c = $(CONFIG_SHELL) $(srctree)/$(src)/oui2c.sh < $< > $@
23
24targets := oui.c
25$(obj)/oui.o: $(obj)/oui.c
26$(obj)/oui.c: $(src)/oui.db $(src)/oui2c.sh FORCE
27 $(call if_changed,oui2c)
diff --git a/drivers/ieee1394/csr1212.c b/drivers/ieee1394/csr1212.c
index 586f71e7346a..c28f639823d2 100644
--- a/drivers/ieee1394/csr1212.c
+++ b/drivers/ieee1394/csr1212.c
@@ -47,14 +47,14 @@
47#define __D (1 << CSR1212_KV_TYPE_DIRECTORY) 47#define __D (1 << CSR1212_KV_TYPE_DIRECTORY)
48#define __L (1 << CSR1212_KV_TYPE_LEAF) 48#define __L (1 << CSR1212_KV_TYPE_LEAF)
49static const u_int8_t csr1212_key_id_type_map[0x30] = { 49static const u_int8_t csr1212_key_id_type_map[0x30] = {
50 0, /* Reserved */ 50 __C, /* used by Apple iSight */
51 __D | __L, /* Descriptor */ 51 __D | __L, /* Descriptor */
52 __I | __D | __L, /* Bus_Dependent_Info */ 52 __I | __D | __L, /* Bus_Dependent_Info */
53 __I | __D | __L, /* Vendor */ 53 __I | __D | __L, /* Vendor */
54 __I, /* Hardware_Version */ 54 __I, /* Hardware_Version */
55 0, 0, /* Reserved */ 55 0, 0, /* Reserved */
56 __D | __L, /* Module */ 56 __D | __L | __I, /* Module */
57 0, 0, 0, 0, /* Reserved */ 57 __I, 0, 0, 0, /* used by Apple iSight, Reserved */
58 __I, /* Node_Capabilities */ 58 __I, /* Node_Capabilities */
59 __L, /* EUI_64 */ 59 __L, /* EUI_64 */
60 0, 0, 0, /* Reserved */ 60 0, 0, 0, /* Reserved */
@@ -1234,6 +1234,12 @@ static int csr1212_parse_bus_info_block(struct csr1212_csr *csr)
1234 csr->private); 1234 csr->private);
1235 if (ret != CSR1212_SUCCESS) 1235 if (ret != CSR1212_SUCCESS)
1236 return ret; 1236 return ret;
1237
1238 /* check ROM header's info_length */
1239 if (i == 0 &&
1240 CSR1212_BE32_TO_CPU(csr->cache_head->data[0]) >> 24 !=
1241 bytes_to_quads(csr->bus_info_len) - 1)
1242 return CSR1212_EINVAL;
1237 } 1243 }
1238 1244
1239 bi = (struct csr1212_bus_info_block_img*)csr->cache_head->data; 1245 bi = (struct csr1212_bus_info_block_img*)csr->cache_head->data;
@@ -1250,9 +1256,6 @@ static int csr1212_parse_bus_info_block(struct csr1212_csr *csr)
1250 return ret; 1256 return ret;
1251 } 1257 }
1252 1258
1253 if (bytes_to_quads(csr->bus_info_len - sizeof(csr1212_quad_t)) != bi->length)
1254 return CSR1212_EINVAL;
1255
1256#if 0 1259#if 0
1257 /* Apparently there are so many different wrong implementations of the 1260 /* Apparently there are so many different wrong implementations of the
1258 * CRC algorithm that verifying them is moot. */ 1261 * CRC algorithm that verifying them is moot. */
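The check added to csr1212_parse_bus_info_block() above reads the big-endian ROM header quadlet and compares its top byte (info_length) against the bus info block length in quadlets minus the header quadlet itself. A standalone sketch of that validation:

/*
 * Standalone sketch of the ROM-header info_length check. The byte values
 * in main() are made up; only the header quadlet is inspected.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t be32_to_cpu(const unsigned char *p)
{
        return ((uint32_t)p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
}

static int check_rom_header(const unsigned char *rom, unsigned bus_info_len)
{
        unsigned info_length = be32_to_cpu(rom) >> 24;

        /* bytes_to_quads(bus_info_len) - 1, with 4-byte quadlets */
        if (info_length != bus_info_len / 4 - 1)
                return -1;		/* CSR1212_EINVAL in the driver */
        return 0;
}

int main(void)
{
        /* header quadlets for a 16-byte bus info block: info_length must be 3 */
        unsigned char ok[4]  = { 0x03, 0x31, 0x33, 0x39 };
        unsigned char bad[4] = { 0x04, 0x31, 0x33, 0x39 };

        printf("%d %d\n", check_rom_header(ok, 16), check_rom_header(bad, 16));
        return 0;
}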
diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
index 1084da4d88a9..55d6ae664fd6 100644
--- a/drivers/ieee1394/dv1394.c
+++ b/drivers/ieee1394/dv1394.c
@@ -2255,49 +2255,37 @@ static int dv1394_init(struct ti_ohci *ohci, enum pal_or_ntsc format, enum modes
2255 return 0; 2255 return 0;
2256} 2256}
2257 2257
2258static void dv1394_un_init(struct video_card *video) 2258static void dv1394_remove_host(struct hpsb_host *host)
2259{ 2259{
2260 /* obviously nobody has the driver open at this point */ 2260 struct video_card *video, *tmp_video;
2261 do_dv1394_shutdown(video, 1);
2262 kfree(video);
2263}
2264
2265
2266static void dv1394_remove_host (struct hpsb_host *host)
2267{
2268 struct video_card *video;
2269 unsigned long flags; 2261 unsigned long flags;
2270 int id = host->id; 2262 int found_ohci_card = 0;
2271 2263
2272 /* We only work with the OHCI-1394 driver */
2273 if (strcmp(host->driver->name, OHCI1394_DRIVER_NAME))
2274 return;
2275
2276 /* find the corresponding video_cards */
2277 do { 2264 do {
2278 struct video_card *tmp_vid;
2279
2280 video = NULL; 2265 video = NULL;
2281
2282 spin_lock_irqsave(&dv1394_cards_lock, flags); 2266 spin_lock_irqsave(&dv1394_cards_lock, flags);
2283 list_for_each_entry(tmp_vid, &dv1394_cards, list) { 2267 list_for_each_entry(tmp_video, &dv1394_cards, list) {
2284 if ((tmp_vid->id >> 2) == id) { 2268 if ((tmp_video->id >> 2) == host->id) {
2285 list_del(&tmp_vid->list); 2269 list_del(&tmp_video->list);
2286 video = tmp_vid; 2270 video = tmp_video;
2271 found_ohci_card = 1;
2287 break; 2272 break;
2288 } 2273 }
2289 } 2274 }
2290 spin_unlock_irqrestore(&dv1394_cards_lock, flags); 2275 spin_unlock_irqrestore(&dv1394_cards_lock, flags);
2291 2276
2292 if (video) 2277 if (video) {
2293 dv1394_un_init(video); 2278 do_dv1394_shutdown(video, 1);
2294 } while (video != NULL); 2279 kfree(video);
2280 }
2281 } while (video);
2295 2282
2296 class_device_destroy(hpsb_protocol_class, 2283 if (found_ohci_card)
2297 MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_DV1394 * 16 + (id<<2))); 2284 class_device_destroy(hpsb_protocol_class, MKDEV(IEEE1394_MAJOR,
2285 IEEE1394_MINOR_BLOCK_DV1394 * 16 + (host->id << 2)));
2298} 2286}
2299 2287
2300static void dv1394_add_host (struct hpsb_host *host) 2288static void dv1394_add_host(struct hpsb_host *host)
2301{ 2289{
2302 struct ti_ohci *ohci; 2290 struct ti_ohci *ohci;
2303 int id = host->id; 2291 int id = host->id;
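The rewritten dv1394_remove_host() above takes the cards lock only long enough to find and unlink one matching entry per pass, frees it outside the lock, and loops until nothing matches. A standalone sketch of that teardown loop, with a pthread mutex standing in for the driver's spinlock:

/*
 * Standalone sketch of the lock/unlink/free-outside-the-lock loop.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct card { int id; struct card *next; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct card *cards;

static void remove_host(int host_id)
{
        struct card *video;

        do {
                struct card **pp;

                video = NULL;
                pthread_mutex_lock(&lock);
                for (pp = &cards; *pp; pp = &(*pp)->next) {
                        if (((*pp)->id >> 2) == host_id) {
                                video = *pp;
                                *pp = video->next;	/* unlink under lock */
                                break;
                        }
                }
                pthread_mutex_unlock(&lock);

                if (video) {
                        printf("shutting down card %d\n", video->id);
                        free(video);		/* teardown outside the lock */
                }
        } while (video);
}

int main(void)
{
        int i;

        for (i = 0; i < 3; i++) {		/* three cards on host 0 */
                struct card *c = malloc(sizeof(*c));
                c->id = i; c->next = cards; cards = c;
        }
        remove_host(0);
        return 0;
}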
diff --git a/drivers/ieee1394/hosts.c b/drivers/ieee1394/hosts.c
index ee82a5320bf7..32a130921938 100644
--- a/drivers/ieee1394/hosts.c
+++ b/drivers/ieee1394/hosts.c
@@ -190,14 +190,19 @@ int hpsb_add_host(struct hpsb_host *host)
190{ 190{
191 if (hpsb_default_host_entry(host)) 191 if (hpsb_default_host_entry(host))
192 return -ENOMEM; 192 return -ENOMEM;
193
194 hpsb_add_extra_config_roms(host); 193 hpsb_add_extra_config_roms(host);
195
196 highlevel_add_host(host); 194 highlevel_add_host(host);
197
198 return 0; 195 return 0;
199} 196}
200 197
198void hpsb_resume_host(struct hpsb_host *host)
199{
200 if (host->driver->set_hw_config_rom)
201 host->driver->set_hw_config_rom(host,
202 host->csr.rom->bus_info_data);
203 host->driver->devctl(host, RESET_BUS, SHORT_RESET);
204}
205
201void hpsb_remove_host(struct hpsb_host *host) 206void hpsb_remove_host(struct hpsb_host *host)
202{ 207{
203 host->is_shutdown = 1; 208 host->is_shutdown = 1;
@@ -206,9 +211,7 @@ void hpsb_remove_host(struct hpsb_host *host)
206 flush_scheduled_work(); 211 flush_scheduled_work();
207 212
208 host->driver = &dummy_driver; 213 host->driver = &dummy_driver;
209
210 highlevel_remove_host(host); 214 highlevel_remove_host(host);
211
212 hpsb_remove_extra_config_roms(host); 215 hpsb_remove_extra_config_roms(host);
213 216
214 class_device_unregister(&host->class_dev); 217 class_device_unregister(&host->class_dev);
diff --git a/drivers/ieee1394/hosts.h b/drivers/ieee1394/hosts.h
index d553e38c9543..4bf4fb7f67b7 100644
--- a/drivers/ieee1394/hosts.h
+++ b/drivers/ieee1394/hosts.h
@@ -61,9 +61,9 @@ struct hpsb_host {
61 struct device device; 61 struct device device;
62 struct class_device class_dev; 62 struct class_device class_dev;
63 63
64 int update_config_rom;
65 struct delayed_work delayed_reset; 64 struct delayed_work delayed_reset;
66 unsigned int config_roms; 65 unsigned config_roms:31;
66 unsigned update_config_rom:1;
67 67
68 struct list_head addr_space; 68 struct list_head addr_space;
69 u64 low_addr_space; /* upper bound of physical DMA area */ 69 u64 low_addr_space; /* upper bound of physical DMA area */
@@ -200,7 +200,8 @@ struct hpsb_host_driver {
200struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra, 200struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra,
201 struct device *dev); 201 struct device *dev);
202int hpsb_add_host(struct hpsb_host *host); 202int hpsb_add_host(struct hpsb_host *host);
203void hpsb_remove_host(struct hpsb_host *h); 203void hpsb_resume_host(struct hpsb_host *host);
204void hpsb_remove_host(struct hpsb_host *host);
204 205
205/* Updates the configuration rom image of a host. rom_version must be the 206/* Updates the configuration rom image of a host. rom_version must be the
206 * current version, otherwise it will fail with return value -1. If this 207 * current version, otherwise it will fail with return value -1. If this
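The hosts.h hunk replaces a full int flag plus an unsigned int mask with a 31+1 bitfield pair sharing one word. Exact layout is implementation-defined, but on common ABIs the packed form is half the size, which a standalone sketch can show:

/*
 * Standalone sketch of the struct packing change in hosts.h.
 */
#include <stdio.h>

struct before {
        int update_config_rom;
        unsigned int config_roms;
};

struct after {
        unsigned config_roms:31;
        unsigned update_config_rom:1;
};

int main(void)
{
        printf("before: %zu bytes, after: %zu bytes\n",
               sizeof(struct before), sizeof(struct after));
        return 0;
}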
diff --git a/drivers/ieee1394/ieee1394_core.c b/drivers/ieee1394/ieee1394_core.c
index 9a48ca20d1fd..1521e57e124b 100644
--- a/drivers/ieee1394/ieee1394_core.c
+++ b/drivers/ieee1394/ieee1394_core.c
@@ -1178,6 +1178,7 @@ module_exit(ieee1394_cleanup);
1178/** hosts.c **/ 1178/** hosts.c **/
1179EXPORT_SYMBOL(hpsb_alloc_host); 1179EXPORT_SYMBOL(hpsb_alloc_host);
1180EXPORT_SYMBOL(hpsb_add_host); 1180EXPORT_SYMBOL(hpsb_add_host);
1181EXPORT_SYMBOL(hpsb_resume_host);
1181EXPORT_SYMBOL(hpsb_remove_host); 1182EXPORT_SYMBOL(hpsb_remove_host);
1182EXPORT_SYMBOL(hpsb_update_config_rom_image); 1183EXPORT_SYMBOL(hpsb_update_config_rom_image);
1183 1184
@@ -1195,10 +1196,6 @@ EXPORT_SYMBOL(hpsb_selfid_complete);
1195EXPORT_SYMBOL(hpsb_packet_sent); 1196EXPORT_SYMBOL(hpsb_packet_sent);
1196EXPORT_SYMBOL(hpsb_packet_received); 1197EXPORT_SYMBOL(hpsb_packet_received);
1197EXPORT_SYMBOL_GPL(hpsb_disable_irm); 1198EXPORT_SYMBOL_GPL(hpsb_disable_irm);
1198#ifdef CONFIG_IEEE1394_EXPORT_FULL_API
1199EXPORT_SYMBOL(hpsb_send_phy_config);
1200EXPORT_SYMBOL(hpsb_send_packet_and_wait);
1201#endif
1202 1199
1203/** ieee1394_transactions.c **/ 1200/** ieee1394_transactions.c **/
1204EXPORT_SYMBOL(hpsb_get_tlabel); 1201EXPORT_SYMBOL(hpsb_get_tlabel);
@@ -1229,20 +1226,12 @@ EXPORT_SYMBOL(hpsb_set_hostinfo_key);
1229EXPORT_SYMBOL(hpsb_get_hostinfo_bykey); 1226EXPORT_SYMBOL(hpsb_get_hostinfo_bykey);
1230EXPORT_SYMBOL(hpsb_set_hostinfo); 1227EXPORT_SYMBOL(hpsb_set_hostinfo);
1231EXPORT_SYMBOL(highlevel_host_reset); 1228EXPORT_SYMBOL(highlevel_host_reset);
1232#ifdef CONFIG_IEEE1394_EXPORT_FULL_API
1233EXPORT_SYMBOL(highlevel_add_host);
1234EXPORT_SYMBOL(highlevel_remove_host);
1235#endif
1236 1229
1237/** nodemgr.c **/ 1230/** nodemgr.c **/
1238EXPORT_SYMBOL(hpsb_node_fill_packet); 1231EXPORT_SYMBOL(hpsb_node_fill_packet);
1239EXPORT_SYMBOL(hpsb_node_write); 1232EXPORT_SYMBOL(hpsb_node_write);
1240EXPORT_SYMBOL(__hpsb_register_protocol); 1233EXPORT_SYMBOL(__hpsb_register_protocol);
1241EXPORT_SYMBOL(hpsb_unregister_protocol); 1234EXPORT_SYMBOL(hpsb_unregister_protocol);
1242#ifdef CONFIG_IEEE1394_EXPORT_FULL_API
1243EXPORT_SYMBOL(ieee1394_bus_type);
1244EXPORT_SYMBOL(nodemgr_for_each_host);
1245#endif
1246 1235
1247/** csr.c **/ 1236/** csr.c **/
1248EXPORT_SYMBOL(hpsb_update_config_rom); 1237EXPORT_SYMBOL(hpsb_update_config_rom);
@@ -1287,13 +1276,3 @@ EXPORT_SYMBOL(csr1212_read);
1287EXPORT_SYMBOL(csr1212_parse_keyval); 1276EXPORT_SYMBOL(csr1212_parse_keyval);
1288EXPORT_SYMBOL(_csr1212_read_keyval); 1277EXPORT_SYMBOL(_csr1212_read_keyval);
1289EXPORT_SYMBOL(_csr1212_destroy_keyval); 1278EXPORT_SYMBOL(_csr1212_destroy_keyval);
1290#ifdef CONFIG_IEEE1394_EXPORT_FULL_API
1291EXPORT_SYMBOL(csr1212_create_csr);
1292EXPORT_SYMBOL(csr1212_init_local_csr);
1293EXPORT_SYMBOL(csr1212_new_immediate);
1294EXPORT_SYMBOL(csr1212_associate_keyval);
1295EXPORT_SYMBOL(csr1212_new_string_descriptor_leaf);
1296EXPORT_SYMBOL(csr1212_destroy_csr);
1297EXPORT_SYMBOL(csr1212_generate_csr_image);
1298EXPORT_SYMBOL(csr1212_parse_csr);
1299#endif
diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c
index 61307ca296ae..ba9faeff4793 100644
--- a/drivers/ieee1394/nodemgr.c
+++ b/drivers/ieee1394/nodemgr.c
@@ -41,22 +41,6 @@ struct nodemgr_csr_info {
41}; 41};
42 42
43 43
44static char *nodemgr_find_oui_name(int oui)
45{
46#ifdef CONFIG_IEEE1394_OUI_DB
47 extern struct oui_list_struct {
48 int oui;
49 char *name;
50 } oui_list[];
51 int i;
52
53 for (i = 0; oui_list[i].name; i++)
54 if (oui_list[i].oui == oui)
55 return oui_list[i].name;
56#endif
57 return NULL;
58}
59
60/* 44/*
61 * Correct the speed map entry. This is necessary 45 * Correct the speed map entry. This is necessary
62 * - for nodes with link speed < phy speed, 46 * - for nodes with link speed < phy speed,
@@ -274,7 +258,6 @@ static struct device_driver nodemgr_mid_layer_driver = {
274struct device nodemgr_dev_template_host = { 258struct device nodemgr_dev_template_host = {
275 .bus = &ieee1394_bus_type, 259 .bus = &ieee1394_bus_type,
276 .release = nodemgr_release_host, 260 .release = nodemgr_release_host,
277 .driver = &nodemgr_mid_layer_driver,
278}; 261};
279 262
280 263
@@ -473,11 +456,9 @@ fw_attr(ne, struct node_entry, nodeid, unsigned int, "0x%04x\n")
473 456
474fw_attr(ne, struct node_entry, vendor_id, unsigned int, "0x%06x\n") 457fw_attr(ne, struct node_entry, vendor_id, unsigned int, "0x%06x\n")
475fw_attr_td(ne, struct node_entry, vendor_name_kv) 458fw_attr_td(ne, struct node_entry, vendor_name_kv)
476fw_attr(ne, struct node_entry, vendor_oui, const char *, "%s\n")
477 459
478fw_attr(ne, struct node_entry, guid, unsigned long long, "0x%016Lx\n") 460fw_attr(ne, struct node_entry, guid, unsigned long long, "0x%016Lx\n")
479fw_attr(ne, struct node_entry, guid_vendor_id, unsigned int, "0x%06x\n") 461fw_attr(ne, struct node_entry, guid_vendor_id, unsigned int, "0x%06x\n")
480fw_attr(ne, struct node_entry, guid_vendor_oui, const char *, "%s\n")
481fw_attr(ne, struct node_entry, in_limbo, int, "%d\n"); 462fw_attr(ne, struct node_entry, in_limbo, int, "%d\n");
482 463
483static struct device_attribute *const fw_ne_attrs[] = { 464static struct device_attribute *const fw_ne_attrs[] = {
@@ -503,7 +484,6 @@ fw_attr(ud, struct unit_directory, model_id, unsigned int, "0x%06x\n")
503fw_attr(ud, struct unit_directory, specifier_id, unsigned int, "0x%06x\n") 484fw_attr(ud, struct unit_directory, specifier_id, unsigned int, "0x%06x\n")
504fw_attr(ud, struct unit_directory, version, unsigned int, "0x%06x\n") 485fw_attr(ud, struct unit_directory, version, unsigned int, "0x%06x\n")
505fw_attr_td(ud, struct unit_directory, vendor_name_kv) 486fw_attr_td(ud, struct unit_directory, vendor_name_kv)
506fw_attr(ud, struct unit_directory, vendor_oui, const char *, "%s\n")
507fw_attr_td(ud, struct unit_directory, model_name_kv) 487fw_attr_td(ud, struct unit_directory, model_name_kv)
508 488
509static struct device_attribute *const fw_ud_attrs[] = { 489static struct device_attribute *const fw_ud_attrs[] = {
@@ -865,7 +845,6 @@ static struct node_entry *nodemgr_create_node(octlet_t guid, struct csr1212_csr
865 845
866 ne->guid = guid; 846 ne->guid = guid;
867 ne->guid_vendor_id = (guid >> 40) & 0xffffff; 847 ne->guid_vendor_id = (guid >> 40) & 0xffffff;
868 ne->guid_vendor_oui = nodemgr_find_oui_name(ne->guid_vendor_id);
869 ne->csr = csr; 848 ne->csr = csr;
870 849
871 memcpy(&ne->device, &nodemgr_dev_template_ne, 850 memcpy(&ne->device, &nodemgr_dev_template_ne,
@@ -885,9 +864,6 @@ static struct node_entry *nodemgr_create_node(octlet_t guid, struct csr1212_csr
885 goto fail_classdevreg; 864 goto fail_classdevreg;
886 get_device(&ne->device); 865 get_device(&ne->device);
887 866
888 if (ne->guid_vendor_oui &&
889 device_create_file(&ne->device, &dev_attr_ne_guid_vendor_oui))
890 goto fail_addoiu;
891 nodemgr_create_ne_dev_files(ne); 867 nodemgr_create_ne_dev_files(ne);
892 868
893 nodemgr_update_bus_options(ne); 869 nodemgr_update_bus_options(ne);
@@ -898,8 +874,6 @@ static struct node_entry *nodemgr_create_node(octlet_t guid, struct csr1212_csr
898 874
899 return ne; 875 return ne;
900 876
901fail_addoiu:
902 put_device(&ne->device);
903fail_classdevreg: 877fail_classdevreg:
904 device_unregister(&ne->device); 878 device_unregister(&ne->device);
905fail_devreg: 879fail_devreg:
@@ -975,15 +949,10 @@ static void nodemgr_register_device(struct node_entry *ne,
975 goto fail_classdevreg; 949 goto fail_classdevreg;
976 get_device(&ud->device); 950 get_device(&ud->device);
977 951
978 if (ud->vendor_oui &&
979 device_create_file(&ud->device, &dev_attr_ud_vendor_oui))
980 goto fail_addoui;
981 nodemgr_create_ud_dev_files(ud); 952 nodemgr_create_ud_dev_files(ud);
982 953
983 return; 954 return;
984 955
985fail_addoui:
986 put_device(&ud->device);
987fail_classdevreg: 956fail_classdevreg:
988 device_unregister(&ud->device); 957 device_unregister(&ud->device);
989fail_devreg: 958fail_devreg:
@@ -1020,9 +989,6 @@ static struct unit_directory *nodemgr_process_unit_directory
1020 if (kv->key.type == CSR1212_KV_TYPE_IMMEDIATE) { 989 if (kv->key.type == CSR1212_KV_TYPE_IMMEDIATE) {
1021 ud->vendor_id = kv->value.immediate; 990 ud->vendor_id = kv->value.immediate;
1022 ud->flags |= UNIT_DIRECTORY_VENDOR_ID; 991 ud->flags |= UNIT_DIRECTORY_VENDOR_ID;
1023
1024 if (ud->vendor_id)
1025 ud->vendor_oui = nodemgr_find_oui_name(ud->vendor_id);
1026 } 992 }
1027 break; 993 break;
1028 994
@@ -1153,9 +1119,6 @@ static void nodemgr_process_root_directory(struct host_info *hi, struct node_ent
1153 switch (kv->key.id) { 1119 switch (kv->key.id) {
1154 case CSR1212_KV_ID_VENDOR: 1120 case CSR1212_KV_ID_VENDOR:
1155 ne->vendor_id = kv->value.immediate; 1121 ne->vendor_id = kv->value.immediate;
1156
1157 if (ne->vendor_id)
1158 ne->vendor_oui = nodemgr_find_oui_name(ne->vendor_id);
1159 break; 1122 break;
1160 1123
1161 case CSR1212_KV_ID_NODE_CAPABILITIES: 1124 case CSR1212_KV_ID_NODE_CAPABILITIES:
@@ -1183,9 +1146,6 @@ static void nodemgr_process_root_directory(struct host_info *hi, struct node_ent
1183 last_key_id = kv->key.id; 1146 last_key_id = kv->key.id;
1184 } 1147 }
1185 1148
1186 if (ne->vendor_oui &&
1187 device_create_file(&ne->device, &dev_attr_ne_vendor_oui))
1188 goto fail;
1189 if (ne->vendor_name_kv && 1149 if (ne->vendor_name_kv &&
1190 device_create_file(&ne->device, &dev_attr_ne_vendor_name_kv)) 1150 device_create_file(&ne->device, &dev_attr_ne_vendor_name_kv))
1191 goto fail; 1151 goto fail;
@@ -1889,22 +1849,31 @@ int init_ieee1394_nodemgr(void)
1889 1849
1890 error = class_register(&nodemgr_ne_class); 1850 error = class_register(&nodemgr_ne_class);
1891 if (error) 1851 if (error)
1892 return error; 1852 goto fail_ne;
1893
1894 error = class_register(&nodemgr_ud_class); 1853 error = class_register(&nodemgr_ud_class);
1895 if (error) { 1854 if (error)
1896 class_unregister(&nodemgr_ne_class); 1855 goto fail_ud;
1897 return error;
1898 }
1899 error = driver_register(&nodemgr_mid_layer_driver); 1856 error = driver_register(&nodemgr_mid_layer_driver);
1857 if (error)
1858 goto fail_ml;
1859 /* This driver is not used if nodemgr is off (disable_nodemgr=1). */
1860 nodemgr_dev_template_host.driver = &nodemgr_mid_layer_driver;
1861
1900 hpsb_register_highlevel(&nodemgr_highlevel); 1862 hpsb_register_highlevel(&nodemgr_highlevel);
1901 return 0; 1863 return 0;
1864
1865fail_ml:
1866 class_unregister(&nodemgr_ud_class);
1867fail_ud:
1868 class_unregister(&nodemgr_ne_class);
1869fail_ne:
1870 return error;
1902} 1871}
1903 1872
1904void cleanup_ieee1394_nodemgr(void) 1873void cleanup_ieee1394_nodemgr(void)
1905{ 1874{
1906 hpsb_unregister_highlevel(&nodemgr_highlevel); 1875 hpsb_unregister_highlevel(&nodemgr_highlevel);
1907 1876 driver_unregister(&nodemgr_mid_layer_driver);
1908 class_unregister(&nodemgr_ud_class); 1877 class_unregister(&nodemgr_ud_class);
1909 class_unregister(&nodemgr_ne_class); 1878 class_unregister(&nodemgr_ne_class);
1910} 1879}
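init_ieee1394_nodemgr() above adopts the standard kernel goto-unwind ladder: each failing registration step jumps to a label that undoes only the steps that already succeeded, and cleanup_ieee1394_nodemgr() runs the same steps in reverse. A standalone sketch of the pattern, with stand-in registration steps that can be made to fail:

/*
 * Standalone sketch of the goto-unwind error ladder.
 */
#include <stdio.h>

static int fail_at = 3;			/* make the third step fail */

static int reg(const char *what, int step)
{
        if (step == fail_at) {
                printf("%s: failed\n", what);
                return -1;
        }
        printf("%s: registered\n", what);
        return 0;
}

static void unreg(const char *what) { printf("%s: unregistered\n", what); }

static int init(void)
{
        int error;

        error = reg("ne class", 1);
        if (error)
                goto fail_ne;
        error = reg("ud class", 2);
        if (error)
                goto fail_ud;
        error = reg("mid layer driver", 3);
        if (error)
                goto fail_ml;
        return 0;

fail_ml:
        unreg("ud class");
fail_ud:
        unreg("ne class");
fail_ne:
        return error;
}

int main(void)
{
        return init() ? 1 : 0;
}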
diff --git a/drivers/ieee1394/nodemgr.h b/drivers/ieee1394/nodemgr.h
index e25cbadb8be0..4147303ad448 100644
--- a/drivers/ieee1394/nodemgr.h
+++ b/drivers/ieee1394/nodemgr.h
@@ -70,7 +70,6 @@ struct unit_directory {
70 70
71 quadlet_t vendor_id; 71 quadlet_t vendor_id;
72 struct csr1212_keyval *vendor_name_kv; 72 struct csr1212_keyval *vendor_name_kv;
73 const char *vendor_oui;
74 73
75 quadlet_t model_id; 74 quadlet_t model_id;
76 struct csr1212_keyval *model_name_kv; 75 struct csr1212_keyval *model_name_kv;
@@ -93,7 +92,6 @@ struct unit_directory {
93struct node_entry { 92struct node_entry {
94 u64 guid; /* GUID of this node */ 93 u64 guid; /* GUID of this node */
95 u32 guid_vendor_id; /* Top 24bits of guid */ 94 u32 guid_vendor_id; /* Top 24bits of guid */
96 const char *guid_vendor_oui; /* OUI name of guid vendor id */
97 95
98 struct hpsb_host *host; /* Host this node is attached to */ 96 struct hpsb_host *host; /* Host this node is attached to */
99 nodeid_t nodeid; /* NodeID */ 97 nodeid_t nodeid; /* NodeID */
@@ -104,7 +102,6 @@ struct node_entry {
104 /* The following is read from the config rom */ 102 /* The following is read from the config rom */
105 u32 vendor_id; 103 u32 vendor_id;
106 struct csr1212_keyval *vendor_name_kv; 104 struct csr1212_keyval *vendor_name_kv;
107 const char *vendor_oui;
108 105
109 u32 capabilities; 106 u32 capabilities;
110 107
diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
index 628130a58af3..5729e412cc4a 100644
--- a/drivers/ieee1394/ohci1394.c
+++ b/drivers/ieee1394/ohci1394.c
@@ -3281,14 +3281,11 @@ static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
3281 PRINT(KERN_WARNING, "PCI resource length of 0x%llx too small!", 3281 PRINT(KERN_WARNING, "PCI resource length of 0x%llx too small!",
3282 (unsigned long long)pci_resource_len(dev, 0)); 3282 (unsigned long long)pci_resource_len(dev, 0));
3283 3283
3284 /* Seems PCMCIA handles this internally. Not sure why. Seems 3284 if (!request_mem_region(ohci_base, OHCI1394_REGISTER_SIZE,
3285 * pretty bogus to force a driver to special case this. */ 3285 OHCI1394_DRIVER_NAME))
3286#ifndef PCMCIA
3287 if (!request_mem_region (ohci_base, OHCI1394_REGISTER_SIZE, OHCI1394_DRIVER_NAME))
3288 FAIL(-ENOMEM, "MMIO resource (0x%llx - 0x%llx) unavailable", 3286 FAIL(-ENOMEM, "MMIO resource (0x%llx - 0x%llx) unavailable",
3289 (unsigned long long)ohci_base, 3287 (unsigned long long)ohci_base,
3290 (unsigned long long)ohci_base + OHCI1394_REGISTER_SIZE); 3288 (unsigned long long)ohci_base + OHCI1394_REGISTER_SIZE);
3291#endif
3292 ohci->init_state = OHCI_INIT_HAVE_MEM_REGION; 3289 ohci->init_state = OHCI_INIT_HAVE_MEM_REGION;
3293 3290
3294 ohci->registers = ioremap(ohci_base, OHCI1394_REGISTER_SIZE); 3291 ohci->registers = ioremap(ohci_base, OHCI1394_REGISTER_SIZE);
@@ -3509,10 +3506,8 @@ static void ohci1394_pci_remove(struct pci_dev *pdev)
3509 iounmap(ohci->registers); 3506 iounmap(ohci->registers);
3510 3507
3511 case OHCI_INIT_HAVE_MEM_REGION: 3508 case OHCI_INIT_HAVE_MEM_REGION:
3512#ifndef PCMCIA
3513 release_mem_region(pci_resource_start(ohci->dev, 0), 3509 release_mem_region(pci_resource_start(ohci->dev, 0),
3514 OHCI1394_REGISTER_SIZE); 3510 OHCI1394_REGISTER_SIZE);
3515#endif
3516 3511
3517#ifdef CONFIG_PPC_PMAC 3512#ifdef CONFIG_PPC_PMAC
3518 /* On UniNorth, power down the cable and turn off the chip clock 3513 /* On UniNorth, power down the cable and turn off the chip clock
@@ -3541,9 +3536,6 @@ static int ohci1394_pci_suspend(struct pci_dev *pdev, pm_message_t state)
3541 int err; 3536 int err;
3542 struct ti_ohci *ohci = pci_get_drvdata(pdev); 3537 struct ti_ohci *ohci = pci_get_drvdata(pdev);
3543 3538
3544 printk(KERN_INFO "%s does not fully support suspend and resume yet\n",
3545 OHCI1394_DRIVER_NAME);
3546
3547 if (!ohci) { 3539 if (!ohci) {
3548 printk(KERN_ERR "%s: tried to suspend nonexisting host\n", 3540 printk(KERN_ERR "%s: tried to suspend nonexisting host\n",
3549 OHCI1394_DRIVER_NAME); 3541 OHCI1394_DRIVER_NAME);
@@ -3630,15 +3622,14 @@ static int ohci1394_pci_resume(struct pci_dev *pdev)
3630 mdelay(50); 3622 mdelay(50);
3631 ohci_initialize(ohci); 3623 ohci_initialize(ohci);
3632 3624
3625 hpsb_resume_host(ohci->host);
3633 return 0; 3626 return 0;
3634} 3627}
3635#endif /* CONFIG_PM */ 3628#endif /* CONFIG_PM */
3636 3629
3637#define PCI_CLASS_FIREWIRE_OHCI ((PCI_CLASS_SERIAL_FIREWIRE << 8) | 0x10)
3638
3639static struct pci_device_id ohci1394_pci_tbl[] = { 3630static struct pci_device_id ohci1394_pci_tbl[] = {
3640 { 3631 {
3641 .class = PCI_CLASS_FIREWIRE_OHCI, 3632 .class = PCI_CLASS_SERIAL_FIREWIRE_OHCI,
3642 .class_mask = PCI_ANY_ID, 3633 .class_mask = PCI_ANY_ID,
3643 .vendor = PCI_ANY_ID, 3634 .vendor = PCI_ANY_ID,
3644 .device = PCI_ANY_ID, 3635 .device = PCI_ANY_ID,
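The ohci1394_pci_tbl hunk drops the private PCI_CLASS_FIREWIRE_OHCI macro in favour of the standard PCI_CLASS_SERIAL_FIREWIRE_OHCI: the 16-bit class/subclass code (0x0c00 for serial bus / FireWire) shifted left by 8 with the OHCI programming interface 0x10 in the low byte, giving 0x0c0010. A standalone sketch of how such class-based matching works:

/*
 * Standalone sketch of PCI class-code matching. The match helper models
 * the core's class/class_mask comparison, not an actual kernel API.
 */
#include <stdio.h>

#define PCI_CLASS_SERIAL_FIREWIRE	0x0c00
#define PCI_CLASS_SERIAL_FIREWIRE_OHCI	((PCI_CLASS_SERIAL_FIREWIRE << 8) | 0x10)

static int class_matches(unsigned dev_class, unsigned class, unsigned mask)
{
        return (dev_class & mask) == (class & mask);
}

int main(void)
{
        printf("OHCI class code: 0x%06x\n", PCI_CLASS_SERIAL_FIREWIRE_OHCI);
        /* with an all-ones mask, only exact 0x0c0010 devices match */
        printf("%d\n", class_matches(0x0c0010, PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0u));
        printf("%d\n", class_matches(0x0c0000, PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0u));
        return 0;
}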
diff --git a/drivers/ieee1394/oui.db b/drivers/ieee1394/oui.db
deleted file mode 100644
index 592c8a60d01e..000000000000
--- a/drivers/ieee1394/oui.db
+++ /dev/null
@@ -1,7048 +0,0 @@
-000000 XEROX CORPORATION
-000001 XEROX CORPORATION
-000002 XEROX CORPORATION
-000003 XEROX CORPORATION
-000004 XEROX CORPORATION
-000005 XEROX CORPORATION
-000006 XEROX CORPORATION
-000007 XEROX CORPORATION
-000008 XEROX CORPORATION
-000009 XEROX CORPORATION
-00000A OMRON TATEISI ELECTRONICS CO.
-00000B MATRIX CORPORATION
-00000C CISCO SYSTEMS, INC.
-00000D FIBRONICS LTD.
-00000E FUJITSU LIMITED
-00000F NEXT, INC.
-000010 SYTEK INC.
-000011 NORMEREL SYSTEMES
-000012 INFORMATION TECHNOLOGY LIMITED
-000013 CAMEX
-000014 NETRONIX
-000015 DATAPOINT CORPORATION
-000016 DU PONT PIXEL SYSTEMS .
-000017 TEKELEC
-000018 WEBSTER COMPUTER CORPORATION
-000019 APPLIED DYNAMICS INTERNATIONAL
-00001A ADVANCED MICRO DEVICES
-00001B NOVELL INC.
-00001C BELL TECHNOLOGIES
-00001D CABLETRON SYSTEMS, INC.
-00001E TELSIST INDUSTRIA ELECTRONICA
-00001F Telco Systems, Inc.
-000020 DATAINDUSTRIER DIAB AB
-000021 SUREMAN COMP. & COMMUN. CORP.
-000022 VISUAL TECHNOLOGY INC.
-000023 ABB INDUSTRIAL SYSTEMS AB
-000024 CONNECT AS
-000025 RAMTEK CORP.
-000026 SHA-KEN CO., LTD.
-000027 JAPAN RADIO COMPANY
-000028 PRODIGY SYSTEMS CORPORATION
-000029 IMC NETWORKS CORP.
-00002A TRW - SEDD/INP
-00002B CRISP AUTOMATION, INC
-00002C AUTOTOTE LIMITED
-00002D CHROMATICS INC
-00002E SOCIETE EVIRA
-00002F TIMEPLEX INC.
-000030 VG LABORATORY SYSTEMS LTD
-000031 QPSX COMMUNICATIONS PTY LTD
-000032 Marconi plc
-000033 EGAN MACHINERY COMPANY
-000034 NETWORK RESOURCES CORPORATION
-000035 SPECTRAGRAPHICS CORPORATION
-000036 ATARI CORPORATION
-000037 OXFORD METRICS LIMITED
-000038 CSS LABS
-000039 TOSHIBA CORPORATION
-00003A CHYRON CORPORATION
-00003B i Controls, Inc.
-00003C AUSPEX SYSTEMS INC.
-00003D UNISYS
-00003E SIMPACT
-00003F SYNTREX, INC.
-000040 APPLICON, INC.
-000041 ICE CORPORATION
-000042 METIER MANAGEMENT SYSTEMS LTD.
-000043 MICRO TECHNOLOGY
-000044 CASTELLE CORPORATION
-000045 FORD AEROSPACE & COMM. CORP.
-000046 OLIVETTI NORTH AMERICA
-000047 NICOLET INSTRUMENTS CORP.
-000048 SEIKO EPSON CORPORATION
-000049 APRICOT COMPUTERS, LTD
-00004A ADC CODENOLL TECHNOLOGY CORP.
-00004B ICL DATA OY
-00004C NEC CORPORATION
-00004D DCI CORPORATION
-00004E AMPEX CORPORATION
-00004F LOGICRAFT, INC.
-000050 RADISYS CORPORATION
-000051 HOB ELECTRONIC GMBH & CO. KG
-000052 Intrusion.com, Inc.
-000053 COMPUCORP
-000054 MODICON, INC.
-000055 COMMISSARIAT A L`ENERGIE ATOM.
-000056 DR. B. STRUCK
-000057 SCITEX CORPORATION LTD.
-000058 RACORE COMPUTER PRODUCTS INC.
-000059 HELLIGE GMBH
-00005A SysKonnect GmbH
-00005B ELTEC ELEKTRONIK AG
-00005C TELEMATICS INTERNATIONAL INC.
-00005D CS TELECOM
-00005E USC INFORMATION SCIENCES INST
-00005F SUMITOMO ELECTRIC IND., LTD.
-000060 KONTRON ELEKTRONIK GMBH
-000061 GATEWAY COMMUNICATIONS
-000062 BULL HN INFORMATION SYSTEMS
-000063 DR.ING.SEUFERT GMBH
-000064 YOKOGAWA DIGITAL COMPUTER CORP
-000065 NETWORK ASSOCIATES, INC.
-000066 TALARIS SYSTEMS, INC.
-000067 SOFT * RITE, INC.
-000068 ROSEMOUNT CONTROLS
-000069 CONCORD COMMUNICATIONS INC
-00006A COMPUTER CONSOLES INC.
-00006B SILICON GRAPHICS INC./MIPS
-00006D CRAY COMMUNICATIONS, LTD.
-00006E ARTISOFT, INC.
-00006F Madge Ltd.
-000070 HCL LIMITED
-000071 ADRA SYSTEMS INC.
-000072 MINIWARE TECHNOLOGY
-000073 SIECOR CORPORATION
-000074 RICOH COMPANY LTD.
-000075 Nortel Networks
-000076 ABEKAS VIDEO SYSTEM
-000077 INTERPHASE CORPORATION
-000078 LABTAM LIMITED
-000079 NETWORTH INCORPORATED
-00007A DANA COMPUTER INC.
-00007B RESEARCH MACHINES
-00007C AMPERE INCORPORATED
-00007D SUN MICROSYSTEMS, INC.
-00007E CLUSTRIX CORPORATION
-00007F LINOTYPE-HELL AG
-000080 CRAY COMMUNICATIONS A/S
-000081 BAY NETWORKS
-000082 LECTRA SYSTEMES SA
-000083 TADPOLE TECHNOLOGY PLC
-000084 SUPERNET
-000085 CANON INC.
-000086 MEGAHERTZ CORPORATION
-000087 HITACHI, LTD.
-000088 COMPUTER NETWORK TECH. CORP.
-000089 CAYMAN SYSTEMS INC.
-00008A DATAHOUSE INFORMATION SYSTEMS
-00008B INFOTRON
-00008C Alloy Computer Products (Australia) Pty Ltd
-00008D VERDIX CORPORATION
-00008E SOLBOURNE COMPUTER, INC.
-00008F RAYTHEON COMPANY
-000090 MICROCOM
-000091 ANRITSU CORPORATION
-000092 COGENT DATA TECHNOLOGIES
-000093 PROTEON INC.
-000094 ASANTE TECHNOLOGIES
-000095 SONY TEKTRONIX CORP.
-000096 MARCONI ELECTRONICS LTD.
-000097 EPOCH SYSTEMS
-000098 CROSSCOMM CORPORATION
-000099 MTX, INC.
-00009A RC COMPUTER A/S
-00009B INFORMATION INTERNATIONAL, INC
-00009C ROLM MIL-SPEC COMPUTERS
-00009D LOCUS COMPUTING CORPORATION
-00009E MARLI S.A.
-00009F AMERISTAR TECHNOLOGIES INC.
-0000A0 TOKYO SANYO ELECTRIC CO. LTD.
-0000A1 MARQUETTE ELECTRIC CO.
-0000A2 BAY NETWORKS
-0000A3 NETWORK APPLICATION TECHNOLOGY
-0000A4 ACORN COMPUTERS LIMITED
-0000A5 COMPATIBLE SYSTEMS CORP.
-0000A6 NETWORK GENERAL CORPORATION
-0000A7 NETWORK COMPUTING DEVICES INC.
-0000A8 STRATUS COMPUTER INC.
-0000A9 NETWORK SYSTEMS CORP.
-0000AA XEROX CORPORATION
-0000AB LOGIC MODELING CORPORATION
-0000AC CONWARE COMPUTER CONSULTING
-0000AD BRUKER INSTRUMENTS INC.
-0000AE DASSAULT ELECTRONIQUE
-0000AF NUCLEAR DATA INSTRUMENTATION
-0000B0 RND-RAD NETWORK DEVICES
-0000B1 ALPHA MICROSYSTEMS INC.
-0000B2 TELEVIDEO SYSTEMS, INC.
-0000B3 CIMLINC INCORPORATED
-0000B4 EDIMAX COMPUTER COMPANY
-0000B5 DATABILITY SOFTWARE SYS. INC.
-0000B6 MICRO-MATIC RESEARCH
-0000B7 DOVE COMPUTER CORPORATION
-0000B8 SEIKOSHA CO., LTD.
-0000B9 MCDONNELL DOUGLAS COMPUTER SYS
-0000BA SIIG, INC.
-0000BB TRI-DATA
-0000BC ALLEN-BRADLEY CO. INC.
-0000BD MITSUBISHI CABLE COMPANY
-0000BE THE NTI GROUP
-0000BF SYMMETRIC COMPUTER SYSTEMS
-0000C0 WESTERN DIGITAL CORPORATION
-0000C1 Madge Ltd.
-0000C2 INFORMATION PRESENTATION TECH.
-0000C3 HARRIS CORP COMPUTER SYS DIV
-0000C4 WATERS DIV. OF MILLIPORE
-0000C5 FARALLON COMPUTING/NETOPIA
-0000C6 EON SYSTEMS
-0000C7 ARIX CORPORATION
-0000C8 ALTOS COMPUTER SYSTEMS
-0000C9 EMULEX CORPORATION
-0000CA APPLITEK
-0000CB COMPU-SHACK ELECTRONIC GMBH
-0000CC DENSAN CO., LTD.
-0000CD Allied Telesyn Research Ltd.
-0000CE MEGADATA CORP.
-0000CF HAYES MICROCOMPUTER PRODUCTS
-0000D0 DEVELCON ELECTRONICS LTD.
-0000D1 ADAPTEC INCORPORATED
-0000D2 SBE, INC.
-0000D3 WANG LABORATORIES INC.
-0000D4 PURE DATA LTD.
-0000D5 MICROGNOSIS INTERNATIONAL
-0000D6 PUNCH LINE HOLDING
-0000D7 DARTMOUTH COLLEGE
-0000D8 NOVELL, INC.
-0000D9 NIPPON TELEGRAPH & TELEPHONE
-0000DA ATEX
-0000DB BRITISH TELECOMMUNICATIONS PLC
-0000DC HAYES MICROCOMPUTER PRODUCTS
-0000DD TCL INCORPORATED
-0000DE CETIA
-0000DF BELL & HOWELL PUB SYS DIV
-0000E0 QUADRAM CORP.
-0000E1 GRID SYSTEMS
-0000E2 ACER TECHNOLOGIES CORP.
-0000E3 INTEGRATED MICRO PRODUCTS LTD
-0000E4 IN2 GROUPE INTERTECHNIQUE
-0000E5 SIGMEX LTD.
-0000E6 APTOR PRODUITS DE COMM INDUST
-0000E7 STAR GATE TECHNOLOGIES
-0000E8 ACCTON TECHNOLOGY CORP.
-0000E9 ISICAD, INC.
-0000EA UPNOD AB
-0000EB MATSUSHITA COMM. IND. CO. LTD.
-0000EC MICROPROCESS
-0000ED APRIL
-0000EE NETWORK DESIGNERS, LTD.
-0000EF KTI
-0000F0 SAMSUNG ELECTRONICS CO., LTD.
-0000F1 MAGNA COMPUTER CORPORATION
-0000F2 SPIDER COMMUNICATIONS
-0000F3 GANDALF DATA LIMITED
-0000F4 ALLIED TELESYN INTERNATIONAL
-0000F5 DIAMOND SALES LIMITED
-0000F6 APPLIED MICROSYSTEMS CORP.
-0000F7 YOUTH KEEP ENTERPRISE CO LTD
-0000F8 DIGITAL EQUIPMENT CORPORATION
-0000F9 QUOTRON SYSTEMS INC.
-0000FA MICROSAGE COMPUTER SYSTEMS INC
-0000FB RECHNER ZUR KOMMUNIKATION
-0000FC MEIKO
-0000FD HIGH LEVEL HARDWARE
-0000FE ANNAPOLIS MICRO SYSTEMS
-0000FF CAMTEC ELECTRONICS LTD.
-000100 EQUIP'TRANS
-000102 3COM CORPORATION
-000103 3COM CORPORATION
-000104 DVICO Co., Ltd.
-000105 BECKHOFF GmbH
-000106 Tews Datentechnik GmbH
-000107 Leiser GmbH
-000108 AVLAB Technology, Inc.
-000109 Nagano Japan Radio Co., Ltd.
-00010A CIS TECHNOLOGY INC.
-00010B Space CyberLink, Inc.
-00010C System Talks Inc.
-00010D CORECO, INC.
-00010E Bri-Link Technologies Co., Ltd
-00010F Nishan Systems, Inc.
-000110 Gotham Networks
-000111 iDigm Inc.
-000112 Shark Multimedia Inc.
-000113 OLYMPUS CORPORATION
-000114 KANDA TSUSHIN KOGYO CO., LTD.
-000115 EXTRATECH CORPORATION
-000116 Netspect Technologies, Inc.
-000117 CANAL +
-000118 EZ Digital Co., Ltd.
-000119 Action Controls Pty. Ltd.
-00011A EEH DataLink GmbH
-00011B Unizone Technologies, Inc.
-00011C Universal Talkware Corporation
-00011D Centillium Communications
-00011E Precidia Technologies, Inc.
-00011F RC Networks, Inc.
-000120 OSCILLOQUARTZ S.A.
-000121 RapidStream Inc.
-000122 Trend Communications, Ltd.
-000123 DIGITAL ELECTRONICS CORP.
-000124 Acer Incorporated
-000125 YAESU MUSEN CO., LTD.
-000126 PAC Labs
-000127 The OPEN Group Limited
-000128 EnjoyWeb, Inc.
-000129 DFI Inc.
-00012A Telematica Sistems Inteligente
-00012B TELENET Co., Ltd.
-00012C Aravox Technologies, Inc.
-00012D Komodo Technology
-00012E PC Partner Ltd.
-00012F Twinhead International Corp
-000130 Extreme Networks
-000131 Detection Systems, Inc.
-000132 Dranetz - BMI
-000133 KYOWA Electronic Instruments C
-000134 SIG Positec Systems AG
-000135 KDC Corp.
-000136 CyberTAN Technology, Inc.
-000137 IT Farm Corporation
-000138 XAVi Technologies Corp.
-000139 Point Multimedia Systems
-00013A SHELCAD COMMUNICATIONS, LTD.
-00013B BNA SYSTEMS
-00013C TIW SYSTEMS
-00013D RiscStation Ltd.
-00013E Ascom Tateco AB
-00013F Neighbor World Co., Ltd.
-000140 Sendtek Corporation
-000141 CABLE PRINT
-000142 Cisco Systems, Inc.
-000143 Cisco Systems, Inc.
-000144 Cereva Networks, Inc.
-000145 WINSYSTEMS, INC.
-000146 Tesco Controls, Inc.
-000147 Zhone Technologies
-000148 X-traWeb Inc.
-000149 T.D.T. Transfer Data Test GmbH
-00014A SONY COMPUTER SCIENCE LABS., I
-00014B Ennovate Networks, Inc.
-00014C Berkeley Process Control
-00014D Shin Kin Enterprises Co., Ltd
-00014E WIN Enterprises, Inc.
-00014F LUMINOUS Networks, Inc.
-000150 GILAT COMMUNICATIONS, LTD.
-000151 Ensemble Communications
-000152 CHROMATEK INC.
-000153 ARCHTEK TELECOM CORPORATION
-000154 G3M Corporation
-000155 Promise Technology, Inc.
-000156 FIREWIREDIRECT.COM, INC.
-000157 SYSWAVE CO., LTD
-000158 Electro Industries/Gauge Tech
-000159 S1 Corporation
-00015A Digital Video Broadcasting
-00015B ITALTEL S.p.A/RF-UP-I
-00015C CADANT INC.
-00015D Sun Microsystems, Inc
-00015E BEST TECHNOLOGY CO., LTD.
-00015F DIGITAL DESIGN GmbH
-000160 ELMEX Co., LTD.
-000161 Meta Machine Technology
-000162 Cygnet Technologies, Inc.
-000163 Cisco Systems, Inc.
-000164 Cisco Systems, Inc.
-000165 AirSwitch Corporation
-000166 TC GROUP A/S
-000167 HIOKI E.E. CORPORATION
-000168 VITANA CORPORATION
-000169 Celestix Networks Pte Ltd.
-00016A ALITEC
-00016B LightChip, Inc.
-00016C FOXCONN
-00016D CarrierComm Inc.
-00016E Conklin Corporation
-00016F HAITAI ELECTRONICS CO., LTD.
-000170 ESE Embedded System Engineer'g
-000171 Allied Data Technologies
-000172 TechnoLand Co., LTD.
-000173 JNI Corporation
-000174 CyberOptics Corporation
-000175 Radiant Communications Corp.
-000176 Orient Silver Enterprises
-000177 EDSL
-000178 MARGI Systems, Inc.
-000179 WIRELESS TECHNOLOGY, INC.
-00017A Chengdu Maipu Electric Industrial Co., Ltd.
-00017B Heidelberger Druckmaschinen AG
-00017C AG-E GmbH
-00017D ThermoQuest
-00017E ADTEK System Science Co., Ltd.
-00017F Experience Music Project
-000180 AOpen, Inc.
-000181 Nortel Networks
-000182 DICA TECHNOLOGIES AG
-000183 ANITE TELECOMS
-000184 SIEB & MEYER AG
-000185 Aloka Co., Ltd.
-000186 DISCH GmbH
-000187 i2SE GmbH
-000188 LXCO Technologies ag
-000189 Refraction Technology, Inc.
-00018A ROI COMPUTER AG
-00018B NetLinks Co., Ltd.
-00018C Mega Vision
-00018D AudeSi Technologies
-00018E Logitec Corporation
-00018F Kenetec, Inc.
-000190 SMK-M
-000191 SYRED Data Systems
-000192 Texas Digital Systems
-000193 Hanbyul Telecom Co., Ltd.
-000194 Capital Equipment Corporation
-000195 Sena Technologies, Inc.
-000196 Cisco Systems, Inc.
-000197 Cisco Systems, Inc.
-000198 Darim Vision
-000199 HeiSei Electronics
-00019A LEUNIG GmbH
-00019B Kyoto Microcomputer Co., Ltd.
-00019C JDS Uniphase Inc.
-00019D E-Control Systems, Inc.
-00019E ESS Technology, Inc.
-00019F Phonex Broadband
-0001A0 Infinilink Corporation
-0001A1 Mag-Tek, Inc.
-0001A2 Logical Co., Ltd.
-0001A3 GENESYS LOGIC, INC.
-0001A4 Microlink Corporation
-0001A5 Nextcomm, Inc.
-0001A6 Scientific-Atlanta Arcodan A/S
-0001A7 UNEX TECHNOLOGY CORPORATION
-0001A8 Welltech Computer Co., Ltd.
-0001A9 BMW AG
-0001AA Airspan Communications, Ltd.
-0001AB Main Street Networks
-0001AC Sitara Networks, Inc.
-0001AD Coach Master International d.b.a. CMI Worldwide, Inc.
-0001AE Trex Enterprises
-0001AF Motorola Computer Group
-0001B0 Fulltek Technology Co., Ltd.
-0001B1 General Bandwidth
-0001B2 Digital Processing Systems, Inc.
-0001B3 Precision Electronic Manufacturing
-0001B4 Wayport, Inc.
-0001B5 Turin Networks, Inc.
-0001B6 SAEJIN T&M Co., Ltd.
-0001B7 Centos, Inc.
-0001B8 Netsensity, Inc.
-0001B9 SKF Condition Monitoring
-0001BA IC-Net, Inc.
-0001BB Frequentis
-0001BC Brains Corporation
-0001BD Peterson Electro-Musical Products, Inc.
-0001BE Gigalink Co., Ltd.
-0001BF Teleforce Co., Ltd.
-0001C0 CompuLab, Ltd.
-0001C1 Vitesse Semiconductor Corporation
-0001C2 ARK Research Corp.
-0001C3 Acromag, Inc.
-0001C4 NeoWave, Inc.
-0001C5 Simpler Networks
-0001C6 Quarry Technologies
-0001C7 Cisco Systems, Inc.
-0001C8 THOMAS CONRAD CORP.
-0001C8 CONRAD CORP.
-0001C9 Cisco Systems, Inc.
-0001CA Geocast Network Systems, Inc.
-0001CB NetGame, Ltd.
-0001CC Japan Total Design Communication Co., Ltd.
-0001CD ARtem
-0001CE Custom Micro Products, Ltd.
-0001CF Alpha Data Parallel Systems, Ltd.
-0001D0 VitalPoint, Inc.
-0001D1 CoNet Communications, Inc.
-0001D2 MacPower Peripherals, Ltd.
-0001D3 PAXCOMM, Inc.
-0001D4 Leisure Time, Inc.
-0001D5 HAEDONG INFO & COMM CO., LTD
-0001D6 MAN Roland Druckmaschinen AG
-0001D7 F5 Networks, Inc.
-0001D8 Teltronics, Inc.
-0001D9 Sigma, Inc.
-0001DA WINCOMM Corporation
-0001DB Freecom Technologies GmbH
-0001DC Activetelco
-0001DD Avail Networks
-0001DE Trango Systems, Inc.
-0001DF ISDN Communications, Ltd.
-0001E0 Fast Systems, Inc.
-0001E1 Kinpo Electronics, Inc.
-0001E2 Ando Electric Corporation
-0001E3 Siemens AG
-0001E4 Sitera, Inc.
-0001E5 Supernet, Inc.
-0001E6 Hewlett-Packard Company
-0001E7 Hewlett-Packard Company
-0001E8 Force10 Networks, Inc.
-0001E9 Litton Marine Systems B.V.
-0001EA Cirilium Corp.
-0001EB C-COM Corporation
-0001EC Ericsson Group
-0001ED SETA Corp.
-0001EE Comtrol Europe, Ltd.
-0001EF Camtel Technology Corp.
-0001F0 Tridium, Inc.
-0001F1 Innovative Concepts, Inc.
-0001F2 Mark of the Unicorn, Inc.
-0001F3 QPS, Inc.
-0001F4 Enterasys Networks
-0001F5 ERIM S.A.
-0001F6 Association of Musical Electronics Industry
-0001F7 Image Display Systems, Inc.
-0001F8 Adherent Systems, Ltd.
-0001F9 TeraGlobal Communications Corp.
-0001FA HOROSCAS
-0001FB DoTop Technology, Inc.
-0001FC Keyence Corporation
-0001FD Digital Voice Systems, Inc.
-0001FE DIGITAL EQUIPMENT CORPORATION
-0001FF Data Direct Networks, Inc.
-000200 Net & Sys Co., Ltd.
-000201 IFM Electronic gmbh
-000202 Amino Communications, Ltd.
-000203 Woonsang Telecom, Inc.
-000204 Bodmann Industries Elektronik GmbH
-000205 Hitachi Denshi, Ltd.
-000206 Telital R&D Denmark A/S
-000207 VisionGlobal Network Corp.
-000208 Unify Networks, Inc.
-000209 Shenzhen SED Information Technology Co., Ltd.
-00020A Gefran Spa
-00020B Native Networks, Inc.
-00020C Metro-Optix
-00020D Micronpc.com
-00020E Laurel Networks, Inc.
-00020F AATR
-000210 Fenecom
-000211 Nature Worldwide Technology Corp.
-000212 SierraCom
-000213 S.D.E.L.
-000214 DTVRO
-000215 Cotas Computer Technology A/B
-000216 Cisco Systems, Inc.
-000217 Cisco Systems, Inc.
-000218 Advanced Scientific Corp
-000219 Paralon Technologies
-00021A Zuma Networks
-00021B Kollmorgen-Servotronix
-00021C Network Elements, Inc.
-00021D Data General Communication Ltd.
-00021E SIMTEL S.R.L.
-00021F Aculab PLC
-000220 Canon Aptex, Inc.
-000221 DSP Application, Ltd.
-000222 Chromisys, Inc.
-000223 ClickTV
-000224 Lantern Communications, Inc.
-000225 Certus Technology, Inc.
-000226 XESystems, Inc.
-000227 ESD GmbH
-000228 Necsom, Ltd.
-000229 Adtec Corporation
-00022A Asound Electronic
-00022B Tamura Electric Works, Ltd.
-00022C ABB Bomem, Inc.
-00022D Agere Systems
-00022E TEAC Corp. R& D
-00022F P-Cube, Ltd.
-000230 Intersoft Electronics
-000231 Ingersoll-Rand
-000232 Avision, Inc.
-000233 Mantra Communications, Inc.
-000234 Imperial Technology, Inc.
-000235 Paragon Networks International
-000236 INIT GmbH
-000237 Cosmo Research Corp.
-000238 Serome Technology, Inc.
-000239 Visicom
-00023A ZSK Stickmaschinen GmbH
-00023B Redback Networks
-00023C Creative Technology, Ltd.
-00023D NuSpeed, Inc.
-00023E Selta Telematica S.p.a
-00023F Compal Electronics, Inc.
-000240 Seedek Co., Ltd.
-000241 Amer.com
-000242 Videoframe Systems
-000243 Raysis Co., Ltd.
-000244 SURECOM Technology Co.
-000245 Lampus Co, Ltd.
-000246 All-Win Tech Co., Ltd.
-000247 Great Dragon Information Technology (Group) Co., Ltd.
-000248 Pilz GmbH & Co.
-000249 Aviv Infocom Co, Ltd.
-00024A Cisco Systems, Inc.
-00024B Cisco Systems, Inc.
-00024C SiByte, Inc.
-00024D Mannesman Dematic Colby Pty. Ltd.
-00024E Datacard Group
-00024F IPM Datacom S.R.L.
-000250 Geyser Networks, Inc.
-000251 Soma Networks
-000252 Carrier Corporation
-000253 Televideo, Inc.
-000254 WorldGate
-000255 IBM Corporation
-000256 Alpha Processor, Inc.
-000257 Microcom Corp.
-000258 Flying Packets Communications
-000259 Tsann Kuen China (Shanghai)Enterprise Co., Ltd. IT Group
-00025A Catena Networks
-00025B Cambridge Silicon Radio
-00025C SCI Systems (Kunshan) Co., Ltd.
-00025D Calix Networks
-00025E High Technology Ltd
-00025F Nortel Networks
-000260 Accordion Networks, Inc.
-000261 i3 Micro Technology AB
-000262 Soyo Group Soyo Com Tech Co., Ltd
-000263 UPS Manufacturing SRL
-000264 AudioRamp.com
-000265 Virditech Co. Ltd.
-000266 Thermalogic Corporation
-000267 NODE RUNNER, INC.
-000268 Harris Government Communications
-000269 Nadatel Co., Ltd
-00026A Cocess Telecom Co., Ltd.
-00026B BCM Computers Co., Ltd.
-00026C Philips CFT
-00026D Adept Telecom
-00026E NeGeN Access, Inc.
-00026F Senao International Co., Ltd.
-000270 Crewave Co., Ltd.
-000271 Vpacket Communications
-000272 CC&C Technologies, Inc.
-000273 Coriolis Networks
-000274 Tommy Technologies Corp.
-000275 SMART Technologies, Inc.
-000276 Primax Electronics Ltd.
-000277 Cash Systemes Industrie
-000278 Samsung Electro-Mechanics Co., Ltd.
-000279 Control Applications, Ltd.
-00027A IOI Technology Corporation
-00027B Amplify Net, Inc.
-00027C Trilithic, Inc.
-00027D Cisco Systems, Inc.
-00027E Cisco Systems, Inc.
-00027F ask-technologies.com
-000280 Mu Net, Inc.
-000281 Madge Ltd.
-000282 ViaClix, Inc.
-000283 Spectrum Controls, Inc.
-000284 Alstom T&D P&C
-000285 Riverstone Networks
-000286 Occam Networks
-000287 Adapcom
-000288 GLOBAL VILLAGE COMMUNICATION
-000289 DNE Technologies
-00028A Ambit Microsystems Corporation
-00028B VDSL Systems OY
-00028C Micrel-Synergy Semiconductor
-00028D Movita Technologies, Inc.
-00028E Rapid 5 Networks, Inc.
-00028F Globetek, Inc.
-000290 Woorigisool, Inc.
-000291 Open Network Co., Ltd.
-000292 Logic Innovations, Inc.
-000293 Solid Data Systems
-000294 Tokyo Sokushin Co., Ltd.
-000295 IP.Access Limited
-000296 Lectron Co,. Ltd.
-000297 C-COR.net
-000298 Broadframe Corporation
-000299 Apex, Inc.
-00029A Storage Apps
-00029B Kreatel Communications AB
-00029C 3COM
-00029D Merix Corp.
-00029E Information Equipment Co., Ltd.
-00029F L-3 Communication Aviation Recorders
-0002A0 Flatstack Ltd.
-0002A1 World Wide Packets
-0002A2 Hilscher GmbH
-0002A3 ABB Power Automation
-0002A4 AddPac Technology Co., Ltd.
-0002A5 Compaq Computer Corporation
-0002A6 Effinet Systems Co., Ltd.
-0002A7 Vivace Networks
-0002A8 Air Link Technology
-0002A9 RACOM, s.r.o.
-0002AA PLcom Co., Ltd.
-0002AB CTC Union Technologies Co., Ltd.
-0002AC 3PAR data
-0002AD Pentax Corpotation
-0002AE Scannex Electronics Ltd.
-0002AF TeleCruz Technology, Inc.
-0002B0 Hokubu Communication & Industrial Co., Ltd.
-0002B1 Anritsu, Ltd.
-0002B2 Cablevision
-0002B3 Intel Corporation
-0002B4 DAPHNE
-0002B5 Avnet, Inc.
-0002B6 Acrosser Technology Co., Ltd.
-0002B7 Watanabe Electric Industry Co., Ltd.
-0002B8 WHI KONSULT AB
-0002B9 Cisco Systems, Inc.
-0002BA Cisco Systems, Inc.
-0002BB Continuous Computing
-0002BC LVL 7 Systems, Inc.
-0002BD Bionet Co., Ltd.
-0002BE Totsu Engineering, Inc.
-0002BF dotRocket, Inc.
-0002C0 Bencent Tzeng Industry Co., Ltd.
-0002C1 Innovative Electronic Designs, Inc.
-0002C2 Net Vision Telecom
-0002C3 Arelnet Ltd.
-0002C4 Vector International BUBA
-0002C5 Evertz Microsystems Ltd.
-0002C6 Data Track Technology PLC
-0002C7 ALPS ELECTRIC Co., Ltd.
-0002C8 Technocom Communications Technology (pte) Ltd
-0002C9 Mellanox Technologies
-0002CA EndPoints, Inc.
-0002CB TriState Ltd.
-0002CC M.C.C.I
-0002CD TeleDream, Inc.
-0002CE FoxJet, Inc.
-0002CF ZyGate Communications, Inc.
-0002D0 Comdial Corporation
-0002D1 Vivotek, Inc.
-0002D2 Workstation AG
-0002D3 NetBotz, Inc.
-0002D4 PDA Peripherals, Inc.
-0002D5 ACR
-0002D6 NICE Systems
-0002D7 EMPEG Ltd
-0002D8 BRECIS Communications Corporation
-0002D9 Reliable Controls
-0002DA ExiO Communications, Inc.
-0002DB NETSEC
-0002DC Fujitsu General Limited
-0002DD Bromax Communications, Ltd.
-0002DE Astrodesign, Inc.
-0002DF Net Com Systems, Inc.
-0002E0 ETAS GmbH
-0002E1 Integrated Network Corporation
-0002E2 NDC Infared Engineering
-0002E3 LITE-ON Communications, Inc.
-0002E4 JC HYUN Systems, Inc.
-0002E5 Timeware Ltd.
-0002E6 Gould Instrument Systems, Inc.
-0002E7 CAB GmbH & Co KG
-0002E8 E.D.&A.
-0002E9 CS Systemes De Securite - C3S
-0002EA Videonics, Inc.
-0002EB Pico Communications
-0002EC Maschoff Design Engineering
-0002ED DXO Telecom Co., Ltd.
-0002EE Nokia Danmark A/S
-0002EF CCC Network Systems Group Ltd.
-0002F0 AME Optimedia Technology Co., Ltd.
-0002F1 Pinetron Co., Ltd.
-0002F2 eDevice, Inc.
-0002F3 Media Serve Co., Ltd.
-0002F4 PCTEL, Inc.
-0002F5 VIVE Synergies, Inc.
-0002F6 Equipe Communications
-0002F7 ARM
-0002F8 SEAKR Engineering, Inc.
-0002F9 Mimos Semiconductor SDN BHD
-0002FA DX Antenna Co., Ltd.
-0002FB Baumuller Aulugen-Systemtechnik GmbH
-0002FC Cisco Systems, Inc.
-0002FD Cisco Systems, Inc.
-0002FE Viditec, Inc.
-0002FF Handan BroadInfoCom
-000300 NetContinuum, Inc.
-000301 Avantas Networks Corporation
-000302 Oasys Telecom, Inc.
-000303 JAMA Electronics Co., Ltd.
-000304 Pacific Broadband Communications
-000305 Smart Network Devices GmbH
-000306 Fusion In Tech Co., Ltd.
-000307 Secure Works, Inc.
-000308 AM Communications, Inc.
-000309 Texcel Technology PLC
-00030A Argus Technologies
-00030B Hunter Technology, Inc.
-00030C Telesoft Technologies Ltd.
-00030D Uniwill Computer Corp.
-00030E Core Communications Co., Ltd.
-00030F Digital China (Shanghai) Networks Ltd.
-000310 Link Evolution Corp.
-000311 Micro Technology Co., Ltd.
-000312 TR-Systemtechnik GmbH
-000313 Access Media SPA
-000314 Teleware Network Systems
-000315 Cidco Incorporated
-000316 Nobell Communications, Inc.
-000317 Merlin Systems, Inc.
-000318 Cyras Systems, Inc.
-000319 Infineon AG
-00031A Beijing Broad Telecom Ltd., China
-00031B Cellvision Systems, Inc.
-00031C Svenska Hardvarufabriken AB
-00031D Taiwan Commate Computer, Inc.
-00031E Optranet, Inc.
-00031F Condev Ltd.
-000320 Xpeed, Inc.
-000321 Reco Research Co., Ltd.
-000322 IDIS Co., Ltd.
-000323 Cornet Technology, Inc.
-000324 SANYO Multimedia Tottori Co., Ltd.
-000325 Arima Computer Corp.
-000326 Iwasaki Information Systems Co., Ltd.
-000327 ACT'L
-000328 Mace Group, Inc.
-000329 F3, Inc.
-00032A UniData Communication Systems, Inc.
-00032B GAI Datenfunksysteme GmbH
-00032C ABB Industrie AG
-00032D IBASE Technology, Inc.
-00032E Scope Information Management, Ltd.
-00032F Global Sun Technology, Inc.
-000330 Imagenics, Co., Ltd.
-000331 Cisco Systems, Inc.
-000332 Cisco Systems, Inc.
-000333 Digitel Co., Ltd.
-000334 Newport Electronics
-000335 Mirae Technology
-000336 Zetes Technologies
-000337 Vaone, Inc.
-000338 Oak Technology
-000339 Eurologic Systems, Ltd.
-00033A Silicon Wave, Inc.
-00033B TAMI Tech Co., Ltd.
-00033C Daiden Co., Ltd.
-00033D ILSHin Lab
-00033E Tateyama System Laboratory Co., Ltd.
-00033F BigBand Networks, Ltd.
-000340 Floware Wireless Systems, Ltd.
-000341 Axon Digital Design
-000342 Nortel Networks
-000343 Martin Professional A/S
-000344 Tietech.Co., Ltd.
-000345 Routrek Networks Corporation
-000346 Hitachi Kokusai Electric, Inc.
-000347 Intel Corporation
-000348 Norscan Instruments, Ltd.
-000349 Vidicode Datacommunicatie B.V.
-00034A RIAS Corporation
-00034B Nortel Networks
-00034C Shanghai DigiVision Technology Co., Ltd.
-00034D Chiaro Networks, Ltd.
-00034E Pos Data Company, Ltd.
-00034F Sur-Gard Security
-000350 BTICINO SPA
-000351 Diebold, Inc.
-000352 Colubris Networks
-000353 Mitac, Inc.
-000354 Fiber Logic Communications
-000355 TeraBeam Internet Systems
-000356 Wincor Nixdorf GmbH & Co KG
-000357 Intervoice-Brite, Inc.
-000358 iCable System Co., Ltd.
-000359 DigitalSis
-00035A Photron Limited
-00035B BridgeWave Communications
-00035C Saint Song Corp.
-00035D Bosung Hi-Net Co., Ltd.
-00035E Metropolitan Area Networks, Inc.
-00035F Prueftechnik Condition Monitoring GmbH & Co. KG
-000360 PAC Interactive Technology, Inc.
-000361 Widcomm, Inc.
-000362 Vodtel Communications, Inc.
-000363 Miraesys Co., Ltd.
-000364 Scenix Semiconductor, Inc.
-000365 Kira Information & Communications, Ltd.
-000366 ASM Pacific Technology
-000367 Jasmine Networks, Inc.
-000368 Embedone Co., Ltd.
-000369 Nippon Antenna Co., Ltd.
-00036A Mainnet, Ltd.
-00036B Cisco Systems, Inc.
-00036C Cisco Systems, Inc.
-00036D Runtop, Inc.
-00036E Nicon Systems (Pty) Limited
-00036F Telsey SPA
-000370 NXTV, Inc.
-000371 Acomz Networks Corp.
-000372 ULAN
-000373 Aselsan A.S
-000374 Hunter Watertech
-000375 NetMedia, Inc.
-000376 Graphtec Technology, Inc.
-000377 Gigabit Wireless
-000378 HUMAX Co., Ltd.
-000379 Proscend Communications, Inc.
-00037A Taiyo Yuden Co., Ltd.
-00037B IDEC IZUMI Corporation
-00037C Coax Media
-00037D Stellcom
-00037E PORTech Communications, Inc.
-00037F Atheros Communications, Inc.
-000380 SSH Communications Security Corp.
-000381 Ingenico International
-000382 A-One Co., Ltd.
-000383 Metera Networks, Inc.
-000384 AETA
-000385 Actelis Networks, Inc.
-000386 Ho Net, Inc.
-000387 Blaze Network Products
-000388 Fastfame Technology Co., Ltd.
-000389 Plantronics
-00038A America Online, Inc.
-00038B PLUS-ONE I&T, Inc.
-00038C Total Impact
-00038D PCS Revenue Control Systems, Inc.
-00038E Atoga Systems, Inc.
-00038F Weinschel Corporation
-000390 Digital Video Communications, Inc.
-000392 Hyundai Teletek Co., Ltd.
-000393 Apple Computer, Inc.
-000394 Connect One
-000395 California Amplifier
-000396 EZ Cast Co., Ltd.
-000397 Watchfront Electronics
-000398 WISI
-000399 Dongju Informations & Communications Co., Ltd.
-00039A nSine, Ltd.
-00039B NetChip Technology, Inc.
-00039C OptiMight Communications, Inc.
-00039D BENQ CORPORATION
-00039E Tera System Co., Ltd.
-00039F Cisco Systems, Inc.
-0003A0 Cisco Systems, Inc.
-0003A1 HIPER Information & Communication, Inc.
-0003A2 Catapult Communications
-0003A3 MAVIX, Ltd.
-0003A4 Data Storage and Information Management
-0003A5 Medea Corporation
-0003A7 Unixtar Technology, Inc.
-0003A8 IDOT Computers, Inc.
-0003A9 AXCENT Media AG
-0003AA Watlow
-0003AB Bridge Information Systems
-0003AC Fronius Schweissmaschinen
-0003AD Emerson Energy Systems AB
-0003AE Allied Advanced Manufacturing Pte, Ltd.
-0003AF Paragea Communications
-0003B0 Xsense Technology Corp.
-0003B1 Abbott Laboratories HPD
-0003B2 Radware
-0003B3 IA Link Systems Co., Ltd.
-0003B4 Macrotek International Corp.
-0003B5 Entra Technology Co.
-0003B6 QSI Corporation
-0003B7 ZACCESS Systems
-0003B8 NetKit Solutions, LLC
-0003B9 Hualong Telecom Co., Ltd.
-0003BA Sun Microsystems
-0003BB Signal Communications Limited
-0003BC COT GmbH
-0003BD OmniCluster Technologies, Inc.
-0003BE Netility
-0003BF Centerpoint Broadband Technologies, Inc.
-0003C0 RFTNC Co., Ltd.
-0003C1 Packet Dynamics Ltd
-0003C2 Solphone K.K.
-0003C3 Micronik Multimedia
-0003C4 Tomra Systems ASA
-0003C5 Mobotix AG
-0003C6 ICUE Systems, Inc.
-0003C7 hopf Elektronik GmbH
-0003C8 CML Emergency Services
-0003C9 TECOM Co., Ltd.
-0003CA MTS Systems Corp.
-0003CB Nippon Systems Development Co., Ltd.
-0003CC Momentum Computer, Inc.
-0003CD Clovertech, Inc.
-0003CE ETEN Technologies, Inc.
-0003CF Muxcom, Inc.
-0003D0 KOANKEISO Co., Ltd.
-0003D1 Takaya Corporation
-0003D2 Crossbeam Systems, Inc.
-0003D3 Internet Energy Systems, Inc.
-0003D4 Alloptic, Inc.
-0003D5 Advanced Communications Co., Ltd.
-0003D6 RADVision, Ltd.
-0003D7 NextNet Wireless, Inc.
-0003D8 iMPath Networks, Inc.
-0003D9 Secheron SA
-0003DA Takamisawa Cybernetics Co., Ltd.
-0003DB Apogee Electronics Corp.
-0003DC Lexar Media, Inc.
-0003DD Comark Corp.
-0003DE OTC Wireless
-0003DF Desana Systems
-0003E0 RadioFrame Networks, Inc.
-0003E1 Winmate Communication, Inc.
-0003E2 Comspace Corporation
-0003E3 Cisco Systems, Inc.
-0003E4 Cisco Systems, Inc.
-0003E5 Hermstedt SG
-0003E6 Entone Technologies, Inc.
-0003E7 Logostek Co. Ltd.
-0003E8 Wavelength Digital Limited
-0003E9 Akara Canada, Inc.
-0003EA Mega System Technologies, Inc.
-0003EB Atrica
-0003EC ICG Research, Inc.
-0003ED Shinkawa Electric Co., Ltd.
-0003EE MKNet Corporation
-0003EF Oneline AG
-0003F0 Redfern Broadband Networks
-0003F1 Cicada Semiconductor, Inc.
-0003F2 Seneca Networks
-0003F3 Dazzle Multimedia, Inc.
-0003F4 NetBurner
-0003F5 Chip2Chip
-0003F6 Allegro Networks, Inc.
-0003F7 Plast-Control GmbH
-0003F8 SanCastle Technologies, Inc.
-0003F9 Pleiades Communications, Inc.
-0003FA TiMetra Networks
-0003FB Toko Seiki Company, Ltd.
-0003FC Intertex Data AB
-0003FD Cisco Systems, Inc.
-0003FE Cisco Systems, Inc.
-0003FF Connectix
-000400 LEXMARK INTERNATIONAL, INC.
-000401 Osaki Electric Co., Ltd.
-000402 Nexsan Technologies, Ltd.
-000403 Nexsi Corporation
-000404 Makino Milling Machine Co., Ltd.
-000405 ACN Technologies
-000406 Fa. Metabox AG
-000407 Topcon Positioning Systems, Inc.
-000408 Sanko Electronics Co., Ltd.
-000409 Cratos Networks
-00040A Sage Systems
-00040B 3com Europe Ltd.
-00040C KANNO Work's Ltd.
-00040D Avaya, Inc.
-00040E AVM GmbH
-00040F Asus Network Technologies, Inc.
-000410 Spinnaker Networks, Inc.
-000411 Inkra Networks, Inc.
-000412 WaveSmith Networks, Inc.
-000413 SNOM Technology AG
-000414 Umezawa Musen Denki Co., Ltd.
-000415 Rasteme Systems Co., Ltd.
-000416 Parks S/A Comunicacoes Digitais
-000417 ELAU AG
-000418 Teltronic S.A.U.
-000419 Fibercycle Networks, Inc.
-00041A ines GmbH
-00041B Digital Interfaces Ltd.
-00041C ipDialog, Inc.
-00041D Corega of America
-00041E Shikoku Instrumentation Co., Ltd.
-00041F Sony Computer Entertainment, Inc.
-000420 Slim Devices, Inc.
-000421 Ocular Networks
-000422 Gordon Kapes, Inc.
-000423 Intel Corporation
-000424 TMC s.r.l.
-000425 Atmel Corporation
-000426 Autosys
-000427 Cisco Systems, Inc.
-000428 Cisco Systems, Inc.
-000429 Pixord Corporation
-00042A Wireless Networks, Inc.
-00042B IT Access Co., Ltd.
-00042C Minet, Inc.
-00042D Sarian Systems, Ltd.
-00042E Netous Technologies, Ltd.
-00042F International Communications Products, Inc.
-000430 Netgem
-000431 GlobalStreams, Inc.
-000432 Voyetra Turtle Beach, Inc.
-000433 Cyberboard A/S
-000434 Accelent Systems, Inc.
-000435 Comptek International, Inc.
-000436 ELANsat Technologies, Inc.
-000437 Powin Information Technology, Inc.
-000438 Nortel Networks
-000439 Rosco Entertainment Technology, Inc.
-00043A Intelligent Telecommunications, Inc.
-00043B Lava Computer Mfg., Inc.
-00043C SONOS Co., Ltd.
-00043D INDEL AG
-00043E Telencomm
-00043F Electronic Systems Technology, Inc.
-000440 cyberPIXIE, Inc.
-000441 Half Dome Systems, Inc.
-000442 NACT
-000443 Agilent Technologies, Inc.
-000444 Western Multiplex Corporation
-000445 LMS Skalar Instruments GmbH
-000446 CYZENTECH Co., Ltd.
-000447 Acrowave Systems Co., Ltd.
-000448 Polaroid Professional Imaging
-000449 Mapletree Networks
-00044A iPolicy Networks, Inc.
-00044B NVIDIA
-00044C JENOPTIK
-00044D Cisco Systems, Inc.
-00044E Cisco Systems, Inc.
-00044F Leukhardt Systemelektronik GmbH
-000450 DMD Computers SRL
-000451 Medrad, Inc.
-000452 RocketLogix, Inc.
-000453 YottaYotta, Inc.
-000454 Quadriga UK
-000455 ANTARA.net
-000456 PipingHot Networks
-000457 Universal Access Technology, Inc.
-000458 Fusion X Co., Ltd.
-000459 Veristar Corporation
-00045A The Linksys Group, Inc.
-00045B Techsan Electronics Co., Ltd.
-00045C Mobiwave Pte Ltd
-00045D BEKA Elektronik
-00045E PolyTrax Information Technology AG
-00045F Evalue Technology, Inc.
-000460 Knilink Technology, Inc.
-000461 EPOX Computer Co., Ltd.
-000462 DAKOS Data & Communication Co., Ltd.
-000463 Bosch Security Systems
-000464 Fantasma Networks, Inc.
-000465 i.s.t isdn-support technik GmbH
-000466 ARMITEL Co.
-000467 Wuhan Research Institute of MII
-000468 Vivity, Inc.
-000469 Innocom, Inc.
-00046A Navini Networks
-00046B Palm Wireless, Inc.
-00046C Cyber Technology Co., Ltd.
-00046D Cisco Systems, Inc.
-00046E Cisco Systems, Inc.
-00046F Digitel S/A Industria Eletronica
-000470 ipUnplugged AB
-000471 IPrad
-000472 Telelynx, Inc.
-000473 Photonex Corporation
-000474 LEGRAND
-000475 3 Com Corporation
-000476 3 Com Corporation
-000477 Scalant Systems, Inc.
-000478 G. Star Technology Corporation
-000479 Radius Co., Ltd.
-00047A AXXESSIT ASA
-00047B Schlumberger
-00047C Skidata AG
-00047D Pelco
-00047E NKF Electronics
-00047F Chr. Mayr GmbH & Co. KG
-000480 Foundry Networks, Inc.
-000481 Econolite Control Products, Inc.
-000482 Medialogic Corp.
-000483 Deltron Technology, Inc.
-000484 Amann GmbH
-000485 PicoLight
-000486 ITTC, University of Kansas
-000487 Cogency Semiconductor, Inc.
-000488 Eurotherm Action Incorporated.
-000489 YAFO Networks, Inc.
-00048A Temia Vertriebs GmbH
-00048B Poscon Corporation
-00048C Nayna Networks, Inc.
-00048D Tone Commander Systems, Inc.
-00048E Ohm Tech Labs, Inc.
-00048F TD Systems Corp.
-000490 Optical Access
-000491 Technovision, Inc.
-000492 Hive Internet, Ltd.
-000493 Tsinghua Unisplendour Co., Ltd.
-000494 Breezecom, Ltd.
-000495 Tejas Networks
-000496 Extreme Networks
-000497 MacroSystem Digital Video AG
-000499 Chino Corporation
-00049A Cisco Systems, Inc.
-00049B Cisco Systems, Inc.
-00049C Surgient Networks, Inc.
-00049D Ipanema Technologies
-00049E Wirelink Co., Ltd.
-00049F Metrowerks
-0004A0 Verity Instruments, Inc.
-0004A1 Pathway Connectivity
-0004A2 L.S.I. Japan Co., Ltd.
-0004A3 Microchip Technology, Inc.
-0004A4 NetEnabled, Inc.
-0004A5 Barco Projection Systems NV
-0004A6 SAF Tehnika Ltd.
-0004A7 FabiaTech Corporation
-0004A8 Broadmax Technologies, Inc.
-0004A9 SandStream Technologies, Inc.
-0004AA Jetstream Communications
-0004AB Comverse Network Systems, Inc.
-0004AC IBM CORP.
-0004AD Malibu Networks
-0004AE Liquid Metronics
-0004AF Digital Fountain, Inc.
-0004B0 ELESIGN Co., Ltd.
-0004B1 Signal Technology, Inc.
-0004B2 ESSEGI SRL
-0004B3 Videotek, Inc.
-0004B4 CIAC
-0004B5 Equitrac Corporation
-0004B6 Stratex Networks, Inc.
-0004B7 AMB i.t. Holding
-0004B8 Kumahira Co., Ltd.
-0004B9 S.I. Soubou, Inc.
-0004BA KDD Media Will Corporation
-0004BB Bardac Corporation
-0004BC Giantec, Inc.
-0004BD Motorola BCS
-0004BE OptXCon, Inc.
-0004BF VersaLogic Corp.
-0004C0 Cisco Systems, Inc.
-0004C1 Cisco Systems, Inc.
-0004C2 Magnipix, Inc.
-0004C3 CASTOR Informatique
-0004C4 Allen & Heath Limited
-0004C5 ASE Technologies, USA
-0004C6 Yamaha Motor Co., Ltd.
-0004C7 NetMount
-0004C8 LIBA Maschinenfabrik GmbH
-0004C9 Micro Electron Co., Ltd.
-0004CA FreeMs Corp.
-0004CB Tdsoft Communication, Ltd.
-0004CC Peek Traffic B.V.
-0004CD Informedia Research Group
-0004CE Patria Ailon
-0004CF Seagate Technology
-0004D0 Softlink s.r.o.
-0004D1 Drew Technologies, Inc.
-0004D2 Adcon Telemetry AG
-0004D3 Toyokeiki Co., Ltd.
-0004D4 Proview Electronics Co., Ltd.
-0004D5 Hitachi Communication Systems, Inc.
-0004D6 Takagi Industrial Co., Ltd.
-0004D7 Omitec Instrumentation Ltd.
-0004D8 IPWireless, Inc.
-0004D9 Titan Electronics, Inc.
-0004DA Relax Technology, Inc.
-0004DB Tellus Group Corp.
-0004DC Nortel Networks
-0004DD Cisco Systems, Inc.
-0004DE Cisco Systems, Inc.
-0004DF Teracom Telematica Ltda.
-0004E0 Procket Networks
-0004E1 Infinior Microsystems
-0004E2 SMC Networks, Inc.
-0004E3 Accton Technology Corp.
-0004E4 Daeryung Ind., Inc.
-0004E5 Glonet Systems, Inc.
-0004E6 Banyan Network Private Limited
-0004E7 Lightpointe Communications, Inc
-0004E8 IER, Inc.
-0004E9 Infiniswitch Corporation
-0004EA Hewlett-Packard Company
-0004EB Paxonet Communications, Inc.
-0004EC Memobox SA
-0004ED Billion Electric Co., Ltd.
-0004EE Lincoln Electric Company
-0004EF Polestar Corp.
-0004F0 International Computers, Ltd
-0004F1 WhereNet
-0004F2 Circa Communications, Ltd.
-0004F3 FS FORTH-SYSTEME GmbH
-0004F4 Infinite Electronics Inc.
-0004F5 SnowShore Networks, Inc.
-0004F6 Amphus
-0004F7 Omega Band, Inc.
-0004F8 QUALICABLE TV Industria E Com., Ltda
-0004F9 Xtera Communications, Inc.
-0004FA MIST Inc.
-0004FB Commtech, Inc.
-0004FC Stratus Computer (DE), Inc.
-0004FD Japan Control Engineering Co., Ltd.
-0004FE Pelago Networks
-0004FF Acronet Co., Ltd.
-000500 Cisco Systems, Inc.
-000501 Cisco Systems, Inc.
-000502 APPLE COMPUTER
-000503 ICONAG
-000504 Naray Information & Communication Enterprise
-000505 Systems Integration Solutions, Inc.
-000506 Reddo Networks AB
-000507 Fine Appliance Corp.
-000508 Inetcam, Inc.
-000509 AVOC Nishimura Ltd.
-00050A ICS Spa
-00050B SICOM Systems, Inc.
-00050C Network Photonics, Inc.
-00050D Midstream Technologies, Inc.
-00050E 3ware, Inc.
-00050F Tanaka S/S Ltd.
-000510 Infinite Shanghai Communication Terminals Ltd.
-000511 Complementary Technologies Ltd
-000512 MeshNetworks, Inc.
-000513 VTLinx Multimedia Systems, Inc.
-000514 KDT Systems Co., Ltd.
-000515 Nuark Co., Ltd.
-000516 SMART Modular Technologies
-000517 Shellcomm, Inc.
-000518 Jupiters Technology
-000519 Siemens Building Technologies AG,
-00051A 3Com Europe Ltd.
-00051B Magic Control Technology Corporation
-00051C Xnet Technology Corp.
-00051D Airocon, Inc.
-00051E Brocade Communications Systems, Inc.
-00051F Taijin Media Co., Ltd.
-000520 Smartronix, Inc.
-000521 Control Microsystems
-000522 LEA*D Corporation, Inc.
-000523 AVL List GmbH
-000524 BTL System (HK) Limited
-000525 Puretek Industrial Co., Ltd.
-000526 IPAS GmbH
-000527 SJ Tek Co. Ltd
-000528 New Focus, Inc.
-000529 Shanghai Broadan Communication Technology Co., Ltd
-00052A Ikegami Tsushinki Co., Ltd.
-00052B HORIBA, Ltd.
-00052C Supreme Magic Corporation
-00052D Zoltrix International Limited
-00052E Cinta Networks
-00052F Leviton Voice and Data
-000530 Andiamo Systems, Inc.
-000531 Cisco Systems, Inc.
-000532 Cisco Systems, Inc.
-000533 Sanera Systems, Inc.
-000534 Northstar Engineering Ltd.
-000535 Chip PC Ltd.
-000536 Danam Communications, Inc.
-000537 Nets Technology Co., Ltd.
-000538 Merilus, Inc.
-000539 A Brand New World in Sweden AB
-00053A Willowglen Services Pte Ltd
-00053B Harbour Networks Ltd., Co. Beijing
-00053C Xircom
-00053D Agere Systems
-00053E KID Systeme GmbH
-00053F VisionTek, Inc.
-000540 FAST Corporation
-000541 Advanced Systems Co., Ltd.
-000542 Otari, Inc.
-000543 IQ Wireless GmbH
-000544 Valley Technologies, Inc.
-000545 Internet Photonics
-000546 K-Solutions Inc.
-000547 Starent Networks
-000548 Disco Corporation
-000549 Salira Optical Network Systems
-00054A Ario Data Networks, Inc.
-00054B Micro Innovation AG
-00054C RF Innovations Pty Ltd
-00054D Brans Technologies, Inc.
-00054E Philips Components
-000550 Digi-Tech Communications Limited
-000551 F & S Elektronik Systeme GmbH
-000552 Xycotec Computer GmbH
-000553 DVC Company, Inc.
-000554 Rangestar Wireless
-000555 Japan Cash Machine Co., Ltd.
-000556 360 Systems
-000557 Agile TV Corporation
-000558 Synchronous, Inc.
-000559 Intracom S.A.
-00055A Power Dsine Ltd.
-00055B Charles Industries, Ltd.
-00055C Kowa Company, Ltd.
-00055D D-Link Systems, Inc.
-00055E Cisco Systems, Inc.
-00055F Cisco Systems, Inc.
-000560 LEADER COMM.CO., LTD
-000561 nac Image Technology, Inc.
-000562 Digital View Limited
-000563 J-Works, Inc.
-000564 Tsinghua Bitway Co., Ltd.
-000565 Tailyn Communication Company Ltd.
-000566 Secui.com Corporation
-000567 Etymonic Design, Inc.
-000568 Piltofish Networks AB
-000569 VMWARE, Inc.
-00056A Heuft Systemtechnik GmbH
-00056B C.P. Technology Co., Ltd.
-00056C Hung Chang Co., Ltd.
-00056D Pacific Corporation
-00056E National Enhance Technology, Inc.
-00056F Innomedia Technologies Pvt. Ltd.
-000570 Baydel Ltd.
-000571 Seiwa Electronics Co.
-000572 Deonet Co., Ltd.
-000573 Cisco Systems, Inc.
-000574 Cisco Systems, Inc.
-000575 CDS-Electronics BV
-000576 NSM Technology Ltd.
-000577 SM Information & Communication
-000579 Universal Control Solution Corp.
-00057A Hatteras Networks
-00057B Chung Nam Electronic Co., Ltd.
-00057C RCO Security AB
-00057D Sun Communications, Inc.
-00057E Eckelmann Steuerungstechnik GmbH
-00057F Acqis Technology
-000580 Fibrolan Ltd.
-000581 Snell & Wilcox Ltd.
-000582 ClearCube Technology
-000583 ImageCom Limited
-000584 AbsoluteValue Systems, Inc.
-000585 Juniper Networks, Inc.
-000586 Lucent Technologies
-000587 Locus, Incorporated
-000588 Sensoria Corp.
-000589 National Datacomputer
-00058A Netcom Co., Ltd.
-00058B IPmental, Inc.
-00058C Opentech Inc.
-00058D Lynx Photonic Networks, Inc.
-00058E Flextronics International GmbH & Co. Nfg. KG
-00058F CLCsoft co.
-000590 Swissvoice Ltd.
-000591 Active Silicon Ltd.
-000592 Pultek Corp.
-000593 Grammar Engine Inc.
-000594 IXXAT Automation GmbH
-000595 Alesis Corporation
-000596 Genotech Co., Ltd.
-000597 Eagle Traffic Control Systems
-000598 CRONOS S.r.l.
-000599 DRS Test and Energy Management or DRS-TEM
-00059A Cisco Systems, Inc.
-00059B Cisco Systems, Inc.
-00059C Kleinknecht GmbH, Ing. Buero
-00059D Daniel Computing Systems, Inc.
-00059E Zinwell Corporation
-00059F Yotta Networks, Inc.
-0005A0 MOBILINE Kft.
-0005A1 Zenocom
-0005A2 CELOX Networks
-0005A3 QEI, Inc.
-0005A4 Lucid Voice Ltd.
-0005A5 KOTT
-0005A6 Extron Electronics
-0005A7 Hyperchip, Inc.
-0005A8 WYLE ELECTRONICS
-0005A9 Princeton Networks, Inc.
-0005AA Moore Industries International Inc.
-0005AB Cyber Fone, Inc.
-0005AC Northern Digital, Inc.
-0005AD Topspin Communications, Inc.
-0005AE Mediaport USA
-0005AF InnoScan Computing A/S
-0005B0 Korea Computer Technology Co., Ltd.
-0005B1 ASB Technology BV
-0005B2 Medison Co., Ltd.
-0005B3 Asahi-Engineering Co., Ltd.
-0005B4 Aceex Corporation
-0005B5 Broadcom Technologies
-0005B6 INSYS Microelectronics GmbH
-0005B7 Arbor Technology Corp.
-0005B8 Electronic Design Associates, Inc.
-0005B9 Airvana, Inc.
-0005BA Area Netwoeks, Inc.
-0005BB Myspace AB
-0005BC Resorsys Ltd.
-0005BD ROAX BV
-0005BE Kongsberg Seatex AS
-0005BF JustEzy Technology, Inc.
-0005C0 Digital Network Alacarte Co., Ltd.
-0005C1 A-Kyung Motion, Inc.
-0005C2 Soronti, Inc.
-0005C3 Pacific Instruments, Inc.
-0005C4 Telect, Inc.
-0005C5 Flaga HF
-0005C6 Triz Communications
-0005C7 I/F-COM A/S
-0005C8 VERYTECH
-0005C9 LG Innotek
-0005CA Hitron Technology, Inc.
-0005CB ROIS Technologies, Inc.
-0005CC Sumtel Communications, Inc.
-0005CD Denon, Ltd.
-0005CE Prolink Microsystems Corporation
-0005CF Thunder River Technologies, Inc.
-0005D0 Solinet Systems
-0005D1 Metavector Technologies
-0005D2 DAP Technologies
-0005D3 eProduction Solutions, Inc.
-0005D4 FutureSmart Networks, Inc.
-0005D5 Speedcom Wireless
-0005D6 Titan Wireless
-0005D7 Vista Imaging, Inc.
-0005D8 Arescom, Inc.
-0005D9 Techno Valley, Inc.
-0005DA Apex Automationstechnik
-0005DB Nentec GmbH
-0005DC Cisco Systems, Inc.
-0005DD Cisco Systems, Inc.
-0005DE Gi Fone Korea, Inc.
-0005DF Electronic Innovation, Inc.
-0005E0 Empirix Corp.
-0005E1 Trellis Photonics, Ltd.
-0005E2 Creativ Network Technologies
-0005E3 LightSand Communications, Inc.
-0005E4 Red Lion Controls L.P.
-0005E5 Renishaw PLC
-0005E6 Egenera, Inc.
-0005E7 Netrake Corp.
-0005E8 TurboWave, Inc.
-0005E9 Unicess Network, Inc.
-0005EA Rednix
-0005EB Blue Ridge Networks, Inc.
-0005EC Mosaic Systems Inc.
-0005ED Technikum Joanneum GmbH
-0005EE BEWATOR Group
-0005EF ADOIR Digital Technology
-0005F0 SATEC
-0005F1 Vrcom, Inc.
-0005F2 Power R, Inc.
-0005F3 Weboyn
-0005F4 System Base Co., Ltd.
-0005F5 OYO Geospace Corp.
-0005F6 Young Chang Co. Ltd.
-0005F7 Analog Devices, Inc.
-0005F8 Real Time Access, Inc.
-0005F9 TOA Corporation
-0005FA IPOptical, Inc.
-0005FB ShareGate, Inc.
-0005FC Schenck Pegasus Corp.
-0005FD PacketLight Networks Ltd.
-0005FE Traficon N.V.
-0005FF SNS Solutions, Inc.
-000600 Tokyo Electronic Industry Co., Ltd.
-000601 Otanikeiki Co., Ltd.
-000602 Cirkitech Electronics Co.
-000603 Baker Hughes Inc.
-000604 @Track Communications, Inc.
-000605 Inncom International, Inc.
-000606 RapidWAN, Inc.
-000607 Omni Directional Control Technology Inc.
-000608 At-Sky SAS
-000609 Crossport Systems
-00060A Blue2space
-00060B Paceline Systems Corporation
-00060C Melco Industries, Inc.
-00060D Wave7 Optics
-00060E IGYS Systems, Inc.
-00060F Narad Networks Inc
-000610 Abeona Networks Inc
-000611 Zeus Wireless, Inc.
-000612 Accusys, Inc.
-000613 Kawasaki Microelectronics Incorporated
-000614 Prism Holdings
-000615 Kimoto Electric Co., Ltd.
-000616 Tel Net Co., Ltd.
-000617 Redswitch Inc.
-000618 DigiPower Manufacturing Inc.
-000619 Connection Technology Systems
-00061A Zetari Inc.
-00061B Portable Systems, IBM Japan Co, Ltd
-00061C Hoshino Metal Industries, Ltd.
-00061D MIP Telecom, Inc.
-00061E Maxan Systems
-00061F Vision Components GmbH
-000620 Serial System Ltd.
-000621 Hinox, Co., Ltd.
-000622 Chung Fu Chen Yeh Enterprise Corp.
-000623 MGE UPS Systems France
-000624 Gentner Communications Corp.
-000625 The Linksys Group, Inc.
-000626 MWE GmbH
-000627 Uniwide Technologies, Inc.
-000628 Cisco Systems, Inc.
-000629 IBM CORPORATION
-00062A Cisco Systems, Inc.
-00062B INTRASERVER TECHNOLOGY
-00062C Network Robots, Inc.
-00062D TouchStar Technologies, L.L.C.
-00062E Aristos Logic Corp.
-00062F Pivotech Systems Inc.
-000630 Adtranz Sweden
-000631 Optical Solutions, Inc.
-000632 Mesco Engineering GmbH
-000633 Heimann Biometric Systems GmbH
-000634 GTE Airfone Inc.
-000635 PacketAir Networks, Inc.
-000636 Jedai Broadband Networks
-000637 Toptrend-Meta Information (ShenZhen) Inc.
-000638 Sungjin C&C Co., Ltd.
-000639 Newtec
-00063A Dura Micro, Inc.
-00063B Arcturus Networks, Inc.
-00063C NMI Electronics Ltd
-00063D Microwave Data Systems Inc.
-00063E Opthos Inc.
-00063F Everex Communications Inc.
-000640 White Rock Networks
-000641 ITCN
-000642 Genetel Systems Inc.
-000643 SONO Computer Co., Ltd.
-000644 NEIX Inc.
-000645 Meisei Electric Co. Ltd.
-000646 ShenZhen XunBao Network Technology Co Ltd
-000647 Etrali S.A.
-000648 Seedsware, Inc.
-000649 Quante
-00064A Honeywell Co., Ltd. (KOREA)
-00064B Alexon Co., Ltd.
-00064C Invicta Networks, Inc.
-00064D Sencore
-00064E Broad Net Technology Inc.
-00064F PRO-NETS Technology Corporation
-000650 Tiburon Networks, Inc.
-000651 Aspen Networks Inc.
-000652 Cisco Systems, Inc.
-000653 Cisco Systems, Inc.
-000654 Maxxio Technologies
-000655 Yipee, Inc.
-000656 Tactel AB
-000657 Market Central, Inc.
-000658 Helmut Fischer GmbH & Co. KG
-000659 EAL (Apeldoorn) B.V.
-00065A Strix Systems
-00065B Dell Computer Corp.
-00065C Malachite Technologies, Inc.
-00065D Heidelberg Web Systems
-00065E Photuris, Inc.
-00065F ECI Telecom - NGTS Ltd.
-000660 NADEX Co., Ltd.
-000661 NIA Home Technologies Corp.
-000662 MBM Technology Ltd.
-000663 Human Technology Co., Ltd.
-000664 Fostex Corporation
-000665 Sunny Giken, Inc.
-000666 Roving Networks
-000667 Tripp Lite
-000668 Vicon Industries Inc.
-000669 Datasound Laboratories Ltd
-00066A InfiniCon Systems, Inc.
-00066B Sysmex Corporation
-00066C Robinson Corporation
-00066D Compuprint S.P.A.
-00066E Delta Electronics, Inc.
-00066F Korea Data Systems
-000670 Upponetti Oy
-000671 Softing AG
-000672 Netezza
-000673 Optelecom, Inc.
-000674 Spectrum Control, Inc.
-000675 Banderacom, Inc.
-000676 Novra Technologies Inc.
-000677 SICK AG
-000678 Marantz Japan, Inc.
-000679 Konami Corporation
-00067A JMP Systems
-00067B Toplink C&C Corporation
-00067C CISCO SYSTEMS, INC.
-00067D Takasago Ltd.
-00067E WinCom Systems, Inc.
-00067F Rearden Steel Technologies
-000680 Card Access, Inc.
-000681 Goepel Electronic GmbH
-000682 Convedia
-000683 Bravara Communications, Inc.
-000684 Biacore AB
-000685 NetNearU Corporation
-000686 ZARDCOM Co., Ltd.
-000687 Omnitron Systems Technology, Inc.
-000688 Telways Communication Co., Ltd.
-000689 yLez Technologies Pte Ltd
-00068A NeuronNet Co. Ltd. R&D Center
-00068B AirRunner Technologies, Inc.
-00068C 3Com Corporation
-00068D SANgate Systems
-00068E HID Corporation
-00068F Telemonitor, Inc.
-000690 Euracom Communication GmbH
-000691 PT Inovacao
-000692 Intruvert Networks, Inc.
-000693 Flexus Computer Technology, Inc.
-000694 Mobillian Corporation
-000695 Ensure Technologies, Inc.
-000696 Advent Networks
-000697 R & D Center
-000698 egnite Software GmbH
-000699 Vida Design Co.
-00069A e & Tel
-00069B AVT Audio Video Technologies GmbH
-00069C Transmode Systems AB
-00069D Petards Mobile Intelligence
-00069E UNIQA, Inc.
-00069F Kuokoa Networks
-0006A0 Mx Imaging
-0006A1 Celsian Technologies, Inc.
-0006A2 Microtune, Inc.
-0006A3 Bitran Corporation
-0006A4 INNOWELL Corp.
-0006A5 PINON Corp.
-0006A6 Artistic Licence (UK) Ltd
-0006A7 Primarion
-0006A8 KC Technology, Inc.
-0006A9 Universal Instruments Corp.
-0006AA Miltope Corporation
-0006AB W-Link Systems, Inc.
-0006AC Intersoft Co.
-0006AD KB Electronics Ltd.
-0006AE Himachal Futuristic Communications Ltd
-0006B0 Comtech EF Data Corp.
-0006B1 Sonicwall
-0006B2 Linxtek Co.
-0006B3 Diagraph Corporation
-0006B4 Vorne Industries, Inc.
-0006B5 Luminent, Inc.
-0006B6 Nir-Or Israel Ltd.
-0006B7 TELEM GmbH
-0006B8 Bandspeed Pty Ltd
-0006B9 A5TEK Corp.
-0006BA Westwave Communications
-0006BB ATI Technologies Inc.
-0006BC Macrolink, Inc.
-0006BD BNTECHNOLOGY Co., Ltd.
-0006BE Baumer Optronic GmbH
-0006BF Accella Technologies Co., Ltd.
-0006C0 United Internetworks, Inc.
-0006C1 CISCO SYSTEMS, INC.
-0006C2 Smartmatic Corporation
-0006C3 Schindler Elevators Ltd.
-0006C4 Piolink Inc.
-0006C5 INNOVI Technologies Limited
-0006C6 lesswire AG
-0006C7 RFNET Technologies Pte Ltd (S)
-0006C8 Sumitomo Metal Micro Devices, Inc.
-0006C9 Technical Marketing Research, Inc.
-0006CA American Computer & Digital Components, Inc. (ACDC)
-0006CB Jotron Electronics A/S
-0006CC JMI Electronics Co., Ltd.
-0006CD CreoScitex Corporation Ltd.
-0006CE DATENO
-0006CF Thales Avionics In-Flight Systems, LLC
-0006D0 Elgar Electronics Corp.
-0006D1 Tahoe Networks, Inc.
-0006D2 Tundra Semiconductor Corp.
-0006D3 Alpha Telecom, Inc. U.S.A.
-0006D4 Interactive Objects, Inc.
-0006D5 Diamond Systems Corp.
-0006D6 Cisco Systems, Inc.
-0006D7 Cisco Systems, Inc.
-0006D8 Maple Optical Systems
-0006D9 IPM-Net S.p.A.
-0006DA ITRAN Communications Ltd.
-0006DB ICHIPS Co., Ltd.
-0006DC Syabas Technology (Amquest)
-0006DD AT & T Laboratories - Cambridge Ltd
-0006DE Flash Technology
-0006DF AIDONIC Corporation
-0006E0 MAT Co., Ltd.
-0006E1 Techno Trade s.a
-0006E2 Ceemax Technology Co., Ltd.
-0006E3 Quantitative Imaging Corporation
-0006E4 Citel Technologies Ltd.
-0006E5 Fujian Newland Computer Ltd. Co.
-0006E6 DongYang Telecom Co., Ltd.
-0006E7 Bit Blitz Communications Inc.
-0006E8 Optical Network Testing, Inc.
-0006E9 Intime Corp.
-0006EA ELZET80 Mikrocomputer GmbH&Co. KG
-0006EB Global Data
-0006EC M/A COM Private Radio System Inc.
-0006ED Inara Networks
-0006EE Shenyang Neu-era Information & Technology Stock Co., Ltd
-0006EF Maxxan Systems, Inc.
-0006F0 Digeo, Inc.
-0006F1 Optillion
-0006F2 Platys Communications
-0006F3 AcceLight Networks
-0006F4 Prime Electronics & Satellitics Inc.
-0006F9 Mitsui Zosen Systems Research Inc.
-0006FA IP SQUARE Co, Ltd.
-0006FB Hitachi Printing Solutions, Ltd.
-0006FC Fnet Co., Ltd.
-0006FD Comjet Information Systems Corp.
-0006FE Celion Networks, Inc.
-0006FF Sheba Systems Co., Ltd.
-000700 Zettamedia Korea
-000701 RACAL-DATACOM
-000702 Varian Medical Systems
-000703 CSEE Transport
-000705 Endress & Hauser GmbH & Co
-000706 Sanritz Corporation
-000707 Interalia Inc.
-000708 Bitrage Inc.
-000709 Westerstrand Urfabrik AB
-00070A Unicom Automation Co., Ltd.
-00070B Octal, SA
-00070C SVA-Intrusion.com Co. Ltd.
-00070D Cisco Systems Inc.
-00070E Cisco Systems Inc.
-00070F Fujant, Inc.
-000710 Adax, Inc.
-000711 Acterna
-000712 JAL Information Technology
-000713 IP One, Inc.
-000714 Brightcom
-000715 General Research of Electronics, Inc.
-000716 J & S Marine Ltd.
-000717 Wieland Electric GmbH
-000718 iCanTek Co., Ltd.
-000719 Mobiis Co., Ltd.
-00071A Finedigital Inc.
-00071B Position Technology Inc.
-00071C AT&T Fixed Wireless Services
-00071D Satelsa Sistemas Y Aplicaciones De Telecomunicaciones, S.A.
-00071E Tri-M Engineering / Nupak Dev. Corp.
-00071F European Systems Integration
-000720 Trutzschler GmbH & Co. KG
-000721 Formac Elektronik GmbH
-000722 Nielsen Media Research
-000723 ELCON Systemtechnik GmbH
-000724 Telemax Co., Ltd.
-000725 Bematech International Corp.
-000727 Zi Corporation (HK) Ltd.
-000728 Neo Telecom
-000729 Kistler Instrumente AG
-00072A Innovance Networks
-00072B Jung Myung Telecom Co., Ltd.
-00072C Fabricom
-00072D CNSystems
-00072E North Node AB
-00072F Instransa, Inc.
-000730 Hutchison OPTEL Telecom Technology Co., Ltd.
-000731 Spiricon, Inc.
-000732 AAEON Technology Inc.
-000733 DANCONTROL Engineering
-000734 ONStor, Inc.
-000735 Flarion Technologies, Inc.
-000736 Data Video Technologies Co., Ltd.
-000737 Soriya Co. Ltd.
-000738 Young Technology Co., Ltd.
-000739 Motion Media Technology Ltd.
-00073A Inventel Systemes
-00073B Tenovis GmbH & Co KG
-00073C Telecom Design
-00073D Nanjing Postel Telecommunications Co., Ltd.
-00073E China Great-Wall Computer Shenzhen Co., Ltd.
-00073F Woojyun Systec Co., Ltd.
-000740 Melco Inc.
-000741 Sierra Automated Systems
-000742 Current Technologies
-000743 Chelsio Communications
-000744 Unico, Inc.
-000745 Radlan Computer Communications Ltd.
-000746 Interlink BT, LLC
-000747 Mecalc
-000748 The Imaging Source Europe
-000749 CENiX Inc.
-00074A Carl Valentin GmbH
-00074B Daihen Corporation
-00074C Beicom Inc.
-00074D Zebra Technologies Corp.
-00074E Naughty boy co., Ltd.
-00074F Cisco Systems, Inc.
-000750 Cisco Systems, Inc.
-000751 m.u.t. - GmbH
-000752 Rhythm Watch Co., Ltd.
-000753 Beijing Qxcomm Technology Co., Ltd.
-000754 Xyterra Computing, Inc.
-000755 Lafon SA
-000756 Juyoung Telecom
-000757 Topcall International AG
-000758 Dragonwave
-000759 Boris Manufacturing Corp.
-00075A Air Products and Chemicals, Inc.
-00075B Gibson Guitars
-00075C ENCAD, Inc.
-00075D Celleritas Inc.
-00075E Pulsar Technologies, Inc.
-00075F VCS Video Communication Systems AG
-000760 TOMIS Information & Telecom Corp.
-000761 Logitech SA
-000762 Group Sense Limited
-000763 Sunniwell Cyber Tech. Co., Ltd.
-000764 YoungWoo Telecom Co. Ltd.
-000765 Jade Quantum Technologies, Inc.
-000766 Chou Chin Industrial Co., Ltd.
-000767 Yuxing Electronics Company Limited
-000768 Danfoss A/S
-000769 Italiana Macchi SpA
-00076A NEXTEYE Co., Ltd.
-00076B Stralfors AB
-00076C Daehanet, Inc.
-00076D Flexlight Networks
-00076E Sinetica Corporation Ltd.
-00076F Synoptics Limited
-000770 Locusnetworks Corporation
-000771 Embedded System Corporation
-000772 Alcatel Shanghai Bell Co., Ltd.
-000773 Ascom Powerline Communications Ltd.
-000774 GuangZhou Thinker Technology Co. Ltd.
-000775 Valence Semiconductor, Inc.
-000776 Federal APD
-000777 Motah Ltd.
-000778 GERSTEL GmbH & Co. KG
-000779 Sungil Telecom Co., Ltd.
-00077A Infoware System Co., Ltd.
-00077B Millimetrix Broadband Networks
-00077C OnTime Networks
-00077E Elrest GmbH
-00077F J Communications Co., Ltd.
-000780 Bluegiga Technologies OY
-000781 Itron Inc.
-000782 Nauticus Networks, Inc.
-000783 SynCom Network, Inc.
-000784 Cisco Systems Inc.
-000785 Cisco Systems Inc.
-000786 Wireless Networks Inc.
-000787 Idea System Co., Ltd.
-000788 Clipcomm, Inc.
-000789 Eastel Systems Corporation
-00078A Mentor Data System Inc.
-00078B Wegener Communications, Inc.
-00078C Elektronikspecialisten i Borlange AB
-00078D NetEngines Ltd.
-00078E Garz & Friche GmbH
-00078F Emkay Innovative Products
-000790 Tri-M Technologies (s) Limited
-000791 International Data Communications, Inc.
-000792 Suetron Electronic GmbH
-000794 Simple Devices, Inc.
-000795 Elitegroup Computer System Co. (ECS)
-000796 LSI Systems, Inc.
-000797 Netpower Co., Ltd.
-000798 Selea SRL
-000799 Tipping Point Technologies, Inc.
-00079A SmartSight Networks Inc.
-00079B Aurora Networks
-00079C Golden Electronics Technology Co., Ltd.
-00079D Musashi Co., Ltd.
-00079E Ilinx Co., Ltd.
-00079F Action Digital Inc.
-0007A0 e-Watch Inc.
-0007A1 VIASYS Healthcare GmbH
-0007A2 Opteon Corporation
-0007A3 Ositis Software, Inc.
-0007A4 GN Netcom Ltd.
-0007A5 Y.D.K Co. Ltd.
-0007A6 Home Automation, Inc.
-0007A7 A-Z Inc.
-0007A8 Haier Group Technologies Ltd.
-0007A9 Novasonics
-0007AA Quantum Data Inc.
-0007AC Eolring
-0007AD Pentacon GmbH Foto-und Feinwerktechnik
-0007AE Layer N Networks
-0007AF N-Tron Corp.
-0007B0 Office Details, Inc.
-0007B1 Equator Technologies
-0007B2 Transaccess S.A.
-0007B3 Cisco Systems Inc.
-0007B4 Cisco Systems Inc.
-0007B5 Any One Wireless Ltd.
-0007B6 Telecom Technology Ltd.
-0007B7 Samurai Ind. Prods Eletronicos Ltda
-0007B8 American Predator Corp.
-0007B9 Ginganet Corporation
-0007BA Xebeo Communications, Inc.
-0007BB Candera Inc.
-0007BC Identix Inc.
-0007BD Radionet Ltd.
-0007BE DataLogic SpA
-0007BF Armillaire Technologies, Inc.
-0007C0 NetZerver Inc.
-0007C1 Overture Networks, Inc.
-0007C2 Netsys Telecom
-0007C3 Cirpack
-0007C4 JEAN Co. Ltd.
-0007C5 Gcom, Inc.
-0007C6 VDS Vosskuhler GmbH
-0007C7 Synectics Systems Limited
-0007C8 Brain21, Inc.
-0007C9 Technol Seven Co., Ltd.
-0007CA Creatix Polymedia Ges Fur Kommunikaitonssysteme
-0007CB Freebox SA
-0007CC Kaba Benzing GmbH
-0007CD NMTEL Co., Ltd.
-0007CE Cabletime Limited
-0007CF Anoto AB
-0007D0 Automat Engenharia de Automaoa Ltda.
-0007D1 Spectrum Signal Processing Inc.
-0007D2 Logopak Systeme
-0007D3 Stork Digital Imaging B.V.
-0007D4 Zhejiang Yutong Network Communication Co Ltd.
-0007D5 3e Technologies Int;., Inc.
-0007D6 Commil Ltd.
-0007D7 Caporis Networks AG
-0007D8 Hitron Systems Inc.
-0007D9 Splicecom
-0007DA Neuro Telecom Co., Ltd.
-0007DB Kirana Networks, Inc.
-0007DC Atek Co, Ltd.
-0007DD Cradle Technologies
-0007DE eCopilt AB
-0007DF Vbrick Systems Inc.
-0007E0 Palm Inc.
-0007E1 WIS Communications Co. Ltd.
-0007E2 Bitworks, Inc.
-0007E3 Navcom Technology, Inc.
-0007E4 SoftRadio Co., Ltd.
-0007E5 Coup Corporation
-0007E6 edgeflow Canada Inc.
-0007E7 FreeWave Technologies
-0007E8 St. Bernard Software
-0007E9 Intel Corporation
-0007EA Massana, Inc.
-0007EB Cisco Systems Inc.
-0007EC Cisco Systems Inc.
-0007ED Altera Corporation
-0007EE telco Informationssysteme GmbH
-0007EF Lockheed Martin Tactical Systems
-0007F0 LogiSync Corporation
-0007F1 TeraBurst Networks Inc.
-0007F2 IOA Corporation
20200007F3 Think Engine Networks
20210007F4 Eletex Co., Ltd.
20220007F5 Bridgeco Co AG
20230007F6 Qqest Software Systems
20240007F7 Galtronics
20250007F8 ITDevices, Inc.
20260007F9 Phonetics, Inc.
20270007FA ITT Co., Ltd.
20280007FB Giga Stream UMTS Technologies GmbH
20290007FC Adept Systems Inc.
20300007FD LANergy Ltd.
20310007FE Rigaku Corporation
20320007FF Gluon Networks
2033000800 MULTITECH SYSTEMS, INC.
2034000801 HighSpeed Surfing Inc.
2035000802 Compaq Computer Corporation
2036000803 Cos Tron
2037000804 ICA Inc.
2038000805 Techno-Holon Corporation
2039000806 Raonet Systems, Inc.
2040000807 Access Devices Limited
2041000808 PPT Vision, Inc.
2042000809 Systemonic AG
204300080A Espera-Werke GmbH
204400080B Birka BPA Informationssystem AB
204500080C VDA elettronica SrL
204600080D Toshiba
204700080E Motorola, BCS
204800080F Proximion Fiber Optics AB
2049000810 Key Technology, Inc.
2050000811 VOIX Corporation
2051000812 GM-2 Corporation
2052000813 Diskbank, Inc.
2053000814 TIL Technologies
2054000815 CATS Co., Ltd.
2055000816 Bluetags A/S
2056000817 EmergeCore Networks LLC
2057000818 Pixelworks, Inc.
2058000819 Banksys
205900081A Sanrad Intelligence Storage Communications (2000) Ltd.
206000081B Windigo Systems
206100081C @pos.com
206200081D Ipsil, Incorporated
206300081E Repeatit AB
206400081F Pou Yuen Tech Corp. Ltd.
2065000820 Cisco Systems Inc.
2066000821 Cisco Systems Inc.
2067000822 InPro Comm
2068000823 Texa Corp.
2069000824 Promatek Industries Ltd.
2070000825 Acme Packet
2071000826 Colorado Med Tech
2072000827 Pirelli Cables & Systems
2073000828 Koei Engineering Ltd.
2074000829 Aval Nagasaki Corporation
207500082A Powerwallz Network Security
207600082B Wooksung Electronics, Inc.
207700082C Homag AG
207800082D Indus Teqsite Private Limited
207900082E Multitone Electronics PLC
208000084E DivergeNet, Inc.
208100084F Qualstar Corporation
2082000850 Arizona Instrument Corp.
2083000851 Canadian Bank Note Company, Ltd.
2084000852 Davolink Co. Inc.
2085000853 Schleicher GmbH & Co. Relaiswerke KG
2086000854 Netronix, Inc.
2087000855 NASA-Goddard Space Flight Center
2088000856 Gamatronic Electronic Industries Ltd.
2089000857 Polaris Networks, Inc.
2090000858 Novatechnology Inc.
2091000859 ShenZhen Unitone Electronics Co., Ltd.
209200085A IntiGate Inc.
209300085B Hanbit Electronics Co., Ltd.
209400085C Shanghai Dare Technologies Co. Ltd.
209500085D Aastra
209600085E PCO AG
209700085F Picanol N.V.
2098000860 LodgeNet Entertainment Corp.
2099000861 SoftEnergy Co., Ltd.
2100000862 NEC Eluminant Technologies, Inc.
2101000863 Entrisphere Inc.
2102000864 Fasy S.p.A.
2103000865 JASCOM CO., LTD
2104000866 DSX Access Systems, Inc.
2105000867 Uptime Devices
2106000868 PurOptix
2107000869 Command-e Technology Co.,Ltd.
210800086A Industrie Technik IPS GmbH
210900086B MIPSYS
211000086C Plasmon LMS
211100086D Missouri FreeNet
211200086E Hyglo AB
211300086F Resources Computer Network Ltd.
2114000870 Rasvia Systems, Inc.
2115000871 NORTHDATA Co., Ltd.
2116000872 Sorenson Technologies, Inc.
2117000873 DAP Design B.V.
2118000874 Dell Computer Corp.
2119000875 Acorp Electronics Corp.
2120000876 SDSystem
2121000877 Liebert HIROSS S.p.A.
2122000878 Benchmark Storage Innovations
2123000879 CEM Corporation
212400087A Wipotec GmbH
212500087B RTX Telecom A/S
212600087C Cisco Systems, Inc.
212700087D Cisco Systems Inc.
212800087E Bon Electro-Telecom Inc.
212900087F SPAUN electronic GmbH & Co. KG
2130000880 BroadTel Canada Communications inc.
2131000881 DIGITAL HANDS CO.,LTD.
2132000882 SIGMA CORPORATION
2133000883 Hewlett-Packard Company
2134000884 Index Braille AB
2135000885 EMS Dr. Thomas Wuensche
2136000886 Hansung Teliann, Inc.
2137000887 Maschinenfabrik Reinhausen GmbH
2138000888 OULLIM Information Technology Inc,.
2139000889 Echostar Technologies Corp
214000088A Minds@Work
214100088B Tropic Networks Inc.
214200088C Quanta Network Systems Inc.
214300088D Sigma-Links Inc.
214400088E Nihon Computer Co., Ltd.
214500088F ADVANCED DIGITAL TECHNOLOGY
2146000890 AVILINKS SA
2147000891 Lyan Inc.
2148000892 EM Solutions
2149000894 InnoVISION Multimedia Ltd.
2150000895 DIRC Technologie GmbH & Co.KG
2151000896 Printronix, Inc.
2152000897 Quake Technologies
2153000898 Gigabit Optics Corporation
2154000899 Netbind, Inc.
215500089A Alcatel Microelectronics
215600089B ICP Electronics Inc.
215700089C Elecs Industry Co., Ltd.
215800089D UHD-Elektronik
215900089E Beijing Enter-Net co.LTD
216000089F EFM Networks
21610008A0 Stotz Feinmesstechnik GmbH
21620008A1 CNet Technology Inc.
21630008A2 ADI Engineering, Inc.
21640008A3 Cisco Systems
21650008A4 Cisco Systems
21660008A5 Peninsula Systems Inc.
21670008A6 Multiware & Image Co., Ltd.
21680008A7 iLogic Inc.
21690008A8 Systec Co., Ltd.
21700008A9 SangSang Technology, Inc.
21710008AA KARAM
21720008AB EnerLinx.com, Inc.
21730008AD Toyo-Linx Co., Ltd.
21740008AE Packetfront
21750008AF Novatec Corporation
21760008B0 BKtel communications GmbH
21770008B1 ProQuent Systems
21780008B2 SHENZHEN COMPASS TECHNOLOGY DEVELOPMENT CO.,LTD
21790008B3 Fastwel
21800008B4 SYSPOL
21810008B5 TAI GUEN ENTERPRISE CO., LTD
21820008B6 RouteFree, Inc.
21830008B7 HIT Incorporated
21840008B8 E.F. Johnson
21850008B9 KAON MEDIA Co., Ltd.
21860008BA Erskine Systems Ltd
21870008BB NetExcell
21880008BC Ilevo AB
21890008BD TEPG-US
21900008BE XENPAK MSA Group
21910008BF Aptus Elektronik AB
21920008C0 ASA SYSTEMS
21930008C1 Avistar Communications Corporation
21940008C2 Cisco Systems
21950008C3 Contex A/S
21960008C4 Hikari Co.,Ltd.
21970008C5 Liontech Co., Ltd.
21980008C6 Philips Consumer Communications
21990008C7 COMPAQ COMPUTER CORPORATION
22000008C8 Soneticom, Inc.
22010008C9 TechniSat Digital GmbH
22020008CA TwinHan Technology Co.,Ltd
22030008CB Zeta Broadband Inc.
22040008CC Remotec, Inc.
22050008CD With-Net Inc
22060008CF Nippon Koei Power Systems Co., Ltd.
22070008D0 Musashi Engineering Co., LTD.
22080008D1 KAREL INC.
22090008D2 ZOOM Networks Inc.
22100008D3 Hercules Technologies S.A.
22110008D4 IneoQuest Technologies, Inc
22120008D5 Vanguard Managed Solutions
22130008D6 HASSNET Inc.
22140008D7 HOW CORPORATION
22150008D8 Dowkey Microwave
22160008D9 Mitadenshi Co.,LTD
22170008DA SofaWare Technologies Ltd.
22180008DB Corrigent Systems
22190008DC Wiznet
22200008DD Telena Communications, Inc.
22210008DE 3UP Systems
22220008DF Alistel Inc.
22230008E0 ATO Technology Ltd.
22240008E1 Barix AG
22250008E2 Cisco Systems
22260008E3 Cisco Systems
22270008E4 Envenergy Inc
22280008E5 IDK Corporation
22290008E6 Littlefeet
22300008E7 SHI ControlSystems,Ltd.
22310008E8 Excel Master Ltd.
22320008E9 NextGig
22330008EA Motion Control Engineering, Inc
22340008EB ROMWin Co.,Ltd.
22350008EC Zonu, Inc.
22360008ED ST&T Instrument Corp.
22370008EE Logic Product Development
22380008EF DIBAL,S.A.
22390008F0 Next Generation Systems, Inc.
22400008F1 Voltaire
22410008F2 C&S Technology
22420008F3 WANY
22430008F4 Bluetake Technology Co., Ltd.
22440008F5 YESTECHNOLOGY Co.,Ltd.
22450008F6 SUMITOMO ELECTRIC HIGHTECHS.co.,ltd.
22460008F7 Hitachi Ltd, Semiconductor &amp; Integrated Circuits Gr
22470008F8 Guardall Ltd
22480008F9 Padcom, Inc.
22490008FA Karl E.Brinkmann GmbH
22500008FB SonoSite, Inc.
22510008FC Gigaphoton Inc.
22520008FD BlueKorea Co., Ltd.
22530008FE UNIK C&C Co.,Ltd.
22540008FF Trilogy Broadcast (Holdings) Ltd
2255000900 TMT
2256000901 Shenzhen Shixuntong Information & Technoligy Co
2257000902 Redline Communications Inc.
2258000903 Panasas, Inc
2259000904 MONDIAL electronic
2260000905 iTEC Technologies Ltd.
2261000906 Esteem Networks
2262000907 Chrysalis Development
2263000908 VTech Technology Corp.
2264000909 Telenor Connect A/S
226500090A SnedFar Technology Co., Ltd.
226600090B MTL Instruments PLC
226700090C Mayekawa Mfg. Co. Ltd.
226800090D LEADER ELECTRONICS CORP.
226900090E Helix Technology Inc.
227000090F Fortinet Inc.
2271000910 Simple Access Inc.
2272000911 Cisco Systems
2273000912 Cisco Systems
2274000914 COMPUTROLS INC.
2275000915 CAS Corp.
2276000916 Listman Home Technologies, Inc.
2277000917 WEM Technology Inc
2278000918 SAMSUNG TECHWIN CO.,LTD
2279000919 MDS Gateways
228000091A Macat Optics & Electronics Co., Ltd.
228100091B Digital Generation Inc.
228200091C CacheVision, Inc
228300091D Proteam Computer Corporation
228400091E Firstech Technology Corp.
228500091F A&amp;D Co., Ltd.
2286000920 EpoX COMPUTER CO.,LTD.
2287000921 Planmeca Oy
2288000922 Touchless Sensor Technology AG
2289000923 Heaman System Co., Ltd
2290000924 Telebau GmbH
2291000925 VSN Systemen BV
2292000926 YODA COMMUNICATIONS, INC.
2293000927 TOYOKEIKI CO.,LTD.
2294000928 Telecore Inc
2295000929 Sanyo Industries (UK) Limited
229600092A MYTECS Co.,Ltd.
229700092B iQstor Networks, Inc.
229800092C Hitpoint Inc.
229900092D High Tech Computer, Corp.
230000092E B&Tech System Inc.
230100092F Akom Technology Corporation
2302000930 AeroConcierge Inc.
2303000931 Future Internet, Inc.
2304000932 Omnilux
2305000933 OPTOVALLEY Co. Ltd.
2306000934 Dream-Multimedia-Tv GmbH
2307000935 Sandvine Incorporated
2308000936 Ipetronik GmbH & Co.KG
2309000937 Inventec Appliance Corp
2310000938 Allot Communications
2311000939 ShibaSoku Co.,Ltd.
231200093A Molex Fiber Optics
231300093B HYUNDAI NETWORKS INC.
231400093C Jacques Technologies P/L
231500093D Newisys,Inc.
231600093E C&I Technologies
231700093F Double-Win Enterpirse CO., LTD
2318000940 AGFEO GmbH & Co. KG
2319000941 Allied Telesis K.K.
2320000942 CRESCO, LTD.
2321000943 Cisco Systems
2322000944 Cisco Systems
2323000945 Palmmicro Communications Inc
2324000946 Cluster Labs GmbH
2325000947 Aztek, Inc.
2326000948 Vista Control Systems, Corp.
2327000949 Glyph Technologies Inc.
232800094A Homenet Communications
232900094B FillFactory NV
233000094C Communication Weaver Co.,Ltd.
233100094D Braintree Communications Pty Ltd
233200094E BARTECH SYSTEMS INTERNATIONAL, INC
233300094F elmegt GmbH & Co. KG
2334000950 Independent Storage Corporation
2335000951 Apogee Instruments, Inc
2336000952 Auerswald GmbH & Co. KG
2337000953 Linkage System Integration Co.Ltd.
2338000954 AMiT spol. s. r. o.
2339000955 Young Generation International Corp.
2340000956 Network Systems Group, Ltd. (NSG)
2341000957 Supercaller, Inc.
2342000958 INTELNET S.A.
2343000959 Sitecsoft
234400095A RACEWOOD TECHNOLOGY
234500095B Netgear, Inc.
234600095C Philips Medical Systems - Cardiac and Monitoring Systems (CM
234700095D Dialogue Technology Corp.
234800095E Masstech Group Inc.
234900095F Telebyte, Inc.
2350000960 YOZAN Inc.
2351000961 Switchgear and Instrumentation Ltd
2352000962 Filetrac AS
2353000963 Dominion Lasercom Inc.
2354000964 Hi-Techniques
2355000966 Thales Navigation
2356000967 Tachyon, Inc
2357000968 TECHNOVENTURE, INC.
2358000969 Meret Optical Communications
235900096A Cloverleaf Communications Inc.
236000096B IBM Corporation
236100096C Imedia Semiconductor Corp.
236200096D Powernet Technologies Corp.
236300096E GIANT ELECTRONICS LTD.
236400096F Beijing Zhongqing Elegant Tech. Corp.,Limited
2365000970 Vibration Research Corporation
2366000971 Time Management, Inc.
2367000972 Securebase,Inc
2368000973 Lenten Technology Co., Ltd.
2369000974 Innopia Technologies, Inc.
2370000975 fSONA Communications Corporation
2371000976 Datasoft ISDN Systems GmbH
2372000977 Brunner Elektronik AG
2373000978 AIJI System Co., Ltd.
2374000979 Advanced Television Systems Committee, Inc.
237500097A Louis Design Labs.
237600097B Cisco Systems
237700097C Cisco Systems
237800097D SecWell Networks Oy
237900097E IMI TECHNOLOGY CO., LTD
238000097F Vsecure 2000 LTD.
2381000980 Power Zenith Inc.
2382000981 Newport Networks
2383000982 Loewe Opta GmbH
2384000983 Gvision Incorporated
2385000984 MyCasa Network Inc.
2386000985 Auto Telecom Company
2387000986 Metalink LTD.
2388000987 NISHI NIPPON ELECTRIC WIRE & CABLE CO.,LTD.
2389000988 Nudian Electron Co., Ltd.
2390000989 VividLogic Inc.
239100098A EqualLogic Inc
239200098B Entropic Communications, Inc.
239300098C Possio AB
239400098D DCT Ltd (Digital Communication Technologies Ltd)
239500098E ipcas GmbH
239600098F Cetacean Networks
2397000990 ACKSYS Communications & systems
2398000991 GE Fanuc Automation Manufacturing, Inc.
2399000992 InterEpoch Technology,INC.
2400000993 Visteon Corporation
2401000994 Cronyx Engineering
2402000995 Castle Technology Ltd
2403000996 RDI
2404000997 Nortel Networks
2405000998 Capinfo Company Limited
2406000999 CP GEORGES RENAULT
240700099A ELMO COMPANY, LIMITED
240800099B Western Telematic Inc.
240900099C Naval Research Laboratory
241000099D Haliplex Communications
241100099E Testech, Inc.
241200099F VIDEX INC.
24130009A0 Microtechno Corporation
24140009A1 Telewise Communications, Inc.
24150009A2 Interface Co., Ltd.
24160009A3 Leadfly Techologies Corp. Ltd.
24170009A4 HARTEC Corporation
24180009A5 HANSUNG ELETRONIC INDUSTRIES DEVELOPMENT CO., LTD
24190009A6 Ignis Optics, Inc.
24200009A7 Bang & Olufsen A/S
24210009A8 Eastmode Pte Ltd
24220009A9 Ikanos Communications
24230009AA Data Comm for Business, Inc.
24240009AB Netcontrol Oy
24250009AC LANVOICE
24260009AD HYUNDAI SYSCOMM, INC.
24270009AE OKANO ELECTRIC CO.,LTD
24280009AF e-generis
24290009B0 Onkyo Corporation
24300009B1 Kanematsu Electronics, Ltd.
24310009B2 L&F Inc.
24320009B3 MCM Systems Ltd
24330009B4 KISAN TELECOM CO., LTD.
24340009B5 3J Tech. Co., Ltd.
24350009B6 Cisco Systems
24360009B7 Cisco Systems
24370009B8 Entise Systems
24380009B9 Action Imaging Solutions
24390009BA MAKU Informationstechik GmbH
24400009BB MathStar, Inc.
24410009BC Digital Safety Technologies Inc.
24420009BD Epygi Technologies, Ltd.
24430009BE Mamiya-OP Co.,Ltd.
24440009BF Nintendo Co.,Ltd.
24450009C0 6WIND
24460009C1 PROCES-DATA A/S
24470009C3 NETAS
24480009C4 Medicore Co., Ltd
24490009C5 KINGENE Technology Corporation
24500009C6 Visionics Corporation
24510009C7 Movistec
24520009C8 SINAGAWA TSUSHIN KEISOU SERVICE
24530009C9 BlueWINC Co., Ltd.
24540009CA iMaxNetworks(Shenzhen)Limited.
24550009CB HBrain
24560009CC Moog GmbH
24570009CD HUDSON SOFT CO.,LTD.
24580009CE SpaceBridge Semiconductor Corp.
24590009CF iAd GmbH
24600009D0 Versatel Networks
24610009D1 SERANOA NETWORKS INC
24620009D2 Mai Logic Inc.
24630009D3 Western DataCom Co., Inc.
24640009D4 Transtech Networks
24650009D5 Signal Communication, Inc.
24660009D6 KNC One GmbH
24670009D7 DC Security Products
24680009D9 Neoscale Systems, Inc
24690009DA Control Module Inc.
24700009DB eSpace
24710009DC Galaxis Technology AG
24720009DD Mavin Technology Inc.
24730009DE Samjin Information & Communications Co., Ltd.
24740009DF Vestel Komunikasyon Sanayi ve Ticaret A.S.
24750009E0 XEMICS S.A.
24760009E1 Gemtek Technology Co., Ltd.
24770009E2 Sinbon Electronics Co., Ltd.
24780009E3 Angel Iglesias S.A.
24790009E4 K Tech Infosystem Inc.
24800009E5 Hottinger Baldwin Messtechnik GmbH
24810009E6 Cyber Switching Inc.
24820009E7 ADC Techonology
24830009E8 Cisco Systems
24840009E9 Cisco Systems
24850009EA YEM Inc.
24860009EB HuMANDATA LTD.
24870009EC Daktronics, Inc.
24880009ED CipherOptics
24890009EE MEIKYO ELECTRIC CO.,LTD
24900009EF Vocera Communications
24910009F0 Shimizu Technology Inc.
24920009F1 Yamaki Electric Corporation
24930009F2 Cohu, Inc., Electronics Division
24940009F3 WELL Communication Corp.
24950009F4 Alcon Laboratories, Inc.
24960009F5 Emerson Network Power Co.,Ltd
24970009F6 Shenzhen Eastern Digital Tech Ltd.
24980009F7 SED, a division of Calian
24990009F8 UNIMO TECHNOLOGY CO., LTD.
25000009F9 ART JAPAN CO., LTD.
25010009FB Philips Medizinsysteme Boeblingen GmbH
25020009FC IPFLEX Inc.
25030009FD Ubinetics Limited
25040009FE Daisy Technologies, Inc.
25050009FF X.net 2000 GmbH
2506000A00 Mediatek Corp.
2507000A01 SOHOware, Inc.
2508000A02 ANNSO CO., LTD.
2509000A03 ENDESA SERVICIOS, S.L.
2510000A04 3Com Europe Ltd
2511000A05 Widax Corp.
2512000A06 Teledex LLC
2513000A07 WebWayOne Ltd
2514000A08 ALPINE ELECTRONICS, INC.
2515000A09 TaraCom Integrated Products, Inc.
2516000A0A SUNIX Co., Ltd.
2517000A0B Sealevel Systems, Inc.
2518000A0C Scientific Research Corporation
2519000A0D MergeOptics GmbH
2520000A0E Invivo Research Inc.
2521000A0F Ilryung Telesys, Inc
2522000A10 FAST media integrations AG
2523000A11 ExPet Technologies, Inc
2524000A12 Azylex Technology, Inc
2525000A13 Silent Witness
2526000A14 TECO a.s.
2527000A15 Silicon Data, Inc
2528000A16 Lassen Research
2529000A17 NESTAR COMMUNICATIONS, INC
2530000A18 Vichel Inc.
2531000A19 Valere Power, Inc.
2532000A1A Imerge Ltd
2533000A1B Stream Labs
2534000A1C Bridge Information Co., Ltd.
2535000A1D Optical Communications Products Inc.
2536000A1E Red-M (Communications) Limited
2537000A1F ART WARE Telecommunication Co., Ltd.
2538000A20 SVA Networks, Inc.
2539000A21 Integra Telecom Co. Ltd
2540000A22 Amperion Inc
2541000A23 Parama Networks Inc
2542000A24 Octave Communications
2543000A25 CERAGON NETWORKS
2544000A26 CEIA S.p.A.
2545000A27 Apple Computer, Inc.
2546000A28 Motorola
2547000A29 Pan Dacom Networking AG
2548000A2A QSI Systems Inc.
2549000A2B Etherstuff
2550000A2C Active Tchnology Corporation
2551000A2E MAPLE NETWORKS CO., LTD
2552000A2F Artnix Inc.
2553000A30 Johnson Controls-ASG
2554000A31 HCV Wireless
2555000A32 Xsido Corporation
2556000A33 Sierra Logic, Inc.
2557000A34 Identicard Systems Incorporated
2558000A35 Xilinx
2559000A36 Synelec Telecom Multimedia
2560000A37 Procera Networks, Inc.
2561000A38 Netlock Technologies, Inc.
2562000A39 LoPA Information Technology
2563000A3A J-THREE INTERNATIONAL Holding Co., Ltd.
2564000A3B GCT Semiconductor, Inc
2565000A3C Enerpoint Ltd.
2566000A3D Elo Sistemas Eletronicos S.A.
2567000A3E EADS Telecom
2568000A3F Data East Corporation
2569000A40 Crown Audio
2570000A41 Cisco Systems
2571000A42 Cisco Systems
2572000A43 Chunghwa Telecom Co., Ltd.
2573000A44 Avery Dennison Deutschland GmbH
2574000A45 Audio-Technica Corp.
2575000A46 ARO Controls SAS
2576000A47 Allied Vision Technologies
2577000A48 Albatron Technology
2578000A49 Acopia Networks
2579000A4A Targa Systems Ltd.
2580000A4B DataPower Technology, Inc.
2581000A4C Molecular Devices Corporation
2582000A4D Noritz Corporation
2583000A4E UNITEK Electronics INC.
2584000A4F Brain Boxes Limited
2585000A50 REMOTEK CORPORATION
2586000A51 GyroSignal Technology Co., Ltd.
2587000A52 Venitek Co. Ltd.
2588000A53 Intronics, Incorporated
2589000A54 Laguna Hills, Inc.
2590000A55 MARKEM Corporation
2591000A56 HITACHI Maxell Ltd.
2592000A57 Hewlett-Packard Company - Standards
2593000A58 Ingenieur-Buero Freyer & Siegel
2594000A59 HW server
2595000A5A GreenNET Technologies Co.,Ltd.
2596000A5B Power-One as
2597000A5C Carel s.p.a.
2598000A5D PUC Founder (MSC) Berhad
2599000A5E 3COM Corporation
2600000A5F almedio inc.
2601000A60 Autostar Technology Pte Ltd
2602000A61 Cellinx Systems Inc.
2603000A62 Crinis Networks, Inc.
2604000A63 DHD GmbH
2605000A64 Eracom Technologies
2606000A65 GentechMedia.co.,ltd.
2607000A66 MITSUBISHI ELECTRIC SYSTEM & SERVICE CO.,LTD.
2608000A67 OngCorp
2609000A68 SolarFlare Communications, Inc.
2610000A69 SUNNY bell Technology Co., Ltd.
2611000A6A SVM Microwaves s.r.o.
2612000A6B Tadiran Telecom Business Systems LTD
2613000A6C Walchem Corporation
2614000A6D EKS Elektronikservice GmbH
2615000A6E Broadcast Technology Limited
2616000A6F ZyTera Technologies Inc.
2617000A70 MPLS Forum
2618000A71 Avrio Technologies, Inc
2619000A72 SimpleTech, Inc.
2620000A73 Scientific Atlanta
2621000A74 Manticom Networks Inc.
2622000A75 Cat Electronics
2623000A76 Beida Jade Bird Huaguang Technology Co.,Ltd
2624000A77 Bluewire Technologies LLC
2625000A78 OLITEC
2626000A79 corega K.K.
2627000A7A Kyoritsu Electric Co., Ltd.
2628000A7B Cornelius Consult
2629000A7C Tecton Ltd
2630000A7D Valo, Inc.
2631000A7E The Advantage Group
2632000A7F Teradon Industries, Inc
2633000A80 Telkonet Inc.
2634000A81 TEIMA Audiotex S.L.
2635000A82 TATSUTA SYSTEM ELECTRONICS CO.,LTD.
2636000A83 SALTO SYSTEMS S.L.
2637000A84 Rainsun Enterprise Co., Ltd.
2638000A85 PLAT'C2,Inc
2639000A86 Lenze
2640000A87 Integrated Micromachines Inc.
2641000A88 InCypher S.A.
2642000A89 Creval Systems, Inc.
2643000A8A Cisco Systems
2644000A8B Cisco Systems
2645000A8C Guardware Systems Ltd.
2646000A8D EUROTHERM LIMITED
2647000A8E Invacom Ltd
2648000A8F Aska International Inc.
2649000A90 Bayside Interactive, Inc.
2650000A91 HemoCue AB
2651000A92 Presonus Corporation
2652000A93 W2 Networks, Inc.
2653000A94 ShangHai cellink CO., LTD
2654000A95 Apple Computer, Inc.
2655000A96 MEWTEL TECHNOLOGY INC.
2656000A97 SONICblue, Inc.
2657000A98 M+F Gwinner GmbH & Co
2658000A99 Dataradio Inc.
2659000A9A Aiptek International Inc
2660000A9B Towa Meccs Corporation
2661000A9C Server Technology, Inc.
2662000A9D King Young Technology Co. Ltd.
2663000A9E BroadWeb Corportation
2664000A9F Pannaway Technologies, Inc.
2665000AA0 Cedar Point Communications
2666000AA1 V V S Limited
2667000AA2 SYSTEK INC.
2668000AA3 SHIMAFUJI ELECTRIC CO.,LTD.
2669000AA4 SHANGHAI SURVEILLANCE TECHNOLOGY CO,LTD
2670000AA5 MAXLINK INDUSTRIES LIMITED
2671000AA6 Hochiki Corporation
2672000AA7 FEI Company
2673000AA8 ePipe Pty. Ltd.
2674000AA9 Brooks Automation GmbH
2675000AAA AltiGen Communications Inc.
2676000AAB TOYOTA MACS, INC.
2677000AAC TerraTec Electronic GmbH
2678000AAD Stargames Corporation
2679000AAE Rosemount Process Analytical
2680000AAF Pipal Systems
2681000AB0 LOYTEC electronics GmbH
2682000AB1 GENETEC Corporation
2683000AB2 Fresnel Wireless Systems
2684000AB3 Fa. GIRA
2685000AB4 ETIC Telecommunications
2686000AB5 Digital Electronic Network
2687000AB6 COMPUNETIX, INC
2688000AB7 Cisco Systems
2689000AB8 Cisco Systems
2690000AB9 Astera Technologies Corp.
2691000ABA Arcon Technology Limited
2692000ABB Taiwan Secom Co,. Ltd
2693000ABC Seabridge Ltd.
2694000ABD Rupprecht & Patashnick Co.
2695000ABE OPNET Technologies CO., LTD.
2696000ABF HIROTA SS
2697000AC0 Fuyoh Video Industry CO., LTD.
2698000AC1 Futuretel
2699000AC2 FiberHome Telecommunication Technologies CO.,LTD
2700000AC3 eM Technics Co., Ltd.
2701000AC4 Daewoo Teletech Co., Ltd
2702000AC5 Color Kinetics
2703000AC7 Unication Group
2704000AC8 ZPSYS CO.,LTD. (Planning&Management)
2705000AC9 Zambeel Inc
2706000ACA YOKOYAMA SHOKAI CO.,Ltd.
2707000ACB XPAK MSA Group
2708000ACC Winnow Networks, Inc.
2709000ACD Sunrich Technology Limited
2710000ACE RADIANTECH, INC.
2711000ACF PROVIDEO Multimedia Co. Ltd.
2712000AD0 Niigata Develoment Center, F.I.T. Co., Ltd.
2713000AD1 MWS
2714000AD2 JEPICO Corporation
2715000AD3 INITECH Co., Ltd
2716000AD4 CoreBell Systems Inc.
2717000AD5 Brainchild Electronic Co., Ltd.
2718000AD6 BeamReach Networks
2719000AD8 IPCserv Technology Corp.
2720000AD9 Sony Ericsson Mobile Communications AB
2721000ADB SkyPilot Network, Inc
2722000ADC RuggedCom Inc.
2723000ADD InSciTek Microsystems, Inc.
2724000ADE Happy Communication Co., Ltd.
2725000ADF Gennum Corporation
2726000AE0 Fujitsu Softek
2727000AE1 EG Technology
2728000AE2 Binatone Electronics International, Ltd
2729000AE3 YANG MEI TECHNOLOGY CO., LTD
2730000AE4 Wistron Corp.
2731000AE5 ScottCare Corporation
2732000AE6 Elitegroup Computer System Co. (ECS)
2733000AE7 ELIOP S.A.
2734000AE8 Cathay Roxus Information Technology Co. LTD
2735000AE9 AirVast Technology Inc.
2736000AEA ADAM ELEKTRONIK LTD.STI.
2737000AEB Shenzhen Tp-link Technology Co; Ltd.
2738000AEC Koatsu Gas Kogyo Co., Ltd.
2739000AED HARTING Vending G.m.b.H. & CO KG
2740000AEE GCD Hard- & Software GmbH
2741000AEF OTRUM ASA
2742000AF0 SHIN-OH ELECTRONICS CO., LTD. R&D
2743000AF1 Clarity Design, Inc.
2744000AF2 NeoAxiom Corp.
2745000AF3 Cisco Systems
2746000AF4 Cisco Systems
2747000AF5 Airgo Networks, Inc.
2748000AF6 Computer Process Controls
2749000AF7 Broadcom Corp.
2750000AF8 American Telecare Inc.
2751000AFA Traverse Technologies Australia
2752000AFB Ambri Limited
2753000AFC Core Tec Communications, LLC
2754000AFD Viking Electronic Services
2755000AFE NovaPal Ltd
2756000AFF Kilchherr Elektronik AG
2757000B00 FUJIAN START COMPUTER EQUIPMENT CO.,LTD
2758000B01 DAIICHI ELECTRONICS CO., LTD.
2759000B02 Dallmeier electronic
2760000B03 Taekwang Industrial Co., Ltd
2761000B04 Volktek Corporation
2762000B05 Pacific Broadband Networks
2763000B06 Motorola BCS
2764000B07 Voxpath Networks
2765000B08 Pillar Data Systems
2766000B09 Ifoundry Systems Singapore
2767000B0A dBm Optics
2768000B0B Corrent Corporation
2769000B0C Agile Systems Inc.
2770000B0D Air2U, Inc.
2771000B0E Trapeze Networks
2772000B0F Nyquist Industrial Control BV
2773000B10 11wave Technonlogy Co.,Ltd
2774000B11 HIMEJI ABC TRADING CO.,LTD.
2775000B13 ZETRON INC
2776000B14 ViewSonic Corporation
2777000B15 Platypus Technology
2778000B16 Communication Machinery Corporation
2779000B17 MKS Instruments
2780000B19 Vernier Networks, Inc.
2781000B1A Teltone Corporation
2782000B1B Systronix, Inc.
2783000B1D LayerZero Power Systems, Inc.
2784000B1E KAPPA opto-electronics GmbH
2785000B1F I CON Computer Co.
2786000B20 Hirata corporation
2787000B21 G-Star Communications Inc.
2788000B22 Environmental Systems and Services
2789000B23 Efficient Networks, Inc.
2790000B24 AirLogic
2791000B25 Aeluros
2792000B26 Wetek Corporation
2793000B27 Scion Corporation
2794000B28 Quatech Inc.
2795000B29 LG Industrial Systems Co.,Ltd.
2796000B2A HOWTEL Co., Ltd.
2797000B2B HOSTNET CORPORATION
2798000B2C Eiki Industrial Co. Ltd.
2799000B2D Danfoss Inc.
2800000B2E Cal-Comp Electronics (Thailand) Public Company Limited Taipe
2801000B2F bplan GmbH
2802000B30 Beijing Gongye Science & Technology Co.,Ltd
2803000B31 Yantai ZhiYang Scientific and technology industry CO., LTD
2804000B32 VORMETRIC, INC.
2805000B33 Vivato
2806000B34 ShangHai Broadband Technologies CO.LTD
2807000B35 Quad Bit System co., Ltd.
2808000B36 Productivity Systems, Inc.
2809000B37 MANUFACTURE DES MONTRES ROLEX SA
2810000B38 Knuerr AG
2811000B39 Keisoku Giken Co.,Ltd.
2812000B3A Fortel DTV, Inc.
2813000B3B devolo AG
2814000B3C Cygnal Integrated Products, Inc.
2815000B3D CONTAL OK Ltd.
2816000B3E BittWare, Inc
2817000B3F Anthology Solutions Inc.
2818000B40 OpNext Inc.
2819000B41 Ing. Buero Dr. Beutlhauser
2820000B42 commax Co., Ltd.
2821000B43 Microscan Systems, Inc.
2822000B44 Concord IDea Corp.
2823000B45 Cisco
2824000B46 Cisco
2825000B47 Advanced Energy
2826000B48 sofrel
2827000B49 RF-Link System Inc.
2828000B4A Visimetrics (UK) Ltd
2829000B4B VISIOWAVE SA
2830000B4C Clarion (M) Sdn Bhd
2831000B4D Emuzed
2832000B4E VertexRSI Antenna Products Division
2833000B4F Verifone, INC.
2834000B50 Oxygnet
2835000B51 Micetek International Inc.
2836000B52 JOYMAX ELECTRONICS CORP.
2837000B53 INITIUM Co., Ltd.
2838000B54 BiTMICRO Networks, Inc.
2839000B55 ADInstruments
2840000B56 Cybernetics
2841000B57 Silicon Laboratories
2842000B58 Astronautics C.A LTD
2843000B59 ScriptPro, LLC
2844000B5A HyperEdge
2845000B5B Rincon Research Corporation
2846000B5C Newtech Co.,Ltd
2847000B5D FUJITSU LIMITED
2848000B5E ATMAVA Ltd
2849000B5F Cisco Systems
2850000B60 Cisco Systems
2851000B61 Friedrich Lütze GmbH &Co.
2852000B62 Ingenieurbüro Ingo Mohnen
2853000B64 Kieback & Peter GmbH & Co KG
2854000B65 Sy.A.C. srl
2855000B66 Teralink Communications
2856000B67 Topview Technology Corporation
2857000B68 Addvalue Communications Pte Ltd
2858000B69 Franke Finland Oy
2859000B6A Asiarock Incorporation
2860000B6B Wistron Neweb Corp.
2861000B6C Sychip Inc.
2862000B6D SOLECTRON JAPAN NAKANIIDA
2863000B6E Neff Instrument Corp.
2864000B6F Media Streaming Networks Inc
2865000B70 Load Technology, Inc.
2866000B71 Litchfield Communications Inc.
2867000B72 Lawo AG
2868000B73 Kodeos Communications
2869000B74 Kingwave Technology Co., Ltd.
2870000B75 Iosoft Ltd.
2871000B76 ET&T Co. Ltd.
2872000B77 Cogent Systems, Inc.
2873000B78 TAIFATECH INC.
2874000B79 X-COM, Inc.
2875000B7B Test-Um Inc.
2876000B7C Telex Communications
2877000B7D SOLOMON EXTREME INTERNATIONAL LTD.
2878000B7E SAGINOMIYA Seisakusho Inc.
2879000B7F OmniWerks
2880000B81 Kaparel Corporation
2881000B82 Grandstream Networks, Inc.
2882000B83 DATAWATT B.V.
2883000B84 BODET
2884000B85 Airespace, Inc.
2885000B86 Aruba Networks
2886000B87 American Reliance Inc.
2887000B88 Vidisco ltd.
2888000B89 Top Global Technology, Ltd.
2889000B8A MITEQ Inc.
2890000B8B KERAJET, S.A.
2891000B8C flextronics israel
2892000B8D Avvio Networks
2893000B8E Ascent Corporation
2894000B8F AKITA ELECTRONICS SYSTEMS CO.,LTD.
2895000B90 Covaro Networks, Inc.
2896000B91 Aglaia Gesellschaft für Bildverarbeitung und Kommunikation m
2897000B92 Ascom Danmark A/S
2898000B93 Barmag Electronic
2899000B94 Digital Monitoring Products, Inc.
2900000B95 eBet Gaming Systems Pty Ltd
2901000B96 Innotrac Diagnostics Oy
2902000B97 Matsushita Electric Industrial Co.,Ltd.
2903000B98 NiceTechVision
2904000B99 SensAble Technologies, Inc.
2905000B9A Shanghai Ulink Telecom Equipment Co. Ltd.
2906000B9B Sirius System Co, Ltd.
2907000B9C TriBeam Technologies, Inc.
2908000B9D TwinMOS Technologies Inc.
2909000B9E Yasing Technology Corp.
2910000B9F Neue ELSA GmbH
2911000BA0 T&L Information Inc.
2912000BA1 SYSCOM Ltd.
2913000BA2 Sumitomo Electric Networks, Inc
2914000BA3 Siemens AG, I&S
2915000BA4 Shiron Satellite Communications Ltd. (1996)
2916000BA5 Quasar Cipta Mandiri, PT
2917000BA6 Miyakawa Electric Works Ltd.
2918000BA7 Maranti Networks
2919000BA8 HANBACK ELECTRONICS CO., LTD.
2920000BAA Aiphone co.,Ltd
2921000BAB Advantech Technology (CHINA) Co., Ltd.
2922000BAC 3Com Europe Ltd.
2923000BAD PC-PoS Inc.
2924000BAE Vitals System Inc.
2925000BB0 Sysnet Telematica srl
2926000BB1 Super Star Technology Co., Ltd.
2927000BB2 SMALLBIG TECHNOLOGY
2928000BB3 RiT technologies Ltd.
2929000BB4 RDC Semiconductor Inc.,
2930000BB5 nStor Technologies, Inc.
2931000BB6 Mototech Inc.
2932000BB7 Micro Systems Co.,Ltd.
2933000BB8 Kihoku Electronic Co.
2934000BB9 Imsys AB
2935000BBA Harmonic Broadband Access Networks
2936000BBB Etin Systems Co., Ltd
2937000BBC En Garde Systems, Inc.
2938000BBD Connexionz Limited
2939000BBE Cisco Systems
2940000BBF Cisco Systems
2941000BC0 China IWNComm Co., Ltd.
2942000BC1 Bay Microsystems, Inc.
2943000BC2 Corinex Communication Corp.
2944000BC3 Multiplex, Inc.
2945000BC4 BIOTRONIK GmbH & Co
2946000BC5 SMC Networks, Inc.
2947000BC6 ISAC, Inc.
2948000BC7 ICET S.p.A.
2949000BC8 AirFlow Networks
2950000BC9 Electroline Equipment
2951000BCA DATAVAN International Corporation
2952000BCB Fagor Automation , S. Coop
2953000BCC JUSAN, S.A.
2954000BCD Compaq (HP)
2955000BCE Free2move AB
2956000BCF AGFA NDT INC.
2957000BD0 XiMeta Technology Americas Inc.
2958000BD1 Aeronix, Inc.
2959000BD2 Remopro Technology Inc.
2960000BD3 cd3o
2961000BD4 Beijing Wise Technology & Science Development Co.Ltd
2962000BD5 Nvergence, Inc.
2963000BD6 Paxton Access Ltd
2964000BD7 MBB Gelma GmbH
2965000BD8 Industrial Scientific Corp.
2966000BD9 General Hydrogen
2967000BDA EyeCross Co.,Inc.
2968000BDB Dell ESG PCBA Test
2969000BDC AKCP
2970000BDD TOHOKU RICOH Co., LTD.
2971000BDF Shenzhen RouterD Networks Limited
2972000BE0 SercoNet Ltd.
2973000BE2 Lumenera Corporation
2974000BE3 Key Stream Co., Ltd.
2975000BE4 Hosiden Corporation
2976000BE5 HIMS Korea Co., Ltd.
2977000BE6 Datel Electronics
2978000BE7 COMFLUX TECHNOLOGY INC.
2979000BE8 AOIP
2980000BEA Zultys Technologies
2981000BEB Systegra AG
2982000BEC NIPPON ELECTRIC INSTRUMENT, INC.
2983000BED ELM Inc.
2984000BEE inc.jet, Incorporated
2985000BEF Code Corporation
2986000BF0 MoTEX Products Co., Ltd.
2987000BF1 LAP Laser Applikations
2988000BF2 Chih-Kan Technology Co., Ltd.
2989000BF3 BAE SYSTEMS
2990000BF5 Shanghai Sibo Telecom Technology Co.,Ltd
2991000BF6 Nitgen Co., Ltd
2992000BF7 NIDEK CO.,LTD
2993000BF8 Infinera
2994000BF9 Gemstone communications, Inc.
2995000BFB D-NET International Corporation
2996000BFC Cisco Systems
2997000BFD Cisco Systems
2998000BFE CASTEL Broadband Limited
2999000BFF Berkeley Camera Engineering
3000000C00 BEB Industrie-Elektronik AG
3001000C01 Abatron AG
3002000C02 ABB Oy
3003000C03 HDMI Licensing, LLC
3004000C04 Tecnova
3005000C05 RPA Reserch Co., Ltd.
3006000C06 Nixvue Systems Pte Ltd
3007000C07 Iftest AG
3008000C08 HUMEX Technologies Corp.
3009000C09 Hitachi IE Systems Co., Ltd
3010000C0A Guangdong Province Electronic Technology Research Institute
3011000C0B Broadbus Technologies
3012000C0C APPRO TECHNOLOGY INC.
3013000C0D Communications & Power Industries / Satcom Division
3014000C0E XtremeSpectrum, Inc.
3015000C0F Techno-One Co., Ltd
3016000C10 PNI Corporation
3017000C11 NIPPON DEMPA CO.,LTD.
3018000C12 Micro-Optronic-Messtechnik GmbH
3019000C13 MediaQ
3020000C14 Diagnostic Instruments, Inc.
3021000C15 CyberPower Systems, Inc.
3022000C16 Concorde Microsystems Inc.
3023000C17 AJA Video Systems Inc
3024000C18 Zenisu Keisoku Inc.
3025000C19 Telio Communications GmbH
3026000C1A Quest Technical Solutions Inc.
3027000C1B ORACOM Co, Ltd.
3028000C1C MicroWeb Co., Ltd.
3029000C1D Mettler & Fuchs AG
3030000C1E Global Cache
3031000C1F Glimmerglass Networks
3032000C20 Fi WIn, Inc.
3033000C21 Faculty of Science and Technology, Keio University
3034000C22 Double D Electronics Ltd
3035000C23 Beijing Lanchuan Tech. Co., Ltd.
3036000C25 Allied Telesyn Networks
3037000C26 Weintek Labs. Inc.
3038000C27 Sammy Corporation
3039000C28 RIFATRON
3040000C29 VMware, Inc.
3041000C2A OCTTEL Communication Co., Ltd.
3042000C2B ELIAS Technology, Inc.
3043000C2C Enwiser Inc.
3044000C2D FullWave Technology Co., Ltd.
3045000C2E Openet information technology(shenzhen) Co., Ltd.
3046000C2F SeorimTechnology Co.,Ltd.
3047000C30 Cisco
3048000C31 Cisco
3049000C32 Avionic Design Development GmbH
3050000C33 Compucase Enterprise Co. Ltd.
3051000C34 Vixen Co., Ltd.
3052000C35 KaVo Dental GmbH & Co. KG
3053000C36 SHARP TAKAYA ELECTRONICS INDUSTRY CO.,LTD.
3054000C37 Geomation, Inc.
3055000C38 TelcoBridges Inc.
3056000C39 Sentinel Wireless Inc.
3057000C3A Oxance
3058000C3B Orion Electric Co., Ltd.
3059000C3C MediaChorus, Inc.
3060000C3D Glsystech Co., Ltd.
3061000C3E Crest Audio
3062000C3F Cogent Defence & Security Networks,
3063000C40 Altech Controls
3064000C41 The Linksys Group, Inc.
3065000C42 Routerboard.com
3066000C43 Ralink Technology, Corp.
3067000C44 Automated Interfaces, Inc.
3068000C45 Animation Technologies Inc.
3069000C46 Allied Telesyn Inc.
3070000C47 SK Teletech(R&D Planning Team)
3071000C48 QoStek Corporation
3072000C49 Dangaard Telecom RTC Division A/S
3073000C4A Cygnus Microsystems Private Limited
3074000C4B Cheops Elektronik
3075000C4C Arcor AG&Co.
3076000C4D ACRA CONTROL
3077000C4E Winbest Technology CO,LT
3078000C4F UDTech Japan Corporation
3079000C50 Seagate Technology
3080000C51 Scientific Technologies Inc.
3081000C52 Roll Systems Inc.
3082000C54 Pedestal Networks, Inc
3083000C55 Microlink Communications Inc.
3084000C56 Megatel Computer (1986) Corp.
3085000C57 MACKIE Engineering Services Belgium BVBA
3086000C58 M&S Systems
3087000C59 Indyme Electronics, Inc.
3088000C5A IBSmm Industrieelektronik Multimedia
3089000C5B HANWANG TECHNOLOGY CO.,LTD
3090000C5C GTN Systems B.V.
3091000C5D CHIC TECHNOLOGY (CHINA) CORP.
3092000C5F Avtec, Inc.
3093000C60 ACM Systems
3094000C61 AC Tech corporation DBA Advanced Digital
3095000C62 ABB Automation Technology Products AB, Control
3096000C63 Zenith Electronics Corporation
3097000C64 X2 MSA Group
3098000C65 Sunin Telecom
3099000C66 Pronto Networks Inc
3100000C67 OYO ELECTRIC CO.,LTD
3101000C68 Oasis Semiconductor, Inc.
3102000C69 National Radio Astronomy Observatory
3103000C6A MBARI
3104000C6B Kurz Industrie-Elektronik GmbH
3105000C6C Elgato Systems LLC
3106000C6D BOC Edwards
3107000C6E ASUSTEK COMPUTER INC.
3108000C6F Amtek system co.,LTD.
3109000C70 ACC GmbH
3110000C71 Wybron, Inc
3111000C72 Tempearl Industrial Co., Ltd.
3112000C73 TELSON ELECTRONICS CO., LTD
3113000C74 RIVERTEC CORPORATION
3114000C75 Oriental integrated electronics. LTD
3115000C76 MICRO-STAR INTERNATIONAL CO., LTD.
3116000C77 Life Racing Ltd
3117000C78 In-Tech Electronics Limited
3118000C79 Extel Communications P/L
3119000C7A DaTARIUS Technologies GmbH
3120000C7B ALPHA PROJECT Co.,Ltd.
3121000C7C Internet Information Image Inc.
3122000C7D TEIKOKU ELECTRIC MFG. CO., LTD
3123000C7E Tellium Incorporated
3124000C7F synertronixx GmbH
3125000C80 Opelcomm Inc.
3126000C81 Nulec Industries Pty Ltd
3127000C82 NETWORK TECHNOLOGIES INC
3128000C83 Logical Solutions
3129000C84 Eazix, Inc.
3130000C85 Cisco Systems
3131000C86 Cisco Systems
3132000C87 ATI
3133000C88 Apache Micro Peripherals, Inc.
3134000C89 AC Electric Vehicles, Ltd.
3135000C8A Bose Corporation
3136000C8B Connect Tech Inc
3137000C8C KODICOM CO.,LTD.
3138000C8D MATRIX VISION GmbH
3139000C8E Mentor Engineering Inc
3140000C8F Nergal s.r.l.
3141000C90 Octasic Inc.
3142000C91 Riverhead Networks Inc.
3143000C92 WolfVision Gmbh
3144000C93 Xeline Co., Ltd.
3145000C94 United Electronic Industries, Inc.
3146000C95 PrimeNet
3147000C96 OQO, Inc.
3148000C97 NV ADB TTV Technologies SA
3149000C98 LETEK Communications Inc.
3150000C99 HITEL LINK Co.,Ltd
3151000C9A Hitech Electronics Corp.
3152000C9B EE Solutions, Inc
3153000C9C Chongho information & communications
3154000C9D AirWalk Communications, Inc.
3155000C9E MemoryLink Corp.
3156000C9F NKE Corporation
3157000CA0 StorCase Technology, Inc.
3158000CA1 SIGMACOM Co., LTD.
3159000CA2 Scopus Network Technologies Ltd
3160000CA3 Rancho Technology, Inc.
3161000CA4 Prompttec Product Management GmbH
3162000CA6 Mintera Corporation
3163000CA7 Metro (Suzhou) Technologies Co., Ltd.
3164000CA8 Garuda Networks Corporation
3165000CA9 Ebtron Inc.
3166000CAA Cubic Transportation Systems Inc
3167000CAB COMMEND International
3168000CAC Citizen Watch Co., Ltd.
3169000CAD BTU International
3170000CAE Ailocom Oy
3171000CAF TRI TERM CO.,LTD.
3172000CB0 Star Semiconductor Corporation
3173000CB1 Salland Engineering (Europe) BV
3174000CB2 safei Co., Ltd.
3175000CB3 ROUND Co.,Ltd.
3176000CB4 Propagate Networks, Inc
3177000CB5 Premier Technolgies, Inc
3178000CB6 NANJING SEU MOBILE & INTERNET TECHNOLOGY CO.,LTD
3179000CB7 Nanjing Huazhuo Electronics Co., Ltd.
3180000CB8 MEDION AG
3181000CB9 LEA
3182000CBA Jamex
3183000CBB ISKRAEMECO
3184000CBC Iscutum
3185000CBD Interface Masters, Inc
3186000CBF Holy Stone Ent. Co., Ltd.
3187000CC0 Genera Oy
3188000CC1 Cooper Industries Inc.
3189000CC3 BeWAN systems
3190000CC4 Tiptel AG
3191000CC5 Nextlink Co., Ltd.
3192000CC6 Ka-Ro electronics GmbH
3193000CC7 Intelligent Computer Solutions Inc.
3194000CC8 Integrated Digital Systems, Inc.
3195000CC9 ILWOO DATA & TECHNOLOGY CO.,LTD
3196000CCA Hitachi Global Storage Technologies
3197000CCB Design Combus Ltd
3198000CCC Bluesoft Ltd.
3199000CCD IEC - TC57
3200000CCE Cisco Systems
3201000CCF Cisco Systems
3202000CD0 Symetrix
3203000CD1 SFOM Technology Corp.
3204000CD2 Schaffner EMV AG
3205000CD3 Prettl Elektronik Radeberg GmbH
3206000CD4 Positron Public Safety Systems inc.
3207000CD5 Passave Inc.
3208000CD6 PARTNER TECH
3209000CD7 Nallatech Ltd
3210000CD8 M. K. Juchheim GmbH & Co
3211000CD9 Itcare Co., Ltd
3212000CDA FreeHand Systems, Inc.
3213000CDB Foundry Networks
3214000CDC BECS Technology, Inc
3215000CDD AOS Technologies AG
3216000CDE ABB STOTZ-KONTAKT GmbH
3217000CDF PULNiX America, Inc
3218000CE0 Trek Diagnostics Inc.
3219000CE1 The Open Group
3220000CE2 Rolls-Royce
3221000CE3 Option International N.V.
3222000CE4 NeuroCom International, Inc.
3223000CE5 Motorola BCS
3224000CE6 Meru Networks Inc
3225000CE7 MediaTek Inc.
3226000CE8 GuangZhou AnJuBao Co., Ltd
3227000CE9 BLOOMBERG L.P.
3228000CEA aphona Kommunikationssysteme
3229000CEB CNMP Networks, Inc.
3230000CEC Spectracom Corp.
3231000CED Real Digital Media
3232000CEE Q-Networks
3233000CEF Open Networks Engineering Ltd
3234000CF0 M & N GmbH
3235000CF1 Intel Corporation
3236000CF2 GAMESA EÓLICA
3237000CF3 CALL IMAGE SA
3238000CF4 AKATSUKI ELECTRIC MFG.CO.,LTD.
3239000CF5 InfoExpress
3240000CF6 Sitecom Europe BV
3241000CF7 Nortel Networks
3242000CF8 Nortel Networks
3243000CF9 ITT Flygt AB
3244000CFA Digital Systems Corp
3245000CFB Korea Network Systems
3246000CFC S2io Technologies Corp
3247000CFE Grand Electronic Co., Ltd
3248000CFF MRO-TEK LIMITED
3249000D00 Seaway Networks Inc.
3250000D01 P&E Microcomputer Systems, Inc.
3251000D02 NEC Access Technica,Ltd
3252000D03 Matrics, Inc.
3253000D04 Foxboro Eckardt Development GmbH
3254000D05 cybernet manufacturing inc.
3255000D06 Compulogic Limited
3256000D07 Calrec Audio Ltd
3257000D08 AboveCable, Inc.
3258000D09 Yuehua(Zhuhai) Electronic CO. LTD
3259000D0A Projectiondesign as
3260000D0B Melco Inc.
3261000D0C MDI Security Systems
3262000D0D ITSupported, LLC
3263000D0E Inqnet Systems, Inc.
3264000D0F Finlux Ltd
3265000D10 Embedtronics Oy
3266000D11 DENTSPLY - Gendex
3267000D12 AXELL Corporation
3268000D13 Wilhelm Rutenbeck GmbH&Co.
3269000D14 Vtech Innovation LP dba Advanced American Telephones
3270000D15 Voipac s.r.o.
3271000D16 UHS Systems Pty Ltd
3272000D17 Turbo Networks Co.Ltd
3273000D18 Sunitec Enterprise Co., Ltd.
3274000D19 ROBE Show lighting
3275000D1A Mustek System Inc.
3276000D1B Kyoto Electronics Manufacturing Co., Ltd.
3277000D1C I2E TELECOM
3278000D1D HIGH-TEK HARNESS ENT. CO., LTD.
3279000D1E Control Techniques
3280000D1F AV Digital
3281000D20 ASAHIKASEI TECHNOSYSTEM CO.,LTD.
3282000D21 WISCORE Inc.
3283000D22 Unitronics
3284000D23 Smart Solution, Inc
3285000D24 SENTEC E&E CO., LTD.
3286000D25 SANDEN CORPORATION
3287000D26 Primagraphics Limited
3288000D27 MICROPLEX Printware AG
3289000D28 Cisco
3290000D29 Cisco
3291000D2A Scanmatic AS
3292000D2B Racal Instruments
3293000D2C Patapsco Designs Ltd
3294000D2D NCT Deutschland GmbH
3295000D2E Matsushita Avionics Systems Corporation
3296000D2F AIN Comm.Tech.Co., LTD
3297000D30 IceFyre Semiconductor
3298000D31 Compellent Technologies, Inc.
3299000D32 DispenseSource, Inc.
3300000D33 Prediwave Corp.
3301000D34 Shell International Exploration and Production, Inc.
3302000D35 PAC International Ltd
3303000D36 Wu Han Routon Electronic Co., Ltd
3304000D37 WIPLUG
3305000D38 NISSIN INC.
3306000D39 Network Electronics
3307000D3A Microsoft Corp.
3308000D3B Microelectronics Technology Inc.
3309000D3C i.Tech Dynamic Ltd
3310000D3E APLUX Communications Ltd.
3311000D3F VXI Technology
3312000D40 Verint Loronix Video Solutions
3313000D41 Siemens AG ICM MP UC RD IT KLF1
3314000D42 Newbest Development Limited
3315000D43 DRS Tactical Systems Inc.
3316000D45 Tottori SANYO Electric Co., Ltd.
3317000D46 Eurotherm Drives, Ltd.
3318000D47 Collex
3319000D48 AEWIN Technologies Co., Ltd.
3320000D49 Triton Systems of Delaware, Inc.
3321000D4A Steag ETA-Optik
3322000D4B Roku, LLC
3323000D4C Outline Electronics Ltd.
3324000D4D Ninelanes
3325000D4E NDR Co.,LTD.
3326000D4F Kenwood Corporation
3327000D50 Galazar Networks
3328000D51 DIVR Systems, Inc.
3329000D52 Comart system
3330000D53 Beijing 5w Communication Corp.
3331000D54 3Com Europe Ltd
3332000D55 SANYCOM Technology Co.,Ltd
3333000D56 Dell PCBA Test
3334000D57 Fujitsu I-Network Systems Limited.
3335000D59 Amity Systems, Inc.
3336000D5A Tiesse SpA
3337000D5B Smart Empire Investments Limited
3338000D5C Robert Bosch GmbH, VT-ATMO
3339000D5D Raritan Computer, Inc
3340000D5E NEC CustomTechnica, Ltd.
3341000D5F Minds Inc
3342000D60 IBM Corporation
3343000D61 Giga-Byte Technology Co., Ltd.
3344000D62 Funkwerk Dabendorf GmbH
3345000D63 DENT Instruments, Inc.
3346000D64 COMAG Handels AG
3347000D65 Cisco Systems
3348000D66 Cisco Systems
3349000D67 BelAir Networks Inc.
3350000D68 Vinci Systems, Inc.
3351000D69 TMT&D Corporation
3352000D6A Redwood Technologies LTD
3353000D6B Mita-Teknik A/S
3354000D6C M-Audio
3355000D6D K-Tech Devices Corp.
3356000D6E K-Patents Oy
3357000D6F Ember Corporation
3358000D70 Datamax Corporation
3359000D71 boca systems
3360000D72 2Wire, Inc
3361000D73 Technical Support, Inc.
3362000D74 Sand Network Systems, Inc.
3363000D75 Kobian Pte Ltd - Taiwan Branch
3364000D76 Hokuto Denshi Co,. Ltd.
3365000D77 FalconStor Software
3366000D78 Engineering & Security
3367000D79 Dynamic Solutions Co,.Ltd.
3368000D7A DiGATTO Asia Pacific Pte Ltd
3369000D7B Consensys Computers Inc.
3370000D7C Codian Ltd
3371000D7D Afco Systems
3372000D7E Axiowave Networks, Inc.
3373000D7F MIDAS COMMUNICATION TECHNOLOGIES PTE LTD ( Foreign Branch)
3374000D80 Online Development Inc
3375000D81 Pepperl+Fuchs GmbH
3376000D82 PHS srl
3377000D83 Sanmina-SCI Hungary Ltd.
3378000D84 Seodu Inchip, Inc.
3379000D85 Tapwave, Inc.
3380000D86 Huber + Suhner AG
3381000D87 Elitegroup Computer System Co. (ECS)
3382000D88 D-Link Corporation
3383000D89 Bils Technology Inc
3384000D8A Winners Electronics Co., Ltd.
3385000D8B T&D Corporation
3386000D8C Shanghai Wedone Digital Ltd. CO.
3387000D8D ProLinx Communication Gateways, Inc.
3388000D8E Koden Electronics Co., Ltd.
3389000D8F King Tsushin Kogyo Co., LTD.
3390000D90 Factum Electronics AB
3391000D91 Eclipse (HQ Espana) S.L.
3392000D92 Arima Communication Corporation
3393000D93 Apple Computer
3394000D94 AFAR Communications,Inc
3395000D96 Vtera Technology Inc.
3396000D97 Tropos Networks, Inc.
3397000D98 S.W.A.C. Schmitt-Walter Automation Consult GmbH
3398000D99 Orbital Sciences Corp.; Launch Systems Group
3399000D9A INFOTEC LTD
3400000D9C Elan GmbH & Co KG
3401000D9D Hewlett Packard
3402000D9E TOKUDEN OHIZUMI SEISAKUSYO Co.,Ltd.
3403000D9F RF Micro Devices
3404000DA0 NEDAP N.V.
3405000DA1 MIRAE ITS Co.,LTD.
3406000DA2 Infrant Technologies, Inc.
3407000DA3 Emerging Technologies Limited
3408000DA4 DOSCH & AMAND SYSTEMS AG
3409000DA5 Fabric7 Systems, Inc
3410000DA6 Universal Switching Corporation
3411000DA8 Teletronics Technology Corporation
3412000DA9 T.E.A.M. S.L.
3413000DAA S.A.Tehnology co.,Ltd.
3414000DAB Parker Hannifin GmbH Electromechanical Division Europe
3415000DAC Japan CBM Corporation
3416000DAD Dataprobe Inc
3417000DAE SAMSUNG HEAVY INDUSTRIES CO., LTD.
3418000DAF Plexus Corp (UK) Ltd
3419000DB0 Olym-tech Co.,Ltd.
3420000DB1 Japan Network Service Co., Ltd.
3421000DB2 Ammasso, Inc.
3422000DB3 SDO Communication Corperation
3423000DB4 NETASQ
3424000DB5 GLOBALSAT TECHNOLOGY CORPORATION
3425000DB6 Teknovus, Inc.
3426000DB7 SANKO ELECTRIC CO,.LTD
3427000DB8 SCHILLER AG
3428000DB9 PC Engines GmbH
3429000DBA Océ Document Technologies GmbH
3430000DBB Nippon Dentsu Co.,Ltd.
3431000DBC Cisco Systems
3432000DBD Cisco Systems
3433000DBE Bel Fuse Europe Ltd.,UK
3434000DBF TekTone Sound & Signal Mfg., Inc.
3435000DC0 Spagat AS
3436000DC1 SafeWeb Inc
3437000DC3 First Communication, Inc.
3438000DC4 Emcore Corporation
3439000DC5 EchoStar International Corporation
3440000DC6 DigiRose Technology Co., Ltd.
3441000DC7 COSMIC ENGINEERING INC.
3442000DC8 AirMagnet, Inc
3443000DC9 THALES Elektronik Systeme GmbH
3444000DCA Tait Electronics
3445000DCB Petcomkorea Co., Ltd.
3446000DCC NEOSMART Corp.
3447000DCD GROUPE TXCOM
3448000DCE Dynavac Technology Pte Ltd
3449000DCF Cidra Corp.
3450000DD0 TetraTec Instruments GmbH
3451000DD1 Stryker Corporation
3452000DD2 Simrad Optronics ASA
3453000DD3 SAMWOO Telecommunication Co.,Ltd.
3454000DD4 Revivio Inc.
3455000DD5 O'RITE TECHNOLOGY CO.,LTD
3456000DD7 Bright
3457000DD8 BBN
3458000DD9 Anton Paar GmbH
3459000DDA ALLIED TELESIS K.K.
3460000DDB AIRWAVE TECHNOLOGIES INC.
3461000DDC VAC
3462000DDD PROFÝLO TELRA ELEKTRONÝK SANAYÝ VE TÝCARET A.Þ.
3463000DDE Joyteck Co., Ltd.
3464000DDF Japan Image & Network Inc.
3465000DE0 ICPDAS Co.,LTD
3466000DE1 Control Products, Inc.
3467000DE2 CMZ Sistemi Elettronici
3468000DE3 AT Sweden AB
3469000DE4 DIGINICS, Inc.
3470000DE5 Samsung Thales
3471000DE6 YOUNGBO ENGINEERING CO.,LTD
3472000DE7 Snap-on OEM Group
3473000DE8 Nasaco Electronics Pte. Ltd
3474000DE9 Napatech Aps
3475000DEA Kingtel Telecommunication Corp.
3476000DEB CompXs Limited
3477000DEC Cisco Systems
3478000DED Cisco Systems
3479000DEF Soc. Coop. Bilanciai
3480000DF0 QCOM TECHNOLOGY INC.
3481000DF1 IONIX INC.
3482000DF3 Asmax Solutions
3483000DF4 Watertek Co.
3484000DF5 Teletronics International Inc.
3485000DF6 Technology Thesaurus Corp.
3486000DF7 Space Dynamics Lab
3487000DF8 ORGA Kartensysteme GmbH
3488000DF9 NDS Limited
3489000DFA Micro Control Systems Ltd.
3490000DFB Komax AG
3491000DFC ITFOR Inc. resarch and development
3492000DFD Huges Hi-Tech Inc.,
3493000DFE Hauppauge Computer Works, Inc.
3494000DFF CHENMING MOLD INDUSTRY CORP.
3495000E01 ASIP Technologies Inc.
3496000E02 Advantech AMT Inc.
3497000E03 Aarohi Communications, Inc.
3498000E05 WIRELESS MATRIX CORP.
3499000E06 Team Simoco Ltd
3500000E07 Sony Ericsson Mobile Communications AB
3501000E08 Sipura Technology, Inc.
3502000E09 Shenzhen Coship Software Co.,LTD.
3503000E0B Netac Technology Co., Ltd.
3504000E0C Intel Corporation
3505000E0D HESCH Schröder GmbH
3506000E0E ESA elettronica S.P.A.
3507000E0F ERMME
3508000E11 BDT Büro- und Datentechnik GmbH & Co. KG
3509000E12 Adaptive Micro Systems Inc.
3510000E13 Accu-Sort Systems inc.
3511000E14 Visionary Solutions, Inc.
3512000E15 Tadlys LTD
3513000E16 SouthWing
3514000E18 MyA Technology
3515000E19 LogicaCMG Pty Ltd
3516000E1B IAV GmbH
3517000E1C Hach Company
3518000E1F TCL Networks Equipment Co., Ltd.
3519000E20 PalmSource, Inc.
3520000E21 MTU Friedrichshafen GmbH
3521000E23 Incipient, Inc.
3522000E25 Hannae Technology Co., Ltd
3523000E26 Gincom Technology Corp.
3524000E27 Crere Networks, Inc.
3525000E28 Dynamic Ratings P/L
3526000E29 Shester Communications Inc
3527000E2B Safari Technologies
3528000E2C Netcodec co.
3529000E2D Hyundai Digital Technology Co.,Ltd.
3530000E2E Edimax Technology Co., Ltd.
3531000E2F Disetronic Medical Systems AG
3532000E30 AERAS Networks, Inc.
3533000E31 Olympus BioSystems GmbH
3534000E32 Kontron Medical
3535000E33 Shuko Electronics Co.,Ltd
3536000E34 NexxGenCity
3537000E35 Intel Corp
3538000E36 HEINESYS, Inc.
3539000E37 Harms & Wende GmbH & Co.KG
3540000E38 Cisco Systems
3541000E39 Cisco Systems
3542000E3A Cirrus Logic
3543000E3B Hawking Technologies, Inc.
3544000E3C TransAct Technoloiges Inc.
3545000E3D Televic N.V.
3546000E3E Sun Optronics Inc
3547000E3F Soronti, Inc.
3548000E40 Nortel Networks
3549000E41 NIHON MECHATRONICS CO.,LTD.
3550000E42 Motic Incoporation Ltd.
3551000E43 G-Tek Electronics Sdn. Bhd.
3552000E44 Digital 5, Inc.
000E45 Beijing Newtry Electronic Technology Ltd
000E46 Niigata Seimitsu Co.,Ltd.
000E47 NCI System Co.,Ltd.
000E48 Lipman TransAction Solutions
000E49 Forsway Scandinavia AB
000E4A Changchun Huayu WEBPAD Co.,LTD
000E4B atrium c and i
000E4C Bermai Inc.
000E4D Numesa Inc.
000E4E Waveplus Technology Co., Ltd.
000E4F Trajet GmbH
000E50 Thomson Multi Media
000E51 tecna elettronica srl
000E52 Optium Corporation
000E53 AV TECH CORPORATION
000E54 AlphaCell Wireless Ltd.
000E55 AUVITRAN
000E56 4G Systems GmbH
000E57 Iworld Networking, Inc.
000E58 Rincon Networks
000E5A TELEFIELD inc.
000E5B ParkerVision - Direct2Data
000E5C Motorola BCS
000E5D Com-X Networks
000E5E Beijing Raisecom Science & Technology Development Co.,Ltd
000E5F activ-net GmbH & Co. KG
000E60 360SUN Digital Broadband Corporation
000E61 MICROTROL LIMITED
000E62 Nortel Networks
000E63 Lemke Diagnostics GmbH
000E64 Elphel, Inc
000E65 TransCore
000E66 Hitachi Advanced Digital, Inc.
000E67 Eltis Microelectronics Ltd.
000E68 E-TOP Network Technology Inc.
000E69 China Electric Power Research Institute
000E6A 3COM EUROPE LTD
000E6B Janitza electronics GmbH
000E6C Device Drivers Limited
000E6D Murata Manufacturing Co., Ltd.
000E6E MICRELEC ELECTRONICS S.A
000E6F IRIS Corporation Berhad
000E70 in2 Networks
000E71 Gemstar Technology Development Ltd.
000E72 CTS electronics
000E73 Tpack A/S
000E74 Solar Telecom. Tech
000E75 New York Air Brake Corp.
000E76 GEMSOC INNOVISION INC.
000E77 Decru, Inc.
000E78 Amtelco
000E79 Ample Communications Inc.
000E7B Toshiba
000E7D Electronics Line 3000 Ltd.
000E7E Comprog Oy
000E7F Hewlett Packard
000E81 Instant802 Networks Inc.
000E82 Commtech Wireless
000E83 Cisco Systems
000E84 Cisco Systems
000E85 Catalyst Enterprises, Inc.
000E86 Alcatel North America
000E87 adp Gauselmann GmbH
000E88 VIDEOTRON CORP.
000E89 CLEMATIC
000E8A Avara Technologies Pty. Ltd.
000E8B Astarte Technology Co, Ltd.
000E8C Siemens AG A&D ET
000E8D Systems in Progress Holding GmbH
000E8E SparkLAN Communications, Inc.
000E8F Sercomm Corp.
000E90 PONICO CORP.
000E92 Millinet Co., Ltd.
000E93 Milénio 3 Sistemas Electrónicos, Lda.
000E94 Maas International BV
000E95 Fujiya Denki Seisakusho Co.,Ltd.
000E96 Cubic Defense Applications, Inc.
000E97 Ultracker Technology CO., Inc
000E98 Vitec CC, INC.
000E99 Spectrum Digital, Inc
000E9A BOE TECHNOLOGY GROUP CO.,LTD
000E9C Pemstar
000E9D Video Networks Ltd
000E9E Topfield Co., Ltd
000E9F TEMIC SDS GmbH
000EA0 NetKlass Technology Inc.
000EA1 Formosa Teletek Corporation
000EA2 CyberGuard Corporation
000EA3 CNCR-IT CO.,LTD,HangZhou P.R.CHINA
000EA4 Certance Inc.
000EA5 BLIP Systems
000EA6 ASUSTEK COMPUTER INC.
000EA7 Endace Inc Ltd.
000EA8 United Technologists Europe Limited
000EA9 Shanghai Xun Shi Communications Equipment Ltd. Co.
000EAC MINTRON ENTERPRISE CO., LTD.
000EAD Metanoia Technologies, Inc.
000EAE GAWELL TECHNOLOGIES CORP.
000EAF CASTEL
000EB0 Solutions Radio BV
000EB1 Newcotech,Ltd
000EB2 Micro-Research Finland Oy
000EB3 LeftHand Networks
000EB4 GUANGZHOU GAOKE COMMUNICATIONS TECHNOLOGY CO.LTD.
000EB5 Ecastle Electronics Co., Ltd.
000EB6 Riverbed Technology, Inc.
000EB7 Knovative, Inc.
000EB8 Iiga co.,Ltd
000EB9 HASHIMOTO Electronics Industry Co.,Ltd.
000EBA HANMI SEMICONDUCTOR CO., LTD.
000EBB Everbee Networks
000EBC Cullmann GmbH
000EBD Burdick, a Quinton Compny
000EBE B&B Electronics Manufacturing Co.
000EC0 Nortel Networks
000EC1 MYNAH Technologies
000EC2 Lowrance Electronics, Inc.
000EC3 Logic Controls, Inc.
000EC4 Iskra Transmission d.d.
000EC6 ASIX ELECTRONICS CORP.
000EC7 Appeal Telecom Co.,Ltd.
000EC8 Zoran Corporation
000EC9 YOKO Technology Corp.
000ECB VineSys Technology
000ECC Tableau
000ECD SKOV A/S
000ECE S.I.T.T.I. S.p.A.
000ECF PROFIBUS Nutzerorganisation e.V.
000ED0 Privaris, Inc.
000ED1 Osaka Micro Computer.
000ED2 Filtronic plc
000ED3 Epicenter, Inc.
000ED4 CRESITT INDUSTRIE
000ED5 COPAN Systems Inc.
000ED6 Cisco Systems
000ED7 Cisco Systems
000ED8 Aktino, Inc.
000ED9 Aksys, Ltd.
000EDA C-TECH UNITED CORP.
000EDB XiNCOM Corp.
000EDC Tellion INC.
000EDD SHURE INCORPORATED
000EDE REMEC, Inc.
000EDF PLX Technology
000EE0 Mcharge
000EE1 ExtremeSpeed Inc.
000EE2 Custom Engineering S.p.A.
000EE3 Chiyu Technology Co.,Ltd
000EE5 bitWallet, Inc.
000EE6 Adimos Systems LTD
000EE7 AAC ELECTRONICS CORP.
000EE8 zioncom
000EE9 WayTech Development, Inc.
000EEA Shadong Luneng Jicheng Electronics,Co.,Ltd
000EEB Sandmartin(zhong shan)Electronics Co.,Ltd
000EEC Orban
000EED Nokia Danmark A/S
000EEE Muco Industrie BV
000EF0 Festo AG & Co. KG
000EF1 EZQUEST INC.
000EF3 Smarthome
000EF4 Shenzhen Kasda Digital Technology Co.,Ltd
000EF5 iPAC Technology Co., Ltd.
000EF6 E-TEN Information Systems Co., Ltd.
000EF7 Vulcan Portals Inc
000EF8 SBC ASI
000EF9 REA Elektronik GmbH
000EFA Optoway Technology Incorporation
000EFB Macey Enterprises
000EFC JTAG Technologies B.V.
000EFD FUJI PHOTO OPTICAL CO., LTD.
000EFE EndRun Technologies LLC
000EFF Megasolution,Inc.
000F00 Legra Systems, Inc.
000F01 DIGITALKS INC
000F02 Digicube Technology Co., Ltd
000F03 COM&C CO., LTD
000F04 cim-usa inc
000F05 3B SYSTEM INC.
000F06 Nortel Networks
000F07 Mangrove Systems, Inc.
000F08 Indagon Oy
000F0B Kentima Technologies AB
000F0C SYNCHRONIC ENGINEERING
000F0D Hunt Electronic Co., Ltd.
000F0E WaveSplitter Technologies, Inc.
000F0F Real ID Technology Co., Ltd.
000F10 RDM Corporation
000F11 Prodrive B.V.
000F12 Panasonic AVC Networks Germany GmbH
000F13 Nisca corporation
000F14 Mindray Co., Ltd.
000F15 Kjaerulff1 A/S
000F16 JAY HOW TECHNOLOGY CO.,
000F17 Insta Elektro GmbH
000F18 Industrial Control Systems
000F19 Guidant Corporation
000F1A Gaming Support B.V.
000F1B Ego Systems Inc.
000F1C DigitAll World Co., Ltd
000F1D Cosmo Techs Co., Ltd.
000F1E Chengdu KT Electric Co.of High & New Technology
000F1F WW PCBA Test
000F20 WW Ops
000F21 Scientific Atlanta, Inc
000F22 Helius, Inc.
000F23 Cisco Systems
000F24 Cisco Systems
000F25 AimValley B.V.
000F26 WorldAccxx LLC
000F27 TEAL Electronics, Inc.
000F28 Itronix Corporation
000F29 Augmentix Corporation
000F2A Cableware Electronics
000F2B GREENBELL SYSTEMS
000F2C Uplogix, Inc.
001000 CABLE TELEVISION LABORATORIES, INC.
001001 MCK COMMUNICATIONS
001002 ACTIA
001003 IMATRON, INC.
001004 THE BRANTLEY COILE COMPANY,INC
001005 UEC COMMERCIAL
001006 Thales Contact Solutions Ltd.
001007 CISCO SYSTEMS, INC.
001008 VIENNA SYSTEMS CORPORATION
001009 HORO QUARTZ
00100A WILLIAMS COMMUNICATIONS GROUP
00100B CISCO SYSTEMS, INC.
00100C ITO CO., LTD.
00100D CISCO SYSTEMS, INC.
00100E MICRO LINEAR COPORATION
00100F INDUSTRIAL CPU SYSTEMS
001010 INITIO CORPORATION
001011 CISCO SYSTEMS, INC.
001012 PROCESSOR SYSTEMS (I) PVT LTD
001013 INDUSTRIAL COMPUTER SOURCE
001014 CISCO SYSTEMS, INC.
001015 OOmon Inc.
001016 T.SQWARE
001017 MICOS GmbH
001018 BROADCOM CORPORATION
001019 SIRONA DENTAL SYSTEMS GmbH & Co. KG
00101A PictureTel Corp.
00101B CORNET TECHNOLOGY, INC.
00101C OHM TECHNOLOGIES INTL, LLC
00101D WINBOND ELECTRONICS CORP.
00101E MATSUSHITA ELECTRONIC INSTRUMENTS CORP.
00101F CISCO SYSTEMS, INC.
001020 WELCH ALLYN, DATA COLLECTION
001021 ENCANTO NETWORKS, INC.
001022 SatCom Media Corporation
001023 FLOWWISE NETWORKS, INC.
001024 NAGOYA ELECTRIC WORKS CO., LTD
001025 GRAYHILL INC.
001026 ACCELERATED NETWORKS, INC.
001027 L-3 COMMUNICATIONS EAST
001028 COMPUTER TECHNICA, INC.
001029 CISCO SYSTEMS, INC.
00102A ZF MICROSYSTEMS, INC.
00102B UMAX DATA SYSTEMS, INC.
00102C Lasat Networks A/S
00102D HITACHI SOFTWARE ENGINEERING
00102E NETWORK SYSTEMS & TECHNOLOGIES PVT. LTD.
00102F CISCO SYSTEMS, INC.
001030 Wi-LAN, Inc.
001031 OBJECTIVE COMMUNICATIONS, INC.
001032 ALTA TECHNOLOGY
001033 ACCESSLAN COMMUNICATIONS, INC.
001034 GNP Computers
001035 ELITEGROUP COMPUTER SYSTEMS CO., LTD
001036 INTER-TEL INTEGRATED SYSTEMS
001037 CYQ've Technology Co., Ltd.
001038 MICRO RESEARCH INSTITUTE, INC.
001039 Vectron Systems AG
00103A DIAMOND NETWORK TECH
00103B HIPPI NETWORKING FORUM
00103C IC ENSEMBLE, INC.
00103D PHASECOM, LTD.
00103E NETSCHOOLS CORPORATION
00103F TOLLGRADE COMMUNICATIONS, INC.
001040 INTERMEC CORPORATION
001041 BRISTOL BABCOCK, INC.
001042 AlacriTech
001043 A2 CORPORATION
001044 InnoLabs Corporation
001045 Nortel Networks
001046 ALCORN MCBRIDE INC.
001047 ECHO ELETRIC CO. LTD.
001048 HTRC AUTOMATION, INC.
001049 SHORELINE TELEWORKS, INC.
00104A THE PARVUC CORPORATION
00104B 3COM CORPORATION
00104C COMPUTER ACCESS TECHNOLOGY
00104D SURTEC INDUSTRIES, INC.
00104E CEOLOGIC
00104F STORAGE TECHNOLOGY CORPORATION
001050 RION CO., LTD.
001051 CMICRO CORPORATION
001052 METTLER-TOLEDO (ALBSTADT) GMBH
001053 COMPUTER TECHNOLOGY CORP.
001054 CISCO SYSTEMS, INC.
001055 FUJITSU MICROELECTRONICS, INC.
001056 SODICK CO., LTD.
001057 Rebel.com, Inc.
001058 ArrowPoint Communications
001059 DIABLO RESEARCH CO. LLC
00105A 3COM CORPORATION
00105B NET INSIGHT AB
00105C QUANTUM DESIGNS (H.K.) LTD.
00105D Draeger Medical
00105E HEKIMIAN LABORATORIES, INC.
00105F IN-SNEC
001060 BILLIONTON SYSTEMS, INC.
001061 HOSTLINK CORP.
001062 NX SERVER, ILNC.
001063 STARGUIDE DIGITAL NETWORKS
001064 DIGITAL EQUIPMENT CORP.
001065 RADYNE CORPORATION
001066 ADVANCED CONTROL SYSTEMS, INC.
001067 REDBACK NETWORKS, INC.
001068 COMOS TELECOM
001069 HELIOSS COMMUNICATIONS, INC.
00106A DIGITAL MICROWAVE CORPORATION
00106B SONUS NETWORKS, INC.
00106C INFRATEC PLUS GmbH
00106D INTEGRITY COMMUNICATIONS, INC.
00106E TADIRAN COM. LTD.
00106F TRENTON TECHNOLOGY INC.
001070 CARADON TREND LTD.
001071 ADVANET INC.
001072 GVN TECHNOLOGIES, INC.
001073 TECHNOBOX, INC.
001074 ATEN INTERNATIONAL CO., LTD.
001075 Maxtor Corporation
001076 EUREM GmbH
001077 SAF DRIVE SYSTEMS, LTD.
001078 NUERA COMMUNICATIONS, INC.
001079 CISCO SYSTEMS, INC.
00107A AmbiCom, Inc.
00107B CISCO SYSTEMS, INC.
00107C P-COM, INC.
00107D AURORA COMMUNICATIONS, LTD.
00107E BACHMANN ELECTRONIC GmbH
00107F CRESTRON ELECTRONICS, INC.
001080 METAWAVE COMMUNICATIONS
001081 DPS, INC.
001082 JNA TELECOMMUNICATIONS LIMITED
001083 HEWLETT-PACKARD COMPANY
001084 K-BOT COMMUNICATIONS
001085 POLARIS COMMUNICATIONS, INC.
001086 ATTO TECHNOLOGY, INC.
001087 Xstreamis PLC
001088 AMERICAN NETWORKS INC.
001089 WebSonic
00108A TeraLogic, Inc.
00108B LASERANIMATION SOLLINGER GmbH
00108C FUJITSU TELECOMMUNICATIONS EUROPE, LTD.
00108D JOHNSON CONTROLS, INC.
00108E HUGH SYMONS CONCEPT Technologies Ltd.
00108F RAPTOR SYSTEMS
001090 CIMETRICS, INC.
001091 NO WIRES NEEDED BV
001092 NETCORE INC.
001093 CMS COMPUTERS, LTD.
001094 Performance Analysis Broadband, Spirent plc
001095 Thomson Multimedia, Inc.
001096 TRACEWELL SYSTEMS, INC.
001097 WinNet Metropolitan Communications Systems, Inc.
001098 STARNET TECHNOLOGIES, INC.
001099 InnoMedia, Inc.
00109A NETLINE
00109B VIXEL CORPORATION
00109C M-SYSTEM CO., LTD.
00109D CLARINET SYSTEMS, INC.
00109E AWARE, INC.
00109F PAVO, INC.
0010A0 INNOVEX TECHNOLOGIES, INC.
0010A1 KENDIN SEMICONDUCTOR, INC.
0010A2 TNS
0010A3 OMNITRONIX, INC.
0010A4 XIRCOM
0010A5 OXFORD INSTRUMENTS
0010A6 CISCO SYSTEMS, INC.
0010A7 UNEX TECHNOLOGY CORPORATION
0010A8 RELIANCE COMPUTER CORP.
0010A9 ADHOC TECHNOLOGIES
0010AA MEDIA4, INC.
0010AB KOITO INDUSTRIES, LTD.
0010AC IMCI TECHNOLOGIES
0010AD SOFTRONICS USB, INC.
0010AE SHINKO ELECTRIC INDUSTRIES CO.
0010AF TAC SYSTEMS, INC.
0010B0 MERIDIAN TECHNOLOGY CORP.
0010B1 FOR-A CO., LTD.
0010B2 COACTIVE AESTHETICS
0010B3 NOKIA MULTIMEDIA TERMINALS
0010B4 ATMOSPHERE NETWORKS
0010B5 ACCTON TECHNOLOGY CORPORATION
0010B6 ENTRATA COMMUNICATIONS CORP.
0010B7 COYOTE TECHNOLOGIES, LLC
0010B8 ISHIGAKI COMPUTER SYSTEM CO.
0010B9 MAXTOR CORP.
0010BA MARTINHO-DAVIS SYSTEMS, INC.
0010BB DATA & INFORMATION TECHNOLOGY
0010BC Aastra Telecom
0010BD THE TELECOMMUNICATION TECHNOLOGY COMMITTEE
0010BE TELEXIS CORP.
0010BF InterAir Wireless
0010C0 ARMA, INC.
0010C1 OI ELECTRIC CO., LTD.
0010C2 WILLNET, INC.
0010C3 CSI-CONTROL SYSTEMS
0010C4 MEDIA LINKS CO., LTD.
0010C5 PROTOCOL TECHNOLOGIES, INC.
0010C6 USI
0010C7 DATA TRANSMISSION NETWORK
0010C8 COMMUNICATIONS ELECTRONICS SECURITY GROUP
0010C9 MITSUBISHI ELECTRONICS LOGISTIC SUPPORT CO.
0010CA INTEGRAL ACCESS
0010CB FACIT K.K.
0010CC CLP COMPUTER LOGISTIK PLANUNG GmbH
0010CD INTERFACE CONCEPT
0010CE VOLAMP, LTD.
0010CF FIBERLANE COMMUNICATIONS
0010D0 WITCOM, LTD.
0010D1 Top Layer Networks, Inc.
0010D2 NITTO TSUSHINKI CO., LTD
0010D3 GRIPS ELECTRONIC GMBH
0010D4 STORAGE COMPUTER CORPORATION
0010D5 IMASDE CANARIAS, S.A.
0010D6 ITT - A/CD
0010D7 ARGOSY RESEARCH INC.
0010D8 CALISTA
0010D9 IBM JAPAN, FUJISAWA MT+D
0010DA MOTION ENGINEERING, INC.
0010DB NetScreen Technologies, Inc.
0010DC MICRO-STAR INTERNATIONAL CO., LTD.
0010DD ENABLE SEMICONDUCTOR, INC.
0010DE INTERNATIONAL DATACASTING CORPORATION
0010DF RISE COMPUTER INC.
0010E0 COBALT MICROSERVER, INC.
0010E1 S.I. TECH, INC.
0010E2 ArrayComm, Inc.
0010E3 COMPAQ COMPUTER CORPORATION
0010E4 NSI CORPORATION
0010E5 SOLECTRON TEXAS
0010E6 APPLIED INTELLIGENT SYSTEMS, INC.
0010E7 BreezeCom
0010E8 TELOCITY, INCORPORATED
0010E9 RAIDTEC LTD.
0010EA ADEPT TECHNOLOGY
0010EB SELSIUS SYSTEMS, INC.
0010EC RPCG, LLC
0010ED SUNDANCE TECHNOLOGY, INC.
0010EE CTI PRODUCTS, INC.
0010EF DBTEL INCORPORATED
0010F1 I-O CORPORATION
0010F2 ANTEC
0010F3 Nexcom International Co., Ltd.
0010F4 VERTICAL NETWORKS, INC.
0010F5 AMHERST SYSTEMS, INC.
0010F6 CISCO SYSTEMS, INC.
0010F7 IRIICHI TECHNOLOGIES Inc.
0010F8 KENWOOD TMI CORPORATION
0010F9 UNIQUE SYSTEMS, INC.
0010FA ZAYANTE, INC.
0010FB ZIDA TECHNOLOGIES LIMITED
0010FC BROADBAND NETWORKS, INC.
0010FD COCOM A/S
0010FE DIGITAL EQUIPMENT CORPORATION
0010FF CISCO SYSTEMS, INC.
001C7C PERQ SYSTEMS CORPORATION
002000 LEXMARK INTERNATIONAL, INC.
002001 DSP SOLUTIONS, INC.
002002 SERITECH ENTERPRISE CO., LTD.
002003 PIXEL POWER LTD.
002004 YAMATAKE-HONEYWELL CO., LTD.
002005 SIMPLE TECHNOLOGY
002006 GARRETT COMMUNICATIONS, INC.
002007 SFA, INC.
002008 CABLE & COMPUTER TECHNOLOGY
002009 PACKARD BELL ELEC., INC.
00200A SOURCE-COMM CORP.
00200B OCTAGON SYSTEMS CORP.
00200C ADASTRA SYSTEMS CORP.
00200D CARL ZEISS
00200E SATELLITE TECHNOLOGY MGMT, INC
00200F TANBAC CO., LTD.
002010 JEOL SYSTEM TECHNOLOGY CO. LTD
002011 CANOPUS CO., LTD.
002012 CAMTRONICS MEDICAL SYSTEMS
002013 DIVERSIFIED TECHNOLOGY, INC.
002014 GLOBAL VIEW CO., LTD.
002015 ACTIS COMPUTER SA
002016 SHOWA ELECTRIC WIRE & CABLE CO
002017 ORBOTECH
002018 CIS TECHNOLOGY INC.
002019 OHLER GmbH
00201A N-BASE SWITCH COMMUNICATIONS
00201B NORTHERN TELECOM/NETWORK
00201C EXCEL, INC.
00201D KATANA PRODUCTS
00201E NETQUEST CORPORATION
00201F BEST POWER TECHNOLOGY, INC.
002020 MEGATRON COMPUTER INDUSTRIES PTY, LTD.
002021 ALGORITHMS SOFTWARE PVT. LTD.
002022 TEKNIQUE, INC.
002023 T.C. TECHNOLOGIES PTY. LTD
002024 PACIFIC COMMUNICATION SCIENCES
002025 CONTROL TECHNOLOGY, INC.
002026 AMKLY SYSTEMS, INC.
002027 MING FORTUNE INDUSTRY CO., LTD
002028 WEST EGG SYSTEMS, INC.
002029 TELEPROCESSING PRODUCTS, INC.
00202A N.V. DZINE
00202B ADVANCED TELECOMMUNICATIONS MODULES, LTD.
00202C WELLTRONIX CO., LTD.
00202D TAIYO CORPORATION
00202E DAYSTAR DIGITAL
00202F ZETA COMMUNICATIONS, LTD.
002030 ANALOG & DIGITAL SYSTEMS
002031 ERTEC GmbH
002032 ALCATEL TAISEL
002033 SYNAPSE TECHNOLOGIES, INC.
002034 ROTEC INDUSTRIEAUTOMATION GMBH
002035 IBM CORPORATION
002036 BMC SOFTWARE
002037 SEAGATE TECHNOLOGY
002038 VME MICROSYSTEMS INTERNATIONAL CORPORATION
002039 SCINETS
00203A DIGITAL BI0METRICS INC.
00203B WISDM LTD.
00203C EUROTIME AB
00203D NOVAR ELECTRONICS CORPORATION
00203E LogiCan Technologies, Inc.
00203F JUKI CORPORATION
002040 Motorola Broadband Communications Sector
002041 DATA NET
002042 DATAMETRICS CORP.
002043 NEURON COMPANY LIMITED
002044 GENITECH PTY LTD
002045 ION Networks, Inc.
002046 CIPRICO, INC.
002047 STEINBRECHER CORP.
002048 Marconi Communications
002049 COMTRON, INC.
00204A PRONET GMBH
00204B AUTOCOMPUTER CO., LTD.
00204C MITRON COMPUTER PTE LTD.
00204D INOVIS GMBH
00204E NETWORK SECURITY SYSTEMS, INC.
00204F DEUTSCHE AEROSPACE AG
002050 KOREA COMPUTER INC.
002051 Verilink Corporation
002052 RAGULA SYSTEMS
002053 HUNTSVILLE MICROSYSTEMS, INC.
002054 EASTERN RESEARCH, INC.
002055 ALTECH CO., LTD.
002056 NEOPRODUCTS
002057 TITZE DATENTECHNIK GmbH
002058 ALLIED SIGNAL INC.
002059 MIRO COMPUTER PRODUCTS AG
00205A COMPUTER IDENTICS
00205B SKYLINE TECHNOLOGY
00205C InterNet Systems of Florida, Inc.
00205D NANOMATIC OY
00205E CASTLE ROCK, INC.
00205F GAMMADATA COMPUTER GMBH
002060 ALCATEL ITALIA S.p.A.
002061 DYNATECH COMMUNICATIONS, INC.
002062 SCORPION LOGIC, LTD.
002063 WIPRO INFOTECH LTD.
002064 PROTEC MICROSYSTEMS, INC.
002065 SUPERNET NETWORKING INC.
002066 GENERAL MAGIC, INC.
002068 ISDYNE
002069 ISDN SYSTEMS CORPORATION
00206A OSAKA COMPUTER CORP.
00206B KONICA MINOLTA HOLDINGS, INC.
00206C EVERGREEN TECHNOLOGY CORP.
00206D DATA RACE, INC.
00206E XACT, INC.
00206F FLOWPOINT CORPORATION
002070 HYNET, LTD.
002071 IBR GMBH
002072 WORKLINK INNOVATIONS
002073 FUSION SYSTEMS CORPORATION
002074 SUNGWOON SYSTEMS
002075 MOTOROLA COMMUNICATION ISRAEL
002076 REUDO CORPORATION
002077 KARDIOS SYSTEMS CORP.
002078 RUNTOP, INC.
002079 MIKRON GMBH
00207A WiSE Communications, Inc.
00207B Intel Corporation
00207C AUTEC GmbH
00207D ADVANCED COMPUTER APPLICATIONS
00207E FINECOM Co., Ltd.
00207F KYOEI SANGYO CO., LTD.
002080 SYNERGY (UK) LTD.
002081 TITAN ELECTRONICS
002082 ONEAC CORPORATION
002083 PRESTICOM INCORPORATED
002084 OCE PRINTING SYSTEMS, GMBH
002085 EXIDE ELECTRONICS
002086 MICROTECH ELECTRONICS LIMITED
002087 MEMOTEC COMMUNICATIONS CORP.
002088 GLOBAL VILLAGE COMMUNICATION
002089 T3PLUS NETWORKING, INC.
00208A SONIX COMMUNICATIONS, LTD.
00208B LAPIS TECHNOLOGIES, INC.
00208C GALAXY NETWORKS, INC.
00208D CMD TECHNOLOGY
00208E CHEVIN SOFTWARE ENG. LTD.
00208F ECI TELECOM LTD.
002090 ADVANCED COMPRESSION TECHNOLOGY, INC.
002091 J125, NATIONAL SECURITY AGENCY
002092 CHESS ENGINEERING B.V.
002093 LANDINGS TECHNOLOGY CORP.
002094 CUBIX CORPORATION
002095 RIVA ELECTRONICS
002096 Invensys
002097 APPLIED SIGNAL TECHNOLOGY
002098 HECTRONIC AB
002099 BON ELECTRIC CO., LTD.
00209A THE 3DO COMPANY
00209B ERSAT ELECTRONIC GMBH
00209C PRIMARY ACCESS CORP.
00209D LIPPERT AUTOMATIONSTECHNIK
00209E BROWN'S OPERATING SYSTEM SERVICES, LTD.
00209F MERCURY COMPUTER SYSTEMS, INC.
0020A0 OA LABORATORY CO., LTD.
0020A1 DOVATRON
0020A2 GALCOM NETWORKING LTD.
0020A3 DIVICOM INC.
0020A4 MULTIPOINT NETWORKS
0020A5 API ENGINEERING
0020A6 PROXIM, INC.
0020A7 PAIRGAIN TECHNOLOGIES, INC.
0020A8 SAST TECHNOLOGY CORP.
0020A9 WHITE HORSE INDUSTRIAL
0020AA DIGIMEDIA VISION LTD.
0020AB MICRO INDUSTRIES CORP.
0020AC INTERFLEX DATENSYSTEME GMBH
0020AD LINQ SYSTEMS
0020AE ORNET DATA COMMUNICATION TECH.
0020AF 3COM CORPORATION
0020B0 GATEWAY DEVICES, INC.
0020B1 COMTECH RESEARCH INC.
0020B2 GKD Gesellschaft Fur Kommunikation Und Datentechnik
0020B3 SCLTEC COMMUNICATIONS SYSTEMS
0020B4 TERMA ELEKTRONIK AS
0020B5 YASKAWA ELECTRIC CORPORATION
0020B6 AGILE NETWORKS, INC.
0020B7 NAMAQUA COMPUTERWARE
0020B8 PRIME OPTION, INC.
0020B9 METRICOM, INC.
0020BA CENTER FOR HIGH PERFORMANCE
0020BB ZAX CORPORATION
0020BC JTEC PTY LTD.
0020BD NIOBRARA R & D CORPORATION
0020BE LAN ACCESS CORP.
0020BF AEHR TEST SYSTEMS
0020C0 PULSE ELECTRONICS, INC.
0020C1 TAIKO ELECTRIC WORKS, LTD.
0020C2 TEXAS MEMORY SYSTEMS, INC.
0020C3 COUNTER SOLUTIONS LTD.
0020C4 INET,INC.
0020C5 EAGLE TECHNOLOGY
0020C6 NECTEC
0020C7 AKAI Professional M.I. Corp.
0020C8 LARSCOM INCORPORATED
0020C9 VICTRON BV
0020CA DIGITAL OCEAN
0020CB PRETEC ELECTRONICS CORP.
0020CC DIGITAL SERVICES, LTD.
0020CD HYBRID NETWORKS, INC.
0020CE LOGICAL DESIGN GROUP, INC.
0020CF TEST & MEASUREMENT SYSTEMS INC
0020D0 VERSALYNX CORPORATION
0020D1 MICROCOMPUTER SYSTEMS (M) SDN.
0020D2 RAD DATA COMMUNICATIONS, LTD.
0020D3 OST (OUEST STANDARD TELEMATIQU
0020D4 CABLETRON - ZEITTNET INC.
0020D5 VIPA GMBH
0020D6 BREEZECOM
0020D7 JAPAN MINICOMPUTER SYSTEMS CO., Ltd.
0020D8 Nortel Networks
0020D9 PANASONIC TECHNOLOGIES, INC./MIECO-US
0020DA XYLAN CORPORATION
0020DB XNET TECHNOLOGY, INC.
0020DC DENSITRON TAIWAN LTD.
0020DD Cybertec Pty Ltd
0020DE JAPAN DIGITAL LABORAT'Y CO.LTD
0020DF KYOSAN ELECTRIC MFG. CO., LTD.
0020E0 PREMAX ELECTRONICS, INC.
0020E1 ALAMAR ELECTRONICS
0020E2 INFORMATION RESOURCE ENGINEERING
0020E3 MCD KENCOM CORPORATION
0020E4 HSING TECH ENTERPRISE CO., LTD
0020E5 APEX DATA, INC.
0020E6 LIDKOPING MACHINE TOOLS AB
0020E7 B&W NUCLEAR SERVICE COMPANY
0020E8 DATATREK CORPORATION
0020E9 DANTEL
0020EA EFFICIENT NETWORKS, INC.
0020EB CINCINNATI MICROWAVE, INC.
0020EC TECHWARE SYSTEMS CORP.
0020ED GIGA-BYTE TECHNOLOGY CO., LTD.
0020EE GTECH CORPORATION
0020EF USC CORPORATION
0020F0 UNIVERSAL MICROELECTRONICS CO.
0020F1 ALTOS INDIA LIMITED
0020F2 SUN MICROSYSTEMS, INC.
0020F3 RAYNET CORPORATION
0020F4 SPECTRIX CORPORATION
0020F5 PANDATEL AG
0020F6 NET TEK AND KARLNET, INC.
0020F7 CYBERDATA
0020F8 CARRERA COMPUTERS, INC.
0020F9 PARALINK NETWORKS, INC.
0020FA GDE SYSTEMS, INC.
0020FB OCTEL COMMUNICATIONS CORP.
0020FC MATROX
0020FD ITV TECHNOLOGIES, INC.
0020FE TOPWARE INC. / GRAND COMPUTER
0020FF SYMMETRICAL TECHNOLOGIES
003000 ALLWELL TECHNOLOGY CORP.
003001 SMP
003002 Expand Networks
003003 Phasys Ltd.
003004 LEADTEK RESEARCH INC.
003005 Fujitsu Siemens Computers
003006 SUPERPOWER COMPUTER
003007 OPTI, INC.
003008 AVIO DIGITAL, INC.
003009 Tachion Networks, Inc.
00300A AZTECH SYSTEMS LTD.
00300B mPHASE Technologies, Inc.
00300C CONGRUENCY, LTD.
00300D MMC Technology, Inc.
00300E Klotz Digital AG
00300F IMT - Information Management T
003010 VISIONETICS INTERNATIONAL
003011 HMS FIELDBUS SYSTEMS AB
003012 DIGITAL ENGINEERING LTD.
003013 NEC Corporation
003014 DIVIO, INC.
003015 CP CLARE CORP.
003016 ISHIDA CO., LTD.
003017 TERASTACK LTD.
003018 Jetway Information Co., Ltd.
003019 CISCO SYSTEMS, INC.
00301A SMARTBRIDGES PTE. LTD.
00301B SHUTTLE, INC.
00301C ALTVATER AIRDATA SYSTEMS
00301D SKYSTREAM, INC.
00301E 3COM Europe Ltd.
00301F OPTICAL NETWORKS, INC.
003020 TSI, Inc..
003021 HSING TECH. ENTERPRISE CO.,LTD
003022 Fong Kai Industrial Co., Ltd.
003023 COGENT COMPUTER SYSTEMS, INC.
003024 CISCO SYSTEMS, INC.
003025 CHECKOUT COMPUTER SYSTEMS, LTD
003026 HEITEL
003027 KERBANGO, INC.
003028 FASE Saldatura srl
003029 OPICOM
00302A SOUTHERN INFORMATION
00302B INALP NETWORKS, INC.
00302C SYLANTRO SYSTEMS CORPORATION
00302D QUANTUM BRIDGE COMMUNICATIONS
00302E Hoft & Wessel AG
00302F Smiths Industries
003030 HARMONIX CORPORATION
003031 LIGHTWAVE COMMUNICATIONS, INC.
003032 MagicRam, Inc.
003033 ORIENT TELECOM CO., LTD.
003036 RMP ELEKTRONIKSYSTEME GMBH
003037 Packard Bell Nec Services
003038 XCP, INC.
003039 SOFTBOOK PRESS
00303A MAATEL
00303B PowerCom Technology
00303C ONNTO CORP.
00303D IVA CORPORATION
00303E Radcom Ltd.
00303F TurboComm Tech Inc.
003040 CISCO SYSTEMS, INC.
003041 SAEJIN T & M CO., LTD.
003042 DeTeWe-Deutsche Telephonwerke
003043 IDREAM TECHNOLOGIES, PTE. LTD.
003044 Portsmith LLC
003045 Village Networks, Inc. (VNI)
003046 Controlled Electronic Manageme
003047 NISSEI ELECTRIC CO., LTD.
003048 Supermicro Computer, Inc.
003049 BRYANT TECHNOLOGY, LTD.
00304A FRAUNHOFER INSTITUTE IMS
00304B ORBACOM SYSTEMS, INC.
00304C APPIAN COMMUNICATIONS, INC.
00304D ESI
00304E BUSTEC PRODUCTION LTD.
00304F PLANET Technology Corporation
003050 Versa Technology
003051 ORBIT AVIONIC & COMMUNICATION
003052 ELASTIC NETWORKS
003053 Basler AG
003054 CASTLENET TECHNOLOGY, INC.
003055 Hitachi Semiconductor America,
003056 Beck IPC GmbH
003057 E-Tel Corporation
003058 API MOTION
003059 DIGITAL-LOGIC AG
00305A TELGEN CORPORATION
00305B MODULE DEPARTMENT
00305C SMAR Laboratories Corp.
00305D DIGITRA SYSTEMS, INC.
00305E Abelko Innovation
00305F IMACON APS
003060 STARMATIX, INC.
003061 MobyTEL
003062 PATH 1 NETWORK TECHNOL'S INC.
003063 SANTERA SYSTEMS, INC.
003064 ADLINK TECHNOLOGY, INC.
003065 APPLE COMPUTER, INC.
003066 DIGITAL WIRELESS CORPORATION
003067 BIOSTAR MICROTECH INT'L CORP.
003068 CYBERNETICS TECH. CO., LTD.
003069 IMPACCT TECHNOLOGY CORP.
00306A PENTA MEDIA CO., LTD.
00306B CMOS SYSTEMS, INC.
00306C Hitex Holding GmbH
00306D LUCENT TECHNOLOGIES
00306E HEWLETT PACKARD
00306F SEYEON TECH. CO., LTD.
003070 1Net Corporation
003071 Cisco Systems, Inc.
003072 INTELLIBYTE INC.
003073 International Microsystems, In
003074 EQUIINET LTD.
003075 ADTECH
003076 Akamba Corporation
003077 ONPREM NETWORKS
003078 Cisco Systems, Inc.
003079 CQOS, INC.
00307A Advanced Technology & Systems
00307B Cisco Systems, Inc.
00307C ADID SA
00307D GRE AMERICA, INC.
00307E Redflex Communication Systems
00307F IRLAN LTD.
003080 CISCO SYSTEMS, INC.
003081 ALTOS C&C
003082 TAIHAN ELECTRIC WIRE CO., LTD.
003083 Ivron Systems
003084 ALLIED TELESYN INTERNAIONAL
003085 CISCO SYSTEMS, INC.
003086 Transistor Devices, Inc.
003087 VEGA GRIESHABER KG
003088 Siara Systems, Inc.
003089 Spectrapoint Wireless, LLC
00308A NICOTRA SISTEMI S.P.A
00308B Brix Networks
00308C ADVANCED DIGITAL INFORMATION
00308D PINNACLE SYSTEMS, INC.
00308E CROSS MATCH TECHNOLOGIES, INC.
00308F MICRILOR, Inc.
003090 CYRA TECHNOLOGIES, INC.
003091 TAIWAN FIRST LINE ELEC. CORP.
003092 ModuNORM GmbH
003093 SONNET TECHNOLOGIES, INC.
003094 Cisco Systems, Inc.
003095 Procomp Informatics, Ltd.
003096 CISCO SYSTEMS, INC.
003097 EXOMATIC AB
003098 Global Converging Technologies
003099 BOENIG UND KALLENBACH OHG
00309A ASTRO TERRA CORP.
00309B Smartware
00309C Timing Applications, Inc.
00309D Nimble Microsystems, Inc.
00309E WORKBIT CORPORATION.
00309F AMBER NETWORKS
0030A0 TYCO SUBMARINE SYSTEMS, LTD.
0030A1 WEBGATE Inc.
0030A2 Lightner Engineering
0030A3 CISCO SYSTEMS, INC.
0030A4 Woodwind Communications System
0030A5 ACTIVE POWER
0030A6 VIANET TECHNOLOGIES, LTD.
0030A7 SCHWEITZER ENGINEERING
0030A8 OL'E COMMUNICATIONS, INC.
0030A9 Netiverse, Inc.
0030AA AXUS MICROSYSTEMS, INC.
0030AB DELTA NETWORKS, INC.
0030AC Systeme Lauer GmbH & Co., Ltd.
0030AD SHANGHAI COMMUNICATION
0030AE Times N System, Inc.
0030AF Honeywell GmbH
0030B0 Convergenet Technologies
0030B1 GOC GESELLSCHAFT FUR OPTISCHE
0030B2 WESCAM - HEALDSBURG
0030B3 San Valley Systems, Inc.
0030B4 INTERSIL CORP.
0030B5 Tadiran Microwave Networks
0030B6 CISCO SYSTEMS, INC.
0030B7 Teletrol Systems, Inc.
0030B8 RiverDelta Networks
0030B9 ECTEL
0030BA AC&T SYSTEM CO., LTD.
0030BB CacheFlow, Inc.
0030BC Optronic AG
0030BD BELKIN COMPONENTS
0030BE City-Net Technology, Inc.
0030BF MULTIDATA GMBH
0030C0 Lara Technology, Inc.
0030C1 HEWLETT-PACKARD
0030C2 COMONE
0030C3 FLUECKIGER ELEKTRONIK AG
0030C4 Niigata Canotec Co., Inc.
0030C5 CADENCE DESIGN SYSTEMS
0030C6 CONTROL SOLUTIONS, INC.
0030C7 MACROMATE CORP.
0030C8 GAD LINE, LTD.
0030C9 LuxN, N
0030CA Discovery Com
0030CB OMNI FLOW COMPUTERS, INC.
0030CC Tenor Networks, Inc.
0030CD CONEXANT SYSTEMS, INC.
0030CE Zaffire
0030CF TWO TECHNOLOGIES, INC.
0030D1 INOVA CORPORATION
0030D2 WIN TECHNOLOGIES, CO., LTD.
0030D3 Agilent Technologies
0030D4 COMTIER
0030D5 DResearch GmbH
0030D6 MSC VERTRIEBS GMBH
0030D7 Innovative Systems, L.L.C.
0030D8 SITEK
0030D9 DATACORE SOFTWARE CORP.
0030DA COMTREND CO.
0030DB Mindready Solutions, Inc.
0030DC RIGHTECH CORPORATION
0030DD INDIGITA CORPORATION
0030DE WAGO Kontakttechnik GmbH
0030DF KB/TEL TELECOMUNICACIONES
0030E0 OXFORD SEMICONDUCTOR LTD.
0030E1 ACROTRON SYSTEMS, INC.
0030E2 GARNET SYSTEMS CO., LTD.
0030E3 SEDONA NETWORKS CORP.
0030E4 CHIYODA SYSTEM RIKEN
0030E5 Amper Datos S.A.
0030E6 SIEMENS MEDICAL SYSTEMS
0030E7 CNF MOBILE SOLUTIONS, INC.
0030E8 ENSIM CORP.
0030E9 GMA COMMUNICATION MANUFACT'G
0030EA TeraForce Technology Corporation
0030EB TURBONET COMMUNICATIONS, INC.
0030EC BORGARDT
0030ED Expert Magnetics Corp.
0030EE DSG Technology, Inc.
0030EF NEON TECHNOLOGY, INC.
0030F0 Uniform Industrial Corp.
0030F1 Accton Technology Corp.
0030F2 CISCO SYSTEMS, INC.
0030F3 At Work Computers
0030F4 STARDOT TECHNOLOGIES
0030F5 Wild Lab. Ltd.
0030F6 SECURELOGIX CORPORATION
0030F7 RAMIX INC.
0030F8 Dynapro Systems, Inc.
0030F9 Sollae Systems Co., Ltd.
0030FA TELICA, INC.
0030FB AZS Technology AG
0030FC Terawave Communications, Inc.
0030FD INTEGRATED SYSTEMS DESIGN
0030FE DSA GmbH
0030FF DATAFAB SYSTEMS, INC.
004000 PCI COMPONENTES DA AMZONIA LTD
004001 ZYXEL COMMUNICATIONS, INC.
004002 PERLE SYSTEMS LIMITED
004003 WESTINGHOUSE PROCESS CONTROL
004004 ICM CO. LTD.
004005 ANI COMMUNICATIONS INC.
004006 SAMPO TECHNOLOGY CORPORATION
004007 TELMAT INFORMATIQUE
004008 A PLUS INFO CORPORATION
004009 TACHIBANA TECTRON CO., LTD.
00400A PIVOTAL TECHNOLOGIES, INC.
00400B CISCO SYSTEMS, INC.
00400C GENERAL MICRO SYSTEMS, INC.
00400D LANNET DATA COMMUNICATIONS,LTD
00400E MEMOTEC COMMUNICATIONS, INC.
00400F DATACOM TECHNOLOGIES
004010 SONIC SYSTEMS, INC.
004011 ANDOVER CONTROLS CORPORATION
004012 WINDATA, INC.
004013 NTT DATA COMM. SYSTEMS CORP.
004014 COMSOFT GMBH
004015 ASCOM INFRASYS AG
004016 HADAX ELECTRONICS, INC.
004017 XCD INC.
004018 ADOBE SYSTEMS, INC.
004019 AEON SYSTEMS, INC.
00401A FUJI ELECTRIC CO., LTD.
00401B PRINTER SYSTEMS CORP.
00401C AST RESEARCH, INC.
00401D INVISIBLE SOFTWARE, INC.
00401E ICC
00401F COLORGRAPH LTD
004020 PINACL COMMUNICATION
004021 RASTER GRAPHICS
004022 KLEVER COMPUTERS, INC.
004023 LOGIC CORPORATION
004024 COMPAC INC.
004025 MOLECULAR DYNAMICS
004026 MELCO, INC.
004027 SMC MASSACHUSETTS, INC.
004028 NETCOMM LIMITED
004029 COMPEX
00402A CANOGA-PERKINS
00402B TRIGEM COMPUTER, INC.
00402C ISIS DISTRIBUTED SYSTEMS, INC.
00402D HARRIS ADACOM CORPORATION
00402E PRECISION SOFTWARE, INC.
00402F XLNT DESIGNS INC.
004030 GK COMPUTER
004031 KOKUSAI ELECTRIC CO., LTD
004032 DIGITAL COMMUNICATIONS
004033 ADDTRON TECHNOLOGY CO., LTD.
004034 BUSTEK CORPORATION
004035 OPCOM
004036 TRIBE COMPUTER WORKS, INC.
004037 SEA-ILAN, INC.
004038 TALENT ELECTRIC INCORPORATED
004039 OPTEC DAIICHI DENKO CO., LTD.
00403A IMPACT TECHNOLOGIES
00403B SYNERJET INTERNATIONAL CORP.
00403C FORKS, INC.
00403D TERADATA
00403E RASTER OPS CORPORATION
00403F SSANGYONG COMPUTER SYSTEMS
004040 RING ACCESS, INC.
004041 FUJIKURA LTD.
004042 N.A.T. GMBH
004043 NOKIA TELECOMMUNICATIONS
004044 QNIX COMPUTER CO., LTD.
004045 TWINHEAD CORPORATION
004046 UDC RESEARCH LIMITED
004047 WIND RIVER SYSTEMS
004048 SMD INFORMATICA S.A.
004049 TEGIMENTA AG
00404A WEST AUSTRALIAN DEPARTMENT
00404B MAPLE COMPUTER SYSTEMS
00404C HYPERTEC PTY LTD.
00404D TELECOMMUNICATIONS TECHNIQUES
00404E FLUENT, INC.
00404F SPACE & NAVAL WARFARE SYSTEMS
004050 IRONICS, INCORPORATED
004051 GRACILIS, INC.
004052 STAR TECHNOLOGIES, INC.
004053 AMPRO COMPUTERS
004054 CONNECTION MACHINES SERVICES
004055 METRONIX GMBH
004056 MCM JAPAN LTD.
004057 LOCKHEED - SANDERS
004058 KRONOS, INC.
004059 YOSHIDA KOGYO K. K.
00405A GOLDSTAR INFORMATION & COMM.
00405B FUNASSET LIMITED
00405C FUTURE SYSTEMS, INC.
00405D STAR-TEK, INC.
00405E NORTH HILLS ISRAEL
00405F AFE COMPUTERS LTD.
004060 COMENDEC LTD
004061 DATATECH ENTERPRISES CO., LTD.
004062 E-SYSTEMS, INC./GARLAND DIV.
004063 VIA TECHNOLOGIES, INC.
004064 KLA INSTRUMENTS CORPORATION
004065 GTE SPACENET
004066 HITACHI CABLE, LTD.
004067 OMNIBYTE CORPORATION
004068 EXTENDED SYSTEMS
004069 LEMCOM SYSTEMS, INC.
00406A KENTEK INFORMATION SYSTEMS,INC
00406B SYSGEN
00406C COPERNIQUE
00406D LANCO, INC.
00406E COROLLARY, INC.
00406F SYNC RESEARCH INC.
004070 INTERWARE CO., LTD.
004071 ATM COMPUTER GMBH
004072 Applied Innovation Inc.
004073 BASS ASSOCIATES
004074 CABLE AND WIRELESS
004075 M-TRADE (UK) LTD
004076 Sun Conversion Technologies
004077 MAXTON TECHNOLOGY CORPORATION
004078 WEARNES AUTOMATION PTE LTD
004079 JUKO MANUFACTURE COMPANY, LTD.
00407A SOCIETE D'EXPLOITATION DU CNIT
00407B SCIENTIFIC ATLANTA
00407C QUME CORPORATION
00407D EXTENSION TECHNOLOGY CORP.
00407E EVERGREEN SYSTEMS, INC.
00407F FLIR Systems
004080 ATHENIX CORPORATION
004081 MANNESMANN SCANGRAPHIC GMBH
004082 LABORATORY EQUIPMENT CORP.
004083 TDA INDUSTRIA DE PRODUTOS
004084 HONEYWELL INC.
004085 SAAB INSTRUMENTS AB
004086 MICHELS & KLEBERHOFF COMPUTER
004087 UBITREX CORPORATION
004088 MOBIUS TECHNOLOGIES, INC.
004089 MEIDENSHA CORPORATION
00408A TPS TELEPROCESSING SYS. GMBH
00408B RAYLAN CORPORATION
00408C AXIS COMMUNICATIONS AB
00408D THE GOODYEAR TIRE & RUBBER CO.
00408E DIGILOG, INC.
00408F WM-DATA MINFO AB
004090 ANSEL COMMUNICATIONS
004091 PROCOMP INDUSTRIA ELETRONICA
004092 ASP COMPUTER PRODUCTS, INC.
004093 PAXDATA NETWORKS LTD.
004094 SHOGRAPHICS, INC.
004095 R.P.T. INTERGROUPS INT'L LTD.
004096 Aironet Wireless Communication
004097 DATEX DIVISION OF
004098 DRESSLER GMBH & CO.
004099 NEWGEN SYSTEMS CORP.
00409A NETWORK EXPRESS, INC.
00409B HAL COMPUTER SYSTEMS INC.
00409C TRANSWARE
00409D DIGIBOARD, INC.
00409E CONCURRENT TECHNOLOGIES LTD.
00409F LANCAST/CASAT TECHNOLOGY, INC.
0040A0 GOLDSTAR CO., LTD.
0040A1 ERGO COMPUTING
0040A2 KINGSTAR TECHNOLOGY INC.
0040A3 MICROUNITY SYSTEMS ENGINEERING
0040A4 ROSE ELECTRONICS
0040A5 CLINICOMP INTL.
0040A6 Cray, Inc.
0040A7 ITAUTEC PHILCO S.A.
0040A8 IMF INTERNATIONAL LTD.
0040A9 DATACOM INC.
0040AA VALMET AUTOMATION INC.
0040AB ROLAND DG CORPORATION
0040AC SUPER WORKSTATION, INC.
0040AD SMA REGELSYSTEME GMBH
0040AE DELTA CONTROLS, INC.
0040AF DIGITAL PRODUCTS, INC.
0040B0 BYTEX CORPORATION, ENGINEERING
0040B1 CODONICS INC.
0040B2 SYSTEMFORSCHUNG
0040B3 PAR MICROSYSTEMS CORPORATION
0040B4 NEXTCOM K.K.
0040B5 VIDEO TECHNOLOGY COMPUTERS LTD
0040B6 COMPUTERM CORPORATION
0040B7 STEALTH COMPUTER SYSTEMS
0040B8 IDEA ASSOCIATES
0040B9 MACQ ELECTRONIQUE SA
0040BA ALLIANT COMPUTER SYSTEMS CORP.
0040BB GOLDSTAR CABLE CO., LTD.
0040BC ALGORITHMICS LTD.
0040BD STARLIGHT NETWORKS, INC.
0040BE BOEING DEFENSE & SPACE
0040BF CHANNEL SYSTEMS INTERN'L INC.
0040C0 VISTA CONTROLS CORPORATION
0040C1 BIZERBA-WERKE WILHEIM KRAUT
0040C2 APPLIED COMPUTING DEVICES
0040C3 FISCHER AND PORTER CO.
0040C4 KINKEI SYSTEM CORPORATION
0040C5 MICOM COMMUNICATIONS INC.
0040C6 FIBERNET RESEARCH, INC.
0040C7 RUBY TECH CORPORATION
0040C8 MILAN TECHNOLOGY CORPORATION
0040C9 NCUBE
0040CA FIRST INTERNAT'L COMPUTER, INC
0040CB LANWAN TECHNOLOGIES
0040CC SILCOM MANUF'G TECHNOLOGY INC.
0040CD TERA MICROSYSTEMS, INC.
0040CE NET-SOURCE, INC.
0040CF STRAWBERRY TREE, INC.
0040D0 MITAC INTERNATIONAL CORP.
0040D1 FUKUDA DENSHI CO., LTD.
0040D2 PAGINE CORPORATION
0040D3 KIMPSION INTERNATIONAL CORP.
0040D4 GAGE TALKER CORP.
0040D5 SARTORIUS AG
0040D6 LOCAMATION B.V.
0040D7 STUDIO GEN INC.
0040D8 OCEAN OFFICE AUTOMATION LTD.
0040D9 AMERICAN MEGATRENDS INC.
0040DA TELSPEC LTD
0040DB ADVANCED TECHNICAL SOLUTIONS
0040DC TRITEC ELECTRONIC GMBH
0040DD HONG TECHNOLOGIES
0040DE ELETTRONICA SAN GIORGIO
0040DF DIGALOG SYSTEMS, INC.
0040E0 ATOMWIDE LTD.
0040E1 MARNER INTERNATIONAL, INC.
0040E2 MESA RIDGE TECHNOLOGIES, INC.
0040E3 QUIN SYSTEMS LTD
0040E4 E-M TECHNOLOGY, INC.
0040E5 SYBUS CORPORATION
0040E6 C.A.E.N.
0040E7 ARNOS INSTRUMENTS & COMPUTER
0040E8 CHARLES RIVER DATA SYSTEMS,INC
0040E9 ACCORD SYSTEMS, INC.
0040EA PLAIN TREE SYSTEMS INC
0040EB MARTIN MARIETTA CORPORATION
0040EC MIKASA SYSTEM ENGINEERING
0040ED NETWORK CONTROLS INT'NATL INC.
0040EE OPTIMEM
0040EF HYPERCOM, INC.
0040F0 MICRO SYSTEMS, INC.
0040F1 CHUO ELECTRONICS CO., LTD.
0040F2 JANICH & KLASS COMPUTERTECHNIK
0040F3 NETCOR
0040F4 CAMEO COMMUNICATIONS, INC.
0040F5 OEM ENGINES
0040F6 KATRON COMPUTERS INC.
0040F7 POLAROID MEDICAL IMAGING SYS.
0040F8 SYSTEMHAUS DISCOM
0040F9 COMBINET
0040FA MICROBOARDS, INC.
0040FB CASCADE COMMUNICATIONS CORP.
0040FC IBR COMPUTER TECHNIK GMBH
0040FD LXE
0040FE SYMPLEX COMMUNICATIONS
0040FF TELEBIT CORPORATION
004252 RLX Technologies
005000 NEXO COMMUNICATIONS, INC.
005001 YAMASHITA SYSTEMS CORP.
005002 OMNISEC AG
005003 GRETAG MACBETH AG
005004 3COM CORPORATION
005006 TAC AB
005007 SIEMENS TELECOMMUNICATION SYSTEMS LIMITED
005008 TIVA MICROCOMPUTER CORP. (TMC)
005009 PHILIPS BROADBAND NETWORKS
00500A IRIS TECHNOLOGIES, INC.
00500B CISCO SYSTEMS, INC.
00500C e-Tek Labs, Inc.
00500D SATORI ELECTORIC CO., LTD.
00500E CHROMATIS NETWORKS, INC.
00500F CISCO SYSTEMS, INC.
005010 NovaNET Learning, Inc.
005012 CBL - GMBH
005013 Chaparral Network Storage
005014 CISCO SYSTEMS, INC.
005015 BRIGHT STAR ENGINEERING
005016 SST/WOODHEAD INDUSTRIES
005017 RSR S.R.L.
005018 ADVANCED MULTIMEDIA INTERNET TECHNOLOGY INC.
005019 SPRING TIDE NETWORKS, INC.
00501A UISIQN
00501B ABL CANADA, INC.
00501C JATOM SYSTEMS, INC.
00501E Miranda Technologies, Inc.
00501F MRG SYSTEMS, LTD.
005020 MEDIASTAR CO., LTD.
005021 EIS INTERNATIONAL, INC.
005022 ZONET TECHNOLOGY, INC.
005023 PG DESIGN ELECTRONICS, INC.
005024 NAVIC SYSTEMS, INC.
005026 COSYSTEMS, INC.
005027 GENICOM CORPORATION
005028 AVAL COMMUNICATIONS
005029 1394 PRINTER WORKING GROUP
00502A CISCO SYSTEMS, INC.
00502B GENRAD LTD.
00502C SOYO COMPUTER, INC.
00502D ACCEL, INC.
00502E CAMBEX CORPORATION
00502F TollBridge Technologies, Inc.
005030 FUTURE PLUS SYSTEMS
005031 AEROFLEX LABORATORIES, INC.
005032 PICAZO COMMUNICATIONS, INC.
005033 MAYAN NETWORKS
005036 NETCAM, LTD.
005037 KOGA ELECTRONICS CO.
005038 DAIN TELECOM CO., LTD.
005039 MARINER NETWORKS
00503A DATONG ELECTRONICS LTD.
00503B MEDIAFIRE CORPORATION
00503C TSINGHUA NOVEL ELECTRONICS
00503E CISCO SYSTEMS, INC.
00503F ANCHOR GAMES
005040 EMWARE, INC.
005041 CTX OPTO ELECTRONIC CORP.
005042 SCI MANUFACTURING SINGAPORE PTE, LTD.
005043 MARVELL SEMICONDUCTOR, INC.
005044 ASACA CORPORATION
005045 RIOWORKS SOLUTIONS, INC.
005046 MENICX INTERNATIONAL CO., LTD.
005048 INFOLIBRIA
005049 ELLACOYA NETWORKS, INC.
00504A ELTECO A.S.
00504B BARCONET N.V.
00504C GALIL MOTION CONTROL, INC.
00504D TOKYO ELECTRON DEVICE LTD.
00504E SIERRA MONITOR CORP.
00504F OLENCOM ELECTRONICS
005050 CISCO SYSTEMS, INC.
005051 IWATSU ELECTRIC CO., LTD.
005052 TIARA NETWORKS, INC.
005053 CISCO SYSTEMS, INC.
005054 CISCO SYSTEMS, INC.
005055 DOMS A/S
005056 VMWare, Inc.
005057 BROADBAND ACCESS SYSTEMS
005058 VEGASTREAM LIMITED
005059 SUITE TECHNOLOGY SYSTEMS NETWORK
00505A NETWORK ALCHEMY, INC.
00505B KAWASAKI LSI U.S.A., INC.
00505C TUNDO CORPORATION
00505E DIGITEK MICROLOGIC S.A.
00505F BRAND INNOVATORS
005060 TANDBERG TELECOM AS
005062 KOUWELL ELECTRONICS CORP. **
005063 OY COMSEL SYSTEM AB
005064 CAE ELECTRONICS
005065 DENSEI-LAMBAD Co., Ltd.
005066 AtecoM GmbH advanced telecomunication modules
005067 AEROCOMM, INC.
005068 ELECTRONIC INDUSTRIES ASSOCIATION
005069 PixStream Incorporated
00506A EDEVA, INC.
00506B SPX-ATEG
00506C G & L BEIJER ELECTRONICS AB
00506D VIDEOJET SYSTEMS
00506E CORDER ENGINEERING CORPORATION
00506F G-CONNECT
005070 CHAINTECH COMPUTER CO., LTD.
005071 AIWA CO., LTD.
005072 CORVIS CORPORATION
005073 CISCO SYSTEMS, INC.
005074 ADVANCED HI-TECH CORP.
005075 KESTREL SOLUTIONS
005076 IBM
005077 PROLIFIC TECHNOLOGY, INC.
005078 MEGATON HOUSE, LTD.
00507A XPEED, INC.
00507B MERLOT COMMUNICATIONS
00507C VIDEOCON AG
00507D IFP
00507E NEWER TECHNOLOGY
00507F DrayTek Corp.
005080 CISCO SYSTEMS, INC.
005081 MURATA MACHINERY, LTD.
005082 FORESSON CORPORATION
005083 GILBARCO, INC.
005084 ATL PRODUCTS
005086 TELKOM SA, LTD.
005087 TERASAKI ELECTRIC CO., LTD.
005088 AMANO CORPORATION
005089 SAFETY MANAGEMENT SYSTEMS
00508B COMPAQ COMPUTER CORPORATION
00508C RSI SYSTEMS
00508D ABIT COMPUTER CORPORATION
00508E OPTIMATION, INC.
00508F ASITA TECHNOLOGIES INT'L LTD.
005090 DCTRI
005091 NETACCESS, INC.
005092 RIGAKU INDUSTRIAL CORPORATION
005093 BOEING
005094 PACE MICRO TECHNOLOGY PLC
005095 PERACOM NETWORKS
005096 SALIX TECHNOLOGIES, INC.
005097 MMC-EMBEDDED COMPUTERTECHNIK GmbH
005098 GLOBALOOP, LTD.
005099 3COM EUROPE, LTD.
00509A TAG ELECTRONIC SYSTEMS
00509B SWITCHCORE AB
00509C BETA RESEARCH
00509D THE INDUSTREE B.V.
00509E Les Technologies SoftAcoustik Inc.
00509F HORIZON COMPUTER
0050A0 DELTA COMPUTER SYSTEMS, INC.
0050A1 CARLO GAVAZZI, INC.
0050A2 CISCO SYSTEMS, INC.
0050A3 TransMedia Communications, Inc.
0050A4 IO TECH, INC.
0050A5 CAPITOL BUSINESS SYSTEMS, LTD.
0050A6 OPTRONICS
0050A7 CISCO SYSTEMS, INC.
0050A8 OpenCon Systems, Inc.
0050A9 MOLDAT WIRELESS TECHNOLGIES
0050AA KONICA MINOLTA HOLDINGS, INC.
0050AB NALTEC, INC.
0050AC MAPLE COMPUTER CORPORATION
0050AD CommUnique Wireless Corp.
0050AE IWAKI ELECTRONICS CO., LTD.
0050AF INTERGON, INC.
0050B0 TECHNOLOGY ATLANTA CORPORATION
0050B1 GIDDINGS & LEWIS
0050B2 BRODEL AUTOMATION
0050B3 VOICEBOARD CORPORATION
0050B4 SATCHWELL CONTROL SYSTEMS, LTD
0050B5 FICHET-BAUCHE
0050B6 GOOD WAY IND. CO., LTD.
0050B7 BOSER TECHNOLOGY CO., LTD.
0050B8 INOVA COMPUTERS GMBH & CO. KG
0050B9 XITRON TECHNOLOGIES, INC.
0050BA D-LINK
0050BB CMS TECHNOLOGIES
0050BC HAMMER STORAGE SOLUTIONS
0050BD CISCO SYSTEMS, INC.
0050BE FAST MULTIMEDIA AG
0050BF MOTOTECH INC.
0050C0 GATAN, INC.
0050C1 GEMFLEX NETWORKS, LTD.
0050C2 IEEE REGISTRATION AUTHORITY
0050C4 IMD
0050C5 ADS TECHNOLOGIES, INC.
0050C6 LOOP TELECOMMUNICATION INTERNATIONAL, INC.
0050C8 ADDONICS COMMUNICATIONS, INC.
0050C9 MASPRO DENKOH CORP.
0050CA NET TO NET TECHNOLOGIES
0050CB JETTER
0050CC XYRATEX
0050CD DIGIANSWER A/S
0050CE LG INTERNATIONAL CORP.
0050CF VANLINK COMMUNICATION TECHNOLOGY RESEARCH INSTITUTE
0050D0 MINERVA SYSTEMS
0050D1 CISCO SYSTEMS, INC.
0050D2 BAE Systems Canada, Inc.
0050D3 DIGITAL AUDIO PROCESSING PTY. LTD.
0050D4 JOOHONG INFORMATION &
0050D5 AD SYSTEMS CORP.
0050D6 ATLAS COPCO TOOLS AB
0050D7 TELSTRAT
0050D8 UNICORN COMPUTER CORP.
0050D9 ENGETRON-ENGENHARIA ELETRONICA IND. e COM. LTDA
0050DA 3COM CORPORATION
0050DB CONTEMPORARY CONTROL
0050DC TAS TELEFONBAU A. SCHWABE GMBH & CO. KG
0050DD SERRA SOLDADURA, S.A.
0050DE SIGNUM SYSTEMS CORP.
0050DF AirFiber, Inc.
0050E1 NS TECH ELECTRONICS SDN BHD
0050E2 CISCO SYSTEMS, INC.
0050E3 Terayon Communications Systems
0050E4 APPLE COMPUTER, INC.
0050E6 HAKUSAN CORPORATION
0050E7 PARADISE INNOVATIONS (ASIA)
0050E8 NOMADIX INC.
0050EA XEL COMMUNICATIONS, INC.
0050EB ALPHA-TOP CORPORATION
0050EC OLICOM A/S
0050ED ANDA NETWORKS
0050EE TEK DIGITEL CORPORATION
0050EF SPE Systemhaus GmbH
0050F0 CISCO SYSTEMS, INC.
0050F1 LIBIT SIGNAL PROCESSING, LTD.
0050F2 MICROSOFT CORP.
0050F3 GLOBAL NET INFORMATION CO., Ltd.
0050F4 SIGMATEK GMBH & CO. KG
0050F6 PAN-INTERNATIONAL INDUSTRIAL CORP.
0050F7 VENTURE MANUFACTURING (SINGAPORE) LTD.
0050F8 ENTREGA TECHNOLOGIES, INC.
0050FA OXTEL, LTD.
0050FB VSK ELECTRONICS
0050FC EDIMAX TECHNOLOGY CO., LTD.
0050FD VISIONCOMM CO., LTD.
0050FE PCTVnet ASA
0050FF HAKKO ELECTRONICS CO., LTD.
006000 XYCOM INC.
006001 InnoSys, Inc.
006002 SCREEN SUBTITLING SYSTEMS, LTD
006003 TERAOKA WEIGH SYSTEM PTE, LTD.
006004 COMPUTADORES MODULARES SA
006005 FEEDBACK DATA LTD.
006006 SOTEC CO., LTD
006007 ACRES GAMING, INC.
006008 3COM CORPORATION
006009 CISCO SYSTEMS, INC.
00600A SORD COMPUTER CORPORATION
00600B LOGWARE GmbH
00600C APPLIED DATA SYSTEMS, INC.
00600D Digital Logic GmbH
00600E WAVENET INTERNATIONAL, INC.
00600F WESTELL, INC.
006010 NETWORK MACHINES, INC.
006011 CRYSTAL SEMICONDUCTOR CORP.
006012 POWER COMPUTING CORPORATION
006013 NETSTAL MASCHINEN AG
006014 EDEC CO., LTD.
006015 NET2NET CORPORATION
006016 CLARIION
006017 TOKIMEC INC.
006018 STELLAR ONE CORPORATION
006019 Roche Diagnostics
00601A KEITHLEY INSTRUMENTS
00601B MESA ELECTRONICS
00601C TELXON CORPORATION
00601D LUCENT TECHNOLOGIES
00601E SOFTLAB, INC.
00601F STALLION TECHNOLOGIES
006020 PIVOTAL NETWORKING, INC.
006021 DSC CORPORATION
006022 VICOM SYSTEMS, INC.
006023 PERICOM SEMICONDUCTOR CORP.
006024 GRADIENT TECHNOLOGIES, INC.
006025 ACTIVE IMAGING PLC
006026 VIKING COMPONENTS, INC.
006027 Superior Modular Products
006028 MACROVISION CORPORATION
006029 CARY PERIPHERALS INC.
00602A SYMICRON COMPUTER COMMUNICATIONS, LTD.
00602B PEAK AUDIO
00602C LINX Data Terminals, Inc.
00602D ALERTON TECHNOLOGIES, INC.
00602E CYCLADES CORPORATION
00602F CISCO SYSTEMS, INC.
006030 VILLAGE TRONIC ENTWICKLUNG
006031 HRK SYSTEMS
006032 I-CUBE, INC.
006033 ACUITY IMAGING, INC.
006034 ROBERT BOSCH GmbH
006035 DALLAS SEMICONDUCTOR, INC.
006036 AUSTRIAN RESEARCH CENTER SEIBERSDORF
006037 PHILIPS SEMICONDUCTORS
006038 Nortel Networks
006039 SanCom Technology, Inc.
00603A QUICK CONTROLS LTD.
00603B AMTEC spa
00603C HAGIWARA SYS-COM CO., LTD.
00603D 3CX
00603E CISCO SYSTEMS, INC.
00603F PATAPSCO DESIGNS
006040 NETRO CORP.
006041 Yokogawa Electric Corporation
006042 TKS (USA), INC.
006043 ComSoft Systems, Inc.
006044 LITTON/POLY-SCIENTIFIC
006045 PATHLIGHT TECHNOLOGIES
006046 VMETRO, INC.
006047 CISCO SYSTEMS, INC.
006048 EMC CORPORATION
006049 VINA TECHNOLOGIES
00604A SAIC IDEAS GROUP
00604B BIODATA GmbH
00604C SAT
00604D MMC NETWORKS, INC.
00604E CYCLE COMPUTER CORPORATION, INC.
00604F SUZUKI MFG. CO., LTD.
006050 INTERNIX INC.
006051 QUALITY SEMICONDUCTOR
006052 PERIPHERALS ENTERPRISE CO., Ltd.
006053 TOYODA MACHINE WORKS, LTD.
006054 CONTROLWARE GMBH
006055 CORNELL UNIVERSITY
006056 NETWORK TOOLS, INC.
006057 MURATA MANUFACTURING CO., LTD.
006058 COPPER MOUNTAIN COMMUNICATIONS, INC.
006059 TECHNICAL COMMUNICATIONS CORP.
00605A CELCORE, INC.
00605B IntraServer Technology, Inc.
00605C CISCO SYSTEMS, INC.
00605D SCANIVALVE CORP.
00605E LIBERTY TECHNOLOGY NETWORKING
00605F NIPPON UNISOFT CORPORATION
006060 DAWNING TECHNOLOGIES, INC.
006061 WHISTLE COMMUNICATIONS CORP.
006062 TELESYNC, INC.
006063 PSION DACOM PLC.
006064 NETCOMM LIMITED
006065 BERNECKER & RAINER INDUSTRIE-ELEKTRONIC GmbH
006066 LACROIX TECHNOLGIE
006067 ACER NETXUS INC.
006068 EICON TECHNOLOGY CORPORATION
006069 BROCADE COMMUNICATIONS SYSTEMS, Inc.
00606A MITSUBISHI WIRELESS COMMUNICATIONS. INC.
00606B Synclayer Inc.
00606C ARESCOM
00606D DIGITAL EQUIPMENT CORP.
00606E DAVICOM SEMICONDUCTOR, INC.
00606F CLARION CORPORATION OF AMERICA
006070 CISCO SYSTEMS, INC.
006071 MIDAS LAB, INC.
006072 VXL INSTRUMENTS, LIMITED
006073 REDCREEK COMMUNICATIONS, INC.
006074 QSC AUDIO PRODUCTS
006075 PENTEK, INC.
006076 SCHLUMBERGER TECHNOLOGIES RETAIL PETROLEUM SYSTEMS
006077 PRISA NETWORKS
006078 POWER MEASUREMENT LTD.
006079 Mainstream Data, Inc.
00607A DVS GmbH
00607B FORE SYSTEMS, INC.
00607C WaveAccess, Ltd.
00607D SENTIENT NETWORKS INC.
00607E GIGALABS, INC.
00607F AURORA TECHNOLOGIES, INC.
006080 MICROTRONIX DATACOM LTD.
006081 TV/COM INTERNATIONAL
006082 NOVALINK TECHNOLOGIES, INC.
006083 CISCO SYSTEMS, INC.
006084 DIGITAL VIDEO
006085 Storage Concepts
006086 LOGIC REPLACEMENT TECH. LTD.
006087 KANSAI ELECTRIC CO., LTD.
006088 WHITE MOUNTAIN DSP, INC.
006089 XATA
00608A CITADEL COMPUTER
00608B ConferTech International
00608C 3COM CORPORATION
00608D UNIPULSE CORP.
00608E HE ELECTRONICS, TECHNOLOGIE & SYSTEMTECHNIK GmbH
00608F TEKRAM TECHNOLOGY CO., LTD.
006090 ABLE COMMUNICATIONS, INC.
006091 FIRST PACIFIC NETWORKS, INC.
006092 MICRO/SYS, INC.
006093 VARIAN
006094 IBM CORP.
006095 ACCU-TIME SYSTEMS, INC.
006096 T.S. MICROTECH INC.
006097 3COM CORPORATION
006098 HT COMMUNICATIONS
006099 LAN MEDIA CORPORATION
00609A NJK TECHNO CO.
00609B ASTRO-MED, INC.
00609C Perkin-Elmer Incorporated
00609D PMI FOOD EQUIPMENT GROUP
00609E ASC X3 - INFORMATION TECHNOLOGY STANDARDS SECRETARIATS
00609F PHAST CORPORATION
0060A0 SWITCHED NETWORK TECHNOLOGIES, INC.
0060A1 VPNet, Inc.
0060A2 NIHON UNISYS LIMITED CO.
0060A3 CONTINUUM TECHNOLOGY CORP.
0060A4 GRINAKER SYSTEM TECHNOLOGIES
0060A5 PERFORMANCE TELECOM CORP.
0060A6 PARTICLE MEASURING SYSTEMS
0060A7 MICROSENS GmbH & CO. KG
0060A8 TIDOMAT AB
0060A9 GESYTEC MbH
0060AA INTELLIGENT DEVICES INC. (IDI)
0060AB LARSCOM INCORPORATED
0060AC RESILIENCE CORPORATION
0060AD MegaChips Corporation
0060AE TRIO INFORMATION SYSTEMS AB
0060AF PACIFIC MICRO DATA, INC.
0060B0 HEWLETT-PACKARD CO.
0060B1 INPUT/OUTPUT, INC.
0060B2 PROCESS CONTROL CORP.
0060B3 Z-COM, INC.
0060B4 GLENAYRE R&D INC.
0060B5 KEBA GmbH
0060B6 LAND COMPUTER CO., LTD.
0060B7 CHANNELMATIC, INC.
0060B8 CORELIS INC.
0060B9 NITSUKO CORPORATION
0060BA SAHARA NETWORKS, INC.
0060BB CABLETRON - NETLINK, INC.
0060BC KeunYoung Electronics & Communication Co., Ltd.
0060BD HUBBELL-PULSECOM
0060BE WEBTRONICS
0060BF MACRAIGOR SYSTEMS, INC.
0060C0 NERA AS
0060C1 WaveSpan Corporation
0060C2 MPL AG
0060C3 NETVISION CORPORATION
0060C4 SOLITON SYSTEMS K.K.
0060C5 ANCOT CORP.
0060C6 DCS AG
0060C7 AMATI COMMUNICATIONS CORP.
0060C8 KUKA WELDING SYSTEMS & ROBOTS
0060C9 ControlNet, Inc.
0060CA HARMONIC SYSTEMS INCORPORATED
0060CB HITACHI ZOSEN CORPORATION
0060CC EMTRAK, INCORPORATED
0060CD VideoServer, Inc.
0060CE ACCLAIM COMMUNICATIONS
0060CF ALTEON NETWORKS, INC.
0060D0 SNMP RESEARCH INCORPORATED
0060D1 CASCADE COMMUNICATIONS
0060D2 LUCENT TECHNOLOGIES TAIWAN TELECOMMUNICATIONS CO., LTD.
0060D3 AT&T
0060D4 ELDAT COMMUNICATION LTD.
0060D5 MIYACHI TECHNOS CORP.
0060D6 NovAtel Wireless Technologies Ltd.
0060D7 ECOLE POLYTECHNIQUE FEDERALE DE LAUSANNE (EPFL)
0060D8 ELMIC SYSTEMS, INC.
0060D9 TRANSYS NETWORKS INC.
0060DA JBM ELECTRONICS CO.
0060DB NTP ELEKTRONIK A/S
0060DC TOYO COMMUNICATION EQUIPMENT Co., Ltd.
0060DD MYRICOM, INC.
0060DE KAYSER-THREDE GmbH
0060DF CNT Corporation
0060E0 AXIOM TECHNOLOGY CO., LTD.
0060E1 ORCKIT COMMUNICATIONS LTD.
0060E2 QUEST ENGINEERING & DEVELOPMENT
0060E3 ARBIN INSTRUMENTS
0060E4 COMPUSERVE, INC.
0060E5 FUJI AUTOMATION CO., LTD.
0060E6 SHOMITI SYSTEMS INCORPORATED
0060E7 RANDATA
0060E8 HITACHI COMPUTER PRODUCTS (AMERICA), INC.
0060E9 ATOP TECHNOLOGIES, INC.
0060EA StreamLogic
0060EB FOURTHTRACK SYSTEMS
0060EC HERMARY OPTO ELECTRONICS INC.
0060ED RICARDO TEST AUTOMATION LTD.
0060EE APOLLO
0060EF FLYTECH TECHNOLOGY CO., LTD.
0060F0 JOHNSON & JOHNSON MEDICAL, INC
0060F1 EXP COMPUTER, INC.
0060F2 LASERGRAPHICS, INC.
0060F3 Performance Analysis Broadband, Spirent plc
0060F4 ADVANCED COMPUTER SOLUTIONS, Inc.
0060F5 ICON WEST, INC.
0060F6 NEXTEST COMMUNICATIONS PRODUCTS, INC.
0060F7 DATAFUSION SYSTEMS
0060F8 Loran International Technologies Inc.
0060F9 DIAMOND LANE COMMUNICATIONS
0060FA EDUCATIONAL TECHNOLOGY RESOURCES, INC.
0060FB PACKETEER, INC.
0060FC CONSERVATION THROUGH INNOVATION LTD.
0060FD NetICs, Inc.
0060FE LYNX SYSTEM DEVELOPERS, INC.
0060FF QuVis, Inc.
0070B0 M/A-COM INC. COMPANIES
0070B3 DATA RECALL LTD.
008000 MULTITECH SYSTEMS, INC.
008001 PERIPHONICS CORPORATION
008002 SATELCOM (UK) LTD
008003 HYTEC ELECTRONICS LTD.
008004 ANTLOW COMMUNICATIONS, LTD.
008005 CACTUS COMPUTER INC.
008006 COMPUADD CORPORATION
008007 DLOG NC-SYSTEME
008008 DYNATECH COMPUTER SYSTEMS
008009 JUPITER SYSTEMS, INC.
00800A JAPAN COMPUTER CORP.
00800B CSK CORPORATION
00800C VIDECOM LIMITED
00800D VOSSWINKEL F.U.
00800E ATLANTIX CORPORATION
00800F STANDARD MICROSYSTEMS
008010 COMMODORE INTERNATIONAL
008011 DIGITAL SYSTEMS INT'L. INC.
008012 INTEGRATED MEASUREMENT SYSTEMS
008013 THOMAS-CONRAD CORPORATION
008014 ESPRIT SYSTEMS
008015 SEIKO SYSTEMS, INC.
008016 WANDEL AND GOLTERMANN
008017 PFU LIMITED
008018 KOBE STEEL, LTD.
008019 DAYNA COMMUNICATIONS, INC.
00801A BELL ATLANTIC
00801B KODIAK TECHNOLOGY
00801C NEWPORT SYSTEMS SOLUTIONS
00801D INTEGRATED INFERENCE MACHINES
00801E XINETRON, INC.
00801F KRUPP ATLAS ELECTRONIK GMBH
008020 NETWORK PRODUCTS
008021 Alcatel Canada Inc.
008022 SCAN-OPTICS
008023 INTEGRATED BUSINESS NETWORKS
008024 KALPANA, INC.
008025 STOLLMANN GMBH
008026 NETWORK PRODUCTS CORPORATION
008027 ADAPTIVE SYSTEMS, INC.
008028 TRADPOST (HK) LTD
008029 EAGLE TECHNOLOGY, INC.
00802A TEST SYSTEMS & SIMULATIONS INC
00802B INTEGRATED MARKETING CO
00802C THE SAGE GROUP PLC
00802D XYLOGICS INC
00802E CASTLE ROCK COMPUTING
00802F NATIONAL INSTRUMENTS CORP.
008030 NEXUS ELECTRONICS
008031 BASYS, CORP.
008032 ACCESS CO., LTD.
008033 FORMATION, INC.
008034 SMT GOUPIL
008035 TECHNOLOGY WORKS, INC.
008036 REFLEX MANUFACTURING SYSTEMS
008037 Ericsson Group
008038 DATA RESEARCH & APPLICATIONS
008039 ALCATEL STC AUSTRALIA
00803A VARITYPER, INC.
00803B APT COMMUNICATIONS, INC.
00803C TVS ELECTRONICS LTD
00803D SURIGIKEN CO., LTD.
00803E SYNERNETICS
00803F TATUNG COMPANY
008040 JOHN FLUKE MANUFACTURING CO.
008041 VEB KOMBINAT ROBOTRON
008042 FORCE COMPUTERS
008043 NETWORLD, INC.
008044 SYSTECH COMPUTER CORP.
008045 MATSUSHITA ELECTRIC IND. CO
008046 UNIVERSITY OF TORONTO
008047 IN-NET CORP.
008048 COMPEX INCORPORATED
008049 NISSIN ELECTRIC CO., LTD.
00804A PRO-LOG
00804B EAGLE TECHNOLOGIES PTY.LTD.
00804C CONTEC CO., LTD.
00804D CYCLONE MICROSYSTEMS, INC.
00804E APEX COMPUTER COMPANY
00804F DAIKIN INDUSTRIES, LTD.
008050 ZIATECH CORPORATION
008051 FIBERMUX
008052 TECHNICALLY ELITE CONCEPTS
008053 INTELLICOM, INC.
008054 FRONTIER TECHNOLOGIES CORP.
008055 FERMILAB
008056 SPHINX ELEKTRONIK GMBH
008057 ADSOFT, LTD.
008058 PRINTER SYSTEMS CORPORATION
008059 STANLEY ELECTRIC CO., LTD
00805A TULIP COMPUTERS INTERNAT'L B.V
00805B CONDOR SYSTEMS, INC.
00805C AGILIS CORPORATION
00805D CANSTAR
00805E LSI LOGIC CORPORATION
00805F COMPAQ COMPUTER CORPORATION
008060 NETWORK INTERFACE CORPORATION
008061 LITTON SYSTEMS, INC.
008062 INTERFACE CO.
008063 RICHARD HIRSCHMANN GMBH & CO.
008064 WYSE TECHNOLOGY
008065 CYBERGRAPHIC SYSTEMS PTY LTD.
008066 ARCOM CONTROL SYSTEMS, LTD.
008067 SQUARE D COMPANY
008068 YAMATECH SCIENTIFIC LTD.
008069 COMPUTONE SYSTEMS
00806A ERI (EMPAC RESEARCH INC.)
00806B SCHMID TELECOMMUNICATION
00806C CEGELEC PROJECTS LTD
00806D CENTURY SYSTEMS CORP.
00806E NIPPON STEEL CORPORATION
00806F ONELAN LTD.
008070 COMPUTADORAS MICRON
008071 SAI TECHNOLOGY
008072 MICROPLEX SYSTEMS LTD.
008073 DWB ASSOCIATES
008074 FISHER CONTROLS
008075 PARSYTEC GMBH
008076 MCNC
008077 BROTHER INDUSTRIES, LTD.
008078 PRACTICAL PERIPHERALS, INC.
008079 MICROBUS DESIGNS LTD.
00807A AITECH SYSTEMS LTD.
00807B ARTEL COMMUNICATIONS CORP.
00807C FIBERCOM, INC.
00807D EQUINOX SYSTEMS INC.
00807E SOUTHERN PACIFIC LTD.
00807F DY-4 INCORPORATED
008080 DATAMEDIA CORPORATION
008081 KENDALL SQUARE RESEARCH CORP.
008082 PEP MODULAR COMPUTERS GMBH
008083 AMDAHL
008084 THE CLOUD INC.
008085 H-THREE SYSTEMS CORPORATION
008086 COMPUTER GENERATION INC.
008087 OKI ELECTRIC INDUSTRY CO., LTD
008088 VICTOR COMPANY OF JAPAN, LTD.
008089 TECNETICS (PTY) LTD.
00808A SUMMIT MICROSYSTEMS CORP.
00808B DACOLL LIMITED
00808C NetScout Systems, Inc.
00808D WESTCOAST TECHNOLOGY B.V.
00808E RADSTONE TECHNOLOGY
00808F C. ITOH ELECTRONICS, INC.
008090 MICROTEK INTERNATIONAL, INC.
008091 TOKYO ELECTRIC CO.,LTD
008092 JAPAN COMPUTER INDUSTRY, INC.
008093 XYRON CORPORATION
008094 ALFA LAVAL AUTOMATION AB
008095 BASIC MERTON HANDELSGES.M.B.H.
008096 HUMAN DESIGNED SYSTEMS, INC.
008097 CENTRALP AUTOMATISMES
008098 TDK CORPORATION
008099 KLOCKNER MOELLER IPC
00809A NOVUS NETWORKS LTD
00809B JUSTSYSTEM CORPORATION
00809C LUXCOM, INC.
00809D Commscraft Ltd.
00809E DATUS GMBH
00809F ALCATEL BUSINESS SYSTEMS
0080A0 EDISA HEWLETT PACKARD S/A
0080A1 MICROTEST, INC.
0080A2 CREATIVE ELECTRONIC SYSTEMS
0080A3 LANTRONIX
0080A4 LIBERTY ELECTRONICS
0080A5 SPEED INTERNATIONAL
0080A6 REPUBLIC TECHNOLOGY, INC.
0080A7 MEASUREX CORP.
0080A8 VITACOM CORPORATION
0080A9 CLEARPOINT RESEARCH
0080AA MAXPEED
0080AB DUKANE NETWORK INTEGRATION
0080AC IMLOGIX, DIVISION OF GENESYS
0080AD CNET TECHNOLOGY, INC.
0080AE HUGHES NETWORK SYSTEMS
0080AF ALLUMER CO., LTD.
0080B0 ADVANCED INFORMATION
0080B1 SOFTCOM A/S
0080B2 NETWORK EQUIPMENT TECHNOLOGIES
0080B3 AVAL DATA CORPORATION
0080B4 SOPHIA SYSTEMS
0080B5 UNITED NETWORKS INC.
0080B6 THEMIS COMPUTER
0080B7 STELLAR COMPUTER
0080B8 BUG, INCORPORATED
0080B9 ARCHE TECHNOLIGIES INC.
0080BA SPECIALIX (ASIA) PTE, LTD
0080BB HUGHES LAN SYSTEMS
0080BC HITACHI ENGINEERING CO., LTD
0080BD THE FURUKAWA ELECTRIC CO., LTD
0080BE ARIES RESEARCH
0080BF TAKAOKA ELECTRIC MFG. CO. LTD.
0080C0 PENRIL DATACOMM
0080C1 LANEX CORPORATION
0080C2 IEEE 802.1 COMMITTEE
0080C3 BICC INFORMATION SYSTEMS & SVC
0080C4 DOCUMENT TECHNOLOGIES, INC.
0080C5 NOVELLCO DE MEXICO
0080C6 NATIONAL DATACOMM CORPORATION
0080C7 XIRCOM
0080C8 D-LINK SYSTEMS, INC.
0080C9 ALBERTA MICROELECTRONIC CENTRE
0080CA NETCOM RESEARCH INCORPORATED
0080CB FALCO DATA PRODUCTS
0080CC MICROWAVE BYPASS SYSTEMS
0080CD MICRONICS COMPUTER, INC.
0080CE BROADCAST TELEVISION SYSTEMS
0080CF EMBEDDED PERFORMANCE INC.
0080D0 COMPUTER PERIPHERALS, INC.
0080D1 KIMTRON CORPORATION
0080D2 SHINNIHONDENKO CO., LTD.
0080D3 SHIVA CORP.
0080D4 CHASE RESEARCH LTD.
0080D5 CADRE TECHNOLOGIES
0080D6 NUVOTECH, INC.
0080D7 Fantum Engineering
0080D8 NETWORK PERIPHERALS INC.
0080D9 EMK ELEKTRONIK
0080DA BRUEL & KJAER
0080DB GRAPHON CORPORATION
0080DC PICKER INTERNATIONAL
0080DD GMX INC/GIMIX
0080DE GIPSI S.A.
0080DF ADC CODENOLL TECHNOLOGY CORP.
0080E0 XTP SYSTEMS, INC.
0080E1 STMICROELECTRONICS
0080E2 T.D.I. CO., LTD.
0080E3 CORAL NETWORK CORPORATION
0080E4 NORTHWEST DIGITAL SYSTEMS, INC
0080E5 MYLEX CORPORATION
0080E6 PEER NETWORKS, INC.
0080E7 LYNWOOD SCIENTIFIC DEV. LTD.
0080E8 CUMULUS CORPORATIION
0080E9 Madge Ltd.
0080EA ADVA Optical Networking Ltd.
0080EB COMPCONTROL B.V.
0080EC SUPERCOMPUTING SOLUTIONS, INC.
0080ED IQ TECHNOLOGIES, INC.
0080EE THOMSON CSF
0080EF RATIONAL
55240080F0 Panasonic Communications Co., Ltd.
55250080F1 OPUS SYSTEMS
55260080F2 RAYCOM SYSTEMS INC
55270080F3 SUN ELECTRONICS CORP.
55280080F4 TELEMECANIQUE ELECTRIQUE
55290080F5 QUANTEL LTD
55300080F6 SYNERGY MICROSYSTEMS
55310080F7 ZENITH ELECTRONICS
55320080F8 MIZAR, INC.
55330080F9 HEURIKON CORPORATION
55340080FA RWT GMBH
55350080FB BVM LIMITED
55360080FC AVATAR CORPORATION
55370080FD EXSCEED CORPRATION
55380080FE AZURE TECHNOLOGIES, INC.
55390080FF SOC. DE TELEINFORMATIQUE RTC
5540009000 DIAMOND MULTIMEDIA
5541009001 NISHIMU ELECTRONICS INDUSTRIES CO., LTD.
5542009002 ALLGON AB
5543009003 APLIO
5544009004 3COM EUROPE LTD.
5545009005 PROTECH SYSTEMS CO., LTD.
5546009006 HAMAMATSU PHOTONICS K.K.
5547009007 DOMEX TECHNOLOGY CORP.
5548009008 HanA Systems Inc.
5549009009 i Controls, Inc.
555000900A PROTON ELECTRONIC INDUSTRIAL CO., LTD.
555100900B LANNER ELECTRONICS, INC.
555200900C CISCO SYSTEMS, INC.
555300900D OVERLAND DATA INC.
555400900E HANDLINK TECHNOLOGIES, INC.
555500900F KAWASAKI HEAVY INDUSTRIES, LTD
5556009010 SIMULATION LABORATORIES, INC.
5557009011 WAVTrace, Inc.
5558009012 GLOBESPAN SEMICONDUCTOR, INC.
5559009013 SAMSAN CORP.
5560009014 ROTORK INSTRUMENTS, LTD.
5561009015 CENTIGRAM COMMUNICATIONS CORP.
5562009016 ZAC
5563009017 ZYPCOM, INC.
5564009018 ITO ELECTRIC INDUSTRY CO, LTD.
5565009019 HERMES ELECTRONICS CO., LTD.
556600901A UNISPHERE SOLUTIONS
556700901B DIGITAL CONTROLS
556800901C mps Software Gmbh
556900901D PEC (NZ) LTD.
557000901E SELESTA INGEGNE RIA S.P.A.
557100901F ADTEC PRODUCTIONS, INC.
5572009020 PHILIPS ANALYTICAL X-RAY B.V.
5573009021 CISCO SYSTEMS, INC.
5574009022 IVEX
5575009023 ZILOG INC.
5576009024 PIPELINKS, INC.
5577009025 VISION SYSTEMS LTD. PTY
5578009026 ADVANCED SWITCHING COMMUNICATIONS, INC.
5579009027 INTEL CORPORATION
5580009028 NIPPON SIGNAL CO., LTD.
5581009029 CRYPTO AG
558200902A COMMUNICATION DEVICES, INC.
558300902B CISCO SYSTEMS, INC.
558400902C DATA & CONTROL EQUIPMENT LTD.
558500902D DATA ELECTRONICS (AUST.) PTY, LTD.
558600902E NAMCO LIMITED
558700902F NETCORE SYSTEMS, INC.
5588009030 HONEYWELL-DATING
5589009031 MYSTICOM, LTD.
5590009032 PELCOMBE GROUP LTD.
5591009033 INNOVAPHONE GmbH
5592009034 IMAGIC, INC.
5593009035 ALPHA TELECOM, INC.
5594009036 ens, inc.
5595009037 ACUCOMM, INC.
5596009038 FOUNTAIN TECHNOLOGIES, INC.
5597009039 SHASTA NETWORKS
559800903A NIHON MEDIA TOOL INC.
559900903B TriEMS Research Lab, Inc.
560000903C ATLANTIC NETWORK SYSTEMS
560100903D BIOPAC SYSTEMS, INC.
560200903E N.V. PHILIPS INDUSTRIAL ACTIVITIES
560300903F AZTEC RADIOMEDIA
5604009040 Siemens Network Convergence LLC
5605009041 APPLIED DIGITAL ACCESS
5606009042 ECCS, Inc.
5607009043 NICHIBEI DENSHI CO., LTD.
5608009044 ASSURED DIGITAL, INC.
5609009045 Marconi Communications
5610009046 DEXDYNE, LTD.
5611009047 GIGA FAST E. LTD.
5612009048 ZEAL CORPORATION
5613009049 ENTRIDIA CORPORATION
561400904A CONCUR SYSTEM TECHNOLOGIES
561500904B GemTek Technology Co., Ltd.
561600904C EPIGRAM, INC.
561700904D SPEC S.A.
561800904E DELEM BV
561900904F ABB POWER T&D COMPANY, INC.
5620009050 TELESTE OY
5621009051 ULTIMATE TECHNOLOGY CORP.
5622009052 SELCOM ELETTRONICA S.R.L.
5623009053 DAEWOO ELECTRONICS CO., LTD.
5624009054 INNOVATIVE SEMICONDUCTORS, INC
5625009055 PARKER HANNIFIN CORPORATION COMPUMOTOR DIVISION
5626009056 TELESTREAM, INC.
5627009057 AANetcom, Inc.
5628009058 Ultra Electronics Ltd., Command and Control Systems
5629009059 TELECOM DEVICE K.K.
563000905A DEARBORN GROUP, INC.
563100905B RAYMOND AND LAE ENGINEERING
563200905C EDMI
563300905D NETCOM SICHERHEITSTECHNIK GmbH
563400905E RAULAND-BORG CORPORATION
563500905F CISCO SYSTEMS, INC.
5636009060 SYSTEM CREATE CORP.
5637009061 PACIFIC RESEARCH & ENGINEERING CORPORATION
5638009062 ICP VORTEX COMPUTERSYSTEME GmbH
5639009063 COHERENT COMMUNICATIONS SYSTEMS CORPORATION
5640009064 THOMSON BROADCAST SYSTEMS
5641009065 FINISAR CORPORATION
5642009066 Troika Networks, Inc.
5643009067 WalkAbout Computers, Inc.
5644009068 DVT CORP.
5645009069 JUNIPER NETWORKS, INC.
564600906A TURNSTONE SYSTEMS, INC.
564700906B APPLIED RESOURCES, INC.
564800906C GWT GLOBAL WEIGHING TECHNOLOGIES GmbH
564900906D CISCO SYSTEMS, INC.
565000906E PRAXON, INC.
565100906F CISCO SYSTEMS, INC.
5652009070 NEO NETWORKS, INC.
5653009071 Applied Innovation Inc.
5654009072 SIMRAD AS
5655009073 GAIO TECHNOLOGY
5656009074 ARGON NETWORKS, INC.
5657009075 NEC DO BRASIL S.A.
5658009076 FMT AIRCRAFT GATE SUPPORT SYSTEMS AB
5659009077 ADVANCED FIBRE COMMUNICATIONS
5660009078 MER TELEMANAGEMENT SOLUTIONS, LTD.
5661009079 ClearOne, Inc.
566200907A SPECTRALINK CORP.
566300907B E-TECH, INC.
566400907C DIGITALCAST, INC.
566500907D Lake Communications
566600907E VETRONIX CORP.
566700907F WatchGuard Technologies, Inc.
5668009080 NOT LIMITED, INC.
5669009081 ALOHA NETWORKS, INC.
5670009082 FORCE INSTITUTE
5671009083 TURBO COMMUNICATION, INC.
5672009084 ATECH SYSTEM
5673009085 GOLDEN ENTERPRISES, INC.
5674009086 CISCO SYSTEMS, INC.
5675009087 ITIS
5676009088 BAXALL SECURITY LTD.
5677009089 SOFTCOM MICROSYSTEMS, INC.
567800908A BAYLY COMMUNICATIONS, INC.
567900908B CELL COMPUTING, INC.
568000908C ETREND ELECTRONICS, INC.
568100908D VICKERS ELECTRONICS SYSTEMS
568200908E Nortel Networks Broadband Access
568300908F AUDIO CODES LTD.
5684009090 I-BUS
5685009091 DigitalScape, Inc.
5686009092 CISCO SYSTEMS, INC.
5687009093 NANAO CORPORATION
5688009094 OSPREY TECHNOLOGIES, INC.
5689009095 UNIVERSAL AVIONICS
5690009096 ASKEY COMPUTER CORP.
5691009097 SYCAMORE NETWORKS
5692009098 SBC DESIGNS, INC.
5693009099 ALLIED TELESIS, K.K.
569400909A ONE WORLD SYSTEMS, INC.
569500909B MARKPOINT AB
569600909C Terayon Communications Systems
569700909D GSE SYSTEMS, INC.
569800909E Critical IO, LLC
569900909F DIGI-DATA CORPORATION
57000090A0 8X8 INC.
57010090A1 FLYING PIG SYSTEMS, LTD.
57020090A2 CYBERTAN TECHNOLOGY, INC.
57030090A3 Corecess Inc.
57040090A4 ALTIGA NETWORKS
57050090A5 SPECTRA LOGIC
57060090A6 CISCO SYSTEMS, INC.
57070090A7 CLIENTEC CORPORATION
57080090A8 NineTiles Networks, Ltd.
57090090A9 WESTERN DIGITAL
57100090AA INDIGO ACTIVE VISION SYSTEMS LIMITED
57110090AB CISCO SYSTEMS, INC.
57120090AC OPTIVISION, INC.
57130090AD ASPECT ELECTRONICS, INC.
57140090AE ITALTEL S.p.A.
57150090AF J. MORITA MFG. CORP.
57160090B0 VADEM
57170090B1 CISCO SYSTEMS, INC.
57180090B2 AVICI SYSTEMS INC.
57190090B3 AGRANAT SYSTEMS
57200090B4 WILLOWBROOK TECHNOLOGIES
57210090B5 NIKON CORPORATION
57220090B6 FIBEX SYSTEMS
57230090B7 DIGITAL LIGHTWAVE, INC.
57240090B8 ROHDE & SCHWARZ GMBH & CO. KG
57250090B9 BERAN INSTRUMENTS LTD.
57260090BA VALID NETWORKS, INC.
57270090BB TAINET COMMUNICATION SYSTEM Corp.
57280090BC TELEMANN CO., LTD.
57290090BD OMNIA COMMUNICATIONS, INC.
57300090BE IBC/INTEGRATED BUSINESS COMPUTERS
57310090BF CISCO SYSTEMS, INC.
57320090C0 K.J. LAW ENGINEERS, INC.
57330090C1 Peco II, Inc.
57340090C2 JK microsystems, Inc.
57350090C3 TOPIC SEMICONDUCTOR CORP.
57360090C4 JAVELIN SYSTEMS, INC.
57370090C5 INTERNET MAGIC, INC.
57380090C6 OPTIM SYSTEMS, INC.
57390090C7 ICOM INC.
57400090C8 WAVERIDER COMMUNICATIONS (CANADA) INC.
57410090C9 DPAC Technologies
57420090CA ACCORD VIDEO TELECOMMUNICATIONS, LTD.
57430090CB Wireless OnLine, Inc.
57440090CC PLANET COMMUNICATIONS, INC.
57450090CD ENT-EMPRESA NACIONAL DE TELECOMMUNICACOES, S.A.
57460090CE TETRA GmbH
57470090CF NORTEL
57480090D0 Thomson Belgium
57490090D1 LEICHU ENTERPRISE CO., LTD.
57500090D2 ARTEL VIDEO SYSTEMS
57510090D3 GIESECKE & DEVRIENT GmbH
57520090D4 BindView Development Corp.
57530090D5 EUPHONIX, INC.
57540090D6 CRYSTAL GROUP
57550090D7 NetBoost Corp.
57560090D8 WHITECROSS SYSTEMS
57570090D9 CISCO SYSTEMS, INC.
57580090DA DYNARC, INC.
57590090DB NEXT LEVEL COMMUNICATIONS
57600090DC TECO INFORMATION SYSTEMS
57610090DD THE MIHARU COMMUNICATIONS CO., LTD.
57620090DE CARDKEY SYSTEMS, INC.
57630090DF MITSUBISHI CHEMICAL AMERICA, INC.
57640090E0 SYSTRAN CORP.
57650090E1 TELENA S.P.A.
57660090E2 DISTRIBUTED PROCESSING TECHNOLOGY
57670090E3 AVEX ELECTRONICS INC.
57680090E4 NEC AMERICA, INC.
57690090E5 TEKNEMA, INC.
57700090E6 ACER LABORATORIES, INC.
57710090E7 HORSCH ELEKTRONIK AG
57720090E8 MOXA TECHNOLOGIES CORP., LTD.
57730090E9 JANZ COMPUTER AG
57740090EA ALPHA TECHNOLOGIES, INC.
57750090EB SENTRY TELECOM SYSTEMS
57760090EC PYRESCOM
57770090ED CENTRAL SYSTEM RESEARCH CO., LTD.
57780090EE PERSONAL COMMUNICATIONS TECHNOLOGIES
57790090EF INTEGRIX, INC.
57800090F0 HARMONIC LIGHTWAVES, LTD.
57810090F1 DOT HILL SYSTEMS CORPORATION
57820090F2 CISCO SYSTEMS, INC.
57830090F3 ASPECT COMMUNICATIONS
57840090F4 LIGHTNING INSTRUMENTATION
57850090F5 CLEVO CO.
57860090F6 ESCALATE NETWORKS, INC.
57870090F7 NBASE COMMUNICATIONS LTD.
57880090F8 MEDIATRIX TELECOM
57890090F9 LEITCH
57900090FA GigaNet, Inc.
57910090FB PORTWELL, INC.
57920090FC NETWORK COMPUTING DEVICES
57930090FD CopperCom, Inc.
57940090FE ELECOM CO., LTD. (LANEED DIV.)
57950090FF TELLUS TECHNOLOGY INC.
57960091D6 Crystal Group, Inc.
5797009D8E CARDIAC RECORDERS, INC.
579800A000 CENTILLION NETWORKS, INC.
579900A001 WATKINS-JOHNSON COMPANY
580000A002 LEEDS & NORTHRUP AUSTRALIA PTY LTD
580100A003 STAEFA CONTROL SYSTEM
580200A004 NETPOWER, INC.
580300A005 DANIEL INSTRUMENTS, LTD.
580400A006 IMAGE DATA PROCESSING SYSTEM GROUP
580500A007 APEXX TECHNOLOGY, INC.
580600A008 NETCORP
580700A009 WHITETREE NETWORK
580800A00A R.D.C. COMMUNICATION
580900A00B COMPUTEX CO., LTD.
581000A00C KINGMAX TECHNOLOGY, INC.
581100A00D THE PANDA PROJECT
581200A00E VISUAL NETWORKS, INC.
581300A00F Broadband Technologies
581400A010 SYSLOGIC DATENTECHNIK AG
581500A011 MUTOH INDUSTRIES LTD.
581600A012 B.A.T.M. ADVANCED TECHNOLOGIES
581700A013 TELTREND LTD.
581800A014 CSIR
581900A015 WYLE
582000A016 MICROPOLIS CORP.
582100A017 J B M CORPORATION
582200A018 CREATIVE CONTROLLERS, INC.
582300A019 NEBULA CONSULTANTS, INC.
582400A01A BINAR ELEKTRONIK AB
582500A01B PREMISYS COMMUNICATIONS, INC.
582600A01C NASCENT NETWORKS CORPORATION
582700A01D SIXNET
582800A01E EST CORPORATION
582900A01F TRICORD SYSTEMS, INC.
583000A020 CITICORP/TTI
583100A021 GENERAL DYNAMICS-
583200A022 CENTRE FOR DEVELOPMENT OF ADVANCED COMPUTING
583300A023 APPLIED CREATIVE TECHNOLOGY, INC.
583400A024 3COM CORPORATION
583500A025 REDCOM LABS INC.
583600A026 TELDAT, S.A.
583700A027 FIREPOWER SYSTEMS, INC.
583800A028 CONNER PERIPHERALS
583900A029 COULTER CORPORATION
584000A02A TRANCELL SYSTEMS
584100A02B TRANSITIONS RESEARCH CORP.
584200A02C interWAVE Communications
584300A02D 1394 Trade Association
584400A02E BRAND COMMUNICATIONS, LTD.
584500A02F PIRELLI CAVI
584600A030 CAPTOR NV/SA
584700A031 HAZELTINE CORPORATION, MS 1-17
584800A032 GES SINGAPORE PTE. LTD.
584900A033 imc MeBsysteme GmbH
585000A034 AXEL
585100A035 CYLINK CORPORATION
585200A036 APPLIED NETWORK TECHNOLOGY
585300A037 DATASCOPE CORPORATION
585400A038 EMAIL ELECTRONICS
585500A039 ROSS TECHNOLOGY, INC.
585600A03A KUBOTEK CORPORATION
585700A03B TOSHIN ELECTRIC CO., LTD.
585800A03C EG&G NUCLEAR INSTRUMENTS
585900A03D OPTO-22
586000A03E ATM FORUM
586100A03F COMPUTER SOCIETY MICROPROCESSOR & MICROPROCESSOR STANDARDS C
586200A040 APPLE COMPUTER
586300A041 LEYBOLD-INFICON
586400A042 SPUR PRODUCTS CORP.
586500A043 AMERICAN TECHNOLOGY LABS, INC.
586600A044 NTT IT CO., LTD.
586700A045 PHOENIX CONTACT GMBH & CO.
586800A046 SCITEX CORP. LTD.
586900A047 INTEGRATED FITNESS CORP.
587000A048 QUESTECH, LTD.
587100A049 DIGITECH INDUSTRIES, INC.
587200A04A NISSHIN ELECTRIC CO., LTD.
587300A04B TFL LAN INC.
587400A04C INNOVATIVE SYSTEMS & TECHNOLOGIES, INC.
587500A04D EDA INSTRUMENTS, INC.
587600A04E VOELKER TECHNOLOGIES, INC.
587700A04F AMERITEC CORP.
587800A050 CYPRESS SEMICONDUCTOR
587900A051 ANGIA COMMUNICATIONS. INC.
588000A052 STANILITE ELECTRONICS PTY. LTD
588100A053 COMPACT DEVICES, INC.
588200A055 Data Device Corporation
588300A056 MICROPROSS
588400A057 LANCOM Systems GmbH
588500A058 GLORY, LTD.
588600A059 HAMILTON HALLMARK
588700A05A KOFAX IMAGE PRODUCTS
588800A05B MARQUIP, INC.
588900A05C INVENTORY CONVERSION, INC./
589000A05D CS COMPUTER SYSTEME GmbH
589100A05E MYRIAD LOGIC INC.
589200A05F BTG ENGINEERING BV
589300A060 ACER PERIPHERALS, INC.
589400A061 PURITAN BENNETT
589500A062 AES PRODATA
589600A063 JRL SYSTEMS, INC.
589700A064 KVB/ANALECT
589800A065 NEXLAND, INC.
589900A066 ISA CO., LTD.
590000A067 NETWORK SERVICES GROUP
590100A068 BHP LIMITED
590200A069 Symmetricom, Inc.
590300A06A Verilink Corporation
590400A06B DMS DORSCH MIKROSYSTEM GMBH
590500A06C SHINDENGEN ELECTRIC MFG. CO., LTD.
590600A06D MANNESMANN TALLY CORPORATION
590700A06E AUSTRON, INC.
590800A06F THE APPCON GROUP, INC.
590900A070 COASTCOM
591000A071 VIDEO LOTTERY TECHNOLOGIES,INC
591100A072 OVATION SYSTEMS LTD.
591200A073 COM21, INC.
591300A074 PERCEPTION TECHNOLOGY
591400A075 MICRON TECHNOLOGY, INC.
591500A076 CARDWARE LAB, INC.
591600A077 FUJITSU NEXION, INC.
591700A078 Marconi Communications
591800A079 ALPS ELECTRIC (USA), INC.
591900A07A ADVANCED PERIPHERALS TECHNOLOGIES, INC.
592000A07B DAWN COMPUTER INCORPORATION
592100A07C TONYANG NYLON CO., LTD.
592200A07D SEEQ TECHNOLOGY, INC.
592300A07E AVID TECHNOLOGY, INC.
592400A07F GSM-SYNTEL, LTD.
592500A080 ANTARES MICROSYSTEMS
592600A081 ALCATEL DATA NETWORKS
592700A082 NKT ELEKTRONIK A/S
592800A083 ASIMMPHONY TURKEY
592900A084 DATAPLEX PTY. LTD.
593000A086 AMBER WAVE SYSTEMS, INC.
593100A087 Zarlink Semiconductor Ltd.
593200A088 ESSENTIAL COMMUNICATIONS
593300A089 XPOINT TECHNOLOGIES, INC.
593400A08A BROOKTROUT TECHNOLOGY, INC.
593500A08B ASTON ELECTRONIC DESIGNS LTD.
593600A08C MultiMedia LANs, Inc.
593700A08D JACOMO CORPORATION
593800A08E Nokia Internet Communications
593900A08F DESKNET SYSTEMS, INC.
594000A090 TimeStep Corporation
594100A091 APPLICOM INTERNATIONAL
594200A092 H. BOLLMANN MANUFACTURERS, LTD
594300A093 B/E AEROSPACE, Inc.
594400A094 COMSAT CORPORATION
594500A095 ACACIA NETWORKS, INC.
594600A096 MITUMI ELECTRIC CO., LTD.
594700A097 JC INFORMATION SYSTEMS
594800A098 NETWORK APPLIANCE CORP.
594900A099 K-NET LTD.
595000A09A NIHON KOHDEN AMERICA
595100A09B QPSX COMMUNICATIONS, LTD.
595200A09C Xyplex, Inc.
595300A09D JOHNATHON FREEMAN TECHNOLOGIES
595400A09E ICTV
595500A09F COMMVISION CORP.
595600A0A0 COMPACT DATA, LTD.
595700A0A1 EPIC DATA INC.
595800A0A2 DIGICOM S.P.A.
595900A0A3 RELIABLE POWER METERS
596000A0A4 MICROS SYSTEMS, INC.
596100A0A5 TEKNOR MICROSYSTEME, INC.
596200A0A6 M.I. SYSTEMS, K.K.
596300A0A7 VORAX CORPORATION
596400A0A8 RENEX CORPORATION
596500A0A9 GN NETTEST (CANADA) NAVTEL DIVISION
596600A0AA SPACELABS MEDICAL
596700A0AB NETCS INFORMATIONSTECHNIK GMBH
596800A0AC GILAT SATELLITE NETWORKS, LTD.
596900A0AD MARCONI SPA
597000A0AE NUCOM SYSTEMS, INC.
597100A0AF WMS INDUSTRIES
597200A0B0 I-O DATA DEVICE, INC.
597300A0B1 FIRST VIRTUAL CORPORATION
597400A0B2 SHIMA SEIKI
597500A0B3 ZYKRONIX
597600A0B4 TEXAS MICROSYSTEMS, INC.
597700A0B5 3H TECHNOLOGY
597800A0B6 SANRITZ AUTOMATION CO., LTD.
597900A0B7 CORDANT, INC.
598000A0B8 SYMBIOS LOGIC INC.
598100A0B9 EAGLE TECHNOLOGY, INC.
598200A0BA PATTON ELECTRONICS CO.
598300A0BB HILAN GMBH
598400A0BC VIASAT, INCORPORATED
598500A0BD I-TECH CORP.
598600A0BE INTEGRATED CIRCUIT SYSTEMS, INC. COMMUNICATIONS GROUP
598700A0BF WIRELESS DATA GROUP MOTOROLA
598800A0C0 DIGITAL LINK CORP.
598900A0C1 ORTIVUS MEDICAL AB
599000A0C2 R.A. SYSTEMS CO., LTD.
599100A0C3 UNICOMPUTER GMBH
599200A0C4 CRISTIE ELECTRONICS LTD.
599300A0C5 ZYXEL COMMUNICATION
599400A0C6 QUALCOMM INCORPORATED
599500A0C7 TADIRAN TELECOMMUNICATIONS
599600A0C8 ADTRAN INC.
599700A0C9 INTEL CORPORATION - HF1-06
599800A0CA FUJITSU DENSO LTD.
599900A0CB ARK TELECOMMUNICATIONS, INC.
600000A0CC LITE-ON COMMUNICATIONS, INC.
600100A0CD DR. JOHANNES HEIDENHAIN GmbH
600200A0CE ASTROCOM CORPORATION
600300A0CF SOTAS, INC.
600400A0D0 TEN X TECHNOLOGY, INC.
600500A0D1 INVENTEC CORPORATION
600600A0D2 ALLIED TELESIS INTERNATIONAL CORPORATION
600700A0D3 INSTEM COMPUTER SYSTEMS, LTD.
600800A0D4 RADIOLAN, INC.
600900A0D5 SIERRA WIRELESS INC.
601000A0D6 SBE, INC.
601100A0D7 KASTEN CHASE APPLIED RESEARCH
601200A0D8 SPECTRA - TEK
601300A0D9 CONVEX COMPUTER CORPORATION
601400A0DA INTEGRATED SYSTEMS Technology, Inc.
601500A0DB FISHER & PAYKEL PRODUCTION
601600A0DC O.N. ELECTRONIC CO., LTD.
601700A0DD AZONIX CORPORATION
601800A0DE YAMAHA CORPORATION
601900A0DF STS TECHNOLOGIES, INC.
602000A0E0 TENNYSON TECHNOLOGIES PTY LTD
602100A0E1 WESTPORT RESEARCH ASSOCIATES, INC.
602200A0E2 KEISOKU GIKEN CORP.
602300A0E3 XKL SYSTEMS CORP.
602400A0E4 OPTIQUEST
602500A0E5 NHC COMMUNICATIONS
602600A0E6 DIALOGIC CORPORATION
602700A0E7 CENTRAL DATA CORPORATION
602800A0E8 REUTERS HOLDINGS PLC
602900A0E9 ELECTRONIC RETAILING SYSTEMS INTERNATIONAL
603000A0EA ETHERCOM CORP.
603100A0EB Encore Networks
603200A0EC TRANSMITTON LTD.
603300A0ED PRI AUTOMATION
603400A0EE NASHOBA NETWORKS
603500A0EF LUCIDATA LTD.
603600A0F0 TORONTO MICROELECTRONICS INC.
603700A0F1 MTI
603800A0F2 INFOTEK COMMUNICATIONS, INC.
603900A0F3 STAUBLI
604000A0F4 GE
604100A0F5 RADGUARD LTD.
604200A0F6 AutoGas Systems Inc.
604300A0F7 V.I COMPUTER CORP.
604400A0F8 SYMBOL TECHNOLOGIES, INC.
604500A0F9 BINTEC COMMUNICATIONS GMBH
604600A0FA Marconi Communication GmbH
604700A0FB TORAY ENGINEERING CO., LTD.
604800A0FC IMAGE SCIENCES, INC.
604900A0FD SCITEX DIGITAL PRINTING, INC.
605000A0FE BOSTON TECHNOLOGY, INC.
605100A0FF TELLABS OPERATIONS, INC.
605200AA00 INTEL CORPORATION
605300AA01 INTEL CORPORATION
605400AA02 INTEL CORPORATION
605500AA3C OLIVETTI TELECOM SPA (OLTECO)
605600B009 Grass Valley Group
605700B017 InfoGear Technology Corp.
605800B019 Casi-Rusco
605900B01C Westport Technologies
606000B01E Rantic Labs, Inc.
606100B02A ORSYS GmbH
606200B02D ViaGate Technologies, Inc.
606300B03B HiQ Networks
606400B048 Marconi Communications Inc.
606500B04A Cisco Systems, Inc.
606600B052 Intellon Corporation
606700B064 Cisco Systems, Inc.
606800B069 Honewell Oy
606900B06D Jones Futurex Inc.
607000B080 Mannesmann Ipulsys B.V.
607100B086 LocSoft Limited
607200B08E Cisco Systems, Inc.
607300B091 Transmeta Corp.
607400B094 Alaris, Inc.
607500B09A Morrow Technologies Corp.
607600B09D Point Grey Research Inc.
607700B0AC SIAE-Microelettronica S.p.A.
607800B0AE Symmetricom
607900B0B3 Xstreamis PLC
608000B0C2 Cisco Systems, Inc.
608100B0C7 Tellabs Operations, Inc.
608200B0CE TECHNOLOGY RESCUE
608300B0D0 Dell Computer Corp.
608400B0DB Nextcell, Inc.
608500B0DF Reliable Data Technology, Inc.
608600B0E7 British Federal Ltd.
608700B0EC EACEM
608800B0EE Ajile Systems, Inc.
608900B0F0 CALY NETWORKS
609000B0F5 NetWorth Technologies, Inc.
609100BB01 OCTOTHORPE CORP.
609200BBF0 UNGERMANN-BASS INC.
609300C000 LANOPTICS, LTD.
609400C001 DIATEK PATIENT MANAGMENT
609500C002 SERCOMM CORPORATION
609600C003 GLOBALNET COMMUNICATIONS
609700C004 JAPAN BUSINESS COMPUTER CO.LTD
609800C005 LIVINGSTON ENTERPRISES, INC.
609900C006 NIPPON AVIONICS CO., LTD.
610000C007 PINNACLE DATA SYSTEMS, INC.
610100C008 SECO SRL
610200C009 KT TECHNOLOGY (S) PTE LTD
610300C00A MICRO CRAFT
610400C00B NORCONTROL A.S.
610500C00C RELIA TECHNOLGIES
610600C00D ADVANCED LOGIC RESEARCH, INC.
610700C00E PSITECH, INC.
610800C00F QUANTUM SOFTWARE SYSTEMS LTD.
610900C010 HIRAKAWA HEWTECH CORP.
611000C011 INTERACTIVE COMPUTING DEVICES
611100C012 NETSPAN CORPORATION
611200C013 NETRIX
611300C014 TELEMATICS CALABASAS INT'L,INC
611400C015 NEW MEDIA CORPORATION
611500C016 ELECTRONIC THEATRE CONTROLS
611600C017 FORTE NETWORKS
611700C018 LANART CORPORATION
611800C019 LEAP TECHNOLOGY, INC.
611900C01A COROMETRICS MEDICAL SYSTEMS
612000C01B SOCKET COMMUNICATIONS, INC.
612100C01C INTERLINK COMMUNICATIONS LTD.
612200C01D GRAND JUNCTION NETWORKS, INC.
612300C01E LA FRANCAISE DES JEUX
612400C01F S.E.R.C.E.L.
612500C020 ARCO ELECTRONIC, CONTROL LTD.
612600C021 NETEXPRESS
612700C022 LASERMASTER TECHNOLOGIES, INC.
612800C023 TUTANKHAMON ELECTRONICS
612900C024 EDEN SISTEMAS DE COMPUTACAO SA
613000C025 DATAPRODUCTS CORPORATION
613100C026 LANS TECHNOLOGY CO., LTD.
613200C027 CIPHER SYSTEMS, INC.
613300C028 JASCO CORPORATION
613400C029 Nexans Deutschland AG - ANS
613500C02A OHKURA ELECTRIC CO., LTD.
613600C02B GERLOFF GESELLSCHAFT FUR
613700C02C CENTRUM COMMUNICATIONS, INC.
613800C02D FUJI PHOTO FILM CO., LTD.
613900C02E NETWIZ
614000C02F OKUMA CORPORATION
614100C030 INTEGRATED ENGINEERING B. V.
614200C031 DESIGN RESEARCH SYSTEMS, INC.
614300C032 I-CUBED LIMITED
614400C033 TELEBIT COMMUNICATIONS APS
614500C034 TRANSACTION NETWORK
614600C035 QUINTAR COMPANY
614700C036 RAYTECH ELECTRONIC CORP.
614800C037 DYNATEM
614900C038 RASTER IMAGE PROCESSING SYSTEM
615000C039 TDK SEMICONDUCTOR CORPORATION
615100C03A MEN-MIKRO ELEKTRONIK GMBH
615200C03B MULTIACCESS COMPUTING CORP.
615300C03C TOWER TECH S.R.L.
615400C03D WIESEMANN & THEIS GMBH
615500C03E FA. GEBR. HELLER GMBH
615600C03F STORES AUTOMATED SYSTEMS, INC.
615700C040 ECCI
615800C041 DIGITAL TRANSMISSION SYSTEMS
615900C042 DATALUX CORP.
616000C043 STRATACOM
616100C044 EMCOM CORPORATION
616200C045 ISOLATION SYSTEMS, LTD.
616300C046 KEMITRON LTD.
616400C047 UNIMICRO SYSTEMS, INC.
616500C048 BAY TECHNICAL ASSOCIATES
616600C049 U.S. ROBOTICS, INC.
616700C04A GROUP 2000 AG
616800C04B CREATIVE MICROSYSTEMS
616900C04C DEPARTMENT OF FOREIGN AFFAIRS
617000C04D MITEC, INC.
617100C04E COMTROL CORPORATION
617200C04F DELL COMPUTER CORPORATION
617300C050 TOYO DENKI SEIZO K.K.
617400C051 ADVANCED INTEGRATION RESEARCH
617500C052 BURR-BROWN
617600C053 DAVOX CORPORATION
617700C054 NETWORK PERIPHERALS, LTD.
617800C055 MODULAR COMPUTING TECHNOLOGIES
617900C056 SOMELEC
618000C057 MYCO ELECTRONICS
618100C058 DATAEXPERT CORP.
618200C059 NIPPON DENSO CO., LTD.
618300C05A SEMAPHORE COMMUNICATIONS CORP.
618400C05B NETWORKS NORTHWEST, INC.
618500C05C ELONEX PLC
618600C05D L&N TECHNOLOGIES
618700C05E VARI-LITE, INC.
618800C05F FINE-PAL COMPANY LIMITED
618900C060 ID SCANDINAVIA AS
619000C061 SOLECTEK CORPORATION
619100C062 IMPULSE TECHNOLOGY
619200C063 MORNING STAR TECHNOLOGIES, INC
619300C064 GENERAL DATACOMM IND. INC.
619400C065 SCOPE COMMUNICATIONS, INC.
619500C066 DOCUPOINT, INC.
619600C067 UNITED BARCODE INDUSTRIES
619700C068 PHILIP DRAKE ELECTRONICS LTD.
619800C069 Axxcelera Broadband Wireless
619900C06A ZAHNER-ELEKTRIK GMBH & CO. KG
620000C06B OSI PLUS CORPORATION
620100C06C SVEC COMPUTER CORP.
620200C06D BOCA RESEARCH, INC.
620300C06E HAFT TECHNOLOGY, INC.
620400C06F KOMATSU LTD.
620500C070 SECTRA SECURE-TRANSMISSION AB
620600C071 AREANEX COMMUNICATIONS, INC.
620700C072 KNX LTD.
620800C073 XEDIA CORPORATION
620900C074 TOYODA AUTOMATIC LOOM
621000C075 XANTE CORPORATION
621100C076 I-DATA INTERNATIONAL A-S
621200C077 DAEWOO TELECOM LTD.
621300C078 COMPUTER SYSTEMS ENGINEERING
621400C079 FONSYS CO.,LTD.
621500C07A PRIVA B.V.
621600C07B ASCEND COMMUNICATIONS, INC.
621700C07C HIGHTECH INFORMATION
621800C07D RISC DEVELOPMENTS LTD.
621900C07E KUBOTA CORPORATION ELECTRONIC
622000C07F NUPON COMPUTING CORP.
622100C080 NETSTAR, INC.
622200C081 METRODATA LTD.
622300C082 MOORE PRODUCTS CO.
622400C083 TRACE MOUNTAIN PRODUCTS, INC.
622500C084 DATA LINK CORP. LTD.
622600C085 ELECTRONICS FOR IMAGING, INC.
622700C086 THE LYNK CORPORATION
622800C087 UUNET TECHNOLOGIES, INC.
622900C088 EKF ELEKTRONIK GMBH
623000C089 TELINDUS DISTRIBUTION
623100C08A LAUTERBACH DATENTECHNIK GMBH
623200C08B RISQ MODULAR SYSTEMS, INC.
623300C08C PERFORMANCE TECHNOLOGIES, INC.
623400C08D TRONIX PRODUCT DEVELOPMENT
623500C08E NETWORK INFORMATION TECHNOLOGY
623600C08F MATSUSHITA ELECTRIC WORKS, LTD
623700C090 PRAIM S.R.L.
623800C091 JABIL CIRCUIT, INC.
623900C092 MENNEN MEDICAL INC.
624000C093 ALTA RESEARCH CORP.
624100C094 VMX INC.
624200C095 ZNYX
624300C096 TAMURA CORPORATION
624400C097 ARCHIPEL SA
624500C098 CHUNTEX ELECTRONIC CO., LTD.
624600C099 YOSHIKI INDUSTRIAL CO.,LTD.
624700C09A PHOTONICS CORPORATION
624800C09B RELIANCE COMM/TEC, R-TEC
624900C09C TOA ELECTRONIC LTD.
625000C09D DISTRIBUTED SYSTEMS INT'L, INC
625100C09E CACHE COMPUTERS, INC.
625200C09F QUANTA COMPUTER, INC.
625300C0A0 ADVANCE MICRO RESEARCH, INC.
625400C0A1 TOKYO DENSHI SEKEI CO.
625500C0A2 INTERMEDIUM A/S
625600C0A3 DUAL ENTERPRISES CORPORATION
625700C0A4 UNIGRAF OY
625800C0A5 DICKENS DATA SYSTEMS
625900C0A6 EXICOM AUSTRALIA PTY. LTD
626000C0A7 SEEL LTD.
626100C0A8 GVC CORPORATION
626200C0A9 BARRON MCCANN LTD.
626300C0AA SILICON VALLEY COMPUTER
626400C0AB Telco Systems, Inc.
626500C0AC GAMBIT COMPUTER COMMUNICATIONS
626600C0AD MARBEN COMMUNICATION SYSTEMS
626700C0AE TOWERCOM CO. INC. DBA PC HOUSE
626800C0AF TEKLOGIX INC.
626900C0B0 GCC TECHNOLOGIES,INC.
627000C0B1 GENIUS NET CO.
627100C0B2 NORAND CORPORATION
627200C0B3 COMSTAT DATACOMM CORPORATION
627300C0B4 MYSON TECHNOLOGY, INC.
627400C0B5 CORPORATE NETWORK SYSTEMS,INC.
627500C0B6 Snap Appliance, Inc.
627600C0B7 AMERICAN POWER CONVERSION CORP
627700C0B8 FRASER'S HILL LTD.
627800C0B9 FUNK SOFTWARE, INC.
627900C0BA NETVANTAGE
628000C0BB FORVAL CREATIVE, INC.
628100C0BC TELECOM AUSTRALIA/CSSC
628200C0BD INEX TECHNOLOGIES, INC.
628300C0BE ALCATEL - SEL
628400C0BF TECHNOLOGY CONCEPTS, LTD.
628500C0C0 SHORE MICROSYSTEMS, INC.
628600C0C1 QUAD/GRAPHICS, INC.
628700C0C2 INFINITE NETWORKS LTD.
628800C0C3 ACUSON COMPUTED SONOGRAPHY
628900C0C4 COMPUTER OPERATIONAL
629000C0C5 SID INFORMATICA
629100C0C6 PERSONAL MEDIA CORP.
629200C0C7 SPARKTRUM MICROSYSTEMS, INC.
629300C0C8 MICRO BYTE PTY. LTD.
629400C0C9 ELSAG BAILEY PROCESS
629500C0CA ALFA, INC.
629600C0CB CONTROL TECHNOLOGY CORPORATION
629700C0CC TELESCIENCES CO SYSTEMS, INC.
629800C0CD COMELTA, S.A.
629900C0CE CEI SYSTEMS & ENGINEERING PTE
630000C0CF IMATRAN VOIMA OY
630100C0D0 RATOC SYSTEM INC.
630200C0D1 COMTREE TECHNOLOGY CORPORATION
630300C0D2 SYNTELLECT, INC.
630400C0D3 OLYMPUS IMAGE SYSTEMS, INC.
630500C0D4 AXON NETWORKS, INC.
630600C0D5 QUANCOM ELECTRONIC GMBH
630700C0D6 J1 SYSTEMS, INC.
630800C0D7 TAIWAN TRADING CENTER DBA
630900C0D8 UNIVERSAL DATA SYSTEMS
631000C0D9 QUINTE NETWORK CONFIDENTIALITY
631100C0DA NICE SYSTEMS LTD.
631200C0DB IPC CORPORATION (PTE) LTD.
631300C0DC EOS TECHNOLOGIES, INC.
631400C0DD QLogic Corporation
631500C0DE ZCOMM, INC.
631600C0DF KYE Systems Corp.
631700C0E0 DSC COMMUNICATION CORP.
631800C0E1 SONIC SOLUTIONS
631900C0E2 CALCOMP, INC.
632000C0E3 OSITECH COMMUNICATIONS, INC.
632100C0E4 SIEMENS BUILDING
632200C0E5 GESPAC, S.A.
632300C0E6 Verilink Corporation
632400C0E7 FIBERDATA AB
632500C0E8 PLEXCOM, INC.
632600C0E9 OAK SOLUTIONS, LTD.
632700C0EA ARRAY TECHNOLOGY LTD.
632800C0EB SEH COMPUTERTECHNIK GMBH
632900C0EC DAUPHIN TECHNOLOGY
633000C0ED US ARMY ELECTRONIC
633100C0EE KYOCERA CORPORATION
633200C0EF ABIT CORPORATION
633300C0F0 KINGSTON TECHNOLOGY CORP.
633400C0F1 SHINKO ELECTRIC CO., LTD.
633500C0F2 TRANSITION NETWORKS
633600C0F3 NETWORK COMMUNICATIONS CORP.
633700C0F4 INTERLINK SYSTEM CO., LTD.
633800C0F5 METACOMP, INC.
633900C0F6 CELAN TECHNOLOGY INC.
634000C0F7 ENGAGE COMMUNICATION, INC.
634100C0F8 ABOUT COMPUTING INC.
634200C0F9 HARRIS AND JEFFRIES, INC.
634300C0FA CANARY COMMUNICATIONS, INC.
634400C0FB ADVANCED TECHNOLOGY LABS
634500C0FC ELASTIC REALITY, INC.
634600C0FD PROSUM
634700C0FE APTEC COMPUTER SYSTEMS, INC.
634800C0FF DOT HILL SYSTEMS CORPORATION
634900CBBD Cambridge Broadband Ltd.
635000CF1C COMMUNICATION MACHINERY CORP.
635100D000 FERRAN SCIENTIFIC, INC.
635200D001 VST TECHNOLOGIES, INC.
635300D002 DITECH CORPORATION
635400D003 COMDA ENTERPRISES CORP.
635500D004 PENTACOM LTD.
635600D005 ZHS ZEITMANAGEMENTSYSTEME
635700D006 CISCO SYSTEMS, INC.
635800D007 MIC ASSOCIATES, INC.
635900D008 MACTELL CORPORATION
636000D009 HSING TECH. ENTERPRISE CO. LTD
636100D00A LANACCESS TELECOM S.A.
636200D00B RHK TECHNOLOGY, INC.
636300D00C SNIJDER MICRO SYSTEMS
636400D00D MICROMERITICS INSTRUMENT
636500D00E PLURIS, INC.
636600D00F SPEECH DESIGN GMBH
636700D010 CONVERGENT NETWORKS, INC.
636800D011 PRISM VIDEO, INC.
636900D012 GATEWORKS CORP.
637000D013 PRIMEX AEROSPACE COMPANY
637100D014 ROOT, INC.
637200D015 UNIVEX MICROTECHNOLOGY CORP.
637300D016 SCM MICROSYSTEMS, INC.
637400D017 SYNTECH INFORMATION CO., LTD.
637500D018 QWES. COM, INC.
637600D019 DAINIPPON SCREEN CORPORATE
637700D01A URMET SUD S.P.A.
637800D01B MIMAKI ENGINEERING CO., LTD.
637900D01C SBS TECHNOLOGIES,
638000D01D FURUNO ELECTRIC CO., LTD.
638100D01E PINGTEL CORP.
638200D01F CTAM PTY. LTD.
638300D020 AIM SYSTEM, INC.
638400D021 REGENT ELECTRONICS CORP.
638500D022 INCREDIBLE TECHNOLOGIES, INC.
638600D023 INFORTREND TECHNOLOGY, INC.
638700D024 Cognex Corporation
638800D025 XROSSTECH, INC.
638900D026 HIRSCHMANN AUSTRIA GMBH
639000D027 APPLIED AUTOMATION, INC.
639100D028 OMNEON VIDEO NETWORKS
639200D029 WAKEFERN FOOD CORPORATION
639300D02A Voxent Systems Ltd.
639400D02B JETCELL, INC.
639500D02C CAMPBELL SCIENTIFIC, INC.
639600D02D ADEMCO
639700D02E COMMUNICATION AUTOMATION CORP.
639800D02F VLSI TECHNOLOGY INC.
639900D030 SAFETRAN SYSTEMS CORP.
640000D031 INDUSTRIAL LOGIC CORPORATION
640100D032 YANO ELECTRIC CO., LTD.
640200D033 DALIAN DAXIAN NETWORK
640300D034 ORMEC SYSTEMS CORP.
640400D035 BEHAVIOR TECH. COMPUTER CORP.
640500D036 TECHNOLOGY ATLANTA CORP.
640600D037 PHILIPS-DVS-LO BDR
640700D038 FIVEMERE, LTD.
640800D039 UTILICOM, INC.
640900D03A ZONEWORX, INC.
641000D03B VISION PRODUCTS PTY. LTD.
641100D03C Vieo, Inc.
641200D03D GALILEO TECHNOLOGY, LTD.
641300D03E ROCKETCHIPS, INC.
641400D03F AMERICAN COMMUNICATION
641500D040 SYSMATE CO., LTD.
641600D041 AMIGO TECHNOLOGY CO., LTD.
641700D042 MAHLO GMBH & CO. UG
641800D043 ZONAL RETAIL DATA SYSTEMS
641900D044 ALIDIAN NETWORKS, INC.
642000D045 KVASER AB
642100D046 DOLBY LABORATORIES, INC.
642200D047 XN TECHNOLOGIES
642300D048 ECTON, INC.
642400D049 IMPRESSTEK CO., LTD.
642500D04A PRESENCE TECHNOLOGY GMBH
642600D04B LA CIE GROUP S.A.
642700D04C EUROTEL TELECOM LTD.
642800D04D DIV OF RESEARCH & STATISTICS
642900D04E LOGIBAG
643000D04F BITRONICS, INC.
643100D050 ISKRATEL
643200D051 O2 MICRO, INC.
643300D052 ASCEND COMMUNICATIONS, INC.
643400D053 CONNECTED SYSTEMS
643500D054 SAS INSTITUTE INC.
643600D055 KATHREIN-WERKE KG
643700D056 SOMAT CORPORATION
643800D057 ULTRAK, INC.
643900D058 CISCO SYSTEMS, INC.
644000D059 AMBIT MICROSYSTEMS CORP.
644100D05A SYMBIONICS, LTD.
644200D05B ACROLOOP MOTION CONTROL
644300D05C TECHNOTREND SYSTEMTECHNIK GMBH
644400D05D INTELLIWORXX, INC.
644500D05E STRATABEAM TECHNOLOGY, INC.
644600D05F VALCOM, INC.
644700D060 PANASONIC EUROPEAN
644800D061 TREMON ENTERPRISES CO., LTD.
644900D062 DIGIGRAM
645000D063 CISCO SYSTEMS, INC.
645100D064 MULTITEL
645200D065 TOKO ELECTRIC
645300D066 WINTRISS ENGINEERING CORP.
645400D067 CAMPIO COMMUNICATIONS
645500D068 IWILL CORPORATION
645600D069 TECHNOLOGIC SYSTEMS
645700D06A LINKUP SYSTEMS CORPORATION
645800D06B SR TELECOM INC.
645900D06C SHAREWAVE, INC.
646000D06D ACRISON, INC.
646100D06E TRENDVIEW RECORDERS LTD.
646200D06F KMC CONTROLS
646300D070 LONG WELL ELECTRONICS CORP.
646400D071 ECHELON CORP.
646500D072 BROADLOGIC
646600D073 ACN ADVANCED COMMUNICATIONS
646700D074 TAQUA SYSTEMS, INC.
646800D075 ALARIS MEDICAL SYSTEMS, INC.
646900D076 MERRILL LYNCH & CO., INC.
647000D077 LUCENT TECHNOLOGIES
647100D078 ELTEX OF SWEDEN AB
647200D079 CISCO SYSTEMS, INC.
647300D07A AMAQUEST COMPUTER CORP.
647400D07B COMCAM INTERNATIONAL LTD.
647500D07C KOYO ELECTRONICS INC. CO.,LTD.
647600D07D COSINE COMMUNICATIONS
647700D07E KEYCORP LTD.
647800D07F STRATEGY & TECHNOLOGY, LIMITED
647900D080 EXABYTE CORPORATION
648000D081 REAL TIME DEVICES USA, INC.
648100D082 IOWAVE INC.
648200D083 INVERTEX, INC.
648300D084 NEXCOMM SYSTEMS, INC.
648400D085 OTIS ELEVATOR COMPANY
648500D086 FOVEON, INC.
648600D087 MICROFIRST INC.
648700D088 Terayon Communications Systems
648800D089 DYNACOLOR, INC.
648900D08A PHOTRON USA
649000D08B ADVA Limited
649100D08C GENOA TECHNOLOGY, INC.
649200D08D PHOENIX GROUP, INC.
649300D08E NVISION INC.
649400D08F ARDENT TECHNOLOGIES, INC.
649500D090 CISCO SYSTEMS, INC.
649600D091 SMARTSAN SYSTEMS, INC.
649700D092 GLENAYRE WESTERN MULTIPLEX
649800D093 TQ - COMPONENTS GMBH
649900D094 TIMELINE VISTA, INC.
650000D095 XYLAN CORPORATION
650100D096 3COM EUROPE LTD.
650200D097 CISCO SYSTEMS, INC.
650300D098 Photon Dynamics Canada Inc.
650400D099 ELCARD OY
650500D09A FILANET CORPORATION
650600D09B SPECTEL LTD.
650700D09C KAPADIA COMMUNICATIONS
650800D09D VERIS INDUSTRIES
650900D09E 2WIRE, INC.
651000D09F NOVTEK TEST SYSTEMS
651100D0A0 MIPS DENMARK
651200D0A1 OSKAR VIERLING GMBH + CO. KG
651300D0A2 INTEGRATED DEVICE
651400D0A3 VOCAL DATA, INC.
651500D0A4 ALANTRO COMMUNICATIONS
651600D0A5 AMERICAN ARIUM
651700D0A6 LANBIRD TECHNOLOGY CO., LTD.
651800D0A7 TOKYO SOKKI KENKYUJO CO., LTD.
651900D0A8 NETWORK ENGINES, INC.
652000D0A9 SHINANO KENSHI CO., LTD.
652100D0AA CHASE COMMUNICATIONS
652200D0AB DELTAKABEL TELECOM CV
652300D0AC GRAYSON WIRELESS
652400D0AD TL INDUSTRIES
652500D0AE ORESIS COMMUNICATIONS, INC.
652600D0AF CUTLER-HAMMER, INC.
652700D0B0 BITSWITCH LTD.
652800D0B1 OMEGA ELECTRONICS SA
652900D0B2 XIOTECH CORPORATION
653000D0B3 DRS FLIGHT SAFETY AND
653100D0B4 KATSUJIMA CO., LTD.
653200D0B5 IPricot formerly DotCom
653300D0B6 CRESCENT NETWORKS, INC.
653400D0B7 INTEL CORPORATION
653500D0B8 IOMEGA CORP.
653600D0B9 MICROTEK INTERNATIONAL, INC.
653700D0BA CISCO SYSTEMS, INC.
653800D0BB CISCO SYSTEMS, INC.
653900D0BC CISCO SYSTEMS, INC.
654000D0BD SICAN GMBH
654100D0BE EMUTEC INC.
654200D0BF PIVOTAL TECHNOLOGIES
654300D0C0 CISCO SYSTEMS, INC.
654400D0C1 HARMONIC DATA SYSTEMS, LTD.
654500D0C2 BALTHAZAR TECHNOLOGY AB
654600D0C3 VIVID TECHNOLOGY PTE, LTD.
654700D0C4 TERATECH CORPORATION
654800D0C5 COMPUTATIONAL SYSTEMS, INC.
654900D0C6 THOMAS & BETTS CORP.
655000D0C7 PATHWAY, INC.
655100D0C8 I/O CONSULTING A/S
655200D0C9 ADVANTECH CO., LTD.
655300D0CA INTRINSYC SOFTWARE INC.
655400D0CB DASAN CO., LTD.
655500D0CC TECHNOLOGIES LYRE INC.
655600D0CD ATAN TECHNOLOGY INC.
655700D0CE ASYST ELECTRONIC
655800D0CF MORETON BAY
655900D0D0 ZHONGXING TELECOM LTD.
656000D0D1 SIROCCO SYSTEMS, INC.
656100D0D2 EPILOG CORPORATION
656200D0D3 CISCO SYSTEMS, INC.
656300D0D4 V-BITS, INC.
656400D0D5 GRUNDIG AG
656500D0D6 AETHRA TELECOMUNICAZIONI
656600D0D7 B2C2, INC.
656700D0D8 3Com Corporation
656800D0D9 DEDICATED MICROCOMPUTERS
656900D0DA TAICOM DATA SYSTEMS CO., LTD.
657000D0DB MCQUAY INTERNATIONAL
657100D0DC MODULAR MINING SYSTEMS, INC.
657200D0DD SUNRISE TELECOM, INC.
657300D0DE PHILIPS MULTIMEDIA NETWORK
657400D0DF KUZUMI ELECTRONICS, INC.
657500D0E0 DOOIN ELECTRONICS CO.
657600D0E1 AVIONITEK ISRAEL INC.
657700D0E2 MRT MICRO, INC.
657800D0E3 ELE-CHEM ENGINEERING CO., LTD.
657900D0E4 CISCO SYSTEMS, INC.
658000D0E5 SOLIDUM SYSTEMS CORP.
658100D0E6 IBOND INC.
658200D0E7 VCON TELECOMMUNICATION LTD.
658300D0E8 MAC SYSTEM CO., LTD.
658400D0E9 ADVANTAGE CENTURY
658500D0EA NEXTONE COMMUNICATIONS, INC.
658600D0EB LIGHTERA NETWORKS, INC.
658700D0EC NAKAYO TELECOMMUNICATIONS, INC
658800D0ED XIOX
658900D0EE DICTAPHONE CORPORATION
659000D0EF IGT
659100D0F0 CONVISION TECHNOLOGY GMBH
659200D0F1 SEGA ENTERPRISES, LTD.
659300D0F2 MONTEREY NETWORKS
659400D0F3 SOLARI DI UDINE SPA
659500D0F4 CARINTHIAN TECH INSTITUTE
659600D0F5 ORANGE MICRO, INC.
659700D0F6 Alcatel Canada
659800D0F7 NEXT NETS CORPORATION
659900D0F8 FUJIAN STAR TERMINAL
660000D0F9 ACUTE COMMUNICATIONS CORP.
660100D0FA RACAL GUARDATA
660200D0FB TEK MICROSYSTEMS, INCORPORATED
660300D0FC GRANITE MICROSYSTEMS
660400D0FD OPTIMA TELE.COM, INC.
660500D0FE ASTRAL POINT
660600D0FF CISCO SYSTEMS, INC.
660700DD00 UNGERMANN-BASS INC.
660800DD01 UNGERMANN-BASS INC.
660900DD02 UNGERMANN-BASS INC.
661000DD03 UNGERMANN-BASS INC.
661100DD04 UNGERMANN-BASS INC.
661200DD05 UNGERMANN-BASS INC.
661300DD06 UNGERMANN-BASS INC.
661400DD07 UNGERMANN-BASS INC.
661500DD08 UNGERMANN-BASS INC.
661600DD09 UNGERMANN-BASS INC.
661700DD0A UNGERMANN-BASS INC.
661800DD0B UNGERMANN-BASS INC.
661900DD0C UNGERMANN-BASS INC.
662000DD0D UNGERMANN-BASS INC.
662100DD0E UNGERMANN-BASS INC.
662200DD0F UNGERMANN-BASS INC.
662300E000 FUJITSU, LTD
662400E001 STRAND LIGHTING LIMITED
662500E002 CROSSROADS SYSTEMS, INC.
662600E003 NOKIA WIRELESS BUSINESS COMMUN
662700E004 PMC-SIERRA, INC.
662800E005 TECHNICAL CORP.
662900E006 SILICON INTEGRATED SYS. CORP.
663000E007 NETWORK ALCHEMY LTD.
663100E008 AMAZING CONTROLS! INC.
663200E009 MARATHON TECHNOLOGIES CORP.
663300E00A DIBA, INC.
663400E00B ROOFTOP COMMUNICATIONS CORP.
663500E00C MOTOROLA
663600E00D RADIANT SYSTEMS
663700E00E AVALON IMAGING SYSTEMS, INC.
663800E00F SHANGHAI BAUD DATA
663900E010 HESS SB-AUTOMATENBAU GmbH
664000E011 UNIDEN SAN DIEGO R&D CENTER, INC.
664100E012 PLUTO TECHNOLOGIES INTERNATIONAL INC.
664200E013 EASTERN ELECTRONIC CO., LTD.
664300E014 CISCO SYSTEMS, INC.
664400E015 HEIWA CORPORATION
664500E016 RAPID CITY COMMUNICATIONS
664600E017 EXXACT GmbH
664700E018 ASUSTEK COMPUTER INC.
664800E019 ING. GIORDANO ELETTRONICA
664900E01A COMTEC SYSTEMS. CO., LTD.
665000E01B SPHERE COMMUNICATIONS, INC.
665100E01C MOBILITY ELECTRONICSY
665200E01D WebTV NETWORKS, INC.
665300E01E CISCO SYSTEMS, INC.
665400E01F AVIDIA Systems, Inc.
665500E020 TECNOMEN OY
665600E021 FREEGATE CORP.
665700E022 MediaLight, Inc.
665800E023 TELRAD
665900E024 GADZOOX NETWORKS
666000E025 dit CO., LTD.
666100E026 EASTMAN KODAK CO.
666200E027 DUX, INC.
666300E028 APTIX CORPORATION
666400E029 STANDARD MICROSYSTEMS CORP.
666500E02A TANDBERG TELEVISION AS
666600E02B EXTREME NETWORKS
666700E02C AST COMPUTER
666800E02D InnoMediaLogic, Inc.
666900E02E SPC ELECTRONICS CORPORATION
667000E02F MCNS HOLDINGS, L.P.
667100E030 MELITA INTERNATIONAL CORP.
667200E031 HAGIWARA ELECTRIC CO., LTD.
667300E032 MISYS FINANCIAL SYSTEMS, LTD.
667400E033 E.E.P.D. GmbH
667500E034 CISCO SYSTEMS, INC.
667600E035 LOUGHBOROUGH SOUND IMAGES, PLC
667700E036 PIONEER CORPORATION
667800E037 CENTURY CORPORATION
667900E038 PROXIMA CORPORATION
668000E039 PARADYNE CORP.
668100E03A CABLETRON SYSTEMS, INC.
668200E03B PROMINET CORPORATION
668300E03C AdvanSys
668400E03D FOCON ELECTRONIC SYSTEMS A/S
668500E03E ALFATECH, INC.
668600E03F JATON CORPORATION
668700E040 DeskStation Technology, Inc.
668800E041 CSPI
668900E042 Pacom Systems Ltd.
669000E043 VitalCom
669100E044 LSICS CORPORATION
669200E045 TOUCHWAVE, INC.
669300E046 BENTLY NEVADA CORP.
669400E047 INFOCUS SYSTEMS
669500E048 SDL COMMUNICATIONS, INC.
669600E049 MICROWI ELECTRONIC GmbH
669700E04A ENHANCED MESSAGING SYSTEMS, INC
669800E04B JUMP INDUSTRIELLE COMPUTERTECHNIK GmbH
669900E04C REALTEK SEMICONDUCTOR CORP.
670000E04D INTERNET INITIATIVE JAPAN, INC
670100E04E SANYO DENKI CO., LTD.
670200E04F CISCO SYSTEMS, INC.
670300E050 EXECUTONE INFORMATION SYSTEMS, INC.
670400E051 TALX CORPORATION
670500E052 FOUNDRY NETWORKS, INC.
670600E053 CELLPORT LABS, INC.
670700E054 KODAI HITEC CO., LTD.
670800E055 INGENIERIA ELECTRONICA COMERCIAL INELCOM S.A.
670900E056 HOLONTECH CORPORATION
671000E057 HAN MICROTELECOM. CO., LTD.
671100E058 PHASE ONE DENMARK A/S
671200E059 CONTROLLED ENVIRONMENTS, LTD.
671300E05A GALEA NETWORK SECURITY
671400E05B WEST END SYSTEMS CORP.
671500E05C MATSUSHITA KOTOBUKI ELECTRONICS INDUSTRIES, LTD.
671600E05D UNITEC CO., LTD.
671700E05E JAPAN AVIATION ELECTRONICS INDUSTRY, LTD.
671800E05F e-Net, Inc.
671900E060 SHERWOOD
672000E061 EdgePoint Networks, Inc.
672100E062 HOST ENGINEERING
672200E063 CABLETRON - YAGO SYSTEMS, INC.
672300E064 SAMSUNG ELECTRONICS
672400E065 OPTICAL ACCESS INTERNATIONAL
672500E066 ProMax Systems, Inc.
672600E067 eac AUTOMATION-CONSULTING GmbH
672700E068 MERRIMAC SYSTEMS INC.
672800E069 JAYCOR
672900E06A KAPSCH AG
673000E06B W&G SPECIAL PRODUCTS
673100E06C AEP Systems International Ltd
673200E06D COMPUWARE CORPORATION
673300E06E FAR SYSTEMS S.p.A.
673400E06F Terayon Communications Systems
673500E070 DH TECHNOLOGY
673600E071 EPIS MICROCOMPUTER
673700E072 LYNK
673800E073 NATIONAL AMUSEMENT NETWORK, INC.
673900E074 TIERNAN COMMUNICATIONS, INC.
674000E075 Verilink Corporation
674100E076 DEVELOPMENT CONCEPTS, INC.
674200E077 WEBGEAR, INC.
674300E078 BERKELEY NETWORKS
674400E079 A.T.N.R.
674500E07A MIKRODIDAKT AB
674600E07B BAY NETWORKS
674700E07C METTLER-TOLEDO, INC.
674800E07D NETRONIX, INC.
674900E07E WALT DISNEY IMAGINEERING
675000E07F LOGISTISTEM s.r.l.
675100E080 CONTROL RESOURCES CORPORATION
675200E081 TYAN COMPUTER CORP.
675300E082 ANERMA
675400E083 JATO TECHNOLOGIES, INC.
675500E084 COMPULITE R&D
675600E085 GLOBAL MAINTECH, INC.
675700E086 CYBEX COMPUTER PRODUCTS
675800E087 LeCroy - Networking Productions Division
675900E088 LTX CORPORATION
676000E089 ION Networks, Inc.
676100E08A GEC AVERY, LTD.
676200E08B QLogic Corp.
676300E08C NEOPARADIGM LABS, INC.
676400E08D PRESSURE SYSTEMS, INC.
676500E08E UTSTARCOM
676600E08F CISCO SYSTEMS, INC.
676700E090 BECKMAN LAB. AUTOMATION DIV.
676800E091 LG ELECTRONICS, INC.
676900E092 ADMTEK INCORPORATED
677000E093 ACKFIN NETWORKS
677100E094 OSAI SRL
677200E095 ADVANCED-VISION TECHNOLGIES CORP.
677300E096 SHIMADZU CORPORATION
677400E097 CARRIER ACCESS CORPORATION
677500E098 AboCom Systems, Inc.
677600E099 SAMSON AG
677700E09A POSITRON INDUSTRIES, INC.
677800E09B ENGAGE NETWORKS, INC.
677900E09C MII
678000E09D SARNOFF CORPORATION
678100E09E QUANTUM CORPORATION
678200E09F PIXEL VISION
678300E0A0 WILTRON CO.
678400E0A1 HIMA PAUL HILDEBRANDT GmbH Co. KG
678500E0A2 MICROSLATE INC.
678600E0A3 CISCO SYSTEMS, INC.
678700E0A4 ESAOTE S.p.A.
678800E0A5 ComCore Semiconductor, Inc.
678900E0A6 TELOGY NETWORKS, INC.
679000E0A7 IPC INFORMATION SYSTEMS, INC.
679100E0A8 SAT GmbH & Co.
679200E0A9 FUNAI ELECTRIC CO., LTD.
679300E0AA ELECTROSONIC LTD.
679400E0AB DIMAT S.A.
679500E0AC MIDSCO, INC.
679600E0AD EES TECHNOLOGY, LTD.
679700E0AE XAQTI CORPORATION
679800E0AF GENERAL DYNAMICS INFORMATION SYSTEMS
679900E0B0 CISCO SYSTEMS, INC.
680000E0B1 PACKET ENGINES, INC.
680100E0B2 TELMAX COMMUNICATIONS CORP.
680200E0B3 EtherWAN Systems, Inc.
680300E0B4 TECHNO SCOPE CO., LTD.
680400E0B5 ARDENT COMMUNICATIONS CORP.
680500E0B6 Entrada Networks
680600E0B7 PI GROUP, LTD.
680700E0B8 GATEWAY 2000
680800E0B9 BYAS SYSTEMS
680900E0BA BERGHOF AUTOMATIONSTECHNIK GmbH
681000E0BB NBX CORPORATION
681100E0BC SYMON COMMUNICATIONS, INC.
681200E0BD INTERFACE SYSTEMS, INC.
681300E0BE GENROCO INTERNATIONAL, INC.
681400E0BF TORRENT NETWORKING TECHNOLOGIES CORP.
681500E0C0 SEIWA ELECTRIC MFG. CO., LTD.
681600E0C1 MEMOREX TELEX JAPAN, LTD.
681700E0C2 NECSY S.p.A.
681800E0C3 SAKAI SYSTEM DEVELOPMENT CORP.
681900E0C4 HORNER ELECTRIC, INC.
682000E0C5 BCOM ELECTRONICS INC.
682100E0C6 LINK2IT, L.L.C.
682200E0C7 EUROTECH SRL
682300E0C8 VIRTUAL ACCESS, LTD.
682400E0C9 AutomatedLogic Corporation
682500E0CA BEST DATA PRODUCTS
682600E0CB RESON, INC.
682700E0CC HERO SYSTEMS, LTD.
682800E0CD SENSIS CORPORATION
682900E0CE ARN
683000E0CF INTEGRATED DEVICE TECHNOLOGY, INC.
683100E0D0 NETSPEED, INC.
683200E0D1 TELSIS LIMITED
683300E0D2 VERSANET COMMUNICATIONS, INC.
683400E0D3 DATENTECHNIK GmbH
683500E0D4 EXCELLENT COMPUTER
683600E0D5 ARCXEL TECHNOLOGIES, INC.
683700E0D6 COMPUTER & COMMUNICATION RESEARCH LAB.
683800E0D7 SUNSHINE ELECTRONICS, INC.
683900E0D8 LANBit Computer, Inc.
684000E0D9 TAZMO CO., LTD.
684100E0DA ASSURED ACCESS TECHNOLOGY, INC.
684200E0DB ViaVideo Communications, Inc.
684300E0DC NEXWARE CORP.
684400E0DD ZENITH ELECTRONICS CORPORATION
684500E0DE DATAX NV
684600E0DF KE KOMMUNIKATIONS-ELECTRONIK
684700E0E0 SI ELECTRONICS, LTD.
684800E0E1 G2 NETWORKS, INC.
684900E0E2 INNOVA CORP.
685000E0E3 SK-ELEKTRONIK GmbH
685100E0E4 FANUC ROBOTICS NORTH AMERICA, Inc.
685200E0E5 CINCO NETWORKS, INC.
685300E0E6 INCAA DATACOM B.V.
685400E0E7 RAYTHEON E-SYSTEMS, INC.
685500E0E8 GRETACODER Data Systems AG
685600E0E9 DATA LABS, INC.
685700E0EA INNOVAT COMMUNICATIONS, INC.
685800E0EB DIGICOM SYSTEMS, INCORPORATED
685900E0EC CELESTICA INC.
686000E0ED SILICOM, LTD.
686100E0EE MAREL HF
686200E0EF DIONEX
686300E0F0 ABLER TECHNOLOGY, INC.
686400E0F1 THAT CORPORATION
686500E0F2 ARLOTTO COMNET, INC.
686600E0F3 WebSprint Communications, Inc.
686700E0F4 INSIDE Technology A/S
686800E0F5 TELES AG
686900E0F6 DECISION EUROPE
687000E0F7 CISCO SYSTEMS, INC.
687100E0F8 DICNA CONTROL AB
687200E0F9 CISCO SYSTEMS, INC.
687300E0FA TRL TECHNOLOGY, LTD.
687400E0FB LEIGHTRONIX, INC.
687500E0FC HUAWEI TECHNOLOGIES CO., LTD.
687600E0FD A-TREND TECHNOLOGY CO., LTD.
687700E0FE CISCO SYSTEMS, INC.
687800E0FF SECURITY DYNAMICS TECHNOLOGIES, Inc.
687900E6D3 NIXDORF COMPUTER CORP.
6880020701 RACAL-DATACOM
6881021C7C PERQ SYSTEMS CORPORATION
6882026086 LOGIC REPLACEMENT TECH. LTD.
688302608C 3COM CORPORATION
6884027001 RACAL-DATACOM
68850270B0 M/A-COM INC. COMPANIES
68860270B3 DATA RECALL LTD
6887029D8E CARDIAC RECORDERS INC.
688802AA3C OLIVETTI TELECOMM SPA (OLTECO)
688902BB01 OCTOTHORPE CORP.
689002C08C 3COM CORPORATION
689102CF1C COMMUNICATION MACHINERY CORP.
689202E6D3 NIXDORF COMPUTER CORPORATION
6893040AE0 XMIT AG COMPUTER NETWORKS
689404E0C4 TRIUMPH-ADLER AG
6895080001 COMPUTERVISION CORPORATION
6896080002 BRIDGE COMMUNICATIONS INC.
6897080003 ADVANCED COMPUTER COMM.
6898080004 CROMEMCO INCORPORATED
6899080005 SYMBOLICS INC.
6900080006 SIEMENS AG
6901080007 APPLE COMPUTER INC.
6902080008 BOLT BERANEK AND NEWMAN INC.
6903080009 HEWLETT PACKARD
690408000A NESTAR SYSTEMS INCORPORATED
690508000B UNISYS CORPORATION
690608000C MIKLYN DEVELOPMENT CO.
690708000D INTERNATIONAL COMPUTERS LTD.
690808000E NCR CORPORATION
690908000F MITEL CORPORATION
6910080011 TEKTRONIX INC.
6911080012 BELL ATLANTIC INTEGRATED SYST.
6912080013 EXXON
6913080014 EXCELAN
6914080015 STC BUSINESS SYSTEMS
6915080016 BARRISTER INFO SYS CORP
6916080017 NATIONAL SEMICONDUCTOR
6917080018 PIRELLI FOCOM NETWORKS
6918080019 GENERAL ELECTRIC CORPORATION
691908001A TIARA/ 10NET
692008001B DATA GENERAL
692108001C KDD-KOKUSAI DEBNSIN DENWA CO.
692208001D ABLE COMMUNICATIONS INC.
692308001E APOLLO COMPUTER INC.
692408001F SHARP CORPORATION
6925080020 SUN MICROSYSTEMS INC.
6926080021 3M COMPANY
6927080022 NBI INC.
6928080023 Panasonic Communications Co., Ltd.
6929080024 10NET COMMUNICATIONS/DCA
6930080025 CONTROL DATA
6931080026 NORSK DATA A.S.
6932080027 CADMUS COMPUTER SYSTEMS
6933080028 Texas Instruments
6934080029 MEGATEK CORPORATION
693508002A MOSAIC TECHNOLOGIES INC.
693608002B DIGITAL EQUIPMENT CORPORATION
693708002C BRITTON LEE INC.
693808002D LAN-TEC INC.
693908002E METAPHOR COMPUTER SYSTEMS
694008002F PRIME COMPUTER INC.
6941080030 NETWORK RESEARCH CORPORATION
6942080030 CERN
6943080030 ROYAL MELBOURNE INST OF TECH
6944080031 LITTLE MACHINES INC.
6945080032 TIGAN INCORPORATED
6946080033 BAUSCH & LOMB
6947080034 FILENET CORPORATION
6948080035 MICROFIVE CORPORATION
6949080036 INTERGRAPH CORPORATION
6950080037 FUJI-XEROX CO. LTD.
6951080038 CII HONEYWELL BULL
6952080039 SPIDER SYSTEMS LIMITED
695308003A ORCATECH INC.
695408003B TORUS SYSTEMS LIMITED
695508003C SCHLUMBERGER WELL SERVICES
695608003D CADNETIX CORPORATIONS
695708003E CODEX CORPORATION
695808003F FRED KOSCHARA ENTERPRISES
6959080040 FERRANTI COMPUTER SYS. LIMITED
6960080041 RACAL-MILGO INFORMATION SYS..
6961080042 JAPAN MACNICS CORP.
6962080043 PIXEL COMPUTER INC.
6963080044 DAVID SYSTEMS INC.
6964080045 CONCURRENT COMPUTER CORP.
6965080046 SONY CORPORATION LTD.
6966080047 SEQUENT COMPUTER SYSTEMS INC.
6967080048 EUROTHERM GAUGING SYSTEMS
6968080049 UNIVATION
696908004A BANYAN SYSTEMS INC.
697008004B PLANNING RESEARCH CORP.
697108004C HYDRA COMPUTER SYSTEMS INC.
697208004D CORVUS SYSTEMS INC.
697308004E 3COM EUROPE LTD.
697408004F CYGNET SYSTEMS
6975080050 DAISY SYSTEMS CORP.
6976080051 EXPERDATA
6977080052 INSYSTEC
6978080053 MIDDLE EAST TECH. UNIVERSITY
6979080055 STANFORD TELECOMM. INC.
6980080056 STANFORD LINEAR ACCEL. CENTER
6981080057 EVANS & SUTHERLAND
6982080058 SYSTEMS CONCEPTS
6983080059 A/S MYCRON
698408005A IBM CORPORATION
698508005B VTA TECHNOLOGIES INC.
698608005C FOUR PHASE SYSTEMS
698708005D GOULD INC.
698808005E COUNTERPOINT COMPUTER INC.
698908005F SABER TECHNOLOGY CORP.
6990080060 INDUSTRIAL NETWORKING INC.
6991080061 JAROGATE LTD.
6992080062 GENERAL DYNAMICS
6993080063 PLESSEY
6994080064 AUTOPHON AG
6995080065 GENRAD INC.
6996080066 AGFA CORPORATION
6997080067 COMDESIGN
6998080068 RIDGE COMPUTERS
6999080069 SILICON GRAPHICS INC.
700008006A ATT BELL LABORATORIES
700108006B ACCEL TECHNOLOGIES INC.
700208006C SUNTEK TECHNOLOGY INT'L
700308006D WHITECHAPEL COMPUTER WORKS
700408006E MASSCOMP
700508006F PHILIPS APELDOORN B.V.
7006080070 MITSUBISHI ELECTRIC CORP.
7007080071 MATRA (DSIE)
7008080072 XEROX CORP UNIV GRANT PROGRAM
7009080073 TECMAR INC.
7010080074 CASIO COMPUTER CO. LTD.
7011080075 DANSK DATA ELECTRONIK
7012080076 PC LAN TECHNOLOGIES
7013080077 TSL COMMUNICATIONS LTD.
7014080078 ACCELL CORPORATION
7015080079 THE DROID WORKS
701608007A INDATA
701708007B SANYO ELECTRIC CO. LTD.
701808007C VITALINK COMMUNICATIONS CORP.
701908007E AMALGAMATED WIRELESS(AUS) LTD
702008007F CARNEGIE-MELLON UNIVERSITY
7021080080 AES DATA INC.
7022080081 ,ASTECH INC.
7023080082 VERITAS SOFTWARE
7024080083 Seiko Instruments Inc.
7025080084 TOMEN ELECTRONICS CORP.
7026080085 ELXSI
7027080086 KONICA MINOLTA HOLDINGS, INC.
7028080087 XYPLEX
7029080088 MCDATA CORPORATION
7030080089 KINETICS
703108008A PERFORMANCE TECHNOLOGY
703208008B PYRAMID TECHNOLOGY CORP.
703308008C NETWORK RESEARCH CORPORATION
703408008D XYVISION INC.
703508008E TANDEM COMPUTERS
703608008F CHIPCOM CORPORATION
7037080090 SONOMA SYSTEMS
7038081443 UNIBRAIN S.A.
703908BBCC AK-NORD EDV VERTRIEBSGES. mbH
704010005A IBM CORPORATION
70411000E8 NATIONAL SEMICONDUCTOR
7042800010 ATT BELL LABORATORIES
7043A06A00 Verilink Corporation
7044AA0000 DIGITAL EQUIPMENT CORPORATION
7045AA0001 DIGITAL EQUIPMENT CORPORATION
7046AA0002 DIGITAL EQUIPMENT CORPORATION
7047AA0003 DIGITAL EQUIPMENT CORPORATION
7048AA0004 DIGITAL EQUIPMENT CORPORATION
diff --git a/drivers/ieee1394/oui2c.sh b/drivers/ieee1394/oui2c.sh
deleted file mode 100644
index b9d0e8f10abb..000000000000
--- a/drivers/ieee1394/oui2c.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/bin/sh
-
-cat <<EOF
-/* Generated file for OUI database */
-
-
-#ifdef CONFIG_IEEE1394_OUI_DB
-struct oui_list_struct {
-	int oui;
-	char *name;
-} oui_list[] = {
-EOF
-
-while read oui name; do
-	echo "	{ 0x$oui, \"$name\" },"
-done
-
-cat <<EOF
-};
-
-#endif /* CONFIG_IEEE1394_OUI_DB */
-EOF
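
(For reference: fed lines of the form `OUI vendor-name' from the database above, the deleted script emitted a C table of the following shape; the two entries shown are illustrative samples taken from the table, not the full generated file.)

/* Generated file for OUI database */

#ifdef CONFIG_IEEE1394_OUI_DB
struct oui_list_struct {
	int oui;
	char *name;
} oui_list[] = {
	{ 0x008012, "INTEGRATED MEASUREMENT SYSTEMS" },
	{ 0x00A0C9, "INTEL CORPORATION - HF1-06" },
};

#endif /* CONFIG_IEEE1394_OUI_DB */
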
diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c
index ad2108f27a04..a77a832828c8 100644
--- a/drivers/ieee1394/raw1394.c
+++ b/drivers/ieee1394/raw1394.c
@@ -636,27 +636,32 @@ static int state_initialized(struct file_info *fi, struct pending_request *req)
 
 	case RAW1394_REQ_SET_CARD:
 		spin_lock_irqsave(&host_info_lock, flags);
-		if (req->req.misc < host_count) {
-			list_for_each_entry(hi, &host_info_list, list) {
-				if (!req->req.misc--)
-					break;
-			}
-			get_device(&hi->host->device); // XXX Need to handle failure case
-			list_add_tail(&fi->list, &hi->file_info_list);
-			fi->host = hi->host;
-			fi->state = connected;
-
-			req->req.error = RAW1394_ERROR_NONE;
-			req->req.generation = get_hpsb_generation(fi->host);
-			req->req.misc = (fi->host->node_id << 16)
-				| fi->host->node_count;
-			if (fi->protocol_version > 3) {
-				req->req.misc |=
-					NODEID_TO_NODE(fi->host->irm_id) << 8;
-			}
-		} else {
+		if (req->req.misc >= host_count) {
 			req->req.error = RAW1394_ERROR_INVALID_ARG;
+			goto out_set_card;
 		}
+		list_for_each_entry(hi, &host_info_list, list)
+			if (!req->req.misc--)
+				break;
+		get_device(&hi->host->device); /* FIXME handle failure case */
+		list_add_tail(&fi->list, &hi->file_info_list);
+
+		/* prevent unloading of the host's low-level driver */
+		if (!try_module_get(hi->host->driver->owner)) {
+			req->req.error = RAW1394_ERROR_ABORTED;
+			goto out_set_card;
+		}
+		WARN_ON(fi->host);
+		fi->host = hi->host;
+		fi->state = connected;
+
+		req->req.error = RAW1394_ERROR_NONE;
+		req->req.generation = get_hpsb_generation(fi->host);
+		req->req.misc = (fi->host->node_id << 16)
+			| fi->host->node_count;
+		if (fi->protocol_version > 3)
+			req->req.misc |= NODEID_TO_NODE(fi->host->irm_id) << 8;
+out_set_card:
 		spin_unlock_irqrestore(&host_info_lock, flags);
 
 		req->req.length = 0;
@@ -2955,6 +2960,11 @@ static int raw1394_release(struct inode *inode, struct file *file)
 		put_device(&fi->host->device);
 	}
 
+	spin_lock_irqsave(&host_info_lock, flags);
+	if (fi->host)
+		module_put(fi->host->driver->owner);
+	spin_unlock_irqrestore(&host_info_lock, flags);
+
 	kfree(fi);
 
 	return 0;
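
(The raw1394.c hunks above pair one try_module_get() on the host driver's owner, taken under host_info_lock when a file binds to a card, with exactly one module_put() in raw1394_release(), so the low-level driver cannot be unloaded while a raw1394 client is attached. A minimal userspace sketch of that acquire/release pairing follows; struct module and both helpers are simplified stand-ins for the kernel's module refcounting, not its real definitions.)

#include <stdbool.h>
#include <stdio.h>

/* illustrative stand-in for struct module and its refcount */
struct module { int refcount; bool unloading; };

static bool try_module_get(struct module *m)
{
	if (m->unloading)
		return false;	/* like the patch: bind fails, RAW1394_ERROR_ABORTED */
	m->refcount++;		/* driver pinned; cannot be unloaded */
	return true;
}

static void module_put(struct module *m)
{
	m->refcount--;		/* dropped exactly once on release */
}

int main(void)
{
	struct module lowlevel = { 0, false };

	if (!try_module_get(&lowlevel))		/* RAW1394_REQ_SET_CARD path */
		return 1;
	printf("bound: refcount=%d\n", lowlevel.refcount);	/* prints 1 */

	module_put(&lowlevel);			/* raw1394_release() path */
	printf("released: refcount=%d\n", lowlevel.refcount);	/* prints 0 */
	return 0;
}
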
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index 4325aac7733d..4edfff46b1e6 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -51,7 +51,6 @@
  * Grep for inline FIXME comments below.
  */
 
-#include <linux/blkdev.h>
 #include <linux/compiler.h>
 #include <linux/delay.h>
 #include <linux/device.h>
@@ -304,10 +303,11 @@ static struct scsi_host_template sbp2_shost_template = {
 	.use_clustering		 = ENABLE_CLUSTERING,
 	.cmd_per_lun		 = SBP2_MAX_CMDS,
 	.can_queue		 = SBP2_MAX_CMDS,
-	.emulated		 = 1,
 	.sdev_attrs		 = sbp2_sysfs_sdev_attrs,
 };
 
+/* for match-all entries in sbp2_workarounds_table */
+#define SBP2_ROM_VALUE_WILDCARD 0x1000000
 
 /*
  * List of devices with known bugs.
@@ -329,22 +329,14 @@ static const struct {
329 }, 329 },
330 /* Initio bridges, actually only needed for some older ones */ { 330 /* Initio bridges, actually only needed for some older ones */ {
331 .firmware_revision = 0x000200, 331 .firmware_revision = 0x000200,
332 .model_id = SBP2_ROM_VALUE_WILDCARD,
332 .workarounds = SBP2_WORKAROUND_INQUIRY_36, 333 .workarounds = SBP2_WORKAROUND_INQUIRY_36,
333 }, 334 },
334 /* Symbios bridge */ { 335 /* Symbios bridge */ {
335 .firmware_revision = 0xa0b800, 336 .firmware_revision = 0xa0b800,
337 .model_id = SBP2_ROM_VALUE_WILDCARD,
336 .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS, 338 .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS,
337 }, 339 },
338 /*
339 * Note about the following Apple iPod blacklist entries:
340 *
341 * There are iPods (2nd gen, 3rd gen) with model_id==0. Since our
342 * matching logic treats 0 as a wildcard, we cannot match this ID
343 * without rewriting the matching routine. Fortunately these iPods
344 * do not feature the read_capacity bug according to one report.
345 * Read_capacity behaviour as well as model_id could change due to
346 * Apple-supplied firmware updates though.
347 */
348 /* iPod 4th generation */ { 340 /* iPod 4th generation */ {
349 .firmware_revision = 0x0a2700, 341 .firmware_revision = 0x0a2700,
350 .model_id = 0x000021, 342 .model_id = 0x000021,
@@ -1307,11 +1299,13 @@ static void sbp2_parse_unit_directory(struct sbp2_lu *lu,
1307 1299
1308 if (!(workarounds & SBP2_WORKAROUND_OVERRIDE)) 1300 if (!(workarounds & SBP2_WORKAROUND_OVERRIDE))
1309 for (i = 0; i < ARRAY_SIZE(sbp2_workarounds_table); i++) { 1301 for (i = 0; i < ARRAY_SIZE(sbp2_workarounds_table); i++) {
1310 if (sbp2_workarounds_table[i].firmware_revision && 1302 if (sbp2_workarounds_table[i].firmware_revision !=
1303 SBP2_ROM_VALUE_WILDCARD &&
1311 sbp2_workarounds_table[i].firmware_revision != 1304 sbp2_workarounds_table[i].firmware_revision !=
1312 (firmware_revision & 0xffff00)) 1305 (firmware_revision & 0xffff00))
1313 continue; 1306 continue;
1314 if (sbp2_workarounds_table[i].model_id && 1307 if (sbp2_workarounds_table[i].model_id !=
1308 SBP2_ROM_VALUE_WILDCARD &&
1315 sbp2_workarounds_table[i].model_id != ud->model_id) 1309 sbp2_workarounds_table[i].model_id != ud->model_id)
1316 continue; 1310 continue;
1317 workarounds |= sbp2_workarounds_table[i].workarounds; 1311 workarounds |= sbp2_workarounds_table[i].workarounds;
@@ -2017,7 +2011,6 @@ static int sbp2scsi_slave_configure(struct scsi_device *sdev)
2017{ 2011{
2018 struct sbp2_lu *lu = (struct sbp2_lu *)sdev->host->hostdata[0]; 2012 struct sbp2_lu *lu = (struct sbp2_lu *)sdev->host->hostdata[0];
2019 2013
2020 blk_queue_dma_alignment(sdev->request_queue, (512 - 1));
2021 sdev->use_10_for_rw = 1; 2014 sdev->use_10_for_rw = 1;
2022 2015
2023 if (sdev->type == TYPE_ROM) 2016 if (sdev->type == TYPE_ROM)
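
The sbp2 change replaces the old "0 means match-any" convention in the workarounds table with an explicit SBP2_ROM_VALUE_WILDCARD sentinel. Config-ROM values are only 24 bits wide, so 0x1000000 can never occur on a real device; a genuine model_id of 0 (as on some iPods) therefore becomes matchable and the old explanatory comment can go. A sketch of the matching scheme under those assumptions (table contents are illustrative, not the real quirk list):

    #include <linux/kernel.h>       /* ARRAY_SIZE() */

    #define ROM_VALUE_WILDCARD 0x1000000  /* outside the 24-bit value range */

    struct quirk {
        unsigned int firmware_revision;
        unsigned int model_id;
        unsigned int workarounds;
    };

    static const struct quirk quirks[] = {
        { 0x000200, ROM_VALUE_WILDCARD, 0x01 }, /* any model, this firmware */
        { 0x0a2700, 0x000000,           0x02 }, /* model_id 0 now matches */
    };

    static unsigned int lookup_workarounds(unsigned int fw_rev,
                                           unsigned int model)
    {
        unsigned int w = 0;
        int i;

        for (i = 0; i < ARRAY_SIZE(quirks); i++) {
            if (quirks[i].firmware_revision != ROM_VALUE_WILDCARD &&
                quirks[i].firmware_revision != (fw_rev & 0xffff00))
                continue;
            if (quirks[i].model_id != ROM_VALUE_WILDCARD &&
                quirks[i].model_id != model)
                continue;
            w |= quirks[i].workarounds;
        }
        return w;
    }
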
diff --git a/drivers/ieee1394/video1394.c b/drivers/ieee1394/video1394.c
index 598b19fc5989..f4d1ec00af65 100644
--- a/drivers/ieee1394/video1394.c
+++ b/drivers/ieee1394/video1394.c
@@ -489,6 +489,9 @@ static void wakeup_dma_ir_ctx(unsigned long l)
489 reset_ir_status(d, i); 489 reset_ir_status(d, i);
490 d->buffer_status[d->buffer_prg_assignment[i]] = VIDEO1394_BUFFER_READY; 490 d->buffer_status[d->buffer_prg_assignment[i]] = VIDEO1394_BUFFER_READY;
491 do_gettimeofday(&d->buffer_time[d->buffer_prg_assignment[i]]); 491 do_gettimeofday(&d->buffer_time[d->buffer_prg_assignment[i]]);
492 dma_region_sync_for_cpu(&d->dma,
493 d->buffer_prg_assignment[i] * d->buf_size,
494 d->buf_size);
492 } 495 }
493 } 496 }
494 497
@@ -1096,6 +1099,8 @@ static long video1394_ioctl(struct file *file,
1096 DBGMSG(ohci->host->id, "Starting iso transmit DMA ctx=%d", 1099 DBGMSG(ohci->host->id, "Starting iso transmit DMA ctx=%d",
1097 d->ctx); 1100 d->ctx);
1098 put_timestamp(ohci, d, d->last_buffer); 1101 put_timestamp(ohci, d, d->last_buffer);
1102 dma_region_sync_for_device(&d->dma,
1103 v.buffer * d->buf_size, d->buf_size);
1099 1104
1100 /* Tell the controller where the first program is */ 1105 /* Tell the controller where the first program is */
1101 reg_write(ohci, d->cmdPtr, 1106 reg_write(ohci, d->cmdPtr,
@@ -1111,6 +1116,9 @@ static long video1394_ioctl(struct file *file,
1111 "Waking up iso transmit dma ctx=%d", 1116 "Waking up iso transmit dma ctx=%d",
1112 d->ctx); 1117 d->ctx);
1113 put_timestamp(ohci, d, d->last_buffer); 1118 put_timestamp(ohci, d, d->last_buffer);
1119 dma_region_sync_for_device(&d->dma,
1120 v.buffer * d->buf_size, d->buf_size);
1121
1114 reg_write(ohci, d->ctrlSet, 0x1000); 1122 reg_write(ohci, d->ctrlSet, 0x1000);
1115 } 1123 }
1116 } 1124 }
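
The two video1394 insertions bracket the DMA buffer handoff: sync for the CPU once the ISO receive context has filled a buffer, and sync for the device before handing a transmit buffer back to the OHCI controller. dma_region_sync_for_cpu()/_for_device() are thin ieee1394 wrappers over the generic streaming-DMA calls; the underlying ownership discipline looks like this (example_* helpers are hypothetical):

    #include <linux/dma-mapping.h>

    /* device finished writing a receive buffer: hand it to the CPU */
    static void example_rx_done(struct device *dev, dma_addr_t bus, size_t len)
    {
        dma_sync_single_for_cpu(dev, bus, len, DMA_FROM_DEVICE);
        /* ... the CPU may now read the captured frame ... */
    }

    /* CPU finished filling a transmit buffer: hand it to the device */
    static void example_tx_queue(struct device *dev, dma_addr_t bus, size_t len)
    {
        dma_sync_single_for_device(dev, bus, len, DMA_TO_DEVICE);
        /* ... start or wake the DMA context afterwards ... */
    }
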
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index af939796750d..d2bb5a9a303f 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -360,8 +360,7 @@ static int netevent_callback(struct notifier_block *self, unsigned long event,
360 if (event == NETEVENT_NEIGH_UPDATE) { 360 if (event == NETEVENT_NEIGH_UPDATE) {
361 struct neighbour *neigh = ctx; 361 struct neighbour *neigh = ctx;
362 362
363 if (neigh->dev->type == ARPHRD_INFINIBAND && 363 if (neigh->nud_state & NUD_VALID) {
364 (neigh->nud_state & NUD_VALID)) {
365 set_timeout(jiffies); 364 set_timeout(jiffies);
366 } 365 }
367 } 366 }
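
The addr.c hunk widens the neighbour-update hook: any neighbour reaching a NUD_VALID state now rearms the address-resolution retry timer, not only InfiniBand-type devices. The surrounding machinery is a netevent notifier; a minimal sketch with hypothetical example_* names:

    #include <linux/notifier.h>
    #include <net/netevent.h>
    #include <net/neighbour.h>

    static void example_rearm_timer(void)
    {
        /* stand-in for set_timeout(jiffies) in the driver above */
    }

    static int example_netevent(struct notifier_block *self,
                                unsigned long event, void *ctx)
    {
        if (event == NETEVENT_NEIGH_UPDATE) {
            struct neighbour *neigh = ctx;

            if (neigh->nud_state & NUD_VALID)
                example_rearm_timer();
        }
        return NOTIFY_DONE;
    }

    static struct notifier_block example_nb = {
        .notifier_call = example_netevent,
    };

    /* paired in module init/exit:
     *     register_netevent_notifier(&example_nb);
     *     unregister_netevent_notifier(&example_nb);
     */
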
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 5ed141ebd1c8..13efd4170349 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -642,7 +642,8 @@ static void snoop_recv(struct ib_mad_qp_info *qp_info,
642 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); 642 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
643} 643}
644 644
645static void build_smp_wc(u64 wr_id, u16 slid, u16 pkey_index, u8 port_num, 645static void build_smp_wc(struct ib_qp *qp,
646 u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
646 struct ib_wc *wc) 647 struct ib_wc *wc)
647{ 648{
648 memset(wc, 0, sizeof *wc); 649 memset(wc, 0, sizeof *wc);
@@ -652,7 +653,7 @@ static void build_smp_wc(u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
652 wc->pkey_index = pkey_index; 653 wc->pkey_index = pkey_index;
653 wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh); 654 wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
654 wc->src_qp = IB_QP0; 655 wc->src_qp = IB_QP0;
655 wc->qp_num = IB_QP0; 656 wc->qp = qp;
656 wc->slid = slid; 657 wc->slid = slid;
657 wc->sl = 0; 658 wc->sl = 0;
658 wc->dlid_path_bits = 0; 659 wc->dlid_path_bits = 0;
@@ -713,7 +714,8 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
713 goto out; 714 goto out;
714 } 715 }
715 716
716 build_smp_wc(send_wr->wr_id, be16_to_cpu(smp->dr_slid), 717 build_smp_wc(mad_agent_priv->agent.qp,
718 send_wr->wr_id, be16_to_cpu(smp->dr_slid),
717 send_wr->wr.ud.pkey_index, 719 send_wr->wr.ud.pkey_index,
718 send_wr->wr.ud.port_num, &mad_wc); 720 send_wr->wr.ud.port_num, &mad_wc);
719 721
@@ -2355,7 +2357,8 @@ static void local_completions(struct work_struct *work)
2355 * Defined behavior is to complete response 2357 * Defined behavior is to complete response
2356 * before request 2358 * before request
2357 */ 2359 */
2358 build_smp_wc((unsigned long) local->mad_send_wr, 2360 build_smp_wc(recv_mad_agent->agent.qp,
2361 (unsigned long) local->mad_send_wr,
2359 be16_to_cpu(IB_LID_PERMISSIVE), 2362 be16_to_cpu(IB_LID_PERMISSIVE),
2360 0, recv_mad_agent->agent.port_num, &wc); 2363 0, recv_mad_agent->agent.port_num, &wc);
2361 2364
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 743247ec065e..df1efbc10882 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -933,7 +933,7 @@ ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
933 resp->wc[i].vendor_err = wc[i].vendor_err; 933 resp->wc[i].vendor_err = wc[i].vendor_err;
934 resp->wc[i].byte_len = wc[i].byte_len; 934 resp->wc[i].byte_len = wc[i].byte_len;
935 resp->wc[i].imm_data = (__u32 __force) wc[i].imm_data; 935 resp->wc[i].imm_data = (__u32 __force) wc[i].imm_data;
936 resp->wc[i].qp_num = wc[i].qp_num; 936 resp->wc[i].qp_num = wc[i].qp->qp_num;
937 resp->wc[i].src_qp = wc[i].src_qp; 937 resp->wc[i].src_qp = wc[i].src_qp;
938 resp->wc[i].wc_flags = wc[i].wc_flags; 938 resp->wc[i].wc_flags = wc[i].wc_flags;
939 resp->wc[i].pkey_index = wc[i].pkey_index; 939 resp->wc[i].pkey_index = wc[i].pkey_index;
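
The mad.c and uverbs hunks above track a struct ib_wc API change: a completion now carries a struct ib_qp pointer (wc->qp) instead of a bare wc->qp_num, so build_smp_wc() takes the QP and consumers dereference the pointer when they need the number. A sketch of a poll loop against the new layout (example_poll is hypothetical):

    #include <linux/kernel.h>
    #include <rdma/ib_verbs.h>

    static void example_poll(struct ib_cq *cq)
    {
        struct ib_wc wc;

        while (ib_poll_cq(cq, 1, &wc) > 0) {
            if (wc.status != IB_WC_SUCCESS)
                continue;
            /* wc.qp_num is gone; the number lives behind the pointer,
             * which some providers (e.g. ehca below) may leave NULL */
            if (wc.qp)
                printk(KERN_INFO "completion on QP %u\n", wc.qp->qp_num);
        }
    }
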
diff --git a/drivers/infiniband/hw/amso1100/c2_cq.c b/drivers/infiniband/hw/amso1100/c2_cq.c
index 05c9154d46f4..5175c99ee586 100644
--- a/drivers/infiniband/hw/amso1100/c2_cq.c
+++ b/drivers/infiniband/hw/amso1100/c2_cq.c
@@ -153,7 +153,7 @@ static inline int c2_poll_one(struct c2_dev *c2dev,
153 153
154 entry->status = c2_cqe_status_to_openib(c2_wr_get_result(ce)); 154 entry->status = c2_cqe_status_to_openib(c2_wr_get_result(ce));
155 entry->wr_id = ce->hdr.context; 155 entry->wr_id = ce->hdr.context;
156 entry->qp_num = ce->handle; 156 entry->qp = &qp->ibqp;
157 entry->wc_flags = 0; 157 entry->wc_flags = 0;
158 entry->slid = 0; 158 entry->slid = 0;
159 entry->sl = 0; 159 entry->sl = 0;
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 1c722032319c..cf95ee474b0f 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -119,13 +119,14 @@ struct ehca_qp {
119 struct ipz_qp_handle ipz_qp_handle; 119 struct ipz_qp_handle ipz_qp_handle;
120 struct ehca_pfqp pf; 120 struct ehca_pfqp pf;
121 struct ib_qp_init_attr init_attr; 121 struct ib_qp_init_attr init_attr;
122 u64 uspace_squeue;
123 u64 uspace_rqueue;
124 u64 uspace_fwh;
125 struct ehca_cq *send_cq; 122 struct ehca_cq *send_cq;
126 struct ehca_cq *recv_cq; 123 struct ehca_cq *recv_cq;
127 unsigned int sqerr_purgeflag; 124 unsigned int sqerr_purgeflag;
128 struct hlist_node list_entries; 125 struct hlist_node list_entries;
126 /* mmap counter for resources mapped into user space */
127 u32 mm_count_squeue;
128 u32 mm_count_rqueue;
129 u32 mm_count_galpa;
129}; 130};
130 131
131/* must be power of 2 */ 132/* must be power of 2 */
@@ -142,13 +143,14 @@ struct ehca_cq {
142 struct ipz_cq_handle ipz_cq_handle; 143 struct ipz_cq_handle ipz_cq_handle;
143 struct ehca_pfcq pf; 144 struct ehca_pfcq pf;
144 spinlock_t cb_lock; 145 spinlock_t cb_lock;
145 u64 uspace_queue;
146 u64 uspace_fwh;
147 struct hlist_head qp_hashtab[QP_HASHTAB_LEN]; 146 struct hlist_head qp_hashtab[QP_HASHTAB_LEN];
148 struct list_head entry; 147 struct list_head entry;
149 u32 nr_callbacks; 148 u32 nr_callbacks;
150 spinlock_t task_lock; 149 spinlock_t task_lock;
151 u32 ownpid; 150 u32 ownpid;
151 /* mmap counter for resources mapped into user space */
152 u32 mm_count_queue;
153 u32 mm_count_galpa;
152}; 154};
153 155
154enum ehca_mr_flag { 156enum ehca_mr_flag {
@@ -248,20 +250,6 @@ struct ehca_ucontext {
248 struct ib_ucontext ib_ucontext; 250 struct ib_ucontext ib_ucontext;
249}; 251};
250 252
251struct ehca_module *ehca_module_new(void);
252
253int ehca_module_delete(struct ehca_module *me);
254
255int ehca_eq_ctor(struct ehca_eq *eq);
256
257int ehca_eq_dtor(struct ehca_eq *eq);
258
259struct ehca_shca *ehca_shca_new(void);
260
261int ehca_shca_delete(struct ehca_shca *me);
262
263struct ehca_sport *ehca_sport_new(struct ehca_shca *anchor);
264
265int ehca_init_pd_cache(void); 253int ehca_init_pd_cache(void);
266void ehca_cleanup_pd_cache(void); 254void ehca_cleanup_pd_cache(void);
267int ehca_init_cq_cache(void); 255int ehca_init_cq_cache(void);
@@ -283,7 +271,6 @@ extern int ehca_port_act_time;
283extern int ehca_use_hp_mr; 271extern int ehca_use_hp_mr;
284 272
285struct ipzu_queue_resp { 273struct ipzu_queue_resp {
286 u64 queue; /* points to first queue entry */
287 u32 qe_size; /* queue entry size */ 274 u32 qe_size; /* queue entry size */
288 u32 act_nr_of_sg; 275 u32 act_nr_of_sg;
289 u32 queue_length; /* queue length allocated in bytes */ 276 u32 queue_length; /* queue length allocated in bytes */
@@ -296,7 +283,6 @@ struct ehca_create_cq_resp {
296 u32 cq_number; 283 u32 cq_number;
297 u32 token; 284 u32 token;
298 struct ipzu_queue_resp ipz_queue; 285 struct ipzu_queue_resp ipz_queue;
299 struct h_galpas galpas;
300}; 286};
301 287
302struct ehca_create_qp_resp { 288struct ehca_create_qp_resp {
@@ -309,7 +295,6 @@ struct ehca_create_qp_resp {
309 u32 dummy; /* padding for 8 byte alignment */ 295 u32 dummy; /* padding for 8 byte alignment */
310 struct ipzu_queue_resp ipz_squeue; 296 struct ipzu_queue_resp ipz_squeue;
311 struct ipzu_queue_resp ipz_rqueue; 297 struct ipzu_queue_resp ipz_rqueue;
312 struct h_galpas galpas;
313}; 298};
314 299
315struct ehca_alloc_cq_parms { 300struct ehca_alloc_cq_parms {
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
index 6074c897f51c..9291a86ca053 100644
--- a/drivers/infiniband/hw/ehca/ehca_cq.c
+++ b/drivers/infiniband/hw/ehca/ehca_cq.c
@@ -267,7 +267,6 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe,
267 if (context) { 267 if (context) {
268 struct ipz_queue *ipz_queue = &my_cq->ipz_queue; 268 struct ipz_queue *ipz_queue = &my_cq->ipz_queue;
269 struct ehca_create_cq_resp resp; 269 struct ehca_create_cq_resp resp;
270 struct vm_area_struct *vma;
271 memset(&resp, 0, sizeof(resp)); 270 memset(&resp, 0, sizeof(resp));
272 resp.cq_number = my_cq->cq_number; 271 resp.cq_number = my_cq->cq_number;
273 resp.token = my_cq->token; 272 resp.token = my_cq->token;
@@ -276,40 +275,14 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe,
276 resp.ipz_queue.queue_length = ipz_queue->queue_length; 275 resp.ipz_queue.queue_length = ipz_queue->queue_length;
277 resp.ipz_queue.pagesize = ipz_queue->pagesize; 276 resp.ipz_queue.pagesize = ipz_queue->pagesize;
278 resp.ipz_queue.toggle_state = ipz_queue->toggle_state; 277 resp.ipz_queue.toggle_state = ipz_queue->toggle_state;
279 ret = ehca_mmap_nopage(((u64)(my_cq->token) << 32) | 0x12000000,
280 ipz_queue->queue_length,
281 (void**)&resp.ipz_queue.queue,
282 &vma);
283 if (ret) {
284 ehca_err(device, "Could not mmap queue pages");
285 cq = ERR_PTR(ret);
286 goto create_cq_exit4;
287 }
288 my_cq->uspace_queue = resp.ipz_queue.queue;
289 resp.galpas = my_cq->galpas;
290 ret = ehca_mmap_register(my_cq->galpas.user.fw_handle,
291 (void**)&resp.galpas.kernel.fw_handle,
292 &vma);
293 if (ret) {
294 ehca_err(device, "Could not mmap fw_handle");
295 cq = ERR_PTR(ret);
296 goto create_cq_exit5;
297 }
298 my_cq->uspace_fwh = (u64)resp.galpas.kernel.fw_handle;
299 if (ib_copy_to_udata(udata, &resp, sizeof(resp))) { 278 if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
300 ehca_err(device, "Copy to udata failed."); 279 ehca_err(device, "Copy to udata failed.");
301 goto create_cq_exit6; 280 goto create_cq_exit4;
302 } 281 }
303 } 282 }
304 283
305 return cq; 284 return cq;
306 285
307create_cq_exit6:
308 ehca_munmap(my_cq->uspace_fwh, EHCA_PAGESIZE);
309
310create_cq_exit5:
311 ehca_munmap(my_cq->uspace_queue, my_cq->ipz_queue.queue_length);
312
313create_cq_exit4: 286create_cq_exit4:
314 ipz_queue_dtor(&my_cq->ipz_queue); 287 ipz_queue_dtor(&my_cq->ipz_queue);
315 288
@@ -333,7 +306,6 @@ create_cq_exit1:
333int ehca_destroy_cq(struct ib_cq *cq) 306int ehca_destroy_cq(struct ib_cq *cq)
334{ 307{
335 u64 h_ret; 308 u64 h_ret;
336 int ret;
337 struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq); 309 struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
338 int cq_num = my_cq->cq_number; 310 int cq_num = my_cq->cq_number;
339 struct ib_device *device = cq->device; 311 struct ib_device *device = cq->device;
@@ -343,6 +315,20 @@ int ehca_destroy_cq(struct ib_cq *cq)
343 u32 cur_pid = current->tgid; 315 u32 cur_pid = current->tgid;
344 unsigned long flags; 316 unsigned long flags;
345 317
318 if (cq->uobject) {
319 if (my_cq->mm_count_galpa || my_cq->mm_count_queue) {
320 ehca_err(device, "Resources still referenced in "
321 "user space cq_num=%x", my_cq->cq_number);
322 return -EINVAL;
323 }
324 if (my_cq->ownpid != cur_pid) {
325 ehca_err(device, "Invalid caller pid=%x ownpid=%x "
326 "cq_num=%x",
327 cur_pid, my_cq->ownpid, my_cq->cq_number);
328 return -EINVAL;
329 }
330 }
331
346 spin_lock_irqsave(&ehca_cq_idr_lock, flags); 332 spin_lock_irqsave(&ehca_cq_idr_lock, flags);
347 while (my_cq->nr_callbacks) { 333 while (my_cq->nr_callbacks) {
348 spin_unlock_irqrestore(&ehca_cq_idr_lock, flags); 334 spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
@@ -353,25 +339,6 @@ int ehca_destroy_cq(struct ib_cq *cq)
353 idr_remove(&ehca_cq_idr, my_cq->token); 339 idr_remove(&ehca_cq_idr, my_cq->token);
354 spin_unlock_irqrestore(&ehca_cq_idr_lock, flags); 340 spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
355 341
356 if (my_cq->uspace_queue && my_cq->ownpid != cur_pid) {
357 ehca_err(device, "Invalid caller pid=%x ownpid=%x",
358 cur_pid, my_cq->ownpid);
359 return -EINVAL;
360 }
361
362 /* un-mmap if vma alloc */
363 if (my_cq->uspace_queue ) {
364 ret = ehca_munmap(my_cq->uspace_queue,
365 my_cq->ipz_queue.queue_length);
366 if (ret)
367 ehca_err(device, "Could not munmap queue ehca_cq=%p "
368 "cq_num=%x", my_cq, cq_num);
369 ret = ehca_munmap(my_cq->uspace_fwh, EHCA_PAGESIZE);
370 if (ret)
371 ehca_err(device, "Could not munmap fwh ehca_cq=%p "
372 "cq_num=%x", my_cq, cq_num);
373 }
374
375 h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 0); 342 h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 0);
376 if (h_ret == H_R_STATE) { 343 if (h_ret == H_R_STATE) {
377 /* cq in err: read err data and destroy it forcibly */ 344 /* cq in err: read err data and destroy it forcibly */
@@ -400,7 +367,7 @@ int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
400 struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq); 367 struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
401 u32 cur_pid = current->tgid; 368 u32 cur_pid = current->tgid;
402 369
403 if (my_cq->uspace_queue && my_cq->ownpid != cur_pid) { 370 if (cq->uobject && my_cq->ownpid != cur_pid) {
404 ehca_err(cq->device, "Invalid caller pid=%x ownpid=%x", 371 ehca_err(cq->device, "Invalid caller pid=%x ownpid=%x",
405 cur_pid, my_cq->ownpid); 372 cur_pid, my_cq->ownpid);
406 return -EINVAL; 373 return -EINVAL;
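
ehca_destroy_cq() now refuses to tear down a user-created CQ while its queue or firmware doorbell page is still mapped; the mm_count_* fields added in ehca_classes.h are maintained by the vma open/close hooks introduced in ehca_uverbs.c further down. A condensed sketch of the guard (example_cq is a hypothetical stand-in):

    #include <linux/errno.h>
    #include <linux/types.h>

    struct example_cq {
        u32 mm_count_queue;     /* live user mappings of the CQ ring */
        u32 mm_count_galpa;     /* live user mappings of the doorbell page */
    };

    static int example_destroy_cq(struct example_cq *cq, int is_user)
    {
        if (is_user && (cq->mm_count_queue || cq->mm_count_galpa))
            return -EINVAL;     /* still referenced from user space */
        /* ... hypervisor call to destroy the CQ, then free it ... */
        return 0;
    }
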
diff --git a/drivers/infiniband/hw/ehca/ehca_iverbs.h b/drivers/infiniband/hw/ehca/ehca_iverbs.h
index cd7789f0d08e..95fd59fb4528 100644
--- a/drivers/infiniband/hw/ehca/ehca_iverbs.h
+++ b/drivers/infiniband/hw/ehca/ehca_iverbs.h
@@ -171,14 +171,6 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
171 171
172void ehca_poll_eqs(unsigned long data); 172void ehca_poll_eqs(unsigned long data);
173 173
174int ehca_mmap_nopage(u64 foffset,u64 length,void **mapped,
175 struct vm_area_struct **vma);
176
177int ehca_mmap_register(u64 physical,void **mapped,
178 struct vm_area_struct **vma);
179
180int ehca_munmap(unsigned long addr, size_t len);
181
182#ifdef CONFIG_PPC_64K_PAGES 174#ifdef CONFIG_PPC_64K_PAGES
183void *ehca_alloc_fw_ctrlblock(gfp_t flags); 175void *ehca_alloc_fw_ctrlblock(gfp_t flags);
184void ehca_free_fw_ctrlblock(void *ptr); 176void ehca_free_fw_ctrlblock(void *ptr);
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index 6574fbbaead5..1155bcf48212 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -52,7 +52,7 @@
52MODULE_LICENSE("Dual BSD/GPL"); 52MODULE_LICENSE("Dual BSD/GPL");
53MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>"); 53MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
54MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver"); 54MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver");
55MODULE_VERSION("SVNEHCA_0019"); 55MODULE_VERSION("SVNEHCA_0020");
56 56
57int ehca_open_aqp1 = 0; 57int ehca_open_aqp1 = 0;
58int ehca_debug_level = 0; 58int ehca_debug_level = 0;
@@ -288,7 +288,7 @@ int ehca_init_device(struct ehca_shca *shca)
288 strlcpy(shca->ib_device.name, "ehca%d", IB_DEVICE_NAME_MAX); 288 strlcpy(shca->ib_device.name, "ehca%d", IB_DEVICE_NAME_MAX);
289 shca->ib_device.owner = THIS_MODULE; 289 shca->ib_device.owner = THIS_MODULE;
290 290
291 shca->ib_device.uverbs_abi_ver = 5; 291 shca->ib_device.uverbs_abi_ver = 6;
292 shca->ib_device.uverbs_cmd_mask = 292 shca->ib_device.uverbs_cmd_mask =
293 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) | 293 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
294 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) | 294 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
@@ -790,7 +790,7 @@ int __init ehca_module_init(void)
790 int ret; 790 int ret;
791 791
792 printk(KERN_INFO "eHCA Infiniband Device Driver " 792 printk(KERN_INFO "eHCA Infiniband Device Driver "
793 "(Rel.: SVNEHCA_0019)\n"); 793 "(Rel.: SVNEHCA_0020)\n");
794 idr_init(&ehca_qp_idr); 794 idr_init(&ehca_qp_idr);
795 idr_init(&ehca_cq_idr); 795 idr_init(&ehca_cq_idr);
796 spin_lock_init(&ehca_qp_idr_lock); 796 spin_lock_init(&ehca_qp_idr_lock);
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 34b85556d01e..95efef921f1d 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -637,7 +637,6 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
637 struct ipz_queue *ipz_rqueue = &my_qp->ipz_rqueue; 637 struct ipz_queue *ipz_rqueue = &my_qp->ipz_rqueue;
638 struct ipz_queue *ipz_squeue = &my_qp->ipz_squeue; 638 struct ipz_queue *ipz_squeue = &my_qp->ipz_squeue;
639 struct ehca_create_qp_resp resp; 639 struct ehca_create_qp_resp resp;
640 struct vm_area_struct * vma;
641 memset(&resp, 0, sizeof(resp)); 640 memset(&resp, 0, sizeof(resp));
642 641
643 resp.qp_num = my_qp->real_qp_num; 642 resp.qp_num = my_qp->real_qp_num;
@@ -651,59 +650,21 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
651 resp.ipz_rqueue.queue_length = ipz_rqueue->queue_length; 650 resp.ipz_rqueue.queue_length = ipz_rqueue->queue_length;
652 resp.ipz_rqueue.pagesize = ipz_rqueue->pagesize; 651 resp.ipz_rqueue.pagesize = ipz_rqueue->pagesize;
653 resp.ipz_rqueue.toggle_state = ipz_rqueue->toggle_state; 652 resp.ipz_rqueue.toggle_state = ipz_rqueue->toggle_state;
654 ret = ehca_mmap_nopage(((u64)(my_qp->token) << 32) | 0x22000000,
655 ipz_rqueue->queue_length,
656 (void**)&resp.ipz_rqueue.queue,
657 &vma);
658 if (ret) {
659 ehca_err(pd->device, "Could not mmap rqueue pages");
660 goto create_qp_exit3;
661 }
662 my_qp->uspace_rqueue = resp.ipz_rqueue.queue;
663 /* squeue properties */ 653 /* squeue properties */
664 resp.ipz_squeue.qe_size = ipz_squeue->qe_size; 654 resp.ipz_squeue.qe_size = ipz_squeue->qe_size;
665 resp.ipz_squeue.act_nr_of_sg = ipz_squeue->act_nr_of_sg; 655 resp.ipz_squeue.act_nr_of_sg = ipz_squeue->act_nr_of_sg;
666 resp.ipz_squeue.queue_length = ipz_squeue->queue_length; 656 resp.ipz_squeue.queue_length = ipz_squeue->queue_length;
667 resp.ipz_squeue.pagesize = ipz_squeue->pagesize; 657 resp.ipz_squeue.pagesize = ipz_squeue->pagesize;
668 resp.ipz_squeue.toggle_state = ipz_squeue->toggle_state; 658 resp.ipz_squeue.toggle_state = ipz_squeue->toggle_state;
669 ret = ehca_mmap_nopage(((u64)(my_qp->token) << 32) | 0x23000000,
670 ipz_squeue->queue_length,
671 (void**)&resp.ipz_squeue.queue,
672 &vma);
673 if (ret) {
674 ehca_err(pd->device, "Could not mmap squeue pages");
675 goto create_qp_exit4;
676 }
677 my_qp->uspace_squeue = resp.ipz_squeue.queue;
678 /* fw_handle */
679 resp.galpas = my_qp->galpas;
680 ret = ehca_mmap_register(my_qp->galpas.user.fw_handle,
681 (void**)&resp.galpas.kernel.fw_handle,
682 &vma);
683 if (ret) {
684 ehca_err(pd->device, "Could not mmap fw_handle");
685 goto create_qp_exit5;
686 }
687 my_qp->uspace_fwh = (u64)resp.galpas.kernel.fw_handle;
688
689 if (ib_copy_to_udata(udata, &resp, sizeof resp)) { 659 if (ib_copy_to_udata(udata, &resp, sizeof resp)) {
690 ehca_err(pd->device, "Copy to udata failed"); 660 ehca_err(pd->device, "Copy to udata failed");
691 ret = -EINVAL; 661 ret = -EINVAL;
692 goto create_qp_exit6; 662 goto create_qp_exit3;
693 } 663 }
694 } 664 }
695 665
696 return &my_qp->ib_qp; 666 return &my_qp->ib_qp;
697 667
698create_qp_exit6:
699 ehca_munmap(my_qp->uspace_fwh, EHCA_PAGESIZE);
700
701create_qp_exit5:
702 ehca_munmap(my_qp->uspace_squeue, my_qp->ipz_squeue.queue_length);
703
704create_qp_exit4:
705 ehca_munmap(my_qp->uspace_rqueue, my_qp->ipz_rqueue.queue_length);
706
707create_qp_exit3: 668create_qp_exit3:
708 ipz_queue_dtor(&my_qp->ipz_rqueue); 669 ipz_queue_dtor(&my_qp->ipz_rqueue);
709 ipz_queue_dtor(&my_qp->ipz_squeue); 670 ipz_queue_dtor(&my_qp->ipz_squeue);
@@ -931,7 +892,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
931 my_qp->qp_type == IB_QPT_SMI) && 892 my_qp->qp_type == IB_QPT_SMI) &&
932 statetrans == IB_QPST_SQE2RTS) { 893 statetrans == IB_QPST_SQE2RTS) {
933 /* mark next free wqe if kernel */ 894 /* mark next free wqe if kernel */
934 if (my_qp->uspace_squeue == 0) { 895 if (!ibqp->uobject) {
935 struct ehca_wqe *wqe; 896 struct ehca_wqe *wqe;
936 /* lock send queue */ 897 /* lock send queue */
937 spin_lock_irqsave(&my_qp->spinlock_s, spl_flags); 898 spin_lock_irqsave(&my_qp->spinlock_s, spl_flags);
@@ -1417,11 +1378,18 @@ int ehca_destroy_qp(struct ib_qp *ibqp)
1417 enum ib_qp_type qp_type; 1378 enum ib_qp_type qp_type;
1418 unsigned long flags; 1379 unsigned long flags;
1419 1380
1420 if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context && 1381 if (ibqp->uobject) {
1421 my_pd->ownpid != cur_pid) { 1382 if (my_qp->mm_count_galpa ||
1422 ehca_err(ibqp->device, "Invalid caller pid=%x ownpid=%x", 1383 my_qp->mm_count_rqueue || my_qp->mm_count_squeue) {
1423 cur_pid, my_pd->ownpid); 1384 ehca_err(ibqp->device, "Resources still referenced in "
1424 return -EINVAL; 1385 "user space qp_num=%x", ibqp->qp_num);
1386 return -EINVAL;
1387 }
1388 if (my_pd->ownpid != cur_pid) {
1389 ehca_err(ibqp->device, "Invalid caller pid=%x ownpid=%x",
1390 cur_pid, my_pd->ownpid);
1391 return -EINVAL;
1392 }
1425 } 1393 }
1426 1394
1427 if (my_qp->send_cq) { 1395 if (my_qp->send_cq) {
@@ -1439,24 +1407,6 @@ int ehca_destroy_qp(struct ib_qp *ibqp)
1439 idr_remove(&ehca_qp_idr, my_qp->token); 1407 idr_remove(&ehca_qp_idr, my_qp->token);
1440 spin_unlock_irqrestore(&ehca_qp_idr_lock, flags); 1408 spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);
1441 1409
1442 /* un-mmap if vma alloc */
1443 if (my_qp->uspace_rqueue) {
1444 ret = ehca_munmap(my_qp->uspace_rqueue,
1445 my_qp->ipz_rqueue.queue_length);
1446 if (ret)
1447 ehca_err(ibqp->device, "Could not munmap rqueue "
1448 "qp_num=%x", qp_num);
1449 ret = ehca_munmap(my_qp->uspace_squeue,
1450 my_qp->ipz_squeue.queue_length);
1451 if (ret)
1452 ehca_err(ibqp->device, "Could not munmap squeue "
1453 "qp_num=%x", qp_num);
1454 ret = ehca_munmap(my_qp->uspace_fwh, EHCA_PAGESIZE);
1455 if (ret)
1456 ehca_err(ibqp->device, "Could not munmap fwh qp_num=%x",
1457 qp_num);
1458 }
1459
1460 h_ret = hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp); 1410 h_ret = hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
1461 if (h_ret != H_SUCCESS) { 1411 if (h_ret != H_SUCCESS) {
1462 ehca_err(ibqp->device, "hipz_h_destroy_qp() failed rc=%lx " 1412 ehca_err(ibqp->device, "hipz_h_destroy_qp() failed rc=%lx "
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c
index b46bda1bf85d..08d3f892d9f3 100644
--- a/drivers/infiniband/hw/ehca/ehca_reqs.c
+++ b/drivers/infiniband/hw/ehca/ehca_reqs.c
@@ -579,7 +579,7 @@ poll_cq_one_read_cqe:
579 } else 579 } else
580 wc->status = IB_WC_SUCCESS; 580 wc->status = IB_WC_SUCCESS;
581 581
582 wc->qp_num = cqe->local_qp_number; 582 wc->qp = NULL;
583 wc->byte_len = cqe->nr_bytes_transferred; 583 wc->byte_len = cqe->nr_bytes_transferred;
584 wc->pkey_index = cqe->pkey_index; 584 wc->pkey_index = cqe->pkey_index;
585 wc->slid = cqe->rlid; 585 wc->slid = cqe->rlid;
diff --git a/drivers/infiniband/hw/ehca/ehca_uverbs.c b/drivers/infiniband/hw/ehca/ehca_uverbs.c
index e08764e4aef2..73db920b6945 100644
--- a/drivers/infiniband/hw/ehca/ehca_uverbs.c
+++ b/drivers/infiniband/hw/ehca/ehca_uverbs.c
@@ -68,105 +68,183 @@ int ehca_dealloc_ucontext(struct ib_ucontext *context)
68 return 0; 68 return 0;
69} 69}
70 70
71struct page *ehca_nopage(struct vm_area_struct *vma, 71static void ehca_mm_open(struct vm_area_struct *vma)
72 unsigned long address, int *type)
73{ 72{
74 struct page *mypage = NULL; 73 u32 *count = (u32*)vma->vm_private_data;
75 u64 fileoffset = vma->vm_pgoff << PAGE_SHIFT; 74 if (!count) {
76 u32 idr_handle = fileoffset >> 32; 75 ehca_gen_err("Invalid vma struct vm_start=%lx vm_end=%lx",
77 u32 q_type = (fileoffset >> 28) & 0xF; /* CQ, QP,... */ 76 vma->vm_start, vma->vm_end);
78 u32 rsrc_type = (fileoffset >> 24) & 0xF; /* sq,rq,cmnd_window */ 77 return;
79 u32 cur_pid = current->tgid; 78 }
80 unsigned long flags; 79 (*count)++;
81 struct ehca_cq *cq; 80 if (!(*count))
82 struct ehca_qp *qp; 81 ehca_gen_err("Use count overflow vm_start=%lx vm_end=%lx",
83 struct ehca_pd *pd; 82 vma->vm_start, vma->vm_end);
84 u64 offset; 83 ehca_gen_dbg("vm_start=%lx vm_end=%lx count=%x",
85 void *vaddr; 84 vma->vm_start, vma->vm_end, *count);
85}
86 86
87 switch (q_type) { 87static void ehca_mm_close(struct vm_area_struct *vma)
88 case 1: /* CQ */ 88{
89 spin_lock_irqsave(&ehca_cq_idr_lock, flags); 89 u32 *count = (u32*)vma->vm_private_data;
90 cq = idr_find(&ehca_cq_idr, idr_handle); 90 if (!count) {
91 spin_unlock_irqrestore(&ehca_cq_idr_lock, flags); 91 ehca_gen_err("Invalid vma struct vm_start=%lx vm_end=%lx",
92 vma->vm_start, vma->vm_end);
93 return;
94 }
95 (*count)--;
96 ehca_gen_dbg("vm_start=%lx vm_end=%lx count=%x",
97 vma->vm_start, vma->vm_end, *count);
98}
92 99
93 /* make sure this mmap really belongs to the authorized user */ 100static struct vm_operations_struct vm_ops = {
94 if (!cq) { 101 .open = ehca_mm_open,
95 ehca_gen_err("cq is NULL ret=NOPAGE_SIGBUS"); 102 .close = ehca_mm_close,
96 return NOPAGE_SIGBUS; 103};
104
105static int ehca_mmap_fw(struct vm_area_struct *vma, struct h_galpas *galpas,
106 u32 *mm_count)
107{
108 int ret;
109 u64 vsize, physical;
110
111 vsize = vma->vm_end - vma->vm_start;
112 if (vsize != EHCA_PAGESIZE) {
113 ehca_gen_err("invalid vsize=%lx", vma->vm_end - vma->vm_start);
114 return -EINVAL;
115 }
116
117 physical = galpas->user.fw_handle;
118 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
119 ehca_gen_dbg("vsize=%lx physical=%lx", vsize, physical);
120 /* VM_IO | VM_RESERVED are set by remap_pfn_range() */
121 ret = remap_pfn_range(vma, vma->vm_start, physical >> PAGE_SHIFT,
122 vsize, vma->vm_page_prot);
123 if (unlikely(ret)) {
124 ehca_gen_err("remap_pfn_range() failed ret=%x", ret);
125 return -ENOMEM;
126 }
127
128 vma->vm_private_data = mm_count;
129 (*mm_count)++;
130 vma->vm_ops = &vm_ops;
131
132 return 0;
133}
134
135static int ehca_mmap_queue(struct vm_area_struct *vma, struct ipz_queue *queue,
136 u32 *mm_count)
137{
138 int ret;
139 u64 start, ofs;
140 struct page *page;
141
142 vma->vm_flags |= VM_RESERVED;
143 start = vma->vm_start;
144 for (ofs = 0; ofs < queue->queue_length; ofs += PAGE_SIZE) {
145 u64 virt_addr = (u64)ipz_qeit_calc(queue, ofs);
146 page = virt_to_page(virt_addr);
147 ret = vm_insert_page(vma, start, page);
148 if (unlikely(ret)) {
149 ehca_gen_err("vm_insert_page() failed rc=%x", ret);
150 return ret;
97 } 151 }
152 start += PAGE_SIZE;
153 }
154 vma->vm_private_data = mm_count;
155 (*mm_count)++;
156 vma->vm_ops = &vm_ops;
98 157
99 if (cq->ownpid != cur_pid) { 158 return 0;
159}
160
161static int ehca_mmap_cq(struct vm_area_struct *vma, struct ehca_cq *cq,
162 u32 rsrc_type)
163{
164 int ret;
165
166 switch (rsrc_type) {
167 case 1: /* galpa fw handle */
168 ehca_dbg(cq->ib_cq.device, "cq_num=%x fw", cq->cq_number);
169 ret = ehca_mmap_fw(vma, &cq->galpas, &cq->mm_count_galpa);
170 if (unlikely(ret)) {
100 ehca_err(cq->ib_cq.device, 171 ehca_err(cq->ib_cq.device,
101 "Invalid caller pid=%x ownpid=%x", 172 "ehca_mmap_fw() failed rc=%x cq_num=%x",
102 cur_pid, cq->ownpid); 173 ret, cq->cq_number);
103 return NOPAGE_SIGBUS; 174 return ret;
104 } 175 }
176 break;
105 177
106 if (rsrc_type == 2) { 178 case 2: /* cq queue_addr */
107 ehca_dbg(cq->ib_cq.device, "cq=%p cq queuearea", cq); 179 ehca_dbg(cq->ib_cq.device, "cq_num=%x queue", cq->cq_number);
108 offset = address - vma->vm_start; 180 ret = ehca_mmap_queue(vma, &cq->ipz_queue, &cq->mm_count_queue);
109 vaddr = ipz_qeit_calc(&cq->ipz_queue, offset); 181 if (unlikely(ret)) {
110 ehca_dbg(cq->ib_cq.device, "offset=%lx vaddr=%p", 182 ehca_err(cq->ib_cq.device,
111 offset, vaddr); 183 "ehca_mmap_queue() failed rc=%x cq_num=%x",
112 mypage = virt_to_page(vaddr); 184 ret, cq->cq_number);
185 return ret;
113 } 186 }
114 break; 187 break;
115 188
116 case 2: /* QP */ 189 default:
117 spin_lock_irqsave(&ehca_qp_idr_lock, flags); 190 ehca_err(cq->ib_cq.device, "bad resource type=%x cq_num=%x",
118 qp = idr_find(&ehca_qp_idr, idr_handle); 191 rsrc_type, cq->cq_number);
119 spin_unlock_irqrestore(&ehca_qp_idr_lock, flags); 192 return -EINVAL;
193 }
120 194
121 /* make sure this mmap really belongs to the authorized user */ 195 return 0;
122 if (!qp) { 196}
123 ehca_gen_err("qp is NULL ret=NOPAGE_SIGBUS"); 197
124 return NOPAGE_SIGBUS; 198static int ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp,
199 u32 rsrc_type)
200{
201 int ret;
202
203 switch (rsrc_type) {
204 case 1: /* galpa fw handle */
205 ehca_dbg(qp->ib_qp.device, "qp_num=%x fw", qp->ib_qp.qp_num);
206 ret = ehca_mmap_fw(vma, &qp->galpas, &qp->mm_count_galpa);
207 if (unlikely(ret)) {
208 ehca_err(qp->ib_qp.device,
209 "remap_pfn_range() failed ret=%x qp_num=%x",
210 ret, qp->ib_qp.qp_num);
211 return -ENOMEM;
125 } 212 }
213 break;
126 214
127 pd = container_of(qp->ib_qp.pd, struct ehca_pd, ib_pd); 215 case 2: /* qp rqueue_addr */
128 if (pd->ownpid != cur_pid) { 216 ehca_dbg(qp->ib_qp.device, "qp_num=%x rqueue",
217 qp->ib_qp.qp_num);
218 ret = ehca_mmap_queue(vma, &qp->ipz_rqueue, &qp->mm_count_rqueue);
219 if (unlikely(ret)) {
129 ehca_err(qp->ib_qp.device, 220 ehca_err(qp->ib_qp.device,
130 "Invalid caller pid=%x ownpid=%x", 221 "ehca_mmap_queue(rq) failed rc=%x qp_num=%x",
131 cur_pid, pd->ownpid); 222 ret, qp->ib_qp.qp_num);
132 return NOPAGE_SIGBUS; 223 return ret;
133 } 224 }
225 break;
134 226
135 if (rsrc_type == 2) { /* rqueue */ 227 case 3: /* qp squeue_addr */
136 ehca_dbg(qp->ib_qp.device, "qp=%p qp rqueuearea", qp); 228 ehca_dbg(qp->ib_qp.device, "qp_num=%x squeue",
137 offset = address - vma->vm_start; 229 qp->ib_qp.qp_num);
138 vaddr = ipz_qeit_calc(&qp->ipz_rqueue, offset); 230 ret = ehca_mmap_queue(vma, &qp->ipz_squeue, &qp->mm_count_squeue);
139 ehca_dbg(qp->ib_qp.device, "offset=%lx vaddr=%p", 231 if (unlikely(ret)) {
140 offset, vaddr); 232 ehca_err(qp->ib_qp.device,
141 mypage = virt_to_page(vaddr); 233 "ehca_mmap_queue(sq) failed rc=%x qp_num=%x",
142 } else if (rsrc_type == 3) { /* squeue */ 234 ret, qp->ib_qp.qp_num);
143 ehca_dbg(qp->ib_qp.device, "qp=%p qp squeuearea", qp); 235 return ret;
144 offset = address - vma->vm_start;
145 vaddr = ipz_qeit_calc(&qp->ipz_squeue, offset);
146 ehca_dbg(qp->ib_qp.device, "offset=%lx vaddr=%p",
147 offset, vaddr);
148 mypage = virt_to_page(vaddr);
149 } 236 }
150 break; 237 break;
151 238
152 default: 239 default:
 153 ehca_gen_err("bad queue type %x", q_type); 240 ehca_err(qp->ib_qp.device, "bad resource type=%x qp_num=%x",
154 return NOPAGE_SIGBUS; 241 rsrc_type, qp->ib_qp.qp_num);
155 } 242 return -EINVAL;
156
157 if (!mypage) {
158 ehca_gen_err("Invalid page adr==NULL ret=NOPAGE_SIGBUS");
159 return NOPAGE_SIGBUS;
160 } 243 }
161 get_page(mypage);
162 244
163 return mypage; 245 return 0;
164} 246}
165 247
166static struct vm_operations_struct ehcau_vm_ops = {
167 .nopage = ehca_nopage,
168};
169
170int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) 248int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
171{ 249{
172 u64 fileoffset = vma->vm_pgoff << PAGE_SHIFT; 250 u64 fileoffset = vma->vm_pgoff << PAGE_SHIFT;
@@ -175,7 +253,6 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
175 u32 rsrc_type = (fileoffset >> 24) & 0xF; /* sq,rq,cmnd_window */ 253 u32 rsrc_type = (fileoffset >> 24) & 0xF; /* sq,rq,cmnd_window */
176 u32 cur_pid = current->tgid; 254 u32 cur_pid = current->tgid;
177 u32 ret; 255 u32 ret;
178 u64 vsize, physical;
179 unsigned long flags; 256 unsigned long flags;
180 struct ehca_cq *cq; 257 struct ehca_cq *cq;
181 struct ehca_qp *qp; 258 struct ehca_qp *qp;
@@ -201,44 +278,12 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
201 if (!cq->ib_cq.uobject || cq->ib_cq.uobject->context != context) 278 if (!cq->ib_cq.uobject || cq->ib_cq.uobject->context != context)
202 return -EINVAL; 279 return -EINVAL;
203 280
204 switch (rsrc_type) { 281 ret = ehca_mmap_cq(vma, cq, rsrc_type);
205 case 1: /* galpa fw handle */ 282 if (unlikely(ret)) {
206 ehca_dbg(cq->ib_cq.device, "cq=%p cq triggerarea", cq); 283 ehca_err(cq->ib_cq.device,
207 vma->vm_flags |= VM_RESERVED; 284 "ehca_mmap_cq() failed rc=%x cq_num=%x",
208 vsize = vma->vm_end - vma->vm_start; 285 ret, cq->cq_number);
209 if (vsize != EHCA_PAGESIZE) { 286 return ret;
210 ehca_err(cq->ib_cq.device, "invalid vsize=%lx",
211 vma->vm_end - vma->vm_start);
212 return -EINVAL;
213 }
214
215 physical = cq->galpas.user.fw_handle;
216 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
217 vma->vm_flags |= VM_IO | VM_RESERVED;
218
219 ehca_dbg(cq->ib_cq.device,
220 "vsize=%lx physical=%lx", vsize, physical);
221 ret = remap_pfn_range(vma, vma->vm_start,
222 physical >> PAGE_SHIFT, vsize,
223 vma->vm_page_prot);
224 if (ret) {
225 ehca_err(cq->ib_cq.device,
226 "remap_pfn_range() failed ret=%x",
227 ret);
228 return -ENOMEM;
229 }
230 break;
231
232 case 2: /* cq queue_addr */
233 ehca_dbg(cq->ib_cq.device, "cq=%p cq q_addr", cq);
234 vma->vm_flags |= VM_RESERVED;
235 vma->vm_ops = &ehcau_vm_ops;
236 break;
237
238 default:
239 ehca_err(cq->ib_cq.device, "bad resource type %x",
240 rsrc_type);
241 return -EINVAL;
242 } 287 }
243 break; 288 break;
244 289
@@ -262,50 +307,12 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
262 if (!qp->ib_qp.uobject || qp->ib_qp.uobject->context != context) 307 if (!qp->ib_qp.uobject || qp->ib_qp.uobject->context != context)
263 return -EINVAL; 308 return -EINVAL;
264 309
265 switch (rsrc_type) { 310 ret = ehca_mmap_qp(vma, qp, rsrc_type);
266 case 1: /* galpa fw handle */ 311 if (unlikely(ret)) {
267 ehca_dbg(qp->ib_qp.device, "qp=%p qp triggerarea", qp); 312 ehca_err(qp->ib_qp.device,
268 vma->vm_flags |= VM_RESERVED; 313 "ehca_mmap_qp() failed rc=%x qp_num=%x",
269 vsize = vma->vm_end - vma->vm_start; 314 ret, qp->ib_qp.qp_num);
270 if (vsize != EHCA_PAGESIZE) { 315 return ret;
271 ehca_err(qp->ib_qp.device, "invalid vsize=%lx",
272 vma->vm_end - vma->vm_start);
273 return -EINVAL;
274 }
275
276 physical = qp->galpas.user.fw_handle;
277 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
278 vma->vm_flags |= VM_IO | VM_RESERVED;
279
280 ehca_dbg(qp->ib_qp.device, "vsize=%lx physical=%lx",
281 vsize, physical);
282 ret = remap_pfn_range(vma, vma->vm_start,
283 physical >> PAGE_SHIFT, vsize,
284 vma->vm_page_prot);
285 if (ret) {
286 ehca_err(qp->ib_qp.device,
287 "remap_pfn_range() failed ret=%x",
288 ret);
289 return -ENOMEM;
290 }
291 break;
292
293 case 2: /* qp rqueue_addr */
294 ehca_dbg(qp->ib_qp.device, "qp=%p qp rqueue_addr", qp);
295 vma->vm_flags |= VM_RESERVED;
296 vma->vm_ops = &ehcau_vm_ops;
297 break;
298
299 case 3: /* qp squeue_addr */
300 ehca_dbg(qp->ib_qp.device, "qp=%p qp squeue_addr", qp);
301 vma->vm_flags |= VM_RESERVED;
302 vma->vm_ops = &ehcau_vm_ops;
303 break;
304
305 default:
306 ehca_err(qp->ib_qp.device, "bad resource type %x",
307 rsrc_type);
308 return -EINVAL;
309 } 316 }
310 break; 317 break;
311 318
@@ -316,77 +323,3 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
316 323
317 return 0; 324 return 0;
318} 325}
319
320int ehca_mmap_nopage(u64 foffset, u64 length, void **mapped,
321 struct vm_area_struct **vma)
322{
323 down_write(&current->mm->mmap_sem);
324 *mapped = (void*)do_mmap(NULL,0, length, PROT_WRITE,
325 MAP_SHARED | MAP_ANONYMOUS,
326 foffset);
327 up_write(&current->mm->mmap_sem);
328 if (!(*mapped)) {
329 ehca_gen_err("couldn't mmap foffset=%lx length=%lx",
330 foffset, length);
331 return -EINVAL;
332 }
333
334 *vma = find_vma(current->mm, (u64)*mapped);
335 if (!(*vma)) {
336 down_write(&current->mm->mmap_sem);
337 do_munmap(current->mm, 0, length);
338 up_write(&current->mm->mmap_sem);
339 ehca_gen_err("couldn't find vma queue=%p", *mapped);
340 return -EINVAL;
341 }
342 (*vma)->vm_flags |= VM_RESERVED;
343 (*vma)->vm_ops = &ehcau_vm_ops;
344
345 return 0;
346}
347
348int ehca_mmap_register(u64 physical, void **mapped,
349 struct vm_area_struct **vma)
350{
351 int ret;
352 unsigned long vsize;
353 /* ehca hw supports only 4k page */
354 ret = ehca_mmap_nopage(0, EHCA_PAGESIZE, mapped, vma);
355 if (ret) {
 356 ehca_gen_err("couldn't mmap physical=%lx", physical);
357 return ret;
358 }
359
360 (*vma)->vm_flags |= VM_RESERVED;
361 vsize = (*vma)->vm_end - (*vma)->vm_start;
362 if (vsize != EHCA_PAGESIZE) {
363 ehca_gen_err("invalid vsize=%lx",
364 (*vma)->vm_end - (*vma)->vm_start);
365 return -EINVAL;
366 }
367
368 (*vma)->vm_page_prot = pgprot_noncached((*vma)->vm_page_prot);
369 (*vma)->vm_flags |= VM_IO | VM_RESERVED;
370
371 ret = remap_pfn_range((*vma), (*vma)->vm_start,
372 physical >> PAGE_SHIFT, vsize,
373 (*vma)->vm_page_prot);
374 if (ret) {
375 ehca_gen_err("remap_pfn_range() failed ret=%x", ret);
376 return -ENOMEM;
377 }
378
379 return 0;
380
381}
382
383int ehca_munmap(unsigned long addr, size_t len) {
384 int ret = 0;
385 struct mm_struct *mm = current->mm;
386 if (mm) {
387 down_write(&mm->mmap_sem);
388 ret = do_munmap(mm, addr, len);
389 up_write(&mm->mmap_sem);
390 }
391 return ret;
392}
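
The ehca_uverbs.c rewrite above replaces the nopage handler and the kernel-side do_mmap()/do_munmap() calls: queue pages are now inserted eagerly with vm_insert_page() at mmap() time, and live mappings are tracked through vm_operations open/close so destroy can refuse while user space still holds one. A condensed sketch of the pattern with hypothetical example_* names:

    #include <linux/mm.h>

    static void example_vm_open(struct vm_area_struct *vma)
    {
        u32 *count = vma->vm_private_data;

        if (count)
            (*count)++;         /* mapping duplicated by fork()/split */
    }

    static void example_vm_close(struct vm_area_struct *vma)
    {
        u32 *count = vma->vm_private_data;

        if (count)
            (*count)--;         /* last munmap() brings this back to zero */
    }

    static struct vm_operations_struct example_vm_ops = {
        .open  = example_vm_open,
        .close = example_vm_close,
    };

    static int example_mmap_queue(struct vm_area_struct *vma, u8 *queue_base,
                                  size_t queue_len, u32 *mm_count)
    {
        unsigned long start = vma->vm_start;
        size_t ofs;
        int ret;

        vma->vm_flags |= VM_RESERVED;
        for (ofs = 0; ofs < queue_len; ofs += PAGE_SIZE) {
            ret = vm_insert_page(vma, start, virt_to_page(queue_base + ofs));
            if (ret)
                return ret;
            start += PAGE_SIZE;
        }
        vma->vm_private_data = mm_count;
        (*mm_count)++;          /* balanced by example_vm_close() */
        vma->vm_ops = &example_vm_ops;
        return 0;
    }

Note that the initial mmap() bumps the counter by hand, as the .open hook only fires for later duplications of the vma.
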
diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c
index 46c1c89bf6ae..64f07b19349f 100644
--- a/drivers/infiniband/hw/ipath/ipath_qp.c
+++ b/drivers/infiniband/hw/ipath/ipath_qp.c
@@ -379,7 +379,7 @@ void ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
379 wc.vendor_err = 0; 379 wc.vendor_err = 0;
380 wc.byte_len = 0; 380 wc.byte_len = 0;
381 wc.imm_data = 0; 381 wc.imm_data = 0;
382 wc.qp_num = qp->ibqp.qp_num; 382 wc.qp = &qp->ibqp;
383 wc.src_qp = 0; 383 wc.src_qp = 0;
384 wc.wc_flags = 0; 384 wc.wc_flags = 0;
385 wc.pkey_index = 0; 385 wc.pkey_index = 0;
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
index ce6038743c5c..5ff20cb04494 100644
--- a/drivers/infiniband/hw/ipath/ipath_rc.c
+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
@@ -702,7 +702,7 @@ void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc)
702 wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode]; 702 wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
703 wc->vendor_err = 0; 703 wc->vendor_err = 0;
704 wc->byte_len = 0; 704 wc->byte_len = 0;
705 wc->qp_num = qp->ibqp.qp_num; 705 wc->qp = &qp->ibqp;
706 wc->src_qp = qp->remote_qpn; 706 wc->src_qp = qp->remote_qpn;
707 wc->pkey_index = 0; 707 wc->pkey_index = 0;
708 wc->slid = qp->remote_ah_attr.dlid; 708 wc->slid = qp->remote_ah_attr.dlid;
@@ -836,7 +836,7 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
836 wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode]; 836 wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
837 wc.vendor_err = 0; 837 wc.vendor_err = 0;
838 wc.byte_len = wqe->length; 838 wc.byte_len = wqe->length;
839 wc.qp_num = qp->ibqp.qp_num; 839 wc.qp = &qp->ibqp;
840 wc.src_qp = qp->remote_qpn; 840 wc.src_qp = qp->remote_qpn;
841 wc.pkey_index = 0; 841 wc.pkey_index = 0;
842 wc.slid = qp->remote_ah_attr.dlid; 842 wc.slid = qp->remote_ah_attr.dlid;
@@ -951,7 +951,7 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
951 wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode]; 951 wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
952 wc.vendor_err = 0; 952 wc.vendor_err = 0;
953 wc.byte_len = 0; 953 wc.byte_len = 0;
954 wc.qp_num = qp->ibqp.qp_num; 954 wc.qp = &qp->ibqp;
955 wc.src_qp = qp->remote_qpn; 955 wc.src_qp = qp->remote_qpn;
956 wc.pkey_index = 0; 956 wc.pkey_index = 0;
957 wc.slid = qp->remote_ah_attr.dlid; 957 wc.slid = qp->remote_ah_attr.dlid;
@@ -1511,7 +1511,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
1511 wc.status = IB_WC_SUCCESS; 1511 wc.status = IB_WC_SUCCESS;
1512 wc.opcode = IB_WC_RECV; 1512 wc.opcode = IB_WC_RECV;
1513 wc.vendor_err = 0; 1513 wc.vendor_err = 0;
1514 wc.qp_num = qp->ibqp.qp_num; 1514 wc.qp = &qp->ibqp;
1515 wc.src_qp = qp->remote_qpn; 1515 wc.src_qp = qp->remote_qpn;
1516 wc.pkey_index = 0; 1516 wc.pkey_index = 0;
1517 wc.slid = qp->remote_ah_attr.dlid; 1517 wc.slid = qp->remote_ah_attr.dlid;
diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
index f7530512045d..e86cb171872e 100644
--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
@@ -137,7 +137,7 @@ bad_lkey:
137 wc.vendor_err = 0; 137 wc.vendor_err = 0;
138 wc.byte_len = 0; 138 wc.byte_len = 0;
139 wc.imm_data = 0; 139 wc.imm_data = 0;
140 wc.qp_num = qp->ibqp.qp_num; 140 wc.qp = &qp->ibqp;
141 wc.src_qp = 0; 141 wc.src_qp = 0;
142 wc.wc_flags = 0; 142 wc.wc_flags = 0;
143 wc.pkey_index = 0; 143 wc.pkey_index = 0;
@@ -336,7 +336,7 @@ again:
336 wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode]; 336 wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
337 wc.vendor_err = 0; 337 wc.vendor_err = 0;
338 wc.byte_len = 0; 338 wc.byte_len = 0;
339 wc.qp_num = sqp->ibqp.qp_num; 339 wc.qp = &sqp->ibqp;
340 wc.src_qp = sqp->remote_qpn; 340 wc.src_qp = sqp->remote_qpn;
341 wc.pkey_index = 0; 341 wc.pkey_index = 0;
342 wc.slid = sqp->remote_ah_attr.dlid; 342 wc.slid = sqp->remote_ah_attr.dlid;
@@ -426,7 +426,7 @@ again:
426 wc.status = IB_WC_SUCCESS; 426 wc.status = IB_WC_SUCCESS;
427 wc.vendor_err = 0; 427 wc.vendor_err = 0;
428 wc.byte_len = wqe->length; 428 wc.byte_len = wqe->length;
429 wc.qp_num = qp->ibqp.qp_num; 429 wc.qp = &qp->ibqp;
430 wc.src_qp = qp->remote_qpn; 430 wc.src_qp = qp->remote_qpn;
431 /* XXX do we know which pkey matched? Only needed for GSI. */ 431 /* XXX do we know which pkey matched? Only needed for GSI. */
432 wc.pkey_index = 0; 432 wc.pkey_index = 0;
@@ -447,7 +447,7 @@ send_comp:
447 wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode]; 447 wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
448 wc.vendor_err = 0; 448 wc.vendor_err = 0;
449 wc.byte_len = wqe->length; 449 wc.byte_len = wqe->length;
450 wc.qp_num = sqp->ibqp.qp_num; 450 wc.qp = &sqp->ibqp;
451 wc.src_qp = 0; 451 wc.src_qp = 0;
452 wc.pkey_index = 0; 452 wc.pkey_index = 0;
453 wc.slid = 0; 453 wc.slid = 0;
diff --git a/drivers/infiniband/hw/ipath/ipath_uc.c b/drivers/infiniband/hw/ipath/ipath_uc.c
index e636cfd67a82..325d6634ff53 100644
--- a/drivers/infiniband/hw/ipath/ipath_uc.c
+++ b/drivers/infiniband/hw/ipath/ipath_uc.c
@@ -49,7 +49,7 @@ static void complete_last_send(struct ipath_qp *qp, struct ipath_swqe *wqe,
49 wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode]; 49 wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
50 wc->vendor_err = 0; 50 wc->vendor_err = 0;
51 wc->byte_len = wqe->length; 51 wc->byte_len = wqe->length;
52 wc->qp_num = qp->ibqp.qp_num; 52 wc->qp = &qp->ibqp;
53 wc->src_qp = qp->remote_qpn; 53 wc->src_qp = qp->remote_qpn;
54 wc->pkey_index = 0; 54 wc->pkey_index = 0;
55 wc->slid = qp->remote_ah_attr.dlid; 55 wc->slid = qp->remote_ah_attr.dlid;
@@ -411,7 +411,7 @@ void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
411 wc.status = IB_WC_SUCCESS; 411 wc.status = IB_WC_SUCCESS;
412 wc.opcode = IB_WC_RECV; 412 wc.opcode = IB_WC_RECV;
413 wc.vendor_err = 0; 413 wc.vendor_err = 0;
414 wc.qp_num = qp->ibqp.qp_num; 414 wc.qp = &qp->ibqp;
415 wc.src_qp = qp->remote_qpn; 415 wc.src_qp = qp->remote_qpn;
416 wc.pkey_index = 0; 416 wc.pkey_index = 0;
417 wc.slid = qp->remote_ah_attr.dlid; 417 wc.slid = qp->remote_ah_attr.dlid;
diff --git a/drivers/infiniband/hw/ipath/ipath_ud.c b/drivers/infiniband/hw/ipath/ipath_ud.c
index 49f1102af8b3..9a3e54664ee4 100644
--- a/drivers/infiniband/hw/ipath/ipath_ud.c
+++ b/drivers/infiniband/hw/ipath/ipath_ud.c
@@ -66,7 +66,7 @@ bad_lkey:
66 wc.vendor_err = 0; 66 wc.vendor_err = 0;
67 wc.byte_len = 0; 67 wc.byte_len = 0;
68 wc.imm_data = 0; 68 wc.imm_data = 0;
69 wc.qp_num = qp->ibqp.qp_num; 69 wc.qp = &qp->ibqp;
70 wc.src_qp = 0; 70 wc.src_qp = 0;
71 wc.wc_flags = 0; 71 wc.wc_flags = 0;
72 wc.pkey_index = 0; 72 wc.pkey_index = 0;
@@ -255,7 +255,7 @@ static void ipath_ud_loopback(struct ipath_qp *sqp,
255 wc->status = IB_WC_SUCCESS; 255 wc->status = IB_WC_SUCCESS;
256 wc->opcode = IB_WC_RECV; 256 wc->opcode = IB_WC_RECV;
257 wc->vendor_err = 0; 257 wc->vendor_err = 0;
258 wc->qp_num = qp->ibqp.qp_num; 258 wc->qp = &qp->ibqp;
259 wc->src_qp = sqp->ibqp.qp_num; 259 wc->src_qp = sqp->ibqp.qp_num;
260 /* XXX do we know which pkey matched? Only needed for GSI. */ 260 /* XXX do we know which pkey matched? Only needed for GSI. */
261 wc->pkey_index = 0; 261 wc->pkey_index = 0;
@@ -474,7 +474,7 @@ done:
474 wc.vendor_err = 0; 474 wc.vendor_err = 0;
475 wc.opcode = IB_WC_SEND; 475 wc.opcode = IB_WC_SEND;
476 wc.byte_len = len; 476 wc.byte_len = len;
477 wc.qp_num = qp->ibqp.qp_num; 477 wc.qp = &qp->ibqp;
478 wc.src_qp = 0; 478 wc.src_qp = 0;
479 wc.wc_flags = 0; 479 wc.wc_flags = 0;
480 /* XXX initialize other fields? */ 480 /* XXX initialize other fields? */
@@ -651,7 +651,7 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
651 wc.status = IB_WC_SUCCESS; 651 wc.status = IB_WC_SUCCESS;
652 wc.opcode = IB_WC_RECV; 652 wc.opcode = IB_WC_RECV;
653 wc.vendor_err = 0; 653 wc.vendor_err = 0;
654 wc.qp_num = qp->ibqp.qp_num; 654 wc.qp = &qp->ibqp;
655 wc.src_qp = src_qp; 655 wc.src_qp = src_qp;
656 /* XXX do we know which pkey matched? Only needed for GSI. */ 656 /* XXX do we know which pkey matched? Only needed for GSI. */
657 wc.pkey_index = 0; 657 wc.pkey_index = 0;
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index 768df7265b81..968d1519761c 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -1854,7 +1854,7 @@ int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
1854 1854
1855 memset(inbox + 256, 0, 256); 1855 memset(inbox + 256, 0, 256);
1856 1856
1857 MTHCA_PUT(inbox, in_wc->qp_num, MAD_IFC_MY_QPN_OFFSET); 1857 MTHCA_PUT(inbox, in_wc->qp->qp_num, MAD_IFC_MY_QPN_OFFSET);
1858 MTHCA_PUT(inbox, in_wc->src_qp, MAD_IFC_RQPN_OFFSET); 1858 MTHCA_PUT(inbox, in_wc->src_qp, MAD_IFC_RQPN_OFFSET);
1859 1859
1860 val = in_wc->sl << 4; 1860 val = in_wc->sl << 4;
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index 1159c8a0f2c5..efd79ef109a6 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -534,7 +534,7 @@ static inline int mthca_poll_one(struct mthca_dev *dev,
534 } 534 }
535 } 535 }
536 536
537 entry->qp_num = (*cur_qp)->qpn; 537 entry->qp = &(*cur_qp)->ibqp;
538 538
539 if (is_send) { 539 if (is_send) {
540 wq = &(*cur_qp)->sq; 540 wq = &(*cur_qp)->sq;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 705eb1d0e554..af5ee2ec4499 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -958,16 +958,17 @@ struct ipoib_dev_priv *ipoib_intf_alloc(const char *name)
958 return netdev_priv(dev); 958 return netdev_priv(dev);
959} 959}
960 960
961static ssize_t show_pkey(struct class_device *cdev, char *buf) 961static ssize_t show_pkey(struct device *dev,
962 struct device_attribute *attr, char *buf)
962{ 963{
963 struct ipoib_dev_priv *priv = 964 struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));
964 netdev_priv(container_of(cdev, struct net_device, class_dev));
965 965
966 return sprintf(buf, "0x%04x\n", priv->pkey); 966 return sprintf(buf, "0x%04x\n", priv->pkey);
967} 967}
968static CLASS_DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL); 968static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
969 969
970static ssize_t create_child(struct class_device *cdev, 970static ssize_t create_child(struct device *dev,
971 struct device_attribute *attr,
971 const char *buf, size_t count) 972 const char *buf, size_t count)
972{ 973{
973 int pkey; 974 int pkey;
@@ -985,14 +986,14 @@ static ssize_t create_child(struct class_device *cdev,
985 */ 986 */
986 pkey |= 0x8000; 987 pkey |= 0x8000;
987 988
988 ret = ipoib_vlan_add(container_of(cdev, struct net_device, class_dev), 989 ret = ipoib_vlan_add(to_net_dev(dev), pkey);
989 pkey);
990 990
991 return ret ? ret : count; 991 return ret ? ret : count;
992} 992}
993static CLASS_DEVICE_ATTR(create_child, S_IWUGO, NULL, create_child); 993static DEVICE_ATTR(create_child, S_IWUGO, NULL, create_child);
994 994
995static ssize_t delete_child(struct class_device *cdev, 995static ssize_t delete_child(struct device *dev,
996 struct device_attribute *attr,
996 const char *buf, size_t count) 997 const char *buf, size_t count)
997{ 998{
998 int pkey; 999 int pkey;
@@ -1004,18 +1005,16 @@ static ssize_t delete_child(struct class_device *cdev,
1004 if (pkey < 0 || pkey > 0xffff) 1005 if (pkey < 0 || pkey > 0xffff)
1005 return -EINVAL; 1006 return -EINVAL;
1006 1007
1007 ret = ipoib_vlan_delete(container_of(cdev, struct net_device, class_dev), 1008 ret = ipoib_vlan_delete(to_net_dev(dev), pkey);
1008 pkey);
1009 1009
1010 return ret ? ret : count; 1010 return ret ? ret : count;
1011 1011
1012} 1012}
1013static CLASS_DEVICE_ATTR(delete_child, S_IWUGO, NULL, delete_child); 1013static DEVICE_ATTR(delete_child, S_IWUGO, NULL, delete_child);
1014 1014
1015int ipoib_add_pkey_attr(struct net_device *dev) 1015int ipoib_add_pkey_attr(struct net_device *dev)
1016{ 1016{
1017 return class_device_create_file(&dev->class_dev, 1017 return device_create_file(&dev->dev, &dev_attr_pkey);
1018 &class_device_attr_pkey);
1019} 1018}
1020 1019
1021static struct net_device *ipoib_add_port(const char *format, 1020static struct net_device *ipoib_add_port(const char *format,
@@ -1083,11 +1082,9 @@ static struct net_device *ipoib_add_port(const char *format,
1083 1082
1084 if (ipoib_add_pkey_attr(priv->dev)) 1083 if (ipoib_add_pkey_attr(priv->dev))
1085 goto sysfs_failed; 1084 goto sysfs_failed;
1086 if (class_device_create_file(&priv->dev->class_dev, 1085 if (device_create_file(&priv->dev->dev, &dev_attr_create_child))
1087 &class_device_attr_create_child))
1088 goto sysfs_failed; 1086 goto sysfs_failed;
1089 if (class_device_create_file(&priv->dev->class_dev, 1087 if (device_create_file(&priv->dev->dev, &dev_attr_delete_child))
1090 &class_device_attr_delete_child))
1091 goto sysfs_failed; 1088 goto sysfs_failed;
1092 1089
1093 return priv->dev; 1090 return priv->dev;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index f887780e8093..085eafe6667c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -42,15 +42,15 @@
42 42
43#include "ipoib.h" 43#include "ipoib.h"
44 44
45static ssize_t show_parent(struct class_device *class_dev, char *buf) 45static ssize_t show_parent(struct device *d, struct device_attribute *attr,
46 char *buf)
46{ 47{
47 struct net_device *dev = 48 struct net_device *dev = to_net_dev(d);
48 container_of(class_dev, struct net_device, class_dev);
49 struct ipoib_dev_priv *priv = netdev_priv(dev); 49 struct ipoib_dev_priv *priv = netdev_priv(dev);
50 50
51 return sprintf(buf, "%s\n", priv->parent->name); 51 return sprintf(buf, "%s\n", priv->parent->name);
52} 52}
53static CLASS_DEVICE_ATTR(parent, S_IRUGO, show_parent, NULL); 53static DEVICE_ATTR(parent, S_IRUGO, show_parent, NULL);
54 54
55int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey) 55int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
56{ 56{
@@ -118,8 +118,7 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
118 if (ipoib_add_pkey_attr(priv->dev)) 118 if (ipoib_add_pkey_attr(priv->dev))
119 goto sysfs_failed; 119 goto sysfs_failed;
120 120
121 if (class_device_create_file(&priv->dev->class_dev, 121 if (device_create_file(&priv->dev->dev, &dev_attr_parent))
122 &class_device_attr_parent))
123 goto sysfs_failed; 122 goto sysfs_failed;
124 123
125 list_add_tail(&priv->list, &ppriv->child_intfs); 124 list_add_tail(&priv->list, &ppriv->child_intfs);
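
The ipoib conversion moves these sysfs attributes from class_device to struct device: show/store callbacks gain a struct device_attribute argument, to_net_dev() replaces the open-coded container_of(), and DEVICE_ATTR()/device_create_file() replace their CLASS_DEVICE_* counterparts. The shape of such an attribute, with a hypothetical "example" name:

    #include <linux/device.h>
    #include <linux/netdevice.h>
    #include <linux/stat.h>

    static ssize_t show_example(struct device *dev,
                                struct device_attribute *attr, char *buf)
    {
        struct net_device *netdev = to_net_dev(dev);

        return sprintf(buf, "%s\n", netdev->name);
    }
    static DEVICE_ATTR(example, S_IRUGO, show_example, NULL);

    /* attached next to the pkey/create_child attributes, e.g.:
     *     if (device_create_file(&netdev->dev, &dev_attr_example))
     *         goto sysfs_failed;
     */
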
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 0a7d1ab60e6d..89e37283c836 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -567,7 +567,7 @@ void iser_rcv_completion(struct iser_desc *rx_desc,
567 opcode = hdr->opcode & ISCSI_OPCODE_MASK; 567 opcode = hdr->opcode & ISCSI_OPCODE_MASK;
568 568
569 if (opcode == ISCSI_OP_SCSI_CMD_RSP) { 569 if (opcode == ISCSI_OP_SCSI_CMD_RSP) {
570 itt = hdr->itt & ISCSI_ITT_MASK; /* mask out cid and age bits */ 570 itt = get_itt(hdr->itt); /* mask out cid and age bits */
571 if (!(itt < session->cmds_max)) 571 if (!(itt < session->cmds_max))
572 iser_err("itt can't be matched to task!!!" 572 iser_err("itt can't be matched to task!!!"
573 "conn %p opcode %d cmds_max %d itt %d\n", 573 "conn %p opcode %d cmds_max %d itt %d\n",
@@ -625,7 +625,7 @@ void iser_snd_completion(struct iser_desc *tx_desc)
625 /* this arithmetic is legal by libiscsi dd_data allocation */ 625 /* this arithmetic is legal by libiscsi dd_data allocation */
626 mtask = (void *) ((long)(void *)tx_desc - 626 mtask = (void *) ((long)(void *)tx_desc -
627 sizeof(struct iscsi_mgmt_task)); 627 sizeof(struct iscsi_mgmt_task));
628 if (mtask->hdr->itt == cpu_to_be32(ISCSI_RESERVED_TAG)) { 628 if (mtask->hdr->itt == RESERVED_ITT) {
629 struct iscsi_session *session = conn->session; 629 struct iscsi_session *session = conn->session;
630 630
631 spin_lock(&conn->session->lock); 631 spin_lock(&conn->session->lock);
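
The iser changes swap open-coded tag masking for the shared iSCSI helpers, so every transport strips the connection-id/age bits from an initiator task tag the same way. A plausible shape for the helpers (the real definitions live in the iSCSI headers; the mask value below is illustrative):

    #define ISCSI_ITT_MASK  0xfff                       /* tag bits proper */
    #define RESERVED_ITT    ((__force itt_t)0xffffffff)
    #define get_itt(itt)    ((__force u32)(itt) & ISCSI_ITT_MASK)
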
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 72611fd15103..5e8ac577f0ad 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -548,6 +548,7 @@ static int srp_reconnect_target(struct srp_target_port *target)
548 target->tx_head = 0; 548 target->tx_head = 0;
549 target->tx_tail = 0; 549 target->tx_tail = 0;
550 550
551 target->qp_in_error = 0;
551 ret = srp_connect_target(target); 552 ret = srp_connect_target(target);
552 if (ret) 553 if (ret)
553 goto err; 554 goto err;
@@ -878,6 +879,7 @@ static void srp_completion(struct ib_cq *cq, void *target_ptr)
878 printk(KERN_ERR PFX "failed %s status %d\n", 879 printk(KERN_ERR PFX "failed %s status %d\n",
879 wc.wr_id & SRP_OP_RECV ? "receive" : "send", 880 wc.wr_id & SRP_OP_RECV ? "receive" : "send",
880 wc.status); 881 wc.status);
882 target->qp_in_error = 1;
881 break; 883 break;
882 } 884 }
883 885
@@ -1337,6 +1339,8 @@ static int srp_abort(struct scsi_cmnd *scmnd)
1337 1339
1338 printk(KERN_ERR "SRP abort called\n"); 1340 printk(KERN_ERR "SRP abort called\n");
1339 1341
1342 if (target->qp_in_error)
1343 return FAILED;
1340 if (srp_find_req(target, scmnd, &req)) 1344 if (srp_find_req(target, scmnd, &req))
1341 return FAILED; 1345 return FAILED;
1342 if (srp_send_tsk_mgmt(target, req, SRP_TSK_ABORT_TASK)) 1346 if (srp_send_tsk_mgmt(target, req, SRP_TSK_ABORT_TASK))
@@ -1365,6 +1369,8 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
1365 1369
1366 printk(KERN_ERR "SRP reset_device called\n"); 1370 printk(KERN_ERR "SRP reset_device called\n");
1367 1371
1372 if (target->qp_in_error)
1373 return FAILED;
1368 if (srp_find_req(target, scmnd, &req)) 1374 if (srp_find_req(target, scmnd, &req))
1369 return FAILED; 1375 return FAILED;
1370 if (srp_send_tsk_mgmt(target, req, SRP_TSK_LUN_RESET)) 1376 if (srp_send_tsk_mgmt(target, req, SRP_TSK_LUN_RESET))
@@ -1801,6 +1807,7 @@ static ssize_t srp_create_target(struct class_device *class_dev,
1801 goto err_free; 1807 goto err_free;
1802 } 1808 }
1803 1809
1810 target->qp_in_error = 0;
1804 ret = srp_connect_target(target); 1811 ret = srp_connect_target(target);
1805 if (ret) { 1812 if (ret) {
1806 printk(KERN_ERR PFX "Connection failed\n"); 1813 printk(KERN_ERR PFX "Connection failed\n");
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index c21772317b86..2f3319c719a5 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -158,6 +158,7 @@ struct srp_target_port {
158 struct completion done; 158 struct completion done;
159 int status; 159 int status;
160 enum srp_target_state state; 160 enum srp_target_state state;
161 int qp_in_error;
161}; 162};
162 163
163struct srp_iu { 164struct srp_iu {
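
The new qp_in_error flag records that a work completion failed. Once the QP has entered the error state, task management requests can never complete, so the SCSI error handlers fail fast until srp_reconnect_target() resets the flag. The guard pattern in isolation (srp_eh_sketch is a hypothetical stand-in for srp_abort()/srp_reset_device()):

    static int srp_eh_sketch(struct srp_target_port *target)
    {
            if (target->qp_in_error)        /* set by srp_completion() */
                    return FAILED;          /* QP unusable until reconnect */
            /* ... locate the request and send SRP task management ... */
            return SUCCESS;
    }
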
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index f0ce822c1028..17c8c63cbe1a 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -45,7 +45,7 @@ EXPORT_SYMBOL(serio_interrupt);
45EXPORT_SYMBOL(__serio_register_port); 45EXPORT_SYMBOL(__serio_register_port);
46EXPORT_SYMBOL(serio_unregister_port); 46EXPORT_SYMBOL(serio_unregister_port);
47EXPORT_SYMBOL(serio_unregister_child_port); 47EXPORT_SYMBOL(serio_unregister_child_port);
48EXPORT_SYMBOL(serio_register_driver); 48EXPORT_SYMBOL(__serio_register_driver);
49EXPORT_SYMBOL(serio_unregister_driver); 49EXPORT_SYMBOL(serio_unregister_driver);
50EXPORT_SYMBOL(serio_open); 50EXPORT_SYMBOL(serio_open);
51EXPORT_SYMBOL(serio_close); 51EXPORT_SYMBOL(serio_close);
@@ -789,12 +789,14 @@ static void serio_attach_driver(struct serio_driver *drv)
789 drv->driver.name, error); 789 drv->driver.name, error);
790} 790}
791 791
792int serio_register_driver(struct serio_driver *drv) 792int __serio_register_driver(struct serio_driver *drv, struct module *owner, const char *mod_name)
793{ 793{
794 int manual_bind = drv->manual_bind; 794 int manual_bind = drv->manual_bind;
795 int error; 795 int error;
796 796
797 drv->driver.bus = &serio_bus; 797 drv->driver.bus = &serio_bus;
798 drv->driver.owner = owner;
799 drv->driver.mod_name = mod_name;
798 800
799 /* 801 /*
800 * Temporarily disable automatic binding because probing 802 * Temporarily disable automatic binding because probing
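
Renaming the export to __serio_register_driver() with owner and mod_name parameters is the usual automatic-module-ownership conversion; callers keep writing serio_register_driver(), presumably via a wrapper in serio.h along these lines:

    #define serio_register_driver(drv) \
            __serio_register_driver(drv, THIS_MODULE, KBUILD_MODNAME)
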
diff --git a/drivers/input/touchscreen/ucb1400_ts.c b/drivers/input/touchscreen/ucb1400_ts.c
index 4358a0a78eaa..c7db4032ef02 100644
--- a/drivers/input/touchscreen/ucb1400_ts.c
+++ b/drivers/input/touchscreen/ucb1400_ts.c
@@ -83,7 +83,7 @@
83 83
84 84
85struct ucb1400 { 85struct ucb1400 {
86 ac97_t *ac97; 86 struct snd_ac97 *ac97;
87 struct input_dev *ts_idev; 87 struct input_dev *ts_idev;
88 88
89 int irq; 89 int irq;
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index b10972ed0c9f..099f0afd394d 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -62,7 +62,7 @@ static struct kvm_stats_debugfs_item {
62 { "halt_exits", &kvm_stat.halt_exits }, 62 { "halt_exits", &kvm_stat.halt_exits },
63 { "request_irq", &kvm_stat.request_irq_exits }, 63 { "request_irq", &kvm_stat.request_irq_exits },
64 { "irq_exits", &kvm_stat.irq_exits }, 64 { "irq_exits", &kvm_stat.irq_exits },
65 { 0, 0 } 65 { NULL, NULL }
66}; 66};
67 67
68static struct dentry *debugfs_dir; 68static struct dentry *debugfs_dir;
@@ -205,7 +205,7 @@ static struct kvm_vcpu *vcpu_load(struct kvm *kvm, int vcpu_slot)
205 mutex_lock(&vcpu->mutex); 205 mutex_lock(&vcpu->mutex);
206 if (unlikely(!vcpu->vmcs)) { 206 if (unlikely(!vcpu->vmcs)) {
207 mutex_unlock(&vcpu->mutex); 207 mutex_unlock(&vcpu->mutex);
208 return 0; 208 return NULL;
209 } 209 }
210 return kvm_arch_ops->vcpu_load(vcpu); 210 return kvm_arch_ops->vcpu_load(vcpu);
211} 211}
@@ -257,9 +257,9 @@ static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
257 if (!dont || free->dirty_bitmap != dont->dirty_bitmap) 257 if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
258 vfree(free->dirty_bitmap); 258 vfree(free->dirty_bitmap);
259 259
260 free->phys_mem = 0; 260 free->phys_mem = NULL;
261 free->npages = 0; 261 free->npages = 0;
262 free->dirty_bitmap = 0; 262 free->dirty_bitmap = NULL;
263} 263}
264 264
265static void kvm_free_physmem(struct kvm *kvm) 265static void kvm_free_physmem(struct kvm *kvm)
@@ -267,7 +267,7 @@ static void kvm_free_physmem(struct kvm *kvm)
267 int i; 267 int i;
268 268
269 for (i = 0; i < kvm->nmemslots; ++i) 269 for (i = 0; i < kvm->nmemslots; ++i)
270 kvm_free_physmem_slot(&kvm->memslots[i], 0); 270 kvm_free_physmem_slot(&kvm->memslots[i], NULL);
271} 271}
272 272
273static void kvm_free_vcpu(struct kvm_vcpu *vcpu) 273static void kvm_free_vcpu(struct kvm_vcpu *vcpu)
@@ -640,11 +640,11 @@ raced:
640 640
641 /* Deallocate if slot is being removed */ 641 /* Deallocate if slot is being removed */
642 if (!npages) 642 if (!npages)
643 new.phys_mem = 0; 643 new.phys_mem = NULL;
644 644
645 /* Free page dirty bitmap if unneeded */ 645 /* Free page dirty bitmap if unneeded */
646 if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES)) 646 if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
647 new.dirty_bitmap = 0; 647 new.dirty_bitmap = NULL;
648 648
649 r = -ENOMEM; 649 r = -ENOMEM;
650 650
@@ -799,14 +799,14 @@ struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
799 && gfn < memslot->base_gfn + memslot->npages) 799 && gfn < memslot->base_gfn + memslot->npages)
800 return memslot; 800 return memslot;
801 } 801 }
802 return 0; 802 return NULL;
803} 803}
804EXPORT_SYMBOL_GPL(gfn_to_memslot); 804EXPORT_SYMBOL_GPL(gfn_to_memslot);
805 805
806void mark_page_dirty(struct kvm *kvm, gfn_t gfn) 806void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
807{ 807{
808 int i; 808 int i;
809 struct kvm_memory_slot *memslot = 0; 809 struct kvm_memory_slot *memslot = NULL;
810 unsigned long rel_gfn; 810 unsigned long rel_gfn;
811 811
812 for (i = 0; i < kvm->nmemslots; ++i) { 812 for (i = 0; i < kvm->nmemslots; ++i) {
@@ -1778,6 +1778,7 @@ static long kvm_dev_ioctl(struct file *filp,
1778 unsigned int ioctl, unsigned long arg) 1778 unsigned int ioctl, unsigned long arg)
1779{ 1779{
1780 struct kvm *kvm = filp->private_data; 1780 struct kvm *kvm = filp->private_data;
1781 void __user *argp = (void __user *)arg;
1781 int r = -EINVAL; 1782 int r = -EINVAL;
1782 1783
1783 switch (ioctl) { 1784 switch (ioctl) {
@@ -1794,12 +1795,12 @@ static long kvm_dev_ioctl(struct file *filp,
1794 struct kvm_run kvm_run; 1795 struct kvm_run kvm_run;
1795 1796
1796 r = -EFAULT; 1797 r = -EFAULT;
1797 if (copy_from_user(&kvm_run, (void *)arg, sizeof kvm_run)) 1798 if (copy_from_user(&kvm_run, argp, sizeof kvm_run))
1798 goto out; 1799 goto out;
1799 r = kvm_dev_ioctl_run(kvm, &kvm_run); 1800 r = kvm_dev_ioctl_run(kvm, &kvm_run);
1800 if (r < 0 && r != -EINTR) 1801 if (r < 0 && r != -EINTR)
1801 goto out; 1802 goto out;
1802 if (copy_to_user((void *)arg, &kvm_run, sizeof kvm_run)) { 1803 if (copy_to_user(argp, &kvm_run, sizeof kvm_run)) {
1803 r = -EFAULT; 1804 r = -EFAULT;
1804 goto out; 1805 goto out;
1805 } 1806 }
@@ -1809,13 +1810,13 @@ static long kvm_dev_ioctl(struct file *filp,
1809 struct kvm_regs kvm_regs; 1810 struct kvm_regs kvm_regs;
1810 1811
1811 r = -EFAULT; 1812 r = -EFAULT;
1812 if (copy_from_user(&kvm_regs, (void *)arg, sizeof kvm_regs)) 1813 if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs))
1813 goto out; 1814 goto out;
1814 r = kvm_dev_ioctl_get_regs(kvm, &kvm_regs); 1815 r = kvm_dev_ioctl_get_regs(kvm, &kvm_regs);
1815 if (r) 1816 if (r)
1816 goto out; 1817 goto out;
1817 r = -EFAULT; 1818 r = -EFAULT;
1818 if (copy_to_user((void *)arg, &kvm_regs, sizeof kvm_regs)) 1819 if (copy_to_user(argp, &kvm_regs, sizeof kvm_regs))
1819 goto out; 1820 goto out;
1820 r = 0; 1821 r = 0;
1821 break; 1822 break;
@@ -1824,7 +1825,7 @@ static long kvm_dev_ioctl(struct file *filp,
1824 struct kvm_regs kvm_regs; 1825 struct kvm_regs kvm_regs;
1825 1826
1826 r = -EFAULT; 1827 r = -EFAULT;
1827 if (copy_from_user(&kvm_regs, (void *)arg, sizeof kvm_regs)) 1828 if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs))
1828 goto out; 1829 goto out;
1829 r = kvm_dev_ioctl_set_regs(kvm, &kvm_regs); 1830 r = kvm_dev_ioctl_set_regs(kvm, &kvm_regs);
1830 if (r) 1831 if (r)
@@ -1836,13 +1837,13 @@ static long kvm_dev_ioctl(struct file *filp,
1836 struct kvm_sregs kvm_sregs; 1837 struct kvm_sregs kvm_sregs;
1837 1838
1838 r = -EFAULT; 1839 r = -EFAULT;
1839 if (copy_from_user(&kvm_sregs, (void *)arg, sizeof kvm_sregs)) 1840 if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
1840 goto out; 1841 goto out;
1841 r = kvm_dev_ioctl_get_sregs(kvm, &kvm_sregs); 1842 r = kvm_dev_ioctl_get_sregs(kvm, &kvm_sregs);
1842 if (r) 1843 if (r)
1843 goto out; 1844 goto out;
1844 r = -EFAULT; 1845 r = -EFAULT;
1845 if (copy_to_user((void *)arg, &kvm_sregs, sizeof kvm_sregs)) 1846 if (copy_to_user(argp, &kvm_sregs, sizeof kvm_sregs))
1846 goto out; 1847 goto out;
1847 r = 0; 1848 r = 0;
1848 break; 1849 break;
@@ -1851,7 +1852,7 @@ static long kvm_dev_ioctl(struct file *filp,
1851 struct kvm_sregs kvm_sregs; 1852 struct kvm_sregs kvm_sregs;
1852 1853
1853 r = -EFAULT; 1854 r = -EFAULT;
1854 if (copy_from_user(&kvm_sregs, (void *)arg, sizeof kvm_sregs)) 1855 if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
1855 goto out; 1856 goto out;
1856 r = kvm_dev_ioctl_set_sregs(kvm, &kvm_sregs); 1857 r = kvm_dev_ioctl_set_sregs(kvm, &kvm_sregs);
1857 if (r) 1858 if (r)
@@ -1863,13 +1864,13 @@ static long kvm_dev_ioctl(struct file *filp,
1863 struct kvm_translation tr; 1864 struct kvm_translation tr;
1864 1865
1865 r = -EFAULT; 1866 r = -EFAULT;
1866 if (copy_from_user(&tr, (void *)arg, sizeof tr)) 1867 if (copy_from_user(&tr, argp, sizeof tr))
1867 goto out; 1868 goto out;
1868 r = kvm_dev_ioctl_translate(kvm, &tr); 1869 r = kvm_dev_ioctl_translate(kvm, &tr);
1869 if (r) 1870 if (r)
1870 goto out; 1871 goto out;
1871 r = -EFAULT; 1872 r = -EFAULT;
1872 if (copy_to_user((void *)arg, &tr, sizeof tr)) 1873 if (copy_to_user(argp, &tr, sizeof tr))
1873 goto out; 1874 goto out;
1874 r = 0; 1875 r = 0;
1875 break; 1876 break;
@@ -1878,7 +1879,7 @@ static long kvm_dev_ioctl(struct file *filp,
1878 struct kvm_interrupt irq; 1879 struct kvm_interrupt irq;
1879 1880
1880 r = -EFAULT; 1881 r = -EFAULT;
1881 if (copy_from_user(&irq, (void *)arg, sizeof irq)) 1882 if (copy_from_user(&irq, argp, sizeof irq))
1882 goto out; 1883 goto out;
1883 r = kvm_dev_ioctl_interrupt(kvm, &irq); 1884 r = kvm_dev_ioctl_interrupt(kvm, &irq);
1884 if (r) 1885 if (r)
@@ -1890,7 +1891,7 @@ static long kvm_dev_ioctl(struct file *filp,
1890 struct kvm_debug_guest dbg; 1891 struct kvm_debug_guest dbg;
1891 1892
1892 r = -EFAULT; 1893 r = -EFAULT;
1893 if (copy_from_user(&dbg, (void *)arg, sizeof dbg)) 1894 if (copy_from_user(&dbg, argp, sizeof dbg))
1894 goto out; 1895 goto out;
1895 r = kvm_dev_ioctl_debug_guest(kvm, &dbg); 1896 r = kvm_dev_ioctl_debug_guest(kvm, &dbg);
1896 if (r) 1897 if (r)
@@ -1902,7 +1903,7 @@ static long kvm_dev_ioctl(struct file *filp,
1902 struct kvm_memory_region kvm_mem; 1903 struct kvm_memory_region kvm_mem;
1903 1904
1904 r = -EFAULT; 1905 r = -EFAULT;
1905 if (copy_from_user(&kvm_mem, (void *)arg, sizeof kvm_mem)) 1906 if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
1906 goto out; 1907 goto out;
1907 r = kvm_dev_ioctl_set_memory_region(kvm, &kvm_mem); 1908 r = kvm_dev_ioctl_set_memory_region(kvm, &kvm_mem);
1908 if (r) 1909 if (r)
@@ -1913,7 +1914,7 @@ static long kvm_dev_ioctl(struct file *filp,
1913 struct kvm_dirty_log log; 1914 struct kvm_dirty_log log;
1914 1915
1915 r = -EFAULT; 1916 r = -EFAULT;
1916 if (copy_from_user(&log, (void *)arg, sizeof log)) 1917 if (copy_from_user(&log, argp, sizeof log))
1917 goto out; 1918 goto out;
1918 r = kvm_dev_ioctl_get_dirty_log(kvm, &log); 1919 r = kvm_dev_ioctl_get_dirty_log(kvm, &log);
1919 if (r) 1920 if (r)
@@ -1921,13 +1922,13 @@ static long kvm_dev_ioctl(struct file *filp,
1921 break; 1922 break;
1922 } 1923 }
1923 case KVM_GET_MSRS: 1924 case KVM_GET_MSRS:
1924 r = msr_io(kvm, (void __user *)arg, get_msr, 1); 1925 r = msr_io(kvm, argp, get_msr, 1);
1925 break; 1926 break;
1926 case KVM_SET_MSRS: 1927 case KVM_SET_MSRS:
1927 r = msr_io(kvm, (void __user *)arg, do_set_msr, 0); 1928 r = msr_io(kvm, argp, do_set_msr, 0);
1928 break; 1929 break;
1929 case KVM_GET_MSR_INDEX_LIST: { 1930 case KVM_GET_MSR_INDEX_LIST: {
1930 struct kvm_msr_list __user *user_msr_list = (void __user *)arg; 1931 struct kvm_msr_list __user *user_msr_list = argp;
1931 struct kvm_msr_list msr_list; 1932 struct kvm_msr_list msr_list;
1932 unsigned n; 1933 unsigned n;
1933 1934
@@ -2014,7 +2015,7 @@ static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
2014 * in vmx root mode. 2015 * in vmx root mode.
2015 */ 2016 */
2016 printk(KERN_INFO "kvm: exiting hardware virtualization\n"); 2017 printk(KERN_INFO "kvm: exiting hardware virtualization\n");
2017 on_each_cpu(kvm_arch_ops->hardware_disable, 0, 0, 1); 2018 on_each_cpu(kvm_arch_ops->hardware_disable, NULL, 0, 1);
2018 } 2019 }
2019 return NOTIFY_OK; 2020 return NOTIFY_OK;
2020} 2021}
@@ -2028,7 +2029,7 @@ static __init void kvm_init_debug(void)
2028{ 2029{
2029 struct kvm_stats_debugfs_item *p; 2030 struct kvm_stats_debugfs_item *p;
2030 2031
2031 debugfs_dir = debugfs_create_dir("kvm", 0); 2032 debugfs_dir = debugfs_create_dir("kvm", NULL);
2032 for (p = debugfs_entries; p->name; ++p) 2033 for (p = debugfs_entries; p->name; ++p)
2033 p->dentry = debugfs_create_u32(p->name, 0444, debugfs_dir, 2034 p->dentry = debugfs_create_u32(p->name, 0444, debugfs_dir,
2034 p->data); 2035 p->data);
@@ -2069,7 +2070,7 @@ int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module)
2069 if (r < 0) 2070 if (r < 0)
2070 return r; 2071 return r;
2071 2072
2072 on_each_cpu(kvm_arch_ops->hardware_enable, 0, 0, 1); 2073 on_each_cpu(kvm_arch_ops->hardware_enable, NULL, 0, 1);
2073 register_reboot_notifier(&kvm_reboot_notifier); 2074 register_reboot_notifier(&kvm_reboot_notifier);
2074 2075
2075 kvm_chardev_ops.owner = module; 2076 kvm_chardev_ops.owner = module;
@@ -2084,7 +2085,7 @@ int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module)
2084 2085
2085out_free: 2086out_free:
2086 unregister_reboot_notifier(&kvm_reboot_notifier); 2087 unregister_reboot_notifier(&kvm_reboot_notifier);
2087 on_each_cpu(kvm_arch_ops->hardware_disable, 0, 0, 1); 2088 on_each_cpu(kvm_arch_ops->hardware_disable, NULL, 0, 1);
2088 kvm_arch_ops->hardware_unsetup(); 2089 kvm_arch_ops->hardware_unsetup();
2089 return r; 2090 return r;
2090} 2091}
@@ -2094,7 +2095,7 @@ void kvm_exit_arch(void)
2094 misc_deregister(&kvm_dev); 2095 misc_deregister(&kvm_dev);
2095 2096
2096 unregister_reboot_notifier(&kvm_reboot_notifier); 2097 unregister_reboot_notifier(&kvm_reboot_notifier);
2097 on_each_cpu(kvm_arch_ops->hardware_disable, 0, 0, 1); 2098 on_each_cpu(kvm_arch_ops->hardware_disable, NULL, 0, 1);
2098 kvm_arch_ops->hardware_unsetup(); 2099 kvm_arch_ops->hardware_unsetup();
2099 kvm_arch_ops = NULL; 2100 kvm_arch_ops = NULL;
2100} 2101}
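
Casting arg to void __user * once at the top of kvm_dev_ioctl() replaces a dozen ad-hoc (void *)arg casts and gives every copy_from_user()/copy_to_user() call a properly typed pointer (which also keeps sparse quiet). The idiom in a condensed, hypothetical handler:

    static long example_ioctl(struct file *filp, unsigned int ioctl,
                              unsigned long arg)
    {
            void __user *argp = (void __user *)arg;
            struct kvm_regs regs;

            if (copy_from_user(&regs, argp, sizeof regs))
                    return -EFAULT;
            /* ... act on regs ... */
            if (copy_to_user(argp, &regs, sizeof regs))
                    return -EFAULT;
            return 0;
    }
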
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 22c426cd8cb2..be793770f31b 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -333,7 +333,7 @@ static void rmap_desc_remove_entry(struct kvm_vcpu *vcpu,
333 for (j = RMAP_EXT - 1; !desc->shadow_ptes[j] && j > i; --j) 333 for (j = RMAP_EXT - 1; !desc->shadow_ptes[j] && j > i; --j)
334 ; 334 ;
335 desc->shadow_ptes[i] = desc->shadow_ptes[j]; 335 desc->shadow_ptes[i] = desc->shadow_ptes[j];
336 desc->shadow_ptes[j] = 0; 336 desc->shadow_ptes[j] = NULL;
337 if (j != 0) 337 if (j != 0)
338 return; 338 return;
339 if (!prev_desc && !desc->more) 339 if (!prev_desc && !desc->more)
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
index c79df79307ed..85f61dd1e936 100644
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -274,7 +274,7 @@ static void svm_hardware_disable(void *garbage)
274 wrmsrl(MSR_VM_HSAVE_PA, 0); 274 wrmsrl(MSR_VM_HSAVE_PA, 0);
275 rdmsrl(MSR_EFER, efer); 275 rdmsrl(MSR_EFER, efer);
276 wrmsrl(MSR_EFER, efer & ~MSR_EFER_SVME_MASK); 276 wrmsrl(MSR_EFER, efer & ~MSR_EFER_SVME_MASK);
277 per_cpu(svm_data, raw_smp_processor_id()) = 0; 277 per_cpu(svm_data, raw_smp_processor_id()) = NULL;
278 __free_page(svm_data->save_area); 278 __free_page(svm_data->save_area);
279 kfree(svm_data); 279 kfree(svm_data);
280 } 280 }
@@ -642,7 +642,7 @@ static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
642 case VCPU_SREG_LDTR: return &save->ldtr; 642 case VCPU_SREG_LDTR: return &save->ldtr;
643 } 643 }
644 BUG(); 644 BUG();
645 return 0; 645 return NULL;
646} 646}
647 647
648static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg) 648static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
@@ -934,7 +934,7 @@ static int io_get_override(struct kvm_vcpu *vcpu,
934 return 0; 934 return 0;
935 935
936 *addr_override = 0; 936 *addr_override = 0;
937 *seg = 0; 937 *seg = NULL;
938 for (i = 0; i < ins_length; i++) 938 for (i = 0; i < ins_length; i++)
939 switch (inst[i]) { 939 switch (inst[i]) {
940 case 0xf0: 940 case 0xf0:
@@ -1087,7 +1087,7 @@ static int cpuid_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1087 1087
1088static int emulate_on_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 1088static int emulate_on_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1089{ 1089{
1090 if (emulate_instruction(vcpu, 0, 0, 0) != EMULATE_DONE) 1090 if (emulate_instruction(vcpu, NULL, 0, 0) != EMULATE_DONE)
1091 printk(KERN_ERR "%s: failed\n", __FUNCTION__); 1091 printk(KERN_ERR "%s: failed\n", __FUNCTION__);
1092 return 1; 1092 return 1;
1093} 1093}
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index 27f2751c3baa..27e05a77e21a 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -98,7 +98,7 @@ static struct vmx_msr_entry *find_msr_entry(struct kvm_vcpu *vcpu, u32 msr)
98 for (i = 0; i < vcpu->nmsrs; ++i) 98 for (i = 0; i < vcpu->nmsrs; ++i)
99 if (vcpu->guest_msrs[i].index == msr) 99 if (vcpu->guest_msrs[i].index == msr)
100 return &vcpu->guest_msrs[i]; 100 return &vcpu->guest_msrs[i];
101 return 0; 101 return NULL;
102} 102}
103 103
104static void vmcs_clear(struct vmcs *vmcs) 104static void vmcs_clear(struct vmcs *vmcs)
@@ -1116,6 +1116,8 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
1116 1116
1117 if (rdmsr_safe(index, &data_low, &data_high) < 0) 1117 if (rdmsr_safe(index, &data_low, &data_high) < 0)
1118 continue; 1118 continue;
1119 if (wrmsr_safe(index, data_low, data_high) < 0)
1120 continue;
1119 data = data_low | ((u64)data_high << 32); 1121 data = data_low | ((u64)data_high << 32);
1120 vcpu->host_msrs[j].index = index; 1122 vcpu->host_msrs[j].index = index;
1121 vcpu->host_msrs[j].reserved = 0; 1123 vcpu->host_msrs[j].reserved = 0;
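
The added wrmsr_safe() probe makes host-MSR save/restore symmetric: an MSR that can be read but not written back would fault later when the saved value is restored on vcpu switch, so it is skipped at setup time just like an absent one. The rule as a hypothetical helper:

    /* only save MSRs we will also be able to restore */
    static inline int msr_save_restorable(u32 index, u64 *val)
    {
            u32 lo, hi;

            if (rdmsr_safe(index, &lo, &hi) < 0)
                    return 0;               /* not present on this CPU */
            if (wrmsr_safe(index, lo, hi) < 0)
                    return 0;               /* cannot be written back */
            *val = lo | ((u64)hi << 32);
            return 1;
    }
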
diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig
index a9e747c39791..1a86387e23be 100644
--- a/drivers/macintosh/Kconfig
+++ b/drivers/macintosh/Kconfig
@@ -1,6 +1,6 @@
1 1
2menu "Macintosh device drivers" 2menu "Macintosh device drivers"
3 depends on PPC || MAC 3 depends on PPC || MAC || X86
4 4
5config ADB 5config ADB
6 bool "Apple Desktop Bus (ADB) support" 6 bool "Apple Desktop Bus (ADB) support"
diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c
index 5ed41fe84e57..f83fad2a3ff4 100644
--- a/drivers/macintosh/rack-meter.c
+++ b/drivers/macintosh/rack-meter.c
@@ -171,11 +171,11 @@ static void rackmeter_setup_dbdma(struct rackmeter *rm)
171 /* Make sure dbdma is reset */ 171 /* Make sure dbdma is reset */
172 DBDMA_DO_RESET(rm->dma_regs); 172 DBDMA_DO_RESET(rm->dma_regs);
173 173
174 pr_debug("rackmeter: mark offset=0x%lx\n", 174 pr_debug("rackmeter: mark offset=0x%zx\n",
175 offsetof(struct rackmeter_dma, mark)); 175 offsetof(struct rackmeter_dma, mark));
176 pr_debug("rackmeter: buf1 offset=0x%lx\n", 176 pr_debug("rackmeter: buf1 offset=0x%zx\n",
177 offsetof(struct rackmeter_dma, buf1)); 177 offsetof(struct rackmeter_dma, buf1));
178 pr_debug("rackmeter: buf2 offset=0x%lx\n", 178 pr_debug("rackmeter: buf2 offset=0x%zx\n",
179 offsetof(struct rackmeter_dma, buf2)); 179 offsetof(struct rackmeter_dma, buf2));
180 180
181 /* Prepare 4 dbdma commands for the 2 buffers */ 181 /* Prepare 4 dbdma commands for the 2 buffers */
diff --git a/drivers/macintosh/windfarm_core.c b/drivers/macintosh/windfarm_core.c
index e947af982f93..94c117ef20c1 100644
--- a/drivers/macintosh/windfarm_core.c
+++ b/drivers/macintosh/windfarm_core.c
@@ -94,8 +94,6 @@ static int wf_thread_func(void *data)
94 DBG("wf: thread started\n"); 94 DBG("wf: thread started\n");
95 95
96 while(!kthread_should_stop()) { 96 while(!kthread_should_stop()) {
97 try_to_freeze();
98
99 if (time_after_eq(jiffies, next)) { 97 if (time_after_eq(jiffies, next)) {
100 wf_notify(WF_EVENT_TICK, NULL); 98 wf_notify(WF_EVENT_TICK, NULL);
101 if (wf_overtemp) { 99 if (wf_overtemp) {
@@ -118,8 +116,8 @@ static int wf_thread_func(void *data)
118 if (delay <= HZ) 116 if (delay <= HZ)
119 schedule_timeout_interruptible(delay); 117 schedule_timeout_interruptible(delay);
120 118
121 /* there should be no signal, but oh well */ 119 /* there should be no non-suspend signal, but oh well */
122 if (signal_pending(current)) { 120 if (signal_pending(current) && !try_to_freeze()) {
 123 printk(KERN_WARNING "windfarm: thread got signal!\n"); 121 printk(KERN_WARNING "windfarm: thread got signal!\n");
124 break; 122 break;
125 } 123 }
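
Folding try_to_freeze() into the signal check makes the freezer's fake signal the only one the thread tolerates: try_to_freeze() returns true when the thread was frozen and has just thawed, so any genuine signal still hits the warning and stops the loop. The idiom, reduced to a sketch:

    while (!kthread_should_stop()) {
            schedule_timeout_interruptible(HZ);

            if (signal_pending(current) && !try_to_freeze()) {
                    printk(KERN_WARNING "unexpected signal, stopping\n");
                    break;
            }
    }
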
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 11108165e264..059704fbb753 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1160,6 +1160,22 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
1160 return 0; 1160 return 0;
1161 } 1161 }
1162 1162
1163 if (unlikely((*bmc & COUNTER_MAX) == COUNTER_MAX)) {
1164 DEFINE_WAIT(__wait);
1165 /* note that it is safe to do the prepare_to_wait
1166 * after the test as long as we do it before dropping
1167 * the spinlock.
1168 */
1169 prepare_to_wait(&bitmap->overflow_wait, &__wait,
1170 TASK_UNINTERRUPTIBLE);
1171 spin_unlock_irq(&bitmap->lock);
1172 bitmap->mddev->queue
1173 ->unplug_fn(bitmap->mddev->queue);
1174 schedule();
1175 finish_wait(&bitmap->overflow_wait, &__wait);
1176 continue;
1177 }
1178
1163 switch(*bmc) { 1179 switch(*bmc) {
1164 case 0: 1180 case 0:
1165 bitmap_file_set_bit(bitmap, offset); 1181 bitmap_file_set_bit(bitmap, offset);
@@ -1169,7 +1185,7 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
1169 case 1: 1185 case 1:
1170 *bmc = 2; 1186 *bmc = 2;
1171 } 1187 }
1172 BUG_ON((*bmc & COUNTER_MAX) == COUNTER_MAX); 1188
1173 (*bmc)++; 1189 (*bmc)++;
1174 1190
1175 spin_unlock_irq(&bitmap->lock); 1191 spin_unlock_irq(&bitmap->lock);
@@ -1207,6 +1223,9 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
1207 if (!success && ! (*bmc & NEEDED_MASK)) 1223 if (!success && ! (*bmc & NEEDED_MASK))
1208 *bmc |= NEEDED_MASK; 1224 *bmc |= NEEDED_MASK;
1209 1225
1226 if ((*bmc & COUNTER_MAX) == COUNTER_MAX)
1227 wake_up(&bitmap->overflow_wait);
1228
1210 (*bmc)--; 1229 (*bmc)--;
1211 if (*bmc <= 2) { 1230 if (*bmc <= 2) {
1212 set_page_attr(bitmap, 1231 set_page_attr(bitmap,
@@ -1431,6 +1450,7 @@ int bitmap_create(mddev_t *mddev)
1431 spin_lock_init(&bitmap->lock); 1450 spin_lock_init(&bitmap->lock);
1432 atomic_set(&bitmap->pending_writes, 0); 1451 atomic_set(&bitmap->pending_writes, 0);
1433 init_waitqueue_head(&bitmap->write_wait); 1452 init_waitqueue_head(&bitmap->write_wait);
1453 init_waitqueue_head(&bitmap->overflow_wait);
1434 1454
1435 bitmap->mddev = mddev; 1455 bitmap->mddev = mddev;
1436 1456
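
The ordering in the bitmap hunk is what makes the sleep race-free: prepare_to_wait() queues the writer on overflow_wait before bitmap->lock is dropped, so the wake_up() issued by bitmap_endwrite() when it decrements a saturated counter cannot fall between the test and the sleep. Both halves of the protocol, condensed:

    /* writer: per-chunk counter saturated at COUNTER_MAX */
    prepare_to_wait(&bitmap->overflow_wait, &__wait, TASK_UNINTERRUPTIBLE);
    spin_unlock_irq(&bitmap->lock);
    schedule();                             /* woken by bitmap_endwrite() */
    finish_wait(&bitmap->overflow_wait, &__wait);

    /* completion side: about to free a slot in the counter */
    if ((*bmc & COUNTER_MAX) == COUNTER_MAX)
            wake_up(&bitmap->overflow_wait);
    (*bmc)--;
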
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 467c16982d02..11c3d7bfa797 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2620,7 +2620,7 @@ static struct bio *remove_bio_from_retry(raid5_conf_t *conf)
2620 } 2620 }
2621 bi = conf->retry_read_aligned_list; 2621 bi = conf->retry_read_aligned_list;
2622 if(bi) { 2622 if(bi) {
2623 conf->retry_read_aligned = bi->bi_next; 2623 conf->retry_read_aligned_list = bi->bi_next;
2624 bi->bi_next = NULL; 2624 bi->bi_next = NULL;
2625 bi->bi_phys_segments = 1; /* biased count of active stripes */ 2625 bi->bi_phys_segments = 1; /* biased count of active stripes */
2626 bi->bi_hw_segments = 0; /* count of processed stripes */ 2626 bi->bi_hw_segments = 0; /* count of processed stripes */
@@ -2669,6 +2669,27 @@ static int raid5_align_endio(struct bio *bi, unsigned int bytes, int error)
2669 return 0; 2669 return 0;
2670} 2670}
2671 2671
2672static int bio_fits_rdev(struct bio *bi)
2673{
2674 request_queue_t *q = bdev_get_queue(bi->bi_bdev);
2675
2676 if ((bi->bi_size>>9) > q->max_sectors)
2677 return 0;
2678 blk_recount_segments(q, bi);
2679 if (bi->bi_phys_segments > q->max_phys_segments ||
2680 bi->bi_hw_segments > q->max_hw_segments)
2681 return 0;
2682
2683 if (q->merge_bvec_fn)
2684 /* it's too hard to apply the merge_bvec_fn at this stage,
2685 * just just give up
2686 */
2687 return 0;
2688
2689 return 1;
2690}
2691
2692
2672static int chunk_aligned_read(request_queue_t *q, struct bio * raid_bio) 2693static int chunk_aligned_read(request_queue_t *q, struct bio * raid_bio)
2673{ 2694{
2674 mddev_t *mddev = q->queuedata; 2695 mddev_t *mddev = q->queuedata;
@@ -2715,6 +2736,13 @@ static int chunk_aligned_read(request_queue_t *q, struct bio * raid_bio)
2715 align_bi->bi_flags &= ~(1 << BIO_SEG_VALID); 2736 align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
2716 align_bi->bi_sector += rdev->data_offset; 2737 align_bi->bi_sector += rdev->data_offset;
2717 2738
2739 if (!bio_fits_rdev(align_bi)) {
2740 /* too big in some way */
2741 bio_put(align_bi);
2742 rdev_dec_pending(rdev, mddev);
2743 return 0;
2744 }
2745
2718 spin_lock_irq(&conf->device_lock); 2746 spin_lock_irq(&conf->device_lock);
2719 wait_event_lock_irq(conf->wait_for_stripe, 2747 wait_event_lock_irq(conf->wait_for_stripe,
2720 conf->quiesce == 0, 2748 conf->quiesce == 0,
@@ -3107,7 +3135,9 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
3107 last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9); 3135 last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9);
3108 3136
3109 for (; logical_sector < last_sector; 3137 for (; logical_sector < last_sector;
3110 logical_sector += STRIPE_SECTORS, scnt++) { 3138 logical_sector += STRIPE_SECTORS,
3139 sector += STRIPE_SECTORS,
3140 scnt++) {
3111 3141
3112 if (scnt < raid_bio->bi_hw_segments) 3142 if (scnt < raid_bio->bi_hw_segments)
3113 /* already done this stripe */ 3143 /* already done this stripe */
@@ -3123,7 +3153,13 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
3123 } 3153 }
3124 3154
3125 set_bit(R5_ReadError, &sh->dev[dd_idx].flags); 3155 set_bit(R5_ReadError, &sh->dev[dd_idx].flags);
3126 add_stripe_bio(sh, raid_bio, dd_idx, 0); 3156 if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) {
3157 release_stripe(sh);
3158 raid_bio->bi_hw_segments = scnt;
3159 conf->retry_read_aligned = raid_bio;
3160 return handled;
3161 }
3162
3127 handle_stripe(sh, NULL); 3163 handle_stripe(sh, NULL);
3128 release_stripe(sh); 3164 release_stripe(sh);
3129 handled++; 3165 handled++;
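
The first raid5.c hunk is a one-word bug fix: remove_bio_from_retry() stored the next pointer into conf->retry_read_aligned (the bio currently being retried) instead of advancing conf->retry_read_aligned_list, so the list head went stale after a pop. The intended head-pop, in isolation:

    bi = conf->retry_read_aligned_list;
    if (bi) {
            conf->retry_read_aligned_list = bi->bi_next;    /* advance head */
            bi->bi_next = NULL;                             /* detach bio */
    }
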
diff --git a/drivers/media/common/ir-keymaps.c b/drivers/media/common/ir-keymaps.c
index f51e02fe3655..0e948a5c5a03 100644
--- a/drivers/media/common/ir-keymaps.c
+++ b/drivers/media/common/ir-keymaps.c
@@ -698,7 +698,6 @@ IR_KEYTAB_TYPE ir_codes_pinnacle_grey[IR_KEYTAB_SIZE] = {
698 [ 0x29 ] = KEY_TEXT, 698 [ 0x29 ] = KEY_TEXT,
699 [ 0x2a ] = KEY_MEDIA, 699 [ 0x2a ] = KEY_MEDIA,
700 [ 0x18 ] = KEY_EPG, 700 [ 0x18 ] = KEY_EPG,
701 [ 0x27 ] = KEY_RECORD,
702}; 701};
703 702
704EXPORT_SYMBOL_GPL(ir_codes_pinnacle_grey); 703EXPORT_SYMBOL_GPL(ir_codes_pinnacle_grey);
diff --git a/drivers/media/video/usbvision/usbvision-video.c b/drivers/media/video/usbvision/usbvision-video.c
index 7243337b771a..bdd6301d2a47 100644
--- a/drivers/media/video/usbvision/usbvision-video.c
+++ b/drivers/media/video/usbvision/usbvision-video.c
@@ -1072,7 +1072,7 @@ static int usbvision_v4l2_ioctl(struct inode *inode, struct file *file,
1072} 1072}
1073 1073
1074 1074
1075static ssize_t usbvision_v4l2_read(struct file *file, char *buf, 1075static ssize_t usbvision_v4l2_read(struct file *file, char __user *buf,
1076 size_t count, loff_t *ppos) 1076 size_t count, loff_t *ppos)
1077{ 1077{
1078 struct video_device *dev = video_devdata(file); 1078 struct video_device *dev = video_devdata(file);
diff --git a/drivers/media/video/zc0301/zc0301_sensor.h b/drivers/media/video/zc0301/zc0301_sensor.h
index 4363a915b1f4..3daf049a288a 100644
--- a/drivers/media/video/zc0301/zc0301_sensor.h
+++ b/drivers/media/video/zc0301/zc0301_sensor.h
@@ -75,7 +75,6 @@ static const struct usb_device_id zc0301_id_table[] = { \
75 { ZC0301_USB_DEVICE(0x046d, 0x08ae, 0xff), }, /* PAS202 */ \ 75 { ZC0301_USB_DEVICE(0x046d, 0x08ae, 0xff), }, /* PAS202 */ \
76 { ZC0301_USB_DEVICE(0x055f, 0xd003, 0xff), }, /* TAS5130 */ \ 76 { ZC0301_USB_DEVICE(0x055f, 0xd003, 0xff), }, /* TAS5130 */ \
77 { ZC0301_USB_DEVICE(0x055f, 0xd004, 0xff), }, /* TAS5130 */ \ 77 { ZC0301_USB_DEVICE(0x055f, 0xd004, 0xff), }, /* TAS5130 */ \
78 { ZC0301_USB_DEVICE(0x046d, 0x08ae, 0xff), }, /* PAS202 */ \
79 { ZC0301_USB_DEVICE(0x0ac8, 0x0301, 0xff), }, \ 78 { ZC0301_USB_DEVICE(0x0ac8, 0x0301, 0xff), }, \
80 { ZC0301_USB_DEVICE(0x0ac8, 0x301b, 0xff), }, /* PB-0330/HV7131 */ \ 79 { ZC0301_USB_DEVICE(0x0ac8, 0x301b, 0xff), }, /* PB-0330/HV7131 */ \
81 { ZC0301_USB_DEVICE(0x0ac8, 0x303b, 0xff), }, /* PB-0330 */ \ 80 { ZC0301_USB_DEVICE(0x0ac8, 0x303b, 0xff), }, /* PB-0330 */ \
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 00db31c314e0..bedae4ad3f74 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -42,7 +42,7 @@ config SGI_IOC4
42 42
43config TIFM_CORE 43config TIFM_CORE
44 tristate "TI Flash Media interface support (EXPERIMENTAL)" 44 tristate "TI Flash Media interface support (EXPERIMENTAL)"
45 depends on EXPERIMENTAL 45 depends on EXPERIMENTAL && PCI
46 help 46 help
47 If you want support for Texas Instruments(R) Flash Media adapters 47 If you want support for Texas Instruments(R) Flash Media adapters
48 you should select this option and then also choose an appropriate 48 you should select this option and then also choose an appropriate
@@ -69,6 +69,25 @@ config TIFM_7XX1
69 To compile this driver as a module, choose M here: the module will 69 To compile this driver as a module, choose M here: the module will
70 be called tifm_7xx1. 70 be called tifm_7xx1.
71 71
72config ASUS_LAPTOP
73 tristate "Asus Laptop Extras (EXPERIMENTAL)"
74 depends on X86
75 depends on ACPI
76 depends on EXPERIMENTAL && !ACPI_ASUS
77 depends on LEDS_CLASS
78 depends on BACKLIGHT_CLASS_DEVICE
79 ---help---
80 This is the new Linux driver for Asus laptops. It may also support some
81 MEDION, JVC or VICTOR laptops. It makes all the extra buttons generate
82 standard ACPI events that go through /proc/acpi/events. It also adds
83 support for video output switching, LCD backlight control, Bluetooth and
 84 WLAN control, and most importantly, allows you to blink those fancy LEDs.
85
86 For more information and a userspace daemon for handling the extra
87 buttons see <http://acpi4asus.sf.net/>.
88
89 If you have an ACPI-compatible ASUS laptop, say Y or M here.
90
72config MSI_LAPTOP 91config MSI_LAPTOP
73 tristate "MSI Laptop Extras" 92 tristate "MSI Laptop Extras"
74 depends on X86 93 depends on X86
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index c9e98ab021c5..35da53c409c0 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -6,6 +6,7 @@ obj- := misc.o # Dummy rule to force built-in.o to be made
6obj-$(CONFIG_IBM_ASM) += ibmasm/ 6obj-$(CONFIG_IBM_ASM) += ibmasm/
7obj-$(CONFIG_HDPU_FEATURES) += hdpuftrs/ 7obj-$(CONFIG_HDPU_FEATURES) += hdpuftrs/
8obj-$(CONFIG_MSI_LAPTOP) += msi-laptop.o 8obj-$(CONFIG_MSI_LAPTOP) += msi-laptop.o
9obj-$(CONFIG_ASUS_LAPTOP) += asus-laptop.o
9obj-$(CONFIG_LKDTM) += lkdtm.o 10obj-$(CONFIG_LKDTM) += lkdtm.o
10obj-$(CONFIG_TIFM_CORE) += tifm_core.o 11obj-$(CONFIG_TIFM_CORE) += tifm_core.o
11obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o 12obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o
diff --git a/drivers/misc/asus-laptop.c b/drivers/misc/asus-laptop.c
new file mode 100644
index 000000000000..861c39935f99
--- /dev/null
+++ b/drivers/misc/asus-laptop.c
@@ -0,0 +1,1165 @@
1/*
2 * asus-laptop.c - Asus Laptop Support
3 *
4 *
5 * Copyright (C) 2002-2005 Julien Lerouge, 2003-2006 Karol Kozimor
6 * Copyright (C) 2006 Corentin Chary
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 *
23 * The development page for this driver is located at
24 * http://sourceforge.net/projects/acpi4asus/
25 *
26 * Credits:
27 * Pontus Fuchs - Helper functions, cleanup
28 * Johann Wiesner - Small compile fixes
29 * John Belmonte - ACPI code for Toshiba laptop was a good starting point.
30 * Eric Burghard - LED display support for W1N
31 * Josh Green - Light Sens support
 32 * Thomas Tuttle - His first patch for led support was very helpful
33 *
34 */
35
36#include <linux/autoconf.h>
37#include <linux/kernel.h>
38#include <linux/module.h>
39#include <linux/init.h>
40#include <linux/types.h>
41#include <linux/err.h>
42#include <linux/proc_fs.h>
43#include <linux/backlight.h>
44#include <linux/fb.h>
45#include <linux/leds.h>
46#include <linux/platform_device.h>
47#include <acpi/acpi_drivers.h>
48#include <acpi/acpi_bus.h>
49#include <asm/uaccess.h>
50
51#define ASUS_LAPTOP_VERSION "0.40"
52
53#define ASUS_HOTK_NAME "Asus Laptop Support"
54#define ASUS_HOTK_CLASS "hotkey"
55#define ASUS_HOTK_DEVICE_NAME "Hotkey"
56#define ASUS_HOTK_HID "ATK0100"
57#define ASUS_HOTK_FILE "asus-laptop"
58#define ASUS_HOTK_PREFIX "\\_SB.ATKD."
59
60/*
61 * Some events we use, same for all Asus
62 */
63#define ATKD_BR_UP 0x10
64#define ATKD_BR_DOWN 0x20
65#define ATKD_LCD_ON 0x33
66#define ATKD_LCD_OFF 0x34
67
68/*
69 * Known bits returned by \_SB.ATKD.HWRS
70 */
71#define WL_HWRS 0x80
72#define BT_HWRS 0x100
73
74/*
75 * Flags for hotk status
76 * WL_ON and BT_ON are also used for wireless_status()
77 */
78#define WL_ON 0x01 //internal Wifi
79#define BT_ON 0x02 //internal Bluetooth
80#define MLED_ON 0x04 //mail LED
81#define TLED_ON 0x08 //touchpad LED
82#define RLED_ON 0x10 //Record LED
83#define PLED_ON 0x20 //Phone LED
84#define LCD_ON 0x40 //LCD backlight
85
86#define ASUS_LOG ASUS_HOTK_FILE ": "
87#define ASUS_ERR KERN_ERR ASUS_LOG
88#define ASUS_WARNING KERN_WARNING ASUS_LOG
89#define ASUS_NOTICE KERN_NOTICE ASUS_LOG
90#define ASUS_INFO KERN_INFO ASUS_LOG
91#define ASUS_DEBUG KERN_DEBUG ASUS_LOG
92
93MODULE_AUTHOR("Julien Lerouge, Karol Kozimor, Corentin Chary");
94MODULE_DESCRIPTION(ASUS_HOTK_NAME);
95MODULE_LICENSE("GPL");
96
97#define ASUS_HANDLE(object, paths...) \
98 static acpi_handle object##_handle = NULL; \
99 static char *object##_paths[] = { paths }
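/*
 * Example expansion: ASUS_HANDLE(mled_set, ASUS_HOTK_PREFIX "MLED")
 * yields
 *	static acpi_handle mled_set_handle = NULL;
 *	static char *mled_set_paths[] = { "\\_SB.ATKD.MLED" };
 * ASUS_HANDLE_INIT(mled_set), further down, then walks the path list
 * with acpi_get_handle() until one of the paths resolves.
 */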
100
101/* LED */
102ASUS_HANDLE(mled_set, ASUS_HOTK_PREFIX "MLED");
103ASUS_HANDLE(tled_set, ASUS_HOTK_PREFIX "TLED");
104ASUS_HANDLE(rled_set, ASUS_HOTK_PREFIX "RLED"); /* W1JC */
105ASUS_HANDLE(pled_set, ASUS_HOTK_PREFIX "PLED"); /* A7J */
106
107/* LEDD */
108ASUS_HANDLE(ledd_set, ASUS_HOTK_PREFIX "SLCM");
109
110/* Bluetooth and WLAN
111 * WLED and BLED are not handled like other XLED, because in some dsdt
112 * they also control the WLAN/Bluetooth device.
113 */
114ASUS_HANDLE(wl_switch, ASUS_HOTK_PREFIX "WLED");
115ASUS_HANDLE(bt_switch, ASUS_HOTK_PREFIX "BLED");
116ASUS_HANDLE(wireless_status, ASUS_HOTK_PREFIX "RSTS"); /* All new models */
117
118/* Brightness */
119ASUS_HANDLE(brightness_set, ASUS_HOTK_PREFIX "SPLV");
120ASUS_HANDLE(brightness_get, ASUS_HOTK_PREFIX "GPLV");
121
122/* Backlight */
123ASUS_HANDLE(lcd_switch, "\\_SB.PCI0.SBRG.EC0._Q10", /* All new models */
124 "\\_SB.PCI0.ISA.EC0._Q10", /* A1x */
125 "\\_SB.PCI0.PX40.ECD0._Q10", /* L3C */
126 "\\_SB.PCI0.PX40.EC0.Q10", /* M1A */
127 "\\_SB.PCI0.LPCB.EC0._Q10", /* P30 */
128 "\\_SB.PCI0.PX40.Q10", /* S1x */
129 "\\Q10"); /* A2x, L2D, L3D, M2E */
130
131/* Display */
132ASUS_HANDLE(display_set, ASUS_HOTK_PREFIX "SDSP");
133ASUS_HANDLE(display_get, "\\_SB.PCI0.P0P1.VGA.GETD", /* A6B, A6K A6R A7D F3JM L4R M6R A3G
134 M6A M6V VX-1 V6J V6V W3Z */
135 "\\_SB.PCI0.P0P2.VGA.GETD", /* A3E A4K, A4D A4L A6J A7J A8J Z71V M9V
136 S5A M5A z33A W1Jc W2V */
137 "\\_SB.PCI0.P0P3.VGA.GETD", /* A6V A6Q */
138 "\\_SB.PCI0.P0PA.VGA.GETD", /* A6T, A6M */
139 "\\_SB.PCI0.PCI1.VGAC.NMAP", /* L3C */
140 "\\_SB.PCI0.VGA.GETD", /* Z96F */
141 "\\ACTD", /* A2D */
142 "\\ADVG", /* A4G Z71A W1N W5A W5F M2N M3N M5N M6N S1N S5N */
143 "\\DNXT", /* P30 */
144 "\\INFB", /* A2H D1 L2D L3D L3H L2E L5D L5C M1A M2E L4L W3V */
145 "\\SSTE"); /* A3F A6F A3N A3L M6N W3N W6A */
146
147ASUS_HANDLE(ls_switch, ASUS_HOTK_PREFIX "ALSC"); /* Z71A Z71V */
148ASUS_HANDLE(ls_level, ASUS_HOTK_PREFIX "ALSL"); /* Z71A Z71V */
149
150/*
151 * This is the main structure, we can use it to store anything interesting
152 * about the hotk device
153 */
154struct asus_hotk {
155 char *name; //laptop name
156 struct acpi_device *device; //the device we are in
157 acpi_handle handle; //the handle of the hotk device
158 char status; //status of the hotk, for LEDs, ...
159 u32 ledd_status; //status of the LED display
160 u8 light_level; //light sensor level
161 u8 light_switch; //light sensor switch value
162 u16 event_count[128]; //count for each event TODO make this better
163};
164
165/*
166 * This header is made available to allow proper configuration given model,
167 * revision number, ... This info cannot go in struct asus_hotk because it
168 * is available before the hotk
169 */
170static struct acpi_table_header *asus_info;
171
172/* The actual device the driver binds to */
173static struct asus_hotk *hotk;
174
175/*
176 * The hotkey driver declaration
177 */
178static int asus_hotk_add(struct acpi_device *device);
179static int asus_hotk_remove(struct acpi_device *device, int type);
180static struct acpi_driver asus_hotk_driver = {
181 .name = ASUS_HOTK_NAME,
182 .class = ASUS_HOTK_CLASS,
183 .ids = ASUS_HOTK_HID,
184 .ops = {
185 .add = asus_hotk_add,
186 .remove = asus_hotk_remove,
187 },
188};
189
190/* The backlight device /sys/class/backlight */
191static struct backlight_device *asus_backlight_device;
192
193/*
194 * The backlight class declaration
195 */
196static int read_brightness(struct backlight_device *bd);
197static int update_bl_status(struct backlight_device *bd);
198static struct backlight_properties asusbl_data = {
199 .owner = THIS_MODULE,
200 .get_brightness = read_brightness,
201 .update_status = update_bl_status,
202 .max_brightness = 15,
203};
204
205/* These functions actually update the LED's, and are called from a
206 * workqueue. By doing this as separate work rather than when the LED
207 * subsystem asks, we avoid messing with the Asus ACPI stuff during a
208 * potentially bad time, such as a timer interrupt. */
209static struct workqueue_struct *led_workqueue;
210
211#define ASUS_LED(object, ledname) \
212 static void object##_led_set(struct led_classdev *led_cdev, \
213 enum led_brightness value); \
214 static void object##_led_update(struct work_struct *ignored); \
215 static int object##_led_wk; \
216 DECLARE_WORK(object##_led_work, object##_led_update); \
217 static struct led_classdev object##_led = { \
218 .name = "asus:" ledname, \
219 .brightness_set = object##_led_set, \
220 }
221
222ASUS_LED(mled, "mail");
223ASUS_LED(tled, "touchpad");
224ASUS_LED(rled, "record");
225ASUS_LED(pled, "phone");
226
227/*
228 * This function evaluates an ACPI method, given an int as parameter. The
229 * method is searched within the scope of the handle, which can be NULL. The
230 * output of the method is written to output, which can also be NULL
231 *
232 * returns 1 if the write is successful, 0 otherwise.
233 */
234static int write_acpi_int(acpi_handle handle, const char *method, int val,
235 struct acpi_buffer *output)
236{
237 struct acpi_object_list params; //list of input parameters (an int here)
238 union acpi_object in_obj; //the only param we use
239 acpi_status status;
240
241 params.count = 1;
242 params.pointer = &in_obj;
243 in_obj.type = ACPI_TYPE_INTEGER;
244 in_obj.integer.value = val;
245
246 status = acpi_evaluate_object(handle, (char *)method, &params, output);
247 return (status == AE_OK);
248}
249
250static int read_acpi_int(acpi_handle handle, const char *method, int *val,
251 struct acpi_object_list *params)
252{
253 struct acpi_buffer output;
254 union acpi_object out_obj;
255 acpi_status status;
256
257 output.length = sizeof(out_obj);
258 output.pointer = &out_obj;
259
260 status = acpi_evaluate_object(handle, (char *)method, params, &output);
261 *val = out_obj.integer.value;
262 return (status == AE_OK) && (out_obj.type == ACPI_TYPE_INTEGER);
263}
264
265static int read_wireless_status(int mask)
266{
267 int status;
268
269 if (!wireless_status_handle)
270 return (hotk->status & mask) ? 1 : 0;
271
272 if (read_acpi_int(wireless_status_handle, NULL, &status, NULL)) {
273 return (status & mask) ? 1 : 0;
274 } else
275 printk(ASUS_WARNING "Error reading Wireless status\n");
276
277 return (hotk->status & mask) ? 1 : 0;
278}
279
280/* Generic LED functions */
281static int read_status(int mask)
282{
283 /* There is a special method for both wireless devices */
284 if (mask == BT_ON || mask == WL_ON)
285 return read_wireless_status(mask);
286
287 return (hotk->status & mask) ? 1 : 0;
288}
289
290static void write_status(acpi_handle handle, int out, int mask, int invert)
291{
292 hotk->status = (out) ? (hotk->status | mask) : (hotk->status & ~mask);
293
294 if (invert) /* invert target value */
295 out = !out & 0x1;
296
297 if (handle && !write_acpi_int(handle, NULL, out, NULL))
298 printk(ASUS_WARNING " write failed\n");
299}
300
301/* /sys/class/led handlers */
302#define ASUS_LED_HANDLER(object, mask, invert) \
303 static void object##_led_set(struct led_classdev *led_cdev, \
304 enum led_brightness value) \
305 { \
306 object##_led_wk = value; \
307 queue_work(led_workqueue, &object##_led_work); \
308 } \
309 static void object##_led_update(struct work_struct *ignored) \
310 { \
311 int value = object##_led_wk; \
312 write_status(object##_set_handle, value, (mask), (invert)); \
313 }
314
315ASUS_LED_HANDLER(mled, MLED_ON, 1);
316ASUS_LED_HANDLER(pled, PLED_ON, 0);
317ASUS_LED_HANDLER(rled, RLED_ON, 0);
318ASUS_LED_HANDLER(tled, TLED_ON, 0);
319
320static int get_lcd_state(void)
321{
322 return read_status(LCD_ON);
323}
324
325static int set_lcd_state(int value)
326{
327 int lcd = 0;
328 acpi_status status = 0;
329
330 lcd = value ? 1 : 0;
331
332 if (lcd == get_lcd_state())
333 return 0;
334
335 if (lcd_switch_handle) {
336 status = acpi_evaluate_object(lcd_switch_handle,
337 NULL, NULL, NULL);
338
339 if (ACPI_FAILURE(status))
340 printk(ASUS_WARNING "Error switching LCD\n");
341 }
342
343 write_status(NULL, lcd, LCD_ON, 0);
344 return 0;
345}
346
347static void lcd_blank(int blank)
348{
349 struct backlight_device *bd = asus_backlight_device;
350
351 if (bd) {
352 down(&bd->sem);
353 if (likely(bd->props)) {
354 bd->props->power = blank;
355 if (likely(bd->props->update_status))
356 bd->props->update_status(bd);
357 }
358 up(&bd->sem);
359 }
360}
361
362static int read_brightness(struct backlight_device *bd)
363{
364 int value;
365
366 if (!read_acpi_int(brightness_get_handle, NULL, &value, NULL))
367 printk(ASUS_WARNING "Error reading brightness\n");
368
369 return value;
370}
371
372static int set_brightness(struct backlight_device *bd, int value)
373{
374 int ret = 0;
375
376 value = (0 < value) ? ((15 < value) ? 15 : value) : 0;
377 /* 0 <= value <= 15 */
378
379 if (!write_acpi_int(brightness_set_handle, NULL, value, NULL)) {
380 printk(ASUS_WARNING "Error changing brightness\n");
381 ret = -EIO;
382 }
383
384 return ret;
385}
386
387static int update_bl_status(struct backlight_device *bd)
388{
389 int rv;
390 int value = bd->props->brightness;
391
392 rv = set_brightness(bd, value);
393 if (rv)
394 return rv;
395
396 value = (bd->props->power == FB_BLANK_UNBLANK) ? 1 : 0;
397 return set_lcd_state(value);
398}
399
400/*
401 * Platform device handlers
402 */
403
404/*
405 * We write our info in page, we begin at offset off and cannot write more
406 * than count bytes. We set eof to 1 if we handle those 2 values. We return the
407 * number of bytes written in page
408 */
409static ssize_t show_infos(struct device *dev,
410 struct device_attribute *attr, char *page)
411{
412 int len = 0;
413 int temp;
414 char buf[16]; //enough for all info
415 /*
416 * We take the easy way: we don't care about off and count, so we don't
417 * set eof to 1
418 */
419
420 len += sprintf(page, ASUS_HOTK_NAME " " ASUS_LAPTOP_VERSION "\n");
421 len += sprintf(page + len, "Model reference : %s\n", hotk->name);
422 /*
423 * The SFUN method probably allows the original driver to get the list
424 * of features supported by a given model. For now, 0x0100 or 0x0800
425 * bit signifies that the laptop is equipped with a Wi-Fi MiniPCI card.
426 * The significance of others is yet to be found.
427 */
428 if (read_acpi_int(hotk->handle, "SFUN", &temp, NULL))
429 len +=
430 sprintf(page + len, "SFUN value : 0x%04x\n", temp);
431 /*
432 * Another value for userspace: the ASYM method returns 0x02 for
433 * battery low and 0x04 for battery critical, its readings tend to be
434 * more accurate than those provided by _BST.
435 * Note: since not all the laptops provide this method, errors are
436 * silently ignored.
437 */
438 if (read_acpi_int(hotk->handle, "ASYM", &temp, NULL))
439 len +=
440 sprintf(page + len, "ASYM value : 0x%04x\n", temp);
441 if (asus_info) {
442 snprintf(buf, 16, "%d", asus_info->length);
443 len += sprintf(page + len, "DSDT length : %s\n", buf);
444 snprintf(buf, 16, "%d", asus_info->checksum);
445 len += sprintf(page + len, "DSDT checksum : %s\n", buf);
446 snprintf(buf, 16, "%d", asus_info->revision);
447 len += sprintf(page + len, "DSDT revision : %s\n", buf);
448 snprintf(buf, 7, "%s", asus_info->oem_id);
449 len += sprintf(page + len, "OEM id : %s\n", buf);
450 snprintf(buf, 9, "%s", asus_info->oem_table_id);
451 len += sprintf(page + len, "OEM table id : %s\n", buf);
452 snprintf(buf, 16, "%x", asus_info->oem_revision);
453 len += sprintf(page + len, "OEM revision : 0x%s\n", buf);
454 snprintf(buf, 5, "%s", asus_info->asl_compiler_id);
455 len += sprintf(page + len, "ASL comp vendor id : %s\n", buf);
456 snprintf(buf, 16, "%x", asus_info->asl_compiler_revision);
457 len += sprintf(page + len, "ASL comp revision : 0x%s\n", buf);
458 }
459
460 return len;
461}
462
463static int parse_arg(const char *buf, unsigned long count, int *val)
464{
465 if (!count)
466 return 0;
467 if (count > 31)
468 return -EINVAL;
469 if (sscanf(buf, "%i", val) != 1)
470 return -EINVAL;
471 return count;
472}
473
474static ssize_t store_status(const char *buf, size_t count,
475 acpi_handle handle, int mask, int invert)
476{
477 int rv, value;
478 int out = 0;
479
480 rv = parse_arg(buf, count, &value);
481 if (rv > 0)
482 out = value ? 1 : 0;
483
484 write_status(handle, out, mask, invert);
485
486 return rv;
487}
488
489/*
490 * LEDD display
491 */
492static ssize_t show_ledd(struct device *dev,
493 struct device_attribute *attr, char *buf)
494{
495 return sprintf(buf, "0x%08x\n", hotk->ledd_status);
496}
497
498static ssize_t store_ledd(struct device *dev, struct device_attribute *attr,
499 const char *buf, size_t count)
500{
501 int rv, value;
502
503 rv = parse_arg(buf, count, &value);
504 if (rv > 0) {
505 if (!write_acpi_int(ledd_set_handle, NULL, value, NULL))
506 printk(ASUS_WARNING "LED display write failed\n");
507 else
508 hotk->ledd_status = (u32) value;
509 }
510 return rv;
511}
512
513/*
514 * WLAN
515 */
516static ssize_t show_wlan(struct device *dev,
517 struct device_attribute *attr, char *buf)
518{
519 return sprintf(buf, "%d\n", read_status(WL_ON));
520}
521
522static ssize_t store_wlan(struct device *dev, struct device_attribute *attr,
523 const char *buf, size_t count)
524{
525 return store_status(buf, count, wl_switch_handle, WL_ON, 0);
526}
527
528/*
529 * Bluetooth
530 */
531static ssize_t show_bluetooth(struct device *dev,
532 struct device_attribute *attr, char *buf)
533{
534 return sprintf(buf, "%d\n", read_status(BT_ON));
535}
536
537static ssize_t store_bluetooth(struct device *dev,
538 struct device_attribute *attr, const char *buf,
539 size_t count)
540{
541 return store_status(buf, count, bt_switch_handle, BT_ON, 0);
542}
543
544/*
545 * Display
546 */
547static void set_display(int value)
548{
549 /* no sanity check needed for now */
550 if (!write_acpi_int(display_set_handle, NULL, value, NULL))
551 printk(ASUS_WARNING "Error setting display\n");
552 return;
553}
554
555static int read_display(void)
556{
557 int value = 0;
558
559 /* In most cases, we know how to set the display, but sometimes
560 we can't read it */
561 if (display_get_handle) {
562 if (!read_acpi_int(display_get_handle, NULL, &value, NULL))
563 printk(ASUS_WARNING "Error reading display status\n");
564 }
565
566 value &= 0x0F; /* needed for some models, shouldn't hurt others */
567
568 return value;
569}
570
571/*
572 * Now, *this* one could be more user-friendly, but so far, no-one has
573 * complained. The significance of bits is the same as in store_disp()
574 */
575static ssize_t show_disp(struct device *dev,
576 struct device_attribute *attr, char *buf)
577{
578 return sprintf(buf, "%d\n", read_display());
579}
580
581/*
582 * Experimental support for display switching. As of now: 1 should activate
583 * the LCD output, 2 should do for CRT, 4 for TV-Out and 8 for DVI.
584 * Any combination (bitwise) of these will suffice. I never actually tested 4
585 * displays hooked up simultaneously, so be warned. See the acpi4asus README
586 * for more info.
587 */
588static ssize_t store_disp(struct device *dev, struct device_attribute *attr,
589 const char *buf, size_t count)
590{
591 int rv, value;
592
593 rv = parse_arg(buf, count, &value);
594 if (rv > 0)
595 set_display(value);
596 return rv;
597}
598
599/*
600 * Light Sens
601 */
602static void set_light_sens_switch(int value)
603{
604 if (!write_acpi_int(ls_switch_handle, NULL, value, NULL))
605 printk(ASUS_WARNING "Error setting light sensor switch\n");
606 hotk->light_switch = value;
607}
608
609static ssize_t show_lssw(struct device *dev,
610 struct device_attribute *attr, char *buf)
611{
612 return sprintf(buf, "%d\n", hotk->light_switch);
613}
614
615static ssize_t store_lssw(struct device *dev, struct device_attribute *attr,
616 const char *buf, size_t count)
617{
618 int rv, value;
619
620 rv = parse_arg(buf, count, &value);
621 if (rv > 0)
622 set_light_sens_switch(value ? 1 : 0);
623
624 return rv;
625}
626
627static void set_light_sens_level(int value)
628{
629 if (!write_acpi_int(ls_level_handle, NULL, value, NULL))
630 printk(ASUS_WARNING "Error setting light sensor level\n");
631 hotk->light_level = value;
632}
633
634static ssize_t show_lslvl(struct device *dev,
635 struct device_attribute *attr, char *buf)
636{
637 return sprintf(buf, "%d\n", hotk->light_level);
638}
639
640static ssize_t store_lslvl(struct device *dev, struct device_attribute *attr,
641 const char *buf, size_t count)
642{
643 int rv, value;
644
645 rv = parse_arg(buf, count, &value);
646 if (rv > 0) {
647 value = (0 < value) ? ((15 < value) ? 15 : value) : 0;
648 /* 0 <= value <= 15 */
649 set_light_sens_level(value);
650 }
651
652 return rv;
653}
654
655static void asus_hotk_notify(acpi_handle handle, u32 event, void *data)
656{
657 /* TODO Find a better way to handle events count. */
658 if (!hotk)
659 return;
660
661 /*
662 * We need to tell the backlight device when the backlight power is
663 * switched
664 */
665 if (event == ATKD_LCD_ON) {
666 write_status(NULL, 1, LCD_ON, 0);
667 lcd_blank(FB_BLANK_UNBLANK);
668 } else if (event == ATKD_LCD_OFF) {
669 write_status(NULL, 0, LCD_ON, 0);
670 lcd_blank(FB_BLANK_POWERDOWN);
671 }
672
673 acpi_bus_generate_event(hotk->device, event,
674 hotk->event_count[event % 128]++);
675
676 return;
677}
678
679#define ASUS_CREATE_DEVICE_ATTR(_name) \
680 struct device_attribute dev_attr_##_name = { \
681 .attr = { \
682 .name = __stringify(_name), \
683 .mode = 0, \
684 .owner = THIS_MODULE }, \
685 .show = NULL, \
686 .store = NULL, \
687 }
688
689#define ASUS_SET_DEVICE_ATTR(_name, _mode, _show, _store) \
690 do { \
691 dev_attr_##_name.attr.mode = _mode; \
692 dev_attr_##_name.show = _show; \
693 dev_attr_##_name.store = _store; \
694 } while(0)
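/*
 * The two macros split declaration from policy: every attribute starts
 * life with mode 0 (invisible in sysfs), and asus_hotk_add_fs() later
 * upgrades the mode only for features whose ACPI handles resolved, e.g.
 *	ASUS_SET_DEVICE_ATTR(wlan, 0644, show_wlan, store_wlan);
 */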
695
696static ASUS_CREATE_DEVICE_ATTR(infos);
697static ASUS_CREATE_DEVICE_ATTR(wlan);
698static ASUS_CREATE_DEVICE_ATTR(bluetooth);
699static ASUS_CREATE_DEVICE_ATTR(display);
700static ASUS_CREATE_DEVICE_ATTR(ledd);
701static ASUS_CREATE_DEVICE_ATTR(ls_switch);
702static ASUS_CREATE_DEVICE_ATTR(ls_level);
703
704static struct attribute *asuspf_attributes[] = {
705 &dev_attr_infos.attr,
706 &dev_attr_wlan.attr,
707 &dev_attr_bluetooth.attr,
708 &dev_attr_display.attr,
709 &dev_attr_ledd.attr,
710 &dev_attr_ls_switch.attr,
711 &dev_attr_ls_level.attr,
712 NULL
713};
714
715static struct attribute_group asuspf_attribute_group = {
716 .attrs = asuspf_attributes
717};
718
719static struct platform_driver asuspf_driver = {
720 .driver = {
721 .name = ASUS_HOTK_FILE,
722 .owner = THIS_MODULE,
723 }
724};
725
726static struct platform_device *asuspf_device;
727
728static void asus_hotk_add_fs(void)
729{
730 ASUS_SET_DEVICE_ATTR(infos, 0444, show_infos, NULL);
731
732 if (wl_switch_handle)
733 ASUS_SET_DEVICE_ATTR(wlan, 0644, show_wlan, store_wlan);
734
735 if (bt_switch_handle)
736 ASUS_SET_DEVICE_ATTR(bluetooth, 0644,
737 show_bluetooth, store_bluetooth);
738
739 if (display_set_handle && display_get_handle)
740 ASUS_SET_DEVICE_ATTR(display, 0644, show_disp, store_disp);
741 else if (display_set_handle)
742 ASUS_SET_DEVICE_ATTR(display, 0200, NULL, store_disp);
743
744 if (ledd_set_handle)
745 ASUS_SET_DEVICE_ATTR(ledd, 0644, show_ledd, store_ledd);
746
747 if (ls_switch_handle && ls_level_handle) {
748 ASUS_SET_DEVICE_ATTR(ls_level, 0644, show_lslvl, store_lslvl);
749 ASUS_SET_DEVICE_ATTR(ls_switch, 0644, show_lssw, store_lssw);
750 }
751}
752
753static int asus_handle_init(char *name, acpi_handle * handle,
754 char **paths, int num_paths)
755{
756 int i;
757 acpi_status status;
758
759 for (i = 0; i < num_paths; i++) {
760 status = acpi_get_handle(NULL, paths[i], handle);
761 if (ACPI_SUCCESS(status))
762 return 0;
763 }
764
765 *handle = NULL;
766 return -ENODEV;
767}
768
769#define ASUS_HANDLE_INIT(object) \
770 asus_handle_init(#object, &object##_handle, object##_paths, \
771 ARRAY_SIZE(object##_paths))
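The object##_paths tables consumed by ASUS_HANDLE_INIT are defined earlier in
the file; the sketch below shows their expected shape with invented ACPI
paths, purely for illustration:

/* Illustrative only: candidate ACPI paths for a hypothetical handle. */
static acpi_handle example_handle;
static char *example_paths[] = {
        "\\_SB.PCI0.SBRG.EC0.EXMP",     /* hypothetical path */
        "\\_SB.PCI0.PX40.EC0.EXMP",     /* hypothetical path */
};

/* ASUS_HANDLE_INIT(example) then expands to: */
/* asus_handle_init("example", &example_handle, example_paths,
 *                  ARRAY_SIZE(example_paths)); */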
772
773/*
774 * This function initializes the hotk structure with the right values. All
775 * the detection we need is done here, modifying the hotk struct as we go.
776 */
777static int asus_hotk_get_info(void)
778{
779 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
780 union acpi_object *model = NULL;
781 int bsts_result, hwrs_result;
782 char *string = NULL;
783 acpi_status status;
784
785 /*
786 * Get DSDT headers early enough to allow for differentiating between
787 * models, but late enough to allow acpi_bus_register_driver() to fail
788 * before doing anything ACPI-specific. Should we encounter a machine
789 * which needs special handling (i.e. its hotkey device has a different
790 * HID), this bit will be moved. The global variable asus_info contains
791 * the DSDT header.
792 */
793 status = acpi_get_table(ACPI_SIG_DSDT, 1, &asus_info);
794 if (ACPI_FAILURE(status))
795 printk(ASUS_WARNING "Couldn't get the DSDT table header\n");
796
797 /* So far, all ASUS models require writing 0 to INIT at startup */
798 if (!write_acpi_int(hotk->handle, "INIT", 0, &buffer)) {
799 printk(ASUS_ERR "Hotkey initialization failed\n");
800 return -ENODEV;
801 }
802
803 /* This needs to be called for some laptops to init properly */
804 if (!read_acpi_int(hotk->handle, "BSTS", &bsts_result, NULL))
805 printk(ASUS_WARNING "Error calling BSTS\n");
806 else if (bsts_result)
807 printk(ASUS_NOTICE "BSTS called, 0x%02x returned\n",
808 bsts_result);
809
810 /*
811 * Try to match the object returned by INIT to the specific model.
812 * Handle every possible object (or the lack thereof) the DSDT
813 * writers might throw at us. When in trouble, we pass NULL to
814 * asus_model_match() and try something completely different.
815 */
816 if (buffer.pointer) {
817 model = buffer.pointer;
818 switch (model->type) {
819 case ACPI_TYPE_STRING:
820 string = model->string.pointer;
821 break;
822 case ACPI_TYPE_BUFFER:
823 string = model->buffer.pointer;
824 break;
825 default:
826 string = "";
827 break;
828 }
829 }
830 hotk->name = kstrdup(string, GFP_KERNEL);
831 if (!hotk->name)
832 return -ENOMEM;
833
834 if (*string)
835 printk(ASUS_NOTICE " %s model detected\n", string);
836
837 ASUS_HANDLE_INIT(mled_set);
838 ASUS_HANDLE_INIT(tled_set);
839 ASUS_HANDLE_INIT(rled_set);
840 ASUS_HANDLE_INIT(pled_set);
841
842 ASUS_HANDLE_INIT(ledd_set);
843
844 /*
845 * The HWRS method returns information about the hardware.
846 * Bit 0x80 indicates WLAN, bit 0x100 Bluetooth.
847 * The significance of the other bits is yet to be found.
848 * If we don't find the method, we assume the devices are present.
849 */
850 if (!read_acpi_int(hotk->handle, "HRWS", &hwrs_result, NULL))
851 hwrs_result = WL_HWRS | BT_HWRS;
852
853 if (hwrs_result & WL_HWRS)
854 ASUS_HANDLE_INIT(wl_switch);
855 if (hwrs_result & BT_HWRS)
856 ASUS_HANDLE_INIT(bt_switch);
857
858 ASUS_HANDLE_INIT(wireless_status);
859
860 ASUS_HANDLE_INIT(brightness_set);
861 ASUS_HANDLE_INIT(brightness_get);
862
863 ASUS_HANDLE_INIT(lcd_switch);
864
865 ASUS_HANDLE_INIT(display_set);
866 ASUS_HANDLE_INIT(display_get);
867
868 /* There are a lot of models with an "ALSL" method, but only a few
869 have a real light sensor, so we need to check for it. */
870 if (ASUS_HANDLE_INIT(ls_switch))
871 ASUS_HANDLE_INIT(ls_level);
872
873 kfree(model);
874
875 return AE_OK;
876}
877
878static int asus_hotk_check(void)
879{
880 int result = 0;
881
882 result = acpi_bus_get_status(hotk->device);
883 if (result)
884 return result;
885
886 if (hotk->device->status.present) {
887 result = asus_hotk_get_info();
888 } else {
889 printk(ASUS_ERR "Hotkey device not present, aborting\n");
890 return -EINVAL;
891 }
892
893 return result;
894}
895
896static int asus_hotk_found;
897
898static int asus_hotk_add(struct acpi_device *device)
899{
900 acpi_status status = AE_OK;
901 int result;
902
903 if (!device)
904 return -EINVAL;
905
906 printk(ASUS_NOTICE "Asus Laptop Support version %s\n",
907 ASUS_LAPTOP_VERSION);
908
909 hotk = kmalloc(sizeof(struct asus_hotk), GFP_KERNEL);
910 if (!hotk)
911 return -ENOMEM;
912 memset(hotk, 0, sizeof(struct asus_hotk));
913
914 hotk->handle = device->handle;
915 strcpy(acpi_device_name(device), ASUS_HOTK_DEVICE_NAME);
916 strcpy(acpi_device_class(device), ASUS_HOTK_CLASS);
917 acpi_driver_data(device) = hotk;
918 hotk->device = device;
919
920 result = asus_hotk_check();
921 if (result)
922 goto end;
923
924 asus_hotk_add_fs();
925
926 /*
927 * We install the handler; it will receive the hotk as a parameter, so
928 * we can add other data to the hotk struct later if needed.
929 */
930 status = acpi_install_notify_handler(hotk->handle, ACPI_SYSTEM_NOTIFY,
931 asus_hotk_notify, hotk);
932 if (ACPI_FAILURE(status))
933 printk(ASUS_ERR "Error installing notify handler\n");
934
935 asus_hotk_found = 1;
936
937 /* WLED and BLED are on by default */
938 write_status(bt_switch_handle, 1, BT_ON, 0);
939 write_status(wl_switch_handle, 1, WL_ON, 0);
940
941 /* LCD Backlight is on by default */
942 write_status(NULL, 1, LCD_ON, 0);
943
944 /* LED display is off by default */
945 hotk->ledd_status = 0xFFF;
946
947 /* Set initial values of light sensor switch and level */
948 hotk->light_switch = 0; /* Default to light sensor disabled */
949 hotk->light_level = 5; /* level 5 for sensor sensitivity */
950
951 if (ls_switch_handle)
952 set_light_sens_switch(hotk->light_switch);
953
954 if (ls_level_handle)
955 set_light_sens_level(hotk->light_level);
956
957 end:
958 if (result) {
959 kfree(hotk->name);
960 kfree(hotk);
961 }
962
963 return result;
964}
965
966static int asus_hotk_remove(struct acpi_device *device, int type)
967{
968 acpi_status status = 0;
969
970 if (!device || !acpi_driver_data(device))
971 return -EINVAL;
972
973 status = acpi_remove_notify_handler(hotk->handle, ACPI_SYSTEM_NOTIFY,
974 asus_hotk_notify);
975 if (ACPI_FAILURE(status))
976 printk(ASUS_ERR "Error removing notify handler\n");
977
978 kfree(hotk->name);
979 kfree(hotk);
980
981 return 0;
982}
983
984static void asus_backlight_exit(void)
985{
986 if (asus_backlight_device)
987 backlight_device_unregister(asus_backlight_device);
988}
989
990#define ASUS_LED_UNREGISTER(object) \
991 if (object##_led.class_dev \
992 && !IS_ERR(object##_led.class_dev)) \
993 led_classdev_unregister(&object##_led)
994
995static void asus_led_exit(void)
996{
997 ASUS_LED_UNREGISTER(mled);
998 ASUS_LED_UNREGISTER(tled);
999 ASUS_LED_UNREGISTER(pled);
1000 ASUS_LED_UNREGISTER(rled);
1001
1002 destroy_workqueue(led_workqueue);
1003}
1004
1005static void __exit asus_laptop_exit(void)
1006{
1007 asus_backlight_exit();
1008 asus_led_exit();
1009
1010 acpi_bus_unregister_driver(&asus_hotk_driver);
1011 sysfs_remove_group(&asuspf_device->dev.kobj, &asuspf_attribute_group);
1012 platform_device_unregister(asuspf_device);
1013 platform_driver_unregister(&asuspf_driver);
1014}
1015
1016static int asus_backlight_init(struct device *dev)
1017{
1018 struct backlight_device *bd;
1019
1020 if (brightness_set_handle && lcd_switch_handle) {
1021 bd = backlight_device_register(ASUS_HOTK_FILE, dev,
1022 NULL, &asusbl_data);
1023 if (IS_ERR(bd)) {
1024 printk(ASUS_ERR
1025 "Could not register asus backlight device\n");
1026 asus_backlight_device = NULL;
1027 return PTR_ERR(bd);
1028 }
1029
1030 asus_backlight_device = bd;
1031
1032 down(&bd->sem);
1033 if (likely(bd->props)) {
1034 bd->props->brightness = read_brightness(NULL);
1035 bd->props->power = FB_BLANK_UNBLANK;
1036 if (likely(bd->props->update_status))
1037 bd->props->update_status(bd);
1038 }
1039 up(&bd->sem);
1040 }
1041 return 0;
1042}
1043
1044static int asus_led_register(acpi_handle handle,
1045 struct led_classdev *ldev, struct device *dev)
1046{
1047 if (!handle)
1048 return 0;
1049
1050 return led_classdev_register(dev, ldev);
1051}
1052
1053#define ASUS_LED_REGISTER(object, device) \
1054 asus_led_register(object##_set_handle, &object##_led, device)
1055
1056static int asus_led_init(struct device *dev)
1057{
1058 int rv;
1059
1060 rv = ASUS_LED_REGISTER(mled, dev);
1061 if (rv)
1062 return rv;
1063
1064 rv = ASUS_LED_REGISTER(tled, dev);
1065 if (rv)
1066 return rv;
1067
1068 rv = ASUS_LED_REGISTER(rled, dev);
1069 if (rv)
1070 return rv;
1071
1072 rv = ASUS_LED_REGISTER(pled, dev);
1073 if (rv)
1074 return rv;
1075
1076 led_workqueue = create_singlethread_workqueue("led_workqueue");
1077 if (!led_workqueue)
1078 return -ENOMEM;
1079
1080 return 0;
1081}
1082
1083static int __init asus_laptop_init(void)
1084{
1085 struct device *dev;
1086 int result;
1087
1088 if (acpi_disabled)
1089 return -ENODEV;
1090
1091 if (!acpi_specific_hotkey_enabled) {
1092 printk(ASUS_ERR "Using generic hotkey driver\n");
1093 return -ENODEV;
1094 }
1095
1096 result = acpi_bus_register_driver(&asus_hotk_driver);
1097 if (result < 0)
1098 return result;
1099
1100 /*
1101 * This is a bit of a kludge. We only want this module loaded
1102 * for ASUS systems, but there's currently no way to probe the
1103 * ACPI namespace for ASUS HIDs. So we just return failure if
1104 * we didn't find one, which will cause the module to be
1105 * unloaded.
1106 */
1107 if (!asus_hotk_found) {
1108 acpi_bus_unregister_driver(&asus_hotk_driver);
1109 return -ENODEV;
1110 }
1111
1112 dev = acpi_get_physical_device(hotk->device->handle);
1113
1114 result = asus_backlight_init(dev);
1115 if (result)
1116 goto fail_backlight;
1117
1118 result = asus_led_init(dev);
1119 if (result)
1120 goto fail_led;
1121
1122 /* Register platform stuff */
1123 result = platform_driver_register(&asuspf_driver);
1124 if (result)
1125 goto fail_platform_driver;
1126
1127 asuspf_device = platform_device_alloc(ASUS_HOTK_FILE, -1);
1128 if (!asuspf_device) {
1129 result = -ENOMEM;
1130 goto fail_platform_device1;
1131 }
1132
1133 result = platform_device_add(asuspf_device);
1134 if (result)
1135 goto fail_platform_device2;
1136
1137 result = sysfs_create_group(&asuspf_device->dev.kobj,
1138 &asuspf_attribute_group);
1139 if (result)
1140 goto fail_sysfs;
1141
1142 return 0;
1143
1144 fail_sysfs:
1145 platform_device_del(asuspf_device);
1146
1147 fail_platform_device2:
1148 platform_device_put(asuspf_device);
1149
1150 fail_platform_device1:
1151 platform_driver_unregister(&asuspf_driver);
1152
1153 fail_platform_driver:
1154 asus_led_exit();
1155
1156 fail_led:
1157 asus_backlight_exit();
1158
1159 fail_backlight:
1160
1161 return result;
1162}
1163
1164module_init(asus_laptop_init);
1165module_exit(asus_laptop_exit);
diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
index db9d7df75ae0..552b7957a92a 100644
--- a/drivers/misc/lkdtm.c
+++ b/drivers/misc/lkdtm.c
@@ -108,8 +108,8 @@ static struct jprobe lkdtm;
 static int lkdtm_parse_commandline(void);
 static void lkdtm_handler(void);
 
-static char* cpoint_name = INVALID;
-static char* cpoint_type = NONE;
+static char* cpoint_name;
+static char* cpoint_type;
 static int cpoint_count = DEFAULT_COUNT;
 static int recur_count = REC_NUM_DEFAULT;
 
diff --git a/drivers/misc/tifm_7xx1.c b/drivers/misc/tifm_7xx1.c
index 2ab7add78f94..e21e490fedb0 100644
--- a/drivers/misc/tifm_7xx1.c
+++ b/drivers/misc/tifm_7xx1.c
@@ -11,66 +11,25 @@
 
 #include <linux/tifm.h>
 #include <linux/dma-mapping.h>
+#include <linux/freezer.h>
 
 #define DRIVER_NAME "tifm_7xx1"
-#define DRIVER_VERSION "0.6"
+#define DRIVER_VERSION "0.7"
 
 static void tifm_7xx1_eject(struct tifm_adapter *fm, struct tifm_dev *sock)
 {
-	int cnt;
 	unsigned long flags;
 
 	spin_lock_irqsave(&fm->lock, flags);
-	if (!fm->inhibit_new_cards) {
-		for (cnt = 0; cnt < fm->max_sockets; cnt++) {
-			if (fm->sockets[cnt] == sock) {
-				fm->remove_mask |= (1 << cnt);
-				queue_work(fm->wq, &fm->media_remover);
-				break;
-			}
-		}
-	}
+	fm->socket_change_set |= 1 << sock->socket_id;
+	wake_up_all(&fm->change_set_notify);
 	spin_unlock_irqrestore(&fm->lock, flags);
 }
 
-static void tifm_7xx1_remove_media(struct work_struct *work)
-{
-	struct tifm_adapter *fm =
-		container_of(work, struct tifm_adapter, media_remover);
-	unsigned long flags;
-	int cnt;
-	struct tifm_dev *sock;
-
-	if (!class_device_get(&fm->cdev))
-		return;
-	spin_lock_irqsave(&fm->lock, flags);
-	for (cnt = 0; cnt < fm->max_sockets; cnt++) {
-		if (fm->sockets[cnt] && (fm->remove_mask & (1 << cnt))) {
-			printk(KERN_INFO DRIVER_NAME
-			       ": demand removing card from socket %d\n", cnt);
-			sock = fm->sockets[cnt];
-			fm->sockets[cnt] = NULL;
-			fm->remove_mask &= ~(1 << cnt);
-
-			writel(0x0e00, sock->addr + SOCK_CONTROL);
-
-			writel((TIFM_IRQ_FIFOMASK | TIFM_IRQ_CARDMASK) << cnt,
-			       fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
-			writel((TIFM_IRQ_FIFOMASK | TIFM_IRQ_CARDMASK) << cnt,
-			       fm->addr + FM_SET_INTERRUPT_ENABLE);
-
-			spin_unlock_irqrestore(&fm->lock, flags);
-			device_unregister(&sock->dev);
-			spin_lock_irqsave(&fm->lock, flags);
-		}
-	}
-	spin_unlock_irqrestore(&fm->lock, flags);
-	class_device_put(&fm->cdev);
-}
-
 static irqreturn_t tifm_7xx1_isr(int irq, void *dev_id)
 {
 	struct tifm_adapter *fm = dev_id;
+	struct tifm_dev *sock;
 	unsigned int irq_status;
 	unsigned int sock_irq_status, cnt;
 
@@ -84,42 +43,32 @@ static irqreturn_t tifm_7xx1_isr(int irq, void *dev_id)
 	if (irq_status & TIFM_IRQ_ENABLE) {
 		writel(TIFM_IRQ_ENABLE, fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
 
-		for (cnt = 0; cnt < fm->max_sockets; cnt++) {
-			sock_irq_status = (irq_status >> cnt) &
-				(TIFM_IRQ_FIFOMASK | TIFM_IRQ_CARDMASK);
-
-			if (fm->sockets[cnt]) {
-				if (sock_irq_status &&
-				    fm->sockets[cnt]->signal_irq)
-					sock_irq_status = fm->sockets[cnt]->
-						signal_irq(fm->sockets[cnt],
-							   sock_irq_status);
+		for (cnt = 0; cnt < fm->num_sockets; cnt++) {
+			sock = fm->sockets[cnt];
+			sock_irq_status = (irq_status >> cnt)
+					  & (TIFM_IRQ_FIFOMASK(1)
+					     | TIFM_IRQ_CARDMASK(1));
 
-				if (irq_status & (1 << cnt))
-					fm->remove_mask |= 1 << cnt;
-			} else {
-				if (irq_status & (1 << cnt))
-					fm->insert_mask |= 1 << cnt;
-			}
+			if (sock && sock_irq_status)
+				sock->signal_irq(sock, sock_irq_status);
 		}
+
+		fm->socket_change_set |= irq_status
+					 & ((1 << fm->num_sockets) - 1);
 	}
 	writel(irq_status, fm->addr + FM_INTERRUPT_STATUS);
 
-	if (!fm->inhibit_new_cards) {
-		if (!fm->remove_mask && !fm->insert_mask) {
-			writel(TIFM_IRQ_ENABLE,
-			       fm->addr + FM_SET_INTERRUPT_ENABLE);
-		} else {
-			queue_work(fm->wq, &fm->media_remover);
-			queue_work(fm->wq, &fm->media_inserter);
-		}
-	}
+	if (!fm->socket_change_set)
+		writel(TIFM_IRQ_ENABLE, fm->addr + FM_SET_INTERRUPT_ENABLE);
+	else
+		wake_up_all(&fm->change_set_notify);
 
 	spin_unlock(&fm->lock);
 	return IRQ_HANDLED;
 }
 
-static tifm_media_id tifm_7xx1_toggle_sock_power(char __iomem *sock_addr, int is_x2)
+static tifm_media_id tifm_7xx1_toggle_sock_power(char __iomem *sock_addr,
+						 int is_x2)
 {
 	unsigned int s_state;
 	int cnt;
@@ -127,8 +76,8 @@ static tifm_media_id tifm_7xx1_toggle_sock_power(char __iomem *sock_addr, int is
 	writel(0x0e00, sock_addr + SOCK_CONTROL);
 
 	for (cnt = 0; cnt < 100; cnt++) {
-		if (!(TIFM_SOCK_STATE_POWERED &
-		      readl(sock_addr + SOCK_PRESENT_STATE)))
+		if (!(TIFM_SOCK_STATE_POWERED
+		      & readl(sock_addr + SOCK_PRESENT_STATE)))
 			break;
 		msleep(10);
 	}
@@ -151,8 +100,8 @@ static tifm_media_id tifm_7xx1_toggle_sock_power(char __iomem *sock_addr, int is
 	}
 
 	for (cnt = 0; cnt < 100; cnt++) {
-		if ((TIFM_SOCK_STATE_POWERED &
-		     readl(sock_addr + SOCK_PRESENT_STATE)))
+		if ((TIFM_SOCK_STATE_POWERED
+		     & readl(sock_addr + SOCK_PRESENT_STATE)))
 			break;
 		msleep(10);
 	}
@@ -170,130 +119,209 @@ tifm_7xx1_sock_addr(char __iomem *base_addr, unsigned int sock_num)
 	return base_addr + ((sock_num + 1) << 10);
 }
 
-static void tifm_7xx1_insert_media(struct work_struct *work)
+static int tifm_7xx1_switch_media(void *data)
 {
-	struct tifm_adapter *fm =
-		container_of(work, struct tifm_adapter, media_inserter);
+	struct tifm_adapter *fm = data;
 	unsigned long flags;
 	tifm_media_id media_id;
 	char *card_name = "xx";
-	int cnt, ok_to_register;
-	unsigned int insert_mask;
-	struct tifm_dev *new_sock = NULL;
+	int cnt, rc;
+	struct tifm_dev *sock;
+	unsigned int socket_change_set;
 
-	if (!class_device_get(&fm->cdev))
-		return;
-	spin_lock_irqsave(&fm->lock, flags);
-	insert_mask = fm->insert_mask;
-	fm->insert_mask = 0;
-	if (fm->inhibit_new_cards) {
+	while (1) {
+		rc = wait_event_interruptible(fm->change_set_notify,
+					      fm->socket_change_set);
+		if (rc == -ERESTARTSYS)
+			try_to_freeze();
+
+		spin_lock_irqsave(&fm->lock, flags);
+		socket_change_set = fm->socket_change_set;
+		fm->socket_change_set = 0;
+
+		dev_dbg(fm->dev, "checking media set %x\n",
+			socket_change_set);
+
+		if (kthread_should_stop())
+			socket_change_set = (1 << fm->num_sockets) - 1;
 		spin_unlock_irqrestore(&fm->lock, flags);
-		class_device_put(&fm->cdev);
-		return;
-	}
-	spin_unlock_irqrestore(&fm->lock, flags);
 
-	for (cnt = 0; cnt < fm->max_sockets; cnt++) {
-		if (!(insert_mask & (1 << cnt)))
+		if (!socket_change_set)
 			continue;
 
-		media_id = tifm_7xx1_toggle_sock_power(tifm_7xx1_sock_addr(fm->addr, cnt),
-						       fm->max_sockets == 2);
-		if (media_id) {
-			ok_to_register = 0;
-			new_sock = tifm_alloc_device(fm, cnt);
-			if (new_sock) {
-				new_sock->addr = tifm_7xx1_sock_addr(fm->addr,
-								     cnt);
-				new_sock->media_id = media_id;
-				switch (media_id) {
-				case 1:
-					card_name = "xd";
-					break;
-				case 2:
-					card_name = "ms";
-					break;
-				case 3:
-					card_name = "sd";
-					break;
-				default:
-					break;
-				}
-				snprintf(new_sock->dev.bus_id, BUS_ID_SIZE,
-					 "tifm_%s%u:%u", card_name, fm->id, cnt);
+		spin_lock_irqsave(&fm->lock, flags);
+		for (cnt = 0; cnt < fm->num_sockets; cnt++) {
+			if (!(socket_change_set & (1 << cnt)))
+				continue;
+			sock = fm->sockets[cnt];
+			if (sock) {
 				printk(KERN_INFO DRIVER_NAME
-				       ": %s card detected in socket %d\n",
-				       card_name, cnt);
+				       ": demand removing card from socket %d\n",
+				       cnt);
+				fm->sockets[cnt] = NULL;
+				spin_unlock_irqrestore(&fm->lock, flags);
+				device_unregister(&sock->dev);
 				spin_lock_irqsave(&fm->lock, flags);
-				if (!fm->sockets[cnt]) {
-					fm->sockets[cnt] = new_sock;
-					ok_to_register = 1;
+				writel(0x0e00,
+				       tifm_7xx1_sock_addr(fm->addr, cnt)
+				       + SOCK_CONTROL);
+			}
+			if (kthread_should_stop())
+				continue;
+
+			spin_unlock_irqrestore(&fm->lock, flags);
+			media_id = tifm_7xx1_toggle_sock_power(
+					tifm_7xx1_sock_addr(fm->addr, cnt),
+					fm->num_sockets == 2);
+			if (media_id) {
+				sock = tifm_alloc_device(fm);
+				if (sock) {
+					sock->addr = tifm_7xx1_sock_addr(fm->addr,
+									 cnt);
+					sock->media_id = media_id;
+					sock->socket_id = cnt;
+					switch (media_id) {
+					case 1:
+						card_name = "xd";
+						break;
+					case 2:
+						card_name = "ms";
+						break;
+					case 3:
+						card_name = "sd";
+						break;
+					default:
+						tifm_free_device(&sock->dev);
+						spin_lock_irqsave(&fm->lock, flags);
+						continue;
+					}
+					snprintf(sock->dev.bus_id, BUS_ID_SIZE,
+						 "tifm_%s%u:%u", card_name,
+						 fm->id, cnt);
+					printk(KERN_INFO DRIVER_NAME
+					       ": %s card detected in socket %d\n",
+					       card_name, cnt);
+					if (!device_register(&sock->dev)) {
+						spin_lock_irqsave(&fm->lock, flags);
+						if (!fm->sockets[cnt]) {
+							fm->sockets[cnt] = sock;
+							sock = NULL;
+						}
+						spin_unlock_irqrestore(&fm->lock, flags);
+					}
+					if (sock)
+						tifm_free_device(&sock->dev);
 				}
+				spin_lock_irqsave(&fm->lock, flags);
+			}
+		}
+
+		if (!kthread_should_stop()) {
+			writel(TIFM_IRQ_FIFOMASK(socket_change_set)
+			       | TIFM_IRQ_CARDMASK(socket_change_set),
+			       fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
+			writel(TIFM_IRQ_FIFOMASK(socket_change_set)
+			       | TIFM_IRQ_CARDMASK(socket_change_set),
+			       fm->addr + FM_SET_INTERRUPT_ENABLE);
+			writel(TIFM_IRQ_ENABLE,
+			       fm->addr + FM_SET_INTERRUPT_ENABLE);
+			spin_unlock_irqrestore(&fm->lock, flags);
+		} else {
+			for (cnt = 0; cnt < fm->num_sockets; cnt++) {
+				if (fm->sockets[cnt])
+					fm->socket_change_set |= 1 << cnt;
+			}
+			if (!fm->socket_change_set) {
+				spin_unlock_irqrestore(&fm->lock, flags);
+				return 0;
+			} else {
 				spin_unlock_irqrestore(&fm->lock, flags);
-				if (!ok_to_register ||
-				    device_register(&new_sock->dev)) {
-					spin_lock_irqsave(&fm->lock, flags);
-					fm->sockets[cnt] = NULL;
-					spin_unlock_irqrestore(&fm->lock,
-							       flags);
-					tifm_free_device(&new_sock->dev);
-				}
 			}
 		}
-		writel((TIFM_IRQ_FIFOMASK | TIFM_IRQ_CARDMASK) << cnt,
-		       fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
-		writel((TIFM_IRQ_FIFOMASK | TIFM_IRQ_CARDMASK) << cnt,
-		       fm->addr + FM_SET_INTERRUPT_ENABLE);
 	}
-
-	writel(TIFM_IRQ_ENABLE, fm->addr + FM_SET_INTERRUPT_ENABLE);
-	class_device_put(&fm->cdev);
+	return 0;
 }
 
+#ifdef CONFIG_PM
+
 static int tifm_7xx1_suspend(struct pci_dev *dev, pm_message_t state)
 {
-	struct tifm_adapter *fm = pci_get_drvdata(dev);
-	unsigned long flags;
+	dev_dbg(&dev->dev, "suspending host\n");
 
-	spin_lock_irqsave(&fm->lock, flags);
-	fm->inhibit_new_cards = 1;
-	fm->remove_mask = 0xf;
-	fm->insert_mask = 0;
-	writel(TIFM_IRQ_ENABLE, fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
-	spin_unlock_irqrestore(&fm->lock, flags);
-	flush_workqueue(fm->wq);
-
-	tifm_7xx1_remove_media(&fm->media_remover);
-
-	pci_set_power_state(dev, PCI_D3hot);
-	pci_disable_device(dev);
-	pci_save_state(dev);
+	pci_save_state(dev);
+	pci_enable_wake(dev, pci_choose_state(dev, state), 0);
+	pci_disable_device(dev);
+	pci_set_power_state(dev, pci_choose_state(dev, state));
 	return 0;
 }
 
 static int tifm_7xx1_resume(struct pci_dev *dev)
 {
 	struct tifm_adapter *fm = pci_get_drvdata(dev);
+	int cnt, rc;
 	unsigned long flags;
+	tifm_media_id new_ids[fm->num_sockets];
 
+	pci_set_power_state(dev, PCI_D0);
 	pci_restore_state(dev);
-	pci_enable_device(dev);
-	pci_set_power_state(dev, PCI_D0);
-	pci_set_master(dev);
+	rc = pci_enable_device(dev);
+	if (rc)
+		return rc;
+	pci_set_master(dev);
 
+	dev_dbg(&dev->dev, "resuming host\n");
+
+	for (cnt = 0; cnt < fm->num_sockets; cnt++)
+		new_ids[cnt] = tifm_7xx1_toggle_sock_power(
+				tifm_7xx1_sock_addr(fm->addr, cnt),
+				fm->num_sockets == 2);
 	spin_lock_irqsave(&fm->lock, flags);
-	fm->inhibit_new_cards = 0;
-	writel(TIFM_IRQ_SETALL, fm->addr + FM_INTERRUPT_STATUS);
-	writel(TIFM_IRQ_SETALL, fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
-	writel(TIFM_IRQ_ENABLE | TIFM_IRQ_SETALLSOCK,
-	       fm->addr + FM_SET_INTERRUPT_ENABLE);
-	fm->insert_mask = 0xf;
+	fm->socket_change_set = 0;
+	for (cnt = 0; cnt < fm->num_sockets; cnt++) {
+		if (fm->sockets[cnt]) {
+			if (fm->sockets[cnt]->media_id == new_ids[cnt])
+				fm->socket_change_set |= 1 << cnt;
+
+			fm->sockets[cnt]->media_id = new_ids[cnt];
+		}
+	}
+
+	writel(TIFM_IRQ_ENABLE | TIFM_IRQ_SOCKMASK((1 << fm->num_sockets) - 1),
+	       fm->addr + FM_SET_INTERRUPT_ENABLE);
+	if (!fm->socket_change_set) {
+		spin_unlock_irqrestore(&fm->lock, flags);
+		return 0;
+	} else {
+		fm->socket_change_set = 0;
+		spin_unlock_irqrestore(&fm->lock, flags);
+	}
+
+	wait_event_timeout(fm->change_set_notify, fm->socket_change_set, HZ);
+
+	spin_lock_irqsave(&fm->lock, flags);
+	writel(TIFM_IRQ_FIFOMASK(fm->socket_change_set)
+	       | TIFM_IRQ_CARDMASK(fm->socket_change_set),
+	       fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
+	writel(TIFM_IRQ_FIFOMASK(fm->socket_change_set)
+	       | TIFM_IRQ_CARDMASK(fm->socket_change_set),
+	       fm->addr + FM_SET_INTERRUPT_ENABLE);
+	writel(TIFM_IRQ_ENABLE,
+	       fm->addr + FM_SET_INTERRUPT_ENABLE);
+	fm->socket_change_set = 0;
+
 	spin_unlock_irqrestore(&fm->lock, flags);
 	return 0;
 }
 
+#else
+
+#define tifm_7xx1_suspend NULL
+#define tifm_7xx1_resume NULL
+
+#endif /* CONFIG_PM */
+
 static int tifm_7xx1_probe(struct pci_dev *dev,
 			   const struct pci_device_id *dev_id)
 {
 	struct tifm_adapter *fm;
 	int pci_dev_busy = 0;
@@ -324,19 +352,18 @@ static int tifm_7xx1_probe(struct pci_dev *dev,
 	}
 
 	fm->dev = &dev->dev;
-	fm->max_sockets = (dev->device == 0x803B) ? 2 : 4;
-	fm->sockets = kzalloc(sizeof(struct tifm_dev*) * fm->max_sockets,
-			      GFP_KERNEL);
+	fm->num_sockets = (dev->device == PCI_DEVICE_ID_TI_XX21_XX11_FM)
+			  ? 4 : 2;
+	fm->sockets = kzalloc(sizeof(struct tifm_dev*) * fm->num_sockets,
+			      GFP_KERNEL);
 	if (!fm->sockets)
 		goto err_out_free;
 
-	INIT_WORK(&fm->media_inserter, tifm_7xx1_insert_media);
-	INIT_WORK(&fm->media_remover, tifm_7xx1_remove_media);
 	fm->eject = tifm_7xx1_eject;
 	pci_set_drvdata(dev, fm);
 
 	fm->addr = ioremap(pci_resource_start(dev, 0),
 			   pci_resource_len(dev, 0));
 	if (!fm->addr)
 		goto err_out_free;
 
@@ -344,16 +371,15 @@ static int tifm_7xx1_probe(struct pci_dev *dev,
 	if (rc)
 		goto err_out_unmap;
 
-	rc = tifm_add_adapter(fm);
+	init_waitqueue_head(&fm->change_set_notify);
+	rc = tifm_add_adapter(fm, tifm_7xx1_switch_media);
 	if (rc)
 		goto err_out_irq;
 
 	writel(TIFM_IRQ_SETALL, fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
-	writel(TIFM_IRQ_ENABLE | TIFM_IRQ_SETALLSOCK,
+	writel(TIFM_IRQ_ENABLE | TIFM_IRQ_SOCKMASK((1 << fm->num_sockets) - 1),
 	       fm->addr + FM_SET_INTERRUPT_ENABLE);
-
-	fm->insert_mask = 0xf;
-
+	wake_up_process(fm->media_switcher);
 	return 0;
 
 err_out_irq:
@@ -377,19 +403,15 @@ static void tifm_7xx1_remove(struct pci_dev *dev)
 	struct tifm_adapter *fm = pci_get_drvdata(dev);
 	unsigned long flags;
 
+	writel(TIFM_IRQ_SETALL, fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
+	mmiowb();
+	free_irq(dev->irq, fm);
+
 	spin_lock_irqsave(&fm->lock, flags);
-	fm->inhibit_new_cards = 1;
-	fm->remove_mask = 0xf;
-	fm->insert_mask = 0;
-	writel(TIFM_IRQ_ENABLE, fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
+	fm->socket_change_set = (1 << fm->num_sockets) - 1;
 	spin_unlock_irqrestore(&fm->lock, flags);
 
-	flush_workqueue(fm->wq);
-
-	tifm_7xx1_remove_media(&fm->media_remover);
-
-	writel(TIFM_IRQ_SETALL, fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
-	free_irq(dev->irq, fm);
+	kthread_stop(fm->media_switcher);
 
 	tifm_remove_adapter(fm);
 
@@ -404,10 +426,12 @@ static void tifm_7xx1_remove(struct pci_dev *dev)
 }
 
 static struct pci_device_id tifm_7xx1_pci_tbl [] = {
-	{ PCI_VENDOR_ID_TI, 0x8033, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  0 }, /* xx21 - the one I have */
-	{ PCI_VENDOR_ID_TI, 0x803B, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-	  0 }, /* xx12 - should be also supported */
+	{ PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XX21_XX11_FM, PCI_ANY_ID,
+	  PCI_ANY_ID, 0, 0, 0 }, /* xx21 - the one I have */
+	{ PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XX12_FM, PCI_ANY_ID,
+	  PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XX20_FM, PCI_ANY_ID,
+	  PCI_ANY_ID, 0, 0, 0 },
 	{ }
 };
 
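The rewrite above replaces the two per-adapter workqueues with a single
kthread. The control flow reduces to the generic sketch below; every name
here is invented for illustration and is not part of the tifm API:

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

/* Illustrative context, mirroring the adapter's change_set bookkeeping. */
struct my_ctx {
	spinlock_t lock;
	wait_queue_head_t notify;
	unsigned int change_set;
};

static void service_sockets(struct my_ctx *ctx, unsigned int set)
{
	/* Illustrative stub: react to each bit in 'set'. */
}

static int media_thread(void *data)
{
	struct my_ctx *ctx = data;
	unsigned int set;

	while (!kthread_should_stop()) {
		/* kthread_stop() wakes us and the condition is re-checked. */
		wait_event_interruptible(ctx->notify,
					 ctx->change_set ||
					 kthread_should_stop());

		spin_lock_irq(&ctx->lock);
		set = ctx->change_set;	/* snapshot under the lock */
		ctx->change_set = 0;
		spin_unlock_irq(&ctx->lock);

		if (set)
			service_sockets(ctx, set);
	}
	return 0;
}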
diff --git a/drivers/misc/tifm_core.c b/drivers/misc/tifm_core.c
index d61df5c3ac36..6b10ebe9d936 100644
--- a/drivers/misc/tifm_core.c
+++ b/drivers/misc/tifm_core.c
@@ -14,7 +14,7 @@
 #include <linux/idr.h>
 
 #define DRIVER_NAME "tifm_core"
-#define DRIVER_VERSION "0.6"
+#define DRIVER_VERSION "0.7"
 
 static DEFINE_IDR(tifm_adapter_idr);
 static DEFINE_SPINLOCK(tifm_adapter_lock);
@@ -60,10 +60,41 @@ static int tifm_uevent(struct device *dev, char **envp, int num_envp,
 	return 0;
 }
 
+#ifdef CONFIG_PM
+
+static int tifm_device_suspend(struct device *dev, pm_message_t state)
+{
+	struct tifm_dev *fm_dev = container_of(dev, struct tifm_dev, dev);
+	struct tifm_driver *drv = fm_dev->drv;
+
+	if (drv && drv->suspend)
+		return drv->suspend(fm_dev, state);
+	return 0;
+}
+
+static int tifm_device_resume(struct device *dev)
+{
+	struct tifm_dev *fm_dev = container_of(dev, struct tifm_dev, dev);
+	struct tifm_driver *drv = fm_dev->drv;
+
+	if (drv && drv->resume)
+		return drv->resume(fm_dev);
+	return 0;
+}
+
+#else
+
+#define tifm_device_suspend NULL
+#define tifm_device_resume NULL
+
+#endif /* CONFIG_PM */
+
 static struct bus_type tifm_bus_type = {
 	.name = "tifm",
 	.match = tifm_match,
 	.uevent = tifm_uevent,
+	.suspend = tifm_device_suspend,
+	.resume = tifm_device_resume
 };
 
 static void tifm_free(struct class_device *cdev)
@@ -71,8 +102,6 @@ static void tifm_free(struct class_device *cdev)
 	struct tifm_adapter *fm = container_of(cdev, struct tifm_adapter, cdev);
 
 	kfree(fm->sockets);
-	if (fm->wq)
-		destroy_workqueue(fm->wq);
 	kfree(fm);
 }
 
@@ -101,7 +130,8 @@ void tifm_free_adapter(struct tifm_adapter *fm)
 }
 EXPORT_SYMBOL(tifm_free_adapter);
 
-int tifm_add_adapter(struct tifm_adapter *fm)
+int tifm_add_adapter(struct tifm_adapter *fm,
+		     int (*mediathreadfn)(void *data))
 {
 	int rc;
 
@@ -113,10 +143,10 @@ int tifm_add_adapter(struct tifm_adapter *fm)
 	spin_unlock(&tifm_adapter_lock);
 	if (!rc) {
 		snprintf(fm->cdev.class_id, BUS_ID_SIZE, "tifm%u", fm->id);
-		strncpy(fm->wq_name, fm->cdev.class_id, KOBJ_NAME_LEN);
+		fm->media_switcher = kthread_create(mediathreadfn,
+						    fm, "tifm/%u", fm->id);
 
-		fm->wq = create_singlethread_workqueue(fm->wq_name);
-		if (fm->wq)
+		if (!IS_ERR(fm->media_switcher))
 			return class_device_add(&fm->cdev);
 
 		spin_lock(&tifm_adapter_lock);
@@ -141,27 +171,27 @@ EXPORT_SYMBOL(tifm_remove_adapter);
 void tifm_free_device(struct device *dev)
 {
 	struct tifm_dev *fm_dev = container_of(dev, struct tifm_dev, dev);
-	if (fm_dev->wq)
-		destroy_workqueue(fm_dev->wq);
 	kfree(fm_dev);
 }
 EXPORT_SYMBOL(tifm_free_device);
 
-struct tifm_dev *tifm_alloc_device(struct tifm_adapter *fm, unsigned int id)
+static void tifm_dummy_signal_irq(struct tifm_dev *sock,
+				  unsigned int sock_irq_status)
+{
+	return;
+}
+
+struct tifm_dev *tifm_alloc_device(struct tifm_adapter *fm)
 {
 	struct tifm_dev *dev = kzalloc(sizeof(struct tifm_dev), GFP_KERNEL);
 
 	if (dev) {
 		spin_lock_init(&dev->lock);
-		snprintf(dev->wq_name, KOBJ_NAME_LEN, "tifm%u:%u", fm->id, id);
-		dev->wq = create_singlethread_workqueue(dev->wq_name);
-		if (!dev->wq) {
-			kfree(dev);
-			return NULL;
-		}
+
 		dev->dev.parent = fm->dev;
 		dev->dev.bus = &tifm_bus_type;
 		dev->dev.release = tifm_free_device;
+		dev->signal_irq = tifm_dummy_signal_irq;
 	}
 	return dev;
 }
@@ -219,6 +249,7 @@ static int tifm_device_remove(struct device *dev)
 	struct tifm_driver *drv = fm_dev->drv;
 
 	if (drv) {
+		fm_dev->signal_irq = tifm_dummy_signal_irq;
 		if (drv->remove)
 			drv->remove(fm_dev);
 		fm_dev->drv = NULL;
@@ -233,6 +264,8 @@ int tifm_register_driver(struct tifm_driver *drv)
 	drv->driver.bus = &tifm_bus_type;
 	drv->driver.probe = tifm_device_probe;
 	drv->driver.remove = tifm_device_remove;
+	drv->driver.suspend = tifm_device_suspend;
+	drv->driver.resume = tifm_device_resume;
 
 	return driver_register(&drv->driver);
 }
diff --git a/drivers/mmc/Kconfig b/drivers/mmc/Kconfig
index 4224686fdf2a..12af9c718764 100644
--- a/drivers/mmc/Kconfig
+++ b/drivers/mmc/Kconfig
@@ -111,7 +111,7 @@ config MMC_IMX
 
 config MMC_TIFM_SD
 	tristate "TI Flash Media MMC/SD Interface support  (EXPERIMENTAL)"
-	depends on MMC && EXPERIMENTAL
+	depends on MMC && EXPERIMENTAL && PCI
 	select TIFM_CORE
 	help
 	  Say Y here if you want to be able to access MMC/SD cards with
diff --git a/drivers/mmc/at91_mci.c b/drivers/mmc/at91_mci.c
index aa152f31851e..2ce50f38e3c7 100644
--- a/drivers/mmc/at91_mci.c
+++ b/drivers/mmc/at91_mci.c
@@ -823,6 +823,9 @@ static int __init at91_mci_probe(struct platform_device *pdev)
 	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
 	mmc->caps = MMC_CAP_BYTEBLOCK;
 
+	mmc->max_blk_size = 4095;
+	mmc->max_blk_count = mmc->max_req_size;
+
 	host = mmc_priv(mmc);
 	host->mmc = mmc;
 	host->buffer = NULL;
diff --git a/drivers/mmc/au1xmmc.c b/drivers/mmc/au1xmmc.c
index 800527cf40d5..b834be261ab7 100644
--- a/drivers/mmc/au1xmmc.c
+++ b/drivers/mmc/au1xmmc.c
@@ -152,8 +152,9 @@ static inline int au1xmmc_card_inserted(struct au1xmmc_host *host)
 		? 1 : 0;
 }
 
-static inline int au1xmmc_card_readonly(struct au1xmmc_host *host)
+static int au1xmmc_card_readonly(struct mmc_host *mmc)
 {
+	struct au1xmmc_host *host = mmc_priv(mmc);
 	return (bcsr->status & au1xmmc_card_table[host->id].wpstatus)
 		? 1 : 0;
 }
@@ -193,6 +194,8 @@ static int au1xmmc_send_command(struct au1xmmc_host *host, int wait,
 	u32 mmccmd = (cmd->opcode << SD_CMD_CI_SHIFT);
 
 	switch (mmc_resp_type(cmd)) {
+	case MMC_RSP_NONE:
+		break;
 	case MMC_RSP_R1:
 		mmccmd |= SD_CMD_RT_1;
 		break;
@@ -205,6 +208,10 @@ static int au1xmmc_send_command(struct au1xmmc_host *host, int wait,
 	case MMC_RSP_R3:
 		mmccmd |= SD_CMD_RT_3;
 		break;
+	default:
+		printk(KERN_INFO "au1xmmc: unhandled response type %02x\n",
+		       mmc_resp_type(cmd));
+		return MMC_ERR_INVALID;
 	}
 
 	switch(cmd->opcode) {
@@ -878,6 +885,7 @@ static void au1xmmc_init_dma(struct au1xmmc_host *host)
 static const struct mmc_host_ops au1xmmc_ops = {
 	.request = au1xmmc_request,
 	.set_ios = au1xmmc_set_ios,
+	.get_ro = au1xmmc_card_readonly,
 };
 
 static int __devinit au1xmmc_probe(struct platform_device *pdev)
@@ -914,6 +922,9 @@ static int __devinit au1xmmc_probe(struct platform_device *pdev)
 	mmc->max_seg_size = AU1XMMC_DESCRIPTOR_SIZE;
 	mmc->max_phys_segs = AU1XMMC_DESCRIPTOR_COUNT;
 
+	mmc->max_blk_size = 2048;
+	mmc->max_blk_count = 512;
+
 	mmc->ocr_avail = AU1XMMC_OCR;
 
 	host = mmc_priv(mmc);
diff --git a/drivers/mmc/imxmmc.c b/drivers/mmc/imxmmc.c
index bfb9ff693208..b060d4bfba29 100644
--- a/drivers/mmc/imxmmc.c
+++ b/drivers/mmc/imxmmc.c
@@ -958,8 +958,10 @@ static int imxmci_probe(struct platform_device *pdev)
 	/* MMC core transfer sizes tunable parameters */
 	mmc->max_hw_segs = 64;
 	mmc->max_phys_segs = 64;
-	mmc->max_sectors = 64;		/* default 1 << (PAGE_CACHE_SHIFT - 9) */
 	mmc->max_seg_size = 64*512;	/* default PAGE_CACHE_SIZE */
+	mmc->max_req_size = 64*512;	/* default PAGE_CACHE_SIZE */
+	mmc->max_blk_size = 2048;
+	mmc->max_blk_count = 65535;
 
 	host = mmc_priv(mmc);
 	host->mmc = mmc;
diff --git a/drivers/mmc/mmc.c b/drivers/mmc/mmc.c
index 6f2a282e2b97..5046a1661342 100644
--- a/drivers/mmc/mmc.c
+++ b/drivers/mmc/mmc.c
@@ -103,11 +103,16 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
 		 mmc_hostname(host), mrq->cmd->opcode,
 		 mrq->cmd->arg, mrq->cmd->flags);
 
-	WARN_ON(host->card_busy == NULL);
+	WARN_ON(!host->claimed);
 
 	mrq->cmd->error = 0;
 	mrq->cmd->mrq = mrq;
 	if (mrq->data) {
+		BUG_ON(mrq->data->blksz > host->max_blk_size);
+		BUG_ON(mrq->data->blocks > host->max_blk_count);
+		BUG_ON(mrq->data->blocks * mrq->data->blksz >
+			host->max_req_size);
+
 		mrq->cmd->data = mrq->data;
 		mrq->data->error = 0;
 		mrq->data->mrq = mrq;
@@ -157,7 +162,7 @@ int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries
 {
 	struct mmc_request mrq;
 
-	BUG_ON(host->card_busy == NULL);
+	BUG_ON(!host->claimed);
 
 	memset(&mrq, 0, sizeof(struct mmc_request));
 
@@ -195,7 +200,7 @@ int mmc_wait_for_app_cmd(struct mmc_host *host, unsigned int rca,
 
 	int i, err;
 
-	BUG_ON(host->card_busy == NULL);
+	BUG_ON(!host->claimed);
 	BUG_ON(retries < 0);
 
 	err = MMC_ERR_INVALID;
@@ -289,7 +294,10 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card,
 	else
 		limit_us = 100000;
 
-	if (timeout_us > limit_us) {
+	/*
+	 * SDHC cards always use these fixed values.
+	 */
+	if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
 		data->timeout_ns = limit_us * 1000;
 		data->timeout_clks = 0;
 	}
@@ -320,14 +328,14 @@ int __mmc_claim_host(struct mmc_host *host, struct mmc_card *card)
 	spin_lock_irqsave(&host->lock, flags);
 	while (1) {
 		set_current_state(TASK_UNINTERRUPTIBLE);
-		if (host->card_busy == NULL)
+		if (!host->claimed)
 			break;
 		spin_unlock_irqrestore(&host->lock, flags);
 		schedule();
 		spin_lock_irqsave(&host->lock, flags);
 	}
 	set_current_state(TASK_RUNNING);
-	host->card_busy = card;
+	host->claimed = 1;
 	spin_unlock_irqrestore(&host->lock, flags);
 	remove_wait_queue(&host->wq, &wait);
 
@@ -353,10 +361,10 @@ void mmc_release_host(struct mmc_host *host)
 {
 	unsigned long flags;
 
-	BUG_ON(host->card_busy == NULL);
+	BUG_ON(!host->claimed);
 
 	spin_lock_irqsave(&host->lock, flags);
-	host->card_busy = NULL;
+	host->claimed = 0;
 	spin_unlock_irqrestore(&host->lock, flags);
 
 	wake_up(&host->wq);
@@ -372,7 +380,7 @@ static inline void mmc_set_ios(struct mmc_host *host)
 		 mmc_hostname(host), ios->clock, ios->bus_mode,
 		 ios->power_mode, ios->chip_select, ios->vdd,
 		 ios->bus_width);
-	
+
 	host->ops->set_ios(host, ios);
 }
 
@@ -381,7 +389,7 @@ static int mmc_select_card(struct mmc_host *host, struct mmc_card *card)
 	int err;
 	struct mmc_command cmd;
 
-	BUG_ON(host->card_busy == NULL);
+	BUG_ON(!host->claimed);
 
 	if (host->card_selected == card)
 		return MMC_ERR_NONE;
@@ -588,34 +596,65 @@ static void mmc_decode_csd(struct mmc_card *card)
 
 	if (mmc_card_sd(card)) {
 		csd_struct = UNSTUFF_BITS(resp, 126, 2);
-		if (csd_struct != 0) {
+
+		switch (csd_struct) {
+		case 0:
+			m = UNSTUFF_BITS(resp, 115, 4);
+			e = UNSTUFF_BITS(resp, 112, 3);
+			csd->tacc_ns = (tacc_exp[e] * tacc_mant[m] + 9) / 10;
+			csd->tacc_clks = UNSTUFF_BITS(resp, 104, 8) * 100;
+
+			m = UNSTUFF_BITS(resp, 99, 4);
+			e = UNSTUFF_BITS(resp, 96, 3);
+			csd->max_dtr = tran_exp[e] * tran_mant[m];
+			csd->cmdclass = UNSTUFF_BITS(resp, 84, 12);
+
+			e = UNSTUFF_BITS(resp, 47, 3);
+			m = UNSTUFF_BITS(resp, 62, 12);
+			csd->capacity = (1 + m) << (e + 2);
+
+			csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4);
+			csd->read_partial = UNSTUFF_BITS(resp, 79, 1);
+			csd->write_misalign = UNSTUFF_BITS(resp, 78, 1);
+			csd->read_misalign = UNSTUFF_BITS(resp, 77, 1);
+			csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3);
+			csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4);
+			csd->write_partial = UNSTUFF_BITS(resp, 21, 1);
+			break;
+		case 1:
+			/*
+			 * This is a block-addressed SDHC card. Most
+			 * interesting fields are unused and have fixed
+			 * values. To avoid getting tripped by buggy cards,
+			 * we assume those fixed values ourselves.
+			 */
+			mmc_card_set_blockaddr(card);
+
+			csd->tacc_ns = 0; /* Unused */
+			csd->tacc_clks = 0; /* Unused */
+
+			m = UNSTUFF_BITS(resp, 99, 4);
+			e = UNSTUFF_BITS(resp, 96, 3);
+			csd->max_dtr = tran_exp[e] * tran_mant[m];
+			csd->cmdclass = UNSTUFF_BITS(resp, 84, 12);
+
+			m = UNSTUFF_BITS(resp, 48, 22);
+			csd->capacity = (1 + m) << 10;
+
+			csd->read_blkbits = 9;
+			csd->read_partial = 0;
+			csd->write_misalign = 0;
+			csd->read_misalign = 0;
+			csd->r2w_factor = 4; /* Unused */
+			csd->write_blkbits = 9;
+			csd->write_partial = 0;
+			break;
+		default:
 			printk("%s: unrecognised CSD structure version %d\n",
 				mmc_hostname(card->host), csd_struct);
 			mmc_card_set_bad(card);
 			return;
 		}
-
-		m = UNSTUFF_BITS(resp, 115, 4);
-		e = UNSTUFF_BITS(resp, 112, 3);
-		csd->tacc_ns = (tacc_exp[e] * tacc_mant[m] + 9) / 10;
-		csd->tacc_clks = UNSTUFF_BITS(resp, 104, 8) * 100;
-
-		m = UNSTUFF_BITS(resp, 99, 4);
-		e = UNSTUFF_BITS(resp, 96, 3);
-		csd->max_dtr = tran_exp[e] * tran_mant[m];
-		csd->cmdclass = UNSTUFF_BITS(resp, 84, 12);
-
-		e = UNSTUFF_BITS(resp, 47, 3);
-		m = UNSTUFF_BITS(resp, 62, 12);
-		csd->capacity = (1 + m) << (e + 2);
-
-		csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4);
-		csd->read_partial = UNSTUFF_BITS(resp, 79, 1);
-		csd->write_misalign = UNSTUFF_BITS(resp, 78, 1);
-		csd->read_misalign = UNSTUFF_BITS(resp, 77, 1);
-		csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3);
-		csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4);
-		csd->write_partial = UNSTUFF_BITS(resp, 21, 1);
 	} else {
 		/*
 		 * We only understand CSD structure v1.1 and v1.2.
@@ -848,6 +887,41 @@ static int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
 	return err;
 }
 
+static int mmc_send_if_cond(struct mmc_host *host, u32 ocr, int *rsd2)
+{
+	struct mmc_command cmd;
+	int err, sd2;
+	static const u8 test_pattern = 0xAA;
+
+	/*
+	 * To support SD 2.0 cards, we must always invoke SD_SEND_IF_COND
+	 * before SD_APP_OP_COND. This command will harmlessly fail for
+	 * SD 1.0 cards.
+	 */
+	cmd.opcode = SD_SEND_IF_COND;
+	cmd.arg = ((ocr & 0xFF8000) != 0) << 8 | test_pattern;
+	cmd.flags = MMC_RSP_R7 | MMC_CMD_BCR;
+
+	err = mmc_wait_for_cmd(host, &cmd, 0);
+	if (err == MMC_ERR_NONE) {
+		if ((cmd.resp[0] & 0xFF) == test_pattern) {
+			sd2 = 1;
+		} else {
+			sd2 = 0;
+			err = MMC_ERR_FAILED;
+		}
+	} else {
+		/*
+		 * Treat errors as SD 1.0 card.
+		 */
+		sd2 = 0;
+		err = MMC_ERR_NONE;
+	}
+	if (rsd2)
+		*rsd2 = sd2;
+	return err;
+}
+
 /*
  * Discover cards by requesting their CID. If this command
  * times out, it is not an error; there are no further cards
@@ -1018,7 +1092,8 @@ static void mmc_process_ext_csds(struct mmc_host *host)
 		mmc_wait_for_req(host, &mrq);
 
 		if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE) {
-			mmc_card_set_dead(card);
+			printk("%s: unable to read EXT_CSD, performance "
+				"might suffer.\n", mmc_hostname(card->host));
 			continue;
 		}
 
@@ -1034,7 +1109,6 @@ static void mmc_process_ext_csds(struct mmc_host *host)
 			printk("%s: card is mmc v4 but doesn't support "
 				"any high-speed modes.\n",
 				mmc_hostname(card->host));
-			mmc_card_set_bad(card);
 			continue;
 		}
 
@@ -1215,7 +1289,9 @@ static void mmc_read_switch_caps(struct mmc_host *host)
 		mmc_wait_for_req(host, &mrq);
 
 		if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE) {
-			mmc_card_set_dead(card);
+			printk("%s: unable to read switch capabilities, "
+				"performance might suffer.\n",
+				mmc_hostname(card->host));
 			continue;
 		}
 
@@ -1247,12 +1323,8 @@ static void mmc_read_switch_caps(struct mmc_host *host)
 
 		mmc_wait_for_req(host, &mrq);
 
-		if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE) {
-			mmc_card_set_dead(card);
-			continue;
-		}
-
-		if ((status[16] & 0xF) != 1) {
+		if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE ||
+			(status[16] & 0xF) != 1) {
 			printk(KERN_WARNING "%s: Problem switching card "
 				"into high-speed mode!\n",
 				mmc_hostname(host));
@@ -1334,6 +1406,10 @@ static void mmc_setup(struct mmc_host *host)
 	mmc_power_up(host);
 	mmc_idle_cards(host);
 
+	err = mmc_send_if_cond(host, host->ocr_avail, NULL);
+	if (err != MMC_ERR_NONE) {
+		return;
+	}
 	err = mmc_send_app_op_cond(host, 0, &ocr);
 
 	/*
@@ -1386,10 +1462,21 @@ static void mmc_setup(struct mmc_host *host)
 	 * all get the idea that they should be ready for CMD2.
 	 * (My SanDisk card seems to need this.)
 	 */
-	if (host->mode == MMC_MODE_SD)
-		mmc_send_app_op_cond(host, host->ocr, NULL);
-	else
+	if (host->mode == MMC_MODE_SD) {
+		int err, sd2;
+		err = mmc_send_if_cond(host, host->ocr, &sd2);
+		if (err == MMC_ERR_NONE) {
+			/*
+			 * If SD_SEND_IF_COND indicates an SD 2.0
+			 * compliant card and we should set bit 30
+			 * of the ocr to indicate that we can handle
+			 * block-addressed SDHC cards.
+			 */
+			mmc_send_app_op_cond(host, host->ocr | (sd2 << 30), NULL);
+		}
+	} else {
 		mmc_send_op_cond(host, host->ocr, NULL);
+	}
 
 	mmc_discover_cards(host);
 
@@ -1519,8 +1606,11 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
 	 */
 		host->max_hw_segs = 1;
 		host->max_phys_segs = 1;
-		host->max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
 		host->max_seg_size = PAGE_CACHE_SIZE;
+
+		host->max_req_size = PAGE_CACHE_SIZE;
+		host->max_blk_size = 512;
+		host->max_blk_count = PAGE_CACHE_SIZE / 512;
 	}
 
 	return host;
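To make the case-1 (SDHC) CSD decode above concrete, here is a small sketch
computing a card's size from C_SIZE under the same fixed 512-byte block size
the patch assumes. The C_SIZE value below is an invented example, not taken
from the patch:

#include <stdint.h>
#include <stdio.h>

/*
 * SDHC capacity math from the CSD v2 branch above:
 * capacity = (C_SIZE + 1) << 10 blocks, each block fixed at 512 bytes.
 */
int main(void)
{
        uint32_t c_size = 15159;        /* hypothetical C_SIZE field */
        uint64_t blocks = ((uint64_t)c_size + 1) << 10;
        uint64_t bytes = blocks * 512;

        printf("%llu 512-byte blocks = %llu bytes\n",
               (unsigned long long)blocks, (unsigned long long)bytes);
        return 0;
}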
diff --git a/drivers/mmc/mmc_block.c b/drivers/mmc/mmc_block.c
index 87713572293f..05ba8ace70e7 100644
--- a/drivers/mmc/mmc_block.c
+++ b/drivers/mmc/mmc_block.c
@@ -237,13 +237,17 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 	brq.mrq.cmd = &brq.cmd;
 	brq.mrq.data = &brq.data;
 
-	brq.cmd.arg = req->sector << 9;
+	brq.cmd.arg = req->sector;
+	if (!mmc_card_blockaddr(card))
+		brq.cmd.arg <<= 9;
 	brq.cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
 	brq.data.blksz = 1 << md->block_bits;
-	brq.data.blocks = req->nr_sectors >> (md->block_bits - 9);
 	brq.stop.opcode = MMC_STOP_TRANSMISSION;
 	brq.stop.arg = 0;
 	brq.stop.flags = MMC_RSP_R1B | MMC_CMD_AC;
+	brq.data.blocks = req->nr_sectors >> (md->block_bits - 9);
+	if (brq.data.blocks > card->host->max_blk_count)
+		brq.data.blocks = card->host->max_blk_count;
 
 	mmc_set_data_timeout(&brq.data, card, rq_data_dir(req) != READ);
 
@@ -375,9 +379,10 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 		spin_unlock_irq(&md->lock);
 	}
 
+flush_queue:
+
 	mmc_card_release_host(card);
 
-flush_queue:
 	spin_lock_irq(&md->lock);
 	while (ret) {
 		ret = end_that_request_chunk(req, 0,
@@ -494,6 +499,10 @@ mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
 	struct mmc_command cmd;
 	int err;
 
+	/* Block-addressed cards ignore MMC_SET_BLOCKLEN. */
+	if (mmc_card_blockaddr(card))
+		return 0;
+
 	mmc_card_claim_host(card);
 	cmd.opcode = MMC_SET_BLOCKLEN;
 	cmd.arg = 1 << md->block_bits;
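The argument change above (sector vs. sector << 9) is the whole byte- versus
block-addressing split for SDHC; as a standalone sketch, with the helper name
invented for illustration:

/*
 * Addressing rule introduced above: standard-capacity cards take a byte
 * offset in the read/write commands, block-addressed (SDHC) cards take a
 * 512-byte block number. 'blockaddr' mirrors mmc_card_blockaddr(card).
 */
static unsigned int mmc_rw_arg(unsigned int sector, int blockaddr)
{
        return blockaddr ? sector : sector << 9;
}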
diff --git a/drivers/mmc/mmc_queue.c b/drivers/mmc/mmc_queue.c
index 3e35a43819fb..c27e42645cdb 100644
--- a/drivers/mmc/mmc_queue.c
+++ b/drivers/mmc/mmc_queue.c
@@ -147,7 +147,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
147 147
148 blk_queue_prep_rq(mq->queue, mmc_prep_request); 148 blk_queue_prep_rq(mq->queue, mmc_prep_request);
149 blk_queue_bounce_limit(mq->queue, limit); 149 blk_queue_bounce_limit(mq->queue, limit);
150 blk_queue_max_sectors(mq->queue, host->max_sectors); 150 blk_queue_max_sectors(mq->queue, host->max_req_size / 512);
151 blk_queue_max_phys_segments(mq->queue, host->max_phys_segs); 151 blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
152 blk_queue_max_hw_segments(mq->queue, host->max_hw_segs); 152 blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
153 blk_queue_max_segment_size(mq->queue, host->max_seg_size); 153 blk_queue_max_segment_size(mq->queue, host->max_seg_size);
diff --git a/drivers/mmc/mmc_sysfs.c b/drivers/mmc/mmc_sysfs.c
index e334acd045bc..d32698b02d7f 100644
--- a/drivers/mmc/mmc_sysfs.c
+++ b/drivers/mmc/mmc_sysfs.c
@@ -199,7 +199,7 @@ void mmc_init_card(struct mmc_card *card, struct mmc_host *host)
199 memset(card, 0, sizeof(struct mmc_card)); 199 memset(card, 0, sizeof(struct mmc_card));
200 card->host = host; 200 card->host = host;
201 device_initialize(&card->dev); 201 device_initialize(&card->dev);
202 card->dev.parent = mmc_dev(host); 202 card->dev.parent = mmc_classdev(host);
203 card->dev.bus = &mmc_bus_type; 203 card->dev.bus = &mmc_bus_type;
204 card->dev.release = mmc_release_card; 204 card->dev.release = mmc_release_card;
205} 205}
diff --git a/drivers/mmc/mmci.c b/drivers/mmc/mmci.c
index ccfe6561be24..5941dd951e82 100644
--- a/drivers/mmc/mmci.c
+++ b/drivers/mmc/mmci.c
@@ -524,15 +524,24 @@ static int mmci_probe(struct amba_device *dev, void *id)
524 /* 524 /*
525 * Since we only have a 16-bit data length register, we must 525 * Since we only have a 16-bit data length register, we must
526 * ensure that we don't exceed 2^16-1 bytes in a single request. 526 * ensure that we don't exceed 2^16-1 bytes in a single request.
527 * Choose 64 (512-byte) sectors as the limit.
528 */ 527 */
529 mmc->max_sectors = 64; 528 mmc->max_req_size = 65535;
530 529
531 /* 530 /*
532 * Set the maximum segment size. Since we aren't doing DMA 531 * Set the maximum segment size. Since we aren't doing DMA
533 * (yet) we are only limited by the data length register. 532 * (yet) we are only limited by the data length register.
534 */ 533 */
535 mmc->max_seg_size = mmc->max_sectors << 9; 534 mmc->max_seg_size = mmc->max_req_size;
535
536 /*
537 * Block size can be up to 2048 bytes, but must be a power of two.
538 */
539 mmc->max_blk_size = 2048;
540
541 /*
542 * No limit on the number of blocks transferred.
543 */
544 mmc->max_blk_count = mmc->max_req_size;
536 545
537 spin_lock_init(&host->lock); 546 spin_lock_init(&host->lock);
538 547
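MMCI's conversion shows what the split limits buy: a 16-bit data length register caps a request at 65535 bytes, the block size is a power of two up to 2048 bytes, and the block count is then bounded by the request size rather than by hardware. A small sketch that checks a proposed transfer against limits of that shape; the function is ours:

    #include <stdio.h>

    /* Would one transfer fit MMCI-style limits? (65535-byte data
     * length register, power-of-two block size up to 2048 bytes) */
    static int fits(unsigned int blksz, unsigned int blocks)
    {
        if (blksz == 0 || blksz > 2048 || (blksz & (blksz - 1)))
            return 0; /* not a power of two, or too big */
        return blksz * blocks <= 65535;
    }

    int main(void)
    {
        printf("512 x 127 blocks: %s\n", fits(512, 127) ? "fits" : "too big");
        printf("512 x 128 blocks: %s\n", fits(512, 128) ? "fits" : "too big");
        return 0;
    }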
diff --git a/drivers/mmc/omap.c b/drivers/mmc/omap.c
index d30540b27614..1e96a2f65022 100644
--- a/drivers/mmc/omap.c
+++ b/drivers/mmc/omap.c
@@ -1099,8 +1099,10 @@ static int __init mmc_omap_probe(struct platform_device *pdev)
1099 */ 1099 */
1100 mmc->max_phys_segs = 32; 1100 mmc->max_phys_segs = 32;
1101 mmc->max_hw_segs = 32; 1101 mmc->max_hw_segs = 32;
1102 mmc->max_sectors = 256; /* NBLK max 11-bits, OMAP also limited by DMA */ 1102 mmc->max_blk_size = 2048; /* BLEN is 11 bits (+1) */
1103 mmc->max_seg_size = mmc->max_sectors * 512; 1103 mmc->max_blk_count = 2048; /* NBLK is 11 bits (+1) */
1104 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
1105 mmc->max_seg_size = mmc->max_req_size;
1104 1106
1105 if (host->power_pin >= 0) { 1107 if (host->power_pin >= 0) {
1106 if ((ret = omap_request_gpio(host->power_pin)) != 0) { 1108 if ((ret = omap_request_gpio(host->power_pin)) != 0) {
diff --git a/drivers/mmc/pxamci.c b/drivers/mmc/pxamci.c
index 6073d998b11f..9774fc68b61a 100644
--- a/drivers/mmc/pxamci.c
+++ b/drivers/mmc/pxamci.c
@@ -450,6 +450,16 @@ static int pxamci_probe(struct platform_device *pdev)
450 */ 450 */
451 mmc->max_seg_size = PAGE_SIZE; 451 mmc->max_seg_size = PAGE_SIZE;
452 452
453 /*
454 * Block length register is 10 bits.
455 */
456 mmc->max_blk_size = 1023;
457
458 /*
459 * Block count register is 16 bits.
460 */
461 mmc->max_blk_count = 65535;
462
453 host = mmc_priv(mmc); 463 host = mmc_priv(mmc);
454 host->mmc = mmc; 464 host->mmc = mmc;
455 host->dma = -1; 465 host->dma = -1;
diff --git a/drivers/mmc/sdhci.c b/drivers/mmc/sdhci.c
index c2d13d7e9911..4bf1fea5e2c4 100644
--- a/drivers/mmc/sdhci.c
+++ b/drivers/mmc/sdhci.c
@@ -37,6 +37,7 @@ static unsigned int debug_quirks = 0;
37#define SDHCI_QUIRK_FORCE_DMA (1<<1) 37#define SDHCI_QUIRK_FORCE_DMA (1<<1)
38/* Controller doesn't like some resets when there is no card inserted. */ 38/* Controller doesn't like some resets when there is no card inserted. */
39#define SDHCI_QUIRK_NO_CARD_NO_RESET (1<<2) 39#define SDHCI_QUIRK_NO_CARD_NO_RESET (1<<2)
40#define SDHCI_QUIRK_SINGLE_POWER_WRITE (1<<3)
40 41
41static const struct pci_device_id pci_ids[] __devinitdata = { 42static const struct pci_device_id pci_ids[] __devinitdata = {
42 { 43 {
@@ -65,6 +66,14 @@ static const struct pci_device_id pci_ids[] __devinitdata = {
65 .driver_data = SDHCI_QUIRK_FORCE_DMA, 66 .driver_data = SDHCI_QUIRK_FORCE_DMA,
66 }, 67 },
67 68
69 {
70 .vendor = PCI_VENDOR_ID_ENE,
71 .device = PCI_DEVICE_ID_ENE_CB712_SD,
72 .subvendor = PCI_ANY_ID,
73 .subdevice = PCI_ANY_ID,
74 .driver_data = SDHCI_QUIRK_SINGLE_POWER_WRITE,
75 },
76
68 { /* Generic SD host controller */ 77 { /* Generic SD host controller */
69 PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00) 78 PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00)
70 }, 79 },
@@ -197,15 +206,9 @@ static void sdhci_deactivate_led(struct sdhci_host *host)
197 * * 206 * *
198\*****************************************************************************/ 207\*****************************************************************************/
199 208
200static inline char* sdhci_kmap_sg(struct sdhci_host* host) 209static inline char* sdhci_sg_to_buffer(struct sdhci_host* host)
201{ 210{
202 host->mapped_sg = kmap_atomic(host->cur_sg->page, KM_BIO_SRC_IRQ); 211 return page_address(host->cur_sg->page) + host->cur_sg->offset;
203 return host->mapped_sg + host->cur_sg->offset;
204}
205
206static inline void sdhci_kunmap_sg(struct sdhci_host* host)
207{
208 kunmap_atomic(host->mapped_sg, KM_BIO_SRC_IRQ);
209} 212}
210 213
211static inline int sdhci_next_sg(struct sdhci_host* host) 214static inline int sdhci_next_sg(struct sdhci_host* host)
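Dropping the kmap_atomic()/kunmap_atomic() pair for page_address() works only because these hosts never see highmem scatterlist pages: a permanently mapped page needs no map/unmap bookkeeping, so the helper becomes pure pointer arithmetic. A userspace stand-in for the pattern, with a plain pointer where struct page would be:

    #include <stdio.h>

    /* Scatterlist-entry stand-in: what page_address() would return,
     * plus the offset into the page. */
    struct sg_entry {
        char *page_base;
        unsigned int offset;
    };

    /* Mirrors sdhci_sg_to_buffer(): no mapping step, just arithmetic.
     * Valid only while the page is permanently mapped (no highmem). */
    static char *sg_to_buffer(const struct sg_entry *sg)
    {
        return sg->page_base + sg->offset;
    }

    int main(void)
    {
        static char page[4096];
        struct sg_entry sg = { page, 128 };

        printf("buffer = page + %u -> %p\n", sg.offset, (void *)sg_to_buffer(&sg));
        return 0;
    }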
@@ -240,7 +243,7 @@ static void sdhci_read_block_pio(struct sdhci_host *host)
240 chunk_remain = 0; 243 chunk_remain = 0;
241 data = 0; 244 data = 0;
242 245
243 buffer = sdhci_kmap_sg(host) + host->offset; 246 buffer = sdhci_sg_to_buffer(host) + host->offset;
244 247
245 while (blksize) { 248 while (blksize) {
246 if (chunk_remain == 0) { 249 if (chunk_remain == 0) {
@@ -264,16 +267,13 @@ static void sdhci_read_block_pio(struct sdhci_host *host)
264 } 267 }
265 268
266 if (host->remain == 0) { 269 if (host->remain == 0) {
267 sdhci_kunmap_sg(host);
268 if (sdhci_next_sg(host) == 0) { 270 if (sdhci_next_sg(host) == 0) {
269 BUG_ON(blksize != 0); 271 BUG_ON(blksize != 0);
270 return; 272 return;
271 } 273 }
272 buffer = sdhci_kmap_sg(host); 274 buffer = sdhci_sg_to_buffer(host);
273 } 275 }
274 } 276 }
275
276 sdhci_kunmap_sg(host);
277} 277}
278 278
279static void sdhci_write_block_pio(struct sdhci_host *host) 279static void sdhci_write_block_pio(struct sdhci_host *host)
@@ -290,7 +290,7 @@ static void sdhci_write_block_pio(struct sdhci_host *host)
290 data = 0; 290 data = 0;
291 291
292 bytes = 0; 292 bytes = 0;
293 buffer = sdhci_kmap_sg(host) + host->offset; 293 buffer = sdhci_sg_to_buffer(host) + host->offset;
294 294
295 while (blksize) { 295 while (blksize) {
296 size = min(host->size, host->remain); 296 size = min(host->size, host->remain);
@@ -314,16 +314,13 @@ static void sdhci_write_block_pio(struct sdhci_host *host)
314 } 314 }
315 315
316 if (host->remain == 0) { 316 if (host->remain == 0) {
317 sdhci_kunmap_sg(host);
318 if (sdhci_next_sg(host) == 0) { 317 if (sdhci_next_sg(host) == 0) {
319 BUG_ON(blksize != 0); 318 BUG_ON(blksize != 0);
320 return; 319 return;
321 } 320 }
322 buffer = sdhci_kmap_sg(host); 321 buffer = sdhci_sg_to_buffer(host);
323 } 322 }
324 } 323 }
325
326 sdhci_kunmap_sg(host);
327} 324}
328 325
329static void sdhci_transfer_pio(struct sdhci_host *host) 326static void sdhci_transfer_pio(struct sdhci_host *host)
@@ -372,7 +369,7 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
372 369
373 /* Sanity checks */ 370 /* Sanity checks */
374 BUG_ON(data->blksz * data->blocks > 524288); 371 BUG_ON(data->blksz * data->blocks > 524288);
375 BUG_ON(data->blksz > host->max_block); 372 BUG_ON(data->blksz > host->mmc->max_blk_size);
376 BUG_ON(data->blocks > 65535); 373 BUG_ON(data->blocks > 65535);
377 374
378 /* timeout in us */ 375 /* timeout in us */
@@ -674,10 +671,17 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
674 if (host->power == power) 671 if (host->power == power)
675 return; 672 return;
676 673
677 writeb(0, host->ioaddr + SDHCI_POWER_CONTROL); 674 if (power == (unsigned short)-1) {
678 675 writeb(0, host->ioaddr + SDHCI_POWER_CONTROL);
679 if (power == (unsigned short)-1)
680 goto out; 676 goto out;
677 }
678
679 /*
680 * Spec says that we should clear the power reg before setting
681 * a new value. Some controllers don't seem to like this though.
682 */
683 if (!(host->chip->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
684 writeb(0, host->ioaddr + SDHCI_POWER_CONTROL);
681 685
682 pwr = SDHCI_POWER_ON; 686 pwr = SDHCI_POWER_ON;
683 687
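The reworked sdhci_set_power() turns a spec rule into per-controller policy: the clear-before-set write that the SDHCI spec requires is skipped when SDHCI_QUIRK_SINGLE_POWER_WRITE is set, which is what the ENE CB712 entry above asks for. The quirk-mask pattern in isolation; the register model is a demo stand-in:

    #include <stdint.h>
    #include <stdio.h>

    #define QUIRK_SINGLE_POWER_WRITE (1u << 3)

    static int writes; /* counts register writes for the demo */

    static void reg_write(uint8_t val)
    {
        (void)val; /* a real driver would writeb() here */
        writes++;
    }

    static void set_power(uint32_t quirks, uint8_t pwr)
    {
        /* Spec says clear the power register before a new value;
         * quirky parts get one combined write instead. */
        if (!(quirks & QUIRK_SINGLE_POWER_WRITE))
            reg_write(0);
        reg_write(pwr);
    }

    int main(void)
    {
        set_power(0, 0x0f);
        printf("normal part: %d writes\n", writes);
        writes = 0;
        set_power(QUIRK_SINGLE_POWER_WRITE, 0x0f);
        printf("quirky part: %d writes\n", writes);
        return 0;
    }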
@@ -1109,7 +1113,9 @@ static int sdhci_resume (struct pci_dev *pdev)
1109 1113
1110 pci_set_power_state(pdev, PCI_D0); 1114 pci_set_power_state(pdev, PCI_D0);
1111 pci_restore_state(pdev); 1115 pci_restore_state(pdev);
1112 pci_enable_device(pdev); 1116 ret = pci_enable_device(pdev);
1117 if (ret)
1118 return ret;
1113 1119
1114 for (i = 0;i < chip->num_slots;i++) { 1120 for (i = 0;i < chip->num_slots;i++) {
1115 if (!chip->hosts[i]) 1121 if (!chip->hosts[i])
@@ -1274,15 +1280,6 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot)
1274 if (caps & SDHCI_TIMEOUT_CLK_UNIT) 1280 if (caps & SDHCI_TIMEOUT_CLK_UNIT)
1275 host->timeout_clk *= 1000; 1281 host->timeout_clk *= 1000;
1276 1282
1277 host->max_block = (caps & SDHCI_MAX_BLOCK_MASK) >> SDHCI_MAX_BLOCK_SHIFT;
1278 if (host->max_block >= 3) {
1279 printk(KERN_ERR "%s: Invalid maximum block size.\n",
1280 host->slot_descr);
1281 ret = -ENODEV;
1282 goto unmap;
1283 }
1284 host->max_block = 512 << host->max_block;
1285
1286 /* 1283 /*
1287 * Set host parameters. 1284 * Set host parameters.
1288 */ 1285 */
@@ -1294,9 +1291,9 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot)
1294 mmc->ocr_avail = 0; 1291 mmc->ocr_avail = 0;
1295 if (caps & SDHCI_CAN_VDD_330) 1292 if (caps & SDHCI_CAN_VDD_330)
1296 mmc->ocr_avail |= MMC_VDD_32_33|MMC_VDD_33_34; 1293 mmc->ocr_avail |= MMC_VDD_32_33|MMC_VDD_33_34;
1297 else if (caps & SDHCI_CAN_VDD_300) 1294 if (caps & SDHCI_CAN_VDD_300)
1298 mmc->ocr_avail |= MMC_VDD_29_30|MMC_VDD_30_31; 1295 mmc->ocr_avail |= MMC_VDD_29_30|MMC_VDD_30_31;
1299 else if (caps & SDHCI_CAN_VDD_180) 1296 if (caps & SDHCI_CAN_VDD_180)
1300 mmc->ocr_avail |= MMC_VDD_17_18|MMC_VDD_18_19; 1297 mmc->ocr_avail |= MMC_VDD_17_18|MMC_VDD_18_19;
1301 1298
1302 if ((host->max_clk > 25000000) && !(caps & SDHCI_CAN_DO_HISPD)) { 1299 if ((host->max_clk > 25000000) && !(caps & SDHCI_CAN_DO_HISPD)) {
@@ -1326,15 +1323,33 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot)
1326 1323
1327 /* 1324 /*
1328 * Maximum number of sectors in one transfer. Limited by DMA boundary 1325 * Maximum number of sectors in one transfer. Limited by DMA boundary
1329 * size (512KiB), which means (512 KiB/512=) 1024 entries. 1326 * size (512KiB).
1330 */ 1327 */
1331 mmc->max_sectors = 1024; 1328 mmc->max_req_size = 524288;
1332 1329
1333 /* 1330 /*
1334 * Maximum segment size. Could be one segment with the maximum number 1331 * Maximum segment size. Could be one segment with the maximum number
1335 * of sectors. 1332 * of bytes.
1333 */
1334 mmc->max_seg_size = mmc->max_req_size;
1335
1336 /*
1337 * Maximum block size. This varies from controller to controller and
1338 * is specified in the capabilities register.
1339 */
1340 mmc->max_blk_size = (caps & SDHCI_MAX_BLOCK_MASK) >> SDHCI_MAX_BLOCK_SHIFT;
1341 if (mmc->max_blk_size >= 3) {
1342 printk(KERN_ERR "%s: Invalid maximum block size.\n",
1343 host->slot_descr);
1344 ret = -ENODEV;
1345 goto unmap;
1346 }
1347 mmc->max_blk_size = 512 << mmc->max_blk_size;
1348
1349 /*
1350 * Maximum block count.
1336 */ 1351 */
1337 mmc->max_seg_size = mmc->max_sectors * 512; 1352 mmc->max_blk_count = 65535;
1338 1353
1339 /* 1354 /*
1340 * Init tasklets. 1355 * Init tasklets.
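The relocated block-size setup decodes a 2-bit capability field: encodings 0, 1, 2 mean 512, 1024, 2048 bytes, and 3 is reserved, hence the >= 3 rejection before the 512 << x expansion. The decode stands alone; the mask and shift below follow the published SDHCI capabilities layout (bits 17:16), matching the driver's constants:

    #include <stdint.h>
    #include <stdio.h>

    #define SDHCI_MAX_BLOCK_MASK  0x00030000
    #define SDHCI_MAX_BLOCK_SHIFT 16

    /* Max Block Length capability: 512 << {0,1,2}; 3 is reserved
     * (the driver fails the probe with -ENODEV in that case). */
    static int decode_max_blk_size(uint32_t caps)
    {
        uint32_t field = (caps & SDHCI_MAX_BLOCK_MASK) >> SDHCI_MAX_BLOCK_SHIFT;

        if (field >= 3)
            return -1; /* reserved encoding */
        return 512 << field;
    }

    int main(void)
    {
        printf("field 0 -> %d\n", decode_max_blk_size(0u << 16));
        printf("field 2 -> %d\n", decode_max_blk_size(2u << 16));
        printf("field 3 -> %d\n", decode_max_blk_size(3u << 16));
        return 0;
    }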
diff --git a/drivers/mmc/sdhci.h b/drivers/mmc/sdhci.h
index f9d1a0a6f03a..e324f0a623dc 100644
--- a/drivers/mmc/sdhci.h
+++ b/drivers/mmc/sdhci.h
@@ -174,7 +174,6 @@ struct sdhci_host {
174 174
175 unsigned int max_clk; /* Max possible freq (MHz) */ 175 unsigned int max_clk; /* Max possible freq (MHz) */
176 unsigned int timeout_clk; /* Timeout freq (KHz) */ 176 unsigned int timeout_clk; /* Timeout freq (KHz) */
177 unsigned int max_block; /* Max block size (bytes) */
178 177
179 unsigned int clock; /* Current clock (MHz) */ 178 unsigned int clock; /* Current clock (MHz) */
180 unsigned short power; /* Current voltage */ 179 unsigned short power; /* Current voltage */
@@ -184,7 +183,6 @@ struct sdhci_host {
184 struct mmc_data *data; /* Current data request */ 183 struct mmc_data *data; /* Current data request */
185 184
186 struct scatterlist *cur_sg; /* We're working on this */ 185 struct scatterlist *cur_sg; /* We're working on this */
187 char *mapped_sg; /* This is where it's mapped */
188 int num_sg; /* Entries left */ 186 int num_sg; /* Entries left */
189 int offset; /* Offset into current sg */ 187 int offset; /* Offset into current sg */
190 int remain; /* Bytes left in current */ 188 int remain; /* Bytes left in current */
diff --git a/drivers/mmc/tifm_sd.c b/drivers/mmc/tifm_sd.c
index fa4a52886b97..e65f8a0a9349 100644
--- a/drivers/mmc/tifm_sd.c
+++ b/drivers/mmc/tifm_sd.c
@@ -17,7 +17,7 @@
17#include <asm/io.h> 17#include <asm/io.h>
18 18
19#define DRIVER_NAME "tifm_sd" 19#define DRIVER_NAME "tifm_sd"
20#define DRIVER_VERSION "0.6" 20#define DRIVER_VERSION "0.7"
21 21
22static int no_dma = 0; 22static int no_dma = 0;
23static int fixed_timeout = 0; 23static int fixed_timeout = 0;
@@ -79,7 +79,6 @@ typedef enum {
79 79
80enum { 80enum {
81 FIFO_RDY = 0x0001, /* hardware dependent value */ 81 FIFO_RDY = 0x0001, /* hardware dependent value */
82 HOST_REG = 0x0002,
83 EJECT = 0x0004, 82 EJECT = 0x0004,
84 EJECT_DONE = 0x0008, 83 EJECT_DONE = 0x0008,
85 CARD_BUSY = 0x0010, 84 CARD_BUSY = 0x0010,
@@ -95,46 +94,53 @@ struct tifm_sd {
95 card_state_t state; 94 card_state_t state;
96 unsigned int clk_freq; 95 unsigned int clk_freq;
97 unsigned int clk_div; 96 unsigned int clk_div;
98 unsigned long timeout_jiffies; // software timeout - 2 sec 97 unsigned long timeout_jiffies;
99 98
99 struct tasklet_struct finish_tasklet;
100 struct timer_list timer;
100 struct mmc_request *req; 101 struct mmc_request *req;
101 struct work_struct cmd_handler; 102 wait_queue_head_t notify;
102 struct delayed_work abort_handler;
103 wait_queue_head_t can_eject;
104 103
105 size_t written_blocks; 104 size_t written_blocks;
106 char *buffer;
107 size_t buffer_size; 105 size_t buffer_size;
108 size_t buffer_pos; 106 size_t buffer_pos;
109 107
110}; 108};
111 109
110static char* tifm_sd_data_buffer(struct mmc_data *data)
111{
112 return page_address(data->sg->page) + data->sg->offset;
113}
114
112static int tifm_sd_transfer_data(struct tifm_dev *sock, struct tifm_sd *host, 115static int tifm_sd_transfer_data(struct tifm_dev *sock, struct tifm_sd *host,
113 unsigned int host_status) 116 unsigned int host_status)
114{ 117{
115 struct mmc_command *cmd = host->req->cmd; 118 struct mmc_command *cmd = host->req->cmd;
116 unsigned int t_val = 0, cnt = 0; 119 unsigned int t_val = 0, cnt = 0;
120 char *buffer;
117 121
118 if (host_status & TIFM_MMCSD_BRS) { 122 if (host_status & TIFM_MMCSD_BRS) {
119 /* in non-dma rx mode BRS fires when fifo is still not empty */ 123 /* in non-dma rx mode BRS fires when fifo is still not empty */
120 if (host->buffer && (cmd->data->flags & MMC_DATA_READ)) { 124 if (no_dma && (cmd->data->flags & MMC_DATA_READ)) {
125 buffer = tifm_sd_data_buffer(host->req->data);
121 while (host->buffer_size > host->buffer_pos) { 126 while (host->buffer_size > host->buffer_pos) {
122 t_val = readl(sock->addr + SOCK_MMCSD_DATA); 127 t_val = readl(sock->addr + SOCK_MMCSD_DATA);
123 host->buffer[host->buffer_pos++] = t_val & 0xff; 128 buffer[host->buffer_pos++] = t_val & 0xff;
124 host->buffer[host->buffer_pos++] = 129 buffer[host->buffer_pos++] =
125 (t_val >> 8) & 0xff; 130 (t_val >> 8) & 0xff;
126 } 131 }
127 } 132 }
128 return 1; 133 return 1;
129 } else if (host->buffer) { 134 } else if (no_dma) {
135 buffer = tifm_sd_data_buffer(host->req->data);
130 if ((cmd->data->flags & MMC_DATA_READ) && 136 if ((cmd->data->flags & MMC_DATA_READ) &&
131 (host_status & TIFM_MMCSD_AF)) { 137 (host_status & TIFM_MMCSD_AF)) {
132 for (cnt = 0; cnt < TIFM_MMCSD_FIFO_SIZE; cnt++) { 138 for (cnt = 0; cnt < TIFM_MMCSD_FIFO_SIZE; cnt++) {
133 t_val = readl(sock->addr + SOCK_MMCSD_DATA); 139 t_val = readl(sock->addr + SOCK_MMCSD_DATA);
134 if (host->buffer_size > host->buffer_pos) { 140 if (host->buffer_size > host->buffer_pos) {
135 host->buffer[host->buffer_pos++] = 141 buffer[host->buffer_pos++] =
136 t_val & 0xff; 142 t_val & 0xff;
137 host->buffer[host->buffer_pos++] = 143 buffer[host->buffer_pos++] =
138 (t_val >> 8) & 0xff; 144 (t_val >> 8) & 0xff;
139 } 145 }
140 } 146 }
@@ -142,11 +148,12 @@ static int tifm_sd_transfer_data(struct tifm_dev *sock, struct tifm_sd *host,
142 && (host_status & TIFM_MMCSD_AE)) { 148 && (host_status & TIFM_MMCSD_AE)) {
143 for (cnt = 0; cnt < TIFM_MMCSD_FIFO_SIZE; cnt++) { 149 for (cnt = 0; cnt < TIFM_MMCSD_FIFO_SIZE; cnt++) {
144 if (host->buffer_size > host->buffer_pos) { 150 if (host->buffer_size > host->buffer_pos) {
145 t_val = host->buffer[host->buffer_pos++] & 0x00ff; 151 t_val = buffer[host->buffer_pos++]
146 t_val |= ((host->buffer[host->buffer_pos++]) << 8) 152 & 0x00ff;
147 & 0xff00; 153 t_val |= ((buffer[host->buffer_pos++])
154 << 8) & 0xff00;
148 writel(t_val, 155 writel(t_val,
149 sock->addr + SOCK_MMCSD_DATA); 156 sock->addr + SOCK_MMCSD_DATA);
150 } 157 }
151 } 158 }
152 } 159 }
@@ -206,7 +213,7 @@ static void tifm_sd_exec(struct tifm_sd *host, struct mmc_command *cmd)
206 cmd_mask |= TIFM_MMCSD_READ; 213 cmd_mask |= TIFM_MMCSD_READ;
207 214
208 dev_dbg(&sock->dev, "executing opcode 0x%x, arg: 0x%x, mask: 0x%x\n", 215 dev_dbg(&sock->dev, "executing opcode 0x%x, arg: 0x%x, mask: 0x%x\n",
209 cmd->opcode, cmd->arg, cmd_mask); 216 cmd->opcode, cmd->arg, cmd_mask);
210 217
211 writel((cmd->arg >> 16) & 0xffff, sock->addr + SOCK_MMCSD_ARG_HIGH); 218 writel((cmd->arg >> 16) & 0xffff, sock->addr + SOCK_MMCSD_ARG_HIGH);
212 writel(cmd->arg & 0xffff, sock->addr + SOCK_MMCSD_ARG_LOW); 219 writel(cmd->arg & 0xffff, sock->addr + SOCK_MMCSD_ARG_LOW);
@@ -239,65 +246,78 @@ change_state:
239 tifm_sd_fetch_resp(cmd, sock); 246 tifm_sd_fetch_resp(cmd, sock);
240 if (cmd->data) { 247 if (cmd->data) {
241 host->state = BRS; 248 host->state = BRS;
242 } else 249 } else {
243 host->state = READY; 250 host->state = READY;
251 }
244 goto change_state; 252 goto change_state;
245 } 253 }
246 break; 254 break;
247 case BRS: 255 case BRS:
248 if (tifm_sd_transfer_data(sock, host, host_status)) { 256 if (tifm_sd_transfer_data(sock, host, host_status)) {
249 if (!host->req->stop) { 257 if (cmd->data->flags & MMC_DATA_WRITE) {
250 if (cmd->data->flags & MMC_DATA_WRITE) { 258 host->state = CARD;
251 host->state = CARD; 259 } else {
260 if (no_dma) {
261 if (host->req->stop) {
262 tifm_sd_exec(host, host->req->stop);
263 host->state = SCMD;
264 } else {
265 host->state = READY;
266 }
252 } else { 267 } else {
253 host->state = 268 host->state = FIFO;
254 host->buffer ? READY : FIFO;
255 } 269 }
256 goto change_state;
257 } 270 }
258 tifm_sd_exec(host, host->req->stop); 271 goto change_state;
259 host->state = SCMD;
260 } 272 }
261 break; 273 break;
262 case SCMD: 274 case SCMD:
263 if (host_status & TIFM_MMCSD_EOC) { 275 if (host_status & TIFM_MMCSD_EOC) {
264 tifm_sd_fetch_resp(host->req->stop, sock); 276 tifm_sd_fetch_resp(host->req->stop, sock);
265 if (cmd->error) { 277 host->state = READY;
266 host->state = READY;
267 } else if (cmd->data->flags & MMC_DATA_WRITE) {
268 host->state = CARD;
269 } else {
270 host->state = host->buffer ? READY : FIFO;
271 }
272 goto change_state; 278 goto change_state;
273 } 279 }
274 break; 280 break;
275 case CARD: 281 case CARD:
282 dev_dbg(&sock->dev, "waiting for CARD, have %zd blocks\n",
283 host->written_blocks);
276 if (!(host->flags & CARD_BUSY) 284 if (!(host->flags & CARD_BUSY)
277 && (host->written_blocks == cmd->data->blocks)) { 285 && (host->written_blocks == cmd->data->blocks)) {
278 host->state = host->buffer ? READY : FIFO; 286 if (no_dma) {
287 if (host->req->stop) {
288 tifm_sd_exec(host, host->req->stop);
289 host->state = SCMD;
290 } else {
291 host->state = READY;
292 }
293 } else {
294 host->state = FIFO;
295 }
279 goto change_state; 296 goto change_state;
280 } 297 }
281 break; 298 break;
282 case FIFO: 299 case FIFO:
283 if (host->flags & FIFO_RDY) { 300 if (host->flags & FIFO_RDY) {
284 host->state = READY;
285 host->flags &= ~FIFO_RDY; 301 host->flags &= ~FIFO_RDY;
302 if (host->req->stop) {
303 tifm_sd_exec(host, host->req->stop);
304 host->state = SCMD;
305 } else {
306 host->state = READY;
307 }
286 goto change_state; 308 goto change_state;
287 } 309 }
288 break; 310 break;
289 case READY: 311 case READY:
290 queue_work(sock->wq, &host->cmd_handler); 312 tasklet_schedule(&host->finish_tasklet);
291 return; 313 return;
292 } 314 }
293 315
294 queue_delayed_work(sock->wq, &host->abort_handler,
295 host->timeout_jiffies);
296} 316}
297 317
298/* Called from interrupt handler */ 318/* Called from interrupt handler */
299static unsigned int tifm_sd_signal_irq(struct tifm_dev *sock, 319static void tifm_sd_signal_irq(struct tifm_dev *sock,
300 unsigned int sock_irq_status) 320 unsigned int sock_irq_status)
301{ 321{
302 struct tifm_sd *host; 322 struct tifm_sd *host;
303 unsigned int host_status = 0, fifo_status = 0; 323 unsigned int host_status = 0, fifo_status = 0;
@@ -305,7 +325,6 @@ static unsigned int tifm_sd_signal_irq(struct tifm_dev *sock,
305 325
306 spin_lock(&sock->lock); 326 spin_lock(&sock->lock);
307 host = mmc_priv((struct mmc_host*)tifm_get_drvdata(sock)); 327 host = mmc_priv((struct mmc_host*)tifm_get_drvdata(sock));
308 cancel_delayed_work(&host->abort_handler);
309 328
310 if (sock_irq_status & FIFO_EVENT) { 329 if (sock_irq_status & FIFO_EVENT) {
311 fifo_status = readl(sock->addr + SOCK_DMA_FIFO_STATUS); 330 fifo_status = readl(sock->addr + SOCK_DMA_FIFO_STATUS);
@@ -318,19 +337,17 @@ static unsigned int tifm_sd_signal_irq(struct tifm_dev *sock,
318 host_status = readl(sock->addr + SOCK_MMCSD_STATUS); 337 host_status = readl(sock->addr + SOCK_MMCSD_STATUS);
319 writel(host_status, sock->addr + SOCK_MMCSD_STATUS); 338 writel(host_status, sock->addr + SOCK_MMCSD_STATUS);
320 339
321 if (!(host->flags & HOST_REG))
322 queue_work(sock->wq, &host->cmd_handler);
323 if (!host->req) 340 if (!host->req)
324 goto done; 341 goto done;
325 342
326 if (host_status & TIFM_MMCSD_ERRMASK) { 343 if (host_status & TIFM_MMCSD_ERRMASK) {
327 if (host_status & TIFM_MMCSD_CERR) 344 if (host_status & TIFM_MMCSD_CERR)
328 error_code = MMC_ERR_FAILED; 345 error_code = MMC_ERR_FAILED;
329 else if (host_status & 346 else if (host_status
330 (TIFM_MMCSD_CTO | TIFM_MMCSD_DTO)) 347 & (TIFM_MMCSD_CTO | TIFM_MMCSD_DTO))
331 error_code = MMC_ERR_TIMEOUT; 348 error_code = MMC_ERR_TIMEOUT;
332 else if (host_status & 349 else if (host_status
333 (TIFM_MMCSD_CCRC | TIFM_MMCSD_DCRC)) 350 & (TIFM_MMCSD_CCRC | TIFM_MMCSD_DCRC))
334 error_code = MMC_ERR_BADCRC; 351 error_code = MMC_ERR_BADCRC;
335 352
336 writel(TIFM_FIFO_INT_SETALL, 353 writel(TIFM_FIFO_INT_SETALL,
@@ -340,12 +357,11 @@ static unsigned int tifm_sd_signal_irq(struct tifm_dev *sock,
340 if (host->req->stop) { 357 if (host->req->stop) {
341 if (host->state == SCMD) { 358 if (host->state == SCMD) {
342 host->req->stop->error = error_code; 359 host->req->stop->error = error_code;
343 } else if(host->state == BRS) { 360 } else if (host->state == BRS
361 || host->state == CARD
362 || host->state == FIFO) {
344 host->req->cmd->error = error_code; 363 host->req->cmd->error = error_code;
345 tifm_sd_exec(host, host->req->stop); 364 tifm_sd_exec(host, host->req->stop);
346 queue_delayed_work(sock->wq,
347 &host->abort_handler,
348 host->timeout_jiffies);
349 host->state = SCMD; 365 host->state = SCMD;
350 goto done; 366 goto done;
351 } else { 367 } else {
@@ -359,8 +375,8 @@ static unsigned int tifm_sd_signal_irq(struct tifm_dev *sock,
359 375
360 if (host_status & TIFM_MMCSD_CB) 376 if (host_status & TIFM_MMCSD_CB)
361 host->flags |= CARD_BUSY; 377 host->flags |= CARD_BUSY;
362 if ((host_status & TIFM_MMCSD_EOFB) && 378 if ((host_status & TIFM_MMCSD_EOFB)
363 (host->flags & CARD_BUSY)) { 379 && (host->flags & CARD_BUSY)) {
364 host->written_blocks++; 380 host->written_blocks++;
365 host->flags &= ~CARD_BUSY; 381 host->flags &= ~CARD_BUSY;
366 } 382 }
@@ -370,22 +386,22 @@ static unsigned int tifm_sd_signal_irq(struct tifm_dev *sock,
370 tifm_sd_process_cmd(sock, host, host_status); 386 tifm_sd_process_cmd(sock, host, host_status);
371done: 387done:
372 dev_dbg(&sock->dev, "host_status %x, fifo_status %x\n", 388 dev_dbg(&sock->dev, "host_status %x, fifo_status %x\n",
373 host_status, fifo_status); 389 host_status, fifo_status);
374 spin_unlock(&sock->lock); 390 spin_unlock(&sock->lock);
375 return sock_irq_status;
376} 391}
377 392
378static void tifm_sd_prepare_data(struct tifm_sd *card, struct mmc_command *cmd) 393static void tifm_sd_prepare_data(struct tifm_sd *host, struct mmc_command *cmd)
379{ 394{
380 struct tifm_dev *sock = card->dev; 395 struct tifm_dev *sock = host->dev;
381 unsigned int dest_cnt; 396 unsigned int dest_cnt;
382 397
383 /* DMA style IO */ 398 /* DMA style IO */
384 399 dev_dbg(&sock->dev, "setting dma for %d blocks\n",
400 cmd->data->blocks);
385 writel(TIFM_FIFO_INT_SETALL, 401 writel(TIFM_FIFO_INT_SETALL,
386 sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR); 402 sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR);
387 writel(ilog2(cmd->data->blksz) - 2, 403 writel(ilog2(cmd->data->blksz) - 2,
388 sock->addr + SOCK_FIFO_PAGE_SIZE); 404 sock->addr + SOCK_FIFO_PAGE_SIZE);
389 writel(TIFM_FIFO_ENABLE, sock->addr + SOCK_FIFO_CONTROL); 405 writel(TIFM_FIFO_ENABLE, sock->addr + SOCK_FIFO_CONTROL);
390 writel(TIFM_FIFO_INTMASK, sock->addr + SOCK_DMA_FIFO_INT_ENABLE_SET); 406 writel(TIFM_FIFO_INTMASK, sock->addr + SOCK_DMA_FIFO_INT_ENABLE_SET);
391 407
@@ -399,7 +415,7 @@ static void tifm_sd_prepare_data(struct tifm_sd *card, struct mmc_command *cmd)
399 if (cmd->data->flags & MMC_DATA_WRITE) { 415 if (cmd->data->flags & MMC_DATA_WRITE) {
400 writel(TIFM_MMCSD_TXDE, sock->addr + SOCK_MMCSD_BUFFER_CONFIG); 416 writel(TIFM_MMCSD_TXDE, sock->addr + SOCK_MMCSD_BUFFER_CONFIG);
401 writel(dest_cnt | TIFM_DMA_TX | TIFM_DMA_EN, 417 writel(dest_cnt | TIFM_DMA_TX | TIFM_DMA_EN,
402 sock->addr + SOCK_DMA_CONTROL); 418 sock->addr + SOCK_DMA_CONTROL);
403 } else { 419 } else {
404 writel(TIFM_MMCSD_RXDE, sock->addr + SOCK_MMCSD_BUFFER_CONFIG); 420 writel(TIFM_MMCSD_RXDE, sock->addr + SOCK_MMCSD_BUFFER_CONFIG);
405 writel(dest_cnt | TIFM_DMA_EN, sock->addr + SOCK_DMA_CONTROL); 421 writel(dest_cnt | TIFM_DMA_EN, sock->addr + SOCK_DMA_CONTROL);
@@ -407,7 +423,7 @@ static void tifm_sd_prepare_data(struct tifm_sd *card, struct mmc_command *cmd)
407} 423}
408 424
409static void tifm_sd_set_data_timeout(struct tifm_sd *host, 425static void tifm_sd_set_data_timeout(struct tifm_sd *host,
410 struct mmc_data *data) 426 struct mmc_data *data)
411{ 427{
412 struct tifm_dev *sock = host->dev; 428 struct tifm_dev *sock = host->dev;
413 unsigned int data_timeout = data->timeout_clks; 429 unsigned int data_timeout = data->timeout_clks;
@@ -416,22 +432,21 @@ static void tifm_sd_set_data_timeout(struct tifm_sd *host,
416 return; 432 return;
417 433
418 data_timeout += data->timeout_ns / 434 data_timeout += data->timeout_ns /
419 ((1000000000 / host->clk_freq) * host->clk_div); 435 ((1000000000UL / host->clk_freq) * host->clk_div);
420 data_timeout *= 10; // call it fudge factor for now
421 436
422 if (data_timeout < 0xffff) { 437 if (data_timeout < 0xffff) {
423 writel((~TIFM_MMCSD_DPE) &
424 readl(sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG),
425 sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG);
426 writel(data_timeout, sock->addr + SOCK_MMCSD_DATA_TO); 438 writel(data_timeout, sock->addr + SOCK_MMCSD_DATA_TO);
439 writel((~TIFM_MMCSD_DPE)
440 & readl(sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG),
441 sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG);
427 } else { 442 } else {
428 writel(TIFM_MMCSD_DPE |
429 readl(sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG),
430 sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG);
431 data_timeout = (data_timeout >> 10) + 1; 443 data_timeout = (data_timeout >> 10) + 1;
432 if(data_timeout > 0xffff) 444 if (data_timeout > 0xffff)
433 data_timeout = 0; /* set to unlimited */ 445 data_timeout = 0; /* set to unlimited */
434 writel(data_timeout, sock->addr + SOCK_MMCSD_DATA_TO); 446 writel(data_timeout, sock->addr + SOCK_MMCSD_DATA_TO);
447 writel(TIFM_MMCSD_DPE
448 | readl(sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG),
449 sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG);
435 } 450 }
436} 451}
437 452
@@ -474,11 +489,10 @@ static void tifm_sd_request(struct mmc_host *mmc, struct mmc_request *mrq)
474 } 489 }
475 490
476 host->req = mrq; 491 host->req = mrq;
492 mod_timer(&host->timer, jiffies + host->timeout_jiffies);
477 host->state = CMD; 493 host->state = CMD;
478 queue_delayed_work(sock->wq, &host->abort_handler,
479 host->timeout_jiffies);
480 writel(TIFM_CTRL_LED | readl(sock->addr + SOCK_CONTROL), 494 writel(TIFM_CTRL_LED | readl(sock->addr + SOCK_CONTROL),
481 sock->addr + SOCK_CONTROL); 495 sock->addr + SOCK_CONTROL);
482 tifm_sd_exec(host, mrq->cmd); 496 tifm_sd_exec(host, mrq->cmd);
483 spin_unlock_irqrestore(&sock->lock, flags); 497 spin_unlock_irqrestore(&sock->lock, flags);
484 return; 498 return;
@@ -493,9 +507,9 @@ err_out:
493 mmc_request_done(mmc, mrq); 507 mmc_request_done(mmc, mrq);
494} 508}
495 509
496static void tifm_sd_end_cmd(struct work_struct *work) 510static void tifm_sd_end_cmd(unsigned long data)
497{ 511{
498 struct tifm_sd *host = container_of(work, struct tifm_sd, cmd_handler); 512 struct tifm_sd *host = (struct tifm_sd*)data;
499 struct tifm_dev *sock = host->dev; 513 struct tifm_dev *sock = host->dev;
500 struct mmc_host *mmc = tifm_get_drvdata(sock); 514 struct mmc_host *mmc = tifm_get_drvdata(sock);
501 struct mmc_request *mrq; 515 struct mmc_request *mrq;
@@ -504,6 +518,7 @@ static void tifm_sd_end_cmd(struct work_struct *work)
504 518
505 spin_lock_irqsave(&sock->lock, flags); 519 spin_lock_irqsave(&sock->lock, flags);
506 520
521 del_timer(&host->timer);
507 mrq = host->req; 522 mrq = host->req;
508 host->req = NULL; 523 host->req = NULL;
509 host->state = IDLE; 524 host->state = IDLE;
@@ -517,8 +532,8 @@ static void tifm_sd_end_cmd(struct work_struct *work)
517 r_data = mrq->cmd->data; 532 r_data = mrq->cmd->data;
518 if (r_data) { 533 if (r_data) {
519 if (r_data->flags & MMC_DATA_WRITE) { 534 if (r_data->flags & MMC_DATA_WRITE) {
520 r_data->bytes_xfered = host->written_blocks * 535 r_data->bytes_xfered = host->written_blocks
521 r_data->blksz; 536 * r_data->blksz;
522 } else { 537 } else {
523 r_data->bytes_xfered = r_data->blocks - 538 r_data->bytes_xfered = r_data->blocks -
524 readl(sock->addr + SOCK_MMCSD_NUM_BLOCKS) - 1; 539 readl(sock->addr + SOCK_MMCSD_NUM_BLOCKS) - 1;
@@ -532,7 +547,7 @@ static void tifm_sd_end_cmd(struct work_struct *work)
532 } 547 }
533 548
534 writel((~TIFM_CTRL_LED) & readl(sock->addr + SOCK_CONTROL), 549 writel((~TIFM_CTRL_LED) & readl(sock->addr + SOCK_CONTROL),
535 sock->addr + SOCK_CONTROL); 550 sock->addr + SOCK_CONTROL);
536 551
537 spin_unlock_irqrestore(&sock->lock, flags); 552 spin_unlock_irqrestore(&sock->lock, flags);
538 mmc_request_done(mmc, mrq); 553 mmc_request_done(mmc, mrq);
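The tifm rework replaces the shared workqueue with a tasklet for completion and a timer as watchdog: tifm_sd_request() arms the timer, tifm_sd_end_cmd() deletes it before completing the request, and tifm_sd_abort() runs only if neither happened in time. The arm/disarm protocol, reduced to flags in a userspace sketch:

    #include <stdio.h>

    struct req_state {
        int timer_armed;
        int done;
    };

    static void start_request(struct req_state *s)
    {
        s->timer_armed = 1; /* mod_timer(&host->timer, ...) */
        s->done = 0;
    }

    static void finish_request(struct req_state *s)
    {
        s->timer_armed = 0; /* del_timer(&host->timer) */
        s->done = 1;        /* tasklet ran, mmc_request_done() */
    }

    /* Fires only when the completion path never ran. */
    static void watchdog(struct req_state *s)
    {
        if (s->timer_armed && !s->done)
            printf("abort: card failed to respond\n");
    }

    int main(void)
    {
        struct req_state s = { 0, 0 };

        start_request(&s);
        finish_request(&s);
        watchdog(&s); /* normal path: silent */

        start_request(&s);
        watchdog(&s); /* stuck request: aborts */
        return 0;
    }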
@@ -544,15 +559,6 @@ static void tifm_sd_request_nodma(struct mmc_host *mmc, struct mmc_request *mrq)
544 struct tifm_dev *sock = host->dev; 559 struct tifm_dev *sock = host->dev;
545 unsigned long flags; 560 unsigned long flags;
546 struct mmc_data *r_data = mrq->cmd->data; 561 struct mmc_data *r_data = mrq->cmd->data;
547 char *t_buffer = NULL;
548
549 if (r_data) {
550 t_buffer = kmap(r_data->sg->page);
551 if (!t_buffer) {
552 printk(KERN_ERR DRIVER_NAME ": kmap failed\n");
553 goto err_out;
554 }
555 }
556 562
557 spin_lock_irqsave(&sock->lock, flags); 563 spin_lock_irqsave(&sock->lock, flags);
558 if (host->flags & EJECT) { 564 if (host->flags & EJECT) {
@@ -569,15 +575,14 @@ static void tifm_sd_request_nodma(struct mmc_host *mmc, struct mmc_request *mrq)
569 if (r_data) { 575 if (r_data) {
570 tifm_sd_set_data_timeout(host, r_data); 576 tifm_sd_set_data_timeout(host, r_data);
571 577
572 host->buffer = t_buffer + r_data->sg->offset; 578 host->buffer_size = mrq->cmd->data->blocks
573 host->buffer_size = mrq->cmd->data->blocks * 579 * mrq->cmd->data->blksz;
574 mrq->cmd->data->blksz;
575 580
576 writel(TIFM_MMCSD_BUFINT | 581 writel(TIFM_MMCSD_BUFINT
577 readl(sock->addr + SOCK_MMCSD_INT_ENABLE), 582 | readl(sock->addr + SOCK_MMCSD_INT_ENABLE),
578 sock->addr + SOCK_MMCSD_INT_ENABLE); 583 sock->addr + SOCK_MMCSD_INT_ENABLE);
579 writel(((TIFM_MMCSD_FIFO_SIZE - 1) << 8) | 584 writel(((TIFM_MMCSD_FIFO_SIZE - 1) << 8)
580 (TIFM_MMCSD_FIFO_SIZE - 1), 585 | (TIFM_MMCSD_FIFO_SIZE - 1),
581 sock->addr + SOCK_MMCSD_BUFFER_CONFIG); 586 sock->addr + SOCK_MMCSD_BUFFER_CONFIG);
582 587
583 host->written_blocks = 0; 588 host->written_blocks = 0;
@@ -588,26 +593,22 @@ static void tifm_sd_request_nodma(struct mmc_host *mmc, struct mmc_request *mrq)
588 } 593 }
589 594
590 host->req = mrq; 595 host->req = mrq;
596 mod_timer(&host->timer, jiffies + host->timeout_jiffies);
591 host->state = CMD; 597 host->state = CMD;
592 queue_delayed_work(sock->wq, &host->abort_handler,
593 host->timeout_jiffies);
594 writel(TIFM_CTRL_LED | readl(sock->addr + SOCK_CONTROL), 598 writel(TIFM_CTRL_LED | readl(sock->addr + SOCK_CONTROL),
595 sock->addr + SOCK_CONTROL); 599 sock->addr + SOCK_CONTROL);
596 tifm_sd_exec(host, mrq->cmd); 600 tifm_sd_exec(host, mrq->cmd);
597 spin_unlock_irqrestore(&sock->lock, flags); 601 spin_unlock_irqrestore(&sock->lock, flags);
598 return; 602 return;
599 603
600err_out: 604err_out:
601 if (t_buffer)
602 kunmap(r_data->sg->page);
603
604 mrq->cmd->error = MMC_ERR_TIMEOUT; 605 mrq->cmd->error = MMC_ERR_TIMEOUT;
605 mmc_request_done(mmc, mrq); 606 mmc_request_done(mmc, mrq);
606} 607}
607 608
608static void tifm_sd_end_cmd_nodma(struct work_struct *work) 609static void tifm_sd_end_cmd_nodma(unsigned long data)
609{ 610{
610 struct tifm_sd *host = container_of(work, struct tifm_sd, cmd_handler); 611 struct tifm_sd *host = (struct tifm_sd*)data;
611 struct tifm_dev *sock = host->dev; 612 struct tifm_dev *sock = host->dev;
612 struct mmc_host *mmc = tifm_get_drvdata(sock); 613 struct mmc_host *mmc = tifm_get_drvdata(sock);
613 struct mmc_request *mrq; 614 struct mmc_request *mrq;
@@ -616,6 +617,7 @@ static void tifm_sd_end_cmd_nodma(struct work_struct *work)
616 617
617 spin_lock_irqsave(&sock->lock, flags); 618 spin_lock_irqsave(&sock->lock, flags);
618 619
620 del_timer(&host->timer);
619 mrq = host->req; 621 mrq = host->req;
620 host->req = NULL; 622 host->req = NULL;
621 host->state = IDLE; 623 host->state = IDLE;
@@ -633,8 +635,8 @@ static void tifm_sd_end_cmd_nodma(struct work_struct *work)
633 sock->addr + SOCK_MMCSD_INT_ENABLE); 635 sock->addr + SOCK_MMCSD_INT_ENABLE);
634 636
635 if (r_data->flags & MMC_DATA_WRITE) { 637 if (r_data->flags & MMC_DATA_WRITE) {
636 r_data->bytes_xfered = host->written_blocks * 638 r_data->bytes_xfered = host->written_blocks
637 r_data->blksz; 639 * r_data->blksz;
638 } else { 640 } else {
639 r_data->bytes_xfered = r_data->blocks - 641 r_data->bytes_xfered = r_data->blocks -
640 readl(sock->addr + SOCK_MMCSD_NUM_BLOCKS) - 1; 642 readl(sock->addr + SOCK_MMCSD_NUM_BLOCKS) - 1;
@@ -642,29 +644,44 @@ static void tifm_sd_end_cmd_nodma(struct work_struct *work)
642 r_data->bytes_xfered += r_data->blksz - 644 r_data->bytes_xfered += r_data->blksz -
643 readl(sock->addr + SOCK_MMCSD_BLOCK_LEN) + 1; 645 readl(sock->addr + SOCK_MMCSD_BLOCK_LEN) + 1;
644 } 646 }
645 host->buffer = NULL;
646 host->buffer_pos = 0; 647 host->buffer_pos = 0;
647 host->buffer_size = 0; 648 host->buffer_size = 0;
648 } 649 }
649 650
650 writel((~TIFM_CTRL_LED) & readl(sock->addr + SOCK_CONTROL), 651 writel((~TIFM_CTRL_LED) & readl(sock->addr + SOCK_CONTROL),
651 sock->addr + SOCK_CONTROL); 652 sock->addr + SOCK_CONTROL);
652 653
653 spin_unlock_irqrestore(&sock->lock, flags); 654 spin_unlock_irqrestore(&sock->lock, flags);
654 655
655 if (r_data)
656 kunmap(r_data->sg->page);
657
658 mmc_request_done(mmc, mrq); 656 mmc_request_done(mmc, mrq);
659} 657}
660 658
661static void tifm_sd_abort(struct work_struct *work) 659static void tifm_sd_terminate(struct tifm_sd *host)
660{
661 struct tifm_dev *sock = host->dev;
662 unsigned long flags;
663
664 writel(0, sock->addr + SOCK_MMCSD_INT_ENABLE);
665 mmiowb();
666 spin_lock_irqsave(&sock->lock, flags);
667 host->flags |= EJECT;
668 if (host->req) {
669 writel(TIFM_FIFO_INT_SETALL,
670 sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR);
671 writel(0, sock->addr + SOCK_DMA_FIFO_INT_ENABLE_SET);
672 tasklet_schedule(&host->finish_tasklet);
673 }
674 spin_unlock_irqrestore(&sock->lock, flags);
675}
676
677static void tifm_sd_abort(unsigned long data)
662{ 678{
663 struct tifm_sd *host = 679 struct tifm_sd *host = (struct tifm_sd*)data;
664 container_of(work, struct tifm_sd, abort_handler.work);
665 680
666 printk(KERN_ERR DRIVER_NAME 681 printk(KERN_ERR DRIVER_NAME
667 ": card failed to respond for a long period of time"); 682 ": card failed to respond for a long period of time\n");
683
684 tifm_sd_terminate(host);
668 tifm_eject(host->dev); 685 tifm_eject(host->dev);
669} 686}
670 687
@@ -683,9 +700,9 @@ static void tifm_sd_ios(struct mmc_host *mmc, struct mmc_ios *ios)
683 writel(TIFM_MMCSD_4BBUS | readl(sock->addr + SOCK_MMCSD_CONFIG), 700 writel(TIFM_MMCSD_4BBUS | readl(sock->addr + SOCK_MMCSD_CONFIG),
684 sock->addr + SOCK_MMCSD_CONFIG); 701 sock->addr + SOCK_MMCSD_CONFIG);
685 } else { 702 } else {
686 writel((~TIFM_MMCSD_4BBUS) & 703 writel((~TIFM_MMCSD_4BBUS)
687 readl(sock->addr + SOCK_MMCSD_CONFIG), 704 & readl(sock->addr + SOCK_MMCSD_CONFIG),
688 sock->addr + SOCK_MMCSD_CONFIG); 705 sock->addr + SOCK_MMCSD_CONFIG);
689 } 706 }
690 707
691 if (ios->clock) { 708 if (ios->clock) {
@@ -704,23 +721,24 @@ static void tifm_sd_ios(struct mmc_host *mmc, struct mmc_ios *ios)
704 if ((20000000 / clk_div1) > (24000000 / clk_div2)) { 721 if ((20000000 / clk_div1) > (24000000 / clk_div2)) {
705 host->clk_freq = 20000000; 722 host->clk_freq = 20000000;
706 host->clk_div = clk_div1; 723 host->clk_div = clk_div1;
707 writel((~TIFM_CTRL_FAST_CLK) & 724 writel((~TIFM_CTRL_FAST_CLK)
708 readl(sock->addr + SOCK_CONTROL), 725 & readl(sock->addr + SOCK_CONTROL),
709 sock->addr + SOCK_CONTROL); 726 sock->addr + SOCK_CONTROL);
710 } else { 727 } else {
711 host->clk_freq = 24000000; 728 host->clk_freq = 24000000;
712 host->clk_div = clk_div2; 729 host->clk_div = clk_div2;
713 writel(TIFM_CTRL_FAST_CLK | 730 writel(TIFM_CTRL_FAST_CLK
714 readl(sock->addr + SOCK_CONTROL), 731 | readl(sock->addr + SOCK_CONTROL),
715 sock->addr + SOCK_CONTROL); 732 sock->addr + SOCK_CONTROL);
716 } 733 }
717 } else { 734 } else {
718 host->clk_div = 0; 735 host->clk_div = 0;
719 } 736 }
720 host->clk_div &= TIFM_MMCSD_CLKMASK; 737 host->clk_div &= TIFM_MMCSD_CLKMASK;
721 writel(host->clk_div | ((~TIFM_MMCSD_CLKMASK) & 738 writel(host->clk_div
722 readl(sock->addr + SOCK_MMCSD_CONFIG)), 739 | ((~TIFM_MMCSD_CLKMASK)
723 sock->addr + SOCK_MMCSD_CONFIG); 740 & readl(sock->addr + SOCK_MMCSD_CONFIG)),
741 sock->addr + SOCK_MMCSD_CONFIG);
724 742
725 if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) 743 if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
726 host->flags |= OPENDRAIN; 744 host->flags |= OPENDRAIN;
@@ -734,7 +752,7 @@ static void tifm_sd_ios(struct mmc_host *mmc, struct mmc_ios *ios)
734 // allow removal. 752 // allow removal.
735 if ((host->flags & EJECT) && ios->power_mode == MMC_POWER_OFF) { 753 if ((host->flags & EJECT) && ios->power_mode == MMC_POWER_OFF) {
736 host->flags |= EJECT_DONE; 754 host->flags |= EJECT_DONE;
737 wake_up_all(&host->can_eject); 755 wake_up_all(&host->notify);
738 } 756 }
739 757
740 spin_unlock_irqrestore(&sock->lock, flags); 758 spin_unlock_irqrestore(&sock->lock, flags);
@@ -762,20 +780,67 @@ static struct mmc_host_ops tifm_sd_ops = {
762 .get_ro = tifm_sd_ro 780 .get_ro = tifm_sd_ro
763}; 781};
764 782
765static void tifm_sd_register_host(struct work_struct *work) 783static int tifm_sd_initialize_host(struct tifm_sd *host)
766{ 784{
767 struct tifm_sd *host = container_of(work, struct tifm_sd, cmd_handler); 785 int rc;
786 unsigned int host_status = 0;
768 struct tifm_dev *sock = host->dev; 787 struct tifm_dev *sock = host->dev;
769 struct mmc_host *mmc = tifm_get_drvdata(sock);
770 unsigned long flags;
771 788
772 spin_lock_irqsave(&sock->lock, flags); 789 writel(0, sock->addr + SOCK_MMCSD_INT_ENABLE);
773 host->flags |= HOST_REG; 790 mmiowb();
774 PREPARE_WORK(&host->cmd_handler, 791 host->clk_div = 61;
775 no_dma ? tifm_sd_end_cmd_nodma : tifm_sd_end_cmd); 792 host->clk_freq = 20000000;
776 spin_unlock_irqrestore(&sock->lock, flags); 793 writel(TIFM_MMCSD_RESET, sock->addr + SOCK_MMCSD_SYSTEM_CONTROL);
777 dev_dbg(&sock->dev, "adding host\n"); 794 writel(host->clk_div | TIFM_MMCSD_POWER,
778 mmc_add_host(mmc); 795 sock->addr + SOCK_MMCSD_CONFIG);
796
797 /* wait up to 0.51 sec for reset */
798 for (rc = 2; rc <= 256; rc <<= 1) {
799 if (1 & readl(sock->addr + SOCK_MMCSD_SYSTEM_STATUS)) {
800 rc = 0;
801 break;
802 }
803 msleep(rc);
804 }
805
806 if (rc) {
807 printk(KERN_ERR DRIVER_NAME
808 ": controller failed to reset\n");
809 return -ENODEV;
810 }
811
812 writel(0, sock->addr + SOCK_MMCSD_NUM_BLOCKS);
813 writel(host->clk_div | TIFM_MMCSD_POWER,
814 sock->addr + SOCK_MMCSD_CONFIG);
815 writel(TIFM_MMCSD_RXDE, sock->addr + SOCK_MMCSD_BUFFER_CONFIG);
816
817 // command timeout fixed to 64 clocks for now
818 writel(64, sock->addr + SOCK_MMCSD_COMMAND_TO);
819 writel(TIFM_MMCSD_INAB, sock->addr + SOCK_MMCSD_COMMAND);
820
821 /* INAB should take much less than reset */
822 for (rc = 1; rc <= 16; rc <<= 1) {
823 host_status = readl(sock->addr + SOCK_MMCSD_STATUS);
824 writel(host_status, sock->addr + SOCK_MMCSD_STATUS);
825 if (!(host_status & TIFM_MMCSD_ERRMASK)
826 && (host_status & TIFM_MMCSD_EOC)) {
827 rc = 0;
828 break;
829 }
830 msleep(rc);
831 }
832
833 if (rc) {
834 printk(KERN_ERR DRIVER_NAME
835 ": card not ready - probe failed on initialization\n");
836 return -ENODEV;
837 }
838
839 writel(TIFM_MMCSD_DATAMASK | TIFM_MMCSD_ERRMASK,
840 sock->addr + SOCK_MMCSD_INT_ENABLE);
841 mmiowb();
842
843 return 0;
779} 844}
780 845
781static int tifm_sd_probe(struct tifm_dev *sock) 846static int tifm_sd_probe(struct tifm_dev *sock)
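tifm_sd_initialize_host() polls with exponentially growing sleeps: msleep(2), 4, ... 256 adds up to 510 ms, which is where the "0.51 sec" comment comes from, and rc doubles as loop counter and error code, so falling off the end leaves it nonzero. The shape of the loop, lifted to userspace with a fake readiness check:

    #include <stdio.h>

    static int polls;

    /* Stand-in for reading SOCK_MMCSD_SYSTEM_STATUS. */
    static int ready(void)
    {
        return ++polls >= 3; /* pretend it takes three polls */
    }

    int main(void)
    {
        int rc;

        /* 2 + 4 + ... + 256 = 510 time units of total waiting;
         * rc is loop counter and error code, as in the driver. */
        for (rc = 2; rc <= 256; rc <<= 1) {
            if (ready()) {
                rc = 0;
                break;
            }
            /* the driver calls msleep(rc) here */
        }

        if (rc)
            printf("timed out\n");
        else
            printf("ready after %d polls\n", polls);
        return 0;
    }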
@@ -784,8 +849,8 @@ static int tifm_sd_probe(struct tifm_dev *sock)
784 struct tifm_sd *host; 849 struct tifm_sd *host;
785 int rc = -EIO; 850 int rc = -EIO;
786 851
787 if (!(TIFM_SOCK_STATE_OCCUPIED & 852 if (!(TIFM_SOCK_STATE_OCCUPIED
788 readl(sock->addr + SOCK_PRESENT_STATE))) { 853 & readl(sock->addr + SOCK_PRESENT_STATE))) {
789 printk(KERN_WARNING DRIVER_NAME ": card gone, unexpectedly\n"); 854 printk(KERN_WARNING DRIVER_NAME ": card gone, unexpectedly\n");
790 return rc; 855 return rc;
791 } 856 }
@@ -795,109 +860,99 @@ static int tifm_sd_probe(struct tifm_dev *sock)
795 return -ENOMEM; 860 return -ENOMEM;
796 861
797 host = mmc_priv(mmc); 862 host = mmc_priv(mmc);
798 host->dev = sock;
799 host->clk_div = 61;
800 init_waitqueue_head(&host->can_eject);
801 INIT_WORK(&host->cmd_handler, tifm_sd_register_host);
802 INIT_DELAYED_WORK(&host->abort_handler, tifm_sd_abort);
803
804 tifm_set_drvdata(sock, mmc); 863 tifm_set_drvdata(sock, mmc);
805 sock->signal_irq = tifm_sd_signal_irq; 864 host->dev = sock;
806
807 host->clk_freq = 20000000;
808 host->timeout_jiffies = msecs_to_jiffies(1000); 865 host->timeout_jiffies = msecs_to_jiffies(1000);
809 866
867 init_waitqueue_head(&host->notify);
868 tasklet_init(&host->finish_tasklet,
869 no_dma ? tifm_sd_end_cmd_nodma : tifm_sd_end_cmd,
870 (unsigned long)host);
871 setup_timer(&host->timer, tifm_sd_abort, (unsigned long)host);
872
810 tifm_sd_ops.request = no_dma ? tifm_sd_request_nodma : tifm_sd_request; 873 tifm_sd_ops.request = no_dma ? tifm_sd_request_nodma : tifm_sd_request;
811 mmc->ops = &tifm_sd_ops; 874 mmc->ops = &tifm_sd_ops;
812 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 875 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
813 mmc->caps = MMC_CAP_4_BIT_DATA; 876 mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MULTIWRITE;
814 mmc->f_min = 20000000 / 60; 877 mmc->f_min = 20000000 / 60;
815 mmc->f_max = 24000000; 878 mmc->f_max = 24000000;
816 mmc->max_hw_segs = 1; 879 mmc->max_hw_segs = 1;
817 mmc->max_phys_segs = 1; 880 mmc->max_phys_segs = 1;
818 mmc->max_sectors = 127; 881 // limited by the DMA counter - it's safer to stick with 256,
819 mmc->max_seg_size = mmc->max_sectors << 11; //2k maximum hw block length 882 // though the block counter has 11 bits
820 883 mmc->max_blk_count = 256;
821 writel(0, sock->addr + SOCK_MMCSD_INT_ENABLE); 884 // 2k maximum hw block length
822 writel(TIFM_MMCSD_RESET, sock->addr + SOCK_MMCSD_SYSTEM_CONTROL); 885 mmc->max_blk_size = 2048;
823 writel(host->clk_div | TIFM_MMCSD_POWER, 886 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
824 sock->addr + SOCK_MMCSD_CONFIG); 887 mmc->max_seg_size = mmc->max_req_size;
888 sock->signal_irq = tifm_sd_signal_irq;
889 rc = tifm_sd_initialize_host(host);
825 890
826 for (rc = 0; rc < 50; rc++) { 891 if (!rc)
827 /* Wait for reset ack */ 892 rc = mmc_add_host(mmc);
828 if (1 & readl(sock->addr + SOCK_MMCSD_SYSTEM_STATUS)) { 893 if (rc)
829 rc = 0; 894 goto out_free_mmc;
830 break;
831 }
832 msleep(10);
833 }
834 895
835 if (rc) { 896 return 0;
836 printk(KERN_ERR DRIVER_NAME 897out_free_mmc:
837 ": card not ready - probe failed\n"); 898 mmc_free_host(mmc);
838 mmc_free_host(mmc); 899 return rc;
839 return -ENODEV; 900}
840 }
841 901
842 writel(0, sock->addr + SOCK_MMCSD_NUM_BLOCKS); 902static void tifm_sd_remove(struct tifm_dev *sock)
843 writel(host->clk_div | TIFM_MMCSD_POWER, 903{
844 sock->addr + SOCK_MMCSD_CONFIG); 904 struct mmc_host *mmc = tifm_get_drvdata(sock);
845 writel(TIFM_MMCSD_RXDE, sock->addr + SOCK_MMCSD_BUFFER_CONFIG); 905 struct tifm_sd *host = mmc_priv(mmc);
846 writel(TIFM_MMCSD_DATAMASK | TIFM_MMCSD_ERRMASK,
847 sock->addr + SOCK_MMCSD_INT_ENABLE);
848 906
849 writel(64, sock->addr + SOCK_MMCSD_COMMAND_TO); // command timeout 64 clocks for now 907 del_timer_sync(&host->timer);
850 writel(TIFM_MMCSD_INAB, sock->addr + SOCK_MMCSD_COMMAND); 908 tifm_sd_terminate(host);
851 writel(host->clk_div | TIFM_MMCSD_POWER, 909 wait_event_timeout(host->notify, host->flags & EJECT_DONE,
852 sock->addr + SOCK_MMCSD_CONFIG); 910 host->timeout_jiffies);
911 tasklet_kill(&host->finish_tasklet);
912 mmc_remove_host(mmc);
853 913
854 queue_delayed_work(sock->wq, &host->abort_handler, 914 /* The meaning of the bit majority in this constant is unknown. */
855 host->timeout_jiffies); 915 writel(0xfff8 & readl(sock->addr + SOCK_CONTROL),
916 sock->addr + SOCK_CONTROL);
856 917
857 return 0; 918 tifm_set_drvdata(sock, NULL);
919 mmc_free_host(mmc);
858} 920}
859 921
860static int tifm_sd_host_is_down(struct tifm_dev *sock) 922#ifdef CONFIG_PM
923
924static int tifm_sd_suspend(struct tifm_dev *sock, pm_message_t state)
861{ 925{
862 struct mmc_host *mmc = tifm_get_drvdata(sock); 926 struct mmc_host *mmc = tifm_get_drvdata(sock);
863 struct tifm_sd *host = mmc_priv(mmc); 927 int rc;
864 unsigned long flags;
865 int rc = 0;
866 928
867 spin_lock_irqsave(&sock->lock, flags); 929 rc = mmc_suspend_host(mmc, state);
868 rc = (host->flags & EJECT_DONE); 930 /* The meaning of the bit majority in this constant is unknown. */
869 spin_unlock_irqrestore(&sock->lock, flags); 931 writel(0xfff8 & readl(sock->addr + SOCK_CONTROL),
932 sock->addr + SOCK_CONTROL);
870 return rc; 933 return rc;
871} 934}
872 935
873static void tifm_sd_remove(struct tifm_dev *sock) 936static int tifm_sd_resume(struct tifm_dev *sock)
874{ 937{
875 struct mmc_host *mmc = tifm_get_drvdata(sock); 938 struct mmc_host *mmc = tifm_get_drvdata(sock);
876 struct tifm_sd *host = mmc_priv(mmc); 939 struct tifm_sd *host = mmc_priv(mmc);
877 unsigned long flags;
878 940
879 spin_lock_irqsave(&sock->lock, flags); 941 if (sock->media_id != FM_SD
880 host->flags |= EJECT; 942 || tifm_sd_initialize_host(host)) {
881 if (host->req) 943 tifm_eject(sock);
882 queue_work(sock->wq, &host->cmd_handler); 944 return 0;
883 spin_unlock_irqrestore(&sock->lock, flags); 945 } else {
884 wait_event_timeout(host->can_eject, tifm_sd_host_is_down(sock), 946 return mmc_resume_host(mmc);
885 host->timeout_jiffies); 947 }
948}
886 949
887 if (host->flags & HOST_REG) 950#else
888 mmc_remove_host(mmc);
889 951
890 /* The meaning of the bit majority in this constant is unknown. */ 952#define tifm_sd_suspend NULL
891 writel(0xfff8 & readl(sock->addr + SOCK_CONTROL), 953#define tifm_sd_resume NULL
892 sock->addr + SOCK_CONTROL);
893 writel(0, sock->addr + SOCK_MMCSD_INT_ENABLE);
894 writel(TIFM_FIFO_INT_SETALL,
895 sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR);
896 writel(0, sock->addr + SOCK_DMA_FIFO_INT_ENABLE_SET);
897 954
898 tifm_set_drvdata(sock, NULL); 955#endif /* CONFIG_PM */
899 mmc_free_host(mmc);
900}
901 956
902static tifm_media_id tifm_sd_id_tbl[] = { 957static tifm_media_id tifm_sd_id_tbl[] = {
903 FM_SD, 0 958 FM_SD, 0
@@ -910,7 +965,9 @@ static struct tifm_driver tifm_sd_driver = {
910 }, 965 },
911 .id_table = tifm_sd_id_tbl, 966 .id_table = tifm_sd_id_tbl,
912 .probe = tifm_sd_probe, 967 .probe = tifm_sd_probe,
913 .remove = tifm_sd_remove 968 .remove = tifm_sd_remove,
969 .suspend = tifm_sd_suspend,
970 .resume = tifm_sd_resume
914}; 971};
915 972
916static int __init tifm_sd_init(void) 973static int __init tifm_sd_init(void)
diff --git a/drivers/mmc/wbsd.c b/drivers/mmc/wbsd.c
index 7a282672f8e9..a44d8777ab9f 100644
--- a/drivers/mmc/wbsd.c
+++ b/drivers/mmc/wbsd.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * linux/drivers/mmc/wbsd.c - Winbond W83L51xD SD/MMC driver 2 * linux/drivers/mmc/wbsd.c - Winbond W83L51xD SD/MMC driver
3 * 3 *
4 * Copyright (C) 2004-2005 Pierre Ossman, All Rights Reserved. 4 * Copyright (C) 2004-2006 Pierre Ossman, All Rights Reserved.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
@@ -272,16 +272,9 @@ static inline int wbsd_next_sg(struct wbsd_host *host)
272 return host->num_sg; 272 return host->num_sg;
273} 273}
274 274
275static inline char *wbsd_kmap_sg(struct wbsd_host *host) 275static inline char *wbsd_sg_to_buffer(struct wbsd_host *host)
276{ 276{
277 host->mapped_sg = kmap_atomic(host->cur_sg->page, KM_BIO_SRC_IRQ) + 277 return page_address(host->cur_sg->page) + host->cur_sg->offset;
278 host->cur_sg->offset;
279 return host->mapped_sg;
280}
281
282static inline void wbsd_kunmap_sg(struct wbsd_host *host)
283{
284 kunmap_atomic(host->mapped_sg, KM_BIO_SRC_IRQ);
285} 278}
286 279
287static inline void wbsd_sg_to_dma(struct wbsd_host *host, struct mmc_data *data) 280static inline void wbsd_sg_to_dma(struct wbsd_host *host, struct mmc_data *data)
@@ -302,12 +295,11 @@ static inline void wbsd_sg_to_dma(struct wbsd_host *host, struct mmc_data *data)
302 * we do not transfer too much. 295 * we do not transfer too much.
303 */ 296 */
304 for (i = 0; i < len; i++) { 297 for (i = 0; i < len; i++) {
305 sgbuf = kmap_atomic(sg[i].page, KM_BIO_SRC_IRQ) + sg[i].offset; 298 sgbuf = page_address(sg[i].page) + sg[i].offset;
306 if (size < sg[i].length) 299 if (size < sg[i].length)
307 memcpy(dmabuf, sgbuf, size); 300 memcpy(dmabuf, sgbuf, size);
308 else 301 else
309 memcpy(dmabuf, sgbuf, sg[i].length); 302 memcpy(dmabuf, sgbuf, sg[i].length);
310 kunmap_atomic(sgbuf, KM_BIO_SRC_IRQ);
311 dmabuf += sg[i].length; 303 dmabuf += sg[i].length;
312 304
313 if (size < sg[i].length) 305 if (size < sg[i].length)
@@ -347,7 +339,7 @@ static inline void wbsd_dma_to_sg(struct wbsd_host *host, struct mmc_data *data)
347 * we do not transfer too much. 339 * we do not transfer too much.
348 */ 340 */
349 for (i = 0; i < len; i++) { 341 for (i = 0; i < len; i++) {
350 sgbuf = kmap_atomic(sg[i].page, KM_BIO_SRC_IRQ) + sg[i].offset; 342 sgbuf = page_address(sg[i].page) + sg[i].offset;
351 if (size < sg[i].length) 343 if (size < sg[i].length)
352 memcpy(sgbuf, dmabuf, size); 344 memcpy(sgbuf, dmabuf, size);
353 else 345 else
@@ -497,7 +489,7 @@ static void wbsd_empty_fifo(struct wbsd_host *host)
497 if (data->bytes_xfered == host->size) 489 if (data->bytes_xfered == host->size)
498 return; 490 return;
499 491
500 buffer = wbsd_kmap_sg(host) + host->offset; 492 buffer = wbsd_sg_to_buffer(host) + host->offset;
501 493
502 /* 494 /*
503 * Drain the fifo. This has a tendency to loop longer 495 * Drain the fifo. This has a tendency to loop longer
@@ -526,17 +518,13 @@ static void wbsd_empty_fifo(struct wbsd_host *host)
526 /* 518 /*
527 * Transfer done? 519 * Transfer done?
528 */ 520 */
529 if (data->bytes_xfered == host->size) { 521 if (data->bytes_xfered == host->size)
530 wbsd_kunmap_sg(host);
531 return; 522 return;
532 }
533 523
534 /* 524 /*
535 * End of scatter list entry? 525 * End of scatter list entry?
536 */ 526 */
537 if (host->remain == 0) { 527 if (host->remain == 0) {
538 wbsd_kunmap_sg(host);
539
540 /* 528 /*
541 * Get next entry. Check if last. 529 * Get next entry. Check if last.
542 */ 530 */
@@ -554,13 +542,11 @@ static void wbsd_empty_fifo(struct wbsd_host *host)
554 return; 542 return;
555 } 543 }
556 544
557 buffer = wbsd_kmap_sg(host); 545 buffer = wbsd_sg_to_buffer(host);
558 } 546 }
559 } 547 }
560 } 548 }
561 549
562 wbsd_kunmap_sg(host);
563
564 /* 550 /*
565 * This is a very dirty hack to solve a 551 * This is a very dirty hack to solve a
566 * hardware problem. The chip doesn't trigger 552 * hardware problem. The chip doesn't trigger
@@ -583,7 +569,7 @@ static void wbsd_fill_fifo(struct wbsd_host *host)
583 if (data->bytes_xfered == host->size) 569 if (data->bytes_xfered == host->size)
584 return; 570 return;
585 571
586 buffer = wbsd_kmap_sg(host) + host->offset; 572 buffer = wbsd_sg_to_buffer(host) + host->offset;
587 573
588 /* 574 /*
589 * Fill the fifo. This has a tendency to loop longer 575 * Fill the fifo. This has a tendency to loop longer
@@ -612,17 +598,13 @@ static void wbsd_fill_fifo(struct wbsd_host *host)
612 /* 598 /*
613 * Transfer done? 599 * Transfer done?
614 */ 600 */
615 if (data->bytes_xfered == host->size) { 601 if (data->bytes_xfered == host->size)
616 wbsd_kunmap_sg(host);
617 return; 602 return;
618 }
619 603
620 /* 604 /*
621 * End of scatter list entry? 605 * End of scatter list entry?
622 */ 606 */
623 if (host->remain == 0) { 607 if (host->remain == 0) {
624 wbsd_kunmap_sg(host);
625
626 /* 608 /*
627 * Get next entry. Check if last. 609 * Get next entry. Check if last.
628 */ 610 */
@@ -640,13 +622,11 @@ static void wbsd_fill_fifo(struct wbsd_host *host)
640 return; 622 return;
641 } 623 }
642 624
643 buffer = wbsd_kmap_sg(host); 625 buffer = wbsd_sg_to_buffer(host);
644 } 626 }
645 } 627 }
646 } 628 }
647 629
648 wbsd_kunmap_sg(host);
649
650 /* 630 /*
651 * The controller stops sending interrupts for 631 * The controller stops sending interrupts for
652 * 'FIFO empty' under certain conditions. So we 632 * 'FIFO empty' under certain conditions. So we
@@ -910,6 +890,45 @@ static void wbsd_request(struct mmc_host *mmc, struct mmc_request *mrq)
910 */ 890 */
911 if (cmd->data && (cmd->error == MMC_ERR_NONE)) { 891 if (cmd->data && (cmd->error == MMC_ERR_NONE)) {
912 /* 892 /*
893 * The hardware is so delightfully stupid that it has a list
894 * of "data" commands. If a command isn't on this list, it'll
895 * just go back to the idle state and won't send any data
896 * interrupts.
897 */
898 switch (cmd->opcode) {
899 case 11:
900 case 17:
901 case 18:
902 case 20:
903 case 24:
904 case 25:
905 case 26:
906 case 27:
907 case 30:
908 case 42:
909 case 56:
910 break;
911
912 /* ACMDs. We don't keep track of state, so we just treat them
913 * like any other command. */
914 case 51:
915 break;
916
917 default:
918#ifdef CONFIG_MMC_DEBUG
919 printk(KERN_WARNING "%s: Data command %d is not "
920 "supported by this controller.\n",
921 mmc_hostname(host->mmc), cmd->opcode);
922#endif
923 cmd->data->error = MMC_ERR_INVALID;
924
925 if (cmd->data->stop)
926 wbsd_send_command(host, cmd->data->stop);
927
928 goto done;
 929 }
930
931 /*
913 * Dirty fix for hardware bug. 932 * Dirty fix for hardware bug.
914 */ 933 */
915 if (host->dma == -1) 934 if (host->dma == -1)
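
The whitelist switch above is the patch's actual logic; an equivalent table-driven form, sketched here with hypothetical names, makes the set of data-capable opcodes easier to audit:

#include <linux/kernel.h>
#include <linux/types.h>

/* MMC opcodes this controller issues data interrupts for
 * (taken from the switch above; 51 is the ACMD case). */
static const u8 wbsd_data_opcodes[] = {
        11, 17, 18, 20, 24, 25, 26, 27, 30, 42, 51, 56,
};

static int wbsd_opcode_has_data(u32 opcode)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(wbsd_data_opcodes); i++)
                if (wbsd_data_opcodes[i] == opcode)
                        return 1;
        return 0;
}
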
@@ -1343,16 +1362,27 @@ static int __devinit wbsd_alloc_mmc(struct device *dev)
1343 mmc->max_phys_segs = 128; 1362 mmc->max_phys_segs = 128;
1344 1363
1345 /* 1364 /*
1346 * Maximum number of sectors in one transfer. Also limited by 64kB 1365 * Maximum request size. Also limited by 64KiB buffer.
1347 * buffer.
1348 */ 1366 */
1349 mmc->max_sectors = 128; 1367 mmc->max_req_size = 65536;
1350 1368
1351 /* 1369 /*
1352 * Maximum segment size. Could be one segment with the maximum number 1370 * Maximum segment size. Could be one segment with the maximum number
1353 * of segments. 1371 * of bytes.
1372 */
1373 mmc->max_seg_size = mmc->max_req_size;
1374
1375 /*
1376 * Maximum block size. We have 12 bits (= 4095) but have to subtract
1377 * space for CRC. So the maximum is 4095 - 4*2 = 4087.
1378 */
1379 mmc->max_blk_size = 4087;
1380
1381 /*
1382 * Maximum block count. There is no real limit so the maximum
1383 * request size will be the only restriction.
1354 */ 1384 */
1355 mmc->max_seg_size = mmc->max_sectors * 512; 1385 mmc->max_blk_count = mmc->max_req_size;
1356 1386
1357 dev_set_drvdata(dev, mmc); 1387 dev_set_drvdata(dev, mmc);
1358 1388
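
Replacing max_sectors with the finer-grained max_req_size/max_seg_size/max_blk_size/max_blk_count limits lets the core reason about block sizes other than 512 bytes. A small worked check of how the new numbers reproduce the old behaviour (helper name is illustrative):

/* Blocks per request = min(max_blk_count, max_req_size / blk_size).
 * For wbsd: 65536 / 512 = 128, matching the old max_sectors = 128. */
static unsigned int blocks_per_request(unsigned int max_req_size,
                                       unsigned int max_blk_count,
                                       unsigned int blk_size)
{
        unsigned int n = max_req_size / blk_size;

        return n < max_blk_count ? n : max_blk_count;
}
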
diff --git a/drivers/mmc/wbsd.h b/drivers/mmc/wbsd.h
index 6072993f01e3..d06718b0e2ab 100644
--- a/drivers/mmc/wbsd.h
+++ b/drivers/mmc/wbsd.h
@@ -154,7 +154,6 @@ struct wbsd_host
154 154
155 struct scatterlist* cur_sg; /* Current SG entry */ 155 struct scatterlist* cur_sg; /* Current SG entry */
156 unsigned int num_sg; /* Number of entries left */ 156 unsigned int num_sg; /* Number of entries left */
157 void* mapped_sg; /* vaddr of mapped sg */
158 157
159 unsigned int offset; /* Offset into current entry */ 158 unsigned int offset; /* Offset into current entry */
 160 unsigned int remain; /* Data left in current entry */ 159 unsigned int remain; /* Data left in current entry */
diff --git a/drivers/net/3c503.c b/drivers/net/3c503.c
index 7e34c4f07b70..bc7e906571d3 100644
--- a/drivers/net/3c503.c
+++ b/drivers/net/3c503.c
@@ -600,8 +600,7 @@ el2_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring
600 count -= semi_count; 600 count -= semi_count;
601 memcpy_fromio(skb->data + semi_count, base + ei_status.priv, count); 601 memcpy_fromio(skb->data + semi_count, base + ei_status.priv, count);
602 } else { 602 } else {
603 /* Packet is in one chunk -- we can copy + cksum. */ 603 memcpy_fromio(skb->data, base + ring_offset, count);
604 eth_io_copy_and_sum(skb, base + ring_offset, count, 0);
605 } 604 }
606 return; 605 return;
607 } 606 }
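
eth_io_copy_and_sum() folded a checksum into the copy from I/O memory; with its removal a plain memcpy_fromio() suffices and the stack checksums later. The surrounding receive path, sketched with illustrative ring-bound parameters:

#include <linux/io.h>
#include <linux/skbuff.h>

/* Copy 'count' bytes out of an on-board ring that may wrap. */
static void ring_copy(struct sk_buff *skb, void __iomem *base,
                      unsigned int offset, unsigned int ring_start,
                      unsigned int ring_end, int count)
{
        if (offset + count > ring_end) {
                int semi = ring_end - offset;   /* bytes before wrap */

                memcpy_fromio(skb->data, base + offset, semi);
                memcpy_fromio(skb->data + semi, base + ring_start,
                              count - semi);
        } else {
                memcpy_fromio(skb->data, base + offset, count);
        }
}
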
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 80bdcf846234..716a47210aa3 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -792,8 +792,7 @@ static void poll_vortex(struct net_device *dev)
792{ 792{
793 struct vortex_private *vp = netdev_priv(dev); 793 struct vortex_private *vp = netdev_priv(dev);
794 unsigned long flags; 794 unsigned long flags;
795 local_save_flags(flags); 795 local_irq_save(flags);
796 local_irq_disable();
797 (vp->full_bus_master_rx ? boomerang_interrupt:vortex_interrupt)(dev->irq,dev); 796 (vp->full_bus_master_rx ? boomerang_interrupt:vortex_interrupt)(dev->irq,dev);
798 local_irq_restore(flags); 797 local_irq_restore(flags);
799} 798}
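
local_save_flags() followed by local_irq_disable() does in two steps what local_irq_save() does in one; the same fix is applied to amd8111e_poll() further down. The corrected netpoll shape, as a sketch:

#include <linux/interrupt.h>
#include <linux/netdevice.h>

/* Run a device's interrupt handler with local interrupts off. */
static void example_poll(struct net_device *dev,
                         irqreturn_t (*handler)(int, void *))
{
        unsigned long flags;

        local_irq_save(flags);          /* save + disable in one call */
        handler(dev->irq, dev);
        local_irq_restore(flags);
}
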
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 8aa8dd02b910..38f41a593b12 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -190,7 +190,7 @@ config MII
190 190
191config MACB 191config MACB
192 tristate "Atmel MACB support" 192 tristate "Atmel MACB support"
193 depends on NET_ETHERNET && AVR32 193 depends on NET_ETHERNET && (AVR32 || ARCH_AT91SAM9260 || ARCH_AT91SAM9263)
194 select MII 194 select MII
195 help 195 help
196 The Atmel MACB ethernet interface is found on many AT32 and AT91 196 The Atmel MACB ethernet interface is found on many AT32 and AT91
@@ -235,16 +235,6 @@ config BMAC
235 To compile this driver as a module, choose M here: the module 235 To compile this driver as a module, choose M here: the module
236 will be called bmac. 236 will be called bmac.
237 237
238config OAKNET
239 tristate "National DP83902AV (Oak ethernet) support"
240 depends on NET_ETHERNET && PPC && BROKEN
241 select CRC32
242 help
243 Say Y if your machine has this type of Ethernet network card.
244
245 To compile this driver as a module, choose M here: the module
246 will be called oaknet.
247
248config ARIADNE 238config ARIADNE
249 tristate "Ariadne support" 239 tristate "Ariadne support"
250 depends on NET_ETHERNET && ZORRO 240 depends on NET_ETHERNET && ZORRO
@@ -1155,21 +1145,6 @@ config SEEQ8005
1155 <file:Documentation/networking/net-modules.txt>. The module 1145 <file:Documentation/networking/net-modules.txt>. The module
1156 will be called seeq8005. 1146 will be called seeq8005.
1157 1147
1158config SKMC
1159 tristate "SKnet MCA support"
1160 depends on NET_ETHERNET && MCA && BROKEN
1161 ---help---
1162 These are Micro Channel Ethernet adapters. You need to say Y to "MCA
1163 support" in order to use this driver. Supported cards are the SKnet
1164 Junior MC2 and the SKnet MC2(+). The driver automatically
1165 distinguishes between the two cards. Note that using multiple boards
1166 of different type hasn't been tested with this driver. Say Y if you
1167 have one of these Ethernet adapters.
1168
1169 To compile this driver as a module, choose M here and read
1170 <file:Documentation/networking/net-modules.txt>. The module
1171 will be called sk_mca.
1172
1173config NE2_MCA 1148config NE2_MCA
1174 tristate "NE/2 (ne2000 MCA version) support" 1149 tristate "NE/2 (ne2000 MCA version) support"
1175 depends on NET_ETHERNET && MCA_LEGACY 1150 depends on NET_ETHERNET && MCA_LEGACY
@@ -1788,6 +1763,18 @@ config LAN_SAA9730
1788 workstations. 1763 workstations.
1789 See <http://www.semiconductors.philips.com/pip/SAA9730_flyer_1>. 1764 See <http://www.semiconductors.philips.com/pip/SAA9730_flyer_1>.
1790 1765
1766config SC92031
1767 tristate "Silan SC92031 PCI Fast Ethernet Adapter driver (EXPERIMENTAL)"
1768 depends on NET_PCI && PCI && EXPERIMENTAL
1769 select CRC32
1770 ---help---
1771 This is a driver for the Fast Ethernet PCI network cards based on
1772 the Silan SC92031 chip (sometimes also called Rsltek 8139D). If you
1773 have one of these, say Y here.
1774
1775 To compile this driver as a module, choose M here: the module
1776 will be called sc92031. This is recommended.
1777
1791config NET_POCKET 1778config NET_POCKET
1792 bool "Pocket and portable adapters" 1779 bool "Pocket and portable adapters"
1793 depends on NET_ETHERNET && PARPORT 1780 depends on NET_ETHERNET && PARPORT
@@ -2348,6 +2335,17 @@ config QLA3XXX
2348 To compile this driver as a module, choose M here: the module 2335 To compile this driver as a module, choose M here: the module
2349 will be called qla3xxx. 2336 will be called qla3xxx.
2350 2337
2338config ATL1
2339 tristate "Attansic L1 Gigabit Ethernet support (EXPERIMENTAL)"
2340 depends on NET_PCI && PCI && EXPERIMENTAL
2341 select CRC32
2342 select MII
2343 help
2344 This driver supports the Attansic L1 gigabit ethernet adapter.
2345
2346 To compile this driver as a module, choose M here. The module
2347 will be called atl1.
2348
2351endmenu 2349endmenu
2352 2350
2353# 2351#
@@ -2392,6 +2390,24 @@ config CHELSIO_T1_NAPI
2392 NAPI is a driver API designed to reduce CPU and interrupt load 2390 NAPI is a driver API designed to reduce CPU and interrupt load
2393 when the driver is receiving lots of packets from the card. 2391 when the driver is receiving lots of packets from the card.
2394 2392
2393config CHELSIO_T3
2394 tristate "Chelsio Communications T3 10Gb Ethernet support"
2395 depends on PCI
2396 help
2397 This driver supports Chelsio T3-based gigabit and 10Gb Ethernet
2398 adapters.
2399
2400 For general information about Chelsio and our products, visit
2401 our website at <http://www.chelsio.com>.
2402
2403 For customer support, please visit our customer support page at
2404 <http://www.chelsio.com/support.htm>.
2405
2406 Please send feedback to <linux-bugs@chelsio.com>.
2407
2408 To compile this driver as a module, choose M here: the module
2409 will be called cxgb3.
2410
2395config EHEA 2411config EHEA
2396 tristate "eHEA Ethernet support" 2412 tristate "eHEA Ethernet support"
2397 depends on IBMEBUS 2413 depends on IBMEBUS
@@ -2488,6 +2504,13 @@ config NETXEN_NIC
2488 help 2504 help
2489 This enables the support for NetXen's Gigabit Ethernet card. 2505 This enables the support for NetXen's Gigabit Ethernet card.
2490 2506
2507config PASEMI_MAC
2508 tristate "PA Semi 1/10Gbit MAC"
2509 depends on PPC64 && PCI
2510 help
2511 This driver supports the on-chip 1/10Gbit Ethernet controller on
2512 PA Semi's PWRficient line of chips.
2513
2491endmenu 2514endmenu
2492 2515
2493source "drivers/net/tokenring/Kconfig" 2516source "drivers/net/tokenring/Kconfig"
@@ -2522,7 +2545,7 @@ config RIONET_RX_SIZE
2522 2545
2523config FDDI 2546config FDDI
2524 bool "FDDI driver support" 2547 bool "FDDI driver support"
2525 depends on (PCI || EISA) 2548 depends on (PCI || EISA || TC)
2526 help 2549 help
2527 Fiber Distributed Data Interface is a high speed local area network 2550 Fiber Distributed Data Interface is a high speed local area network
2528 design; essentially a replacement for high speed Ethernet. FDDI can 2551 design; essentially a replacement for high speed Ethernet. FDDI can
@@ -2532,15 +2555,36 @@ config FDDI
2532 will say N. 2555 will say N.
2533 2556
2534config DEFXX 2557config DEFXX
2535 tristate "Digital DEFEA and DEFPA adapter support" 2558 tristate "Digital DEFTA/DEFEA/DEFPA adapter support"
2536 depends on FDDI && (PCI || EISA) 2559 depends on FDDI && (PCI || EISA || TC)
2537 help 2560 ---help---
2538 This is support for the DIGITAL series of EISA (DEFEA) and PCI 2561 This is support for the DIGITAL series of TURBOchannel (DEFTA),
2539 (DEFPA) controllers which can connect you to a local FDDI network. 2562 EISA (DEFEA) and PCI (DEFPA) controllers which can connect you
2563 to a local FDDI network.
2564
2565 To compile this driver as a module, choose M here: the module
2566 will be called defxx. If unsure, say N.
2567
2568config DEFXX_MMIO
2569 bool
2570 prompt "Use MMIO instead of PIO" if PCI || EISA
2571 depends on DEFXX
2572 default n if PCI || EISA
2573 default y
2574 ---help---
2575 This instructs the driver to use EISA or PCI memory-mapped I/O
2576 (MMIO) as appropriate instead of programmed I/O ports (PIO).
2577 Enabling this gives an improvement in processing time in parts
2578 of the driver, but it may cause problems with EISA (DEFEA)
2579 adapters. TURBOchannel does not have the concept of I/O ports,
2580 so MMIO is always used for these (DEFTA) adapters.
2581
2582 If unsure, say N.
2540 2583
2541config SKFP 2584config SKFP
2542 tristate "SysKonnect FDDI PCI support" 2585 tristate "SysKonnect FDDI PCI support"
2543 depends on FDDI && PCI 2586 depends on FDDI && PCI
2587 select BITREVERSE
2544 ---help--- 2588 ---help---
2545 Say Y here if you have a SysKonnect FDDI PCI adapter. 2589 Say Y here if you have a SysKonnect FDDI PCI adapter.
2546 The following adapters are supported by this driver: 2590 The following adapters are supported by this driver:
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 4c0d4e5ce42b..33af833667da 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -6,8 +6,10 @@ obj-$(CONFIG_E1000) += e1000/
6obj-$(CONFIG_IBM_EMAC) += ibm_emac/ 6obj-$(CONFIG_IBM_EMAC) += ibm_emac/
7obj-$(CONFIG_IXGB) += ixgb/ 7obj-$(CONFIG_IXGB) += ixgb/
8obj-$(CONFIG_CHELSIO_T1) += chelsio/ 8obj-$(CONFIG_CHELSIO_T1) += chelsio/
9obj-$(CONFIG_CHELSIO_T3) += cxgb3/
9obj-$(CONFIG_EHEA) += ehea/ 10obj-$(CONFIG_EHEA) += ehea/
10obj-$(CONFIG_BONDING) += bonding/ 11obj-$(CONFIG_BONDING) += bonding/
12obj-$(CONFIG_ATL1) += atl1/
11obj-$(CONFIG_GIANFAR) += gianfar_driver.o 13obj-$(CONFIG_GIANFAR) += gianfar_driver.o
12 14
13gianfar_driver-objs := gianfar.o \ 15gianfar_driver-objs := gianfar.o \
@@ -36,8 +38,6 @@ obj-$(CONFIG_CASSINI) += cassini.o
36obj-$(CONFIG_MACE) += mace.o 38obj-$(CONFIG_MACE) += mace.o
37obj-$(CONFIG_BMAC) += bmac.o 39obj-$(CONFIG_BMAC) += bmac.o
38 40
39obj-$(CONFIG_OAKNET) += oaknet.o 8390.o
40
41obj-$(CONFIG_DGRS) += dgrs.o 41obj-$(CONFIG_DGRS) += dgrs.o
42obj-$(CONFIG_VORTEX) += 3c59x.o 42obj-$(CONFIG_VORTEX) += 3c59x.o
43obj-$(CONFIG_TYPHOON) += typhoon.o 43obj-$(CONFIG_TYPHOON) += typhoon.o
@@ -137,7 +137,6 @@ obj-$(CONFIG_AT1700) += at1700.o
137obj-$(CONFIG_EL1) += 3c501.o 137obj-$(CONFIG_EL1) += 3c501.o
138obj-$(CONFIG_EL16) += 3c507.o 138obj-$(CONFIG_EL16) += 3c507.o
139obj-$(CONFIG_ELMC) += 3c523.o 139obj-$(CONFIG_ELMC) += 3c523.o
140obj-$(CONFIG_SKMC) += sk_mca.o
141obj-$(CONFIG_IBMLANA) += ibmlana.o 140obj-$(CONFIG_IBMLANA) += ibmlana.o
142obj-$(CONFIG_ELMC_II) += 3c527.o 141obj-$(CONFIG_ELMC_II) += 3c527.o
143obj-$(CONFIG_EL3) += 3c509.o 142obj-$(CONFIG_EL3) += 3c509.o
@@ -160,6 +159,7 @@ obj-$(CONFIG_APRICOT) += 82596.o
160obj-$(CONFIG_LASI_82596) += lasi_82596.o 159obj-$(CONFIG_LASI_82596) += lasi_82596.o
161obj-$(CONFIG_MVME16x_NET) += 82596.o 160obj-$(CONFIG_MVME16x_NET) += 82596.o
162obj-$(CONFIG_BVME6000_NET) += 82596.o 161obj-$(CONFIG_BVME6000_NET) += 82596.o
162obj-$(CONFIG_SC92031) += sc92031.o
163 163
164# This is also a 82596 and should probably be merged 164# This is also a 82596 and should probably be merged
165obj-$(CONFIG_LP486E) += lp486e.o 165obj-$(CONFIG_LP486E) += lp486e.o
@@ -196,6 +196,7 @@ obj-$(CONFIG_SMC91X) += smc91x.o
196obj-$(CONFIG_SMC911X) += smc911x.o 196obj-$(CONFIG_SMC911X) += smc911x.o
197obj-$(CONFIG_DM9000) += dm9000.o 197obj-$(CONFIG_DM9000) += dm9000.o
198obj-$(CONFIG_FEC_8XX) += fec_8xx/ 198obj-$(CONFIG_FEC_8XX) += fec_8xx/
199obj-$(CONFIG_PASEMI_MAC) += pasemi_mac.o
199 200
200obj-$(CONFIG_MACB) += macb.o 201obj-$(CONFIG_MACB) += macb.o
201 202
diff --git a/drivers/net/Space.c b/drivers/net/Space.c
index 9305eb9b1b98..dd8ed456c8b2 100644
--- a/drivers/net/Space.c
+++ b/drivers/net/Space.c
@@ -59,7 +59,6 @@ extern struct net_device *wavelan_probe(int unit);
59extern struct net_device *arlan_probe(int unit); 59extern struct net_device *arlan_probe(int unit);
60extern struct net_device *el16_probe(int unit); 60extern struct net_device *el16_probe(int unit);
61extern struct net_device *elmc_probe(int unit); 61extern struct net_device *elmc_probe(int unit);
62extern struct net_device *skmca_probe(int unit);
63extern struct net_device *elplus_probe(int unit); 62extern struct net_device *elplus_probe(int unit);
64extern struct net_device *ac3200_probe(int unit); 63extern struct net_device *ac3200_probe(int unit);
65extern struct net_device *es_probe(int unit); 64extern struct net_device *es_probe(int unit);
@@ -153,9 +152,6 @@ static struct devprobe2 mca_probes[] __initdata = {
153#ifdef CONFIG_ELMC_II /* 3c527 */ 152#ifdef CONFIG_ELMC_II /* 3c527 */
154 {mc32_probe, 0}, 153 {mc32_probe, 0},
155#endif 154#endif
156#ifdef CONFIG_SKMC /* SKnet Microchannel */
157 {skmca_probe, 0},
158#endif
159 {NULL, 0}, 155 {NULL, 0},
160}; 156};
161 157
diff --git a/drivers/net/ac3200.c b/drivers/net/ac3200.c
index c01f87f5bed7..644c408515df 100644
--- a/drivers/net/ac3200.c
+++ b/drivers/net/ac3200.c
@@ -327,8 +327,7 @@ static void ac_block_input(struct net_device *dev, int count, struct sk_buff *sk
327 memcpy_fromio(skb->data + semi_count, 327 memcpy_fromio(skb->data + semi_count,
328 ei_status.mem + TX_PAGES*256, count); 328 ei_status.mem + TX_PAGES*256, count);
329 } else { 329 } else {
330 /* Packet is in one chunk -- we can copy + cksum. */ 330 memcpy_fromio(skb->data, start, count);
331 eth_io_copy_and_sum(skb, start, count, 0);
332 } 331 }
333} 332}
334 333
diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c
index 18896f24d407..9c399aaefbdd 100644
--- a/drivers/net/amd8111e.c
+++ b/drivers/net/amd8111e.c
@@ -1334,8 +1334,7 @@ err_no_interrupt:
1334static void amd8111e_poll(struct net_device *dev) 1334static void amd8111e_poll(struct net_device *dev)
1335{ 1335{
1336 unsigned long flags; 1336 unsigned long flags;
1337 local_save_flags(flags); 1337 local_irq_save(flags);
1338 local_irq_disable();
1339 amd8111e_interrupt(0, dev); 1338 amd8111e_interrupt(0, dev);
1340 local_irq_restore(flags); 1339 local_irq_restore(flags);
1341} 1340}
diff --git a/drivers/net/arm/at91_ether.c b/drivers/net/arm/at91_ether.c
index fada15d959de..1621b8fe35cf 100644
--- a/drivers/net/arm/at91_ether.c
+++ b/drivers/net/arm/at91_ether.c
@@ -641,7 +641,7 @@ static void at91ether_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo
641{ 641{
642 strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); 642 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
643 strlcpy(info->version, DRV_VERSION, sizeof(info->version)); 643 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
644 strlcpy(info->bus_info, dev->class_dev.dev->bus_id, sizeof(info->bus_info)); 644 strlcpy(info->bus_info, dev->dev.parent->bus_id, sizeof(info->bus_info));
645} 645}
646 646
647static const struct ethtool_ops at91ether_ethtool_ops = { 647static const struct ethtool_ops at91ether_ethtool_ops = {
diff --git a/drivers/net/arm/etherh.c b/drivers/net/arm/etherh.c
index f3faa4fe58e7..72c41f5907f2 100644
--- a/drivers/net/arm/etherh.c
+++ b/drivers/net/arm/etherh.c
@@ -587,7 +587,7 @@ static void etherh_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *i
587{ 587{
588 strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); 588 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
589 strlcpy(info->version, DRV_VERSION, sizeof(info->version)); 589 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
590 strlcpy(info->bus_info, dev->class_dev.dev->bus_id, 590 strlcpy(info->bus_info, dev->dev.parent->bus_id,
591 sizeof(info->bus_info)); 591 sizeof(info->bus_info));
592} 592}
593 593
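
Both ARM ethtool hooks above move from the removed class_device to the struct device embedded in net_device, whose parent carries the bus id. The pattern in isolation (driver name and version strings are placeholders):

#include <linux/ethtool.h>
#include <linux/netdevice.h>

static void example_get_drvinfo(struct net_device *dev,
                                struct ethtool_drvinfo *info)
{
        strlcpy(info->driver, "example", sizeof(info->driver));
        strlcpy(info->version, "0.1", sizeof(info->version));
        /* the bus id now lives on the embedded device's parent */
        strlcpy(info->bus_info, dev->dev.parent->bus_id,
                sizeof(info->bus_info));
}
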
diff --git a/drivers/net/atl1/Makefile b/drivers/net/atl1/Makefile
new file mode 100644
index 000000000000..a6b707e4e69e
--- /dev/null
+++ b/drivers/net/atl1/Makefile
@@ -0,0 +1,2 @@
1obj-$(CONFIG_ATL1) += atl1.o
2atl1-y += atl1_main.o atl1_hw.o atl1_ethtool.o atl1_param.o
diff --git a/drivers/net/atl1/atl1.h b/drivers/net/atl1/atl1.h
new file mode 100644
index 000000000000..b1c6034e68fa
--- /dev/null
+++ b/drivers/net/atl1/atl1.h
@@ -0,0 +1,283 @@
1/*
2 * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
3 * Copyright(c) 2006 Chris Snook <csnook@redhat.com>
4 * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
5 *
6 * Derived from Intel e1000 driver
7 * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the Free
11 * Software Foundation; either version 2 of the License, or (at your option)
12 * any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * more details.
18 *
19 * You should have received a copy of the GNU General Public License along with
20 * this program; if not, write to the Free Software Foundation, Inc., 59
21 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
22 */
23
24#ifndef _ATL1_H_
25#define _ATL1_H_
26
27#include <linux/types.h>
28#include <linux/if_vlan.h>
29
30#include "atl1_hw.h"
31
32/* function prototypes needed by multiple files */
33s32 atl1_up(struct atl1_adapter *adapter);
34void atl1_down(struct atl1_adapter *adapter);
35int atl1_reset(struct atl1_adapter *adapter);
36s32 atl1_setup_ring_resources(struct atl1_adapter *adapter);
37void atl1_free_ring_resources(struct atl1_adapter *adapter);
38
39extern char atl1_driver_name[];
40extern char atl1_driver_version[];
41extern const struct ethtool_ops atl1_ethtool_ops;
42
43struct atl1_adapter;
44
45#define ATL1_MAX_INTR 3
46
47#define ATL1_DEFAULT_TPD 256
48#define ATL1_MAX_TPD 1024
49#define ATL1_MIN_TPD 64
50#define ATL1_DEFAULT_RFD 512
51#define ATL1_MIN_RFD 128
52#define ATL1_MAX_RFD 2048
53
54#define ATL1_GET_DESC(R, i, type) (&(((type *)((R)->desc))[i]))
55#define ATL1_RFD_DESC(R, i) ATL1_GET_DESC(R, i, struct rx_free_desc)
56#define ATL1_TPD_DESC(R, i) ATL1_GET_DESC(R, i, struct tx_packet_desc)
57#define ATL1_RRD_DESC(R, i) ATL1_GET_DESC(R, i, struct rx_return_desc)
58
59/*
60 * Some workarounds require millisecond delays and are run during interrupt
61 * context. Most notably, when establishing link, the phy may need tweaking
62 * but cannot process phy register reads/writes faster than millisecond
63 * intervals...and we establish link due to a "link status change" interrupt.
64 */
65
66/*
67 * wrapper around a pointer to a socket buffer,
68 * so a DMA handle can be stored along with the buffer
69 */
70struct atl1_buffer {
71 struct sk_buff *skb;
72 u16 length;
73 u16 alloced;
74 dma_addr_t dma;
75};
76
77#define MAX_TX_BUF_LEN 0x3000 /* 12KB */
78
79struct atl1_tpd_ring {
80 void *desc; /* pointer to the descriptor ring memory */
 81 dma_addr_t dma; /* physical address of the descriptor ring */
82 u16 size; /* length of descriptor ring in bytes */
83 u16 count; /* number of descriptors in the ring */
84 u16 hw_idx; /* hardware index */
85 atomic_t next_to_clean;
86 atomic_t next_to_use;
87 struct atl1_buffer *buffer_info;
88};
89
90struct atl1_rfd_ring {
91 void *desc;
92 dma_addr_t dma;
93 u16 size;
94 u16 count;
95 atomic_t next_to_use;
96 u16 next_to_clean;
97 struct atl1_buffer *buffer_info;
98};
99
100struct atl1_rrd_ring {
101 void *desc;
102 dma_addr_t dma;
103 unsigned int size;
104 u16 count;
105 u16 next_to_use;
106 atomic_t next_to_clean;
107};
108
109struct atl1_ring_header {
110 void *desc; /* pointer to the descriptor ring memory */
 111 dma_addr_t dma; /* physical address of the descriptor ring */
112 unsigned int size; /* length of descriptor ring in bytes */
113};
114
115struct atl1_cmb {
116 struct coals_msg_block *cmb;
117 dma_addr_t dma;
118};
119
120struct atl1_smb {
121 struct stats_msg_block *smb;
122 dma_addr_t dma;
123};
124
125/* Statistics counters */
126struct atl1_sft_stats {
127 u64 rx_packets;
128 u64 tx_packets;
129 u64 rx_bytes;
130 u64 tx_bytes;
131 u64 multicast;
132 u64 collisions;
133 u64 rx_errors;
134 u64 rx_length_errors;
135 u64 rx_crc_errors;
136 u64 rx_frame_errors;
137 u64 rx_fifo_errors;
138 u64 rx_missed_errors;
139 u64 tx_errors;
140 u64 tx_fifo_errors;
141 u64 tx_aborted_errors;
142 u64 tx_window_errors;
143 u64 tx_carrier_errors;
144
 145 u64 tx_pause; /* num pause packets transmitted */
146 u64 excecol; /* num tx packets aborted due to excessive collisions. */
147 u64 deffer; /* num deferred tx packets */
148 u64 scc; /* num packets subsequently transmitted successfully w/ single prior collision. */
149 u64 mcc; /* num packets subsequently transmitted successfully w/ multiple prior collisions. */
150 u64 latecol; /* num tx packets w/ late collisions. */
151 u64 tx_underun; /* num tx packets aborted due to transmit FIFO underrun, or TRD FIFO underrun */
152 u64 tx_trunc; /* num tx packets truncated due to size exceeding MTU, regardless whether truncated by Selene or not. (The name doesn't really reflect the meaning in this case.) */
153 u64 rx_pause; /* num Pause packets received. */
154 u64 rx_rrd_ov;
155 u64 rx_trunc;
156};
157
158/* board specific private data structure */
159#define ATL1_REGS_LEN 8
160
161/* Structure containing variables used by the shared code */
162struct atl1_hw {
163 u8 __iomem *hw_addr;
164 struct atl1_adapter *back;
165 enum atl1_dma_order dma_ord;
166 enum atl1_dma_rcb rcb_value;
167 enum atl1_dma_req_block dmar_block;
168 enum atl1_dma_req_block dmaw_block;
169 u8 preamble_len;
170 u8 max_retry; /* Retransmission maximum, after which the packet will be discarded */
171 u8 jam_ipg; /* IPG to start JAM for collision based flow control in half-duplex mode. In units of 8-bit time */
172 u8 ipgt; /* Desired back to back inter-packet gap. The default is 96-bit time */
 173 u8 min_ifg; /* Minimum number of IFG to enforce in between RX frames. Frame gap below such IFG is dropped */
174 u8 ipgr1; /* 64bit Carrier-Sense window */
175 u8 ipgr2; /* 96-bit IPG window */
176 u8 tpd_burst; /* Number of TPD to prefetch in cache-aligned burst. Each TPD is 16 bytes long */
177 u8 rfd_burst; /* Number of RFD to prefetch in cache-aligned burst. Each RFD is 12 bytes long */
178 u8 rfd_fetch_gap;
179 u8 rrd_burst; /* Threshold number of RRDs that can be retired in a burst. Each RRD is 16 bytes long */
180 u8 tpd_fetch_th;
181 u8 tpd_fetch_gap;
182 u16 tx_jumbo_task_th;
183 u16 txf_burst; /* Number of data bytes to read in a cache-aligned burst. Each SRAM entry is
184 8 bytes long */
185 u16 rx_jumbo_th; /* Jumbo packet size for non-VLAN packet. VLAN packets should add 4 bytes */
186 u16 rx_jumbo_lkah;
187 u16 rrd_ret_timer; /* RRD retirement timer. Decrement by 1 after every 512ns passes. */
188 u16 lcol; /* Collision Window */
189
190 u16 cmb_tpd;
191 u16 cmb_rrd;
192 u16 cmb_rx_timer;
193 u16 cmb_tx_timer;
194 u32 smb_timer;
195 u16 media_type;
196 u16 autoneg_advertised;
197 u16 pci_cmd_word;
198
199 u16 mii_autoneg_adv_reg;
200 u16 mii_1000t_ctrl_reg;
201
202 u32 mem_rang;
203 u32 txcw;
204 u32 max_frame_size;
205 u32 min_frame_size;
206 u32 mc_filter_type;
207 u32 num_mc_addrs;
208 u32 collision_delta;
209 u32 tx_packet_delta;
210 u16 phy_spd_default;
211
212 u16 dev_rev;
213 u8 revision_id;
214
215 /* spi flash */
216 u8 flash_vendor;
217
218 u8 dma_fairness;
219 u8 mac_addr[ETH_ALEN];
220 u8 perm_mac_addr[ETH_ALEN];
221
222 /* bool phy_preamble_sup; */
223 bool phy_configured;
224};
225
226struct atl1_adapter {
227 /* OS defined structs */
228 struct net_device *netdev;
229 struct pci_dev *pdev;
230 struct net_device_stats net_stats;
231 struct atl1_sft_stats soft_stats;
232
233 struct vlan_group *vlgrp;
234 u32 rx_buffer_len;
235 u32 wol;
236 u16 link_speed;
237 u16 link_duplex;
238 spinlock_t lock;
239 atomic_t irq_sem;
240 struct work_struct tx_timeout_task;
241 struct work_struct link_chg_task;
242 struct work_struct pcie_dma_to_rst_task;
243 struct timer_list watchdog_timer;
244 struct timer_list phy_config_timer;
245 bool phy_timer_pending;
246
247 bool mac_disabled;
248
249 /* All descriptor rings' memory */
250 struct atl1_ring_header ring_header;
251
252 /* TX */
253 struct atl1_tpd_ring tpd_ring;
254 spinlock_t mb_lock;
255
256 /* RX */
257 struct atl1_rfd_ring rfd_ring;
258 struct atl1_rrd_ring rrd_ring;
259 u64 hw_csum_err;
260 u64 hw_csum_good;
261
262 u32 gorcl;
263 u64 gorcl_old;
264
265 /* Interrupt Moderator timer ( 2us resolution) */
266 u16 imt;
267 /* Interrupt Clear timer (2us resolution) */
268 u16 ict;
269
270 /* MII interface info */
271 struct mii_if_info mii;
272
273 /* structs defined in atl1_hw.h */
274 u32 bd_number; /* board number */
275 bool pci_using_64;
276 struct atl1_hw hw;
277 struct atl1_smb smb;
278 struct atl1_cmb cmb;
279
280 u32 pci_state[16];
281};
282
283#endif /* _ATL1_H_ */
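
The ATL1_*_DESC macros cast the ring's raw descriptor block to the proper descriptor type and index it; together with the next_to_use/next_to_clean counters they implement the usual producer/consumer ring. A sketch of the producer side (hypothetical helper; the real driver also checks for a full ring, omitted here):

/* Reserve the next TX descriptor slot, wrapping at ring->count. */
static struct tx_packet_desc *tpd_next(struct atl1_tpd_ring *ring)
{
        u16 idx = (u16) atomic_read(&ring->next_to_use);
        struct tx_packet_desc *tpd = ATL1_TPD_DESC(ring, idx);

        atomic_set(&ring->next_to_use, (idx + 1) % ring->count);
        return tpd;
}
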
diff --git a/drivers/net/atl1/atl1_ethtool.c b/drivers/net/atl1/atl1_ethtool.c
new file mode 100644
index 000000000000..c11c27798e5c
--- /dev/null
+++ b/drivers/net/atl1/atl1_ethtool.c
@@ -0,0 +1,508 @@
1/*
2 * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
3 * Copyright(c) 2006 Chris Snook <csnook@redhat.com>
4 * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
5 *
6 * Derived from Intel e1000 driver
7 * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the Free
11 * Software Foundation; either version 2 of the License, or (at your option)
12 * any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * more details.
18 *
19 * You should have received a copy of the GNU General Public License along with
20 * this program; if not, write to the Free Software Foundation, Inc., 59
21 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
22 */
23
24#include <linux/types.h>
25#include <linux/pci.h>
26#include <linux/ethtool.h>
27#include <linux/netdevice.h>
28#include <linux/mii.h>
29#include <asm/uaccess.h>
30
31#include "atl1.h"
32
33struct atl1_stats {
34 char stat_string[ETH_GSTRING_LEN];
35 int sizeof_stat;
36 int stat_offset;
37};
38
39#define ATL1_STAT(m) sizeof(((struct atl1_adapter *)0)->m), \
40 offsetof(struct atl1_adapter, m)
41
42static struct atl1_stats atl1_gstrings_stats[] = {
43 {"rx_packets", ATL1_STAT(soft_stats.rx_packets)},
44 {"tx_packets", ATL1_STAT(soft_stats.tx_packets)},
45 {"rx_bytes", ATL1_STAT(soft_stats.rx_bytes)},
46 {"tx_bytes", ATL1_STAT(soft_stats.tx_bytes)},
47 {"rx_errors", ATL1_STAT(soft_stats.rx_errors)},
48 {"tx_errors", ATL1_STAT(soft_stats.tx_errors)},
49 {"rx_dropped", ATL1_STAT(net_stats.rx_dropped)},
50 {"tx_dropped", ATL1_STAT(net_stats.tx_dropped)},
51 {"multicast", ATL1_STAT(soft_stats.multicast)},
52 {"collisions", ATL1_STAT(soft_stats.collisions)},
53 {"rx_length_errors", ATL1_STAT(soft_stats.rx_length_errors)},
54 {"rx_over_errors", ATL1_STAT(soft_stats.rx_missed_errors)},
55 {"rx_crc_errors", ATL1_STAT(soft_stats.rx_crc_errors)},
56 {"rx_frame_errors", ATL1_STAT(soft_stats.rx_frame_errors)},
57 {"rx_fifo_errors", ATL1_STAT(soft_stats.rx_fifo_errors)},
58 {"rx_missed_errors", ATL1_STAT(soft_stats.rx_missed_errors)},
59 {"tx_aborted_errors", ATL1_STAT(soft_stats.tx_aborted_errors)},
60 {"tx_carrier_errors", ATL1_STAT(soft_stats.tx_carrier_errors)},
61 {"tx_fifo_errors", ATL1_STAT(soft_stats.tx_fifo_errors)},
62 {"tx_window_errors", ATL1_STAT(soft_stats.tx_window_errors)},
63 {"tx_abort_exce_coll", ATL1_STAT(soft_stats.excecol)},
64 {"tx_abort_late_coll", ATL1_STAT(soft_stats.latecol)},
65 {"tx_deferred_ok", ATL1_STAT(soft_stats.deffer)},
66 {"tx_single_coll_ok", ATL1_STAT(soft_stats.scc)},
67 {"tx_multi_coll_ok", ATL1_STAT(soft_stats.mcc)},
68 {"tx_underun", ATL1_STAT(soft_stats.tx_underun)},
69 {"tx_trunc", ATL1_STAT(soft_stats.tx_trunc)},
70 {"tx_pause", ATL1_STAT(soft_stats.tx_pause)},
71 {"rx_pause", ATL1_STAT(soft_stats.rx_pause)},
72 {"rx_rrd_ov", ATL1_STAT(soft_stats.rx_rrd_ov)},
73 {"rx_trunc", ATL1_STAT(soft_stats.rx_trunc)}
74};
75
76static void atl1_get_ethtool_stats(struct net_device *netdev,
77 struct ethtool_stats *stats, u64 *data)
78{
79 struct atl1_adapter *adapter = netdev_priv(netdev);
80 int i;
81 char *p;
82
83 for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) {
84 p = (char *)adapter+atl1_gstrings_stats[i].stat_offset;
85 data[i] = (atl1_gstrings_stats[i].sizeof_stat ==
86 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
87 }
88
89}
90
91static int atl1_get_stats_count(struct net_device *netdev)
92{
93 return ARRAY_SIZE(atl1_gstrings_stats);
94}
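
ATL1_STAT records a (sizeof, offsetof) pair per counter so a single loop can export mixed u64/u32 fields, which is exactly what atl1_get_ethtool_stats() does above. The technique in miniature, with toy names:

#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/types.h>

struct demo_stats { u64 rx_bytes; u32 drops; };

static const struct { int size; int off; } demo_table[] = {
        { sizeof(((struct demo_stats *)0)->rx_bytes),
          offsetof(struct demo_stats, rx_bytes) },
        { sizeof(((struct demo_stats *)0)->drops),
          offsetof(struct demo_stats, drops) },
};

static void demo_fill(const struct demo_stats *s, u64 *out)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(demo_table); i++) {
                const char *p = (const char *)s + demo_table[i].off;

                /* widen 32-bit counters; copy 64-bit ones as-is */
                out[i] = (demo_table[i].size == sizeof(u64)) ?
                        *(const u64 *)p : *(const u32 *)p;
        }
}
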
95
96static int atl1_get_settings(struct net_device *netdev,
97 struct ethtool_cmd *ecmd)
98{
99 struct atl1_adapter *adapter = netdev_priv(netdev);
100 struct atl1_hw *hw = &adapter->hw;
101
102 ecmd->supported = (SUPPORTED_10baseT_Half |
103 SUPPORTED_10baseT_Full |
104 SUPPORTED_100baseT_Half |
105 SUPPORTED_100baseT_Full |
106 SUPPORTED_1000baseT_Full |
107 SUPPORTED_Autoneg | SUPPORTED_TP);
108 ecmd->advertising = ADVERTISED_TP;
109 if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
110 hw->media_type == MEDIA_TYPE_1000M_FULL) {
111 ecmd->advertising |= ADVERTISED_Autoneg;
112 if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR) {
113 ecmd->advertising |= ADVERTISED_Autoneg;
114 ecmd->advertising |=
115 (ADVERTISED_10baseT_Half |
116 ADVERTISED_10baseT_Full |
117 ADVERTISED_100baseT_Half |
118 ADVERTISED_100baseT_Full |
119 ADVERTISED_1000baseT_Full);
120 }
121 else
122 ecmd->advertising |= (ADVERTISED_1000baseT_Full);
123 }
124 ecmd->port = PORT_TP;
125 ecmd->phy_address = 0;
126 ecmd->transceiver = XCVR_INTERNAL;
127
128 if (netif_carrier_ok(adapter->netdev)) {
129 u16 link_speed, link_duplex;
130 atl1_get_speed_and_duplex(hw, &link_speed, &link_duplex);
131 ecmd->speed = link_speed;
132 if (link_duplex == FULL_DUPLEX)
133 ecmd->duplex = DUPLEX_FULL;
134 else
135 ecmd->duplex = DUPLEX_HALF;
136 } else {
137 ecmd->speed = -1;
138 ecmd->duplex = -1;
139 }
140 if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
141 hw->media_type == MEDIA_TYPE_1000M_FULL)
142 ecmd->autoneg = AUTONEG_ENABLE;
143 else
144 ecmd->autoneg = AUTONEG_DISABLE;
145
146 return 0;
147}
148
149static int atl1_set_settings(struct net_device *netdev,
150 struct ethtool_cmd *ecmd)
151{
152 struct atl1_adapter *adapter = netdev_priv(netdev);
153 struct atl1_hw *hw = &adapter->hw;
154 u16 phy_data;
155 int ret_val = 0;
156 u16 old_media_type = hw->media_type;
157
158 if (netif_running(adapter->netdev)) {
159 printk(KERN_DEBUG "%s: ethtool shutting down adapter\n",
160 atl1_driver_name);
161 atl1_down(adapter);
162 }
163
164 if (ecmd->autoneg == AUTONEG_ENABLE)
165 hw->media_type = MEDIA_TYPE_AUTO_SENSOR;
166 else {
167 if (ecmd->speed == SPEED_1000) {
168 if (ecmd->duplex != DUPLEX_FULL) {
169 printk(KERN_WARNING
170 "%s: can't force to 1000M half duplex\n",
171 atl1_driver_name);
172 ret_val = -EINVAL;
173 goto exit_sset;
174 }
175 hw->media_type = MEDIA_TYPE_1000M_FULL;
176 } else if (ecmd->speed == SPEED_100) {
177 if (ecmd->duplex == DUPLEX_FULL) {
178 hw->media_type = MEDIA_TYPE_100M_FULL;
179 } else
180 hw->media_type = MEDIA_TYPE_100M_HALF;
181 } else {
182 if (ecmd->duplex == DUPLEX_FULL)
183 hw->media_type = MEDIA_TYPE_10M_FULL;
184 else
185 hw->media_type = MEDIA_TYPE_10M_HALF;
186 }
187 }
188 switch (hw->media_type) {
189 case MEDIA_TYPE_AUTO_SENSOR:
190 ecmd->advertising =
191 ADVERTISED_10baseT_Half |
192 ADVERTISED_10baseT_Full |
193 ADVERTISED_100baseT_Half |
194 ADVERTISED_100baseT_Full |
195 ADVERTISED_1000baseT_Full |
196 ADVERTISED_Autoneg | ADVERTISED_TP;
197 break;
198 case MEDIA_TYPE_1000M_FULL:
199 ecmd->advertising =
200 ADVERTISED_1000baseT_Full |
201 ADVERTISED_Autoneg | ADVERTISED_TP;
202 break;
203 default:
204 ecmd->advertising = 0;
205 break;
206 }
207 if (atl1_phy_setup_autoneg_adv(hw)) {
208 ret_val = -EINVAL;
209 printk(KERN_WARNING
210 "%s: invalid ethtool speed/duplex setting\n",
211 atl1_driver_name);
212 goto exit_sset;
213 }
214 if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
215 hw->media_type == MEDIA_TYPE_1000M_FULL)
216 phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
217 else {
218 switch (hw->media_type) {
219 case MEDIA_TYPE_100M_FULL:
220 phy_data =
221 MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
222 MII_CR_RESET;
223 break;
224 case MEDIA_TYPE_100M_HALF:
225 phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
226 break;
227 case MEDIA_TYPE_10M_FULL:
228 phy_data =
229 MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET;
230 break;
231 default: /* MEDIA_TYPE_10M_HALF: */
232 phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
233 break;
234 }
235 }
236 atl1_write_phy_reg(hw, MII_BMCR, phy_data);
237exit_sset:
238 if (ret_val)
239 hw->media_type = old_media_type;
240
241 if (netif_running(adapter->netdev)) {
242 printk(KERN_DEBUG "%s: ethtool starting adapter\n",
243 atl1_driver_name);
244 atl1_up(adapter);
245 } else if (!ret_val) {
246 printk(KERN_DEBUG "%s: ethtool resetting adapter\n",
247 atl1_driver_name);
248 atl1_reset(adapter);
249 }
250 return ret_val;
251}
252
253static void atl1_get_drvinfo(struct net_device *netdev,
254 struct ethtool_drvinfo *drvinfo)
255{
256 struct atl1_adapter *adapter = netdev_priv(netdev);
257
258 strncpy(drvinfo->driver, atl1_driver_name, sizeof(drvinfo->driver));
259 strncpy(drvinfo->version, atl1_driver_version,
260 sizeof(drvinfo->version));
261 strncpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
262 strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
263 sizeof(drvinfo->bus_info));
264 drvinfo->eedump_len = ATL1_EEDUMP_LEN;
265}
266
267static void atl1_get_wol(struct net_device *netdev,
268 struct ethtool_wolinfo *wol)
269{
270 struct atl1_adapter *adapter = netdev_priv(netdev);
271
272 wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC;
273 wol->wolopts = 0;
274 if (adapter->wol & ATL1_WUFC_EX)
275 wol->wolopts |= WAKE_UCAST;
276 if (adapter->wol & ATL1_WUFC_MC)
277 wol->wolopts |= WAKE_MCAST;
278 if (adapter->wol & ATL1_WUFC_BC)
279 wol->wolopts |= WAKE_BCAST;
280 if (adapter->wol & ATL1_WUFC_MAG)
281 wol->wolopts |= WAKE_MAGIC;
282 return;
283}
284
285static int atl1_set_wol(struct net_device *netdev,
286 struct ethtool_wolinfo *wol)
287{
288 struct atl1_adapter *adapter = netdev_priv(netdev);
289
290 if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
291 return -EOPNOTSUPP;
292 adapter->wol = 0;
293 if (wol->wolopts & WAKE_UCAST)
294 adapter->wol |= ATL1_WUFC_EX;
295 if (wol->wolopts & WAKE_MCAST)
296 adapter->wol |= ATL1_WUFC_MC;
297 if (wol->wolopts & WAKE_BCAST)
298 adapter->wol |= ATL1_WUFC_BC;
299 if (wol->wolopts & WAKE_MAGIC)
300 adapter->wol |= ATL1_WUFC_MAG;
301 return 0;
302}
303
304static void atl1_get_ringparam(struct net_device *netdev,
305 struct ethtool_ringparam *ring)
306{
307 struct atl1_adapter *adapter = netdev_priv(netdev);
308 struct atl1_tpd_ring *txdr = &adapter->tpd_ring;
309 struct atl1_rfd_ring *rxdr = &adapter->rfd_ring;
310
311 ring->rx_max_pending = ATL1_MAX_RFD;
312 ring->tx_max_pending = ATL1_MAX_TPD;
313 ring->rx_mini_max_pending = 0;
314 ring->rx_jumbo_max_pending = 0;
315 ring->rx_pending = rxdr->count;
316 ring->tx_pending = txdr->count;
317 ring->rx_mini_pending = 0;
318 ring->rx_jumbo_pending = 0;
319}
320
321static int atl1_set_ringparam(struct net_device *netdev,
322 struct ethtool_ringparam *ring)
323{
324 struct atl1_adapter *adapter = netdev_priv(netdev);
325 struct atl1_tpd_ring *tpdr = &adapter->tpd_ring;
326 struct atl1_rrd_ring *rrdr = &adapter->rrd_ring;
327 struct atl1_rfd_ring *rfdr = &adapter->rfd_ring;
328
329 struct atl1_tpd_ring tpd_old, tpd_new;
330 struct atl1_rfd_ring rfd_old, rfd_new;
331 struct atl1_rrd_ring rrd_old, rrd_new;
332 struct atl1_ring_header rhdr_old, rhdr_new;
333 int err;
334
335 tpd_old = adapter->tpd_ring;
336 rfd_old = adapter->rfd_ring;
337 rrd_old = adapter->rrd_ring;
338 rhdr_old = adapter->ring_header;
339
340 if (netif_running(adapter->netdev))
341 atl1_down(adapter);
342
343 rfdr->count = (u16) max(ring->rx_pending, (u32) ATL1_MIN_RFD);
344 rfdr->count = rfdr->count > ATL1_MAX_RFD ? ATL1_MAX_RFD :
345 rfdr->count;
346 rfdr->count = (rfdr->count + 3) & ~3;
347 rrdr->count = rfdr->count;
348
349 tpdr->count = (u16) max(ring->tx_pending, (u32) ATL1_MIN_TPD);
350 tpdr->count = tpdr->count > ATL1_MAX_TPD ? ATL1_MAX_TPD :
351 tpdr->count;
352 tpdr->count = (tpdr->count + 3) & ~3;
353
354 if (netif_running(adapter->netdev)) {
355 /* try to get new resources before deleting old */
356 err = atl1_setup_ring_resources(adapter);
357 if (err)
358 goto err_setup_ring;
359
360 /*
361 * save the new, restore the old in order to free it,
362 * then restore the new back again
363 */
364
365 rfd_new = adapter->rfd_ring;
366 rrd_new = adapter->rrd_ring;
367 tpd_new = adapter->tpd_ring;
368 rhdr_new = adapter->ring_header;
369 adapter->rfd_ring = rfd_old;
370 adapter->rrd_ring = rrd_old;
371 adapter->tpd_ring = tpd_old;
372 adapter->ring_header = rhdr_old;
373 atl1_free_ring_resources(adapter);
374 adapter->rfd_ring = rfd_new;
375 adapter->rrd_ring = rrd_new;
376 adapter->tpd_ring = tpd_new;
377 adapter->ring_header = rhdr_new;
378
379 err = atl1_up(adapter);
380 if (err)
381 return err;
382 }
383 return 0;
384
385err_setup_ring:
386 adapter->rfd_ring = rfd_old;
387 adapter->rrd_ring = rrd_old;
388 adapter->tpd_ring = tpd_old;
389 adapter->ring_header = rhdr_old;
390 atl1_up(adapter);
391 return err;
392}
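
atl1_set_ringparam() allocates the new rings before freeing the old ones, briefly swapping the saved copies back in so atl1_free_ring_resources() releases the right memory; a failed allocation therefore leaves the device on its original rings. The shape of that swap, reduced to a toy resource type (all names illustrative):

/* Resize pattern: acquire new before releasing old, roll back on
 * failure. */
struct res { void *buf; unsigned long len; };

static int resize(struct res *live, unsigned long new_len,
                  int (*alloc)(struct res *, unsigned long),
                  void (*release)(struct res *))
{
        struct res old = *live;         /* keep the old resource */
        int err = alloc(live, new_len);

        if (err) {
                *live = old;            /* roll back, nothing lost */
                return err;
        }
        release(&old);                  /* free old only on success */
        return 0;
}
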
393
394static void atl1_get_pauseparam(struct net_device *netdev,
395 struct ethtool_pauseparam *epause)
396{
397 struct atl1_adapter *adapter = netdev_priv(netdev);
398 struct atl1_hw *hw = &adapter->hw;
399
400 if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
401 hw->media_type == MEDIA_TYPE_1000M_FULL) {
402 epause->autoneg = AUTONEG_ENABLE;
403 } else {
404 epause->autoneg = AUTONEG_DISABLE;
405 }
406 epause->rx_pause = 1;
407 epause->tx_pause = 1;
408}
409
410static int atl1_set_pauseparam(struct net_device *netdev,
411 struct ethtool_pauseparam *epause)
412{
413 struct atl1_adapter *adapter = netdev_priv(netdev);
414 struct atl1_hw *hw = &adapter->hw;
415
416 if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
417 hw->media_type == MEDIA_TYPE_1000M_FULL) {
418 epause->autoneg = AUTONEG_ENABLE;
419 } else {
420 epause->autoneg = AUTONEG_DISABLE;
421 }
422
423 epause->rx_pause = 1;
424 epause->tx_pause = 1;
425
426 return 0;
427}
428
429static u32 atl1_get_rx_csum(struct net_device *netdev)
430{
431 return 1;
432}
433
434static void atl1_get_strings(struct net_device *netdev, u32 stringset,
435 u8 *data)
436{
437 u8 *p = data;
438 int i;
439
440 switch (stringset) {
441 case ETH_SS_STATS:
442 for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) {
443 memcpy(p, atl1_gstrings_stats[i].stat_string,
444 ETH_GSTRING_LEN);
445 p += ETH_GSTRING_LEN;
446 }
447 break;
448 }
449}
450
451static int atl1_nway_reset(struct net_device *netdev)
452{
453 struct atl1_adapter *adapter = netdev_priv(netdev);
454 struct atl1_hw *hw = &adapter->hw;
455
456 if (netif_running(netdev)) {
457 u16 phy_data;
458 atl1_down(adapter);
459
460 if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
461 hw->media_type == MEDIA_TYPE_1000M_FULL) {
462 phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
463 } else {
464 switch (hw->media_type) {
465 case MEDIA_TYPE_100M_FULL:
466 phy_data = MII_CR_FULL_DUPLEX |
467 MII_CR_SPEED_100 | MII_CR_RESET;
468 break;
469 case MEDIA_TYPE_100M_HALF:
470 phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
471 break;
472 case MEDIA_TYPE_10M_FULL:
473 phy_data = MII_CR_FULL_DUPLEX |
474 MII_CR_SPEED_10 | MII_CR_RESET;
475 break;
476 default: /* MEDIA_TYPE_10M_HALF */
477 phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
478 }
479 }
480 atl1_write_phy_reg(hw, MII_BMCR, phy_data);
481 atl1_up(adapter);
482 }
483 return 0;
484}
485
486const struct ethtool_ops atl1_ethtool_ops = {
487 .get_settings = atl1_get_settings,
488 .set_settings = atl1_set_settings,
489 .get_drvinfo = atl1_get_drvinfo,
490 .get_wol = atl1_get_wol,
491 .set_wol = atl1_set_wol,
492 .get_ringparam = atl1_get_ringparam,
493 .set_ringparam = atl1_set_ringparam,
494 .get_pauseparam = atl1_get_pauseparam,
495 .set_pauseparam = atl1_set_pauseparam,
496 .get_rx_csum = atl1_get_rx_csum,
497 .get_tx_csum = ethtool_op_get_tx_csum,
498 .set_tx_csum = ethtool_op_set_tx_hw_csum,
499 .get_link = ethtool_op_get_link,
500 .get_sg = ethtool_op_get_sg,
501 .set_sg = ethtool_op_set_sg,
502 .get_strings = atl1_get_strings,
503 .nway_reset = atl1_nway_reset,
504 .get_ethtool_stats = atl1_get_ethtool_stats,
505 .get_stats_count = atl1_get_stats_count,
506 .get_tso = ethtool_op_get_tso,
507 .set_tso = ethtool_op_set_tso,
508};
diff --git a/drivers/net/atl1/atl1_hw.c b/drivers/net/atl1/atl1_hw.c
new file mode 100644
index 000000000000..08b2d785469d
--- /dev/null
+++ b/drivers/net/atl1/atl1_hw.c
@@ -0,0 +1,718 @@
1/*
2 * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
3 * Copyright(c) 2006 Chris Snook <csnook@redhat.com>
4 * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
5 *
6 * Derived from Intel e1000 driver
7 * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the Free
11 * Software Foundation; either version 2 of the License, or (at your option)
12 * any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * more details.
18 *
19 * You should have received a copy of the GNU General Public License along with
20 * this program; if not, write to the Free Software Foundation, Inc., 59
21 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
22 */
23
24#include <linux/types.h>
25#include <linux/pci.h>
26#include <linux/delay.h>
27#include <linux/if_vlan.h>
28#include <linux/etherdevice.h>
29#include <linux/crc32.h>
30#include <asm/byteorder.h>
31
32#include "atl1.h"
33
34/*
35 * Reset the transmit and receive units; mask and clear all interrupts.
36 * hw - Struct containing variables accessed by shared code
37 * return : ATL1_SUCCESS or idle status (if error)
38 */
39s32 atl1_reset_hw(struct atl1_hw *hw)
40{
41 u32 icr;
42 int i;
43
44 /*
45 * Clear Interrupt mask to stop board from generating
46 * interrupts & Clear any pending interrupt events
47 */
48 /*
49 * iowrite32(0, hw->hw_addr + REG_IMR);
50 * iowrite32(0xffffffff, hw->hw_addr + REG_ISR);
51 */
52
53 /*
54 * Issue Soft Reset to the MAC. This will reset the chip's
 55 * transmit, receive and DMA engines. It will not affect
56 * the current PCI configuration. The global reset bit is self-
57 * clearing, and should clear within a microsecond.
58 */
59 iowrite32(MASTER_CTRL_SOFT_RST, hw->hw_addr + REG_MASTER_CTRL);
60 ioread32(hw->hw_addr + REG_MASTER_CTRL);
61
62 iowrite16(1, hw->hw_addr + REG_GPHY_ENABLE);
63 ioread16(hw->hw_addr + REG_GPHY_ENABLE);
64
65 msleep(1); /* delay about 1ms */
66
 67 /* Wait at least 10ms for all modules to go idle */
68 for (i = 0; i < 10; i++) {
69 icr = ioread32(hw->hw_addr + REG_IDLE_STATUS);
70 if (!icr)
71 break;
72 msleep(1); /* delay 1 ms */
73 cpu_relax(); /* FIXME: is this still the right way to do this? */
74 }
75
76 if (icr) {
77 printk (KERN_DEBUG "icr = %x\n", icr);
78 return icr;
79 }
80
81 return ATL1_SUCCESS;
82}
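
atl1_reset_hw() busy-waits up to roughly 10 ms for the idle-status register to clear, a bounded-poll shape the driver repeats for its VPD, SPI and MDIO accesses below. Factored into a helper it would look like this (hypothetical; not part of the driver):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>

/* Poll a register until it reads zero or the retry budget expires. */
static int poll_until_clear(void __iomem *reg, int tries)
{
        while (tries--) {
                if (!ioread32(reg))
                        return 0;       /* went idle */
                msleep(1);
        }
        return -EBUSY;                  /* still busy */
}
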
83
 84/* EEPROM helper functions
 85 *
 86 * check_eeprom_exist
 87 * returns 0 if an EEPROM is present
 88 */
89static int atl1_check_eeprom_exist(struct atl1_hw *hw)
90{
91 u32 value;
92 value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
93 if (value & SPI_FLASH_CTRL_EN_VPD) {
94 value &= ~SPI_FLASH_CTRL_EN_VPD;
95 iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
96 }
97
98 value = ioread16(hw->hw_addr + REG_PCIE_CAP_LIST);
99 return ((value & 0xFF00) == 0x6C00) ? 0 : 1;
100}
101
102static bool atl1_read_eeprom(struct atl1_hw *hw, u32 offset, u32 *p_value)
103{
104 int i;
105 u32 control;
106
107 if (offset & 3)
 108 return false; /* address is not dword-aligned */
109
110 iowrite32(0, hw->hw_addr + REG_VPD_DATA);
111 control = (offset & VPD_CAP_VPD_ADDR_MASK) << VPD_CAP_VPD_ADDR_SHIFT;
112 iowrite32(control, hw->hw_addr + REG_VPD_CAP);
113 ioread32(hw->hw_addr + REG_VPD_CAP);
114
115 for (i = 0; i < 10; i++) {
116 msleep(2);
117 control = ioread32(hw->hw_addr + REG_VPD_CAP);
118 if (control & VPD_CAP_VPD_FLAG)
119 break;
120 }
121 if (control & VPD_CAP_VPD_FLAG) {
122 *p_value = ioread32(hw->hw_addr + REG_VPD_DATA);
123 return true;
124 }
125 return false; /* timeout */
126}
127
128/*
129 * Reads the value from a PHY register
130 * hw - Struct containing variables accessed by shared code
131 * reg_addr - address of the PHY register to read
132 */
133s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data)
134{
135 u32 val;
136 int i;
137
138 val = ((u32) (reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT |
139 MDIO_START | MDIO_SUP_PREAMBLE | MDIO_RW | MDIO_CLK_25_4 <<
140 MDIO_CLK_SEL_SHIFT;
141 iowrite32(val, hw->hw_addr + REG_MDIO_CTRL);
142 ioread32(hw->hw_addr + REG_MDIO_CTRL);
143
144 for (i = 0; i < MDIO_WAIT_TIMES; i++) {
145 udelay(2);
146 val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
147 if (!(val & (MDIO_START | MDIO_BUSY)))
148 break;
149 }
150 if (!(val & (MDIO_START | MDIO_BUSY))) {
151 *phy_data = (u16) val;
152 return ATL1_SUCCESS;
153 }
154 return ATL1_ERR_PHY;
155}
156
157#define CUSTOM_SPI_CS_SETUP 2
158#define CUSTOM_SPI_CLK_HI 2
159#define CUSTOM_SPI_CLK_LO 2
160#define CUSTOM_SPI_CS_HOLD 2
161#define CUSTOM_SPI_CS_HI 3
162
163static bool atl1_spi_read(struct atl1_hw *hw, u32 addr, u32 *buf)
164{
165 int i;
166 u32 value;
167
168 iowrite32(0, hw->hw_addr + REG_SPI_DATA);
169 iowrite32(addr, hw->hw_addr + REG_SPI_ADDR);
170
171 value = SPI_FLASH_CTRL_WAIT_READY |
172 (CUSTOM_SPI_CS_SETUP & SPI_FLASH_CTRL_CS_SETUP_MASK) <<
173 SPI_FLASH_CTRL_CS_SETUP_SHIFT | (CUSTOM_SPI_CLK_HI &
174 SPI_FLASH_CTRL_CLK_HI_MASK) <<
175 SPI_FLASH_CTRL_CLK_HI_SHIFT | (CUSTOM_SPI_CLK_LO &
176 SPI_FLASH_CTRL_CLK_LO_MASK) <<
177 SPI_FLASH_CTRL_CLK_LO_SHIFT | (CUSTOM_SPI_CS_HOLD &
178 SPI_FLASH_CTRL_CS_HOLD_MASK) <<
179 SPI_FLASH_CTRL_CS_HOLD_SHIFT | (CUSTOM_SPI_CS_HI &
180 SPI_FLASH_CTRL_CS_HI_MASK) <<
181 SPI_FLASH_CTRL_CS_HI_SHIFT | (1 & SPI_FLASH_CTRL_INS_MASK) <<
182 SPI_FLASH_CTRL_INS_SHIFT;
183
184 iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
185
186 value |= SPI_FLASH_CTRL_START;
187 iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
188 ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
189
190 for (i = 0; i < 10; i++) {
191 msleep(1); /* 1ms */
192 value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
193 if (!(value & SPI_FLASH_CTRL_START))
194 break;
195 }
196
197 if (value & SPI_FLASH_CTRL_START)
198 return false;
199
200 *buf = ioread32(hw->hw_addr + REG_SPI_DATA);
201
202 return true;
203}
204
205/*
206 * get_permanent_address
 207 * returns 0 if a valid MAC address was obtained
208 */
209static int atl1_get_permanent_address(struct atl1_hw *hw)
210{
211 u32 addr[2];
212 u32 i, control;
213 u16 reg;
214 u8 eth_addr[ETH_ALEN];
215 bool key_valid;
216
217 if (is_valid_ether_addr(hw->perm_mac_addr))
218 return 0;
219
220 /* init */
221 addr[0] = addr[1] = 0;
222
223 if (!atl1_check_eeprom_exist(hw)) { /* eeprom exist */
224 reg = 0;
225 key_valid = false;
226 /* Read out all EEPROM content */
227 i = 0;
228 while (1) {
229 if (atl1_read_eeprom(hw, i + 0x100, &control)) {
230 if (key_valid) {
231 if (reg == REG_MAC_STA_ADDR)
232 addr[0] = control;
233 else if (reg == (REG_MAC_STA_ADDR + 4))
234 addr[1] = control;
235 key_valid = false;
236 } else if ((control & 0xff) == 0x5A) {
237 key_valid = true;
238 reg = (u16) (control >> 16);
239 } else
 240 break; /* assume end of data when an invalid keyword is seen */
241 } else
242 break; /* read error */
243 i += 4;
244 }
245
246/*
247 * The following 2 lines are the Attansic originals. Saving for posterity.
248 * *(u32 *) & eth_addr[2] = LONGSWAP(addr[0]);
249 * *(u16 *) & eth_addr[0] = SHORTSWAP(*(u16 *) & addr[1]);
250 */
251 *(u32 *) & eth_addr[2] = swab32(addr[0]);
252 *(u16 *) & eth_addr[0] = swab16(*(u16 *) & addr[1]);
253
254 if (is_valid_ether_addr(eth_addr)) {
255 memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
256 return 0;
257 }
258 return 1;
259 }
260
261 /* see if SPI FLAGS exist ? */
262 addr[0] = addr[1] = 0;
263 reg = 0;
264 key_valid = false;
265 i = 0;
266 while (1) {
267 if (atl1_spi_read(hw, i + 0x1f000, &control)) {
268 if (key_valid) {
269 if (reg == REG_MAC_STA_ADDR)
270 addr[0] = control;
271 else if (reg == (REG_MAC_STA_ADDR + 4))
272 addr[1] = control;
273 key_valid = false;
274 } else if ((control & 0xff) == 0x5A) {
275 key_valid = true;
276 reg = (u16) (control >> 16);
277 } else
278 break; /* data end */
279 } else
280 break; /* read error */
281 i += 4;
282 }
283
284/*
285 * The following 2 lines are the Attansic originals. Saving for posterity.
286 * *(u32 *) & eth_addr[2] = LONGSWAP(addr[0]);
287 * *(u16 *) & eth_addr[0] = SHORTSWAP(*(u16 *) & addr[1]);
288 */
289 *(u32 *) & eth_addr[2] = swab32(addr[0]);
290 *(u16 *) & eth_addr[0] = swab16(*(u16 *) & addr[1]);
291 if (is_valid_ether_addr(eth_addr)) {
292 memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
293 return 0;
294 }
295 return 1;
296}
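
Both the EEPROM and SPI-flash paths above run the same scan: a stream of 32-bit words in which a word whose low byte is 0x5A names a register in its high half, the following word carries that register's value, and any other word ends the stream. The scan in schematic form (read_word is a stand-in for atl1_read_eeprom()/atl1_spi_read()):

#include <linux/types.h>

static void scan_keyed_words(bool (*read_word)(u32 off, u32 *val),
                             u32 base, u16 want_reg, u32 *out)
{
        u32 control, off = 0;
        u16 reg = 0;
        bool key_valid = false;

        while (read_word(base + off, &control)) {
                if (key_valid) {
                        if (reg == want_reg)
                                *out = control;
                        key_valid = false;
                } else if ((control & 0xff) == 0x5A) {
                        key_valid = true;       /* next word is data */
                        reg = (u16)(control >> 16);
                } else {
                        break;  /* invalid keyword: end of data */
                }
                off += 4;
        }
}
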
297
298/*
299 * Reads the adapter's MAC address from the EEPROM
300 * hw - Struct containing variables accessed by shared code
301 */
302s32 atl1_read_mac_addr(struct atl1_hw *hw)
303{
304 u16 i;
305
306 if (atl1_get_permanent_address(hw))
307 random_ether_addr(hw->perm_mac_addr);
308
309 for (i = 0; i < ETH_ALEN; i++)
310 hw->mac_addr[i] = hw->perm_mac_addr[i];
311 return ATL1_SUCCESS;
312}
313
314/*
315 * Hashes an address to determine its location in the multicast table
316 * hw - Struct containing variables accessed by shared code
317 * mc_addr - the multicast address to hash
318 *
319 * atl1_hash_mc_addr
 320 * purpose:
 321 * compute the hash-table value for a multicast address
 322 * hash calculation:
 323 * 1. compute the 32-bit CRC of the multicast address
 324 * 2. bit-reverse the CRC (MSB becomes LSB)
325 */
326u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr)
327{
328 u32 crc32, value = 0;
329 int i;
330
331 crc32 = ether_crc_le(6, mc_addr);
332 crc32 = ~crc32;
333 for (i = 0; i < 32; i++)
334 value |= (((crc32 >> i) & 1) << (31 - i));
335
336 return value;
337}
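
The loop above bit-reverses the complemented CRC by hand; the kernel's bitrev32() (linux/bitrev.h) expresses the same operation directly, assuming it is available in this tree. Purely to illustrate the equivalence:

#include <linux/bitrev.h>
#include <linux/crc32.h>
#include <linux/types.h>

static u32 hash_mc_addr_alt(const u8 *mc_addr)
{
        /* same result as the explicit 32-iteration loop above */
        return bitrev32(~ether_crc_le(6, mc_addr));
}
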
338
339/*
340 * Sets the bit in the multicast table corresponding to the hash value.
341 * hw - Struct containing variables accessed by shared code
342 * hash_value - Multicast address hash value
343 */
344void atl1_hash_set(struct atl1_hw *hw, u32 hash_value)
345{
346 u32 hash_bit, hash_reg;
347 u32 mta;
348
349 /*
350 * The HASH Table is a register array of 2 32-bit registers.
351 * It is treated like an array of 64 bits. We want to set
352 * bit BitArray[hash_value]. So we figure out what register
353 * the bit is in, read it, OR in the new bit, then write
 354 * back the new value. The register is selected by the top
 355 * bit of the hash value and the bit within that register
 356 * by the next five bits (matching the shifts below).
357 */
358 hash_reg = (hash_value >> 31) & 0x1;
359 hash_bit = (hash_value >> 26) & 0x1F;
 360 mta = ioread32((hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2));
361 mta |= (1 << hash_bit);
362 iowrite32(mta, (hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2));
363}
364
365/*
366 * Writes a value to a PHY register
367 * hw - Struct containing variables accessed by shared code
368 * reg_addr - address of the PHY register to write
369 * data - data to write to the PHY
370 */
371s32 atl1_write_phy_reg(struct atl1_hw *hw, u32 reg_addr, u16 phy_data)
372{
373 int i;
374 u32 val;
375
376 val = ((u32) (phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT |
377 (reg_addr & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT |
378 MDIO_SUP_PREAMBLE |
379 MDIO_START | MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
380 iowrite32(val, hw->hw_addr + REG_MDIO_CTRL);
381 ioread32(hw->hw_addr + REG_MDIO_CTRL);
382
383 for (i = 0; i < MDIO_WAIT_TIMES; i++) {
384 udelay(2);
385 val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
386 if (!(val & (MDIO_START | MDIO_BUSY)))
387 break;
388 }
389
390 if (!(val & (MDIO_START | MDIO_BUSY)))
391 return ATL1_SUCCESS;
392
393 return ATL1_ERR_PHY;
394}
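
With both accessors in place, a typical PHY read-modify-write through them looks like the following sketch (hypothetical helper; MII_BMCR and the BMCR_* bits come from linux/mii.h):

#include <linux/mii.h>

/* Restart autonegotiation without disturbing other BMCR bits. */
static s32 phy_restart_aneg(struct atl1_hw *hw)
{
        u16 bmcr;
        s32 ret = atl1_read_phy_reg(hw, MII_BMCR, &bmcr);

        if (ret)
                return ret;
        bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
        return atl1_write_phy_reg(hw, MII_BMCR, bmcr);
}
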
395
396/*
 397 * Force the L001's PHY out of its power-saving state (hardware bug)
 398 * hw - Struct containing variables accessed by shared code
 399 * At power-on the L001's PHY always comes up in the power-saving
 400 * state, in which gigabit link is unavailable.
401 */
402static s32 atl1_phy_leave_power_saving(struct atl1_hw *hw)
403{
404 s32 ret;
405 ret = atl1_write_phy_reg(hw, 29, 0x0029);
406 if (ret)
407 return ret;
408 return atl1_write_phy_reg(hw, 30, 0);
409}
410
411/*
412 *TODO: do something or get rid of this
413 */
414s32 atl1_phy_enter_power_saving(struct atl1_hw *hw)
415{
416/* s32 ret_val;
417 * u16 phy_data;
418 */
419
420/*
421 ret_val = atl1_write_phy_reg(hw, ...);
422 ret_val = atl1_write_phy_reg(hw, ...);
423 ....
424*/
425 return ATL1_SUCCESS;
426}
427
428/*
 429 * Resets the PHY and makes all configuration take effect
 430 * hw - Struct containing variables accessed by shared code
 431 *
 432 * Sets bits 15 and 12 of the MII Control register (for the F001 bug)
433 */
434static s32 atl1_phy_reset(struct atl1_hw *hw)
435{
436 s32 ret_val;
437 u16 phy_data;
438
439 if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
440 hw->media_type == MEDIA_TYPE_1000M_FULL)
441 phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
442 else {
443 switch (hw->media_type) {
444 case MEDIA_TYPE_100M_FULL:
445 phy_data =
446 MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
447 MII_CR_RESET;
448 break;
449 case MEDIA_TYPE_100M_HALF:
450 phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
451 break;
452 case MEDIA_TYPE_10M_FULL:
453 phy_data =
454 MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET;
455 break;
456 default: /* MEDIA_TYPE_10M_HALF: */
457 phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
458 break;
459 }
460 }
461
462 ret_val = atl1_write_phy_reg(hw, MII_BMCR, phy_data);
463 if (ret_val) {
464 u32 val;
465 int i;
466 /* pcie serdes link may be down! */
467 printk(KERN_DEBUG "%s: autoneg caused pcie phy link down\n",
468 atl1_driver_name);
469
470 for (i = 0; i < 25; i++) {
471 msleep(1);
472 val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
473 if (!(val & (MDIO_START | MDIO_BUSY)))
474 break;
475 }
476
477 if ((val & (MDIO_START | MDIO_BUSY)) != 0) {
478 printk(KERN_WARNING
479 "%s: pcie link down at least for 25ms\n",
480 atl1_driver_name);
481 return ret_val;
482 }
483 }
484 return ATL1_SUCCESS;
485}
486
487/*
488 * Configures PHY autoneg and flow control advertisement settings
489 * hw - Struct containing variables accessed by shared code
490 */
491s32 atl1_phy_setup_autoneg_adv(struct atl1_hw *hw)
492{
493 s32 ret_val;
494 s16 mii_autoneg_adv_reg;
495 s16 mii_1000t_ctrl_reg;
496
497	/* Default capabilities for the MII Auto-Neg Advertisement Register (Address 4) */
498 mii_autoneg_adv_reg = MII_AR_DEFAULT_CAP_MASK;
499
500	/* Default capabilities for the MII 1000Base-T Control Register (Address 9) */
501 mii_1000t_ctrl_reg = MII_AT001_CR_1000T_DEFAULT_CAP_MASK;
502
503 /*
504 * First we clear all the 10/100 mb speed bits in the Auto-Neg
505 * Advertisement Register (Address 4) and the 1000 mb speed bits in
506 * the 1000Base-T Control Register (Address 9).
507 */
508 mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK;
509 mii_1000t_ctrl_reg &= ~MII_AT001_CR_1000T_SPEED_MASK;
510
511 /*
512 * Need to parse media_type and set up
513 * the appropriate PHY registers.
514 */
515 switch (hw->media_type) {
516 case MEDIA_TYPE_AUTO_SENSOR:
517 mii_autoneg_adv_reg |= (MII_AR_10T_HD_CAPS |
518 MII_AR_10T_FD_CAPS |
519 MII_AR_100TX_HD_CAPS |
520 MII_AR_100TX_FD_CAPS);
521 mii_1000t_ctrl_reg |= MII_AT001_CR_1000T_FD_CAPS;
522 break;
523
524 case MEDIA_TYPE_1000M_FULL:
525 mii_1000t_ctrl_reg |= MII_AT001_CR_1000T_FD_CAPS;
526 break;
527
528 case MEDIA_TYPE_100M_FULL:
529 mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS;
530 break;
531
532 case MEDIA_TYPE_100M_HALF:
533 mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS;
534 break;
535
536 case MEDIA_TYPE_10M_FULL:
537 mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS;
538 break;
539
540 default:
541 mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS;
542 break;
543 }
544
545 /* flow control fixed to enable all */
546 mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE);
547
548 hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg;
549 hw->mii_1000t_ctrl_reg = mii_1000t_ctrl_reg;
550
551 ret_val = atl1_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg);
552 if (ret_val)
553 return ret_val;
554
555 ret_val = atl1_write_phy_reg(hw, MII_AT001_CR, mii_1000t_ctrl_reg);
556 if (ret_val)
557 return ret_val;
558
559 return ATL1_SUCCESS;
560}
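/*
 * Worked example using the defines in atl1_hw.h: for MEDIA_TYPE_AUTO_SENSOR
 * the advertisement word evolves as
 *
 *	0x0DE0 (MII_AR_DEFAULT_CAP_MASK)
 *	& ~0x01E0 (MII_AR_SPEED_MASK)			-> 0x0C00
 *	|  0x01E0 (all 10/100 HD/FD caps)		-> 0x0DE0
 *	|  0x0C00 (MII_AR_ASM_DIR | MII_AR_PAUSE)	-> 0x0DE0
 *
 * i.e. MII_ADVERTISE ends up back at the default capability mask.
 */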
561
562/*
563 * Configures link settings.
564 * hw - Struct containing variables accessed by shared code
565 * Assumes the hardware has previously been reset and the
566 * transmitter and receiver are not enabled.
567 */
568static s32 atl1_setup_link(struct atl1_hw *hw)
569{
570 s32 ret_val;
571
572 /*
573 * Options:
574 * PHY will advertise value(s) parsed from
575 * autoneg_advertised and fc
576	 *	Regardless of the autoneg setting, we do not wait for the link result.
577 */
578 ret_val = atl1_phy_setup_autoneg_adv(hw);
579 if (ret_val) {
580 printk(KERN_DEBUG "%s: error setting up autonegotiation\n",
581 atl1_driver_name);
582 return ret_val;
583 }
584	/* Software-reset the PHY, enabling auto-negotiation if needed */
585 ret_val = atl1_phy_reset(hw);
586 if (ret_val) {
587 printk(KERN_DEBUG "%s: error resetting the phy\n",
588 atl1_driver_name);
589 return ret_val;
590 }
591 hw->phy_configured = true;
592 return ret_val;
593}
594
595static struct atl1_spi_flash_dev flash_table[] = {
596/* MFR_NAME WRSR READ PRGM WREN WRDI RDSR RDID SECTOR_ERASE CHIP_ERASE */
597 {"Atmel", 0x00, 0x03, 0x02, 0x06, 0x04, 0x05, 0x15, 0x52, 0x62},
598 {"SST", 0x01, 0x03, 0x02, 0x06, 0x04, 0x05, 0x90, 0x20, 0x60},
599 {"ST", 0x01, 0x03, 0x02, 0x06, 0x04, 0x05, 0xAB, 0xD8, 0xC7},
600};
601
602static void atl1_init_flash_opcode(struct atl1_hw *hw)
603{
604	if (hw->flash_vendor >= ARRAY_SIZE(flash_table))
605 hw->flash_vendor = 0; /* ATMEL */
606
607 /* Init OP table */
608 iowrite8(flash_table[hw->flash_vendor].cmd_program,
609 hw->hw_addr + REG_SPI_FLASH_OP_PROGRAM);
610 iowrite8(flash_table[hw->flash_vendor].cmd_sector_erase,
611 hw->hw_addr + REG_SPI_FLASH_OP_SC_ERASE);
612 iowrite8(flash_table[hw->flash_vendor].cmd_chip_erase,
613 hw->hw_addr + REG_SPI_FLASH_OP_CHIP_ERASE);
614 iowrite8(flash_table[hw->flash_vendor].cmd_rdid,
615 hw->hw_addr + REG_SPI_FLASH_OP_RDID);
616 iowrite8(flash_table[hw->flash_vendor].cmd_wren,
617 hw->hw_addr + REG_SPI_FLASH_OP_WREN);
618 iowrite8(flash_table[hw->flash_vendor].cmd_rdsr,
619 hw->hw_addr + REG_SPI_FLASH_OP_RDSR);
620 iowrite8(flash_table[hw->flash_vendor].cmd_wrsr,
621 hw->hw_addr + REG_SPI_FLASH_OP_WRSR);
622 iowrite8(flash_table[hw->flash_vendor].cmd_read,
623 hw->hw_addr + REG_SPI_FLASH_OP_READ);
624}
625
626/*
627 * Performs basic configuration of the adapter.
628 * hw - Struct containing variables accessed by shared code
629 * Assumes that the controller has previously been reset and is in a
630 * post-reset uninitialized state. Initializes the multicast table
631 * and calls routines to set up the link.
632 * Leaves the transmit and receive units disabled and uninitialized.
633 */
634s32 atl1_init_hw(struct atl1_hw *hw)
635{
636	s32 ret_val = 0;
637
638 /* Zero out the Multicast HASH table */
639 iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE);
640 /* clear the old settings from the multicast hash table */
641 iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2));
642
643 atl1_init_flash_opcode(hw);
644
645 if (!hw->phy_configured) {
646		/* enable GPHY LinkChange interrupt */
647 ret_val = atl1_write_phy_reg(hw, 18, 0xC00);
648 if (ret_val)
649 return ret_val;
650 /* make PHY out of power-saving state */
651 ret_val = atl1_phy_leave_power_saving(hw);
652 if (ret_val)
653 return ret_val;
654 /* Call a subroutine to configure the link */
655 ret_val = atl1_setup_link(hw);
656 }
657 return ret_val;
658}
659
660/*
661 * Detects the current speed and duplex settings of the hardware.
662 * hw - Struct containing variables accessed by shared code
663 * speed - Speed of the connection
664 * duplex - Duplex setting of the connection
665 */
666s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex)
667{
668 s32 ret_val;
669 u16 phy_data;
670
671	/* Read the PHY Specific Status Register (17) */
672 ret_val = atl1_read_phy_reg(hw, MII_AT001_PSSR, &phy_data);
673 if (ret_val)
674 return ret_val;
675
676 if (!(phy_data & MII_AT001_PSSR_SPD_DPLX_RESOLVED))
677 return ATL1_ERR_PHY_RES;
678
679 switch (phy_data & MII_AT001_PSSR_SPEED) {
680 case MII_AT001_PSSR_1000MBS:
681 *speed = SPEED_1000;
682 break;
683 case MII_AT001_PSSR_100MBS:
684 *speed = SPEED_100;
685 break;
686 case MII_AT001_PSSR_10MBS:
687 *speed = SPEED_10;
688 break;
689	default:
690		printk(KERN_DEBUG "%s: error getting speed\n",
691			atl1_driver_name);
692		return ATL1_ERR_PHY_SPEED;
694 }
695 if (phy_data & MII_AT001_PSSR_DPLX)
696 *duplex = FULL_DUPLEX;
697 else
698 *duplex = HALF_DUPLEX;
699
700 return ATL1_SUCCESS;
701}
702
703void atl1_set_mac_addr(struct atl1_hw *hw)
704{
705 u32 value;
706 /*
707	 * Example: MAC address 00-0B-6A-F6-00-DC is stored as
708	 *	reg 0 (low dword) = 0x6AF600DC, reg 1 (high dword) = 0x000B
709 * low dword
710 */
711 value = (((u32) hw->mac_addr[2]) << 24) |
712 (((u32) hw->mac_addr[3]) << 16) |
713 (((u32) hw->mac_addr[4]) << 8) | (((u32) hw->mac_addr[5]));
714 iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR);
715 /* high dword */
716 value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1]));
717 iowrite32(value, (hw->hw_addr + REG_MAC_STA_ADDR) + (1 << 2));
718}
diff --git a/drivers/net/atl1/atl1_hw.h b/drivers/net/atl1/atl1_hw.h
new file mode 100644
index 000000000000..100c09c66e64
--- /dev/null
+++ b/drivers/net/atl1/atl1_hw.h
@@ -0,0 +1,951 @@
1/*
2 * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
3 * Copyright(c) 2006 Chris Snook <csnook@redhat.com>
4 * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
5 *
6 * Derived from Intel e1000 driver
7 * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the Free
11 * Software Foundation; either version 2 of the License, or (at your option)
12 * any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * more details.
18 *
19 * You should have received a copy of the GNU General Public License along with
20 * this program; if not, write to the Free Software Foundation, Inc., 59
21 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
22 *
23 * There are a lot of defines in here that are unused and/or have cryptic
24 * names. Please leave them alone, as they're the closest thing we have
25 * to a spec from Attansic at present. *ahem* -- CHS
26 */
27
28#ifndef _ATL1_HW_H_
29#define _ATL1_HW_H_
30
31#include <linux/types.h>
32#include <linux/mii.h>
33
34struct atl1_adapter;
35struct atl1_hw;
36
37/* function prototypes needed by multiple files */
38s32 atl1_phy_setup_autoneg_adv(struct atl1_hw *hw);
39s32 atl1_write_phy_reg(struct atl1_hw *hw, u32 reg_addr, u16 phy_data);
40s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex);
41s32 atl1_read_mac_addr(struct atl1_hw *hw);
42s32 atl1_init_hw(struct atl1_hw *hw);
44s32 atl1_set_speed_and_duplex(struct atl1_hw *hw, u16 speed, u16 duplex);
45u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr);
46void atl1_hash_set(struct atl1_hw *hw, u32 hash_value);
47s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data);
48void atl1_set_mac_addr(struct atl1_hw *hw);
49s32 atl1_phy_enter_power_saving(struct atl1_hw *hw);
50s32 atl1_reset_hw(struct atl1_hw *hw);
51void atl1_check_options(struct atl1_adapter *adapter);
52
53/* register definitions */
54#define REG_PCIE_CAP_LIST 0x58
55
56#define REG_VPD_CAP 0x6C
57#define VPD_CAP_ID_MASK 0xff
58#define VPD_CAP_ID_SHIFT 0
59#define VPD_CAP_NEXT_PTR_MASK 0xFF
60#define VPD_CAP_NEXT_PTR_SHIFT 8
61#define VPD_CAP_VPD_ADDR_MASK 0x7FFF
62#define VPD_CAP_VPD_ADDR_SHIFT 16
63#define VPD_CAP_VPD_FLAG 0x80000000
64
65#define REG_VPD_DATA 0x70
66
67#define REG_SPI_FLASH_CTRL 0x200
68#define SPI_FLASH_CTRL_STS_NON_RDY 0x1
69#define SPI_FLASH_CTRL_STS_WEN 0x2
70#define SPI_FLASH_CTRL_STS_WPEN 0x80
71#define SPI_FLASH_CTRL_DEV_STS_MASK 0xFF
72#define SPI_FLASH_CTRL_DEV_STS_SHIFT 0
73#define SPI_FLASH_CTRL_INS_MASK 0x7
74#define SPI_FLASH_CTRL_INS_SHIFT 8
75#define SPI_FLASH_CTRL_START 0x800
76#define SPI_FLASH_CTRL_EN_VPD 0x2000
77#define SPI_FLASH_CTRL_LDSTART 0x8000
78#define SPI_FLASH_CTRL_CS_HI_MASK 0x3
79#define SPI_FLASH_CTRL_CS_HI_SHIFT 16
80#define SPI_FLASH_CTRL_CS_HOLD_MASK 0x3
81#define SPI_FLASH_CTRL_CS_HOLD_SHIFT 18
82#define SPI_FLASH_CTRL_CLK_LO_MASK 0x3
83#define SPI_FLASH_CTRL_CLK_LO_SHIFT 20
84#define SPI_FLASH_CTRL_CLK_HI_MASK 0x3
85#define SPI_FLASH_CTRL_CLK_HI_SHIFT 22
86#define SPI_FLASH_CTRL_CS_SETUP_MASK 0x3
87#define SPI_FLASH_CTRL_CS_SETUP_SHIFT 24
88#define SPI_FLASH_CTRL_EROM_PGSZ_MASK 0x3
89#define SPI_FLASH_CTRL_EROM_PGSZ_SHIFT 26
90#define SPI_FLASH_CTRL_WAIT_READY 0x10000000
91
92#define REG_SPI_ADDR 0x204
93
94#define REG_SPI_DATA 0x208
95
96#define REG_SPI_FLASH_CONFIG 0x20C
97#define SPI_FLASH_CONFIG_LD_ADDR_MASK 0xFFFFFF
98#define SPI_FLASH_CONFIG_LD_ADDR_SHIFT 0
99#define SPI_FLASH_CONFIG_VPD_ADDR_MASK 0x3
100#define SPI_FLASH_CONFIG_VPD_ADDR_SHIFT 24
101#define SPI_FLASH_CONFIG_LD_EXIST 0x4000000
102
103#define REG_SPI_FLASH_OP_PROGRAM 0x210
104#define REG_SPI_FLASH_OP_SC_ERASE 0x211
105#define REG_SPI_FLASH_OP_CHIP_ERASE 0x212
106#define REG_SPI_FLASH_OP_RDID 0x213
107#define REG_SPI_FLASH_OP_WREN 0x214
108#define REG_SPI_FLASH_OP_RDSR 0x215
109#define REG_SPI_FLASH_OP_WRSR 0x216
110#define REG_SPI_FLASH_OP_READ 0x217
111
112#define REG_TWSI_CTRL 0x218
113#define TWSI_CTRL_LD_OFFSET_MASK 0xFF
114#define TWSI_CTRL_LD_OFFSET_SHIFT 0
115#define TWSI_CTRL_LD_SLV_ADDR_MASK 0x7
116#define TWSI_CTRL_LD_SLV_ADDR_SHIFT 8
117#define TWSI_CTRL_SW_LDSTART 0x800
118#define TWSI_CTRL_HW_LDSTART 0x1000
119#define TWSI_CTRL_SMB_SLV_ADDR_MASK 0x7F
120#define TWSI_CTRL_SMB_SLV_ADDR_SHIFT 15
121#define TWSI_CTRL_LD_EXIST 0x400000
122#define TWSI_CTRL_READ_FREQ_SEL_MASK 0x3
123#define TWSI_CTRL_READ_FREQ_SEL_SHIFT 23
124#define TWSI_CTRL_FREQ_SEL_100K 0
125#define TWSI_CTRL_FREQ_SEL_200K 1
126#define TWSI_CTRL_FREQ_SEL_300K 2
127#define TWSI_CTRL_FREQ_SEL_400K 3
128#define TWSI_CTRL_SMB_SLV_ADDR
129#define TWSI_CTRL_WRITE_FREQ_SEL_MASK 0x3
130#define TWSI_CTRL_WRITE_FREQ_SEL_SHIFT 24
131
132#define REG_PCIE_DEV_MISC_CTRL 0x21C
133#define PCIE_DEV_MISC_CTRL_EXT_PIPE 0x2
134#define PCIE_DEV_MISC_CTRL_RETRY_BUFDIS 0x1
135#define PCIE_DEV_MISC_CTRL_SPIROM_EXIST 0x4
136#define PCIE_DEV_MISC_CTRL_SERDES_ENDIAN 0x8
137#define PCIE_DEV_MISC_CTRL_SERDES_SEL_DIN 0x10
138
139/* Selene Master Control Register */
140#define REG_MASTER_CTRL 0x1400
141#define MASTER_CTRL_SOFT_RST 0x1
142#define MASTER_CTRL_MTIMER_EN 0x2
143#define MASTER_CTRL_ITIMER_EN 0x4
144#define MASTER_CTRL_MANUAL_INT 0x8
145#define MASTER_CTRL_REV_NUM_SHIFT 16
146#define MASTER_CTRL_REV_NUM_MASK 0xff
147#define MASTER_CTRL_DEV_ID_SHIFT 24
148#define MASTER_CTRL_DEV_ID_MASK 0xff
149
150/* Timer Initial Value Register */
151#define REG_MANUAL_TIMER_INIT 0x1404
152
153/* IRQ Moderator Timer Initial Value Register */
154#define REG_IRQ_MODU_TIMER_INIT 0x1408
155
156#define REG_GPHY_ENABLE 0x140C
157
158/* IRQ Anti-Lost Timer Initial Value Register */
159#define REG_CMBDISDMA_TIMER 0x140E
160
161/* Block IDLE Status Register */
162#define REG_IDLE_STATUS 0x1410
163#define IDLE_STATUS_RXMAC 1
164#define IDLE_STATUS_TXMAC 2
165#define IDLE_STATUS_RXQ 4
166#define IDLE_STATUS_TXQ 8
167#define IDLE_STATUS_DMAR 0x10
168#define IDLE_STATUS_DMAW 0x20
169#define IDLE_STATUS_SMB 0x40
170#define IDLE_STATUS_CMB 0x80
171
172/* MDIO Control Register */
173#define REG_MDIO_CTRL 0x1414
174#define MDIO_DATA_MASK 0xffff
175#define MDIO_DATA_SHIFT 0
176#define MDIO_REG_ADDR_MASK 0x1f
177#define MDIO_REG_ADDR_SHIFT 16
178#define MDIO_RW 0x200000
179#define MDIO_SUP_PREAMBLE 0x400000
180#define MDIO_START 0x800000
181#define MDIO_CLK_SEL_SHIFT 24
182#define MDIO_CLK_25_4 0
183#define MDIO_CLK_25_6 2
184#define MDIO_CLK_25_8 3
185#define MDIO_CLK_25_10 4
186#define MDIO_CLK_25_14 5
187#define MDIO_CLK_25_20 6
188#define MDIO_CLK_25_28 7
189#define MDIO_BUSY 0x8000000
190#define MDIO_WAIT_TIMES 30
191
192/* MII PHY Status Register */
193#define REG_PHY_STATUS 0x1418
194
195/* BIST Control and Status Register0 (for the Packet Memory) */
196#define REG_BIST0_CTRL 0x141c
197#define BIST0_NOW 0x1
198#define BIST0_SRAM_FAIL 0x2
199#define BIST0_FUSE_FLAG 0x4
200#define REG_BIST1_CTRL 0x1420
201#define BIST1_NOW 0x1
202#define BIST1_SRAM_FAIL 0x2
203#define BIST1_FUSE_FLAG 0x4
204
205/* MAC Control Register */
206#define REG_MAC_CTRL 0x1480
207#define MAC_CTRL_TX_EN 1
208#define MAC_CTRL_RX_EN 2
209#define MAC_CTRL_TX_FLOW 4
210#define MAC_CTRL_RX_FLOW 8
211#define MAC_CTRL_LOOPBACK 0x10
212#define MAC_CTRL_DUPLX 0x20
213#define MAC_CTRL_ADD_CRC 0x40
214#define MAC_CTRL_PAD 0x80
215#define MAC_CTRL_LENCHK 0x100
216#define MAC_CTRL_HUGE_EN 0x200
217#define MAC_CTRL_PRMLEN_SHIFT 10
218#define MAC_CTRL_PRMLEN_MASK 0xf
219#define MAC_CTRL_RMV_VLAN 0x4000
220#define MAC_CTRL_PROMIS_EN 0x8000
221#define MAC_CTRL_TX_PAUSE 0x10000
222#define MAC_CTRL_SCNT 0x20000
223#define MAC_CTRL_SRST_TX 0x40000
224#define MAC_CTRL_TX_SIMURST 0x80000
225#define MAC_CTRL_SPEED_SHIFT 20
226#define MAC_CTRL_SPEED_MASK 0x300000
227#define MAC_CTRL_SPEED_1000 2
228#define MAC_CTRL_SPEED_10_100 1
229#define MAC_CTRL_DBG_TX_BKPRESURE 0x400000
230#define MAC_CTRL_TX_HUGE 0x800000
231#define MAC_CTRL_RX_CHKSUM_EN 0x1000000
232#define MAC_CTRL_MC_ALL_EN 0x2000000
233#define MAC_CTRL_BC_EN 0x4000000
234#define MAC_CTRL_DBG 0x8000000
235
236/* MAC IPG/IFG Control Register */
237#define REG_MAC_IPG_IFG 0x1484
238#define MAC_IPG_IFG_IPGT_SHIFT 0
239#define MAC_IPG_IFG_IPGT_MASK 0x7f
240#define MAC_IPG_IFG_MIFG_SHIFT 8
241#define MAC_IPG_IFG_MIFG_MASK 0xff
242#define MAC_IPG_IFG_IPGR1_SHIFT 16
243#define MAC_IPG_IFG_IPGR1_MASK 0x7f
244#define MAC_IPG_IFG_IPGR2_SHIFT 24
245#define MAC_IPG_IFG_IPGR2_MASK 0x7f
246
247/* MAC STATION ADDRESS */
248#define REG_MAC_STA_ADDR 0x1488
249
250/* Hash table for multicast address */
251#define REG_RX_HASH_TABLE 0x1490
252
253/* MAC Half-Duplex Control Register */
254#define REG_MAC_HALF_DUPLX_CTRL 0x1498
255#define MAC_HALF_DUPLX_CTRL_LCOL_SHIFT 0
256#define MAC_HALF_DUPLX_CTRL_LCOL_MASK 0x3ff
257#define MAC_HALF_DUPLX_CTRL_RETRY_SHIFT 12
258#define MAC_HALF_DUPLX_CTRL_RETRY_MASK 0xf
259#define MAC_HALF_DUPLX_CTRL_EXC_DEF_EN 0x10000
260#define MAC_HALF_DUPLX_CTRL_NO_BACK_C 0x20000
261#define MAC_HALF_DUPLX_CTRL_NO_BACK_P 0x40000
262#define MAC_HALF_DUPLX_CTRL_ABEBE 0x80000
263#define MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT 20
264#define MAC_HALF_DUPLX_CTRL_ABEBT_MASK 0xf
265#define MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT 24
266#define MAC_HALF_DUPLX_CTRL_JAMIPG_MASK 0xf
267
268/* Maximum Frame Length Control Register */
269#define REG_MTU 0x149c
270
271/* Wake-On-Lan control register */
272#define REG_WOL_CTRL 0x14a0
273#define WOL_PATTERN_EN 0x00000001
274#define WOL_PATTERN_PME_EN 0x00000002
275#define WOL_MAGIC_EN 0x00000004
276#define WOL_MAGIC_PME_EN 0x00000008
277#define WOL_LINK_CHG_EN 0x00000010
278#define WOL_LINK_CHG_PME_EN 0x00000020
279#define WOL_PATTERN_ST 0x00000100
280#define WOL_MAGIC_ST 0x00000200
281#define WOL_LINKCHG_ST 0x00000400
282#define WOL_CLK_SWITCH_EN 0x00008000
283#define WOL_PT0_EN 0x00010000
284#define WOL_PT1_EN 0x00020000
285#define WOL_PT2_EN 0x00040000
286#define WOL_PT3_EN 0x00080000
287#define WOL_PT4_EN 0x00100000
288#define WOL_PT5_EN 0x00200000
289#define WOL_PT6_EN 0x00400000
290
291/* WOL Length ( 2 DWORD ) */
292#define REG_WOL_PATTERN_LEN 0x14a4
293#define WOL_PT_LEN_MASK 0x7f
294#define WOL_PT0_LEN_SHIFT 0
295#define WOL_PT1_LEN_SHIFT 8
296#define WOL_PT2_LEN_SHIFT 16
297#define WOL_PT3_LEN_SHIFT 24
298#define WOL_PT4_LEN_SHIFT 0
299#define WOL_PT5_LEN_SHIFT 8
300#define WOL_PT6_LEN_SHIFT 16
301
302/* Internal SRAM Partition Register */
303#define REG_SRAM_RFD_ADDR 0x1500
304#define REG_SRAM_RFD_LEN (REG_SRAM_RFD_ADDR+ 4)
305#define REG_SRAM_RRD_ADDR (REG_SRAM_RFD_ADDR+ 8)
306#define REG_SRAM_RRD_LEN (REG_SRAM_RFD_ADDR+12)
307#define REG_SRAM_TPD_ADDR (REG_SRAM_RFD_ADDR+16)
308#define REG_SRAM_TPD_LEN (REG_SRAM_RFD_ADDR+20)
309#define REG_SRAM_TRD_ADDR (REG_SRAM_RFD_ADDR+24)
310#define REG_SRAM_TRD_LEN (REG_SRAM_RFD_ADDR+28)
311#define REG_SRAM_RXF_ADDR (REG_SRAM_RFD_ADDR+32)
312#define REG_SRAM_RXF_LEN (REG_SRAM_RFD_ADDR+36)
313#define REG_SRAM_TXF_ADDR (REG_SRAM_RFD_ADDR+40)
314#define REG_SRAM_TXF_LEN (REG_SRAM_RFD_ADDR+44)
315#define REG_SRAM_TCPH_PATH_ADDR (REG_SRAM_RFD_ADDR+48)
316#define SRAM_TCPH_ADDR_MASK 0x0fff
317#define SRAM_TCPH_ADDR_SHIFT 0
318#define SRAM_PATH_ADDR_MASK 0x0fff
319#define SRAM_PATH_ADDR_SHIFT 16
320
321/* Load Ptr Register */
322#define REG_LOAD_PTR (REG_SRAM_RFD_ADDR+52)
323
324/* Descriptor Control register */
325#define REG_DESC_BASE_ADDR_HI 0x1540
326#define REG_DESC_RFD_ADDR_LO (REG_DESC_BASE_ADDR_HI+4)
327#define REG_DESC_RRD_ADDR_LO (REG_DESC_BASE_ADDR_HI+8)
328#define REG_DESC_TPD_ADDR_LO (REG_DESC_BASE_ADDR_HI+12)
329#define REG_DESC_CMB_ADDR_LO (REG_DESC_BASE_ADDR_HI+16)
330#define REG_DESC_SMB_ADDR_LO (REG_DESC_BASE_ADDR_HI+20)
331#define REG_DESC_RFD_RRD_RING_SIZE (REG_DESC_BASE_ADDR_HI+24)
332#define DESC_RFD_RING_SIZE_MASK 0x7ff
333#define DESC_RFD_RING_SIZE_SHIFT 0
334#define DESC_RRD_RING_SIZE_MASK 0x7ff
335#define DESC_RRD_RING_SIZE_SHIFT 16
336#define REG_DESC_TPD_RING_SIZE (REG_DESC_BASE_ADDR_HI+28)
337#define DESC_TPD_RING_SIZE_MASK 0x3ff
338#define DESC_TPD_RING_SIZE_SHIFT 0
339
340/* TXQ Control Register */
341#define REG_TXQ_CTRL 0x1580
342#define TXQ_CTRL_TPD_BURST_NUM_SHIFT 0
343#define TXQ_CTRL_TPD_BURST_NUM_MASK 0x1f
344#define TXQ_CTRL_EN 0x20
345#define TXQ_CTRL_ENH_MODE 0x40
346#define TXQ_CTRL_TPD_FETCH_TH_SHIFT 8
347#define TXQ_CTRL_TPD_FETCH_TH_MASK 0x3f
348#define TXQ_CTRL_TXF_BURST_NUM_SHIFT 16
349#define TXQ_CTRL_TXF_BURST_NUM_MASK 0xffff
350
351/* Jumbo packet Threshold for task offload */
352#define REG_TX_JUMBO_TASK_TH_TPD_IPG 0x1584
353#define TX_JUMBO_TASK_TH_MASK 0x7ff
354#define TX_JUMBO_TASK_TH_SHIFT 0
355#define TX_TPD_MIN_IPG_MASK 0x1f
356#define TX_TPD_MIN_IPG_SHIFT 16
357
358/* RXQ Control Register */
359#define REG_RXQ_CTRL 0x15a0
360#define RXQ_CTRL_RFD_BURST_NUM_SHIFT 0
361#define RXQ_CTRL_RFD_BURST_NUM_MASK 0xff
362#define RXQ_CTRL_RRD_BURST_THRESH_SHIFT 8
363#define RXQ_CTRL_RRD_BURST_THRESH_MASK 0xff
364#define RXQ_CTRL_RFD_PREF_MIN_IPG_SHIFT 16
365#define RXQ_CTRL_RFD_PREF_MIN_IPG_MASK 0x1f
366#define RXQ_CTRL_CUT_THRU_EN 0x40000000
367#define RXQ_CTRL_EN 0x80000000
368
369/* Rx jumbo packet threshold and rrd retirement timer */
370#define REG_RXQ_JMBOSZ_RRDTIM (REG_RXQ_CTRL+ 4)
371#define RXQ_JMBOSZ_TH_MASK 0x7ff
372#define RXQ_JMBOSZ_TH_SHIFT 0
373#define RXQ_JMBO_LKAH_MASK 0xf
374#define RXQ_JMBO_LKAH_SHIFT 11
375#define RXQ_RRD_TIMER_MASK 0xffff
376#define RXQ_RRD_TIMER_SHIFT 16
377
378/* RFD flow control register */
379#define REG_RXQ_RXF_PAUSE_THRESH (REG_RXQ_CTRL+ 8)
380#define RXQ_RXF_PAUSE_TH_HI_SHIFT 16
381#define RXQ_RXF_PAUSE_TH_HI_MASK 0xfff
382#define RXQ_RXF_PAUSE_TH_LO_SHIFT 0
383#define RXQ_RXF_PAUSE_TH_LO_MASK 0xfff
384
385/* RRD flow control register */
386#define REG_RXQ_RRD_PAUSE_THRESH (REG_RXQ_CTRL+12)
387#define RXQ_RRD_PAUSE_TH_HI_SHIFT 0
388#define RXQ_RRD_PAUSE_TH_HI_MASK 0xfff
389#define RXQ_RRD_PAUSE_TH_LO_SHIFT 16
390#define RXQ_RRD_PAUSE_TH_LO_MASK 0xfff
391
392/* DMA Engine Control Register */
393#define REG_DMA_CTRL 0x15c0
394#define DMA_CTRL_DMAR_IN_ORDER 0x1
395#define DMA_CTRL_DMAR_ENH_ORDER 0x2
396#define DMA_CTRL_DMAR_OUT_ORDER 0x4
397#define DMA_CTRL_RCB_VALUE 0x8
398#define DMA_CTRL_DMAR_BURST_LEN_SHIFT 4
399#define DMA_CTRL_DMAR_BURST_LEN_MASK 7
400#define DMA_CTRL_DMAW_BURST_LEN_SHIFT 7
401#define DMA_CTRL_DMAW_BURST_LEN_MASK 7
402#define DMA_CTRL_DMAR_EN 0x400
403#define DMA_CTRL_DMAW_EN 0x800
404
405/* CMB/SMB Control Register */
406#define REG_CSMB_CTRL 0x15d0
407#define CSMB_CTRL_CMB_NOW 1
408#define CSMB_CTRL_SMB_NOW 2
409#define CSMB_CTRL_CMB_EN 4
410#define CSMB_CTRL_SMB_EN 8
411
412/* CMB DMA Write Threshold Register */
413#define REG_CMB_WRITE_TH (REG_CSMB_CTRL+ 4)
414#define CMB_RRD_TH_SHIFT 0
415#define CMB_RRD_TH_MASK 0x7ff
416#define CMB_TPD_TH_SHIFT 16
417#define CMB_TPD_TH_MASK 0x7ff
418
419/* RX/TX count-down timer to trigger CMB-write. 2us resolution. */
420#define REG_CMB_WRITE_TIMER (REG_CSMB_CTRL+ 8)
421#define CMB_RX_TM_SHIFT 0
422#define CMB_RX_TM_MASK 0xffff
423#define CMB_TX_TM_SHIFT 16
424#define CMB_TX_TM_MASK 0xffff
425
426/* Number of packets received since the last CMB write */
427#define REG_CMB_RX_PKT_CNT (REG_CSMB_CTRL+12)
428
429/* Number of packets transmitted since the last CMB write */
430#define REG_CMB_TX_PKT_CNT (REG_CSMB_CTRL+16)
431
432/* SMB auto DMA timer register */
433#define REG_SMB_TIMER (REG_CSMB_CTRL+20)
434
435/* Mailbox Register */
436#define REG_MAILBOX 0x15f0
437#define MB_RFD_PROD_INDX_SHIFT 0
438#define MB_RFD_PROD_INDX_MASK 0x7ff
439#define MB_RRD_CONS_INDX_SHIFT 11
440#define MB_RRD_CONS_INDX_MASK 0x7ff
441#define MB_TPD_PROD_INDX_SHIFT 22
442#define MB_TPD_PROD_INDX_MASK 0x3ff
443
444/* Interrupt Status Register */
445#define REG_ISR 0x1600
446#define ISR_SMB 1
447#define ISR_TIMER 2
448#define ISR_MANUAL 4
449#define ISR_RXF_OV 8
450#define ISR_RFD_UNRUN 0x10
451#define ISR_RRD_OV 0x20
452#define ISR_TXF_UNRUN 0x40
453#define ISR_LINK 0x80
454#define ISR_HOST_RFD_UNRUN 0x100
455#define ISR_HOST_RRD_OV 0x200
456#define ISR_DMAR_TO_RST 0x400
457#define ISR_DMAW_TO_RST 0x800
458#define ISR_GPHY 0x1000
459#define ISR_RX_PKT 0x10000
460#define ISR_TX_PKT 0x20000
461#define ISR_TX_DMA 0x40000
462#define ISR_RX_DMA 0x80000
463#define ISR_CMB_RX 0x100000
464#define ISR_CMB_TX 0x200000
465#define ISR_MAC_RX 0x400000
466#define ISR_MAC_TX 0x800000
467#define ISR_UR_DETECTED 0x1000000
468#define ISR_FERR_DETECTED 0x2000000
469#define ISR_NFERR_DETECTED 0x4000000
470#define ISR_CERR_DETECTED 0x8000000
471#define ISR_PHY_LINKDOWN 0x10000000
472#define ISR_DIS_SMB 0x20000000
473#define ISR_DIS_DMA 0x40000000
474#define ISR_DIS_INT 0x80000000
475
476/* Interrupt Mask Register */
477#define REG_IMR 0x1604
478
479/* Normal Interrupt mask */
480#define IMR_NORMAL_MASK (\
481 ISR_SMB |\
482 ISR_GPHY |\
483 ISR_PHY_LINKDOWN|\
484 ISR_DMAR_TO_RST |\
485 ISR_DMAW_TO_RST |\
486 ISR_CMB_TX |\
487 ISR_CMB_RX )
488
489/* Debug Interrupt Mask (enables all interrupts) */
490#define IMR_DEBUG_MASK (\
491 ISR_SMB |\
492 ISR_TIMER |\
493 ISR_MANUAL |\
494 ISR_RXF_OV |\
495 ISR_RFD_UNRUN |\
496 ISR_RRD_OV |\
497 ISR_TXF_UNRUN |\
498 ISR_LINK |\
499 ISR_CMB_TX |\
500 ISR_CMB_RX |\
501 ISR_RX_PKT |\
502 ISR_TX_PKT |\
503 ISR_MAC_RX |\
504 ISR_MAC_TX )
505
506/* Interrupt Status Register */
507#define REG_RFD_RRD_IDX 0x1800
508#define REG_TPD_IDX 0x1804
509
510/* MII definition */
511/* PHY Common Register */
512#define MII_AT001_CR 0x09
513#define MII_AT001_SR 0x0A
514#define MII_AT001_ESR 0x0F
515#define MII_AT001_PSCR 0x10
516#define MII_AT001_PSSR 0x11
517
518/* PHY Control Register */
519#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */
520#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */
521#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
522#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
523#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */
524#define MII_CR_POWER_DOWN 0x0800 /* Power down */
525#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
526#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */
527#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
528#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
529#define MII_CR_SPEED_MASK 0x2040
530#define MII_CR_SPEED_1000 0x0040
531#define MII_CR_SPEED_100 0x2000
532#define MII_CR_SPEED_10 0x0000
533
534/* PHY Status Register */
535#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */
536#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */
537#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
538#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */
539#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */
540#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
541#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
542#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */
543#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */
544#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */
545#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */
546#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */
547#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */
548#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */
549#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */
550
551/* Link partner ability register. */
552#define MII_LPA_SLCT 0x001f /* Same as advertise selector */
553#define MII_LPA_10HALF 0x0020 /* Can do 10mbps half-duplex */
554#define MII_LPA_10FULL 0x0040 /* Can do 10mbps full-duplex */
555#define MII_LPA_100HALF 0x0080 /* Can do 100mbps half-duplex */
556#define MII_LPA_100FULL 0x0100 /* Can do 100mbps full-duplex */
557#define MII_LPA_100BASE4 0x0200 /* 100BASE-T4 */
558#define MII_LPA_PAUSE 0x0400 /* PAUSE */
559#define MII_LPA_ASYPAUSE 0x0800 /* Asymmetrical PAUSE */
560#define MII_LPA_RFAULT 0x2000 /* Link partner faulted */
561#define MII_LPA_LPACK 0x4000 /* Link partner acked us */
562#define MII_LPA_NPAGE 0x8000 /* Next page bit */
563
564/* Autoneg Advertisement Register */
565#define MII_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */
566#define MII_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */
567#define MII_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */
568#define MII_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */
569#define MII_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */
570#define MII_AR_100T4_CAPS 0x0200 /* 100T4 Capable */
571#define MII_AR_PAUSE 0x0400 /* Pause operation desired */
572#define MII_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */
573#define MII_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */
574#define MII_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */
575#define MII_AR_SPEED_MASK 0x01E0
576#define MII_AR_DEFAULT_CAP_MASK 0x0DE0
577
578/* 1000BASE-T Control Register */
579#define MII_AT001_CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */
580#define MII_AT001_CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */
581#define MII_AT001_CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port, 0=DTE device */
582#define MII_AT001_CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master, 0=Configure PHY as Slave */
583#define MII_AT001_CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value, 0=Automatic Master/Slave config */
584#define MII_AT001_CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */
585#define MII_AT001_CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */
586#define MII_AT001_CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */
587#define MII_AT001_CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */
588#define MII_AT001_CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */
589#define MII_AT001_CR_1000T_SPEED_MASK 0x0300
590#define MII_AT001_CR_1000T_DEFAULT_CAP_MASK 0x0300
591
592/* 1000BASE-T Status Register */
593#define MII_AT001_SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */
594#define MII_AT001_SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */
595#define MII_AT001_SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
596#define MII_AT001_SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */
597#define MII_AT001_SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local TX is Master, 0=Slave */
598#define MII_AT001_SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */
599#define MII_AT001_SR_1000T_REMOTE_RX_STATUS_SHIFT 12
600#define MII_AT001_SR_1000T_LOCAL_RX_STATUS_SHIFT 13
601
602/* Extended Status Register */
603#define MII_AT001_ESR_1000T_HD_CAPS 0x1000 /* 1000T HD capable */
604#define MII_AT001_ESR_1000T_FD_CAPS 0x2000 /* 1000T FD capable */
605#define MII_AT001_ESR_1000X_HD_CAPS 0x4000 /* 1000X HD capable */
606#define MII_AT001_ESR_1000X_FD_CAPS 0x8000 /* 1000X FD capable */
607
608/* AT001 PHY Specific Control Register */
609#define MII_AT001_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Function disabled */
610#define MII_AT001_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */
611#define MII_AT001_PSCR_SQE_TEST 0x0004 /* 1=SQE Test enabled */
612#define MII_AT001_PSCR_MAC_POWERDOWN 0x0008
613#define MII_AT001_PSCR_CLK125_DISABLE 0x0010 /* 1=CLK125 low, 0=CLK125 toggling */
614#define MII_AT001_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5, Manual MDI configuration */
615#define MII_AT001_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */
616#define MII_AT001_PSCR_AUTO_X_1000T 0x0040 /* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */
617#define MII_AT001_PSCR_AUTO_X_MODE 0x0060 /* Auto crossover enabled all speeds. */
618#define MII_AT001_PSCR_10BT_EXT_DIST_ENABLE 0x0080 /* 1=Enable Extended 10BASE-T distance (Lower 10BASE-T RX Threshold), 0=Normal 10BASE-T RX Threshold */
619#define MII_AT001_PSCR_MII_5BIT_ENABLE 0x0100 /* 1=5-Bit interface in 100BASE-TX, 0=MII interface in 100BASE-TX */
620#define MII_AT001_PSCR_SCRAMBLER_DISABLE 0x0200 /* 1=Scrambler disable */
621#define MII_AT001_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force link good */
622#define MII_AT001_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */
623#define MII_AT001_PSCR_POLARITY_REVERSAL_SHIFT 1
624#define MII_AT001_PSCR_AUTO_X_MODE_SHIFT 5
625#define MII_AT001_PSCR_10BT_EXT_DIST_ENABLE_SHIFT 7
626
627/* AT001 PHY Specific Status Register */
628#define MII_AT001_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */
629#define MII_AT001_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */
630#define MII_AT001_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */
631#define MII_AT001_PSSR_10MBS 0x0000 /* 00=10Mbs */
632#define MII_AT001_PSSR_100MBS 0x4000 /* 01=100Mbs */
633#define MII_AT001_PSSR_1000MBS 0x8000 /* 10=1000Mbs */
634
635/* PCI Command Register Bit Definitions */
636#define PCI_REG_COMMAND 0x04 /* PCI Command Register */
637#define CMD_IO_SPACE 0x0001
638#define CMD_MEMORY_SPACE 0x0002
639#define CMD_BUS_MASTER 0x0004
640
641/* Wake Up Filter Control */
642#define ATL1_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
643#define ATL1_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
644#define ATL1_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
645#define ATL1_WUFC_MC 0x00000008 /* Multicast Wakeup Enable */
646#define ATL1_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
647
648/* Error Codes */
649#define ATL1_SUCCESS 0
650#define ATL1_ERR_EEPROM 1
651#define ATL1_ERR_PHY 2
652#define ATL1_ERR_CONFIG 3
653#define ATL1_ERR_PARAM 4
654#define ATL1_ERR_MAC_TYPE 5
655#define ATL1_ERR_PHY_TYPE 6
656#define ATL1_ERR_PHY_SPEED 7
657#define ATL1_ERR_PHY_RES 8
658
659#define SPEED_0 0xffff
660#define SPEED_10 10
661#define SPEED_100 100
662#define SPEED_1000 1000
663#define HALF_DUPLEX 1
664#define FULL_DUPLEX 2
665
666#define MEDIA_TYPE_AUTO_SENSOR 0
667#define MEDIA_TYPE_1000M_FULL 1
668#define MEDIA_TYPE_100M_FULL 2
669#define MEDIA_TYPE_100M_HALF 3
670#define MEDIA_TYPE_10M_FULL 4
671#define MEDIA_TYPE_10M_HALF 5
672
673#define ADVERTISE_10_HALF 0x0001
674#define ADVERTISE_10_FULL 0x0002
675#define ADVERTISE_100_HALF 0x0004
676#define ADVERTISE_100_FULL 0x0008
677#define ADVERTISE_1000_HALF 0x0010
678#define ADVERTISE_1000_FULL 0x0020
679#define AUTONEG_ADVERTISE_SPEED_DEFAULT 0x002F /* Everything but 1000-Half */
680#define AUTONEG_ADVERTISE_10_100_ALL 0x000F /* All 10/100 speeds */
681#define AUTONEG_ADVERTISE_10_ALL 0x0003 /* 10Mbps Full & Half speeds */
682
683/* The size (in bytes) of an Ethernet packet */
684#define ENET_HEADER_SIZE 14
685#define MAXIMUM_ETHERNET_FRAME_SIZE 1518 /* with FCS */
686#define MINIMUM_ETHERNET_FRAME_SIZE 64 /* with FCS */
687#define ETHERNET_FCS_SIZE 4
688#define MAX_JUMBO_FRAME_SIZE 0x2800
689
690#define PHY_AUTO_NEG_TIME 45 /* 4.5 Seconds */
691#define PHY_FORCE_TIME 20 /* 2.0 Seconds */
692
693/* For checksumming, the sum of all words in the EEPROM should equal 0xBABA */
694#define EEPROM_SUM 0xBABA
695
696#define ATL1_EEDUMP_LEN 48
697
698/* Statistics counters collected by the MAC */
699struct stats_msg_block {
700 /* rx */
701	u32 rx_ok;	/* The number of good packets received. */
702	u32 rx_bcast;	/* The number of good broadcast packets received. */
703	u32 rx_mcast;	/* The number of good multicast packets received. */
704	u32 rx_pause;	/* The number of Pause packets received. */
705	u32 rx_ctrl;	/* The number of Control packets received, other than Pause frames. */
706	u32 rx_fcs_err;	/* The number of packets with bad FCS. */
707	u32 rx_len_err;	/* The number of packets whose length field does not match the actual size. */
708	u32 rx_byte_cnt;	/* The number of bytes of good packets received. FCS is NOT included. */
709	u32 rx_runt;	/* The number of packets received that are less than 64 bytes long with good FCS. */
710	u32 rx_frag;	/* The number of packets received that are less than 64 bytes long with bad FCS. */
711	u32 rx_sz_64;	/* The number of good and bad packets received that are 64 bytes long. */
712	u32 rx_sz_65_127;	/* The number of good and bad packets received that are between 65 and 127 bytes long. */
713	u32 rx_sz_128_255;	/* The number of good and bad packets received that are between 128 and 255 bytes long. */
714	u32 rx_sz_256_511;	/* The number of good and bad packets received that are between 256 and 511 bytes long. */
715	u32 rx_sz_512_1023;	/* The number of good and bad packets received that are between 512 and 1023 bytes long. */
716	u32 rx_sz_1024_1518;	/* The number of good and bad packets received that are between 1024 and 1518 bytes long. */
717	u32 rx_sz_1519_max;	/* The number of good and bad packets received that are between 1519 bytes and the MTU. */
718	u32 rx_sz_ov;	/* The number of good and bad packets received that are larger than the MTU, truncated by Selene. */
719	u32 rx_rxf_ov;	/* The number of frames dropped due to RX FIFO overflow. */
720	u32 rx_rrd_ov;	/* The number of frames dropped due to RRD overflow. */
721	u32 rx_align_err;	/* Alignment errors. */
722	u32 rx_bcast_byte_cnt;	/* The byte count of broadcast packets received, excluding FCS. */
723	u32 rx_mcast_byte_cnt;	/* The byte count of multicast packets received, excluding FCS. */
724	u32 rx_err_addr;	/* The number of packets dropped due to address filtering. */
725
726 /* tx */
727	u32 tx_ok;	/* The number of good packets transmitted. */
728	u32 tx_bcast;	/* The number of good broadcast packets transmitted. */
729	u32 tx_mcast;	/* The number of good multicast packets transmitted. */
730	u32 tx_pause;	/* The number of Pause packets transmitted. */
731	u32 tx_exc_defer;	/* The number of packets transmitted with excessive deferral. */
732	u32 tx_ctrl;	/* The number of control frames transmitted, excluding Pause frames. */
733	u32 tx_defer;	/* The number of packets whose transmission was deferred. */
734	u32 tx_byte_cnt;	/* The number of bytes of data transmitted. FCS is NOT included. */
735	u32 tx_sz_64;	/* The number of good and bad packets transmitted that are 64 bytes long. */
736	u32 tx_sz_65_127;	/* The number of good and bad packets transmitted that are between 65 and 127 bytes long. */
737	u32 tx_sz_128_255;	/* The number of good and bad packets transmitted that are between 128 and 255 bytes long. */
738	u32 tx_sz_256_511;	/* The number of good and bad packets transmitted that are between 256 and 511 bytes long. */
739	u32 tx_sz_512_1023;	/* The number of good and bad packets transmitted that are between 512 and 1023 bytes long. */
740	u32 tx_sz_1024_1518;	/* The number of good and bad packets transmitted that are between 1024 and 1518 bytes long. */
741	u32 tx_sz_1519_max;	/* The number of good and bad packets transmitted that are between 1519 bytes and the MTU. */
742	u32 tx_1_col;	/* The number of packets transmitted successfully after a single prior collision. */
743	u32 tx_2_col;	/* The number of packets transmitted successfully after multiple prior collisions. */
744	u32 tx_late_col;	/* The number of packets transmitted with late collisions. */
745	u32 tx_abort_col;	/* The number of transmit packets aborted due to excessive collisions. */
746	u32 tx_underrun;	/* The number of transmit packets aborted due to transmit FIFO underrun or TRD FIFO underrun. */
747	u32 tx_rd_eop;	/* The number of reads beyond the EOP into the next frame area when the TRD was not written in time. */
748	u32 tx_len_err;	/* The number of transmit packets whose length field does NOT match the actual frame size. */
749	u32 tx_trunc;	/* The number of transmit packets truncated because their size exceeded the MTU. */
750	u32 tx_bcast_byte;	/* The byte count of broadcast packets transmitted, excluding FCS. */
751	u32 tx_mcast_byte;	/* The byte count of multicast packets transmitted, excluding FCS. */
752	u32 smb_updated;	/* 1: SMB updated. Used by software as the indication of a statistics update.
753			 * Software should clear this bit as soon as it has retrieved the statistics. */
754};
755
756/* Coalescing Message Block */
757struct coals_msg_block {
758 u32 int_stats; /* interrupt status */
759	u16 rrd_prod_idx;	/* RRD Producer Index. */
760	u16 rfd_cons_idx;	/* RFD Consumer Index. */
761	u16 update;	/* Selene sets this bit every time it DMAs the CMB to host memory.
762			 * Software is supposed to clear this bit once the CMB information has been processed. */
763 u16 tpd_cons_idx; /* TPD Consumer Index. */
764};
765
766/* RRD descriptor */
767struct rx_return_desc {
768 u8 num_buf; /* Number of RFD buffers used by the received packet */
769 u8 resved;
770 u16 buf_indx; /* RFD Index of the first buffer */
771 union {
772 u32 valid;
773 struct {
774 u16 rx_chksum;
775 u16 pkt_size;
776 } xsum_sz;
777 } xsz;
778
779 u16 pkt_flg; /* Packet flags */
780 u16 err_flg; /* Error flags */
781 u16 resved2;
782 u16 vlan_tag; /* VLAN TAG */
783};
784
785#define PACKET_FLAG_ETH_TYPE 0x0080
786#define PACKET_FLAG_VLAN_INS 0x0100
787#define PACKET_FLAG_ERR 0x0200
788#define PACKET_FLAG_IPV4 0x0400
789#define PACKET_FLAG_UDP 0x0800
790#define PACKET_FLAG_TCP 0x1000
791#define PACKET_FLAG_BCAST 0x2000
792#define PACKET_FLAG_MCAST 0x4000
793#define PACKET_FLAG_PAUSE 0x8000
794
795#define ERR_FLAG_CRC 0x0001
796#define ERR_FLAG_CODE 0x0002
797#define ERR_FLAG_DRIBBLE 0x0004
798#define ERR_FLAG_RUNT 0x0008
799#define ERR_FLAG_OV 0x0010
800#define ERR_FLAG_TRUNC 0x0020
801#define ERR_FLAG_IP_CHKSUM 0x0040
802#define ERR_FLAG_L4_CHKSUM 0x0080
803#define ERR_FLAG_LEN 0x0100
804#define ERR_FLAG_DES_ADDR 0x0200
805
806/* RFD descriptor */
807struct rx_free_desc {
808 __le64 buffer_addr; /* Address of the descriptor's data buffer */
809	__le16 buf_len;	/* Size of the receive buffer in host memory, in bytes */
810 u16 coalese; /* Update consumer index to host after the reception of this frame */
811 /* __attribute__ ((packed)) is required */
812} __attribute__ ((packed));
813
814/* tsopu defines */
815#define TSO_PARAM_BUFLEN_MASK 0x3FFF
816#define TSO_PARAM_BUFLEN_SHIFT 0
817#define TSO_PARAM_DMAINT_MASK 0x0001
818#define TSO_PARAM_DMAINT_SHIFT 14
819#define TSO_PARAM_PKTNT_MASK 0x0001
820#define TSO_PARAM_PKTINT_SHIFT 15
821#define TSO_PARAM_VLANTAG_MASK 0xFFFF
822#define TSO_PARAM_VLAN_SHIFT 16
823
824/* tsopl defines */
825#define TSO_PARAM_EOP_MASK 0x0001
826#define TSO_PARAM_EOP_SHIFT 0
827#define TSO_PARAM_COALESCE_MASK 0x0001
828#define TSO_PARAM_COALESCE_SHIFT 1
829#define TSO_PARAM_INSVLAG_MASK 0x0001
830#define TSO_PARAM_INSVLAG_SHIFT 2
831#define TSO_PARAM_CUSTOMCKSUM_MASK 0x0001
832#define TSO_PARAM_CUSTOMCKSUM_SHIFT 3
833#define TSO_PARAM_SEGMENT_MASK 0x0001
834#define TSO_PARAM_SEGMENT_SHIFT 4
835#define TSO_PARAM_IPCKSUM_MASK 0x0001
836#define TSO_PARAM_IPCKSUM_SHIFT 5
837#define TSO_PARAM_TCPCKSUM_MASK 0x0001
838#define TSO_PARAM_TCPCKSUM_SHIFT 6
839#define TSO_PARAM_UDPCKSUM_MASK 0x0001
840#define TSO_PARAM_UDPCKSUM_SHIFT 7
841#define TSO_PARAM_VLANTAGGED_MASK 0x0001
842#define TSO_PARAM_VLANTAGGED_SHIFT 8
843#define TSO_PARAM_ETHTYPE_MASK 0x0001
844#define TSO_PARAM_ETHTYPE_SHIFT 9
845#define TSO_PARAM_IPHL_MASK 0x000F
846#define TSO_PARAM_IPHL_SHIFT 10
847#define TSO_PARAM_TCPHDRLEN_MASK 0x000F
848#define TSO_PARAM_TCPHDRLEN_SHIFT 14
849#define TSO_PARAM_HDRFLAG_MASK 0x0001
850#define TSO_PARAM_HDRFLAG_SHIFT 18
851#define TSO_PARAM_MSS_MASK 0x1FFF
852#define TSO_PARAM_MSS_SHIFT 19
853
854/* csumpu defines */
855#define CSUM_PARAM_BUFLEN_MASK 0x3FFF
856#define CSUM_PARAM_BUFLEN_SHIFT 0
857#define CSUM_PARAM_DMAINT_MASK 0x0001
858#define CSUM_PARAM_DMAINT_SHIFT 14
859#define CSUM_PARAM_PKTINT_MASK 0x0001
860#define CSUM_PARAM_PKTINT_SHIFT 15
861#define CSUM_PARAM_VALANTAG_MASK 0xFFFF
862#define CSUM_PARAM_VALAN_SHIFT 16
863
864/* csumpl defines*/
865#define CSUM_PARAM_EOP_MASK 0x0001
866#define CSUM_PARAM_EOP_SHIFT 0
867#define CSUM_PARAM_COALESCE_MASK 0x0001
868#define CSUM_PARAM_COALESCE_SHIFT 1
869#define CSUM_PARAM_INSVLAG_MASK 0x0001
870#define CSUM_PARAM_INSVLAG_SHIFT 2
871#define CSUM_PARAM_CUSTOMCKSUM_MASK 0x0001
872#define CSUM_PARAM_CUSTOMCKSUM_SHIFT 3
873#define CSUM_PARAM_SEGMENT_MASK 0x0001
874#define CSUM_PARAM_SEGMENT_SHIFT 4
875#define CSUM_PARAM_IPCKSUM_MASK 0x0001
876#define CSUM_PARAM_IPCKSUM_SHIFT 5
877#define CSUM_PARAM_TCPCKSUM_MASK 0x0001
878#define CSUM_PARAM_TCPCKSUM_SHIFT 6
879#define CSUM_PARAM_UDPCKSUM_MASK 0x0001
880#define CSUM_PARAM_UDPCKSUM_SHIFT 7
881#define CSUM_PARAM_VLANTAGGED_MASK 0x0001
882#define CSUM_PARAM_VLANTAGGED_SHIFT 8
883#define CSUM_PARAM_ETHTYPE_MASK 0x0001
884#define CSUM_PARAM_ETHTYPE_SHIFT 9
885#define CSUM_PARAM_IPHL_MASK 0x000F
886#define CSUM_PARAM_IPHL_SHIFT 10
887#define CSUM_PARAM_PLOADOFFSET_MASK 0x00FF
888#define CSUM_PARAM_PLOADOFFSET_SHIFT 16
889#define CSUM_PARAM_XSUMOFFSET_MASK 0x00FF
890#define CSUM_PARAM_XSUMOFFSET_SHIFT 24
891
892/* TPD descriptor */
893struct tso_param {
894 /* The order of these declarations is important -- don't change it */
895 u32 tsopu; /* tso_param upper word */
896 u32 tsopl; /* tso_param lower word */
897};
898
899struct csum_param {
900 /* The order of these declarations is important -- don't change it */
901 u32 csumpu; /* csum_param upper word */
902 u32 csumpl; /* csum_param lower word */
903};
904
905union tpd_descr {
906 u64 data;
907 struct csum_param csum;
908 struct tso_param tso;
909};
910
911struct tx_packet_desc {
912 __le64 buffer_addr;
913 union tpd_descr desc;
914};
915
916/* DMA Order Settings */
917enum atl1_dma_order {
918 atl1_dma_ord_in = 1,
919 atl1_dma_ord_enh = 2,
920 atl1_dma_ord_out = 4
921};
922
923enum atl1_dma_rcb {
924 atl1_rcb_64 = 0,
925 atl1_rcb_128 = 1
926};
927
928enum atl1_dma_req_block {
929 atl1_dma_req_128 = 0,
930 atl1_dma_req_256 = 1,
931 atl1_dma_req_512 = 2,
932 atl1_dam_req_1024 = 3,
933 atl1_dam_req_2048 = 4,
934 atl1_dma_req_4096 = 5
935};
936
937struct atl1_spi_flash_dev {
938 const char *manu_name; /* manufacturer id */
939 /* op-code */
940 u8 cmd_wrsr;
941 u8 cmd_read;
942 u8 cmd_program;
943 u8 cmd_wren;
944 u8 cmd_wrdi;
945 u8 cmd_rdsr;
946 u8 cmd_rdid;
947 u8 cmd_sector_erase;
948 u8 cmd_chip_erase;
949};
950
951#endif /* _ATL1_HW_H_ */
diff --git a/drivers/net/atl1/atl1_main.c b/drivers/net/atl1/atl1_main.c
new file mode 100644
index 000000000000..6655640eb4ca
--- /dev/null
+++ b/drivers/net/atl1/atl1_main.c
@@ -0,0 +1,2468 @@
1/*
2 * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
3 * Copyright(c) 2006 Chris Snook <csnook@redhat.com>
4 * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
5 *
6 * Derived from Intel e1000 driver
7 * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the Free
11 * Software Foundation; either version 2 of the License, or (at your option)
12 * any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * more details.
18 *
19 * You should have received a copy of the GNU General Public License along with
20 * this program; if not, write to the Free Software Foundation, Inc., 59
21 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
22 *
23 * The full GNU General Public License is included in this distribution in the
24 * file called COPYING.
25 *
26 * Contact Information:
27 * Xiong Huang <xiong_huang@attansic.com>
28 * Attansic Technology Corp. 3F 147, Xianzheng 9th Road, Zhubei,
29 * Xinzhu 302, TAIWAN, REPUBLIC OF CHINA
30 *
31 * Chris Snook <csnook@redhat.com>
32 * Jay Cliburn <jcliburn@gmail.com>
33 *
34 * This version is adapted from the Attansic reference driver for
35 * inclusion in the Linux kernel. It is currently under heavy development.
36 * A very incomplete list of things that need to be dealt with:
37 *
38 * TODO:
39 * Fix TSO; tx performance is horrible with TSO enabled.
40 * Wake on LAN.
41 * Add more ethtool functions, including set ring parameters.
42 * Fix abstruse irq enable/disable condition described here:
43 * http://marc.theaimsgroup.com/?l=linux-netdev&m=116398508500553&w=2
44 *
45 * NEEDS TESTING:
46 * VLAN
47 * multicast
48 * promiscuous mode
49 * interrupt coalescing
50 * SMP torture testing
51 */
52
53#include <linux/types.h>
54#include <linux/netdevice.h>
55#include <linux/pci.h>
56#include <linux/spinlock.h>
57#include <linux/slab.h>
58#include <linux/string.h>
59#include <linux/skbuff.h>
60#include <linux/etherdevice.h>
61#include <linux/if_vlan.h>
62#include <linux/irqreturn.h>
63#include <linux/workqueue.h>
64#include <linux/timer.h>
65#include <linux/jiffies.h>
66#include <linux/hardirq.h>
67#include <linux/interrupt.h>
68#include <linux/irqflags.h>
69#include <linux/dma-mapping.h>
70#include <linux/net.h>
71#include <linux/pm.h>
72#include <linux/in.h>
73#include <linux/ip.h>
74#include <linux/tcp.h>
75#include <linux/compiler.h>
76#include <linux/delay.h>
77#include <linux/mii.h>
78#include <net/checksum.h>
79
80#include <asm/atomic.h>
81#include <asm/byteorder.h>
82
83#include "atl1.h"
84
85#define RUN_REALTIME 0
86#define DRIVER_VERSION "2.0.6"
87
88char atl1_driver_name[] = "atl1";
89static const char atl1_driver_string[] = "Attansic L1 Ethernet Network Driver";
90static const char atl1_copyright[] = "Copyright(c) 2005-2006 Attansic Corporation.";
91char atl1_driver_version[] = DRIVER_VERSION;
92
93MODULE_AUTHOR
94 ("Attansic Corporation <xiong_huang@attansic.com>, Chris Snook <csnook@redhat.com>, Jay Cliburn <jcliburn@gmail.com>");
95MODULE_DESCRIPTION("Attansic 1000M Ethernet Network Driver");
96MODULE_LICENSE("GPL");
97MODULE_VERSION(DRIVER_VERSION);
98
99/*
100 * atl1_pci_tbl - PCI Device ID Table
101 */
102static const struct pci_device_id atl1_pci_tbl[] = {
103 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, 0x1048)},
104 /* required last entry */
105 {0,}
106};
107
108MODULE_DEVICE_TABLE(pci, atl1_pci_tbl);
109
110/*
111 * atl1_sw_init - Initialize general software structures (struct atl1_adapter)
112 * @adapter: board private structure to initialize
113 *
114 * atl1_sw_init initializes the Adapter private data structure.
115 * Fields are initialized based on PCI device information and
116 * OS network device settings (MTU size).
117 */
118static int __devinit atl1_sw_init(struct atl1_adapter *adapter)
119{
120 struct atl1_hw *hw = &adapter->hw;
121 struct net_device *netdev = adapter->netdev;
122 struct pci_dev *pdev = adapter->pdev;
123
124 /* PCI config space info */
125 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
126
127 hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
128 hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
129
130 adapter->wol = 0;
131 adapter->rx_buffer_len = (hw->max_frame_size + 7) & ~7;
132 adapter->ict = 50000; /* 100ms */
133 adapter->link_speed = SPEED_0; /* hardware init */
134 adapter->link_duplex = FULL_DUPLEX;
135
136 hw->phy_configured = false;
137 hw->preamble_len = 7;
138 hw->ipgt = 0x60;
139 hw->min_ifg = 0x50;
140 hw->ipgr1 = 0x40;
141 hw->ipgr2 = 0x60;
142 hw->max_retry = 0xf;
143 hw->lcol = 0x37;
144 hw->jam_ipg = 7;
145 hw->rfd_burst = 8;
146 hw->rrd_burst = 8;
147 hw->rfd_fetch_gap = 1;
148 hw->rx_jumbo_th = adapter->rx_buffer_len / 8;
149 hw->rx_jumbo_lkah = 1;
150 hw->rrd_ret_timer = 16;
151 hw->tpd_burst = 4;
152 hw->tpd_fetch_th = 16;
153 hw->txf_burst = 0x100;
154 hw->tx_jumbo_task_th = (hw->max_frame_size + 7) >> 3;
155 hw->tpd_fetch_gap = 1;
156 hw->rcb_value = atl1_rcb_64;
157 hw->dma_ord = atl1_dma_ord_enh;
158 hw->dmar_block = atl1_dma_req_256;
159 hw->dmaw_block = atl1_dma_req_256;
160 hw->cmb_rrd = 4;
161 hw->cmb_tpd = 4;
162 hw->cmb_rx_timer = 1; /* about 2us */
163 hw->cmb_tx_timer = 1; /* about 2us */
164 hw->smb_timer = 100000; /* about 200ms */
165
166 atomic_set(&adapter->irq_sem, 0);
167 spin_lock_init(&adapter->lock);
168 spin_lock_init(&adapter->mb_lock);
169
170 return 0;
171}
172
173/*
174 * atl1_setup_ring_resources - allocate Tx / Rx descriptor resources
175 * @adapter: board private structure
176 *
177 * Return 0 on success, negative on failure
178 */
179s32 atl1_setup_ring_resources(struct atl1_adapter *adapter)
180{
181 struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
182 struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
183 struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
184 struct atl1_ring_header *ring_header = &adapter->ring_header;
185 struct pci_dev *pdev = adapter->pdev;
186 int size;
187 u8 offset = 0;
188
189 size = sizeof(struct atl1_buffer) * (tpd_ring->count + rfd_ring->count);
190 tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL);
191 if (unlikely(!tpd_ring->buffer_info)) {
192		printk(KERN_WARNING "%s: kzalloc failed, size = %d\n",
193 atl1_driver_name, size);
194 goto err_nomem;
195 }
196 rfd_ring->buffer_info =
197 (struct atl1_buffer *)(tpd_ring->buffer_info + tpd_ring->count);
198
199 /* real ring DMA buffer */
200 ring_header->size = size = sizeof(struct tx_packet_desc) *
201 tpd_ring->count
202 + sizeof(struct rx_free_desc) * rfd_ring->count
203 + sizeof(struct rx_return_desc) * rrd_ring->count
204 + sizeof(struct coals_msg_block)
205 + sizeof(struct stats_msg_block)
206		+ 40;	/* presumably 8 bytes of alignment slack for each of the five blocks above (5 * 8 = 40) */
207
208 ring_header->desc = pci_alloc_consistent(pdev, ring_header->size,
209 &ring_header->dma);
210 if (unlikely(!ring_header->desc)) {
211 printk(KERN_WARNING
212 "%s: pci_alloc_consistent failed, size = D%d\n",
213 atl1_driver_name, size);
214 goto err_nomem;
215 }
216
217 memset(ring_header->desc, 0, ring_header->size);
218
219 /* init TPD ring */
220 tpd_ring->dma = ring_header->dma;
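	/*
	 * Round each block up to the next 8-byte boundary; this offset
	 * idiom repeats below for the RFD/RRD rings and the CMB/SMB.
	 */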
221 offset = (tpd_ring->dma & 0x7) ? (8 - (ring_header->dma & 0x7)) : 0;
222 tpd_ring->dma += offset;
223 tpd_ring->desc = (u8 *) ring_header->desc + offset;
224 tpd_ring->size = sizeof(struct tx_packet_desc) * tpd_ring->count;
225 atomic_set(&tpd_ring->next_to_use, 0);
226 atomic_set(&tpd_ring->next_to_clean, 0);
227
228 /* init RFD ring */
229 rfd_ring->dma = tpd_ring->dma + tpd_ring->size;
230 offset = (rfd_ring->dma & 0x7) ? (8 - (rfd_ring->dma & 0x7)) : 0;
231 rfd_ring->dma += offset;
232 rfd_ring->desc = (u8 *) tpd_ring->desc + (tpd_ring->size + offset);
233 rfd_ring->size = sizeof(struct rx_free_desc) * rfd_ring->count;
234 rfd_ring->next_to_clean = 0;
235 /* rfd_ring->next_to_use = rfd_ring->count - 1; */
236 atomic_set(&rfd_ring->next_to_use, 0);
237
238 /* init RRD ring */
239 rrd_ring->dma = rfd_ring->dma + rfd_ring->size;
240 offset = (rrd_ring->dma & 0x7) ? (8 - (rrd_ring->dma & 0x7)) : 0;
241 rrd_ring->dma += offset;
242 rrd_ring->desc = (u8 *) rfd_ring->desc + (rfd_ring->size + offset);
243 rrd_ring->size = sizeof(struct rx_return_desc) * rrd_ring->count;
244 rrd_ring->next_to_use = 0;
245 atomic_set(&rrd_ring->next_to_clean, 0);
246
247 /* init CMB */
248 adapter->cmb.dma = rrd_ring->dma + rrd_ring->size;
249 offset = (adapter->cmb.dma & 0x7) ? (8 - (adapter->cmb.dma & 0x7)) : 0;
250 adapter->cmb.dma += offset;
251 adapter->cmb.cmb =
252 (struct coals_msg_block *) ((u8 *) rrd_ring->desc +
253 (rrd_ring->size + offset));
254
255 /* init SMB */
256 adapter->smb.dma = adapter->cmb.dma + sizeof(struct coals_msg_block);
257 offset = (adapter->smb.dma & 0x7) ? (8 - (adapter->smb.dma & 0x7)) : 0;
258 adapter->smb.dma += offset;
259 adapter->smb.smb = (struct stats_msg_block *)
260 ((u8 *) adapter->cmb.cmb + (sizeof(struct coals_msg_block) + offset));
261
262 return ATL1_SUCCESS;
263
264err_nomem:
265 kfree(tpd_ring->buffer_info);
266 return -ENOMEM;
267}
268
269/*
270 * atl1_irq_enable - Enable default interrupt generation settings
271 * @adapter: board private structure
272 */
273static void atl1_irq_enable(struct atl1_adapter *adapter)
274{
275 if (likely(!atomic_dec_and_test(&adapter->irq_sem)))
276 iowrite32(IMR_NORMAL_MASK, adapter->hw.hw_addr + REG_IMR);
277}
278
279static void atl1_clear_phy_int(struct atl1_adapter *adapter)
280{
281 u16 phy_data;
282 unsigned long flags;
283
284 spin_lock_irqsave(&adapter->lock, flags);
285 atl1_read_phy_reg(&adapter->hw, 19, &phy_data);
286 spin_unlock_irqrestore(&adapter->lock, flags);
287}
288
289static void atl1_inc_smb(struct atl1_adapter *adapter)
290{
291 struct stats_msg_block *smb = adapter->smb.smb;
292
293 /* Fill out the OS statistics structure */
294 adapter->soft_stats.rx_packets += smb->rx_ok;
295 adapter->soft_stats.tx_packets += smb->tx_ok;
296 adapter->soft_stats.rx_bytes += smb->rx_byte_cnt;
297 adapter->soft_stats.tx_bytes += smb->tx_byte_cnt;
298 adapter->soft_stats.multicast += smb->rx_mcast;
299 adapter->soft_stats.collisions += (smb->tx_1_col +
300 smb->tx_2_col * 2 +
301 smb->tx_late_col +
302 smb->tx_abort_col *
303 adapter->hw.max_retry);
304
305 /* Rx Errors */
306 adapter->soft_stats.rx_errors += (smb->rx_frag +
307 smb->rx_fcs_err +
308 smb->rx_len_err +
309 smb->rx_sz_ov +
310 smb->rx_rxf_ov +
311 smb->rx_rrd_ov + smb->rx_align_err);
312 adapter->soft_stats.rx_fifo_errors += smb->rx_rxf_ov;
313 adapter->soft_stats.rx_length_errors += smb->rx_len_err;
314 adapter->soft_stats.rx_crc_errors += smb->rx_fcs_err;
315 adapter->soft_stats.rx_frame_errors += smb->rx_align_err;
316 adapter->soft_stats.rx_missed_errors += (smb->rx_rrd_ov +
317 smb->rx_rxf_ov);
318
319 adapter->soft_stats.rx_pause += smb->rx_pause;
320 adapter->soft_stats.rx_rrd_ov += smb->rx_rrd_ov;
321 adapter->soft_stats.rx_trunc += smb->rx_sz_ov;
322
323 /* Tx Errors */
324 adapter->soft_stats.tx_errors += (smb->tx_late_col +
325 smb->tx_abort_col +
326 smb->tx_underrun + smb->tx_trunc);
327 adapter->soft_stats.tx_fifo_errors += smb->tx_underrun;
328 adapter->soft_stats.tx_aborted_errors += smb->tx_abort_col;
329 adapter->soft_stats.tx_window_errors += smb->tx_late_col;
330
331 adapter->soft_stats.excecol += smb->tx_abort_col;
332 adapter->soft_stats.deffer += smb->tx_defer;
333 adapter->soft_stats.scc += smb->tx_1_col;
334 adapter->soft_stats.mcc += smb->tx_2_col;
335 adapter->soft_stats.latecol += smb->tx_late_col;
336 adapter->soft_stats.tx_underun += smb->tx_underrun;
337 adapter->soft_stats.tx_trunc += smb->tx_trunc;
338 adapter->soft_stats.tx_pause += smb->tx_pause;
339
340 adapter->net_stats.rx_packets = adapter->soft_stats.rx_packets;
341 adapter->net_stats.tx_packets = adapter->soft_stats.tx_packets;
342 adapter->net_stats.rx_bytes = adapter->soft_stats.rx_bytes;
343 adapter->net_stats.tx_bytes = adapter->soft_stats.tx_bytes;
344 adapter->net_stats.multicast = adapter->soft_stats.multicast;
345 adapter->net_stats.collisions = adapter->soft_stats.collisions;
346 adapter->net_stats.rx_errors = adapter->soft_stats.rx_errors;
347 adapter->net_stats.rx_over_errors =
348 adapter->soft_stats.rx_missed_errors;
349 adapter->net_stats.rx_length_errors =
350 adapter->soft_stats.rx_length_errors;
351 adapter->net_stats.rx_crc_errors = adapter->soft_stats.rx_crc_errors;
352 adapter->net_stats.rx_frame_errors =
353 adapter->soft_stats.rx_frame_errors;
354 adapter->net_stats.rx_fifo_errors = adapter->soft_stats.rx_fifo_errors;
355 adapter->net_stats.rx_missed_errors =
356 adapter->soft_stats.rx_missed_errors;
357 adapter->net_stats.tx_errors = adapter->soft_stats.tx_errors;
358 adapter->net_stats.tx_fifo_errors = adapter->soft_stats.tx_fifo_errors;
359 adapter->net_stats.tx_aborted_errors =
360 adapter->soft_stats.tx_aborted_errors;
361 adapter->net_stats.tx_window_errors =
362 adapter->soft_stats.tx_window_errors;
363 adapter->net_stats.tx_carrier_errors =
364 adapter->soft_stats.tx_carrier_errors;
365}
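/*
 * Editor's note (illustrative): the collision total above counts each
 * double collision twice and weights every aborted transmit by
 * hw->max_retry, on the reasoning that an abort means the MAC
 * exhausted its full retry budget of collisions before giving up.
 */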
366
367static void atl1_rx_checksum(struct atl1_adapter *adapter,
368 struct rx_return_desc *rrd,
369 struct sk_buff *skb)
370{
371 skb->ip_summed = CHECKSUM_NONE;
372
373 if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) {
374 if (rrd->err_flg & (ERR_FLAG_CRC | ERR_FLAG_TRUNC |
375 ERR_FLAG_CODE | ERR_FLAG_OV)) {
376 adapter->hw_csum_err++;
377 printk(KERN_DEBUG "%s: rx checksum error\n",
378 atl1_driver_name);
379 return;
380 }
381 }
382
383 /* not IPv4 */
384 if (!(rrd->pkt_flg & PACKET_FLAG_IPV4))
385 /* checksum is invalid, but it's not an IPv4 pkt, so ok */
386 return;
387
388 /* IPv4 packet */
389 if (likely(!(rrd->err_flg &
390 (ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM)))) {
391 skb->ip_summed = CHECKSUM_UNNECESSARY;
392 adapter->hw_csum_good++;
393 return;
394 }
395
396 /* IPv4, but hardware thinks its checksum is wrong */
397 printk(KERN_DEBUG "%s: hw csum wrong pkt_flag:%x, err_flag:%x\n",
398 atl1_driver_name, rrd->pkt_flg, rrd->err_flg);
399 skb->ip_summed = CHECKSUM_COMPLETE;
400 skb->csum = htons(rrd->xsz.xsum_sz.rx_chksum);
401 adapter->hw_csum_err++;
402 return;
403}
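/*
 * Editor's note: the routine above leaves ip_summed at CHECKSUM_NONE
 * for frames with CRC/truncation/code/overflow errors and for
 * non-IPv4 frames, upgrades clean IPv4 frames to CHECKSUM_UNNECESSARY,
 * and passes IPv4 frames the hardware flagged as bad up the stack as
 * CHECKSUM_COMPLETE with the hardware checksum in skb->csum, so the
 * stack can re-verify them.
 */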
404
405/*
406 * atl1_alloc_rx_buffers - Replace used receive buffers
407 * @adapter: address of board private structure
408 */
409static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter)
410{
411 struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
412 struct net_device *netdev = adapter->netdev;
413 struct pci_dev *pdev = adapter->pdev;
414 struct page *page;
415 unsigned long offset;
416 struct atl1_buffer *buffer_info, *next_info;
417 struct sk_buff *skb;
418 u16 num_alloc = 0;
419 u16 rfd_next_to_use, next_next;
420 struct rx_free_desc *rfd_desc;
421
422 next_next = rfd_next_to_use = atomic_read(&rfd_ring->next_to_use);
423 if (++next_next == rfd_ring->count)
424 next_next = 0;
425 buffer_info = &rfd_ring->buffer_info[rfd_next_to_use];
426 next_info = &rfd_ring->buffer_info[next_next];
427
428 while (!buffer_info->alloced && !next_info->alloced) {
429 if (buffer_info->skb) {
430 buffer_info->alloced = 1;
431 goto next;
432 }
433
434 rfd_desc = ATL1_RFD_DESC(rfd_ring, rfd_next_to_use);
435
436 skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN);
437 if (unlikely(!skb)) { /* Better luck next round */
438 adapter->net_stats.rx_dropped++;
439 break;
440 }
441
442 /*
443 * Make buffer alignment 2 beyond a 16 byte boundary;
444 * this will result in a 16 byte aligned IP header after
445 * the 14 byte MAC header is removed
446 */
447 skb_reserve(skb, NET_IP_ALIGN);
448 skb->dev = netdev;
449
450 buffer_info->alloced = 1;
451 buffer_info->skb = skb;
452 buffer_info->length = (u16) adapter->rx_buffer_len;
453 page = virt_to_page(skb->data);
454 offset = (unsigned long)skb->data & ~PAGE_MASK;
455 buffer_info->dma = pci_map_page(pdev, page, offset,
456 adapter->rx_buffer_len,
457 PCI_DMA_FROMDEVICE);
458 rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
459 rfd_desc->buf_len = cpu_to_le16(adapter->rx_buffer_len);
460 rfd_desc->coalese = 0;
461
462next:
463 rfd_next_to_use = next_next;
464 if (unlikely(++next_next == rfd_ring->count))
465 next_next = 0;
466
467 buffer_info = &rfd_ring->buffer_info[rfd_next_to_use];
468 next_info = &rfd_ring->buffer_info[next_next];
469 num_alloc++;
470 }
471
472 if (num_alloc) {
473 /*
474 * Force memory writes to complete before letting h/w
475 * know there are new descriptors to fetch. (Only
476 * applicable for weak-ordered memory model archs,
477 * such as IA-64).
478 */
479 wmb();
480 atomic_set(&rfd_ring->next_to_use, (int)rfd_next_to_use);
481 }
482 return num_alloc;
483}
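/*
 * Editor's sketch (illustrative only, not part of the driver): the
 * "increment and wrap" idiom used for rfd_next_to_use/next_next above
 * is a plain ring advance; with count = 4 an index walks 0, 1, 2, 3,
 * 0, ... A hypothetical helper:
 */
#if 0
static inline u16 ring_advance(u16 index, u16 count)
{
	return (++index == count) ? 0 : index;
}
#endif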
484
485static void atl1_intr_rx(struct atl1_adapter *adapter)
486{
487 int i, count;
488 u16 length;
489 u16 rrd_next_to_clean;
490 u32 value;
491 struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
492 struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
493 struct atl1_buffer *buffer_info;
494 struct rx_return_desc *rrd;
495 struct sk_buff *skb;
496
497 count = 0;
498
499 rrd_next_to_clean = atomic_read(&rrd_ring->next_to_clean);
500
501 while (1) {
502 rrd = ATL1_RRD_DESC(rrd_ring, rrd_next_to_clean);
503 i = 1;
504 if (likely(rrd->xsz.valid)) { /* packet valid */
505chk_rrd:
506 /* check rrd status */
507 if (likely(rrd->num_buf == 1))
508 goto rrd_ok;
509
510 /* rrd seems to be bad */
511 if (unlikely(i-- > 0)) {
512 /* rrd may not be DMAed completely */
513 printk(KERN_DEBUG
514 "%s: RRD may not be DMAed completely\n",
515 atl1_driver_name);
516 udelay(1);
517 goto chk_rrd;
518 }
519 /* bad rrd */
520 printk(KERN_DEBUG "%s: bad RRD\n", atl1_driver_name);
521 /* see if the RFD index needs updating */
522 if (rrd->num_buf > 1) {
523 u16 num_buf;
524 num_buf =
525 (rrd->xsz.xsum_sz.pkt_size +
526 adapter->rx_buffer_len -
527 1) / adapter->rx_buffer_len;
528 if (rrd->num_buf == num_buf) {
529 /* clean alloc flag for bad rrd */
530 while (rfd_ring->next_to_clean !=
531 (rrd->buf_indx + num_buf)) {
532 rfd_ring->buffer_info[
533 rfd_ring->next_to_clean
534 ].alloced = 0;
535 if (++rfd_ring->next_to_clean ==
536 rfd_ring->count) {
537 rfd_ring->next_to_clean = 0;
538 }
539 }
540 }
541 }
542
543 /* update rrd */
544 rrd->xsz.valid = 0;
545 if (++rrd_next_to_clean == rrd_ring->count)
546 rrd_next_to_clean = 0;
547 count++;
548 continue;
549 } else { /* current rrd has not been updated yet */
550
551 break;
552 }
553rrd_ok:
554 /* clear alloc flags for the RFDs this packet consumed */
555 while (rfd_ring->next_to_clean != rrd->buf_indx) {
556 rfd_ring->buffer_info[rfd_ring->next_to_clean].alloced =
557 0;
558 if (++rfd_ring->next_to_clean == rfd_ring->count)
559 rfd_ring->next_to_clean = 0;
560 }
561
562 buffer_info = &rfd_ring->buffer_info[rrd->buf_indx];
563 if (++rfd_ring->next_to_clean == rfd_ring->count)
564 rfd_ring->next_to_clean = 0;
565
566 /* update rrd next to clean */
567 if (++rrd_next_to_clean == rrd_ring->count)
568 rrd_next_to_clean = 0;
569 count++;
570
571 if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) {
572 if (!(rrd->err_flg &
573 (ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM
574 | ERR_FLAG_LEN))) {
575 /* packet error, no need to send it upstream */
576 buffer_info->alloced = 0;
577 rrd->xsz.valid = 0;
578 continue;
579 }
580 }
581
582 /* Good Receive */
583 pci_unmap_page(adapter->pdev, buffer_info->dma,
584 buffer_info->length, PCI_DMA_FROMDEVICE);
585 skb = buffer_info->skb;
586 length = le16_to_cpu(rrd->xsz.xsum_sz.pkt_size);
587
588 skb_put(skb, length - ETHERNET_FCS_SIZE);
589
590 /* Receive Checksum Offload */
591 atl1_rx_checksum(adapter, rrd, skb);
592 skb->protocol = eth_type_trans(skb, adapter->netdev);
593
594 if (adapter->vlgrp && (rrd->pkt_flg & PACKET_FLAG_VLAN_INS)) {
595 u16 vlan_tag = (rrd->vlan_tag >> 4) |
596 ((rrd->vlan_tag & 7) << 13) |
597 ((rrd->vlan_tag & 8) << 9);
598 vlan_hwaccel_rx(skb, adapter->vlgrp, vlan_tag);
599 } else
600 netif_rx(skb);
601
602 /* let protocol layer free skb */
603 buffer_info->skb = NULL;
604 buffer_info->alloced = 0;
605 rrd->xsz.valid = 0;
606
607 adapter->netdev->last_rx = jiffies;
608 }
609
610 atomic_set(&rrd_ring->next_to_clean, rrd_next_to_clean);
611
612 atl1_alloc_rx_buffers(adapter);
613
614 /* update mailbox ? */
615 if (count) {
616 u32 tpd_next_to_use;
617 u32 rfd_next_to_use;
618 u32 rrd_next_to_clean;
619
620 spin_lock(&adapter->mb_lock);
621
622 tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use);
623 rfd_next_to_use =
624 atomic_read(&adapter->rfd_ring.next_to_use);
625 rrd_next_to_clean =
626 atomic_read(&adapter->rrd_ring.next_to_clean);
627 value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) <<
628 MB_RFD_PROD_INDX_SHIFT) |
629 ((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) <<
630 MB_RRD_CONS_INDX_SHIFT) |
631 ((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) <<
632 MB_TPD_PROD_INDX_SHIFT);
633 iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX);
634 spin_unlock(&adapter->mb_lock);
635 }
636}
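/*
 * Editor's note (illustrative): the receive-side VLAN tag swizzle
 * above and the transmit-side one in atl1_xmit_frame() are inverse
 * 4-bit rotations between the 802.1Q host layout PCP(3)|CFI(1)|VID(12)
 * and the hardware's tag layout. Worked example: host tag 0x2005
 * (PCP 1, VID 5) becomes 0x0051 on transmit, and receiving 0x0051
 * yields 0x2005 again.
 */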
637
638static void atl1_intr_tx(struct atl1_adapter *adapter)
639{
640 struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
641 struct atl1_buffer *buffer_info;
642 u16 sw_tpd_next_to_clean;
643 u16 cmb_tpd_next_to_clean;
644 u8 update = 0;
645
646 sw_tpd_next_to_clean = atomic_read(&tpd_ring->next_to_clean);
647 cmb_tpd_next_to_clean = le16_to_cpu(adapter->cmb.cmb->tpd_cons_idx);
648
649 while (cmb_tpd_next_to_clean != sw_tpd_next_to_clean) {
650 struct tx_packet_desc *tpd;
651 update = 1;
652 tpd = ATL1_TPD_DESC(tpd_ring, sw_tpd_next_to_clean);
653 buffer_info = &tpd_ring->buffer_info[sw_tpd_next_to_clean];
654 if (buffer_info->dma) {
655 pci_unmap_page(adapter->pdev, buffer_info->dma,
656 buffer_info->length, PCI_DMA_TODEVICE);
657 buffer_info->dma = 0;
658 }
659
660 if (buffer_info->skb) {
661 dev_kfree_skb_irq(buffer_info->skb);
662 buffer_info->skb = NULL;
663 }
664 tpd->buffer_addr = 0;
665 tpd->desc.data = 0;
666
667 if (++sw_tpd_next_to_clean == tpd_ring->count)
668 sw_tpd_next_to_clean = 0;
669 }
670 atomic_set(&tpd_ring->next_to_clean, sw_tpd_next_to_clean);
671
672 if (netif_queue_stopped(adapter->netdev)
673 && netif_carrier_ok(adapter->netdev))
674 netif_wake_queue(adapter->netdev);
675}
676
677static void atl1_check_for_link(struct atl1_adapter *adapter)
678{
679 struct net_device *netdev = adapter->netdev;
680 u16 phy_data = 0;
681
682 spin_lock(&adapter->lock);
683 adapter->phy_timer_pending = false;
684 atl1_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
685 atl1_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
686 spin_unlock(&adapter->lock);
687
688 /* notify upper layer link down ASAP */
689 if (!(phy_data & BMSR_LSTATUS)) { /* Link Down */
690 if (netif_carrier_ok(netdev)) { /* old link state: Up */
691 printk(KERN_INFO "%s: %s link is down\n",
692 atl1_driver_name, netdev->name);
693 adapter->link_speed = SPEED_0;
694 netif_carrier_off(netdev);
695 netif_stop_queue(netdev);
696 }
697 }
698 schedule_work(&adapter->link_chg_task);
699}
700
701/*
702 * atl1_intr - Interrupt Handler
703 * @irq: interrupt number
704 * @data: pointer to a network interface device structure
706 */
707static irqreturn_t atl1_intr(int irq, void *data)
708{
709 /*struct atl1_adapter *adapter = ((struct net_device *)data)->priv;*/
710 struct atl1_adapter *adapter = netdev_priv(data);
711 u32 status;
712 u8 update_rx;
713 int max_ints = 10;
714
715 status = adapter->cmb.cmb->int_stats;
716 if (!status)
717 return IRQ_NONE;
718
719 update_rx = 0;
720
721 do {
722 /* clear CMB interrupt status at once */
723 adapter->cmb.cmb->int_stats = 0;
724
725 if (status & ISR_GPHY) /* clear phy status */
726 atl1_clear_phy_int(adapter);
727
728 /* clear ISR status, enable CMB DMA and disable interrupts */
729 iowrite32(status | ISR_DIS_INT, adapter->hw.hw_addr + REG_ISR);
730
731 /* check if SMB intr */
732 if (status & ISR_SMB)
733 atl1_inc_smb(adapter);
734
735 /* check if PCIE PHY Link down */
736 if (status & ISR_PHY_LINKDOWN) {
737 printk(KERN_DEBUG "%s: pcie phy link down %x\n",
738 atl1_driver_name, status);
739 if (netif_running(adapter->netdev)) { /* reset MAC */
740 iowrite32(0, adapter->hw.hw_addr + REG_IMR);
741 schedule_work(&adapter->pcie_dma_to_rst_task);
742 return IRQ_HANDLED;
743 }
744 }
745
746 /* check for a DMA read/write error */
747 if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
748 printk(KERN_DEBUG
749 "%s: pcie DMA r/w error (status = 0x%x)\n",
750 atl1_driver_name, status);
751 iowrite32(0, adapter->hw.hw_addr + REG_IMR);
752 schedule_work(&adapter->pcie_dma_to_rst_task);
753 return IRQ_HANDLED;
754 }
755
756 /* link event */
757 if (status & ISR_GPHY) {
758 adapter->soft_stats.tx_carrier_errors++;
759 atl1_check_for_link(adapter);
760 }
761
762 /* transmit event */
763 if (status & ISR_CMB_TX)
764 atl1_intr_tx(adapter);
765
766 /* rx exception */
767 if (unlikely(status & (ISR_RXF_OV | ISR_RFD_UNRUN |
768 ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
769 ISR_HOST_RRD_OV | ISR_CMB_RX))) {
770 if (status &
771 (ISR_RXF_OV | ISR_RFD_UNRUN | ISR_RRD_OV |
772 ISR_HOST_RFD_UNRUN | ISR_HOST_RRD_OV))
773 printk(KERN_INFO
774 "%s: rx exception: status = 0x%x\n",
775 atl1_driver_name, status);
776 atl1_intr_rx(adapter);
777 }
778
779 if (--max_ints < 0)
780 break;
781
782 } while ((status = adapter->cmb.cmb->int_stats));
783
784 /* re-enable Interrupt */
785 iowrite32(ISR_DIS_SMB | ISR_DIS_DMA, adapter->hw.hw_addr + REG_ISR);
786 return IRQ_HANDLED;
787}
788
789/*
790 * atl1_set_multi - Multicast and Promiscuous mode set
791 * @netdev: network interface device structure
792 *
793 * The set_multi entry point is called whenever the multicast address
794 * list or the network interface flags are updated. This routine is
795 * responsible for configuring the hardware for proper multicast,
796 * promiscuous mode, and all-multi behavior.
797 */
798static void atl1_set_multi(struct net_device *netdev)
799{
800 struct atl1_adapter *adapter = netdev_priv(netdev);
801 struct atl1_hw *hw = &adapter->hw;
802 struct dev_mc_list *mc_ptr;
803 u32 rctl;
804 u32 hash_value;
805
806 /* Check for Promiscuous and All Multicast modes */
807 rctl = ioread32(hw->hw_addr + REG_MAC_CTRL);
808 if (netdev->flags & IFF_PROMISC)
809 rctl |= MAC_CTRL_PROMIS_EN;
810 else if (netdev->flags & IFF_ALLMULTI) {
811 rctl |= MAC_CTRL_MC_ALL_EN;
812 rctl &= ~MAC_CTRL_PROMIS_EN;
813 } else
814 rctl &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN);
815
816 iowrite32(rctl, hw->hw_addr + REG_MAC_CTRL);
817
818 /* clear the old settings from the multicast hash table */
819 iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE);
820 iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2));
821
822 /* compute each multicast address's hash value and set it in the hash table */
823 for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
824 hash_value = atl1_hash_mc_addr(hw, mc_ptr->dmi_addr);
825 atl1_hash_set(hw, hash_value);
826 }
827}
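/*
 * Editor's note: the two zero writes above clear a 64-bit multicast
 * hash table spread over two consecutive 32-bit registers; (1 << 2)
 * is simply a byte offset of 4 selecting the upper word.
 */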
828
829static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter)
830{
831 u32 value;
832 struct atl1_hw *hw = &adapter->hw;
833 struct net_device *netdev = adapter->netdev;
834 /* Config MAC CTRL Register */
835 value = MAC_CTRL_TX_EN | MAC_CTRL_RX_EN;
836 /* duplex */
837 if (FULL_DUPLEX == adapter->link_duplex)
838 value |= MAC_CTRL_DUPLX;
839 /* speed */
840 value |= ((u32) ((SPEED_1000 == adapter->link_speed) ?
841 MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100) <<
842 MAC_CTRL_SPEED_SHIFT);
843 /* flow control */
844 value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW);
845 /* PAD & CRC */
846 value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD);
847 /* preamble length */
848 value |= (((u32) adapter->hw.preamble_len
849 & MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT);
850 /* vlan */
851 if (adapter->vlgrp)
852 value |= MAC_CTRL_RMV_VLAN;
853 /* rx checksum
854 if (adapter->rx_csum)
855 value |= MAC_CTRL_RX_CHKSUM_EN;
856 */
857 /* filter mode */
858 value |= MAC_CTRL_BC_EN;
859 if (netdev->flags & IFF_PROMISC)
860 value |= MAC_CTRL_PROMIS_EN;
861 else if (netdev->flags & IFF_ALLMULTI)
862 value |= MAC_CTRL_MC_ALL_EN;
863 /* value |= MAC_CTRL_LOOPBACK; */
864 iowrite32(value, hw->hw_addr + REG_MAC_CTRL);
865}
866
867static u32 atl1_check_link(struct atl1_adapter *adapter)
868{
869 struct atl1_hw *hw = &adapter->hw;
870 struct net_device *netdev = adapter->netdev;
871 u32 ret_val;
872 u16 speed, duplex, phy_data;
873 int reconfig = 0;
874
875 /* MII_BMSR must read twice */
876 atl1_read_phy_reg(hw, MII_BMSR, &phy_data);
877 atl1_read_phy_reg(hw, MII_BMSR, &phy_data);
878 if (!(phy_data & BMSR_LSTATUS)) { /* link down */
879 if (netif_carrier_ok(netdev)) { /* old link state: Up */
880 printk(KERN_INFO "%s: link is down\n",
881 atl1_driver_name);
882 adapter->link_speed = SPEED_0;
883 netif_carrier_off(netdev);
884 netif_stop_queue(netdev);
885 }
886 return ATL1_SUCCESS;
887 }
888
889 /* Link Up */
890 ret_val = atl1_get_speed_and_duplex(hw, &speed, &duplex);
891 if (ret_val)
892 return ret_val;
893
894 switch (hw->media_type) {
895 case MEDIA_TYPE_1000M_FULL:
896 if (speed != SPEED_1000 || duplex != FULL_DUPLEX)
897 reconfig = 1;
898 break;
899 case MEDIA_TYPE_100M_FULL:
900 if (speed != SPEED_100 || duplex != FULL_DUPLEX)
901 reconfig = 1;
902 break;
903 case MEDIA_TYPE_100M_HALF:
904 if (speed != SPEED_100 || duplex != HALF_DUPLEX)
905 reconfig = 1;
906 break;
907 case MEDIA_TYPE_10M_FULL:
908 if (speed != SPEED_10 || duplex != FULL_DUPLEX)
909 reconfig = 1;
910 break;
911 case MEDIA_TYPE_10M_HALF:
912 if (speed != SPEED_10 || duplex != HALF_DUPLEX)
913 reconfig = 1;
914 break;
915 }
916
917 /* link result is our setting */
918 if (!reconfig) {
919 if (adapter->link_speed != speed
920 || adapter->link_duplex != duplex) {
921 adapter->link_speed = speed;
922 adapter->link_duplex = duplex;
923 atl1_setup_mac_ctrl(adapter);
924 printk(KERN_INFO "%s: %s link is up %d Mbps %s\n",
925 atl1_driver_name, netdev->name,
926 adapter->link_speed,
927 adapter->link_duplex ==
928 FULL_DUPLEX ? "full duplex" : "half duplex");
929 }
930 if (!netif_carrier_ok(netdev)) { /* Link down -> Up */
931 netif_carrier_on(netdev);
932 netif_wake_queue(netdev);
933 }
934 return ATL1_SUCCESS;
935 }
936
937 /* change original link status */
938 if (netif_carrier_ok(netdev)) {
939 adapter->link_speed = SPEED_0;
940 netif_carrier_off(netdev);
941 netif_stop_queue(netdev);
942 }
943
944 if (hw->media_type != MEDIA_TYPE_AUTO_SENSOR &&
945 hw->media_type != MEDIA_TYPE_1000M_FULL) {
946 switch (hw->media_type) {
947 case MEDIA_TYPE_100M_FULL:
948 phy_data = MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
949 MII_CR_RESET;
950 break;
951 case MEDIA_TYPE_100M_HALF:
952 phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
953 break;
954 case MEDIA_TYPE_10M_FULL:
955 phy_data =
956 MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET;
957 break;
958 default: /* MEDIA_TYPE_10M_HALF: */
959 phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
960 break;
961 }
962 atl1_write_phy_reg(hw, MII_BMCR, phy_data);
963 return ATL1_SUCCESS;
964 }
965
966 /* auto-neg, insert timer to re-config phy */
967 if (!adapter->phy_timer_pending) {
968 adapter->phy_timer_pending = true;
969 mod_timer(&adapter->phy_config_timer, jiffies + 3 * HZ);
970 }
971
972 return ATL1_SUCCESS;
973}
974
975static void set_flow_ctrl_old(struct atl1_adapter *adapter)
976{
977 u32 hi, lo, value;
978
979 /* RFD Flow Control */
980 value = adapter->rfd_ring.count;
981 hi = value / 16;
982 if (hi < 2)
983 hi = 2;
984 lo = value * 7 / 8;
985
986 value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) |
987 ((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT);
988 iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RXF_PAUSE_THRESH);
989
990 /* RRD Flow Control */
991 value = adapter->rrd_ring.count;
992 lo = value / 16;
993 hi = value * 7 / 8;
994 if (lo < 2)
995 lo = 2;
996 value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) |
997 ((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT);
998 iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RRD_PAUSE_THRESH);
999}
1000
1001static void set_flow_ctrl_new(struct atl1_hw *hw)
1002{
1003 u32 hi, lo, value;
1004
1005 /* RXF Flow Control */
1006 value = ioread32(hw->hw_addr + REG_SRAM_RXF_LEN);
1007 lo = value / 16;
1008 if (lo < 192)
1009 lo = 192;
1010 hi = value * 7 / 8;
1011 if (hi < lo)
1012 hi = lo + 16;
1013 value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) |
1014 ((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT);
1015 iowrite32(value, hw->hw_addr + REG_RXQ_RXF_PAUSE_THRESH);
1016
1017 /* RRD Flow Control */
1018 value = ioread32(hw->hw_addr + REG_SRAM_RRD_LEN);
1019 lo = value / 8;
1020 hi = value * 7 / 8;
1021 if (lo < 2)
1022 lo = 2;
1023 if (hi < lo)
1024 hi = lo + 3;
1025 value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) |
1026 ((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT);
1027 iowrite32(value, hw->hw_addr + REG_RXQ_RRD_PAUSE_THRESH);
1028}
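/*
 * Editor's note (illustrative): plugging sample values into the two
 * threshold routines above: with an RFD count of, say, 256, the old
 * style gives hi = 256 / 16 = 16 and lo = 256 * 7 / 8 = 224; with an
 * RXF SRAM length of, say, 4096, the new style gives lo = 4096 / 16 =
 * 256 (already above the 192 floor) and hi = 4096 * 7 / 8 = 3584.
 */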
1029
1030/*
1031 * atl1_configure - Configure Transmit & Receive Unit after Reset
1032 * @adapter: board private structure
1033 *
1034 * Configure the Tx/Rx unit of the MAC after a reset.
1035 */
1036static u32 atl1_configure(struct atl1_adapter *adapter)
1037{
1038 struct atl1_hw *hw = &adapter->hw;
1039 u32 value;
1040
1041 /* clear interrupt status */
1042 iowrite32(0xffffffff, adapter->hw.hw_addr + REG_ISR);
1043
1044 /* set MAC Address */
1045 value = (((u32) hw->mac_addr[2]) << 24) |
1046 (((u32) hw->mac_addr[3]) << 16) |
1047 (((u32) hw->mac_addr[4]) << 8) |
1048 (((u32) hw->mac_addr[5]));
1049 iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR);
1050 value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1]));
1051 iowrite32(value, hw->hw_addr + (REG_MAC_STA_ADDR + 4));
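/*
 * Editor's note (illustrative): for MAC address 00:11:22:33:44:55
 * the two writes above store 0x22334455 at REG_MAC_STA_ADDR and
 * 0x00000011 at REG_MAC_STA_ADDR + 4.
 */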
1052
1053 /* tx / rx ring */
1054
1055 /* HI base address */
1056 iowrite32((u32) ((adapter->tpd_ring.dma & 0xffffffff00000000ULL) >> 32),
1057 hw->hw_addr + REG_DESC_BASE_ADDR_HI);
1058 /* LO base address */
1059 iowrite32((u32) (adapter->rfd_ring.dma & 0x00000000ffffffffULL),
1060 hw->hw_addr + REG_DESC_RFD_ADDR_LO);
1061 iowrite32((u32) (adapter->rrd_ring.dma & 0x00000000ffffffffULL),
1062 hw->hw_addr + REG_DESC_RRD_ADDR_LO);
1063 iowrite32((u32) (adapter->tpd_ring.dma & 0x00000000ffffffffULL),
1064 hw->hw_addr + REG_DESC_TPD_ADDR_LO);
1065 iowrite32((u32) (adapter->cmb.dma & 0x00000000ffffffffULL),
1066 hw->hw_addr + REG_DESC_CMB_ADDR_LO);
1067 iowrite32((u32) (adapter->smb.dma & 0x00000000ffffffffULL),
1068 hw->hw_addr + REG_DESC_SMB_ADDR_LO);
1069
1070 /* element count */
1071 value = adapter->rrd_ring.count;
1072 value <<= 16;
1073 value += adapter->rfd_ring.count;
1074 iowrite32(value, hw->hw_addr + REG_DESC_RFD_RRD_RING_SIZE);
1075 iowrite32(adapter->tpd_ring.count, hw->hw_addr + REG_DESC_TPD_RING_SIZE);
1076
1077 /* Load Ptr */
1078 iowrite32(1, hw->hw_addr + REG_LOAD_PTR);
1079
1080 /* config Mailbox */
1081 value = ((atomic_read(&adapter->tpd_ring.next_to_use)
1082 & MB_TPD_PROD_INDX_MASK) << MB_TPD_PROD_INDX_SHIFT) |
1083 ((atomic_read(&adapter->rrd_ring.next_to_clean)
1084 & MB_RRD_CONS_INDX_MASK) << MB_RRD_CONS_INDX_SHIFT) |
1085 ((atomic_read(&adapter->rfd_ring.next_to_use)
1086 & MB_RFD_PROD_INDX_MASK) << MB_RFD_PROD_INDX_SHIFT);
1087 iowrite32(value, hw->hw_addr + REG_MAILBOX);
1088
1089 /* config IPG/IFG */
1090 value = (((u32) hw->ipgt & MAC_IPG_IFG_IPGT_MASK)
1091 << MAC_IPG_IFG_IPGT_SHIFT) |
1092 (((u32) hw->min_ifg & MAC_IPG_IFG_MIFG_MASK)
1093 << MAC_IPG_IFG_MIFG_SHIFT) |
1094 (((u32) hw->ipgr1 & MAC_IPG_IFG_IPGR1_MASK)
1095 << MAC_IPG_IFG_IPGR1_SHIFT) |
1096 (((u32) hw->ipgr2 & MAC_IPG_IFG_IPGR2_MASK)
1097 << MAC_IPG_IFG_IPGR2_SHIFT);
1098 iowrite32(value, hw->hw_addr + REG_MAC_IPG_IFG);
1099
1100 /* config Half-Duplex Control */
1101 value = ((u32) hw->lcol & MAC_HALF_DUPLX_CTRL_LCOL_MASK) |
1102 (((u32) hw->max_retry & MAC_HALF_DUPLX_CTRL_RETRY_MASK)
1103 << MAC_HALF_DUPLX_CTRL_RETRY_SHIFT) |
1104 MAC_HALF_DUPLX_CTRL_EXC_DEF_EN |
1105 (0xa << MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT) |
1106 (((u32) hw->jam_ipg & MAC_HALF_DUPLX_CTRL_JAMIPG_MASK)
1107 << MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT);
1108 iowrite32(value, hw->hw_addr + REG_MAC_HALF_DUPLX_CTRL);
1109
1110 /* set Interrupt Moderator Timer */
1111 iowrite16(adapter->imt, hw->hw_addr + REG_IRQ_MODU_TIMER_INIT);
1112 iowrite32(MASTER_CTRL_ITIMER_EN, hw->hw_addr + REG_MASTER_CTRL);
1113
1114 /* set Interrupt Clear Timer */
1115 iowrite16(adapter->ict, hw->hw_addr + REG_CMBDISDMA_TIMER);
1116
1117 /* set MTU; the extra 4 bytes allow for a VLAN tag */
1118 iowrite32(hw->max_frame_size + 4, hw->hw_addr + REG_MTU);
1119
1120 /* jumbo size & rrd retirement timer */
1121 value = (((u32) hw->rx_jumbo_th & RXQ_JMBOSZ_TH_MASK)
1122 << RXQ_JMBOSZ_TH_SHIFT) |
1123 (((u32) hw->rx_jumbo_lkah & RXQ_JMBO_LKAH_MASK)
1124 << RXQ_JMBO_LKAH_SHIFT) |
1125 (((u32) hw->rrd_ret_timer & RXQ_RRD_TIMER_MASK)
1126 << RXQ_RRD_TIMER_SHIFT);
1127 iowrite32(value, hw->hw_addr + REG_RXQ_JMBOSZ_RRDTIM);
1128
1129 /* Flow Control */
1130 switch (hw->dev_rev) {
1131 case 0x8001:
1132 case 0x9001:
1133 case 0x9002:
1134 case 0x9003:
1135 set_flow_ctrl_old(adapter);
1136 break;
1137 default:
1138 set_flow_ctrl_new(hw);
1139 break;
1140 }
1141
1142 /* config TXQ */
1143 value = (((u32) hw->tpd_burst & TXQ_CTRL_TPD_BURST_NUM_MASK)
1144 << TXQ_CTRL_TPD_BURST_NUM_SHIFT) |
1145 (((u32) hw->txf_burst & TXQ_CTRL_TXF_BURST_NUM_MASK)
1146 << TXQ_CTRL_TXF_BURST_NUM_SHIFT) |
1147 (((u32) hw->tpd_fetch_th & TXQ_CTRL_TPD_FETCH_TH_MASK)
1148 << TXQ_CTRL_TPD_FETCH_TH_SHIFT) | TXQ_CTRL_ENH_MODE | TXQ_CTRL_EN;
1149 iowrite32(value, hw->hw_addr + REG_TXQ_CTRL);
1150
1151 /* min TPD fetch gap & Tx jumbo packet size threshold for task offload */
1152 value = (((u32) hw->tx_jumbo_task_th & TX_JUMBO_TASK_TH_MASK)
1153 << TX_JUMBO_TASK_TH_SHIFT) |
1154 (((u32) hw->tpd_fetch_gap & TX_TPD_MIN_IPG_MASK)
1155 << TX_TPD_MIN_IPG_SHIFT);
1156 iowrite32(value, hw->hw_addr + REG_TX_JUMBO_TASK_TH_TPD_IPG);
1157
1158 /* config RXQ */
1159 value = (((u32) hw->rfd_burst & RXQ_CTRL_RFD_BURST_NUM_MASK)
1160 << RXQ_CTRL_RFD_BURST_NUM_SHIFT) |
1161 (((u32) hw->rrd_burst & RXQ_CTRL_RRD_BURST_THRESH_MASK)
1162 << RXQ_CTRL_RRD_BURST_THRESH_SHIFT) |
1163 (((u32) hw->rfd_fetch_gap & RXQ_CTRL_RFD_PREF_MIN_IPG_MASK)
1164 << RXQ_CTRL_RFD_PREF_MIN_IPG_SHIFT) |
1165 RXQ_CTRL_CUT_THRU_EN | RXQ_CTRL_EN;
1166 iowrite32(value, hw->hw_addr + REG_RXQ_CTRL);
1167
1168 /* config DMA Engine */
1169 value = ((((u32) hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK)
1170 << DMA_CTRL_DMAR_BURST_LEN_SHIFT) |
1171 ((((u32) hw->dmaw_block) & DMA_CTRL_DMAW_BURST_LEN_MASK)
1172 << DMA_CTRL_DMAW_BURST_LEN_SHIFT) |
1173 DMA_CTRL_DMAR_EN | DMA_CTRL_DMAW_EN;
1174 value |= (u32) hw->dma_ord;
1175 if (atl1_rcb_128 == hw->rcb_value)
1176 value |= DMA_CTRL_RCB_VALUE;
1177 iowrite32(value, hw->hw_addr + REG_DMA_CTRL);
1178
1179 /* config CMB / SMB */
1180 value = hw->cmb_rrd | ((u32) hw->cmb_tpd << 16);
1181 iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TH);
1182 value = hw->cmb_rx_timer | ((u32) hw->cmb_tx_timer << 16);
1183 iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TIMER);
1184 iowrite32(hw->smb_timer, hw->hw_addr + REG_SMB_TIMER);
1185
1186 /* enable CMB / SMB */
1187 value = CSMB_CTRL_CMB_EN | CSMB_CTRL_SMB_EN;
1188 iowrite32(value, hw->hw_addr + REG_CSMB_CTRL);
1189
1190 value = ioread32(adapter->hw.hw_addr + REG_ISR);
1191 if (unlikely((value & ISR_PHY_LINKDOWN) != 0))
1192 value = 1; /* config failed */
1193 else
1194 value = 0;
1195
1196 /* clear all interrupt status */
1197 iowrite32(0x3fffffff, adapter->hw.hw_addr + REG_ISR);
1198 iowrite32(0, adapter->hw.hw_addr + REG_ISR);
1199 return value;
1200}
1201
1202/*
1203 * atl1_irq_disable - Mask off interrupt generation on the NIC
1204 * @adapter: board private structure
1205 */
1206static void atl1_irq_disable(struct atl1_adapter *adapter)
1207{
1208 atomic_inc(&adapter->irq_sem);
1209 iowrite32(0, adapter->hw.hw_addr + REG_IMR);
1210 ioread32(adapter->hw.hw_addr + REG_IMR);
1211 synchronize_irq(adapter->pdev->irq);
1212}
1213
1214static void atl1_vlan_rx_register(struct net_device *netdev,
1215 struct vlan_group *grp)
1216{
1217 struct atl1_adapter *adapter = netdev_priv(netdev);
1218 unsigned long flags;
1219 u32 ctrl;
1220
1221 spin_lock_irqsave(&adapter->lock, flags);
1222 /* atl1_irq_disable(adapter); */
1223 adapter->vlgrp = grp;
1224
1225 if (grp) {
1226 /* enable VLAN tag insert/strip */
1227 ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL);
1228 ctrl |= MAC_CTRL_RMV_VLAN;
1229 iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL);
1230 } else {
1231 /* disable VLAN tag insert/strip */
1232 ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL);
1233 ctrl &= ~MAC_CTRL_RMV_VLAN;
1234 iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL);
1235 }
1236
1237 /* atl1_irq_enable(adapter); */
1238 spin_unlock_irqrestore(&adapter->lock, flags);
1239}
1240
1241/* FIXME: justify or remove -- CHS */
1242static void atl1_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
1243{
1244 /* We don't do Vlan filtering */
1245 return;
1246}
1247
1248/* FIXME: this looks wrong too -- CHS */
1249static void atl1_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
1250{
1251 struct atl1_adapter *adapter = netdev_priv(netdev);
1252 unsigned long flags;
1253
1254 spin_lock_irqsave(&adapter->lock, flags);
1255 /* atl1_irq_disable(adapter); */
1256 if (adapter->vlgrp)
1257 adapter->vlgrp->vlan_devices[vid] = NULL;
1258 /* atl1_irq_enable(adapter); */
1259 spin_unlock_irqrestore(&adapter->lock, flags);
1260 /* We don't do Vlan filtering */
1261 return;
1262}
1263
1264static void atl1_restore_vlan(struct atl1_adapter *adapter)
1265{
1266 atl1_vlan_rx_register(adapter->netdev, adapter->vlgrp);
1267 if (adapter->vlgrp) {
1268 u16 vid;
1269 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
1270 if (!adapter->vlgrp->vlan_devices[vid])
1271 continue;
1272 atl1_vlan_rx_add_vid(adapter->netdev, vid);
1273 }
1274 }
1275}
1276
1277static u16 tpd_avail(struct atl1_tpd_ring *tpd_ring)
1278{
1279 u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
1280 u16 next_to_use = atomic_read(&tpd_ring->next_to_use);
1281 return (next_to_clean > next_to_use) ?
1282 next_to_clean - next_to_use - 1 :
1283 tpd_ring->count + next_to_clean - next_to_use - 1;
1284}
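/*
 * Editor's note (illustrative): with count = 64, next_to_clean = 10
 * and next_to_use = 50, tpd_avail() returns 64 + 10 - 50 - 1 = 23
 * free descriptors; the trailing -1 keeps one slot unused so that a
 * completely full ring remains distinguishable from an empty one.
 */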
1285
1286static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb,
1287 struct tso_param *tso)
1288{
1289 /* We enter this function holding a spinlock. */
1290 u8 ipofst;
1291 int err;
1292
1293 if (skb_shinfo(skb)->gso_size) {
1294 if (skb_header_cloned(skb)) {
1295 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1296 if (unlikely(err))
1297 return err;
1298 }
1299
1300 if (skb->protocol == htons(ETH_P_IP)) {
1301 skb->nh.iph->tot_len = 0;
1302 skb->nh.iph->check = 0;
1303 skb->h.th->check =
1304 ~csum_tcpudp_magic(skb->nh.iph->saddr,
1305 skb->nh.iph->daddr, 0,
1306 IPPROTO_TCP, 0);
1307 ipofst = skb->nh.raw - skb->data;
1308 if (ipofst != ENET_HEADER_SIZE) /* 802.3 frame */
1309 tso->tsopl |= 1 << TSO_PARAM_ETHTYPE_SHIFT;
1310
1311 tso->tsopl |= (skb->nh.iph->ihl &
1312 CSUM_PARAM_IPHL_MASK) << CSUM_PARAM_IPHL_SHIFT;
1313 tso->tsopl |= ((skb->h.th->doff << 2) &
1314 TSO_PARAM_TCPHDRLEN_MASK) << TSO_PARAM_TCPHDRLEN_SHIFT;
1315 tso->tsopl |= (skb_shinfo(skb)->gso_size &
1316 TSO_PARAM_MSS_MASK) << TSO_PARAM_MSS_SHIFT;
1317 tso->tsopl |= 1 << TSO_PARAM_IPCKSUM_SHIFT;
1318 tso->tsopl |= 1 << TSO_PARAM_TCPCKSUM_SHIFT;
1319 tso->tsopl |= 1 << TSO_PARAM_SEGMENT_SHIFT;
1320 return true;
1321 }
1322 }
1323 return false;
1324}
1325
1326static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb,
1327 struct csum_param *csum)
1328{
1329 u8 css, cso;
1330
1331 if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
1332 cso = skb->h.raw - skb->data;
1333 css = (skb->h.raw + skb->csum) - skb->data;
1334 if (unlikely(cso & 0x1)) {
1335 printk(KERN_DEBUG "%s: payload offset is not an even number\n",
1336 atl1_driver_name);
1337 return -1;
1338 }
1339 csum->csumpl |= (cso & CSUM_PARAM_PLOADOFFSET_MASK) <<
1340 CSUM_PARAM_PLOADOFFSET_SHIFT;
1341 csum->csumpl |= (css & CSUM_PARAM_XSUMOFFSET_MASK) <<
1342 CSUM_PARAM_XSUMOFFSET_SHIFT;
1343 csum->csumpl |= 1 << CSUM_PARAM_CUSTOMCKSUM_SHIFT;
1344 return true;
1345 }
1346
1347 return true;
1348}
1349
1350static void atl1_tx_map(struct atl1_adapter *adapter,
1351 struct sk_buff *skb, bool tcp_seg)
1352{
1353 /* We enter this function holding a spinlock. */
1354 struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
1355 struct atl1_buffer *buffer_info;
1356 struct page *page;
1357 int first_buf_len = skb->len;
1358 unsigned long offset;
1359 unsigned int nr_frags;
1360 unsigned int f;
1361 u16 tpd_next_to_use;
1362 u16 proto_hdr_len;
1363 u16 i, m, len12;
1364
1365 first_buf_len -= skb->data_len;
1366 nr_frags = skb_shinfo(skb)->nr_frags;
1367 tpd_next_to_use = atomic_read(&tpd_ring->next_to_use);
1368 buffer_info = &tpd_ring->buffer_info[tpd_next_to_use];
1369 BUG_ON(buffer_info->skb);
1370 /* the skb pointer is stored only in the last TPD's buffer_info */
1371 buffer_info->skb = NULL;
1372
1373 if (tcp_seg) {
1374 /* TSO/GSO */
1375 proto_hdr_len =
1376 ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
1377 buffer_info->length = proto_hdr_len;
1378 page = virt_to_page(skb->data);
1379 offset = (unsigned long)skb->data & ~PAGE_MASK;
1380 buffer_info->dma = pci_map_page(adapter->pdev, page,
1381 offset, proto_hdr_len,
1382 PCI_DMA_TODEVICE);
1383
1384 if (++tpd_next_to_use == tpd_ring->count)
1385 tpd_next_to_use = 0;
1386
1387 if (first_buf_len > proto_hdr_len) {
1388 len12 = first_buf_len - proto_hdr_len;
1389 m = (len12 + MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN;
1390 for (i = 0; i < m; i++) {
1391 buffer_info =
1392 &tpd_ring->buffer_info[tpd_next_to_use];
1393 buffer_info->skb = NULL;
1394 buffer_info->length =
1395 (MAX_TX_BUF_LEN >=
1396 len12) ? MAX_TX_BUF_LEN : len12;
1397 len12 -= buffer_info->length;
1398 page = virt_to_page(skb->data +
1399 (proto_hdr_len +
1400 i * MAX_TX_BUF_LEN));
1401 offset = (unsigned long)(skb->data +
1402 (proto_hdr_len +
1403 i * MAX_TX_BUF_LEN)) &
1404 ~PAGE_MASK;
1405 buffer_info->dma =
1406 pci_map_page(adapter->pdev, page, offset,
1407 buffer_info->length,
1408 PCI_DMA_TODEVICE);
1409 if (++tpd_next_to_use == tpd_ring->count)
1410 tpd_next_to_use = 0;
1411 }
1412 }
1413 } else {
1414 /* not TSO/GSO */
1415 buffer_info->length = first_buf_len;
1416 page = virt_to_page(skb->data);
1417 offset = (unsigned long)skb->data & ~PAGE_MASK;
1418 buffer_info->dma = pci_map_page(adapter->pdev, page,
1419 offset, first_buf_len,
1420 PCI_DMA_TODEVICE);
1421 if (++tpd_next_to_use == tpd_ring->count)
1422 tpd_next_to_use = 0;
1423 }
1424
1425 for (f = 0; f < nr_frags; f++) {
1426 struct skb_frag_struct *frag;
1427 u16 lenf, i, m;
1428
1429 frag = &skb_shinfo(skb)->frags[f];
1430 lenf = frag->size;
1431
1432 m = (lenf + MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN;
1433 for (i = 0; i < m; i++) {
1434 buffer_info = &tpd_ring->buffer_info[tpd_next_to_use];
1435 /* fragment TPDs never own the skb pointer */
1436 BUG_ON(buffer_info->skb);
1437 buffer_info->skb = NULL;
1438 buffer_info->length =
1439 (lenf > MAX_TX_BUF_LEN) ? MAX_TX_BUF_LEN : lenf;
1440 lenf -= buffer_info->length;
1441 buffer_info->dma =
1442 pci_map_page(adapter->pdev, frag->page,
1443 frag->page_offset + i * MAX_TX_BUF_LEN,
1444 buffer_info->length, PCI_DMA_TODEVICE);
1445
1446 if (++tpd_next_to_use == tpd_ring->count)
1447 tpd_next_to_use = 0;
1448 }
1449 }
1450
1451 /* last tpd's buffer-info */
1452 buffer_info->skb = skb;
1453}
1454
1455static void atl1_tx_queue(struct atl1_adapter *adapter, int count,
1456 union tpd_descr *descr)
1457{
1458 /* We enter this function holding a spinlock. */
1459 struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
1460 int j;
1461 u32 val;
1462 struct atl1_buffer *buffer_info;
1463 struct tx_packet_desc *tpd;
1464 u16 tpd_next_to_use = atomic_read(&tpd_ring->next_to_use);
1465
1466 for (j = 0; j < count; j++) {
1467 buffer_info = &tpd_ring->buffer_info[tpd_next_to_use];
1468 tpd = ATL1_TPD_DESC(&adapter->tpd_ring, tpd_next_to_use);
1469 tpd->desc.csum.csumpu = descr->csum.csumpu;
1470 tpd->desc.csum.csumpl = descr->csum.csumpl;
1471 tpd->desc.tso.tsopu = descr->tso.tsopu;
1472 tpd->desc.tso.tsopl = descr->tso.tsopl;
1473 tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
1474 tpd->desc.data = descr->data;
1475 tpd->desc.csum.csumpu |= (cpu_to_le16(buffer_info->length) &
1476 CSUM_PARAM_BUFLEN_MASK) << CSUM_PARAM_BUFLEN_SHIFT;
1477
1478 val = (descr->tso.tsopl >> TSO_PARAM_SEGMENT_SHIFT) &
1479 TSO_PARAM_SEGMENT_MASK;
1480 if (val && !j)
1481 tpd->desc.tso.tsopl |= 1 << TSO_PARAM_HDRFLAG_SHIFT;
1482
1483 if (j == (count - 1))
1484 tpd->desc.csum.csumpl |= 1 << CSUM_PARAM_EOP_SHIFT;
1485
1486 if (++tpd_next_to_use == tpd_ring->count)
1487 tpd_next_to_use = 0;
1488 }
1489 /*
1490 * Force memory writes to complete before letting h/w
1491 * know there are new descriptors to fetch. (Only
1492 * applicable for weak-ordered memory model archs,
1493 * such as IA-64).
1494 */
1495 wmb();
1496
1497 atomic_set(&tpd_ring->next_to_use, (int)tpd_next_to_use);
1498}
1499
1500static void atl1_update_mailbox(struct atl1_adapter *adapter)
1501{
1502 unsigned long flags;
1503 u32 tpd_next_to_use;
1504 u32 rfd_next_to_use;
1505 u32 rrd_next_to_clean;
1506 u32 value;
1507
1508 spin_lock_irqsave(&adapter->mb_lock, flags);
1509
1510 tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use);
1511 rfd_next_to_use = atomic_read(&adapter->rfd_ring.next_to_use);
1512 rrd_next_to_clean = atomic_read(&adapter->rrd_ring.next_to_clean);
1513
1514 value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) <<
1515 MB_RFD_PROD_INDX_SHIFT) |
1516 ((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) <<
1517 MB_RRD_CONS_INDX_SHIFT) |
1518 ((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) <<
1519 MB_TPD_PROD_INDX_SHIFT);
1520 iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX);
1521
1522 spin_unlock_irqrestore(&adapter->mb_lock, flags);
1523}
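/*
 * Editor's sketch (illustrative only, not part of the driver): this
 * register packing also appears in atl1_intr_rx() and atl1_configure()
 * and could be shared through a hypothetical helper along these lines:
 */
#if 0
static u32 atl1_pack_mailbox(u32 tpd_prod, u32 rfd_prod, u32 rrd_cons)
{
	return ((rfd_prod & MB_RFD_PROD_INDX_MASK) <<
			MB_RFD_PROD_INDX_SHIFT) |
	       ((rrd_cons & MB_RRD_CONS_INDX_MASK) <<
			MB_RRD_CONS_INDX_SHIFT) |
	       ((tpd_prod & MB_TPD_PROD_INDX_MASK) <<
			MB_TPD_PROD_INDX_SHIFT);
}
#endif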
1524
1525static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1526{
1527 struct atl1_adapter *adapter = netdev_priv(netdev);
1528 int len = skb->len;
1529 int tso;
1530 int count = 1;
1531 int ret_val;
1532 u32 val;
1533 union tpd_descr param;
1534 u16 frag_size;
1535 u16 vlan_tag;
1536 unsigned long flags;
1537 unsigned int nr_frags = 0;
1538 unsigned int mss = 0;
1539 unsigned int f;
1540 unsigned int proto_hdr_len;
1541
1542 len -= skb->data_len;
1543
1544 if (unlikely(skb->len == 0)) {
1545 dev_kfree_skb_any(skb);
1546 return NETDEV_TX_OK;
1547 }
1548
1549 param.data = 0;
1550 param.tso.tsopu = 0;
1551 param.tso.tsopl = 0;
1552 param.csum.csumpu = 0;
1553 param.csum.csumpl = 0;
1554
1555 /* nr_frags will be nonzero if we're doing scatter/gather (SG) */
1556 nr_frags = skb_shinfo(skb)->nr_frags;
1557 for (f = 0; f < nr_frags; f++) {
1558 frag_size = skb_shinfo(skb)->frags[f].size;
1559 if (frag_size)
1560 count +=
1561 (frag_size + MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN;
1562 }
1563
1564 /* mss will be nonzero if we're doing segment offload (TSO/GSO) */
1565 mss = skb_shinfo(skb)->gso_size;
1566 if (mss) {
1567 if (skb->protocol == htons(ETH_P_IP)) {
1568 proto_hdr_len = ((skb->h.raw - skb->data) +
1569 (skb->h.th->doff << 2));
1570 if (unlikely(proto_hdr_len > len)) {
1571 dev_kfree_skb_any(skb);
1572 return NETDEV_TX_OK;
1573 }
1574 /* need additional TPD ? */
1575 if (proto_hdr_len != len)
1576 count += (len - proto_hdr_len +
1577 MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN;
1578 }
1579 }
1580
1581 local_irq_save(flags);
1582 if (!spin_trylock(&adapter->lock)) {
1583 /* Can't get lock - tell upper layer to requeue */
1584 local_irq_restore(flags);
1585 printk(KERN_DEBUG "%s: TX locked\n", atl1_driver_name);
1586 return NETDEV_TX_LOCKED;
1587 }
1588
1589 if (tpd_avail(&adapter->tpd_ring) < count) {
1590 /* not enough descriptors */
1591 netif_stop_queue(netdev);
1592 spin_unlock_irqrestore(&adapter->lock, flags);
1593 printk(KERN_DEBUG "%s: TX busy\n", atl1_driver_name);
1594 return NETDEV_TX_BUSY;
1595 }
1596
1597 param.data = 0;
1598
1599 if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
1600 vlan_tag = vlan_tx_tag_get(skb);
1601 vlan_tag = (vlan_tag << 4) | (vlan_tag >> 13) |
1602 ((vlan_tag >> 9) & 0x8);
1603 param.csum.csumpl |= 1 << CSUM_PARAM_INSVLAG_SHIFT;
1604 param.csum.csumpu |= (vlan_tag & CSUM_PARAM_VALANTAG_MASK) <<
1605 CSUM_PARAM_VALAN_SHIFT;
1606 }
1607
1608 tso = atl1_tso(adapter, skb, &param.tso);
1609 if (tso < 0) {
1610 spin_unlock_irqrestore(&adapter->lock, flags);
1611 dev_kfree_skb_any(skb);
1612 return NETDEV_TX_OK;
1613 }
1614
1615 if (!tso) {
1616 ret_val = atl1_tx_csum(adapter, skb, &param.csum);
1617 if (ret_val < 0) {
1618 spin_unlock_irqrestore(&adapter->lock, flags);
1619 dev_kfree_skb_any(skb);
1620 return NETDEV_TX_OK;
1621 }
1622 }
1623
1624 val = (param.csum.csumpl >> CSUM_PARAM_SEGMENT_SHIFT) &
1625 CSUM_PARAM_SEGMENT_MASK;
1626 atl1_tx_map(adapter, skb, 1 == val);
1627 atl1_tx_queue(adapter, count, &param);
1628 netdev->trans_start = jiffies;
1629 spin_unlock_irqrestore(&adapter->lock, flags);
1630 atl1_update_mailbox(adapter);
1631 return NETDEV_TX_OK;
1632}
1633
1634/*
1635 * atl1_get_stats - Get System Network Statistics
1636 * @netdev: network interface device structure
1637 *
1638 * Returns the address of the device statistics structure.
1639 * The statistics are actually updated from the timer callback.
1640 */
1641static struct net_device_stats *atl1_get_stats(struct net_device *netdev)
1642{
1643 struct atl1_adapter *adapter = netdev_priv(netdev);
1644 return &adapter->net_stats;
1645}
1646
1647/*
1648 * atl1_clean_rx_ring - Free RFD Buffers
1649 * @adapter: board private structure
1650 */
1651static void atl1_clean_rx_ring(struct atl1_adapter *adapter)
1652{
1653 struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
1654 struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
1655 struct atl1_buffer *buffer_info;
1656 struct pci_dev *pdev = adapter->pdev;
1657 unsigned long size;
1658 unsigned int i;
1659
1660 /* Free all the Rx ring sk_buffs */
1661 for (i = 0; i < rfd_ring->count; i++) {
1662 buffer_info = &rfd_ring->buffer_info[i];
1663 if (buffer_info->dma) {
1664 pci_unmap_page(pdev,
1665 buffer_info->dma,
1666 buffer_info->length,
1667 PCI_DMA_FROMDEVICE);
1668 buffer_info->dma = 0;
1669 }
1670 if (buffer_info->skb) {
1671 dev_kfree_skb(buffer_info->skb);
1672 buffer_info->skb = NULL;
1673 }
1674 }
1675
1676 size = sizeof(struct atl1_buffer) * rfd_ring->count;
1677 memset(rfd_ring->buffer_info, 0, size);
1678
1679 /* Zero out the descriptor ring */
1680 memset(rfd_ring->desc, 0, rfd_ring->size);
1681
1682 rfd_ring->next_to_clean = 0;
1683 atomic_set(&rfd_ring->next_to_use, 0);
1684
1685 rrd_ring->next_to_use = 0;
1686 atomic_set(&rrd_ring->next_to_clean, 0);
1687}
1688
1689/*
1690 * atl1_clean_tx_ring - Free Tx Buffers
1691 * @adapter: board private structure
1692 */
1693static void atl1_clean_tx_ring(struct atl1_adapter *adapter)
1694{
1695 struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
1696 struct atl1_buffer *buffer_info;
1697 struct pci_dev *pdev = adapter->pdev;
1698 unsigned long size;
1699 unsigned int i;
1700
1701 /* Free all the Tx ring sk_buffs */
1702 for (i = 0; i < tpd_ring->count; i++) {
1703 buffer_info = &tpd_ring->buffer_info[i];
1704 if (buffer_info->dma) {
1705 pci_unmap_page(pdev, buffer_info->dma,
1706 buffer_info->length, PCI_DMA_TODEVICE);
1707 buffer_info->dma = 0;
1708 }
1709 }
1710
1711 for (i = 0; i < tpd_ring->count; i++) {
1712 buffer_info = &tpd_ring->buffer_info[i];
1713 if (buffer_info->skb) {
1714 dev_kfree_skb_any(buffer_info->skb);
1715 buffer_info->skb = NULL;
1716 }
1717 }
1718
1719 size = sizeof(struct atl1_buffer) * tpd_ring->count;
1720 memset(tpd_ring->buffer_info, 0, size);
1721
1722 /* Zero out the descriptor ring */
1723 memset(tpd_ring->desc, 0, tpd_ring->size);
1724
1725 atomic_set(&tpd_ring->next_to_use, 0);
1726 atomic_set(&tpd_ring->next_to_clean, 0);
1727}
1728
1729/*
1730 * atl1_free_ring_resources - Free Tx / RX descriptor Resources
1731 * @adapter: board private structure
1732 *
1733 * Free all transmit software resources
1734 */
1735void atl1_free_ring_resources(struct atl1_adapter *adapter)
1736{
1737 struct pci_dev *pdev = adapter->pdev;
1738 struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
1739 struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
1740 struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
1741 struct atl1_ring_header *ring_header = &adapter->ring_header;
1742
1743 atl1_clean_tx_ring(adapter);
1744 atl1_clean_rx_ring(adapter);
1745
1746 kfree(tpd_ring->buffer_info);
1747 pci_free_consistent(pdev, ring_header->size, ring_header->desc,
1748 ring_header->dma);
1749
1750 tpd_ring->buffer_info = NULL;
1751 tpd_ring->desc = NULL;
1752 tpd_ring->dma = 0;
1753
1754 rfd_ring->buffer_info = NULL;
1755 rfd_ring->desc = NULL;
1756 rfd_ring->dma = 0;
1757
1758 rrd_ring->desc = NULL;
1759 rrd_ring->dma = 0;
1760}
1761
1762s32 atl1_up(struct atl1_adapter *adapter)
1763{
1764 struct net_device *netdev = adapter->netdev;
1765 int err;
1766 int irq_flags = IRQF_SAMPLE_RANDOM;
1767
1768 /* hardware has been reset, we need to reload some things */
1769 atl1_set_multi(netdev);
1770 atl1_restore_vlan(adapter);
1771 err = atl1_alloc_rx_buffers(adapter);
1772 if (unlikely(!err)) /* no RX BUFFER allocated */
1773 return -ENOMEM;
1774
1775 if (unlikely(atl1_configure(adapter))) {
1776 err = -EIO;
1777 goto err_up;
1778 }
1779
1780 err = pci_enable_msi(adapter->pdev);
1781 if (err) {
1782 dev_info(&adapter->pdev->dev,
1783 "Unable to enable MSI: %d\n", err);
1784 irq_flags |= IRQF_SHARED;
1785 }
1786
1787 err = request_irq(adapter->pdev->irq, &atl1_intr, irq_flags,
1788 netdev->name, netdev);
1789 if (unlikely(err))
1790 goto err_up;
1791
1792 mod_timer(&adapter->watchdog_timer, jiffies);
1793 atl1_irq_enable(adapter);
1794 atl1_check_link(adapter);
1795 return 0;
1796
1797 /* FIXME: unreachable code! -- CHS */
1798 /* free irq disable any interrupt */
1799 iowrite32(0, adapter->hw.hw_addr + REG_IMR);
1800 free_irq(adapter->pdev->irq, netdev);
1801
1802err_up:
1803 pci_disable_msi(adapter->pdev);
1804 /* free rx_buffers */
1805 atl1_clean_rx_ring(adapter);
1806 return err;
1807}
1808
1809void atl1_down(struct atl1_adapter *adapter)
1810{
1811 struct net_device *netdev = adapter->netdev;
1812
1813 del_timer_sync(&adapter->watchdog_timer);
1814 del_timer_sync(&adapter->phy_config_timer);
1815 adapter->phy_timer_pending = false;
1816
1817 atl1_irq_disable(adapter);
1818 free_irq(adapter->pdev->irq, netdev);
1819 pci_disable_msi(adapter->pdev);
1820 atl1_reset_hw(&adapter->hw);
1821 adapter->cmb.cmb->int_stats = 0;
1822
1823 adapter->link_speed = SPEED_0;
1824 adapter->link_duplex = -1;
1825 netif_carrier_off(netdev);
1826 netif_stop_queue(netdev);
1827
1828 atl1_clean_tx_ring(adapter);
1829 atl1_clean_rx_ring(adapter);
1830}
1831
1832/*
1833 * atl1_change_mtu - Change the Maximum Transfer Unit
1834 * @netdev: network interface device structure
1835 * @new_mtu: new value for maximum frame size
1836 *
1837 * Returns 0 on success, negative on failure
1838 */
1839static int atl1_change_mtu(struct net_device *netdev, int new_mtu)
1840{
1841 struct atl1_adapter *adapter = netdev_priv(netdev);
1842 int old_mtu = netdev->mtu;
1843 int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
1844
1845 if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
1846 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
1847 printk(KERN_WARNING "%s: invalid MTU setting\n",
1848 atl1_driver_name);
1849 return -EINVAL;
1850 }
1851
1852 adapter->hw.max_frame_size = max_frame;
1853 adapter->hw.tx_jumbo_task_th = (max_frame + 7) >> 3;
1854 adapter->rx_buffer_len = (max_frame + 7) & ~7;
1855 adapter->hw.rx_jumbo_th = adapter->rx_buffer_len / 8;
1856
1857 netdev->mtu = new_mtu;
1858 if ((old_mtu != new_mtu) && netif_running(netdev)) {
1859 atl1_down(adapter);
1860 atl1_up(adapter);
1861 }
1862
1863 return 0;
1864}
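/*
 * Editor's note (illustrative, assuming ENET_HEADER_SIZE is 14 and
 * ETHERNET_FCS_SIZE is 4): for the standard MTU of 1500 the values
 * derived above are max_frame = 1500 + 14 + 4 = 1518,
 * tx_jumbo_task_th = (1518 + 7) >> 3 = 190, rx_buffer_len =
 * (1518 + 7) & ~7 = 1520 and rx_jumbo_th = 1520 / 8 = 190.
 */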
1865
1866/*
1867 * atl1_set_mac - Change the Ethernet Address of the NIC
1868 * @netdev: network interface device structure
1869 * @p: pointer to an address structure
1870 *
1871 * Returns 0 on success, negative on failure
1872 */
1873static int atl1_set_mac(struct net_device *netdev, void *p)
1874{
1875 struct atl1_adapter *adapter = netdev_priv(netdev);
1876 struct sockaddr *addr = p;
1877
1878 if (netif_running(netdev))
1879 return -EBUSY;
1880
1881 if (!is_valid_ether_addr(addr->sa_data))
1882 return -EADDRNOTAVAIL;
1883
1884 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1885 memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
1886
1887 atl1_set_mac_addr(&adapter->hw);
1888 return 0;
1889}
1890
1891/*
1892 * atl1_watchdog - Timer Call-back
1893 * @data: pointer to netdev cast into an unsigned long
1894 */
1895static void atl1_watchdog(unsigned long data)
1896{
1897 struct atl1_adapter *adapter = (struct atl1_adapter *)data;
1898
1899 /* Reset the timer */
1900 mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
1901}
1902
1903static int mdio_read(struct net_device *netdev, int phy_id, int reg_num)
1904{
1905 struct atl1_adapter *adapter = netdev_priv(netdev);
1906 u16 result;
1907
1908 atl1_read_phy_reg(&adapter->hw, reg_num & 0x1f, &result);
1909
1910 return result;
1911}
1912
1913static void mdio_write(struct net_device *netdev, int phy_id, int reg_num, int val)
1914{
1915 struct atl1_adapter *adapter = netdev_priv(netdev);
1916
1917 atl1_write_phy_reg(&adapter->hw, reg_num, val);
1918}
1919
1920/*
1921 * atl1_mii_ioctl - MII register access ioctl handler
1922 * @netdev: network interface device structure
1923 * @ifr: interface request structure carrying the MII data
1924 * @cmd: ioctl command (SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG)
1925 */
1926static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1927{
1928 struct atl1_adapter *adapter = netdev_priv(netdev);
1929 unsigned long flags;
1930 int retval;
1931
1932 if (!netif_running(netdev))
1933 return -EINVAL;
1934
1935 spin_lock_irqsave(&adapter->lock, flags);
1936 retval = generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
1937 spin_unlock_irqrestore(&adapter->lock, flags);
1938
1939 return retval;
1940}
1941
1942/*
1943 * atl1_ioctl - dispatch device ioctls
1944 * @netdev: network interface device structure
1945 * @ifr: interface request structure
1946 * @cmd: ioctl command
1947 */
1948static int atl1_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1949{
1950 switch (cmd) {
1951 case SIOCGMIIPHY:
1952 case SIOCGMIIREG:
1953 case SIOCSMIIREG:
1954 return atl1_mii_ioctl(netdev, ifr, cmd);
1955 default:
1956 return -EOPNOTSUPP;
1957 }
1958}
1959
1960/*
1961 * atl1_tx_timeout - Respond to a Tx Hang
1962 * @netdev: network interface device structure
1963 */
1964static void atl1_tx_timeout(struct net_device *netdev)
1965{
1966 struct atl1_adapter *adapter = netdev_priv(netdev);
1967 /* Do the reset outside of interrupt context */
1968 schedule_work(&adapter->tx_timeout_task);
1969}
1970
1971/*
1972 * atl1_phy_config - Timer Call-back
1973 * @data: pointer to netdev cast into an unsigned long
1974 */
1975static void atl1_phy_config(unsigned long data)
1976{
1977 struct atl1_adapter *adapter = (struct atl1_adapter *)data;
1978 struct atl1_hw *hw = &adapter->hw;
1979 unsigned long flags;
1980
1981 spin_lock_irqsave(&adapter->lock, flags);
1982 adapter->phy_timer_pending = false;
1983 atl1_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg);
1984 atl1_write_phy_reg(hw, MII_AT001_CR, hw->mii_1000t_ctrl_reg);
1985 atl1_write_phy_reg(hw, MII_BMCR, MII_CR_RESET | MII_CR_AUTO_NEG_EN);
1986 spin_unlock_irqrestore(&adapter->lock, flags);
1987}
1988
1989int atl1_reset(struct atl1_adapter *adapter)
1990{
1991 int ret;
1992
1993 ret = atl1_reset_hw(&adapter->hw);
1994 if (ret != ATL1_SUCCESS)
1995 return ret;
1996 return atl1_init_hw(&adapter->hw);
1997}
1998
1999/*
2000 * atl1_open - Called when a network interface is made active
2001 * @netdev: network interface device structure
2002 *
2003 * Returns 0 on success, negative value on failure
2004 *
2005 * The open entry point is called when a network interface is made
2006 * active by the system (IFF_UP). At this point all resources needed
2007 * for transmit and receive operations are allocated, the interrupt
2008 * handler is registered with the OS, the watchdog timer is started,
2009 * and the stack is notified that the interface is ready.
2010 */
2011static int atl1_open(struct net_device *netdev)
2012{
2013 struct atl1_adapter *adapter = netdev_priv(netdev);
2014 int err;
2015
2016 /* allocate transmit descriptors */
2017 err = atl1_setup_ring_resources(adapter);
2018 if (err)
2019 return err;
2020
2021 err = atl1_up(adapter);
2022 if (err)
2023 goto err_up;
2024
2025 return 0;
2026
2027err_up:
2028 atl1_reset(adapter);
2029 return err;
2030}
2031
2032/*
2033 * atl1_close - Disables a network interface
2034 * @netdev: network interface device structure
2035 *
2036 * Returns 0, this is not allowed to fail
2037 *
2038 * The close entry point is called when an interface is de-activated
2039 * by the OS. The hardware is still under the drivers control, but
2040 * needs to be disabled. A global MAC reset is issued to stop the
2041 * hardware, and all transmit and receive resources are freed.
2042 */
2043static int atl1_close(struct net_device *netdev)
2044{
2045 struct atl1_adapter *adapter = netdev_priv(netdev);
2046 atl1_down(adapter);
2047 atl1_free_ring_resources(adapter);
2048 return 0;
2049}
2050
2051/*
2052 * If a TPD buffer's size is zero, the PCIE DMAR_TO_INT
2053 * interrupt will assert. We do a soft reset (0x1400 = 1)
2054 * according to the spec, but it seems that the PCIE or DMA
2055 * state machine is not actually reset, so DMAR_TO_INT
2056 * asserts again and again.
2057 */
2058static void atl1_tx_timeout_task(struct work_struct *work)
2059{
2060 struct atl1_adapter *adapter =
2061 container_of(work, struct atl1_adapter, tx_timeout_task);
2062 struct net_device *netdev = adapter->netdev;
2063
2064 netif_device_detach(netdev);
2065 atl1_down(adapter);
2066 atl1_up(adapter);
2067 netif_device_attach(netdev);
2068}
2069
2070/*
2071 * atl1_link_chg_task - deal with link change event Out of interrupt context
2072 */
2073static void atl1_link_chg_task(struct work_struct *work)
2074{
2075 struct atl1_adapter *adapter =
2076 container_of(work, struct atl1_adapter, link_chg_task);
2077 unsigned long flags;
2078
2079 spin_lock_irqsave(&adapter->lock, flags);
2080 atl1_check_link(adapter);
2081 spin_unlock_irqrestore(&adapter->lock, flags);
2082}
2083
2084/*
2085 * atl1_pcie_patch - Patch for PCIE module
2086 */
2087static void atl1_pcie_patch(struct atl1_adapter *adapter)
2088{
2089 u32 value;
2090 value = 0x6500;
2091 iowrite32(value, adapter->hw.hw_addr + 0x12FC);
2092 /* pcie flow control mode change */
2093 value = ioread32(adapter->hw.hw_addr + 0x1008);
2094 value |= 0x8000;
2095 iowrite32(value, adapter->hw.hw_addr + 0x1008);
2096}
2097
2098/*
2099 * On ACPI resume, some VIA motherboards leave the Interrupt Disable
2100 * bit (0x400) in the PCI Command register set.
2101 * This function clears that bit to re-enable INTx interrupts.
2102 * Brackett, 2006/03/15
2103 */
2104static void atl1_via_workaround(struct atl1_adapter *adapter)
2105{
2106 unsigned long value;
2107
2108 value = ioread16(adapter->hw.hw_addr + PCI_COMMAND);
2109 if (value & PCI_COMMAND_INTX_DISABLE)
2110 value &= ~PCI_COMMAND_INTX_DISABLE;
2111 iowrite32(value, adapter->hw.hw_addr + PCI_COMMAND);
2112}
2113
2114/*
2115 * atl1_probe - Device Initialization Routine
2116 * @pdev: PCI device information struct
2117 * @ent: entry in atl1_pci_tbl
2118 *
2119 * Returns 0 on success, negative on failure
2120 *
2121 * atl1_probe initializes an adapter identified by a pci_dev structure.
2122 * The OS initialization, configuring of the adapter private structure,
2123 * and a hardware reset occur.
2124 */
2125static int __devinit atl1_probe(struct pci_dev *pdev,
2126 const struct pci_device_id *ent)
2127{
2128 struct net_device *netdev;
2129 struct atl1_adapter *adapter;
2130 static int cards_found = 0;
2131 bool pci_using_64 = true;
2132 int err;
2133
2134 err = pci_enable_device(pdev);
2135 if (err)
2136 return err;
2137
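 /*
  * Try a 64-bit DMA mask first and fall back to 32-bit;
  * pci_using_64 later gates NETIF_F_HIGHDMA.
  */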
2138 err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
2139 if (err) {
2140 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2141 if (err) {
2142 printk(KERN_DEBUG
2143 "%s: no usable DMA configuration, aborting\n",
2144 atl1_driver_name);
2145 goto err_dma;
2146 }
2147 pci_using_64 = false;
2148 }
2149 /* Mark all PCI regions associated with PCI device
2150 * pdev as being reserved by owner atl1_driver_name
2151 */
2152 err = pci_request_regions(pdev, atl1_driver_name);
2153 if (err)
2154 goto err_request_regions;
2155
2156 /* Enables bus-mastering on the device and calls
2157 * pcibios_set_master to do the needed arch specific settings
2158 */
2159 pci_set_master(pdev);
2160
2161 netdev = alloc_etherdev(sizeof(struct atl1_adapter));
2162 if (!netdev) {
2163 err = -ENOMEM;
2164 goto err_alloc_etherdev;
2165 }
2166 SET_MODULE_OWNER(netdev);
2167 SET_NETDEV_DEV(netdev, &pdev->dev);
2168
2169 pci_set_drvdata(pdev, netdev);
2170 adapter = netdev_priv(netdev);
2171 adapter->netdev = netdev;
2172 adapter->pdev = pdev;
2173 adapter->hw.back = adapter;
2174
2175 adapter->hw.hw_addr = pci_iomap(pdev, 0, 0);
2176 if (!adapter->hw.hw_addr) {
2177 err = -EIO;
2178 goto err_pci_iomap;
2179 }
2180 /* get device revision number */
2181 adapter->hw.dev_rev = ioread16(adapter->hw.hw_addr + (REG_MASTER_CTRL + 2));
2182
2183 /* set default ring resource counts */
2184 adapter->rfd_ring.count = adapter->rrd_ring.count = ATL1_DEFAULT_RFD;
2185 adapter->tpd_ring.count = ATL1_DEFAULT_TPD;
2186
2187 adapter->mii.dev = netdev;
2188 adapter->mii.mdio_read = mdio_read;
2189 adapter->mii.mdio_write = mdio_write;
2190 adapter->mii.phy_id_mask = 0x1f;
2191 adapter->mii.reg_num_mask = 0x1f;
2192
2193 netdev->open = &atl1_open;
2194 netdev->stop = &atl1_close;
2195 netdev->hard_start_xmit = &atl1_xmit_frame;
2196 netdev->get_stats = &atl1_get_stats;
2197 netdev->set_multicast_list = &atl1_set_multi;
2198 netdev->set_mac_address = &atl1_set_mac;
2199 netdev->change_mtu = &atl1_change_mtu;
2200 netdev->do_ioctl = &atl1_ioctl;
2201 netdev->tx_timeout = &atl1_tx_timeout;
2202 netdev->watchdog_timeo = 5 * HZ;
2203 netdev->vlan_rx_register = atl1_vlan_rx_register;
2204 netdev->vlan_rx_add_vid = atl1_vlan_rx_add_vid;
2205 netdev->vlan_rx_kill_vid = atl1_vlan_rx_kill_vid;
2206 netdev->ethtool_ops = &atl1_ethtool_ops;
2207 adapter->bd_number = cards_found;
2208 adapter->pci_using_64 = pci_using_64;
2209
2210 /* setup the private structure */
2211 err = atl1_sw_init(adapter);
2212 if (err)
2213 goto err_common;
2214
2215 netdev->features = NETIF_F_HW_CSUM;
2216 netdev->features |= NETIF_F_SG;
2217 netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
2218
2219 /*
2220 * FIXME - Until tso performance gets fixed, disable the feature.
2221 * Enable it with ethtool -K if desired.
2222 */
2223 /* netdev->features |= NETIF_F_TSO; */
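 /* e.g. (interface name is hypothetical): ethtool -K eth0 tso on */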
2224
2225 if (pci_using_64)
2226 netdev->features |= NETIF_F_HIGHDMA;
2227
2228 netdev->features |= NETIF_F_LLTX;
2229
2230 /*
2231 * workarounds for some early revisions of the L1;
2232 * final versions of the L1 may not need these
2233 * patches
2234 */
2235 /* atl1_pcie_patch(adapter); */
2236
2237 /* really reset GPHY core */
2238 iowrite16(0, adapter->hw.hw_addr + REG_GPHY_ENABLE);
2239
2240 /*
2241 * reset the controller to
2242 * put the device in a known good starting state
2243 */
2244 if (atl1_reset_hw(&adapter->hw)) {
2245 err = -EIO;
2246 goto err_common;
2247 }
2248
2249 /* copy the MAC address out of the EEPROM */
2250 atl1_read_mac_addr(&adapter->hw);
2251 memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
2252
2253 if (!is_valid_ether_addr(netdev->dev_addr)) {
2254 err = -EIO;
2255 goto err_common;
2256 }
2257
2258 atl1_check_options(adapter);
2259
2260 /* pre-init the MAC, and setup link */
2261 err = atl1_init_hw(&adapter->hw);
2262 if (err) {
2263 err = -EIO;
2264 goto err_common;
2265 }
2266
2267 atl1_pcie_patch(adapter);
2268 /* assume we have no link for now */
2269 netif_carrier_off(netdev);
2270 netif_stop_queue(netdev);
2271
2272 init_timer(&adapter->watchdog_timer);
2273 adapter->watchdog_timer.function = &atl1_watchdog;
2274 adapter->watchdog_timer.data = (unsigned long)adapter;
2275
2276 init_timer(&adapter->phy_config_timer);
2277 adapter->phy_config_timer.function = &atl1_phy_config;
2278 adapter->phy_config_timer.data = (unsigned long)adapter;
2279 adapter->phy_timer_pending = false;
2280
2281 INIT_WORK(&adapter->tx_timeout_task, atl1_tx_timeout_task);
2282
2283 INIT_WORK(&adapter->link_chg_task, atl1_link_chg_task);
2284
2285 INIT_WORK(&adapter->pcie_dma_to_rst_task, atl1_tx_timeout_task);
2286
2287 err = register_netdev(netdev);
2288 if (err)
2289 goto err_common;
2290
2291 cards_found++;
2292 atl1_via_workaround(adapter);
2293 return 0;
2294
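/*
 * Error unwind: each label below releases what was acquired before
 * the corresponding failure point, in reverse order of acquisition.
 */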
2295err_common:
2296 pci_iounmap(pdev, adapter->hw.hw_addr);
2297err_pci_iomap:
2298 free_netdev(netdev);
2299err_alloc_etherdev:
2300 pci_release_regions(pdev);
2301err_dma:
2302err_request_regions:
2303 pci_disable_device(pdev);
2304 return err;
2305}
2306
2307/*
2308 * atl1_remove - Device Removal Routine
2309 * @pdev: PCI device information struct
2310 *
2311 * atl1_remove is called by the PCI subsystem to alert the driver
2312 * that it should release a PCI device. This could be caused by a
2313 * Hot-Plug event, or because the driver is going to be removed from
2314 * memory.
2315 */
2316static void __devexit atl1_remove(struct pci_dev *pdev)
2317{
2318 struct net_device *netdev = pci_get_drvdata(pdev);
2319 struct atl1_adapter *adapter;
2320 /* Device not available. Return. */
2321 if (!netdev)
2322 return;
2323
2324 adapter = netdev_priv(netdev);
2325 iowrite16(0, adapter->hw.hw_addr + REG_GPHY_ENABLE);
2326 unregister_netdev(netdev);
2327 pci_iounmap(pdev, adapter->hw.hw_addr);
2328 pci_release_regions(pdev);
2329 free_netdev(netdev);
2330 pci_disable_device(pdev);
2331}
2332
2333#ifdef CONFIG_PM
2334static int atl1_suspend(struct pci_dev *pdev, pm_message_t state)
2335{
2336 struct net_device *netdev = pci_get_drvdata(pdev);
2337 struct atl1_adapter *adapter = netdev_priv(netdev);
2338 struct atl1_hw *hw = &adapter->hw;
2339 u32 ctrl = 0;
2340 u32 wufc = adapter->wol;
2341
2342 netif_device_detach(netdev);
2343 if (netif_running(netdev))
2344 atl1_down(adapter);
2345
 /* BMSR latches link-down events; read it twice so the second
  * read reflects the current link state
  */
2346 atl1_read_phy_reg(hw, MII_BMSR, (u16 *)&ctrl);
2347 atl1_read_phy_reg(hw, MII_BMSR, (u16 *)&ctrl);
2348 if (ctrl & BMSR_LSTATUS)
2349 wufc &= ~ATL1_WUFC_LNKC;
2350
2351 /* reduce speed to 10/100M */
2352 if (wufc) {
2353 atl1_phy_enter_power_saving(hw);
2354 /* on resume, let the driver re-establish the link */
2355 hw->phy_configured = false;
2356 atl1_set_mac_addr(hw);
2357 atl1_set_multi(netdev);
2358
2359 ctrl = 0;
2360 /* turn on magic packet wol */
2361 if (wufc & ATL1_WUFC_MAG)
2362 ctrl = WOL_MAGIC_EN | WOL_MAGIC_PME_EN;
2363
2364 /* turn on Link change WOL */
2365 if (wufc & ATL1_WUFC_LNKC)
2366 ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN);
2367 iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL);
2368
2369 /* turn on all-multi mode if wake on multicast is enabled */
2370 ctrl = ioread32(hw->hw_addr + REG_MAC_CTRL);
2371 ctrl &= ~MAC_CTRL_DBG;
2372 ctrl &= ~MAC_CTRL_PROMIS_EN;
2373 if (wufc & ATL1_WUFC_MC)
2374 ctrl |= MAC_CTRL_MC_ALL_EN;
2375 else
2376 ctrl &= ~MAC_CTRL_MC_ALL_EN;
2377
2378 /* turn on broadcast mode if wake-on-broadcast is enabled */
2379 if (wufc & ATL1_WUFC_BC)
2380 ctrl |= MAC_CTRL_BC_EN;
2381 else
2382 ctrl &= ~MAC_CTRL_BC_EN;
2383
2384 /* enable RX */
2385 ctrl |= MAC_CTRL_RX_EN;
2386 iowrite32(ctrl, hw->hw_addr + REG_MAC_CTRL);
2387 pci_enable_wake(pdev, PCI_D3hot, 1);
2388 pci_enable_wake(pdev, PCI_D3cold, 1);
2389 } else {
2390 iowrite32(0, hw->hw_addr + REG_WOL_CTRL);
2391 pci_enable_wake(pdev, PCI_D3hot, 0);
2392 pci_enable_wake(pdev, PCI_D3cold, 0);
2393 }
2394
2395 pci_save_state(pdev);
2396 pci_disable_device(pdev);
2397
2398 pci_set_power_state(pdev, PCI_D3hot);
2399
2400 return 0;
2401}
2402
2403static int atl1_resume(struct pci_dev *pdev)
2404{
2405 struct net_device *netdev = pci_get_drvdata(pdev);
2406 struct atl1_adapter *adapter = netdev_priv(netdev);
2407 int err;
2408
2409 pci_set_power_state(pdev, PCI_D0);
2410 pci_restore_state(pdev);
2411
2412 err = pci_enable_device(pdev);
 if (err)
 return err;
2413 pci_enable_wake(pdev, PCI_D3hot, 0);
2414 pci_enable_wake(pdev, PCI_D3cold, 0);
2415
2416 iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL);
2417 atl1_reset(adapter);
2418
2419 if (netif_running(netdev))
2420 atl1_up(adapter);
2421 netif_device_attach(netdev);
2422
2423 atl1_via_workaround(adapter);
2424
2425 return 0;
2426}
2427#else
2428#define atl1_suspend NULL
2429#define atl1_resume NULL
2430#endif
2431
2432static struct pci_driver atl1_driver = {
2433 .name = atl1_driver_name,
2434 .id_table = atl1_pci_tbl,
2435 .probe = atl1_probe,
2436 .remove = __devexit_p(atl1_remove),
2437 /* Power Management Hooks */
2438 /* probably broken right now -- CHS */
2439 .suspend = atl1_suspend,
2440 .resume = atl1_resume
2441};
2442
2443/*
2444 * atl1_exit_module - Driver Exit Cleanup Routine
2445 *
2446 * atl1_exit_module is called just before the driver is removed
2447 * from memory.
2448 */
2449static void __exit atl1_exit_module(void)
2450{
2451 pci_unregister_driver(&atl1_driver);
2452}
2453
2454/*
2455 * atl1_init_module - Driver Registration Routine
2456 *
2457 * atl1_init_module is the first routine called when the driver is
2458 * loaded. All it does is register with the PCI subsystem.
2459 */
2460static int __init atl1_init_module(void)
2461{
2462 printk(KERN_INFO "%s - version %s\n", atl1_driver_string, DRIVER_VERSION);
2463 printk(KERN_INFO "%s\n", atl1_copyright);
2464 return pci_register_driver(&atl1_driver);
2465}
2466
2467module_init(atl1_init_module);
2468module_exit(atl1_exit_module);
diff --git a/drivers/net/atl1/atl1_param.c b/drivers/net/atl1/atl1_param.c
new file mode 100644
index 000000000000..c407214339f6
--- /dev/null
+++ b/drivers/net/atl1/atl1_param.c
@@ -0,0 +1,206 @@
1/*
2 * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
3 * Copyright(c) 2006 Chris Snook <csnook@redhat.com>
4 * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
5 *
6 * Derived from Intel e1000 driver
7 * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the Free
11 * Software Foundation; either version 2 of the License, or (at your option)
12 * any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * more details.
18 *
19 * You should have received a copy of the GNU General Public License along with
20 * this program; if not, write to the Free Software Foundation, Inc., 59
21 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
22 */
23
24#include <linux/types.h>
25#include <linux/pci.h>
26#include <linux/moduleparam.h>
27#include "atl1.h"
28
29/*
30 * This is the only thing that needs to be changed to adjust the
31 * maximum number of ports that the driver can manage.
32 */
33#define ATL1_MAX_NIC 4
34
35#define OPTION_UNSET -1
36#define OPTION_DISABLED 0
37#define OPTION_ENABLED 1
38
39#define ATL1_PARAM_INIT { [0 ... ATL1_MAX_NIC] = OPTION_UNSET }
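/* "[0 ... N] = x" is the GCC range-designator extension; it starts
 * every element of the array out as OPTION_UNSET.
 */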
40
41/*
42 * Interrupt Moderate Timer in units of 2 us
43 *
44 * Valid Range: 50-65000 (MIN_INT_MOD_CNT-MAX_INT_MOD_CNT)
45 *
46 * Default Value: 100 (200us)
47 */
48static int __devinitdata int_mod_timer[ATL1_MAX_NIC+1] = ATL1_PARAM_INIT;
49static int num_int_mod_timer = 0;
50module_param_array_named(int_mod_timer, int_mod_timer, int, &num_int_mod_timer, 0);
51MODULE_PARM_DESC(int_mod_timer, "Interrupt moderator timer");
52
53/*
54 * flash_vendor
55 *
56 * Valid Range: 0-2
57 *
58 * 0 - Atmel
59 * 1 - SST
60 * 2 - ST
61 *
62 * Default Value: 0
63 */
64static int __devinitdata flash_vendor[ATL1_MAX_NIC+1] = ATL1_PARAM_INIT;
65static int num_flash_vendor = 0;
66module_param_array_named(flash_vendor, flash_vendor, int, &num_flash_vendor, 0);
67MODULE_PARM_DESC(flash_vendor, "SPI flash vendor");
68
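/*
 * Usage sketch (the values are hypothetical): both parameters take a
 * comma-separated list with one entry per NIC at module load time, e.g.
 *
 *	modprobe atl1 int_mod_timer=100,200 flash_vendor=0,1
 *
 * Entries left unset remain OPTION_UNSET and fall back to the
 * defaults chosen in atl1_check_options() below.
 */
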
69#define DEFAULT_INT_MOD_CNT 100 /* 200us */
70#define MAX_INT_MOD_CNT 65000
71#define MIN_INT_MOD_CNT 50
72
73#define FLASH_VENDOR_DEFAULT 0
74#define FLASH_VENDOR_MIN 0
75#define FLASH_VENDOR_MAX 2
76
77struct atl1_option {
78 enum { enable_option, range_option, list_option } type;
79 char *name;
80 char *err;
81 int def;
82 union {
83 struct { /* range_option info */
84 int min;
85 int max;
86 } r;
87 struct { /* list_option info */
88 int nr;
89 struct atl1_opt_list {
90 int i;
91 char *str;
92 } *p;
93 } l;
94 } arg;
95};
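
/*
 * Illustrative only (no such parameter exists in this driver): how a
 * list_option would be declared for a hypothetical "media_type"
 * parameter, exercising the union's list arm.
 */
#if 0
static struct atl1_opt_list media_list[] = {
 { 0, "Media set to copper" },
 { 1, "Media set to fiber" },
 { 2, "Media set to auto-detect" },
};

static struct atl1_option media_opt = {
 .type = list_option,
 .name = "Media Type",
 .err = "using default of 0",
 .def = 0,
 .arg = { .l = { .nr = 3, .p = media_list } },
};
#endif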
96
97static int __devinit atl1_validate_option(int *value, struct atl1_option *opt)
98{
99 if (*value == OPTION_UNSET) {
100 *value = opt->def;
101 return 0;
102 }
103
104 switch (opt->type) {
105 case enable_option:
106 switch (*value) {
107 case OPTION_ENABLED:
108 printk(KERN_INFO "%s: %s Enabled\n", atl1_driver_name,
109 opt->name);
110 return 0;
111 case OPTION_DISABLED:
112 printk(KERN_INFO "%s: %s Disabled\n", atl1_driver_name,
113 opt->name);
114 return 0;
115 }
116 break;
117 case range_option:
118 if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
119 printk(KERN_INFO "%s: %s set to %i\n",
120 atl1_driver_name, opt->name, *value);
121 return 0;
122 }
123 break;
124 case list_option:{
125 int i;
126 struct atl1_opt_list *ent;
127
128 for (i = 0; i < opt->arg.l.nr; i++) {
129 ent = &opt->arg.l.p[i];
130 if (*value == ent->i) {
131 if (ent->str[0] != '\0')
132 printk(KERN_INFO "%s: %s\n",
133 atl1_driver_name, ent->str);
134 return 0;
135 }
136 }
137 }
138 break;
139
140 default:
141 break;
142 }
143
144 printk(KERN_INFO "%s: invalid %s specified (%i) %s\n",
145 atl1_driver_name, opt->name, *value, opt->err);
146 *value = opt->def;
147 return -1;
148}
149
150/*
151 * atl1_check_options - Range Checking for Command Line Parameters
152 * @adapter: board private structure
153 *
154 * This routine checks all command line parameters for valid user
155 * input. If an invalid value is given, or if no user specified
156 * value exists, a default value is used. The final value is stored
157 * in a variable in the adapter structure.
158 */
159void __devinit atl1_check_options(struct atl1_adapter *adapter)
160{
161 int bd = adapter->bd_number;
162 if (bd >= ATL1_MAX_NIC) {
163 printk(KERN_NOTICE "%s: warning: no configuration for board #%i\n",
164 atl1_driver_name, bd);
165 printk(KERN_NOTICE "%s: using defaults for all values\n",
166 atl1_driver_name);
167 }
168 { /* Interrupt Moderate Timer */
169 struct atl1_option opt = {
170 .type = range_option,
171 .name = "Interrupt Moderator Timer",
172 .err = "using default of "
173 __MODULE_STRING(DEFAULT_INT_MOD_CNT),
174 .def = DEFAULT_INT_MOD_CNT,
175 .arg = { .r =
176 { .min = MIN_INT_MOD_CNT, .max = MAX_INT_MOD_CNT } }
177 };
178 int val;
179 if (num_int_mod_timer > bd) {
180 val = int_mod_timer[bd];
181 atl1_validate_option(&val, &opt);
182 adapter->imt = (u16) val;
183 } else
184 adapter->imt = (u16) (opt.def);
185 }
186
187 { /* Flash Vendor */
188 struct atl1_option opt = {
189 .type = range_option,
190 .name = "SPI Flash Vendor",
191 .err = "using default of "
192 __MODULE_STRING(FLASH_VENDOR_DEFAULT),
193 .def = FLASH_VENDOR_DEFAULT,
194 .arg = { .r =
195 { .min = FLASH_VENDOR_MIN,
196 .max = FLASH_VENDOR_MAX } }
197 };
198 int val;
199 if (num_flash_vendor > bd) {
200 val = flash_vendor[bd];
201 atl1_validate_option(&val, &opt);
202 adapter->hw.flash_vendor = (u8) val;
203 } else
204 adapter->hw.flash_vendor = (u8) (opt.def);
205 }
206}
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 303a8d94ad4b..5ff7882297d6 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -721,7 +721,7 @@ static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
721 struct ring_info *src_map, *dest_map; 721 struct ring_info *src_map, *dest_map;
722 struct rx_header *rh; 722 struct rx_header *rh;
723 int dest_idx; 723 int dest_idx;
724 u32 ctrl; 724 __le32 ctrl;
725 725
726 dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1); 726 dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
727 dest_desc = &bp->rx_ring[dest_idx]; 727 dest_desc = &bp->rx_ring[dest_idx];
@@ -783,7 +783,7 @@ static int b44_rx(struct b44 *bp, int budget)
783 RX_PKT_BUF_SZ, 783 RX_PKT_BUF_SZ,
784 PCI_DMA_FROMDEVICE); 784 PCI_DMA_FROMDEVICE);
785 rh = (struct rx_header *) skb->data; 785 rh = (struct rx_header *) skb->data;
786 len = cpu_to_le16(rh->len); 786 len = le16_to_cpu(rh->len);
787 if ((len > (RX_PKT_BUF_SZ - bp->rx_offset)) || 787 if ((len > (RX_PKT_BUF_SZ - bp->rx_offset)) ||
788 (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) { 788 (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
789 drop_it: 789 drop_it:
@@ -799,7 +799,7 @@ static int b44_rx(struct b44 *bp, int budget)
799 do { 799 do {
800 udelay(2); 800 udelay(2);
801 barrier(); 801 barrier();
802 len = cpu_to_le16(rh->len); 802 len = le16_to_cpu(rh->len);
803 } while (len == 0 && i++ < 5); 803 } while (len == 0 && i++ < 5);
804 if (len == 0) 804 if (len == 0)
805 goto drop_it; 805 goto drop_it;
@@ -2061,7 +2061,7 @@ out:
2061static int b44_read_eeprom(struct b44 *bp, u8 *data) 2061static int b44_read_eeprom(struct b44 *bp, u8 *data)
2062{ 2062{
2063 long i; 2063 long i;
2064 u16 *ptr = (u16 *) data; 2064 __le16 *ptr = (__le16 *) data;
2065 2065
2066 for (i = 0; i < 128; i += 2) 2066 for (i = 0; i < 128; i += 2)
2067 ptr[i / 2] = cpu_to_le16(readw(bp->regs + 4096 + i)); 2067 ptr[i / 2] = cpu_to_le16(readw(bp->regs + 4096 + i));
diff --git a/drivers/net/b44.h b/drivers/net/b44.h
index 4944507fad23..18fc13336628 100644
--- a/drivers/net/b44.h
+++ b/drivers/net/b44.h
@@ -308,8 +308,8 @@
308#define MII_TLEDCTRL_ENABLE 0x0040 308#define MII_TLEDCTRL_ENABLE 0x0040
309 309
310struct dma_desc { 310struct dma_desc {
311 u32 ctrl; 311 __le32 ctrl;
312 u32 addr; 312 __le32 addr;
313}; 313};
314 314
315/* There are only 12 bits in the DMA engine for descriptor offsetting 315/* There are only 12 bits in the DMA engine for descriptor offsetting
@@ -327,9 +327,9 @@ struct dma_desc {
327#define RX_COPY_THRESHOLD 256 327#define RX_COPY_THRESHOLD 256
328 328
329struct rx_header { 329struct rx_header {
330 u16 len; 330 __le16 len;
331 u16 flags; 331 __le16 flags;
332 u16 pad[12]; 332 __le16 pad[12];
333}; 333};
334#define RX_HEADER_LEN 28 334#define RX_HEADER_LEN 28
335 335
diff --git a/drivers/net/bmac.c b/drivers/net/bmac.c
index 4528ce9c4e43..c143304dcff5 100644
--- a/drivers/net/bmac.c
+++ b/drivers/net/bmac.c
@@ -18,6 +18,7 @@
18#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/spinlock.h> 19#include <linux/spinlock.h>
20#include <linux/crc32.h> 20#include <linux/crc32.h>
21#include <linux/bitrev.h>
21#include <asm/prom.h> 22#include <asm/prom.h>
22#include <asm/dbdma.h> 23#include <asm/dbdma.h>
23#include <asm/io.h> 24#include <asm/io.h>
@@ -140,7 +141,6 @@ static unsigned char *bmac_emergency_rxbuf;
140 + (N_RX_RING + N_TX_RING + 4) * sizeof(struct dbdma_cmd) \ 141 + (N_RX_RING + N_TX_RING + 4) * sizeof(struct dbdma_cmd) \
141 + sizeof(struct sk_buff_head)) 142 + sizeof(struct sk_buff_head))
142 143
143static unsigned char bitrev(unsigned char b);
144static int bmac_open(struct net_device *dev); 144static int bmac_open(struct net_device *dev);
145static int bmac_close(struct net_device *dev); 145static int bmac_close(struct net_device *dev);
146static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev); 146static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev);
@@ -586,18 +586,6 @@ bmac_construct_rxbuff(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
586 virt_to_bus(addr), 0); 586 virt_to_bus(addr), 0);
587} 587}
588 588
589/* Bit-reverse one byte of an ethernet hardware address. */
590static unsigned char
591bitrev(unsigned char b)
592{
593 int d = 0, i;
594
595 for (i = 0; i < 8; ++i, b >>= 1)
596 d = (d << 1) | (b & 1);
597 return d;
598}
599
600
601static void 589static void
602bmac_init_tx_ring(struct bmac_data *bp) 590bmac_init_tx_ring(struct bmac_data *bp)
603{ 591{
@@ -1224,8 +1212,8 @@ bmac_get_station_address(struct net_device *dev, unsigned char *ea)
1224 { 1212 {
1225 reset_and_select_srom(dev); 1213 reset_and_select_srom(dev);
1226 data = read_srom(dev, i + EnetAddressOffset/2, SROMAddressBits); 1214 data = read_srom(dev, i + EnetAddressOffset/2, SROMAddressBits);
1227 ea[2*i] = bitrev(data & 0x0ff); 1215 ea[2*i] = bitrev8(data & 0x0ff);
1228 ea[2*i+1] = bitrev((data >> 8) & 0x0ff); 1216 ea[2*i+1] = bitrev8((data >> 8) & 0x0ff);
1229 } 1217 }
1230} 1218}
1231 1219
@@ -1315,7 +1303,7 @@ static int __devinit bmac_probe(struct macio_dev *mdev, const struct of_device_i
1315 1303
1316 rev = addr[0] == 0 && addr[1] == 0xA0; 1304 rev = addr[0] == 0 && addr[1] == 0xA0;
1317 for (j = 0; j < 6; ++j) 1305 for (j = 0; j < 6; ++j)
1318 dev->dev_addr[j] = rev? bitrev(addr[j]): addr[j]; 1306 dev->dev_addr[j] = rev ? bitrev8(addr[j]): addr[j];
1319 1307
1320 /* Enable chip without interrupts for now */ 1308 /* Enable chip without interrupts for now */
1321 bmac_enable_and_reset_chip(dev); 1309 bmac_enable_and_reset_chip(dev);
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 953808efe551..5a96d7611af1 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -39,12 +39,9 @@
39#include <linux/if_vlan.h> 39#include <linux/if_vlan.h>
40#define BCM_VLAN 1 40#define BCM_VLAN 1
41#endif 41#endif
42#ifdef NETIF_F_TSO
43#include <net/ip.h> 42#include <net/ip.h>
44#include <net/tcp.h> 43#include <net/tcp.h>
45#include <net/checksum.h> 44#include <net/checksum.h>
46#define BCM_TSO 1
47#endif
48#include <linux/workqueue.h> 45#include <linux/workqueue.h>
49#include <linux/crc32.h> 46#include <linux/crc32.h>
50#include <linux/prefetch.h> 47#include <linux/prefetch.h>
@@ -57,8 +54,8 @@
57 54
58#define DRV_MODULE_NAME "bnx2" 55#define DRV_MODULE_NAME "bnx2"
59#define PFX DRV_MODULE_NAME ": " 56#define PFX DRV_MODULE_NAME ": "
60#define DRV_MODULE_VERSION "1.5.4" 57#define DRV_MODULE_VERSION "1.5.5"
61#define DRV_MODULE_RELDATE "January 24, 2007" 58#define DRV_MODULE_RELDATE "February 1, 2007"
62 59
63#define RUN_AT(x) (jiffies + (x)) 60#define RUN_AT(x) (jiffies + (x))
64 61
@@ -1356,6 +1353,14 @@ bnx2_init_copper_phy(struct bnx2 *bp)
1356 bnx2_write_phy(bp, 0x18, 0x0400); 1353 bnx2_write_phy(bp, 0x18, 0x0400);
1357 } 1354 }
1358 1355
1356 if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
1357 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
1358 MII_BNX2_DSP_EXPAND_REG | 0x8);
1359 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1360 val &= ~(1 << 8);
1361 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
1362 }
1363
1359 if (bp->dev->mtu > 1500) { 1364 if (bp->dev->mtu > 1500) {
1360 /* Set extended packet length bit */ 1365 /* Set extended packet length bit */
1361 bnx2_write_phy(bp, 0x18, 0x7); 1366 bnx2_write_phy(bp, 0x18, 0x7);
@@ -1720,7 +1725,7 @@ bnx2_tx_int(struct bnx2 *bp)
1720 1725
1721 tx_buf = &bp->tx_buf_ring[sw_ring_cons]; 1726 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
1722 skb = tx_buf->skb; 1727 skb = tx_buf->skb;
1723#ifdef BCM_TSO 1728
1724 /* partial BD completions possible with TSO packets */ 1729 /* partial BD completions possible with TSO packets */
1725 if (skb_is_gso(skb)) { 1730 if (skb_is_gso(skb)) {
1726 u16 last_idx, last_ring_idx; 1731 u16 last_idx, last_ring_idx;
@@ -1736,7 +1741,7 @@ bnx2_tx_int(struct bnx2 *bp)
1736 break; 1741 break;
1737 } 1742 }
1738 } 1743 }
1739#endif 1744
1740 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping), 1745 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
1741 skb_headlen(skb), PCI_DMA_TODEVICE); 1746 skb_headlen(skb), PCI_DMA_TODEVICE);
1742 1747
@@ -4506,7 +4511,6 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4506 vlan_tag_flags |= 4511 vlan_tag_flags |=
4507 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16)); 4512 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
4508 } 4513 }
4509#ifdef BCM_TSO
4510 if ((mss = skb_shinfo(skb)->gso_size) && 4514 if ((mss = skb_shinfo(skb)->gso_size) &&
4511 (skb->len > (bp->dev->mtu + ETH_HLEN))) { 4515 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
4512 u32 tcp_opt_len, ip_tcp_len; 4516 u32 tcp_opt_len, ip_tcp_len;
@@ -4539,7 +4543,6 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4539 } 4543 }
4540 } 4544 }
4541 else 4545 else
4542#endif
4543 { 4546 {
4544 mss = 0; 4547 mss = 0;
4545 } 4548 }
@@ -5536,10 +5539,8 @@ static const struct ethtool_ops bnx2_ethtool_ops = {
5536 .set_tx_csum = ethtool_op_set_tx_csum, 5539 .set_tx_csum = ethtool_op_set_tx_csum,
5537 .get_sg = ethtool_op_get_sg, 5540 .get_sg = ethtool_op_get_sg,
5538 .set_sg = ethtool_op_set_sg, 5541 .set_sg = ethtool_op_set_sg,
5539#ifdef BCM_TSO
5540 .get_tso = ethtool_op_get_tso, 5542 .get_tso = ethtool_op_get_tso,
5541 .set_tso = bnx2_set_tso, 5543 .set_tso = bnx2_set_tso,
5542#endif
5543 .self_test_count = bnx2_self_test_count, 5544 .self_test_count = bnx2_self_test_count,
5544 .self_test = bnx2_self_test, 5545 .self_test = bnx2_self_test,
5545 .get_strings = bnx2_get_strings, 5546 .get_strings = bnx2_get_strings,
@@ -5918,6 +5919,8 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5918 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 || 5919 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
5919 CHIP_NUM(bp) == CHIP_NUM_5708) 5920 CHIP_NUM(bp) == CHIP_NUM_5708)
5920 bp->phy_flags |= PHY_CRC_FIX_FLAG; 5921 bp->phy_flags |= PHY_CRC_FIX_FLAG;
5922 else if (CHIP_ID(bp) == CHIP_ID_5709_A0)
5923 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
5921 5924
5922 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) || 5925 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
5923 (CHIP_ID(bp) == CHIP_ID_5708_B0) || 5926 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
@@ -5944,8 +5947,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5944 * responding after a while. 5947 * responding after a while.
5945 * 5948 *
5946 * AMD believes this incompatibility is unique to the 5706, and 5949 * AMD believes this incompatibility is unique to the 5706, and
5947 * prefers to locally disable MSI rather than globally disabling it 5950 * prefers to locally disable MSI rather than globally disabling it.
5948 * using pci_msi_quirk.
5949 */ 5951 */
5950 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) { 5952 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
5951 struct pci_dev *amd_8132 = NULL; 5953 struct pci_dev *amd_8132 = NULL;
@@ -6094,9 +6096,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6094#ifdef BCM_VLAN 6096#ifdef BCM_VLAN
6095 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 6097 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6096#endif 6098#endif
6097#ifdef BCM_TSO
6098 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN; 6099 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6099#endif
6100 6100
6101 netif_carrier_off(bp->dev); 6101 netif_carrier_off(bp->dev);
6102 6102
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 13b6f9b11e01..ccbdf81c6599 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -6288,6 +6288,10 @@ struct l2_fhdr {
6288 6288
6289#define BCM5708S_TX_ACTL3 0x17 6289#define BCM5708S_TX_ACTL3 0x17
6290 6290
6291#define MII_BNX2_DSP_RW_PORT 0x15
6292#define MII_BNX2_DSP_ADDRESS 0x17
6293#define MII_BNX2_DSP_EXPAND_REG 0x0f00
6294
6291#define MIN_ETHERNET_PACKET_SIZE 60 6295#define MIN_ETHERNET_PACKET_SIZE 60
6292#define MAX_ETHERNET_PACKET_SIZE 1514 6296#define MAX_ETHERNET_PACKET_SIZE 1514
6293#define MAX_ETHERNET_JUMBO_PACKET_SIZE 9014 6297#define MAX_ETHERNET_JUMBO_PACKET_SIZE 9014
@@ -6489,6 +6493,7 @@ struct bnx2 {
6489#define PHY_INT_MODE_MASK_FLAG 0x300 6493#define PHY_INT_MODE_MASK_FLAG 0x300
6490#define PHY_INT_MODE_AUTO_POLLING_FLAG 0x100 6494#define PHY_INT_MODE_AUTO_POLLING_FLAG 0x100
6491#define PHY_INT_MODE_LINK_READY_FLAG 0x200 6495#define PHY_INT_MODE_LINK_READY_FLAG 0x200
6496#define PHY_DIS_EARLY_DAC_FLAG 0x400
6492 6497
6493 u32 chip_id; 6498 u32 chip_id;
6494 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */ 6499 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
@@ -6512,6 +6517,7 @@ struct bnx2 {
6512#define CHIP_ID_5708_A0 0x57080000 6517#define CHIP_ID_5708_A0 0x57080000
6513#define CHIP_ID_5708_B0 0x57081000 6518#define CHIP_ID_5708_B0 0x57081000
6514#define CHIP_ID_5708_B1 0x57081010 6519#define CHIP_ID_5708_B1 0x57081010
6520#define CHIP_ID_5709_A0 0x57090000
6515 6521
6516#define CHIP_BOND_ID(bp) (((bp)->chip_id) & 0xf) 6522#define CHIP_BOND_ID(bp) (((bp)->chip_id) & 0xf)
6517 6523
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 32923162179e..217a2eedee0a 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -184,7 +184,7 @@ static int tlb_initialize(struct bonding *bond)
184 184
185 spin_lock_init(&(bond_info->tx_hashtbl_lock)); 185 spin_lock_init(&(bond_info->tx_hashtbl_lock));
186 186
187 new_hashtbl = kmalloc(size, GFP_KERNEL); 187 new_hashtbl = kzalloc(size, GFP_KERNEL);
188 if (!new_hashtbl) { 188 if (!new_hashtbl) {
189 printk(KERN_ERR DRV_NAME 189 printk(KERN_ERR DRV_NAME
190 ": %s: Error: Failed to allocate TLB hash table\n", 190 ": %s: Error: Failed to allocate TLB hash table\n",
@@ -195,8 +195,6 @@ static int tlb_initialize(struct bonding *bond)
195 195
196 bond_info->tx_hashtbl = new_hashtbl; 196 bond_info->tx_hashtbl = new_hashtbl;
197 197
198 memset(bond_info->tx_hashtbl, 0, size);
199
200 for (i = 0; i < TLB_HASH_TABLE_SIZE; i++) { 198 for (i = 0; i < TLB_HASH_TABLE_SIZE; i++) {
201 tlb_init_table_entry(&bond_info->tx_hashtbl[i], 1); 199 tlb_init_table_entry(&bond_info->tx_hashtbl[i], 1);
202 } 200 }
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 6482aed4bb7c..8ce8fec615ba 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1343,14 +1343,12 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1343 "inaccurate.\n", bond_dev->name, slave_dev->name); 1343 "inaccurate.\n", bond_dev->name, slave_dev->name);
1344 } 1344 }
1345 1345
1346 new_slave = kmalloc(sizeof(struct slave), GFP_KERNEL); 1346 new_slave = kzalloc(sizeof(struct slave), GFP_KERNEL);
1347 if (!new_slave) { 1347 if (!new_slave) {
1348 res = -ENOMEM; 1348 res = -ENOMEM;
1349 goto err_undo_flags; 1349 goto err_undo_flags;
1350 } 1350 }
1351 1351
1352 memset(new_slave, 0, sizeof(struct slave));
1353
1354 /* save slave's original flags before calling 1352 /* save slave's original flags before calling
1355 * netdev_set_master and dev_open 1353 * netdev_set_master and dev_open
1356 */ 1354 */
@@ -4704,6 +4702,7 @@ static int bond_check_params(struct bond_params *params)
4704static struct lock_class_key bonding_netdev_xmit_lock_key; 4702static struct lock_class_key bonding_netdev_xmit_lock_key;
4705 4703
4706/* Create a new bond based on the specified name and bonding parameters. 4704/* Create a new bond based on the specified name and bonding parameters.
4705 * If name is NULL, obtain a suitable "bond%d" name for us.
4707 * Caller must NOT hold rtnl_lock; we need to release it here before we 4706 * Caller must NOT hold rtnl_lock; we need to release it here before we
4708 * set up our sysfs entries. 4707 * set up our sysfs entries.
4709 */ 4708 */
@@ -4713,7 +4712,8 @@ int bond_create(char *name, struct bond_params *params, struct bonding **newbond
4713 int res; 4712 int res;
4714 4713
4715 rtnl_lock(); 4714 rtnl_lock();
4716 bond_dev = alloc_netdev(sizeof(struct bonding), name, ether_setup); 4715 bond_dev = alloc_netdev(sizeof(struct bonding), name ? name : "",
4716 ether_setup);
4717 if (!bond_dev) { 4717 if (!bond_dev) {
4718 printk(KERN_ERR DRV_NAME 4718 printk(KERN_ERR DRV_NAME
4719 ": %s: eek! can't alloc netdev!\n", 4719 ": %s: eek! can't alloc netdev!\n",
@@ -4722,6 +4722,12 @@ int bond_create(char *name, struct bond_params *params, struct bonding **newbond
4722 goto out_rtnl; 4722 goto out_rtnl;
4723 } 4723 }
4724 4724
4725 if (!name) {
4726 res = dev_alloc_name(bond_dev, "bond%d");
4727 if (res < 0)
4728 goto out_netdev;
4729 }
4730
4725 /* bond_init() must be called after dev_alloc_name() (for the 4731 /* bond_init() must be called after dev_alloc_name() (for the
4726 * /proc files), but before register_netdevice(), because we 4732 * /proc files), but before register_netdevice(), because we
4727 * need to set function pointers. 4733 * need to set function pointers.
@@ -4748,14 +4754,19 @@ int bond_create(char *name, struct bond_params *params, struct bonding **newbond
4748 4754
4749 rtnl_unlock(); /* allows sysfs registration of net device */ 4755 rtnl_unlock(); /* allows sysfs registration of net device */
4750 res = bond_create_sysfs_entry(bond_dev->priv); 4756 res = bond_create_sysfs_entry(bond_dev->priv);
4751 goto done; 4757 if (res < 0) {
4758 rtnl_lock();
4759 goto out_bond;
4760 }
4761
4762 return 0;
4763
4752out_bond: 4764out_bond:
4753 bond_deinit(bond_dev); 4765 bond_deinit(bond_dev);
4754out_netdev: 4766out_netdev:
4755 free_netdev(bond_dev); 4767 free_netdev(bond_dev);
4756out_rtnl: 4768out_rtnl:
4757 rtnl_unlock(); 4769 rtnl_unlock();
4758done:
4759 return res; 4770 return res;
4760} 4771}
4761 4772
@@ -4763,7 +4774,6 @@ static int __init bonding_init(void)
4763{ 4774{
4764 int i; 4775 int i;
4765 int res; 4776 int res;
4766 char new_bond_name[8]; /* Enough room for 999 bonds at init. */
4767 4777
4768 printk(KERN_INFO "%s", version); 4778 printk(KERN_INFO "%s", version);
4769 4779
@@ -4776,8 +4786,7 @@ static int __init bonding_init(void)
4776 bond_create_proc_dir(); 4786 bond_create_proc_dir();
4777#endif 4787#endif
4778 for (i = 0; i < max_bonds; i++) { 4788 for (i = 0; i < max_bonds; i++) {
4779 sprintf(new_bond_name, "bond%d",i); 4789 res = bond_create(NULL, &bonding_defaults, NULL);
4780 res = bond_create(new_bond_name,&bonding_defaults, NULL);
4781 if (res) 4790 if (res)
4782 goto err; 4791 goto err;
4783 } 4792 }
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index ced9ed8f995a..878f7aabeeac 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -39,8 +39,7 @@
39 39
40/* #define BONDING_DEBUG 1 */ 40/* #define BONDING_DEBUG 1 */
41#include "bonding.h" 41#include "bonding.h"
42#define to_class_dev(obj) container_of(obj,struct class_device,kobj) 42#define to_dev(obj) container_of(obj,struct device,kobj)
43#define to_net_dev(class) container_of(class, struct net_device, class_dev)
44#define to_bond(cd) ((struct bonding *)(to_net_dev(cd)->priv)) 43#define to_bond(cd) ((struct bonding *)(to_net_dev(cd)->priv))
45 44
46/*---------------------------- Declarations -------------------------------*/ 45/*---------------------------- Declarations -------------------------------*/
@@ -154,7 +153,7 @@ static ssize_t bonding_store_bonds(struct class *cls, const char *buffer, size_t
154 * If it's > expected, then there's a file open, 153 * If it's > expected, then there's a file open,
155 * and we have to fail. 154 * and we have to fail.
156 */ 155 */
157 if (atomic_read(&bond->dev->class_dev.kobj.kref.refcount) 156 if (atomic_read(&bond->dev->dev.kobj.kref.refcount)
158 > expected_refcount){ 157 > expected_refcount){
159 rtnl_unlock(); 158 rtnl_unlock();
160 printk(KERN_INFO DRV_NAME 159 printk(KERN_INFO DRV_NAME
@@ -201,13 +200,13 @@ int bond_create_slave_symlinks(struct net_device *master, struct net_device *sla
201 int ret = 0; 200 int ret = 0;
202 201
203 /* first, create a link from the slave back to the master */ 202 /* first, create a link from the slave back to the master */
204 ret = sysfs_create_link(&(slave->class_dev.kobj), &(master->class_dev.kobj), 203 ret = sysfs_create_link(&(slave->dev.kobj), &(master->dev.kobj),
205 "master"); 204 "master");
206 if (ret) 205 if (ret)
207 return ret; 206 return ret;
208 /* next, create a link from the master to the slave */ 207 /* next, create a link from the master to the slave */
209 sprintf(linkname,"slave_%s",slave->name); 208 sprintf(linkname,"slave_%s",slave->name);
210 ret = sysfs_create_link(&(master->class_dev.kobj), &(slave->class_dev.kobj), 209 ret = sysfs_create_link(&(master->dev.kobj), &(slave->dev.kobj),
211 linkname); 210 linkname);
212 return ret; 211 return ret;
213 212
@@ -217,20 +216,21 @@ void bond_destroy_slave_symlinks(struct net_device *master, struct net_device *s
217{ 216{
218 char linkname[IFNAMSIZ+7]; 217 char linkname[IFNAMSIZ+7];
219 218
220 sysfs_remove_link(&(slave->class_dev.kobj), "master"); 219 sysfs_remove_link(&(slave->dev.kobj), "master");
221 sprintf(linkname,"slave_%s",slave->name); 220 sprintf(linkname,"slave_%s",slave->name);
222 sysfs_remove_link(&(master->class_dev.kobj), linkname); 221 sysfs_remove_link(&(master->dev.kobj), linkname);
223} 222}
224 223
225 224
226/* 225/*
227 * Show the slaves in the current bond. 226 * Show the slaves in the current bond.
228 */ 227 */
229static ssize_t bonding_show_slaves(struct class_device *cd, char *buf) 228static ssize_t bonding_show_slaves(struct device *d,
229 struct device_attribute *attr, char *buf)
230{ 230{
231 struct slave *slave; 231 struct slave *slave;
232 int i, res = 0; 232 int i, res = 0;
233 struct bonding *bond = to_bond(cd); 233 struct bonding *bond = to_bond(d);
234 234
235 read_lock_bh(&bond->lock); 235 read_lock_bh(&bond->lock);
236 bond_for_each_slave(bond, slave, i) { 236 bond_for_each_slave(bond, slave, i) {
@@ -254,14 +254,16 @@ static ssize_t bonding_show_slaves(struct class_device *cd, char *buf)
254 * up for this to succeed. 254 * up for this to succeed.
255 * This function is largely the same flow as bonding_update_bonds(). 255 * This function is largely the same flow as bonding_update_bonds().
256 */ 256 */
257static ssize_t bonding_store_slaves(struct class_device *cd, const char *buffer, size_t count) 257static ssize_t bonding_store_slaves(struct device *d,
258 struct device_attribute *attr,
259 const char *buffer, size_t count)
258{ 260{
259 char command[IFNAMSIZ + 1] = { 0, }; 261 char command[IFNAMSIZ + 1] = { 0, };
260 char *ifname; 262 char *ifname;
261 int i, res, found, ret = count; 263 int i, res, found, ret = count;
262 struct slave *slave; 264 struct slave *slave;
263 struct net_device *dev = NULL; 265 struct net_device *dev = NULL;
264 struct bonding *bond = to_bond(cd); 266 struct bonding *bond = to_bond(d);
265 267
266 /* Quick sanity check -- is the bond interface up? */ 268 /* Quick sanity check -- is the bond interface up? */
267 if (!(bond->dev->flags & IFF_UP)) { 269 if (!(bond->dev->flags & IFF_UP)) {
@@ -387,25 +389,28 @@ out:
387 return ret; 389 return ret;
388} 390}
389 391
390static CLASS_DEVICE_ATTR(slaves, S_IRUGO | S_IWUSR, bonding_show_slaves, bonding_store_slaves); 392static DEVICE_ATTR(slaves, S_IRUGO | S_IWUSR, bonding_show_slaves, bonding_store_slaves);
391 393
392/* 394/*
393 * Show and set the bonding mode. The bond interface must be down to 395 * Show and set the bonding mode. The bond interface must be down to
394 * change the mode. 396 * change the mode.
395 */ 397 */
396static ssize_t bonding_show_mode(struct class_device *cd, char *buf) 398static ssize_t bonding_show_mode(struct device *d,
399 struct device_attribute *attr, char *buf)
397{ 400{
398 struct bonding *bond = to_bond(cd); 401 struct bonding *bond = to_bond(d);
399 402
400 return sprintf(buf, "%s %d\n", 403 return sprintf(buf, "%s %d\n",
401 bond_mode_tbl[bond->params.mode].modename, 404 bond_mode_tbl[bond->params.mode].modename,
402 bond->params.mode) + 1; 405 bond->params.mode) + 1;
403} 406}
404 407
405static ssize_t bonding_store_mode(struct class_device *cd, const char *buf, size_t count) 408static ssize_t bonding_store_mode(struct device *d,
409 struct device_attribute *attr,
410 const char *buf, size_t count)
406{ 411{
407 int new_value, ret = count; 412 int new_value, ret = count;
408 struct bonding *bond = to_bond(cd); 413 struct bonding *bond = to_bond(d);
409 414
410 if (bond->dev->flags & IFF_UP) { 415 if (bond->dev->flags & IFF_UP) {
411 printk(KERN_ERR DRV_NAME 416 printk(KERN_ERR DRV_NAME
@@ -438,16 +443,18 @@ static ssize_t bonding_store_mode(struct class_device *cd, const char *buf, size
438out: 443out:
439 return ret; 444 return ret;
440} 445}
441static CLASS_DEVICE_ATTR(mode, S_IRUGO | S_IWUSR, bonding_show_mode, bonding_store_mode); 446static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR, bonding_show_mode, bonding_store_mode);
442 447
443/* 448/*
444 * Show and set the bonding transmit hash method. The bond interface must be down to 449 * Show and set the bonding transmit hash method. The bond interface must be down to
445 * change the xmit hash policy. 450 * change the xmit hash policy.
446 */ 451 */
447static ssize_t bonding_show_xmit_hash(struct class_device *cd, char *buf) 452static ssize_t bonding_show_xmit_hash(struct device *d,
453 struct device_attribute *attr,
454 char *buf)
448{ 455{
449 int count; 456 int count;
450 struct bonding *bond = to_bond(cd); 457 struct bonding *bond = to_bond(d);
451 458
452 if ((bond->params.mode != BOND_MODE_XOR) && 459 if ((bond->params.mode != BOND_MODE_XOR) &&
453 (bond->params.mode != BOND_MODE_8023AD)) { 460 (bond->params.mode != BOND_MODE_8023AD)) {
@@ -462,10 +469,12 @@ static ssize_t bonding_show_xmit_hash(struct class_device *cd, char *buf)
462 return count; 469 return count;
463} 470}
464 471
465static ssize_t bonding_store_xmit_hash(struct class_device *cd, const char *buf, size_t count) 472static ssize_t bonding_store_xmit_hash(struct device *d,
473 struct device_attribute *attr,
474 const char *buf, size_t count)
466{ 475{
467 int new_value, ret = count; 476 int new_value, ret = count;
468 struct bonding *bond = to_bond(cd); 477 struct bonding *bond = to_bond(d);
469 478
470 if (bond->dev->flags & IFF_UP) { 479 if (bond->dev->flags & IFF_UP) {
471 printk(KERN_ERR DRV_NAME 480 printk(KERN_ERR DRV_NAME
@@ -501,24 +510,28 @@ static ssize_t bonding_store_xmit_hash(struct class_device *cd, const char *buf,
501out: 510out:
502 return ret; 511 return ret;
503} 512}
504static CLASS_DEVICE_ATTR(xmit_hash_policy, S_IRUGO | S_IWUSR, bonding_show_xmit_hash, bonding_store_xmit_hash); 513static DEVICE_ATTR(xmit_hash_policy, S_IRUGO | S_IWUSR, bonding_show_xmit_hash, bonding_store_xmit_hash);
505 514
506/* 515/*
507 * Show and set arp_validate. 516 * Show and set arp_validate.
508 */ 517 */
509static ssize_t bonding_show_arp_validate(struct class_device *cd, char *buf) 518static ssize_t bonding_show_arp_validate(struct device *d,
519 struct device_attribute *attr,
520 char *buf)
510{ 521{
511 struct bonding *bond = to_bond(cd); 522 struct bonding *bond = to_bond(d);
512 523
513 return sprintf(buf, "%s %d\n", 524 return sprintf(buf, "%s %d\n",
514 arp_validate_tbl[bond->params.arp_validate].modename, 525 arp_validate_tbl[bond->params.arp_validate].modename,
515 bond->params.arp_validate) + 1; 526 bond->params.arp_validate) + 1;
516} 527}
517 528
518static ssize_t bonding_store_arp_validate(struct class_device *cd, const char *buf, size_t count) 529static ssize_t bonding_store_arp_validate(struct device *d,
530 struct device_attribute *attr,
531 const char *buf, size_t count)
519{ 532{
520 int new_value; 533 int new_value;
521 struct bonding *bond = to_bond(cd); 534 struct bonding *bond = to_bond(d);
522 535
523 new_value = bond_parse_parm((char *)buf, arp_validate_tbl); 536 new_value = bond_parse_parm((char *)buf, arp_validate_tbl);
524 if (new_value < 0) { 537 if (new_value < 0) {
@@ -548,7 +561,7 @@ static ssize_t bonding_store_arp_validate(struct class_device *cd, const char *b
548 return count; 561 return count;
549} 562}
550 563
551static CLASS_DEVICE_ATTR(arp_validate, S_IRUGO | S_IWUSR, bonding_show_arp_validate, bonding_store_arp_validate); 564static DEVICE_ATTR(arp_validate, S_IRUGO | S_IWUSR, bonding_show_arp_validate, bonding_store_arp_validate);
552 565
553/* 566/*
554 * Show and set the arp timer interval. There are two tricky bits 567 * Show and set the arp timer interval. There are two tricky bits
@@ -556,17 +569,21 @@ static CLASS_DEVICE_ATTR(arp_validate, S_IRUGO | S_IWUSR, bonding_show_arp_valid
556 * MII monitoring. Second, if the ARP timer isn't running, we must 569 * MII monitoring. Second, if the ARP timer isn't running, we must
557 * start it. 570 * start it.
558 */ 571 */
559static ssize_t bonding_show_arp_interval(struct class_device *cd, char *buf) 572static ssize_t bonding_show_arp_interval(struct device *d,
573 struct device_attribute *attr,
574 char *buf)
560{ 575{
561 struct bonding *bond = to_bond(cd); 576 struct bonding *bond = to_bond(d);
562 577
563 return sprintf(buf, "%d\n", bond->params.arp_interval) + 1; 578 return sprintf(buf, "%d\n", bond->params.arp_interval) + 1;
564} 579}
565 580
566static ssize_t bonding_store_arp_interval(struct class_device *cd, const char *buf, size_t count) 581static ssize_t bonding_store_arp_interval(struct device *d,
582 struct device_attribute *attr,
583 const char *buf, size_t count)
567{ 584{
568 int new_value, ret = count; 585 int new_value, ret = count;
569 struct bonding *bond = to_bond(cd); 586 struct bonding *bond = to_bond(d);
570 587
571 if (sscanf(buf, "%d", &new_value) != 1) { 588 if (sscanf(buf, "%d", &new_value) != 1) {
572 printk(KERN_ERR DRV_NAME 589 printk(KERN_ERR DRV_NAME
@@ -638,15 +655,17 @@ static ssize_t bonding_store_arp_interval(struct class_device *cd, const char *b
638out: 655out:
639 return ret; 656 return ret;
640} 657}
641static CLASS_DEVICE_ATTR(arp_interval, S_IRUGO | S_IWUSR , bonding_show_arp_interval, bonding_store_arp_interval); 658static DEVICE_ATTR(arp_interval, S_IRUGO | S_IWUSR , bonding_show_arp_interval, bonding_store_arp_interval);
642 659
643/* 660/*
644 * Show and set the arp targets. 661 * Show and set the arp targets.
645 */ 662 */
646static ssize_t bonding_show_arp_targets(struct class_device *cd, char *buf) 663static ssize_t bonding_show_arp_targets(struct device *d,
664 struct device_attribute *attr,
665 char *buf)
647{ 666{
648 int i, res = 0; 667 int i, res = 0;
649 struct bonding *bond = to_bond(cd); 668 struct bonding *bond = to_bond(d);
650 669
651 for (i = 0; i < BOND_MAX_ARP_TARGETS; i++) { 670 for (i = 0; i < BOND_MAX_ARP_TARGETS; i++) {
652 if (bond->params.arp_targets[i]) 671 if (bond->params.arp_targets[i])
@@ -660,11 +679,13 @@ static ssize_t bonding_show_arp_targets(struct class_device *cd, char *buf)
660 return res; 679 return res;
661} 680}
662 681
663static ssize_t bonding_store_arp_targets(struct class_device *cd, const char *buf, size_t count) 682static ssize_t bonding_store_arp_targets(struct device *d,
683 struct device_attribute *attr,
684 const char *buf, size_t count)
664{ 685{
665 u32 newtarget; 686 u32 newtarget;
666 int i = 0, done = 0, ret = count; 687 int i = 0, done = 0, ret = count;
667 struct bonding *bond = to_bond(cd); 688 struct bonding *bond = to_bond(d);
668 u32 *targets; 689 u32 *targets;
669 690
670 targets = bond->params.arp_targets; 691 targets = bond->params.arp_targets;
@@ -742,24 +763,28 @@ static ssize_t bonding_store_arp_targets(struct class_device *cd, const char *bu
742out: 763out:
743 return ret; 764 return ret;
744} 765}
745static CLASS_DEVICE_ATTR(arp_ip_target, S_IRUGO | S_IWUSR , bonding_show_arp_targets, bonding_store_arp_targets); 766static DEVICE_ATTR(arp_ip_target, S_IRUGO | S_IWUSR , bonding_show_arp_targets, bonding_store_arp_targets);
746 767
747/* 768/*
748 * Show and set the up and down delays. These must be multiples of the 769 * Show and set the up and down delays. These must be multiples of the
749 * MII monitoring value, and are stored internally as the multiplier. 770 * MII monitoring value, and are stored internally as the multiplier.
750 * Thus, we must translate to MS for the real world. 771 * Thus, we must translate to MS for the real world.
751 */ 772 */
752static ssize_t bonding_show_downdelay(struct class_device *cd, char *buf) 773static ssize_t bonding_show_downdelay(struct device *d,
774 struct device_attribute *attr,
775 char *buf)
753{ 776{
754 struct bonding *bond = to_bond(cd); 777 struct bonding *bond = to_bond(d);
755 778
756 return sprintf(buf, "%d\n", bond->params.downdelay * bond->params.miimon) + 1; 779 return sprintf(buf, "%d\n", bond->params.downdelay * bond->params.miimon) + 1;
757} 780}
758 781
759static ssize_t bonding_store_downdelay(struct class_device *cd, const char *buf, size_t count) 782static ssize_t bonding_store_downdelay(struct device *d,
783 struct device_attribute *attr,
784 const char *buf, size_t count)
760{ 785{
761 int new_value, ret = count; 786 int new_value, ret = count;
762 struct bonding *bond = to_bond(cd); 787 struct bonding *bond = to_bond(d);
763 788
764 if (!(bond->params.miimon)) { 789 if (!(bond->params.miimon)) {
765 printk(KERN_ERR DRV_NAME 790 printk(KERN_ERR DRV_NAME
@@ -800,20 +825,24 @@ static ssize_t bonding_store_downdelay(struct class_device *cd, const char *buf,
800out: 825out:
801 return ret; 826 return ret;
802} 827}
803static CLASS_DEVICE_ATTR(downdelay, S_IRUGO | S_IWUSR , bonding_show_downdelay, bonding_store_downdelay); 828static DEVICE_ATTR(downdelay, S_IRUGO | S_IWUSR , bonding_show_downdelay, bonding_store_downdelay);
804 829
805static ssize_t bonding_show_updelay(struct class_device *cd, char *buf) 830static ssize_t bonding_show_updelay(struct device *d,
831 struct device_attribute *attr,
832 char *buf)
806{ 833{
807 struct bonding *bond = to_bond(cd); 834 struct bonding *bond = to_bond(d);
808 835
809 return sprintf(buf, "%d\n", bond->params.updelay * bond->params.miimon) + 1; 836 return sprintf(buf, "%d\n", bond->params.updelay * bond->params.miimon) + 1;
810 837
811} 838}
812 839
813static ssize_t bonding_store_updelay(struct class_device *cd, const char *buf, size_t count) 840static ssize_t bonding_store_updelay(struct device *d,
841 struct device_attribute *attr,
842 const char *buf, size_t count)
814{ 843{
815 int new_value, ret = count; 844 int new_value, ret = count;
816 struct bonding *bond = to_bond(cd); 845 struct bonding *bond = to_bond(d);
817 846
818 if (!(bond->params.miimon)) { 847 if (!(bond->params.miimon)) {
819 printk(KERN_ERR DRV_NAME 848 printk(KERN_ERR DRV_NAME
@@ -854,25 +883,29 @@ static ssize_t bonding_store_updelay(struct class_device *cd, const char *buf, s
854out: 883out:
855 return ret; 884 return ret;
856} 885}
857static CLASS_DEVICE_ATTR(updelay, S_IRUGO | S_IWUSR , bonding_show_updelay, bonding_store_updelay); 886static DEVICE_ATTR(updelay, S_IRUGO | S_IWUSR , bonding_show_updelay, bonding_store_updelay);
858 887
859/* 888/*
860 * Show and set the LACP interval. Interface must be down, and the mode 889 * Show and set the LACP interval. Interface must be down, and the mode
861 * must be set to 802.3ad mode. 890 * must be set to 802.3ad mode.
862 */ 891 */
863static ssize_t bonding_show_lacp(struct class_device *cd, char *buf) 892static ssize_t bonding_show_lacp(struct device *d,
893 struct device_attribute *attr,
894 char *buf)
864{ 895{
865 struct bonding *bond = to_bond(cd); 896 struct bonding *bond = to_bond(d);
866 897
867 return sprintf(buf, "%s %d\n", 898 return sprintf(buf, "%s %d\n",
868 bond_lacp_tbl[bond->params.lacp_fast].modename, 899 bond_lacp_tbl[bond->params.lacp_fast].modename,
869 bond->params.lacp_fast) + 1; 900 bond->params.lacp_fast) + 1;
870} 901}
871 902
872static ssize_t bonding_store_lacp(struct class_device *cd, const char *buf, size_t count) 903static ssize_t bonding_store_lacp(struct device *d,
904 struct device_attribute *attr,
905 const char *buf, size_t count)
873{ 906{
874 int new_value, ret = count; 907 int new_value, ret = count;
875 struct bonding *bond = to_bond(cd); 908 struct bonding *bond = to_bond(d);
876 909
877 if (bond->dev->flags & IFF_UP) { 910 if (bond->dev->flags & IFF_UP) {
878 printk(KERN_ERR DRV_NAME 911 printk(KERN_ERR DRV_NAME
@@ -906,7 +939,7 @@ static ssize_t bonding_store_lacp(struct class_device *cd, const char *buf, size
906out: 939out:
907 return ret; 940 return ret;
908} 941}
909static CLASS_DEVICE_ATTR(lacp_rate, S_IRUGO | S_IWUSR, bonding_show_lacp, bonding_store_lacp); 942static DEVICE_ATTR(lacp_rate, S_IRUGO | S_IWUSR, bonding_show_lacp, bonding_store_lacp);
910 943
911/* 944/*
912 * Show and set the MII monitor interval. There are two tricky bits 945 * Show and set the MII monitor interval. There are two tricky bits
@@ -914,17 +947,21 @@ static CLASS_DEVICE_ATTR(lacp_rate, S_IRUGO | S_IWUSR, bonding_show_lacp, bondin
914 * ARP monitoring. Second, if the timer isn't running, we must 947 * ARP monitoring. Second, if the timer isn't running, we must
915 * start it. 948 * start it.
916 */ 949 */
917static ssize_t bonding_show_miimon(struct class_device *cd, char *buf) 950static ssize_t bonding_show_miimon(struct device *d,
951 struct device_attribute *attr,
952 char *buf)
918{ 953{
919 struct bonding *bond = to_bond(cd); 954 struct bonding *bond = to_bond(d);
920 955
921 return sprintf(buf, "%d\n", bond->params.miimon) + 1; 956 return sprintf(buf, "%d\n", bond->params.miimon) + 1;
922} 957}
923 958
924static ssize_t bonding_store_miimon(struct class_device *cd, const char *buf, size_t count) 959static ssize_t bonding_store_miimon(struct device *d,
960 struct device_attribute *attr,
961 const char *buf, size_t count)
925{ 962{
926 int new_value, ret = count; 963 int new_value, ret = count;
927 struct bonding *bond = to_bond(cd); 964 struct bonding *bond = to_bond(d);
928 965
929 if (sscanf(buf, "%d", &new_value) != 1) { 966 if (sscanf(buf, "%d", &new_value) != 1) {
930 printk(KERN_ERR DRV_NAME 967 printk(KERN_ERR DRV_NAME
@@ -1000,7 +1037,7 @@ static ssize_t bonding_store_miimon(struct class_device *cd, const char *buf, si
1000out: 1037out:
1001 return ret; 1038 return ret;
1002} 1039}
1003static CLASS_DEVICE_ATTR(miimon, S_IRUGO | S_IWUSR, bonding_show_miimon, bonding_store_miimon); 1040static DEVICE_ATTR(miimon, S_IRUGO | S_IWUSR, bonding_show_miimon, bonding_store_miimon);
1004 1041
1005/* 1042/*
1006 * Show and set the primary slave. The store function is much 1043 * Show and set the primary slave. The store function is much
@@ -1009,10 +1046,12 @@ static CLASS_DEVICE_ATTR(miimon, S_IRUGO | S_IWUSR, bonding_show_miimon, bonding
1009 * The bond must be a mode that supports a primary for this be 1046 * The bond must be a mode that supports a primary for this be
1010 * set. 1047 * set.
1011 */ 1048 */
1012static ssize_t bonding_show_primary(struct class_device *cd, char *buf) 1049static ssize_t bonding_show_primary(struct device *d,
1050 struct device_attribute *attr,
1051 char *buf)
1013{ 1052{
1014 int count = 0; 1053 int count = 0;
1015 struct bonding *bond = to_bond(cd); 1054 struct bonding *bond = to_bond(d);
1016 1055
1017 if (bond->primary_slave) 1056 if (bond->primary_slave)
1018 count = sprintf(buf, "%s\n", bond->primary_slave->dev->name) + 1; 1057 count = sprintf(buf, "%s\n", bond->primary_slave->dev->name) + 1;
@@ -1022,11 +1061,13 @@ static ssize_t bonding_show_primary(struct class_device *cd, char *buf)
1022 return count; 1061 return count;
1023} 1062}
1024 1063
1025static ssize_t bonding_store_primary(struct class_device *cd, const char *buf, size_t count) 1064static ssize_t bonding_store_primary(struct device *d,
1065 struct device_attribute *attr,
1066 const char *buf, size_t count)
1026{ 1067{
1027 int i; 1068 int i;
1028 struct slave *slave; 1069 struct slave *slave;
1029 struct bonding *bond = to_bond(cd); 1070 struct bonding *bond = to_bond(d);
1030 1071
1031 write_lock_bh(&bond->lock); 1072 write_lock_bh(&bond->lock);
1032 if (!USES_PRIMARY(bond->params.mode)) { 1073 if (!USES_PRIMARY(bond->params.mode)) {
@@ -1065,22 +1106,26 @@ out:
1065 write_unlock_bh(&bond->lock); 1106 write_unlock_bh(&bond->lock);
1066 return count; 1107 return count;
1067} 1108}
1068static CLASS_DEVICE_ATTR(primary, S_IRUGO | S_IWUSR, bonding_show_primary, bonding_store_primary); 1109static DEVICE_ATTR(primary, S_IRUGO | S_IWUSR, bonding_show_primary, bonding_store_primary);
1069 1110
1070/* 1111/*
1071 * Show and set the use_carrier flag. 1112 * Show and set the use_carrier flag.
1072 */ 1113 */
1073static ssize_t bonding_show_carrier(struct class_device *cd, char *buf) 1114static ssize_t bonding_show_carrier(struct device *d,
1115 struct device_attribute *attr,
1116 char *buf)
1074{ 1117{
1075 struct bonding *bond = to_bond(cd); 1118 struct bonding *bond = to_bond(d);
1076 1119
1077 return sprintf(buf, "%d\n", bond->params.use_carrier) + 1; 1120 return sprintf(buf, "%d\n", bond->params.use_carrier) + 1;
1078} 1121}
1079 1122
1080static ssize_t bonding_store_carrier(struct class_device *cd, const char *buf, size_t count) 1123static ssize_t bonding_store_carrier(struct device *d,
1124 struct device_attribute *attr,
1125 const char *buf, size_t count)
1081{ 1126{
1082 int new_value, ret = count; 1127 int new_value, ret = count;
1083 struct bonding *bond = to_bond(cd); 1128 struct bonding *bond = to_bond(d);
1084 1129
1085 1130
1086 if (sscanf(buf, "%d", &new_value) != 1) { 1131 if (sscanf(buf, "%d", &new_value) != 1) {
@@ -1102,16 +1147,18 @@ static ssize_t bonding_store_carrier(struct class_device *cd, const char *buf, s
1102out: 1147out:
1103 return count; 1148 return count;
1104} 1149}
1105static CLASS_DEVICE_ATTR(use_carrier, S_IRUGO | S_IWUSR, bonding_show_carrier, bonding_store_carrier); 1150static DEVICE_ATTR(use_carrier, S_IRUGO | S_IWUSR, bonding_show_carrier, bonding_store_carrier);
1106 1151
1107 1152
1108/* 1153/*
1109 * Show and set currently active_slave. 1154 * Show and set currently active_slave.
1110 */ 1155 */
1111static ssize_t bonding_show_active_slave(struct class_device *cd, char *buf) 1156static ssize_t bonding_show_active_slave(struct device *d,
1157 struct device_attribute *attr,
1158 char *buf)
1112{ 1159{
1113 struct slave *curr; 1160 struct slave *curr;
1114 struct bonding *bond = to_bond(cd); 1161 struct bonding *bond = to_bond(d);
1115 int count; 1162 int count;
1116 1163
1117 1164
@@ -1126,13 +1173,15 @@ static ssize_t bonding_show_active_slave(struct class_device *cd, char *buf)
1126 return count; 1173 return count;
1127} 1174}
1128 1175
1129static ssize_t bonding_store_active_slave(struct class_device *cd, const char *buf, size_t count) 1176static ssize_t bonding_store_active_slave(struct device *d,
1177 struct device_attribute *attr,
1178 const char *buf, size_t count)
1130{ 1179{
1131 int i; 1180 int i;
1132 struct slave *slave; 1181 struct slave *slave;
1133 struct slave *old_active = NULL; 1182 struct slave *old_active = NULL;
1134 struct slave *new_active = NULL; 1183 struct slave *new_active = NULL;
1135 struct bonding *bond = to_bond(cd); 1184 struct bonding *bond = to_bond(d);
1136 1185
1137 write_lock_bh(&bond->lock); 1186 write_lock_bh(&bond->lock);
1138 if (!USES_PRIMARY(bond->params.mode)) { 1187 if (!USES_PRIMARY(bond->params.mode)) {
@@ -1194,16 +1243,18 @@ out:
1194 return count; 1243 return count;
1195 1244
1196} 1245}
1197static CLASS_DEVICE_ATTR(active_slave, S_IRUGO | S_IWUSR, bonding_show_active_slave, bonding_store_active_slave); 1246static DEVICE_ATTR(active_slave, S_IRUGO | S_IWUSR, bonding_show_active_slave, bonding_store_active_slave);
1198 1247
1199 1248
1200/* 1249/*
1201 * Show link status of the bond interface. 1250 * Show link status of the bond interface.
1202 */ 1251 */
1203static ssize_t bonding_show_mii_status(struct class_device *cd, char *buf) 1252static ssize_t bonding_show_mii_status(struct device *d,
1253 struct device_attribute *attr,
1254 char *buf)
1204{ 1255{
1205 struct slave *curr; 1256 struct slave *curr;
1206 struct bonding *bond = to_bond(cd); 1257 struct bonding *bond = to_bond(d);
1207 1258
1208 read_lock(&bond->curr_slave_lock); 1259 read_lock(&bond->curr_slave_lock);
1209 curr = bond->curr_active_slave; 1260 curr = bond->curr_active_slave;
@@ -1211,16 +1262,18 @@ static ssize_t bonding_show_mii_status(struct class_device *cd, char *buf)
1211 1262
1212 return sprintf(buf, "%s\n", (curr) ? "up" : "down") + 1; 1263 return sprintf(buf, "%s\n", (curr) ? "up" : "down") + 1;
1213} 1264}
1214static CLASS_DEVICE_ATTR(mii_status, S_IRUGO, bonding_show_mii_status, NULL); 1265static DEVICE_ATTR(mii_status, S_IRUGO, bonding_show_mii_status, NULL);
1215 1266
1216 1267
1217/* 1268/*
1218 * Show current 802.3ad aggregator ID. 1269 * Show current 802.3ad aggregator ID.
1219 */ 1270 */
1220static ssize_t bonding_show_ad_aggregator(struct class_device *cd, char *buf) 1271static ssize_t bonding_show_ad_aggregator(struct device *d,
1272 struct device_attribute *attr,
1273 char *buf)
1221{ 1274{
1222 int count = 0; 1275 int count = 0;
1223 struct bonding *bond = to_bond(cd); 1276 struct bonding *bond = to_bond(d);
1224 1277
1225 if (bond->params.mode == BOND_MODE_8023AD) { 1278 if (bond->params.mode == BOND_MODE_8023AD) {
1226 struct ad_info ad_info; 1279 struct ad_info ad_info;
@@ -1231,16 +1284,18 @@ static ssize_t bonding_show_ad_aggregator(struct class_device *cd, char *buf)
1231 1284
1232 return count; 1285 return count;
1233} 1286}
1234static CLASS_DEVICE_ATTR(ad_aggregator, S_IRUGO, bonding_show_ad_aggregator, NULL); 1287static DEVICE_ATTR(ad_aggregator, S_IRUGO, bonding_show_ad_aggregator, NULL);
1235 1288
1236 1289
1237/* 1290/*
1238 * Show number of active 802.3ad ports. 1291 * Show number of active 802.3ad ports.
1239 */ 1292 */
1240static ssize_t bonding_show_ad_num_ports(struct class_device *cd, char *buf) 1293static ssize_t bonding_show_ad_num_ports(struct device *d,
1294 struct device_attribute *attr,
1295 char *buf)
1241{ 1296{
1242 int count = 0; 1297 int count = 0;
1243 struct bonding *bond = to_bond(cd); 1298 struct bonding *bond = to_bond(d);
1244 1299
1245 if (bond->params.mode == BOND_MODE_8023AD) { 1300 if (bond->params.mode == BOND_MODE_8023AD) {
1246 struct ad_info ad_info; 1301 struct ad_info ad_info;
@@ -1251,16 +1306,18 @@ static ssize_t bonding_show_ad_num_ports(struct class_device *cd, char *buf)
1251 1306
1252 return count; 1307 return count;
1253} 1308}
1254static CLASS_DEVICE_ATTR(ad_num_ports, S_IRUGO, bonding_show_ad_num_ports, NULL); 1309static DEVICE_ATTR(ad_num_ports, S_IRUGO, bonding_show_ad_num_ports, NULL);
1255 1310
1256 1311
1257/* 1312/*
1258 * Show current 802.3ad actor key. 1313 * Show current 802.3ad actor key.
1259 */ 1314 */
1260static ssize_t bonding_show_ad_actor_key(struct class_device *cd, char *buf) 1315static ssize_t bonding_show_ad_actor_key(struct device *d,
1316 struct device_attribute *attr,
1317 char *buf)
1261{ 1318{
1262 int count = 0; 1319 int count = 0;
1263 struct bonding *bond = to_bond(cd); 1320 struct bonding *bond = to_bond(d);
1264 1321
1265 if (bond->params.mode == BOND_MODE_8023AD) { 1322 if (bond->params.mode == BOND_MODE_8023AD) {
1266 struct ad_info ad_info; 1323 struct ad_info ad_info;
@@ -1271,16 +1328,18 @@ static ssize_t bonding_show_ad_actor_key(struct class_device *cd, char *buf)
1271 1328
1272 return count; 1329 return count;
1273} 1330}
1274static CLASS_DEVICE_ATTR(ad_actor_key, S_IRUGO, bonding_show_ad_actor_key, NULL); 1331static DEVICE_ATTR(ad_actor_key, S_IRUGO, bonding_show_ad_actor_key, NULL);
1275 1332
1276 1333
1277/* 1334/*
1278 * Show current 802.3ad partner key. 1335 * Show current 802.3ad partner key.
1279 */ 1336 */
1280static ssize_t bonding_show_ad_partner_key(struct class_device *cd, char *buf) 1337static ssize_t bonding_show_ad_partner_key(struct device *d,
1338 struct device_attribute *attr,
1339 char *buf)
1281{ 1340{
1282 int count = 0; 1341 int count = 0;
1283 struct bonding *bond = to_bond(cd); 1342 struct bonding *bond = to_bond(d);
1284 1343
1285 if (bond->params.mode == BOND_MODE_8023AD) { 1344 if (bond->params.mode == BOND_MODE_8023AD) {
1286 struct ad_info ad_info; 1345 struct ad_info ad_info;
@@ -1291,16 +1350,18 @@ static ssize_t bonding_show_ad_partner_key(struct class_device *cd, char *buf)
1291 1350
1292 return count; 1351 return count;
1293} 1352}
1294static CLASS_DEVICE_ATTR(ad_partner_key, S_IRUGO, bonding_show_ad_partner_key, NULL); 1353static DEVICE_ATTR(ad_partner_key, S_IRUGO, bonding_show_ad_partner_key, NULL);
1295 1354
1296 1355
1297/* 1356/*
1298 * Show current 802.3ad partner mac. 1357 * Show current 802.3ad partner mac.
1299 */ 1358 */
1300static ssize_t bonding_show_ad_partner_mac(struct class_device *cd, char *buf) 1359static ssize_t bonding_show_ad_partner_mac(struct device *d,
1360 struct device_attribute *attr,
1361 char *buf)
1301{ 1362{
1302 int count = 0; 1363 int count = 0;
1303 struct bonding *bond = to_bond(cd); 1364 struct bonding *bond = to_bond(d);
1304 1365
1305 if (bond->params.mode == BOND_MODE_8023AD) { 1366 if (bond->params.mode == BOND_MODE_8023AD) {
1306 struct ad_info ad_info; 1367 struct ad_info ad_info;
@@ -1319,30 +1380,30 @@ static ssize_t bonding_show_ad_partner_mac(struct class_device *cd, char *buf)
1319 1380
1320 return count; 1381 return count;
1321} 1382}
1322static CLASS_DEVICE_ATTR(ad_partner_mac, S_IRUGO, bonding_show_ad_partner_mac, NULL); 1383static DEVICE_ATTR(ad_partner_mac, S_IRUGO, bonding_show_ad_partner_mac, NULL);
1323 1384
1324 1385
1325 1386
1326static struct attribute *per_bond_attrs[] = { 1387static struct attribute *per_bond_attrs[] = {
1327 &class_device_attr_slaves.attr, 1388 &dev_attr_slaves.attr,
1328 &class_device_attr_mode.attr, 1389 &dev_attr_mode.attr,
1329 &class_device_attr_arp_validate.attr, 1390 &dev_attr_arp_validate.attr,
1330 &class_device_attr_arp_interval.attr, 1391 &dev_attr_arp_interval.attr,
1331 &class_device_attr_arp_ip_target.attr, 1392 &dev_attr_arp_ip_target.attr,
1332 &class_device_attr_downdelay.attr, 1393 &dev_attr_downdelay.attr,
1333 &class_device_attr_updelay.attr, 1394 &dev_attr_updelay.attr,
1334 &class_device_attr_lacp_rate.attr, 1395 &dev_attr_lacp_rate.attr,
1335 &class_device_attr_xmit_hash_policy.attr, 1396 &dev_attr_xmit_hash_policy.attr,
1336 &class_device_attr_miimon.attr, 1397 &dev_attr_miimon.attr,
1337 &class_device_attr_primary.attr, 1398 &dev_attr_primary.attr,
1338 &class_device_attr_use_carrier.attr, 1399 &dev_attr_use_carrier.attr,
1339 &class_device_attr_active_slave.attr, 1400 &dev_attr_active_slave.attr,
1340 &class_device_attr_mii_status.attr, 1401 &dev_attr_mii_status.attr,
1341 &class_device_attr_ad_aggregator.attr, 1402 &dev_attr_ad_aggregator.attr,
1342 &class_device_attr_ad_num_ports.attr, 1403 &dev_attr_ad_num_ports.attr,
1343 &class_device_attr_ad_actor_key.attr, 1404 &dev_attr_ad_actor_key.attr,
1344 &class_device_attr_ad_partner_key.attr, 1405 &dev_attr_ad_partner_key.attr,
1345 &class_device_attr_ad_partner_mac.attr, 1406 &dev_attr_ad_partner_mac.attr,
1346 NULL, 1407 NULL,
1347}; 1408};
1348 1409
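For reference, this NULL-terminated array is what the attribute-group machinery consumes; the group installed by bond_create_sysfs_entry() below is presumably declared along these lines (the .name member is what creates the "bonding" subdirectory under the device):

	static struct attribute_group bonding_group = {
		.name  = "bonding",		/* subdirectory name under the kobject */
		.attrs = per_bond_attrs,
	};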
@@ -1367,11 +1428,26 @@ int bond_create_sysfs(void)
1367 if (!firstbond) 1428 if (!firstbond)
1368 return -ENODEV; 1429 return -ENODEV;
1369 1430
1370 netdev_class = firstbond->dev->class_dev.class; 1431 netdev_class = firstbond->dev->dev.class;
1371 if (!netdev_class) 1432 if (!netdev_class)
1372 return -ENODEV; 1433 return -ENODEV;
1373 1434
1374 ret = class_create_file(netdev_class, &class_attr_bonding_masters); 1435 ret = class_create_file(netdev_class, &class_attr_bonding_masters);
1436 /*
1437 * Permit multiple loads of the module by ignoring failures to
1438 * create the bonding_masters sysfs file. Bonding devices
1439 * created by second or subsequent loads of the module will
1440 * not be listed in, or controllable by, bonding_masters, but
1441 * will have the usual "bonding" sysfs directory.
1442 *
1443 * This is done to preserve backwards compatibility for
1444 * initscripts/sysconfig, which load bonding multiple times to
1445 * configure multiple bonding devices.
1446 */
1447 if (ret == -EEXIST) {
1448 netdev_class = NULL;
1449 return 0;
1450 }
1375 1451
1376 return ret; 1452 return ret;
1377 1453
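The -EEXIST special case above turns "file already exists" into success, so a second modprobe of bonding does not fail outright. A minimal sketch of the same pattern, using a hypothetical `masters` class attribute and the pre-2.6.21 class_attribute show signature:

	#include <linux/device.h>

	static ssize_t masters_show(struct class *cls, char *buf)
	{
		return sprintf(buf, "stub\n");	/* placeholder payload */
	}
	static CLASS_ATTR(masters, 0444, masters_show, NULL);

	static int my_sysfs_init(struct class *netdev_class)
	{
		int ret = class_create_file(netdev_class, &class_attr_masters);

		if (ret == -EEXIST)	/* an earlier module load owns the file */
			return 0;	/* degrade gracefully instead of failing */
		return ret;
	}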
@@ -1395,13 +1471,13 @@ int bond_create_sysfs_entry(struct bonding *bond)
1395 struct net_device *dev = bond->dev; 1471 struct net_device *dev = bond->dev;
1396 int err; 1472 int err;
1397 1473
1398 err = sysfs_create_group(&(dev->class_dev.kobj), &bonding_group); 1474 err = sysfs_create_group(&(dev->dev.kobj), &bonding_group);
1399 if (err) { 1475 if (err) {
1400 printk(KERN_EMERG "eek! didn't create group!\n"); 1476 printk(KERN_EMERG "eek! didn't create group!\n");
1401 } 1477 }
1402 1478
1403 if (expected_refcount < 1) 1479 if (expected_refcount < 1)
1404 expected_refcount = atomic_read(&bond->dev->class_dev.kobj.kref.refcount); 1480 expected_refcount = atomic_read(&bond->dev->dev.kobj.kref.refcount);
1405 1481
1406 return err; 1482 return err;
1407} 1483}
@@ -1412,6 +1488,6 @@ void bond_destroy_sysfs_entry(struct bonding *bond)
1412{ 1488{
1413 struct net_device *dev = bond->dev; 1489 struct net_device *dev = bond->dev;
1414 1490
1415 sysfs_remove_group(&(dev->class_dev.kobj), &bonding_group); 1491 sysfs_remove_group(&(dev->dev.kobj), &bonding_group);
1416} 1492}
1417 1493
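Taken together, the conversions above follow one mechanical recipe: CLASS_DEVICE_ATTR becomes DEVICE_ATTR, show/store methods gain a struct device_attribute * parameter, and the generated symbols change from class_device_attr_* to dev_attr_*. A condensed sketch of the new-style pair, with a hypothetical `struct foo` standing in for the driver-private container:

	#include <linux/device.h>

	struct foo {
		struct device dev;
		int level;
	};
	#define to_foo(d) container_of(d, struct foo, dev)

	static ssize_t foo_show_level(struct device *d,
				      struct device_attribute *attr, char *buf)
	{
		struct foo *p = to_foo(d);

		return sprintf(buf, "%d\n", p->level);
	}

	static ssize_t foo_store_level(struct device *d,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
	{
		struct foo *p = to_foo(d);
		int v;

		if (sscanf(buf, "%d", &v) != 1)
			return -EINVAL;	/* reject non-numeric input */
		p->level = v;
		return count;		/* consume the whole write */
	}
	static DEVICE_ATTR(level, S_IRUGO | S_IWUSR, foo_show_level, foo_store_level);
	/* and &dev_attr_level.attr goes into the attribute array, as above */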
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 0978c9ac6d2b..41aa78bf1f78 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -22,8 +22,8 @@
22#include "bond_3ad.h" 22#include "bond_3ad.h"
23#include "bond_alb.h" 23#include "bond_alb.h"
24 24
25#define DRV_VERSION "3.1.1" 25#define DRV_VERSION "3.1.2"
26#define DRV_RELDATE "September 26, 2006" 26#define DRV_RELDATE "January 20, 2007"
27#define DRV_NAME "bonding" 27#define DRV_NAME "bonding"
28#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver" 28#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver"
29 29
@@ -237,12 +237,13 @@ static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
237#define BOND_ARP_VALIDATE_ALL (BOND_ARP_VALIDATE_ACTIVE | \ 237#define BOND_ARP_VALIDATE_ALL (BOND_ARP_VALIDATE_ACTIVE | \
238 BOND_ARP_VALIDATE_BACKUP) 238 BOND_ARP_VALIDATE_BACKUP)
239 239
240extern inline int slave_do_arp_validate(struct bonding *bond, struct slave *slave) 240static inline int slave_do_arp_validate(struct bonding *bond,
241 struct slave *slave)
241{ 242{
242 return bond->params.arp_validate & (1 << slave->state); 243 return bond->params.arp_validate & (1 << slave->state);
243} 244}
244 245
245extern inline unsigned long slave_last_rx(struct bonding *bond, 246static inline unsigned long slave_last_rx(struct bonding *bond,
246 struct slave *slave) 247 struct slave *slave)
247{ 248{
248 if (slave_do_arp_validate(bond, slave)) 249 if (slave_do_arp_validate(bond, slave))
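The bonding.h hunk also fixes a latent bug: `extern inline` asks the compiler to inline but, if it declines, leaves an external reference with no out-of-line definition anywhere, which can fail at link time (and its meaning differs between gnu89 and C99 inline semantics). `static inline` gives every includer its own copy, inlined or not. In short:

	/* risky in a header: no out-of-line definition exists if the
	 * compiler chooses not to inline (e.g. under -O0) */
	extern inline int bad_helper(int x) { return x + 1; }

	/* kernel idiom: always safe, each translation unit gets a
	 * private copy that the compiler may still inline away */
	static inline int good_helper(int x) { return x + 1; }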
diff --git a/drivers/net/chelsio/common.h b/drivers/net/chelsio/common.h
index 74758d2c7af8..787f2f2820fe 100644
--- a/drivers/net/chelsio/common.h
+++ b/drivers/net/chelsio/common.h
@@ -324,7 +324,7 @@ struct board_info {
324 unsigned char mdio_phybaseaddr; 324 unsigned char mdio_phybaseaddr;
325 struct gmac *gmac; 325 struct gmac *gmac;
326 struct gphy *gphy; 326 struct gphy *gphy;
327 struct mdio_ops *mdio_ops; 327 struct mdio_ops *mdio_ops;
328 const char *desc; 328 const char *desc;
329}; 329};
330 330
diff --git a/drivers/net/chelsio/cpl5_cmd.h b/drivers/net/chelsio/cpl5_cmd.h
index 35f565be4fd3..e36d45b78cc7 100644
--- a/drivers/net/chelsio/cpl5_cmd.h
+++ b/drivers/net/chelsio/cpl5_cmd.h
@@ -103,7 +103,7 @@ enum CPL_opcode {
103 CPL_MIGRATE_C2T_RPL = 0xDD, 103 CPL_MIGRATE_C2T_RPL = 0xDD,
104 CPL_ERROR = 0xD7, 104 CPL_ERROR = 0xD7,
105 105
106 /* internal: driver -> TOM */ 106 /* internal: driver -> TOM */
107 CPL_MSS_CHANGE = 0xE1 107 CPL_MSS_CHANGE = 0xE1
108}; 108};
109 109
@@ -159,8 +159,8 @@ enum { // TX_PKT_LSO ethernet types
159}; 159};
160 160
161union opcode_tid { 161union opcode_tid {
162 u32 opcode_tid; 162 u32 opcode_tid;
163 u8 opcode; 163 u8 opcode;
164}; 164};
165 165
166#define S_OPCODE 24 166#define S_OPCODE 24
@@ -234,7 +234,7 @@ struct cpl_pass_accept_req {
234 u32 local_ip; 234 u32 local_ip;
235 u32 peer_ip; 235 u32 peer_ip;
236 u32 tos_tid; 236 u32 tos_tid;
237 struct tcp_options tcp_options; 237 struct tcp_options tcp_options;
238 u8 dst_mac[6]; 238 u8 dst_mac[6];
239 u16 vlan_tag; 239 u16 vlan_tag;
240 u8 src_mac[6]; 240 u8 src_mac[6];
@@ -250,12 +250,12 @@ struct cpl_pass_accept_rpl {
250 u32 peer_ip; 250 u32 peer_ip;
251 u32 opt0h; 251 u32 opt0h;
252 union { 252 union {
253 u32 opt0l; 253 u32 opt0l;
254 struct { 254 struct {
255 u8 rsvd[3]; 255 u8 rsvd[3];
256 u8 status; 256 u8 status;
257 };
257 }; 258 };
258 };
259}; 259};
260 260
261struct cpl_act_open_req { 261struct cpl_act_open_req {
diff --git a/drivers/net/chelsio/cxgb2.c b/drivers/net/chelsio/cxgb2.c
index fd5d821f3f2a..7d0f24f69777 100644
--- a/drivers/net/chelsio/cxgb2.c
+++ b/drivers/net/chelsio/cxgb2.c
@@ -69,14 +69,14 @@ static inline void cancel_mac_stats_update(struct adapter *ap)
69 cancel_delayed_work(&ap->stats_update_task); 69 cancel_delayed_work(&ap->stats_update_task);
70} 70}
71 71
72#define MAX_CMDQ_ENTRIES 16384 72#define MAX_CMDQ_ENTRIES 16384
73#define MAX_CMDQ1_ENTRIES 1024 73#define MAX_CMDQ1_ENTRIES 1024
74#define MAX_RX_BUFFERS 16384 74#define MAX_RX_BUFFERS 16384
75#define MAX_RX_JUMBO_BUFFERS 16384 75#define MAX_RX_JUMBO_BUFFERS 16384
76#define MAX_TX_BUFFERS_HIGH 16384U 76#define MAX_TX_BUFFERS_HIGH 16384U
77#define MAX_TX_BUFFERS_LOW 1536U 77#define MAX_TX_BUFFERS_LOW 1536U
78#define MAX_TX_BUFFERS 1460U 78#define MAX_TX_BUFFERS 1460U
79#define MIN_FL_ENTRIES 32 79#define MIN_FL_ENTRIES 32
80 80
81#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \ 81#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
82 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\ 82 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
@@ -143,7 +143,7 @@ static void link_report(struct port_info *p)
143 case SPEED_100: s = "100Mbps"; break; 143 case SPEED_100: s = "100Mbps"; break;
144 } 144 }
145 145
146 printk(KERN_INFO "%s: link up, %s, %s-duplex\n", 146 printk(KERN_INFO "%s: link up, %s, %s-duplex\n",
147 p->dev->name, s, 147 p->dev->name, s,
148 p->link_config.duplex == DUPLEX_FULL ? "full" : "half"); 148 p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
149 } 149 }
@@ -233,7 +233,7 @@ static int cxgb_up(struct adapter *adapter)
233 233
234 t1_sge_start(adapter->sge); 234 t1_sge_start(adapter->sge);
235 t1_interrupts_enable(adapter); 235 t1_interrupts_enable(adapter);
236 out_err: 236out_err:
237 return err; 237 return err;
238} 238}
239 239
@@ -454,51 +454,21 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
454 const struct cmac_statistics *s; 454 const struct cmac_statistics *s;
455 const struct sge_intr_counts *t; 455 const struct sge_intr_counts *t;
456 struct sge_port_stats ss; 456 struct sge_port_stats ss;
457 unsigned int len;
457 458
458 s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL); 459 s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);
459 460
460 *data++ = s->TxOctetsOK; 461 len = sizeof(u64)*(&s->TxFCSErrors + 1 - &s->TxOctetsOK);
461 *data++ = s->TxOctetsBad; 462 memcpy(data, &s->TxOctetsOK, len);
462 *data++ = s->TxUnicastFramesOK; 463 data += len;
463 *data++ = s->TxMulticastFramesOK; 464
464 *data++ = s->TxBroadcastFramesOK; 465 len = sizeof(u64)*(&s->RxFrameTooLongErrors + 1 - &s->RxOctetsOK);
465 *data++ = s->TxPauseFrames; 466 memcpy(data, &s->RxOctetsOK, len);
466 *data++ = s->TxFramesWithDeferredXmissions; 467 data += len;
467 *data++ = s->TxLateCollisions;
468 *data++ = s->TxTotalCollisions;
469 *data++ = s->TxFramesAbortedDueToXSCollisions;
470 *data++ = s->TxUnderrun;
471 *data++ = s->TxLengthErrors;
472 *data++ = s->TxInternalMACXmitError;
473 *data++ = s->TxFramesWithExcessiveDeferral;
474 *data++ = s->TxFCSErrors;
475
476 *data++ = s->RxOctetsOK;
477 *data++ = s->RxOctetsBad;
478 *data++ = s->RxUnicastFramesOK;
479 *data++ = s->RxMulticastFramesOK;
480 *data++ = s->RxBroadcastFramesOK;
481 *data++ = s->RxPauseFrames;
482 *data++ = s->RxFCSErrors;
483 *data++ = s->RxAlignErrors;
484 *data++ = s->RxSymbolErrors;
485 *data++ = s->RxDataErrors;
486 *data++ = s->RxSequenceErrors;
487 *data++ = s->RxRuntErrors;
488 *data++ = s->RxJabberErrors;
489 *data++ = s->RxInternalMACRcvError;
490 *data++ = s->RxInRangeLengthErrors;
491 *data++ = s->RxOutOfRangeLengthField;
492 *data++ = s->RxFrameTooLongErrors;
493 468
494 t1_sge_get_port_stats(adapter->sge, dev->if_port, &ss); 469 t1_sge_get_port_stats(adapter->sge, dev->if_port, &ss);
495 *data++ = ss.rx_packets; 470 memcpy(data, &ss, sizeof(ss));
496 *data++ = ss.rx_cso_good; 471 data += sizeof(ss);
497 *data++ = ss.tx_packets;
498 *data++ = ss.tx_cso;
499 *data++ = ss.tx_tso;
500 *data++ = ss.vlan_xtract;
501 *data++ = ss.vlan_insert;
502 472
503 t = t1_sge_get_intr_counts(adapter->sge); 473 t = t1_sge_get_intr_counts(adapter->sge);
504 *data++ = t->rx_drops; 474 *data++ = t->rx_drops;
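The rewrite above exploits the layout of struct cmac_statistics: the Tx counters from TxOctetsOK through TxFCSErrors (and likewise the Rx run) are adjacent u64 fields, so one memcpy replaces a column of assignments, and the computed length survives the insertion of new counters between the endpoints. One pitfall worth noting: `len` here is a byte count, so whether `data += len` advances correctly depends on `data`'s pointer type; the sketch below (hypothetical struct and field names) keeps everything in element counts to sidestep that ambiguity:

	#include <string.h>

	struct stats {	/* stand-in for a contiguous run of u64 counters */
		unsigned long long tx_first, tx_mid, tx_last;
		unsigned long long rx_first, rx_last;
	};

	static unsigned long long *copy_tx(unsigned long long *data,
					   const struct stats *s)
	{
		/* pointer difference yields the number of u64 elements */
		size_t n = (size_t)(&s->tx_last + 1 - &s->tx_first);

		memcpy(data, &s->tx_first, n * sizeof(*data));
		return data + n;	/* advance by elements, not bytes */
	}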
@@ -749,7 +719,7 @@ static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
749 return -EINVAL; 719 return -EINVAL;
750 720
751 if (adapter->flags & FULL_INIT_DONE) 721 if (adapter->flags & FULL_INIT_DONE)
752 return -EBUSY; 722 return -EBUSY;
753 723
754 adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending; 724 adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending;
755 adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending; 725 adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending;
@@ -764,7 +734,7 @@ static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
764 struct adapter *adapter = dev->priv; 734 struct adapter *adapter = dev->priv;
765 735
766 adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs; 736 adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
767 adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce; 737 adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
768 adapter->params.sge.sample_interval_usecs = c->rate_sample_interval; 738 adapter->params.sge.sample_interval_usecs = c->rate_sample_interval;
769 t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge); 739 t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge);
770 return 0; 740 return 0;
@@ -782,9 +752,9 @@ static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
782 752
783static int get_eeprom_len(struct net_device *dev) 753static int get_eeprom_len(struct net_device *dev)
784{ 754{
785 struct adapter *adapter = dev->priv; 755 struct adapter *adapter = dev->priv;
786 756
787 return t1_is_asic(adapter) ? EEPROM_SIZE : 0; 757 return t1_is_asic(adapter) ? EEPROM_SIZE : 0;
788} 758}
789 759
790#define EEPROM_MAGIC(ap) \ 760#define EEPROM_MAGIC(ap) \
@@ -848,7 +818,7 @@ static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
848 u32 val; 818 u32 val;
849 819
850 if (!phy->mdio_read) 820 if (!phy->mdio_read)
851 return -EOPNOTSUPP; 821 return -EOPNOTSUPP;
852 phy->mdio_read(adapter, data->phy_id, 0, data->reg_num & 0x1f, 822 phy->mdio_read(adapter, data->phy_id, 0, data->reg_num & 0x1f,
853 &val); 823 &val);
854 data->val_out = val; 824 data->val_out = val;
@@ -860,7 +830,7 @@ static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
860 if (!capable(CAP_NET_ADMIN)) 830 if (!capable(CAP_NET_ADMIN))
861 return -EPERM; 831 return -EPERM;
862 if (!phy->mdio_write) 832 if (!phy->mdio_write)
863 return -EOPNOTSUPP; 833 return -EOPNOTSUPP;
864 phy->mdio_write(adapter, data->phy_id, 0, data->reg_num & 0x1f, 834 phy->mdio_write(adapter, data->phy_id, 0, data->reg_num & 0x1f,
865 data->val_in); 835 data->val_in);
866 break; 836 break;
@@ -879,9 +849,9 @@ static int t1_change_mtu(struct net_device *dev, int new_mtu)
879 struct cmac *mac = adapter->port[dev->if_port].mac; 849 struct cmac *mac = adapter->port[dev->if_port].mac;
880 850
881 if (!mac->ops->set_mtu) 851 if (!mac->ops->set_mtu)
882 return -EOPNOTSUPP; 852 return -EOPNOTSUPP;
883 if (new_mtu < 68) 853 if (new_mtu < 68)
884 return -EINVAL; 854 return -EINVAL;
885 if ((ret = mac->ops->set_mtu(mac, new_mtu))) 855 if ((ret = mac->ops->set_mtu(mac, new_mtu)))
886 return ret; 856 return ret;
887 dev->mtu = new_mtu; 857 dev->mtu = new_mtu;
@@ -1211,9 +1181,9 @@ static int __devinit init_one(struct pci_dev *pdev,
1211 1181
1212 return 0; 1182 return 0;
1213 1183
1214 out_release_adapter_res: 1184out_release_adapter_res:
1215 t1_free_sw_modules(adapter); 1185 t1_free_sw_modules(adapter);
1216 out_free_dev: 1186out_free_dev:
1217 if (adapter) { 1187 if (adapter) {
1218 if (adapter->regs) 1188 if (adapter->regs)
1219 iounmap(adapter->regs); 1189 iounmap(adapter->regs);
@@ -1222,7 +1192,7 @@ static int __devinit init_one(struct pci_dev *pdev,
1222 free_netdev(adapter->port[i].dev); 1192 free_netdev(adapter->port[i].dev);
1223 } 1193 }
1224 pci_release_regions(pdev); 1194 pci_release_regions(pdev);
1225 out_disable_pdev: 1195out_disable_pdev:
1226 pci_disable_device(pdev); 1196 pci_disable_device(pdev);
1227 pci_set_drvdata(pdev, NULL); 1197 pci_set_drvdata(pdev, NULL);
1228 return err; 1198 return err;
@@ -1273,28 +1243,27 @@ static int t1_clock(struct adapter *adapter, int mode)
1273 int M_MEM_VAL; 1243 int M_MEM_VAL;
1274 1244
1275 enum { 1245 enum {
1276 M_CORE_BITS = 9, 1246 M_CORE_BITS = 9,
1277 T_CORE_VAL = 0, 1247 T_CORE_VAL = 0,
1278 T_CORE_BITS = 2, 1248 T_CORE_BITS = 2,
1279 N_CORE_VAL = 0, 1249 N_CORE_VAL = 0,
1280 N_CORE_BITS = 2, 1250 N_CORE_BITS = 2,
1281 M_MEM_BITS = 9, 1251 M_MEM_BITS = 9,
1282 T_MEM_VAL = 0, 1252 T_MEM_VAL = 0,
1283 T_MEM_BITS = 2, 1253 T_MEM_BITS = 2,
1284 N_MEM_VAL = 0, 1254 N_MEM_VAL = 0,
1285 N_MEM_BITS = 2, 1255 N_MEM_BITS = 2,
1286 NP_LOAD = 1 << 17, 1256 NP_LOAD = 1 << 17,
1287 S_LOAD_MEM = 1 << 5, 1257 S_LOAD_MEM = 1 << 5,
1288 S_LOAD_CORE = 1 << 6, 1258 S_LOAD_CORE = 1 << 6,
1289 S_CLOCK = 1 << 3 1259 S_CLOCK = 1 << 3
1290 }; 1260 };
1291 1261
1292 if (!t1_is_T1B(adapter)) 1262 if (!t1_is_T1B(adapter))
1293 return -ENODEV; /* Can't re-clock this chip. */ 1263 return -ENODEV; /* Can't re-clock this chip. */
1294 1264
1295 if (mode & 2) { 1265 if (mode & 2)
1296 return 0; /* show current mode. */ 1266 return 0; /* show current mode. */
1297 }
1298 1267
1299 if ((adapter->t1powersave & 1) == (mode & 1)) 1268 if ((adapter->t1powersave & 1) == (mode & 1))
1300 return -EALREADY; /* ASIC already running in mode. */ 1269 return -EALREADY; /* ASIC already running in mode. */
@@ -1386,26 +1355,26 @@ static inline void t1_sw_reset(struct pci_dev *pdev)
1386static void __devexit remove_one(struct pci_dev *pdev) 1355static void __devexit remove_one(struct pci_dev *pdev)
1387{ 1356{
1388 struct net_device *dev = pci_get_drvdata(pdev); 1357 struct net_device *dev = pci_get_drvdata(pdev);
1358 struct adapter *adapter = dev->priv;
1359 int i;
1389 1360
1390 if (dev) { 1361 for_each_port(adapter, i) {
1391 int i; 1362 if (test_bit(i, &adapter->registered_device_map))
1392 struct adapter *adapter = dev->priv; 1363 unregister_netdev(adapter->port[i].dev);
1393 1364 }
1394 for_each_port(adapter, i)
1395 if (test_bit(i, &adapter->registered_device_map))
1396 unregister_netdev(adapter->port[i].dev);
1397 1365
1398 t1_free_sw_modules(adapter); 1366 t1_free_sw_modules(adapter);
1399 iounmap(adapter->regs); 1367 iounmap(adapter->regs);
1400 while (--i >= 0)
1401 if (adapter->port[i].dev)
1402 free_netdev(adapter->port[i].dev);
1403 1368
1404 pci_release_regions(pdev); 1369 while (--i >= 0) {
1405 pci_disable_device(pdev); 1370 if (adapter->port[i].dev)
1406 pci_set_drvdata(pdev, NULL); 1371 free_netdev(adapter->port[i].dev);
1407 t1_sw_reset(pdev);
1408 } 1372 }
1373
1374 pci_release_regions(pdev);
1375 pci_disable_device(pdev);
1376 pci_set_drvdata(pdev, NULL);
1377 t1_sw_reset(pdev);
1409} 1378}
1410 1379
1411static struct pci_driver driver = { 1380static struct pci_driver driver = {
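The remove_one() rework drops the defensive `if (dev)` (probe always sets drvdata on success) and flattens the teardown into a fixed order. The shape of that order, reduced to a sketch with a hypothetical adapter struct (the real function also checks registered_device_map and frees shared software state in between):

	#include <linux/pci.h>
	#include <linux/netdevice.h>

	struct my_adapter {
		int nports;
		void __iomem *regs;
		struct net_device *port[4];
	};

	static void my_remove(struct pci_dev *pdev)
	{
		struct net_device *dev = pci_get_drvdata(pdev);
		struct my_adapter *ap = dev->priv;	/* pre-netdev_priv() era */
		int i;

		for (i = 0; i < ap->nports; i++)	/* 1: stop external users */
			unregister_netdev(ap->port[i]);

		iounmap(ap->regs);			/* 2: drop the MMIO mapping */

		while (--i >= 0)			/* 3: free netdevs in reverse */
			free_netdev(ap->port[i]);

		pci_release_regions(pdev);		/* 4: PCI resources last */
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}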
diff --git a/drivers/net/chelsio/elmer0.h b/drivers/net/chelsio/elmer0.h
index 9ebecaa97d31..eef655c827d9 100644
--- a/drivers/net/chelsio/elmer0.h
+++ b/drivers/net/chelsio/elmer0.h
@@ -46,14 +46,14 @@ enum {
46}; 46};
47 47
48/* ELMER0 registers */ 48/* ELMER0 registers */
49#define A_ELMER0_VERSION 0x100000 49#define A_ELMER0_VERSION 0x100000
50#define A_ELMER0_PHY_CFG 0x100004 50#define A_ELMER0_PHY_CFG 0x100004
51#define A_ELMER0_INT_ENABLE 0x100008 51#define A_ELMER0_INT_ENABLE 0x100008
52#define A_ELMER0_INT_CAUSE 0x10000c 52#define A_ELMER0_INT_CAUSE 0x10000c
53#define A_ELMER0_GPI_CFG 0x100010 53#define A_ELMER0_GPI_CFG 0x100010
54#define A_ELMER0_GPI_STAT 0x100014 54#define A_ELMER0_GPI_STAT 0x100014
55#define A_ELMER0_GPO 0x100018 55#define A_ELMER0_GPO 0x100018
56#define A_ELMER0_PORT0_MI1_CFG 0x400000 56#define A_ELMER0_PORT0_MI1_CFG 0x400000
57 57
58#define S_MI1_MDI_ENABLE 0 58#define S_MI1_MDI_ENABLE 0
59#define V_MI1_MDI_ENABLE(x) ((x) << S_MI1_MDI_ENABLE) 59#define V_MI1_MDI_ENABLE(x) ((x) << S_MI1_MDI_ENABLE)
@@ -111,18 +111,18 @@ enum {
111#define V_MI1_OP_BUSY(x) ((x) << S_MI1_OP_BUSY) 111#define V_MI1_OP_BUSY(x) ((x) << S_MI1_OP_BUSY)
112#define F_MI1_OP_BUSY V_MI1_OP_BUSY(1U) 112#define F_MI1_OP_BUSY V_MI1_OP_BUSY(1U)
113 113
114#define A_ELMER0_PORT1_MI1_CFG 0x500000 114#define A_ELMER0_PORT1_MI1_CFG 0x500000
115#define A_ELMER0_PORT1_MI1_ADDR 0x500004 115#define A_ELMER0_PORT1_MI1_ADDR 0x500004
116#define A_ELMER0_PORT1_MI1_DATA 0x500008 116#define A_ELMER0_PORT1_MI1_DATA 0x500008
117#define A_ELMER0_PORT1_MI1_OP 0x50000c 117#define A_ELMER0_PORT1_MI1_OP 0x50000c
118#define A_ELMER0_PORT2_MI1_CFG 0x600000 118#define A_ELMER0_PORT2_MI1_CFG 0x600000
119#define A_ELMER0_PORT2_MI1_ADDR 0x600004 119#define A_ELMER0_PORT2_MI1_ADDR 0x600004
120#define A_ELMER0_PORT2_MI1_DATA 0x600008 120#define A_ELMER0_PORT2_MI1_DATA 0x600008
121#define A_ELMER0_PORT2_MI1_OP 0x60000c 121#define A_ELMER0_PORT2_MI1_OP 0x60000c
122#define A_ELMER0_PORT3_MI1_CFG 0x700000 122#define A_ELMER0_PORT3_MI1_CFG 0x700000
123#define A_ELMER0_PORT3_MI1_ADDR 0x700004 123#define A_ELMER0_PORT3_MI1_ADDR 0x700004
124#define A_ELMER0_PORT3_MI1_DATA 0x700008 124#define A_ELMER0_PORT3_MI1_DATA 0x700008
125#define A_ELMER0_PORT3_MI1_OP 0x70000c 125#define A_ELMER0_PORT3_MI1_OP 0x70000c
126 126
127/* Simple bit definition for GPI and GP0 registers. */ 127/* Simple bit definition for GPI and GP0 registers. */
128#define ELMER0_GP_BIT0 0x0001 128#define ELMER0_GP_BIT0 0x0001
diff --git a/drivers/net/chelsio/espi.c b/drivers/net/chelsio/espi.c
index 4192f0f5b3ee..d7c5406a6c3f 100644
--- a/drivers/net/chelsio/espi.c
+++ b/drivers/net/chelsio/espi.c
@@ -202,9 +202,9 @@ static void espi_setup_for_pm3393(adapter_t *adapter)
202 202
203static void espi_setup_for_vsc7321(adapter_t *adapter) 203static void espi_setup_for_vsc7321(adapter_t *adapter)
204{ 204{
205 writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN0); 205 writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN0);
206 writel(0x1f401f4, adapter->regs + A_ESPI_SCH_TOKEN1); 206 writel(0x1f401f4, adapter->regs + A_ESPI_SCH_TOKEN1);
207 writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN2); 207 writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN2);
208 writel(0xa00, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK); 208 writel(0xa00, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK);
209 writel(0x1ff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK); 209 writel(0x1ff, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK);
210 writel(1, adapter->regs + A_ESPI_CALENDAR_LENGTH); 210 writel(1, adapter->regs + A_ESPI_CALENDAR_LENGTH);
@@ -247,10 +247,10 @@ int t1_espi_init(struct peespi *espi, int mac_type, int nports)
247 writel(V_OUT_OF_SYNC_COUNT(4) | 247 writel(V_OUT_OF_SYNC_COUNT(4) |
248 V_DIP2_PARITY_ERR_THRES(3) | 248 V_DIP2_PARITY_ERR_THRES(3) |
249 V_DIP4_THRES(1), adapter->regs + A_ESPI_MISC_CONTROL); 249 V_DIP4_THRES(1), adapter->regs + A_ESPI_MISC_CONTROL);
250 writel(nports == 4 ? 0x200040 : 0x1000080, 250 writel(nports == 4 ? 0x200040 : 0x1000080,
251 adapter->regs + A_ESPI_MAXBURST1_MAXBURST2); 251 adapter->regs + A_ESPI_MAXBURST1_MAXBURST2);
252 } else 252 } else
253 writel(0x800100, adapter->regs + A_ESPI_MAXBURST1_MAXBURST2); 253 writel(0x800100, adapter->regs + A_ESPI_MAXBURST1_MAXBURST2);
254 254
255 if (mac_type == CHBT_MAC_PM3393) 255 if (mac_type == CHBT_MAC_PM3393)
256 espi_setup_for_pm3393(adapter); 256 espi_setup_for_pm3393(adapter);
@@ -301,7 +301,8 @@ void t1_espi_set_misc_ctrl(adapter_t *adapter, u32 val)
301{ 301{
302 struct peespi *espi = adapter->espi; 302 struct peespi *espi = adapter->espi;
303 303
304 if (!is_T2(adapter)) return; 304 if (!is_T2(adapter))
305 return;
305 spin_lock(&espi->lock); 306 spin_lock(&espi->lock);
306 espi->misc_ctrl = (val & ~MON_MASK) | 307 espi->misc_ctrl = (val & ~MON_MASK) |
307 (espi->misc_ctrl & MON_MASK); 308 (espi->misc_ctrl & MON_MASK);
@@ -340,32 +341,31 @@ u32 t1_espi_get_mon(adapter_t *adapter, u32 addr, u8 wait)
340 * compare with t1_espi_get_mon(), it reads espiInTxSop[0 ~ 3] in 341 * compare with t1_espi_get_mon(), it reads espiInTxSop[0 ~ 3] in
341 * one shot, since there is no per port counter on the out side. 342 * one shot, since there is no per port counter on the out side.
342 */ 343 */
343int 344int t1_espi_get_mon_t204(adapter_t *adapter, u32 *valp, u8 wait)
344t1_espi_get_mon_t204(adapter_t *adapter, u32 *valp, u8 wait)
345{ 345{
346 struct peespi *espi = adapter->espi; 346 struct peespi *espi = adapter->espi;
347 u8 i, nport = (u8)adapter->params.nports; 347 u8 i, nport = (u8)adapter->params.nports;
348 348
349 if (!wait) { 349 if (!wait) {
350 if (!spin_trylock(&espi->lock)) 350 if (!spin_trylock(&espi->lock))
351 return -1; 351 return -1;
352 } else 352 } else
353 spin_lock(&espi->lock); 353 spin_lock(&espi->lock);
354 354
355 if ( (espi->misc_ctrl & MON_MASK) != F_MONITORED_DIRECTION ) { 355 if ((espi->misc_ctrl & MON_MASK) != F_MONITORED_DIRECTION) {
356 espi->misc_ctrl = (espi->misc_ctrl & ~MON_MASK) | 356 espi->misc_ctrl = (espi->misc_ctrl & ~MON_MASK) |
357 F_MONITORED_DIRECTION; 357 F_MONITORED_DIRECTION;
358 writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL); 358 writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
359 } 359 }
360 for (i = 0 ; i < nport; i++, valp++) { 360 for (i = 0 ; i < nport; i++, valp++) {
361 if (i) { 361 if (i) {
362 writel(espi->misc_ctrl | V_MONITORED_PORT_NUM(i), 362 writel(espi->misc_ctrl | V_MONITORED_PORT_NUM(i),
363 adapter->regs + A_ESPI_MISC_CONTROL); 363 adapter->regs + A_ESPI_MISC_CONTROL);
364 } 364 }
365 *valp = readl(adapter->regs + A_ESPI_SCH_TOKEN3); 365 *valp = readl(adapter->regs + A_ESPI_SCH_TOKEN3);
366 } 366 }
367 367
368 writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL); 368 writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
369 spin_unlock(&espi->lock); 369 spin_unlock(&espi->lock);
370 return 0; 370 return 0;
371} 371}
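Worth highlighting in the hunk above: the `wait` flag selects between blocking and opportunistic lock acquisition, so callers in contexts that must not spin waiting can bail out instead. The idiom in isolation:

	#include <linux/spinlock.h>

	/* returns 0 with the lock held, or -EBUSY if wait == 0 and the
	 * lock is contended (the driver above signals this with -1) */
	static int grab(spinlock_t *lock, int wait)
	{
		if (!wait)
			return spin_trylock(lock) ? 0 : -EBUSY;
		spin_lock(lock);
		return 0;
	}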
diff --git a/drivers/net/chelsio/fpga_defs.h b/drivers/net/chelsio/fpga_defs.h
index 17a3c2ba36a3..ccdb2bc9ae98 100644
--- a/drivers/net/chelsio/fpga_defs.h
+++ b/drivers/net/chelsio/fpga_defs.h
@@ -98,9 +98,9 @@
98#define A_MI0_DATA_INT 0xb10 98#define A_MI0_DATA_INT 0xb10
99 99
100/* GMAC registers */ 100/* GMAC registers */
101#define A_GMAC_MACID_LO 0x28 101#define A_GMAC_MACID_LO 0x28
102#define A_GMAC_MACID_HI 0x2c 102#define A_GMAC_MACID_HI 0x2c
103#define A_GMAC_CSR 0x30 103#define A_GMAC_CSR 0x30
104 104
105#define S_INTERFACE 0 105#define S_INTERFACE 0
106#define M_INTERFACE 0x3 106#define M_INTERFACE 0x3
diff --git a/drivers/net/chelsio/gmac.h b/drivers/net/chelsio/gmac.h
index a2b8ad9b5535..006a2eb2d362 100644
--- a/drivers/net/chelsio/gmac.h
+++ b/drivers/net/chelsio/gmac.h
@@ -42,8 +42,15 @@
42 42
43#include "common.h" 43#include "common.h"
44 44
45enum { MAC_STATS_UPDATE_FAST, MAC_STATS_UPDATE_FULL }; 45enum {
46enum { MAC_DIRECTION_RX = 1, MAC_DIRECTION_TX = 2 }; 46 MAC_STATS_UPDATE_FAST,
47 MAC_STATS_UPDATE_FULL
48};
49
50enum {
51 MAC_DIRECTION_RX = 1,
52 MAC_DIRECTION_TX = 2
53};
47 54
48struct cmac_statistics { 55struct cmac_statistics {
49 /* Transmit */ 56 /* Transmit */
diff --git a/drivers/net/chelsio/ixf1010.c b/drivers/net/chelsio/ixf1010.c
index 5b8f144e83d4..10b2a9a19006 100644
--- a/drivers/net/chelsio/ixf1010.c
+++ b/drivers/net/chelsio/ixf1010.c
@@ -145,48 +145,61 @@ static void disable_port(struct cmac *mac)
145 t1_tpi_write(mac->adapter, REG_PORT_ENABLE, val); 145 t1_tpi_write(mac->adapter, REG_PORT_ENABLE, val);
146} 146}
147 147
148#define RMON_UPDATE(mac, name, stat_name) \
149 t1_tpi_read((mac)->adapter, MACREG(mac, REG_##name), &val); \
150 (mac)->stats.stat_name += val;
151
152/* 148/*
153 * Read the current values of the RMON counters and add them to the cumulative 149 * Read the current values of the RMON counters and add them to the cumulative
154 * port statistics. The HW RMON counters are cleared by this operation. 150 * port statistics. The HW RMON counters are cleared by this operation.
155 */ 151 */
156static void port_stats_update(struct cmac *mac) 152static void port_stats_update(struct cmac *mac)
157{ 153{
158 u32 val; 154 static struct {
155 unsigned int reg;
156 unsigned int offset;
157 } hw_stats[] = {
158
159#define HW_STAT(name, stat_name) \
160 { REG_##name, \
161 (&((struct cmac_statistics *)NULL)->stat_name) - (u64 *)NULL }
162
163 /* Rx stats */
164 HW_STAT(RxOctetsTotalOK, RxOctetsOK),
165 HW_STAT(RxOctetsBad, RxOctetsBad),
166 HW_STAT(RxUCPkts, RxUnicastFramesOK),
167 HW_STAT(RxMCPkts, RxMulticastFramesOK),
168 HW_STAT(RxBCPkts, RxBroadcastFramesOK),
169 HW_STAT(RxJumboPkts, RxJumboFramesOK),
170 HW_STAT(RxFCSErrors, RxFCSErrors),
171 HW_STAT(RxAlignErrors, RxAlignErrors),
172 HW_STAT(RxLongErrors, RxFrameTooLongErrors),
173 HW_STAT(RxVeryLongErrors, RxFrameTooLongErrors),
174 HW_STAT(RxPauseMacControlCounter, RxPauseFrames),
175 HW_STAT(RxDataErrors, RxDataErrors),
176 HW_STAT(RxJabberErrors, RxJabberErrors),
177 HW_STAT(RxRuntErrors, RxRuntErrors),
178 HW_STAT(RxShortErrors, RxRuntErrors),
179 HW_STAT(RxSequenceErrors, RxSequenceErrors),
180 HW_STAT(RxSymbolErrors, RxSymbolErrors),
181
182 /* Tx stats (skip collision stats as we are full-duplex only) */
183 HW_STAT(TxOctetsTotalOK, TxOctetsOK),
184 HW_STAT(TxOctetsBad, TxOctetsBad),
185 HW_STAT(TxUCPkts, TxUnicastFramesOK),
186 HW_STAT(TxMCPkts, TxMulticastFramesOK),
187 HW_STAT(TxBCPkts, TxBroadcastFramesOK),
188 HW_STAT(TxJumboPkts, TxJumboFramesOK),
189 HW_STAT(TxPauseFrames, TxPauseFrames),
190 HW_STAT(TxExcessiveLengthDrop, TxLengthErrors),
191 HW_STAT(TxUnderrun, TxUnderrun),
192 HW_STAT(TxCRCErrors, TxFCSErrors)
193 }, *p = hw_stats;
194 u64 *stats = (u64 *) &mac->stats;
195 unsigned int i;
196
197 for (i = 0; i < ARRAY_SIZE(hw_stats); i++) {
198 u32 val;
159 199
160 /* Rx stats */ 200 t1_tpi_read(mac->adapter, MACREG(mac, p->reg), &val);
161 RMON_UPDATE(mac, RxOctetsTotalOK, RxOctetsOK); 201 stats[p->offset] += val;
162 RMON_UPDATE(mac, RxOctetsBad, RxOctetsBad); 202 }
163 RMON_UPDATE(mac, RxUCPkts, RxUnicastFramesOK);
164 RMON_UPDATE(mac, RxMCPkts, RxMulticastFramesOK);
165 RMON_UPDATE(mac, RxBCPkts, RxBroadcastFramesOK);
166 RMON_UPDATE(mac, RxJumboPkts, RxJumboFramesOK);
167 RMON_UPDATE(mac, RxFCSErrors, RxFCSErrors);
168 RMON_UPDATE(mac, RxAlignErrors, RxAlignErrors);
169 RMON_UPDATE(mac, RxLongErrors, RxFrameTooLongErrors);
170 RMON_UPDATE(mac, RxVeryLongErrors, RxFrameTooLongErrors);
171 RMON_UPDATE(mac, RxPauseMacControlCounter, RxPauseFrames);
172 RMON_UPDATE(mac, RxDataErrors, RxDataErrors);
173 RMON_UPDATE(mac, RxJabberErrors, RxJabberErrors);
174 RMON_UPDATE(mac, RxRuntErrors, RxRuntErrors);
175 RMON_UPDATE(mac, RxShortErrors, RxRuntErrors);
176 RMON_UPDATE(mac, RxSequenceErrors, RxSequenceErrors);
177 RMON_UPDATE(mac, RxSymbolErrors, RxSymbolErrors);
178
179 /* Tx stats (skip collision stats as we are full-duplex only) */
180 RMON_UPDATE(mac, TxOctetsTotalOK, TxOctetsOK);
181 RMON_UPDATE(mac, TxOctetsBad, TxOctetsBad);
182 RMON_UPDATE(mac, TxUCPkts, TxUnicastFramesOK);
183 RMON_UPDATE(mac, TxMCPkts, TxMulticastFramesOK);
184 RMON_UPDATE(mac, TxBCPkts, TxBroadcastFramesOK);
185 RMON_UPDATE(mac, TxJumboPkts, TxJumboFramesOK);
186 RMON_UPDATE(mac, TxPauseFrames, TxPauseFrames);
187 RMON_UPDATE(mac, TxExcessiveLengthDrop, TxLengthErrors);
188 RMON_UPDATE(mac, TxUnderrun, TxUnderrun);
189 RMON_UPDATE(mac, TxCRCErrors, TxFCSErrors);
190} 203}
191 204
192/* No-op interrupt operation as this MAC does not support interrupts */ 205/* No-op interrupt operation as this MAC does not support interrupts */
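The HW_STAT macro above computes the u64 index of a member by subtracting pointers from a NULL base, a pre-offsetof kernel idiom that is technically undefined behavior in standard C but compiles to the expected constant. A standalone equivalent using offsetof, with a toy stats struct:

	#include <stddef.h>

	struct st { unsigned long long a, b, c; };	/* toy counter block */

	/* index of member when type is viewed as an array of u64 */
	#define U64_IDX(type, member) \
		(offsetof(type, member) / sizeof(unsigned long long))

	static void bump(struct st *s, unsigned int idx, unsigned int val)
	{
		unsigned long long *base = (unsigned long long *)s;

		base[idx] += val;	/* accumulate into the idx'th counter */
	}
	/* usage: bump(&stats, U64_IDX(struct st, b), hw_reading); */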
@@ -273,7 +286,8 @@ static int mac_set_rx_mode(struct cmac *mac, struct t1_rx_mode *rm)
273static int mac_set_mtu(struct cmac *mac, int mtu) 286static int mac_set_mtu(struct cmac *mac, int mtu)
274{ 287{
 275 /* MAX_FRAME_SIZE includes header + FCS, mtu doesn't */ 288 /* MAX_FRAME_SIZE includes header + FCS, mtu doesn't */
276 if (mtu > (MAX_FRAME_SIZE - 14 - 4)) return -EINVAL; 289 if (mtu > (MAX_FRAME_SIZE - 14 - 4))
290 return -EINVAL;
277 t1_tpi_write(mac->adapter, MACREG(mac, REG_MAX_FRAME_SIZE), 291 t1_tpi_write(mac->adapter, MACREG(mac, REG_MAX_FRAME_SIZE),
278 mtu + 14 + 4); 292 mtu + 14 + 4);
279 return 0; 293 return 0;
@@ -357,8 +371,8 @@ static void enable_port(struct cmac *mac)
357 val |= (1 << index); 371 val |= (1 << index);
358 t1_tpi_write(adapter, REG_PORT_ENABLE, val); 372 t1_tpi_write(adapter, REG_PORT_ENABLE, val);
359 373
360 index <<= 2; 374 index <<= 2;
361 if (is_T2(adapter)) { 375 if (is_T2(adapter)) {
362 /* T204: set the Fifo water level & threshold */ 376 /* T204: set the Fifo water level & threshold */
363 t1_tpi_write(adapter, RX_FIFO_HIGH_WATERMARK_BASE + index, 0x740); 377 t1_tpi_write(adapter, RX_FIFO_HIGH_WATERMARK_BASE + index, 0x740);
364 t1_tpi_write(adapter, RX_FIFO_LOW_WATERMARK_BASE + index, 0x730); 378 t1_tpi_write(adapter, RX_FIFO_LOW_WATERMARK_BASE + index, 0x730);
@@ -389,6 +403,10 @@ static int mac_disable(struct cmac *mac, int which)
389 return 0; 403 return 0;
390} 404}
391 405
406#define RMON_UPDATE(mac, name, stat_name) \
407 t1_tpi_read((mac)->adapter, MACREG(mac, REG_##name), &val); \
408 (mac)->stats.stat_name += val;
409
392/* 410/*
393 * This function is called periodically to accumulate the current values of the 411 * This function is called periodically to accumulate the current values of the
394 * RMON counters into the port statistics. Since the counters are only 32 bits 412 * RMON counters into the port statistics. Since the counters are only 32 bits
@@ -460,10 +478,12 @@ static struct cmac *ixf1010_mac_create(adapter_t *adapter, int index)
460 struct cmac *mac; 478 struct cmac *mac;
461 u32 val; 479 u32 val;
462 480
463 if (index > 9) return NULL; 481 if (index > 9)
482 return NULL;
464 483
465 mac = kzalloc(sizeof(*mac) + sizeof(cmac_instance), GFP_KERNEL); 484 mac = kzalloc(sizeof(*mac) + sizeof(cmac_instance), GFP_KERNEL);
466 if (!mac) return NULL; 485 if (!mac)
486 return NULL;
467 487
468 mac->ops = &ixf1010_ops; 488 mac->ops = &ixf1010_ops;
469 mac->instance = (cmac_instance *)(mac + 1); 489 mac->instance = (cmac_instance *)(mac + 1);
diff --git a/drivers/net/chelsio/mv88e1xxx.c b/drivers/net/chelsio/mv88e1xxx.c
index 28ac93ff7c4f..5867e3b0a887 100644
--- a/drivers/net/chelsio/mv88e1xxx.c
+++ b/drivers/net/chelsio/mv88e1xxx.c
@@ -73,9 +73,8 @@ static int mv88e1xxx_interrupt_enable(struct cphy *cphy)
73 73
74 t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer); 74 t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
75 elmer |= ELMER0_GP_BIT1; 75 elmer |= ELMER0_GP_BIT1;
76 if (is_T2(cphy->adapter)) { 76 if (is_T2(cphy->adapter))
77 elmer |= ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4; 77 elmer |= ELMER0_GP_BIT2 | ELMER0_GP_BIT3 | ELMER0_GP_BIT4;
78 }
79 t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer); 78 t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
80 } 79 }
81 return 0; 80 return 0;
@@ -92,9 +91,8 @@ static int mv88e1xxx_interrupt_disable(struct cphy *cphy)
92 91
93 t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer); 92 t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
94 elmer &= ~ELMER0_GP_BIT1; 93 elmer &= ~ELMER0_GP_BIT1;
95 if (is_T2(cphy->adapter)) { 94 if (is_T2(cphy->adapter))
96 elmer &= ~(ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4); 95 elmer &= ~(ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4);
97 }
98 t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer); 96 t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
99 } 97 }
100 return 0; 98 return 0;
@@ -112,9 +110,8 @@ static int mv88e1xxx_interrupt_clear(struct cphy *cphy)
112 if (t1_is_asic(cphy->adapter)) { 110 if (t1_is_asic(cphy->adapter)) {
113 t1_tpi_read(cphy->adapter, A_ELMER0_INT_CAUSE, &elmer); 111 t1_tpi_read(cphy->adapter, A_ELMER0_INT_CAUSE, &elmer);
114 elmer |= ELMER0_GP_BIT1; 112 elmer |= ELMER0_GP_BIT1;
115 if (is_T2(cphy->adapter)) { 113 if (is_T2(cphy->adapter))
116 elmer |= ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4; 114 elmer |= ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4;
117 }
118 t1_tpi_write(cphy->adapter, A_ELMER0_INT_CAUSE, elmer); 115 t1_tpi_write(cphy->adapter, A_ELMER0_INT_CAUSE, elmer);
119 } 116 }
120 return 0; 117 return 0;
@@ -300,7 +297,7 @@ static int mv88e1xxx_interrupt_handler(struct cphy *cphy)
300 297
301 /* 298 /*
302 * Loop until cause reads zero. Need to handle bouncing interrupts. 299 * Loop until cause reads zero. Need to handle bouncing interrupts.
303 */ 300 */
304 while (1) { 301 while (1) {
305 u32 cause; 302 u32 cause;
306 303
@@ -308,15 +305,16 @@ static int mv88e1xxx_interrupt_handler(struct cphy *cphy)
308 MV88E1XXX_INTERRUPT_STATUS_REGISTER, 305 MV88E1XXX_INTERRUPT_STATUS_REGISTER,
309 &cause); 306 &cause);
310 cause &= INTR_ENABLE_MASK; 307 cause &= INTR_ENABLE_MASK;
311 if (!cause) break; 308 if (!cause)
309 break;
312 310
313 if (cause & MV88E1XXX_INTR_LINK_CHNG) { 311 if (cause & MV88E1XXX_INTR_LINK_CHNG) {
314 (void) simple_mdio_read(cphy, 312 (void) simple_mdio_read(cphy,
315 MV88E1XXX_SPECIFIC_STATUS_REGISTER, &status); 313 MV88E1XXX_SPECIFIC_STATUS_REGISTER, &status);
316 314
317 if (status & MV88E1XXX_INTR_LINK_CHNG) { 315 if (status & MV88E1XXX_INTR_LINK_CHNG)
318 cphy->state |= PHY_LINK_UP; 316 cphy->state |= PHY_LINK_UP;
319 } else { 317 else {
320 cphy->state &= ~PHY_LINK_UP; 318 cphy->state &= ~PHY_LINK_UP;
321 if (cphy->state & PHY_AUTONEG_EN) 319 if (cphy->state & PHY_AUTONEG_EN)
322 cphy->state &= ~PHY_AUTONEG_RDY; 320 cphy->state &= ~PHY_AUTONEG_RDY;
@@ -360,7 +358,8 @@ static struct cphy *mv88e1xxx_phy_create(adapter_t *adapter, int phy_addr,
360{ 358{
361 struct cphy *cphy = kzalloc(sizeof(*cphy), GFP_KERNEL); 359 struct cphy *cphy = kzalloc(sizeof(*cphy), GFP_KERNEL);
362 360
363 if (!cphy) return NULL; 361 if (!cphy)
362 return NULL;
364 363
365 cphy_init(cphy, adapter, phy_addr, &mv88e1xxx_ops, mdio_ops); 364 cphy_init(cphy, adapter, phy_addr, &mv88e1xxx_ops, mdio_ops);
366 365
@@ -377,11 +376,11 @@ static struct cphy *mv88e1xxx_phy_create(adapter_t *adapter, int phy_addr,
377 } 376 }
378 (void) mv88e1xxx_downshift_set(cphy, 1); /* Enable downshift */ 377 (void) mv88e1xxx_downshift_set(cphy, 1); /* Enable downshift */
379 378
380 /* LED */ 379 /* LED */
381 if (is_T2(adapter)) { 380 if (is_T2(adapter)) {
382 (void) simple_mdio_write(cphy, 381 (void) simple_mdio_write(cphy,
383 MV88E1XXX_LED_CONTROL_REGISTER, 0x1); 382 MV88E1XXX_LED_CONTROL_REGISTER, 0x1);
384 } 383 }
385 384
386 return cphy; 385 return cphy;
387} 386}
diff --git a/drivers/net/chelsio/my3126.c b/drivers/net/chelsio/my3126.c
index 82fed1dd5005..87dde3e60046 100644
--- a/drivers/net/chelsio/my3126.c
+++ b/drivers/net/chelsio/my3126.c
@@ -10,25 +10,25 @@ static int my3126_reset(struct cphy *cphy, int wait)
10 * This can be done through registers. It is not required since 10 * This can be done through registers. It is not required since
11 * a full chip reset is used. 11 * a full chip reset is used.
12 */ 12 */
13 return (0); 13 return 0;
14} 14}
15 15
16static int my3126_interrupt_enable(struct cphy *cphy) 16static int my3126_interrupt_enable(struct cphy *cphy)
17{ 17{
18 schedule_delayed_work(&cphy->phy_update, HZ/30); 18 schedule_delayed_work(&cphy->phy_update, HZ/30);
19 t1_tpi_read(cphy->adapter, A_ELMER0_GPO, &cphy->elmer_gpo); 19 t1_tpi_read(cphy->adapter, A_ELMER0_GPO, &cphy->elmer_gpo);
20 return (0); 20 return 0;
21} 21}
22 22
23static int my3126_interrupt_disable(struct cphy *cphy) 23static int my3126_interrupt_disable(struct cphy *cphy)
24{ 24{
25 cancel_rearming_delayed_work(&cphy->phy_update); 25 cancel_rearming_delayed_work(&cphy->phy_update);
26 return (0); 26 return 0;
27} 27}
28 28
29static int my3126_interrupt_clear(struct cphy *cphy) 29static int my3126_interrupt_clear(struct cphy *cphy)
30{ 30{
31 return (0); 31 return 0;
32} 32}
33 33
34#define OFFSET(REG_ADDR) (REG_ADDR << 2) 34#define OFFSET(REG_ADDR) (REG_ADDR << 2)
@@ -102,7 +102,7 @@ static void my3216_poll(struct work_struct *work)
102 102
103static int my3126_set_loopback(struct cphy *cphy, int on) 103static int my3126_set_loopback(struct cphy *cphy, int on)
104{ 104{
105 return (0); 105 return 0;
106} 106}
107 107
108/* To check the activity LED */ 108/* To check the activity LED */
@@ -146,7 +146,7 @@ static int my3126_get_link_status(struct cphy *cphy,
146 if (fc) 146 if (fc)
147 *fc = PAUSE_RX | PAUSE_TX; 147 *fc = PAUSE_RX | PAUSE_TX;
148 148
149 return (0); 149 return 0;
150} 150}
151 151
152static void my3126_destroy(struct cphy *cphy) 152static void my3126_destroy(struct cphy *cphy)
@@ -177,7 +177,7 @@ static struct cphy *my3126_phy_create(adapter_t *adapter,
177 INIT_DELAYED_WORK(&cphy->phy_update, my3216_poll); 177 INIT_DELAYED_WORK(&cphy->phy_update, my3216_poll);
178 cphy->bmsr = 0; 178 cphy->bmsr = 0;
179 179
180 return (cphy); 180 return cphy;
181} 181}
182 182
183/* Chip Reset */ 183/* Chip Reset */
@@ -198,7 +198,7 @@ static int my3126_phy_reset(adapter_t * adapter)
198 val |= 0x8000; 198 val |= 0x8000;
199 t1_tpi_write(adapter, A_ELMER0_GPO, val); 199 t1_tpi_write(adapter, A_ELMER0_GPO, val);
200 udelay(100); 200 udelay(100);
201 return (0); 201 return 0;
202} 202}
203 203
204struct gphy t1_my3126_ops = { 204struct gphy t1_my3126_ops = {
diff --git a/drivers/net/chelsio/pm3393.c b/drivers/net/chelsio/pm3393.c
index 63cabeb98afe..69129edeefd6 100644
--- a/drivers/net/chelsio/pm3393.c
+++ b/drivers/net/chelsio/pm3393.c
@@ -446,17 +446,51 @@ static void pm3393_rmon_update(struct adapter *adapter, u32 offs, u64 *val,
446 *val += 1ull << 40; 446 *val += 1ull << 40;
447} 447}
448 448
449#define RMON_UPDATE(mac, name, stat_name) \
450 pm3393_rmon_update((mac)->adapter, OFFSET(name), \
451 &(mac)->stats.stat_name, \
452 (ro &((name - SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW) >> 2)))
453
454
455static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac, 449static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac,
456 int flag) 450 int flag)
457{ 451{
458 u64 ro; 452 static struct {
459 u32 val0, val1, val2, val3; 453 unsigned int reg;
454 unsigned int offset;
455 } hw_stats [] = {
456
457#define HW_STAT(name, stat_name) \
458 { name, (&((struct cmac_statistics *)NULL)->stat_name) - (u64 *)NULL }
459
460 /* Rx stats */
461 HW_STAT(RxOctetsReceivedOK, RxOctetsOK),
462 HW_STAT(RxUnicastFramesReceivedOK, RxUnicastFramesOK),
463 HW_STAT(RxMulticastFramesReceivedOK, RxMulticastFramesOK),
464 HW_STAT(RxBroadcastFramesReceivedOK, RxBroadcastFramesOK),
465 HW_STAT(RxPAUSEMACCtrlFramesReceived, RxPauseFrames),
466 HW_STAT(RxFrameCheckSequenceErrors, RxFCSErrors),
467 HW_STAT(RxFramesLostDueToInternalMACErrors,
468 RxInternalMACRcvError),
469 HW_STAT(RxSymbolErrors, RxSymbolErrors),
470 HW_STAT(RxInRangeLengthErrors, RxInRangeLengthErrors),
471 HW_STAT(RxFramesTooLongErrors , RxFrameTooLongErrors),
472 HW_STAT(RxJabbers, RxJabberErrors),
473 HW_STAT(RxFragments, RxRuntErrors),
474 HW_STAT(RxUndersizedFrames, RxRuntErrors),
475 HW_STAT(RxJumboFramesReceivedOK, RxJumboFramesOK),
476 HW_STAT(RxJumboOctetsReceivedOK, RxJumboOctetsOK),
477
478 /* Tx stats */
479 HW_STAT(TxOctetsTransmittedOK, TxOctetsOK),
480 HW_STAT(TxFramesLostDueToInternalMACTransmissionError,
481 TxInternalMACXmitError),
482 HW_STAT(TxTransmitSystemError, TxFCSErrors),
483 HW_STAT(TxUnicastFramesTransmittedOK, TxUnicastFramesOK),
484 HW_STAT(TxMulticastFramesTransmittedOK, TxMulticastFramesOK),
485 HW_STAT(TxBroadcastFramesTransmittedOK, TxBroadcastFramesOK),
486 HW_STAT(TxPAUSEMACCtrlFramesTransmitted, TxPauseFrames),
487 HW_STAT(TxJumboFramesReceivedOK, TxJumboFramesOK),
488 HW_STAT(TxJumboOctetsReceivedOK, TxJumboOctetsOK)
489 }, *p = hw_stats;
490 u64 ro;
491 u32 val0, val1, val2, val3;
492 u64 *stats = (u64 *) &mac->stats;
493 unsigned int i;
460 494
461 /* Snap the counters */ 495 /* Snap the counters */
462 pmwrite(mac, SUNI1x10GEXP_REG_MSTAT_CONTROL, 496 pmwrite(mac, SUNI1x10GEXP_REG_MSTAT_CONTROL,
@@ -470,35 +504,14 @@ static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac,
470 ro = ((u64)val0 & 0xffff) | (((u64)val1 & 0xffff) << 16) | 504 ro = ((u64)val0 & 0xffff) | (((u64)val1 & 0xffff) << 16) |
471 (((u64)val2 & 0xffff) << 32) | (((u64)val3 & 0xffff) << 48); 505 (((u64)val2 & 0xffff) << 32) | (((u64)val3 & 0xffff) << 48);
472 506
473 /* Rx stats */ 507 for (i = 0; i < ARRAY_SIZE(hw_stats); i++) {
474 RMON_UPDATE(mac, RxOctetsReceivedOK, RxOctetsOK); 508 unsigned reg = p->reg - SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW;
475 RMON_UPDATE(mac, RxUnicastFramesReceivedOK, RxUnicastFramesOK); 509
476 RMON_UPDATE(mac, RxMulticastFramesReceivedOK, RxMulticastFramesOK); 510 pm3393_rmon_update((mac)->adapter, OFFSET(p->reg),
477 RMON_UPDATE(mac, RxBroadcastFramesReceivedOK, RxBroadcastFramesOK); 511 stats + p->offset, ro & (reg >> 2));
478 RMON_UPDATE(mac, RxPAUSEMACCtrlFramesReceived, RxPauseFrames); 512 }
479 RMON_UPDATE(mac, RxFrameCheckSequenceErrors, RxFCSErrors); 513
480 RMON_UPDATE(mac, RxFramesLostDueToInternalMACErrors, 514
481 RxInternalMACRcvError);
482 RMON_UPDATE(mac, RxSymbolErrors, RxSymbolErrors);
483 RMON_UPDATE(mac, RxInRangeLengthErrors, RxInRangeLengthErrors);
484 RMON_UPDATE(mac, RxFramesTooLongErrors , RxFrameTooLongErrors);
485 RMON_UPDATE(mac, RxJabbers, RxJabberErrors);
486 RMON_UPDATE(mac, RxFragments, RxRuntErrors);
487 RMON_UPDATE(mac, RxUndersizedFrames, RxRuntErrors);
488 RMON_UPDATE(mac, RxJumboFramesReceivedOK, RxJumboFramesOK);
489 RMON_UPDATE(mac, RxJumboOctetsReceivedOK, RxJumboOctetsOK);
490
491 /* Tx stats */
492 RMON_UPDATE(mac, TxOctetsTransmittedOK, TxOctetsOK);
493 RMON_UPDATE(mac, TxFramesLostDueToInternalMACTransmissionError,
494 TxInternalMACXmitError);
495 RMON_UPDATE(mac, TxTransmitSystemError, TxFCSErrors);
496 RMON_UPDATE(mac, TxUnicastFramesTransmittedOK, TxUnicastFramesOK);
497 RMON_UPDATE(mac, TxMulticastFramesTransmittedOK, TxMulticastFramesOK);
498 RMON_UPDATE(mac, TxBroadcastFramesTransmittedOK, TxBroadcastFramesOK);
499 RMON_UPDATE(mac, TxPAUSEMACCtrlFramesTransmitted, TxPauseFrames);
500 RMON_UPDATE(mac, TxJumboFramesReceivedOK, TxJumboFramesOK);
501 RMON_UPDATE(mac, TxJumboOctetsReceivedOK, TxJumboOctetsOK);
502 515
503 return &mac->stats; 516 return &mac->stats;
504} 517}
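Here the same table-driven conversion is applied, with one extra wrinkle: the PM3393's MSTAT counters are 40-bit rolling counters, and the snapped RO word carries a per-counter wrap flag, which is why pm3393_rmon_update() adds 1ull << 40 when the flag is set. The assumed accumulation logic, sketched:

	/* merge a fresh 40-bit hardware reading into a 64-bit software
	 * accumulator; rolled_over is the per-counter wrap flag from the
	 * snapped RO word (assumed semantics, condensed from above) */
	static void rmon_merge40(unsigned long long *acc,
				 unsigned long long hw40, int rolled_over)
	{
		*acc &= ~0ull << 40;		/* keep software-extended high bits */
		*acc |= hw40 & ((1ull << 40) - 1);
		if (rolled_over)
			*acc += 1ull << 40;	/* account for the wrap */
	}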
@@ -534,9 +547,9 @@ static int pm3393_macaddress_set(struct cmac *cmac, u8 ma[6])
534 /* Store local copy */ 547 /* Store local copy */
535 memcpy(cmac->instance->mac_addr, ma, 6); 548 memcpy(cmac->instance->mac_addr, ma, 6);
536 549
537 lo = ((u32) ma[1] << 8) | (u32) ma[0]; 550 lo = ((u32) ma[1] << 8) | (u32) ma[0];
538 mid = ((u32) ma[3] << 8) | (u32) ma[2]; 551 mid = ((u32) ma[3] << 8) | (u32) ma[2];
539 hi = ((u32) ma[5] << 8) | (u32) ma[4]; 552 hi = ((u32) ma[5] << 8) | (u32) ma[4];
540 553
541 /* Disable Rx/Tx MAC before configuring it. */ 554 /* Disable Rx/Tx MAC before configuring it. */
542 if (enabled) 555 if (enabled)
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index 659cb2252e44..89a682702fa9 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -71,12 +71,9 @@
71#define SGE_FREEL_REFILL_THRESH 16 71#define SGE_FREEL_REFILL_THRESH 16
72#define SGE_RESPQ_E_N 1024 72#define SGE_RESPQ_E_N 1024
73#define SGE_INTRTIMER_NRES 1000 73#define SGE_INTRTIMER_NRES 1000
74#define SGE_RX_COPY_THRES 256
75#define SGE_RX_SM_BUF_SIZE 1536 74#define SGE_RX_SM_BUF_SIZE 1536
76#define SGE_TX_DESC_MAX_PLEN 16384 75#define SGE_TX_DESC_MAX_PLEN 16384
77 76
78# define SGE_RX_DROP_THRES 2
79
80#define SGE_RESPQ_REPLENISH_THRES (SGE_RESPQ_E_N / 4) 77#define SGE_RESPQ_REPLENISH_THRES (SGE_RESPQ_E_N / 4)
81 78
82/* 79/*
@@ -85,10 +82,6 @@
85 */ 82 */
86#define TX_RECLAIM_PERIOD (HZ / 4) 83#define TX_RECLAIM_PERIOD (HZ / 4)
87 84
88#ifndef NET_IP_ALIGN
89# define NET_IP_ALIGN 2
90#endif
91
92#define M_CMD_LEN 0x7fffffff 85#define M_CMD_LEN 0x7fffffff
93#define V_CMD_LEN(v) (v) 86#define V_CMD_LEN(v) (v)
94#define G_CMD_LEN(v) ((v) & M_CMD_LEN) 87#define G_CMD_LEN(v) ((v) & M_CMD_LEN)
@@ -195,7 +188,7 @@ struct cmdQ {
195 struct cmdQ_e *entries; /* HW command descriptor Q */ 188 struct cmdQ_e *entries; /* HW command descriptor Q */
196 struct cmdQ_ce *centries; /* SW command context descriptor Q */ 189 struct cmdQ_ce *centries; /* SW command context descriptor Q */
197 dma_addr_t dma_addr; /* DMA addr HW command descriptor Q */ 190 dma_addr_t dma_addr; /* DMA addr HW command descriptor Q */
198 spinlock_t lock; /* Lock to protect cmdQ enqueuing */ 191 spinlock_t lock; /* Lock to protect cmdQ enqueuing */
199}; 192};
200 193
201struct freelQ { 194struct freelQ {
@@ -241,9 +234,9 @@ struct sched_port {
241/* Per T204 device */ 234/* Per T204 device */
242struct sched { 235struct sched {
243 ktime_t last_updated; /* last time quotas were computed */ 236 ktime_t last_updated; /* last time quotas were computed */
244 unsigned int max_avail; /* max bits to be sent to any port */ 237 unsigned int max_avail; /* max bits to be sent to any port */
245 unsigned int port; /* port index (round robin ports) */ 238 unsigned int port; /* port index (round robin ports) */
246 unsigned int num; /* num skbs in per port queues */ 239 unsigned int num; /* num skbs in per port queues */
247 struct sched_port p[MAX_NPORTS]; 240 struct sched_port p[MAX_NPORTS];
248 struct tasklet_struct sched_tsk;/* tasklet used to run scheduler */ 241 struct tasklet_struct sched_tsk;/* tasklet used to run scheduler */
249}; 242};
@@ -259,10 +252,10 @@ static void restart_sched(unsigned long);
259 * contention. 252 * contention.
260 */ 253 */
261struct sge { 254struct sge {
262 struct adapter *adapter; /* adapter backpointer */ 255 struct adapter *adapter; /* adapter backpointer */
263 struct net_device *netdev; /* netdevice backpointer */ 256 struct net_device *netdev; /* netdevice backpointer */
264 struct freelQ freelQ[SGE_FREELQ_N]; /* buffer free lists */ 257 struct freelQ freelQ[SGE_FREELQ_N]; /* buffer free lists */
265 struct respQ respQ; /* response Q */ 258 struct respQ respQ; /* response Q */
266 unsigned long stopped_tx_queues; /* bitmap of suspended Tx queues */ 259 unsigned long stopped_tx_queues; /* bitmap of suspended Tx queues */
267 unsigned int rx_pkt_pad; /* RX padding for L2 packets */ 260 unsigned int rx_pkt_pad; /* RX padding for L2 packets */
268 unsigned int jumbo_fl; /* jumbo freelist Q index */ 261 unsigned int jumbo_fl; /* jumbo freelist Q index */
@@ -460,7 +453,7 @@ static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb,
460 if (credits < MAX_SKB_FRAGS + 1) 453 if (credits < MAX_SKB_FRAGS + 1)
461 goto out; 454 goto out;
462 455
463 again: 456again:
464 for (i = 0; i < MAX_NPORTS; i++) { 457 for (i = 0; i < MAX_NPORTS; i++) {
465 s->port = ++s->port & (MAX_NPORTS - 1); 458 s->port = ++s->port & (MAX_NPORTS - 1);
466 skbq = &s->p[s->port].skbq; 459 skbq = &s->p[s->port].skbq;
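The masked increment above (s->port = ++s->port & (MAX_NPORTS - 1)) is a branch-free round robin that relies on MAX_NPORTS being a power of two; the sequenced equivalent is:

	/* Advance to the next port, wrapping MAX_NPORTS-1 -> 0.  Unlike the
	 * ++/assign combination, this form has no unsequenced modification
	 * of s->port. */
	s->port = (s->port + 1) & (MAX_NPORTS - 1);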
@@ -483,8 +476,8 @@ static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb,
483 if (update-- && sched_update_avail(sge)) 476 if (update-- && sched_update_avail(sge))
484 goto again; 477 goto again;
485 478
486 out: 479out:
487 /* If there are more pending skbs, we use the hardware to schedule us 480 /* If there are more pending skbs, we use the hardware to schedule us
488 * again. 481 * again.
489 */ 482 */
490 if (s->num && !skb) { 483 if (s->num && !skb) {
@@ -575,11 +568,10 @@ static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
575 q->size = p->freelQ_size[i]; 568 q->size = p->freelQ_size[i];
576 q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN; 569 q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN;
577 size = sizeof(struct freelQ_e) * q->size; 570 size = sizeof(struct freelQ_e) * q->size;
578 q->entries = (struct freelQ_e *) 571 q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
579 pci_alloc_consistent(pdev, size, &q->dma_addr);
580 if (!q->entries) 572 if (!q->entries)
581 goto err_no_mem; 573 goto err_no_mem;
582 memset(q->entries, 0, size); 574
583 size = sizeof(struct freelQ_ce) * q->size; 575 size = sizeof(struct freelQ_ce) * q->size;
584 q->centries = kzalloc(size, GFP_KERNEL); 576 q->centries = kzalloc(size, GFP_KERNEL);
585 if (!q->centries) 577 if (!q->centries)
@@ -613,11 +605,10 @@ static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
613 sge->respQ.size = SGE_RESPQ_E_N; 605 sge->respQ.size = SGE_RESPQ_E_N;
614 sge->respQ.credits = 0; 606 sge->respQ.credits = 0;
615 size = sizeof(struct respQ_e) * sge->respQ.size; 607 size = sizeof(struct respQ_e) * sge->respQ.size;
616 sge->respQ.entries = (struct respQ_e *) 608 sge->respQ.entries =
617 pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr); 609 pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr);
618 if (!sge->respQ.entries) 610 if (!sge->respQ.entries)
619 goto err_no_mem; 611 goto err_no_mem;
620 memset(sge->respQ.entries, 0, size);
621 return 0; 612 return 0;
622 613
623err_no_mem: 614err_no_mem:
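Both allocation hunks follow the same split: descriptor rings the NIC reads come from coherent DMA memory, per-entry software context from ordinary kernel memory. A condensed sketch of the pattern (function and parameter names hypothetical):

	static int ring_alloc(struct pci_dev *pdev, void **hw, dma_addr_t *dma,
			      void **sw, size_t hw_sz, size_t sw_sz)
	{
		*hw = pci_alloc_consistent(pdev, hw_sz, dma);	/* device-visible */
		if (!*hw)
			return -ENOMEM;
		*sw = kzalloc(sw_sz, GFP_KERNEL);		/* CPU-only state */
		if (!*sw) {
			pci_free_consistent(pdev, hw_sz, *hw, *dma);
			return -ENOMEM;
		}
		return 0;
	}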
@@ -637,20 +628,12 @@ static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
637 q->in_use -= n; 628 q->in_use -= n;
638 ce = &q->centries[cidx]; 629 ce = &q->centries[cidx];
639 while (n--) { 630 while (n--) {
640 if (q->sop) { 631 if (likely(pci_unmap_len(ce, dma_len))) {
641 if (likely(pci_unmap_len(ce, dma_len))) { 632 pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
642 pci_unmap_single(pdev, 633 pci_unmap_len(ce, dma_len),
643 pci_unmap_addr(ce, dma_addr), 634 PCI_DMA_TODEVICE);
644 pci_unmap_len(ce, dma_len), 635 if (q->sop)
645 PCI_DMA_TODEVICE);
646 q->sop = 0; 636 q->sop = 0;
647 }
648 } else {
649 if (likely(pci_unmap_len(ce, dma_len))) {
650 pci_unmap_page(pdev, pci_unmap_addr(ce, dma_addr),
651 pci_unmap_len(ce, dma_len),
652 PCI_DMA_TODEVICE);
653 }
654 } 637 }
655 if (ce->skb) { 638 if (ce->skb) {
656 dev_kfree_skb_any(ce->skb); 639 dev_kfree_skb_any(ce->skb);
@@ -711,11 +694,10 @@ static int alloc_tx_resources(struct sge *sge, struct sge_params *p)
711 q->stop_thres = 0; 694 q->stop_thres = 0;
712 spin_lock_init(&q->lock); 695 spin_lock_init(&q->lock);
713 size = sizeof(struct cmdQ_e) * q->size; 696 size = sizeof(struct cmdQ_e) * q->size;
714 q->entries = (struct cmdQ_e *) 697 q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
715 pci_alloc_consistent(pdev, size, &q->dma_addr);
716 if (!q->entries) 698 if (!q->entries)
717 goto err_no_mem; 699 goto err_no_mem;
718 memset(q->entries, 0, size); 700
719 size = sizeof(struct cmdQ_ce) * q->size; 701 size = sizeof(struct cmdQ_ce) * q->size;
720 q->centries = kzalloc(size, GFP_KERNEL); 702 q->centries = kzalloc(size, GFP_KERNEL);
721 if (!q->centries) 703 if (!q->centries)
@@ -770,7 +752,7 @@ void t1_set_vlan_accel(struct adapter *adapter, int on_off)
770static void configure_sge(struct sge *sge, struct sge_params *p) 752static void configure_sge(struct sge *sge, struct sge_params *p)
771{ 753{
772 struct adapter *ap = sge->adapter; 754 struct adapter *ap = sge->adapter;
773 755
774 writel(0, ap->regs + A_SG_CONTROL); 756 writel(0, ap->regs + A_SG_CONTROL);
775 setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size, 757 setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size,
776 A_SG_CMD0BASELWR, A_SG_CMD0BASEUPR, A_SG_CMD0SIZE); 758 A_SG_CMD0BASELWR, A_SG_CMD0BASEUPR, A_SG_CMD0SIZE);
@@ -850,7 +832,6 @@ static void refill_free_list(struct sge *sge, struct freelQ *q)
850 struct freelQ_e *e = &q->entries[q->pidx]; 832 struct freelQ_e *e = &q->entries[q->pidx];
851 unsigned int dma_len = q->rx_buffer_size - q->dma_offset; 833 unsigned int dma_len = q->rx_buffer_size - q->dma_offset;
852 834
853
854 while (q->credits < q->size) { 835 while (q->credits < q->size) {
855 struct sk_buff *skb; 836 struct sk_buff *skb;
856 dma_addr_t mapping; 837 dma_addr_t mapping;
@@ -862,6 +843,8 @@ static void refill_free_list(struct sge *sge, struct freelQ *q)
862 skb_reserve(skb, q->dma_offset); 843 skb_reserve(skb, q->dma_offset);
863 mapping = pci_map_single(pdev, skb->data, dma_len, 844 mapping = pci_map_single(pdev, skb->data, dma_len,
864 PCI_DMA_FROMDEVICE); 845 PCI_DMA_FROMDEVICE);
846 skb_reserve(skb, sge->rx_pkt_pad);
847
865 ce->skb = skb; 848 ce->skb = skb;
866 pci_unmap_addr_set(ce, dma_addr, mapping); 849 pci_unmap_addr_set(ce, dma_addr, mapping);
867 pci_unmap_len_set(ce, dma_len, dma_len); 850 pci_unmap_len_set(ce, dma_len, dma_len);
@@ -881,7 +864,6 @@ static void refill_free_list(struct sge *sge, struct freelQ *q)
881 } 864 }
882 q->credits++; 865 q->credits++;
883 } 866 }
884
885} 867}
886 868
887/* 869/*
@@ -1041,6 +1023,10 @@ static void recycle_fl_buf(struct freelQ *fl, int idx)
1041 } 1023 }
1042} 1024}
1043 1025
1026static int copybreak __read_mostly = 256;
1027module_param(copybreak, int, 0);
1028MODULE_PARM_DESC(copybreak, "Receive copy threshold");
1029
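With this change the copy threshold moves from the compile-time SGE_RX_COPY_THRES to a load-time tunable, e.g. modprobe cxgb copybreak=128 (assuming the module is named cxgb). Because the permission argument is 0, the value is not exposed under /sys/module; a sysfs-visible variant would look like:

	static int copybreak __read_mostly = 256;
	module_param(copybreak, int, 0444);	/* world-readable in sysfs */
	MODULE_PARM_DESC(copybreak, "Receive copy threshold");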
1044/** 1030/**
1045 * get_packet - return the next ingress packet buffer 1031 * get_packet - return the next ingress packet buffer
1046 * @pdev: the PCI device that received the packet 1032 * @pdev: the PCI device that received the packet
@@ -1060,45 +1046,42 @@ static void recycle_fl_buf(struct freelQ *fl, int idx)
1060 * be copied but there is no memory for the copy. 1046 * be copied but there is no memory for the copy.
1061 */ 1047 */
1062static inline struct sk_buff *get_packet(struct pci_dev *pdev, 1048static inline struct sk_buff *get_packet(struct pci_dev *pdev,
1063 struct freelQ *fl, unsigned int len, 1049 struct freelQ *fl, unsigned int len)
1064 int dma_pad, int skb_pad,
1065 unsigned int copy_thres,
1066 unsigned int drop_thres)
1067{ 1050{
1068 struct sk_buff *skb; 1051 struct sk_buff *skb;
1069 struct freelQ_ce *ce = &fl->centries[fl->cidx]; 1052 const struct freelQ_ce *ce = &fl->centries[fl->cidx];
1070 1053
1071 if (len < copy_thres) { 1054 if (len < copybreak) {
1072 skb = alloc_skb(len + skb_pad, GFP_ATOMIC); 1055 skb = alloc_skb(len + 2, GFP_ATOMIC);
1073 if (likely(skb != NULL)) { 1056 if (!skb)
1074 skb_reserve(skb, skb_pad);
1075 skb_put(skb, len);
1076 pci_dma_sync_single_for_cpu(pdev,
1077 pci_unmap_addr(ce, dma_addr),
1078 pci_unmap_len(ce, dma_len),
1079 PCI_DMA_FROMDEVICE);
1080 memcpy(skb->data, ce->skb->data + dma_pad, len);
1081 pci_dma_sync_single_for_device(pdev,
1082 pci_unmap_addr(ce, dma_addr),
1083 pci_unmap_len(ce, dma_len),
1084 PCI_DMA_FROMDEVICE);
1085 } else if (!drop_thres)
1086 goto use_orig_buf; 1057 goto use_orig_buf;
1087 1058
1059 skb_reserve(skb, 2); /* align IP header */
1060 skb_put(skb, len);
1061 pci_dma_sync_single_for_cpu(pdev,
1062 pci_unmap_addr(ce, dma_addr),
1063 pci_unmap_len(ce, dma_len),
1064 PCI_DMA_FROMDEVICE);
1065 memcpy(skb->data, ce->skb->data, len);
1066 pci_dma_sync_single_for_device(pdev,
1067 pci_unmap_addr(ce, dma_addr),
1068 pci_unmap_len(ce, dma_len),
1069 PCI_DMA_FROMDEVICE);
1088 recycle_fl_buf(fl, fl->cidx); 1070 recycle_fl_buf(fl, fl->cidx);
1089 return skb; 1071 return skb;
1090 } 1072 }
1091 1073
1092 if (fl->credits < drop_thres) { 1074use_orig_buf:
1075 if (fl->credits < 2) {
1093 recycle_fl_buf(fl, fl->cidx); 1076 recycle_fl_buf(fl, fl->cidx);
1094 return NULL; 1077 return NULL;
1095 } 1078 }
1096 1079
1097use_orig_buf:
1098 pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr), 1080 pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
1099 pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE); 1081 pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
1100 skb = ce->skb; 1082 skb = ce->skb;
1101 skb_reserve(skb, dma_pad); 1083 prefetch(skb->data);
1084
1102 skb_put(skb, len); 1085 skb_put(skb, len);
1103 return skb; 1086 return skb;
1104} 1087}
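The rewritten get_packet() is a classic copybreak: below the threshold it is cheaper to memcpy the frame into a small fresh skb and recycle the mapped DMA buffer than to surrender the buffer and remap a replacement on the refill path. A self-contained sketch of the decision (helper name hypothetical):

	/* Returns a private copy for short frames, NULL if the caller
	 * should hand the original buffer up the stack zero-copy. */
	static struct sk_buff *rx_copybreak(const struct sk_buff *orig,
					    unsigned int len, unsigned int thresh)
	{
		struct sk_buff *skb;

		if (len >= thresh)
			return NULL;
		skb = alloc_skb(len + NET_IP_ALIGN, GFP_ATOMIC);
		if (!skb)
			return NULL;
		skb_reserve(skb, NET_IP_ALIGN);		/* align IP header */
		memcpy(skb_put(skb, len), orig->data, len);
		return skb;				/* orig can be recycled */
	}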
@@ -1137,6 +1120,7 @@ static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
1137static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb) 1120static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb)
1138{ 1121{
1139 unsigned int count = 0; 1122 unsigned int count = 0;
1123
1140 if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) { 1124 if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
1141 unsigned int nfrags = skb_shinfo(skb)->nr_frags; 1125 unsigned int nfrags = skb_shinfo(skb)->nr_frags;
1142 unsigned int i, len = skb->len - skb->data_len; 1126 unsigned int i, len = skb->len - skb->data_len;
@@ -1343,7 +1327,7 @@ static void restart_sched(unsigned long arg)
1343 while ((skb = sched_skb(sge, NULL, credits)) != NULL) { 1327 while ((skb = sched_skb(sge, NULL, credits)) != NULL) {
1344 unsigned int genbit, pidx, count; 1328 unsigned int genbit, pidx, count;
1345 count = 1 + skb_shinfo(skb)->nr_frags; 1329 count = 1 + skb_shinfo(skb)->nr_frags;
1346 count += compute_large_page_tx_descs(skb); 1330 count += compute_large_page_tx_descs(skb);
1347 q->in_use += count; 1331 q->in_use += count;
1348 genbit = q->genbit; 1332 genbit = q->genbit;
1349 pidx = q->pidx; 1333 pidx = q->pidx;
@@ -1375,27 +1359,25 @@ static void restart_sched(unsigned long arg)
1375 * 1359 *
 1376 * Process an ingress ethernet packet and deliver it to the stack. 1360 * Process an ingress ethernet packet and deliver it to the stack.
1377 */ 1361 */
1378static int sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len) 1362static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
1379{ 1363{
1380 struct sk_buff *skb; 1364 struct sk_buff *skb;
1381 struct cpl_rx_pkt *p; 1365 const struct cpl_rx_pkt *p;
1382 struct adapter *adapter = sge->adapter; 1366 struct adapter *adapter = sge->adapter;
1383 struct sge_port_stats *st; 1367 struct sge_port_stats *st;
1384 1368
1385 skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad, 1369 skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad);
1386 sge->rx_pkt_pad, 2, SGE_RX_COPY_THRES,
1387 SGE_RX_DROP_THRES);
1388 if (unlikely(!skb)) { 1370 if (unlikely(!skb)) {
1389 sge->stats.rx_drops++; 1371 sge->stats.rx_drops++;
1390 return 0; 1372 return;
1391 } 1373 }
1392 1374
1393 p = (struct cpl_rx_pkt *)skb->data; 1375 p = (const struct cpl_rx_pkt *) skb->data;
1394 skb_pull(skb, sizeof(*p));
1395 if (p->iff >= adapter->params.nports) { 1376 if (p->iff >= adapter->params.nports) {
1396 kfree_skb(skb); 1377 kfree_skb(skb);
1397 return 0; 1378 return;
1398 } 1379 }
1380 __skb_pull(skb, sizeof(*p));
1399 1381
1400 skb->dev = adapter->port[p->iff].dev; 1382 skb->dev = adapter->port[p->iff].dev;
1401 skb->dev->last_rx = jiffies; 1383 skb->dev->last_rx = jiffies;
@@ -1427,7 +1409,6 @@ static int sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
1427 netif_rx(skb); 1409 netif_rx(skb);
1428#endif 1410#endif
1429 } 1411 }
1430 return 0;
1431} 1412}
1432 1413
1433/* 1414/*
@@ -1448,29 +1429,28 @@ static inline int enough_free_Tx_descs(const struct cmdQ *q)
1448static void restart_tx_queues(struct sge *sge) 1429static void restart_tx_queues(struct sge *sge)
1449{ 1430{
1450 struct adapter *adap = sge->adapter; 1431 struct adapter *adap = sge->adapter;
1432 int i;
1451 1433
1452 if (enough_free_Tx_descs(&sge->cmdQ[0])) { 1434 if (!enough_free_Tx_descs(&sge->cmdQ[0]))
1453 int i; 1435 return;
1454 1436
1455 for_each_port(adap, i) { 1437 for_each_port(adap, i) {
1456 struct net_device *nd = adap->port[i].dev; 1438 struct net_device *nd = adap->port[i].dev;
1457 1439
1458 if (test_and_clear_bit(nd->if_port, 1440 if (test_and_clear_bit(nd->if_port, &sge->stopped_tx_queues) &&
1459 &sge->stopped_tx_queues) && 1441 netif_running(nd)) {
1460 netif_running(nd)) { 1442 sge->stats.cmdQ_restarted[2]++;
1461 sge->stats.cmdQ_restarted[2]++; 1443 netif_wake_queue(nd);
1462 netif_wake_queue(nd);
1463 }
1464 } 1444 }
1465 } 1445 }
1466} 1446}
1467 1447
1468/* 1448/*
1469 * update_tx_info is called from the interrupt handler/NAPI to return cmdQ0 1449 * update_tx_info is called from the interrupt handler/NAPI to return cmdQ0
1470 * information. 1450 * information.
1471 */ 1451 */
1472static unsigned int update_tx_info(struct adapter *adapter, 1452static unsigned int update_tx_info(struct adapter *adapter,
1473 unsigned int flags, 1453 unsigned int flags,
1474 unsigned int pr0) 1454 unsigned int pr0)
1475{ 1455{
1476 struct sge *sge = adapter->sge; 1456 struct sge *sge = adapter->sge;
@@ -1510,29 +1490,30 @@ static int process_responses(struct adapter *adapter, int budget)
1510 struct sge *sge = adapter->sge; 1490 struct sge *sge = adapter->sge;
1511 struct respQ *q = &sge->respQ; 1491 struct respQ *q = &sge->respQ;
1512 struct respQ_e *e = &q->entries[q->cidx]; 1492 struct respQ_e *e = &q->entries[q->cidx];
1513 int budget_left = budget; 1493 int done = 0;
1514 unsigned int flags = 0; 1494 unsigned int flags = 0;
1515 unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0}; 1495 unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};
1516
1517 1496
1518 while (likely(budget_left && e->GenerationBit == q->genbit)) { 1497 while (done < budget && e->GenerationBit == q->genbit) {
1519 flags |= e->Qsleeping; 1498 flags |= e->Qsleeping;
1520 1499
1521 cmdq_processed[0] += e->Cmdq0CreditReturn; 1500 cmdq_processed[0] += e->Cmdq0CreditReturn;
1522 cmdq_processed[1] += e->Cmdq1CreditReturn; 1501 cmdq_processed[1] += e->Cmdq1CreditReturn;
1523 1502
1524 /* We batch updates to the TX side to avoid cacheline 1503 /* We batch updates to the TX side to avoid cacheline
1525 * ping-pong of TX state information on MP where the sender 1504 * ping-pong of TX state information on MP where the sender
1526 * might run on a different CPU than this function... 1505 * might run on a different CPU than this function...
1527 */ 1506 */
1528 if (unlikely(flags & F_CMDQ0_ENABLE || cmdq_processed[0] > 64)) { 1507 if (unlikely((flags & F_CMDQ0_ENABLE) || cmdq_processed[0] > 64)) {
1529 flags = update_tx_info(adapter, flags, cmdq_processed[0]); 1508 flags = update_tx_info(adapter, flags, cmdq_processed[0]);
1530 cmdq_processed[0] = 0; 1509 cmdq_processed[0] = 0;
1531 } 1510 }
1511
1532 if (unlikely(cmdq_processed[1] > 16)) { 1512 if (unlikely(cmdq_processed[1] > 16)) {
1533 sge->cmdQ[1].processed += cmdq_processed[1]; 1513 sge->cmdQ[1].processed += cmdq_processed[1];
1534 cmdq_processed[1] = 0; 1514 cmdq_processed[1] = 0;
1535 } 1515 }
1516
1536 if (likely(e->DataValid)) { 1517 if (likely(e->DataValid)) {
1537 struct freelQ *fl = &sge->freelQ[e->FreelistQid]; 1518 struct freelQ *fl = &sge->freelQ[e->FreelistQid];
1538 1519
@@ -1542,12 +1523,16 @@ static int process_responses(struct adapter *adapter, int budget)
1542 else 1523 else
1543 sge_rx(sge, fl, e->BufferLength); 1524 sge_rx(sge, fl, e->BufferLength);
1544 1525
1526 ++done;
1527
1545 /* 1528 /*
1546 * Note: this depends on each packet consuming a 1529 * Note: this depends on each packet consuming a
1547 * single free-list buffer; cf. the BUG above. 1530 * single free-list buffer; cf. the BUG above.
1548 */ 1531 */
1549 if (++fl->cidx == fl->size) 1532 if (++fl->cidx == fl->size)
1550 fl->cidx = 0; 1533 fl->cidx = 0;
1534 prefetch(fl->centries[fl->cidx].skb);
1535
1551 if (unlikely(--fl->credits < 1536 if (unlikely(--fl->credits <
1552 fl->size - SGE_FREEL_REFILL_THRESH)) 1537 fl->size - SGE_FREEL_REFILL_THRESH))
1553 refill_free_list(sge, fl); 1538 refill_free_list(sge, fl);
@@ -1566,14 +1551,20 @@ static int process_responses(struct adapter *adapter, int budget)
1566 writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT); 1551 writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
1567 q->credits = 0; 1552 q->credits = 0;
1568 } 1553 }
1569 --budget_left;
1570 } 1554 }
1571 1555
1572 flags = update_tx_info(adapter, flags, cmdq_processed[0]); 1556 flags = update_tx_info(adapter, flags, cmdq_processed[0]);
1573 sge->cmdQ[1].processed += cmdq_processed[1]; 1557 sge->cmdQ[1].processed += cmdq_processed[1];
1574 1558
1575 budget -= budget_left; 1559 return done;
1576 return budget; 1560}
1561
1562static inline int responses_pending(const struct adapter *adapter)
1563{
1564 const struct respQ *Q = &adapter->sge->respQ;
1565 const struct respQ_e *e = &Q->entries[Q->cidx];
1566
1567 return (e->GenerationBit == Q->genbit);
1577} 1568}
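responses_pending() is the ownership test for the response ring: the hardware stamps each entry it writes with the queue's current generation, and the expected bit flips every time the consumer index wraps, so a stale entry from the previous lap can never look valid. Sketch of the consumer side (process() hypothetical):

	e = &q->entries[q->cidx];
	while (e->GenerationBit == q->genbit) {	/* entry is ours */
		process(e);
		if (++q->cidx == q->size) {
			q->cidx = 0;
			q->genbit ^= 1;	/* expect the flipped bit next lap */
			e = q->entries;
		} else
			e++;
	}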
1578 1569
1579#ifdef CONFIG_CHELSIO_T1_NAPI 1570#ifdef CONFIG_CHELSIO_T1_NAPI
@@ -1585,19 +1576,25 @@ static int process_responses(struct adapter *adapter, int budget)
1585 * which the caller must ensure is a valid pure response. Returns 1 if it 1576 * which the caller must ensure is a valid pure response. Returns 1 if it
1586 * encounters a valid data-carrying response, 0 otherwise. 1577 * encounters a valid data-carrying response, 0 otherwise.
1587 */ 1578 */
1588static int process_pure_responses(struct adapter *adapter, struct respQ_e *e) 1579static int process_pure_responses(struct adapter *adapter)
1589{ 1580{
1590 struct sge *sge = adapter->sge; 1581 struct sge *sge = adapter->sge;
1591 struct respQ *q = &sge->respQ; 1582 struct respQ *q = &sge->respQ;
1583 struct respQ_e *e = &q->entries[q->cidx];
1584 const struct freelQ *fl = &sge->freelQ[e->FreelistQid];
1592 unsigned int flags = 0; 1585 unsigned int flags = 0;
1593 unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0}; 1586 unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};
1594 1587
1588 prefetch(fl->centries[fl->cidx].skb);
1589 if (e->DataValid)
1590 return 1;
1591
1595 do { 1592 do {
1596 flags |= e->Qsleeping; 1593 flags |= e->Qsleeping;
1597 1594
1598 cmdq_processed[0] += e->Cmdq0CreditReturn; 1595 cmdq_processed[0] += e->Cmdq0CreditReturn;
1599 cmdq_processed[1] += e->Cmdq1CreditReturn; 1596 cmdq_processed[1] += e->Cmdq1CreditReturn;
1600 1597
1601 e++; 1598 e++;
1602 if (unlikely(++q->cidx == q->size)) { 1599 if (unlikely(++q->cidx == q->size)) {
1603 q->cidx = 0; 1600 q->cidx = 0;
@@ -1613,7 +1610,7 @@ static int process_pure_responses(struct adapter *adapter, struct respQ_e *e)
1613 sge->stats.pure_rsps++; 1610 sge->stats.pure_rsps++;
1614 } while (e->GenerationBit == q->genbit && !e->DataValid); 1611 } while (e->GenerationBit == q->genbit && !e->DataValid);
1615 1612
1616 flags = update_tx_info(adapter, flags, cmdq_processed[0]); 1613 flags = update_tx_info(adapter, flags, cmdq_processed[0]);
1617 sge->cmdQ[1].processed += cmdq_processed[1]; 1614 sge->cmdQ[1].processed += cmdq_processed[1];
1618 1615
1619 return e->GenerationBit == q->genbit; 1616 return e->GenerationBit == q->genbit;
@@ -1627,23 +1624,20 @@ static int process_pure_responses(struct adapter *adapter, struct respQ_e *e)
1627int t1_poll(struct net_device *dev, int *budget) 1624int t1_poll(struct net_device *dev, int *budget)
1628{ 1625{
1629 struct adapter *adapter = dev->priv; 1626 struct adapter *adapter = dev->priv;
1630 int effective_budget = min(*budget, dev->quota); 1627 int work_done;
1631 int work_done = process_responses(adapter, effective_budget);
1632 1628
1629 work_done = process_responses(adapter, min(*budget, dev->quota));
1633 *budget -= work_done; 1630 *budget -= work_done;
1634 dev->quota -= work_done; 1631 dev->quota -= work_done;
1635 1632
1636 if (work_done >= effective_budget) 1633 if (unlikely(responses_pending(adapter)))
1637 return 1; 1634 return 1;
1638 1635
1639 spin_lock_irq(&adapter->async_lock); 1636 netif_rx_complete(dev);
1640 __netif_rx_complete(dev);
1641 writel(adapter->sge->respQ.cidx, adapter->regs + A_SG_SLEEPING); 1637 writel(adapter->sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
1642 writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
1643 adapter->regs + A_PL_ENABLE);
1644 spin_unlock_irq(&adapter->async_lock);
1645 1638
1646 return 0; 1639 return 0;
1640
1647} 1641}
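t1_poll() follows the two-counter ->poll() contract of this kernel generation: consume at most min(*budget, dev->quota) packets, charge both counters, return 1 to stay on the poll list, or call netif_rx_complete() and return 0 to re-enable interrupts. Schematically (process_rx and rx_pending hypothetical):

	static int my_poll(struct net_device *dev, int *budget)
	{
		int work = process_rx(dev, min(*budget, dev->quota));

		*budget -= work;
		dev->quota -= work;

		if (rx_pending(dev))
			return 1;		/* keep polling */

		netif_rx_complete(dev);		/* back to interrupt mode */
		return 0;
	}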
1648 1642
1649/* 1643/*
@@ -1652,44 +1646,33 @@ int t1_poll(struct net_device *dev, int *budget)
1652irqreturn_t t1_interrupt(int irq, void *data) 1646irqreturn_t t1_interrupt(int irq, void *data)
1653{ 1647{
1654 struct adapter *adapter = data; 1648 struct adapter *adapter = data;
1655 struct net_device *dev = adapter->sge->netdev;
1656 struct sge *sge = adapter->sge; 1649 struct sge *sge = adapter->sge;
1657 u32 cause; 1650 int handled;
1658 int handled = 0;
1659 1651
1660 cause = readl(adapter->regs + A_PL_CAUSE); 1652 if (likely(responses_pending(adapter))) {
1661 if (cause == 0 || cause == ~0) 1653 struct net_device *dev = sge->netdev;
1662 return IRQ_NONE;
1663 1654
1664 spin_lock(&adapter->async_lock); 1655 writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
1665 if (cause & F_PL_INTR_SGE_DATA) {
1666 struct respQ *q = &adapter->sge->respQ;
1667 struct respQ_e *e = &q->entries[q->cidx];
1668
1669 handled = 1;
1670 writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
1671
1672 if (e->GenerationBit == q->genbit &&
1673 __netif_rx_schedule_prep(dev)) {
1674 if (e->DataValid || process_pure_responses(adapter, e)) {
1675 /* mask off data IRQ */
1676 writel(adapter->slow_intr_mask,
1677 adapter->regs + A_PL_ENABLE);
1678 __netif_rx_schedule(sge->netdev);
1679 goto unlock;
1680 }
1681 /* no data, no NAPI needed */
1682 netif_poll_enable(dev);
1683 1656
1657 if (__netif_rx_schedule_prep(dev)) {
1658 if (process_pure_responses(adapter))
1659 __netif_rx_schedule(dev);
1660 else {
1661 /* no data, no NAPI needed */
1662 writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
1663 netif_poll_enable(dev); /* undo schedule_prep */
1664 }
1684 } 1665 }
1685 writel(q->cidx, adapter->regs + A_SG_SLEEPING); 1666 return IRQ_HANDLED;
1686 } else 1667 }
1687 handled = t1_slow_intr_handler(adapter); 1668
1669 spin_lock(&adapter->async_lock);
1670 handled = t1_slow_intr_handler(adapter);
1671 spin_unlock(&adapter->async_lock);
1688 1672
1689 if (!handled) 1673 if (!handled)
1690 sge->stats.unhandled_irqs++; 1674 sge->stats.unhandled_irqs++;
1691unlock: 1675
1692 spin_unlock(&adapter->async_lock);
1693 return IRQ_RETVAL(handled != 0); 1676 return IRQ_RETVAL(handled != 0);
1694} 1677}
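The reworked fast path leans on the atomicity of __netif_rx_schedule_prep(): it claims the device's poll-scheduled bit and returns false if another context already holds it. A claimant that then finds only pure (data-less) responses must release the bit with netif_poll_enable(), or the device could never be polled again. The pairing in miniature (data_responses_seen hypothetical):

	if (__netif_rx_schedule_prep(dev)) {
		if (data_responses_seen(adapter))
			__netif_rx_schedule(dev);	/* run ->poll soon */
		else
			netif_poll_enable(dev);		/* undo the claim */
	}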
1695 1678
@@ -1712,17 +1695,13 @@ unlock:
1712irqreturn_t t1_interrupt(int irq, void *cookie) 1695irqreturn_t t1_interrupt(int irq, void *cookie)
1713{ 1696{
1714 int work_done; 1697 int work_done;
1715 struct respQ_e *e;
1716 struct adapter *adapter = cookie; 1698 struct adapter *adapter = cookie;
1717 struct respQ *Q = &adapter->sge->respQ;
1718 1699
1719 spin_lock(&adapter->async_lock); 1700 spin_lock(&adapter->async_lock);
1720 e = &Q->entries[Q->cidx];
1721 prefetch(e);
1722 1701
1723 writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE); 1702 writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
1724 1703
1725 if (likely(e->GenerationBit == Q->genbit)) 1704 if (likely(responses_pending(adapter)))
1726 work_done = process_responses(adapter, -1); 1705 work_done = process_responses(adapter, -1);
1727 else 1706 else
1728 work_done = t1_slow_intr_handler(adapter); 1707 work_done = t1_slow_intr_handler(adapter);
@@ -1796,7 +1775,7 @@ static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
1796 * through the scheduler. 1775 * through the scheduler.
1797 */ 1776 */
1798 if (sge->tx_sched && !qid && skb->dev) { 1777 if (sge->tx_sched && !qid && skb->dev) {
1799 use_sched: 1778use_sched:
1800 use_sched_skb = 1; 1779 use_sched_skb = 1;
1801 /* Note that the scheduler might return a different skb than 1780 /* Note that the scheduler might return a different skb than
1802 * the one passed in. 1781 * the one passed in.
@@ -1900,7 +1879,7 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
1900 cpl = (struct cpl_tx_pkt *)hdr; 1879 cpl = (struct cpl_tx_pkt *)hdr;
1901 } else { 1880 } else {
1902 /* 1881 /*
1903 * Packets shorter than ETH_HLEN can break the MAC, drop them 1882 * Packets shorter than ETH_HLEN can break the MAC, drop them
1904 * early. Also, we may get oversized packets because some 1883 * early. Also, we may get oversized packets because some
1905 * parts of the kernel don't handle our unusual hard_header_len 1884 * parts of the kernel don't handle our unusual hard_header_len
1906 * right, drop those too. 1885 * right, drop those too.
@@ -1984,9 +1963,9 @@ send:
1984 * then silently discard to avoid leak. 1963 * then silently discard to avoid leak.
1985 */ 1964 */
1986 if (unlikely(ret != NETDEV_TX_OK && skb != orig_skb)) { 1965 if (unlikely(ret != NETDEV_TX_OK && skb != orig_skb)) {
1987 dev_kfree_skb_any(skb); 1966 dev_kfree_skb_any(skb);
1988 ret = NETDEV_TX_OK; 1967 ret = NETDEV_TX_OK;
1989 } 1968 }
1990 return ret; 1969 return ret;
1991} 1970}
1992 1971
@@ -2099,31 +2078,35 @@ static void espibug_workaround_t204(unsigned long data)
2099 2078
2100 if (adapter->open_device_map & PORT_MASK) { 2079 if (adapter->open_device_map & PORT_MASK) {
2101 int i; 2080 int i;
2102 if (t1_espi_get_mon_t204(adapter, &(seop[0]), 0) < 0) { 2081
2082 if (t1_espi_get_mon_t204(adapter, &(seop[0]), 0) < 0)
2103 return; 2083 return;
2104 } 2084
2105 for (i = 0; i < nports; i++) { 2085 for (i = 0; i < nports; i++) {
2106 struct sk_buff *skb = sge->espibug_skb[i]; 2086 struct sk_buff *skb = sge->espibug_skb[i];
2107 if ( (netif_running(adapter->port[i].dev)) && 2087
2108 !(netif_queue_stopped(adapter->port[i].dev)) && 2088 if (!netif_running(adapter->port[i].dev) ||
2109 (seop[i] && ((seop[i] & 0xfff) == 0)) && 2089 netif_queue_stopped(adapter->port[i].dev) ||
2110 skb ) { 2090 !seop[i] || ((seop[i] & 0xfff) != 0) || !skb)
2111 if (!skb->cb[0]) { 2091 continue;
2112 u8 ch_mac_addr[ETH_ALEN] = 2092
2113 {0x0, 0x7, 0x43, 0x0, 0x0, 0x0}; 2093 if (!skb->cb[0]) {
2114 memcpy(skb->data + sizeof(struct cpl_tx_pkt), 2094 u8 ch_mac_addr[ETH_ALEN] = {
2115 ch_mac_addr, ETH_ALEN); 2095 0x0, 0x7, 0x43, 0x0, 0x0, 0x0
2116 memcpy(skb->data + skb->len - 10, 2096 };
2117 ch_mac_addr, ETH_ALEN); 2097
2118 skb->cb[0] = 0xff; 2098 memcpy(skb->data + sizeof(struct cpl_tx_pkt),
2119 } 2099 ch_mac_addr, ETH_ALEN);
2120 2100 memcpy(skb->data + skb->len - 10,
2121 /* bump the reference count to avoid freeing of 2101 ch_mac_addr, ETH_ALEN);
2122 * the skb once the DMA has completed. 2102 skb->cb[0] = 0xff;
2123 */
2124 skb = skb_get(skb);
2125 t1_sge_tx(skb, adapter, 0, adapter->port[i].dev);
2126 } 2103 }
2104
2105 /* bump the reference count to avoid freeing of
2106 * the skb once the DMA has completed.
2107 */
2108 skb = skb_get(skb);
2109 t1_sge_tx(skb, adapter, 0, adapter->port[i].dev);
2127 } 2110 }
2128 } 2111 }
2129 mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout); 2112 mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
@@ -2192,9 +2175,8 @@ struct sge * __devinit t1_sge_create(struct adapter *adapter,
2192 if (adapter->params.nports > 1) { 2175 if (adapter->params.nports > 1) {
2193 tx_sched_init(sge); 2176 tx_sched_init(sge);
2194 sge->espibug_timer.function = espibug_workaround_t204; 2177 sge->espibug_timer.function = espibug_workaround_t204;
2195 } else { 2178 } else
2196 sge->espibug_timer.function = espibug_workaround; 2179 sge->espibug_timer.function = espibug_workaround;
2197 }
2198 sge->espibug_timer.data = (unsigned long)sge->adapter; 2180 sge->espibug_timer.data = (unsigned long)sge->adapter;
2199 2181
2200 sge->espibug_timeout = 1; 2182 sge->espibug_timeout = 1;
@@ -2202,7 +2184,7 @@ struct sge * __devinit t1_sge_create(struct adapter *adapter,
2202 if (adapter->params.nports > 1) 2184 if (adapter->params.nports > 1)
2203 sge->espibug_timeout = HZ/100; 2185 sge->espibug_timeout = HZ/100;
2204 } 2186 }
2205 2187
2206 2188
2207 p->cmdQ_size[0] = SGE_CMDQ0_E_N; 2189 p->cmdQ_size[0] = SGE_CMDQ0_E_N;
2208 p->cmdQ_size[1] = SGE_CMDQ1_E_N; 2190 p->cmdQ_size[1] = SGE_CMDQ1_E_N;
diff --git a/drivers/net/chelsio/subr.c b/drivers/net/chelsio/subr.c
index 22ed9a383c08..c2522cdfab37 100644
--- a/drivers/net/chelsio/subr.c
+++ b/drivers/net/chelsio/subr.c
@@ -223,13 +223,13 @@ static int fpga_slow_intr(adapter_t *adapter)
223 t1_sge_intr_error_handler(adapter->sge); 223 t1_sge_intr_error_handler(adapter->sge);
224 224
225 if (cause & FPGA_PCIX_INTERRUPT_GMAC) 225 if (cause & FPGA_PCIX_INTERRUPT_GMAC)
226 fpga_phy_intr_handler(adapter); 226 fpga_phy_intr_handler(adapter);
227 227
228 if (cause & FPGA_PCIX_INTERRUPT_TP) { 228 if (cause & FPGA_PCIX_INTERRUPT_TP) {
229 /* 229 /*
230 * FPGA doesn't support MC4 interrupts and it requires 230 * FPGA doesn't support MC4 interrupts and it requires
231 * this odd layer of indirection for MC5. 231 * this odd layer of indirection for MC5.
232 */ 232 */
233 u32 tp_cause = readl(adapter->regs + FPGA_TP_ADDR_INTERRUPT_CAUSE); 233 u32 tp_cause = readl(adapter->regs + FPGA_TP_ADDR_INTERRUPT_CAUSE);
234 234
235 /* Clear TP interrupt */ 235 /* Clear TP interrupt */
@@ -262,8 +262,7 @@ static int mi1_wait_until_ready(adapter_t *adapter, int mi1_reg)
262 udelay(10); 262 udelay(10);
263 } while (busy && --attempts); 263 } while (busy && --attempts);
264 if (busy) 264 if (busy)
265 CH_ALERT("%s: MDIO operation timed out\n", 265 CH_ALERT("%s: MDIO operation timed out\n", adapter->name);
266 adapter->name);
267 return busy; 266 return busy;
268} 267}
269 268
@@ -605,22 +604,23 @@ int t1_elmer0_ext_intr_handler(adapter_t *adapter)
605 604
606 switch (board_info(adapter)->board) { 605 switch (board_info(adapter)->board) {
607#ifdef CONFIG_CHELSIO_T1_1G 606#ifdef CONFIG_CHELSIO_T1_1G
608 case CHBT_BOARD_CHT204: 607 case CHBT_BOARD_CHT204:
609 case CHBT_BOARD_CHT204E: 608 case CHBT_BOARD_CHT204E:
610 case CHBT_BOARD_CHN204: 609 case CHBT_BOARD_CHN204:
611 case CHBT_BOARD_CHT204V: { 610 case CHBT_BOARD_CHT204V: {
612 int i, port_bit; 611 int i, port_bit;
613 for_each_port(adapter, i) { 612 for_each_port(adapter, i) {
614 port_bit = i + 1; 613 port_bit = i + 1;
615 if (!(cause & (1 << port_bit))) continue; 614 if (!(cause & (1 << port_bit)))
615 continue;
616 616
617 phy = adapter->port[i].phy; 617 phy = adapter->port[i].phy;
618 phy_cause = phy->ops->interrupt_handler(phy); 618 phy_cause = phy->ops->interrupt_handler(phy);
619 if (phy_cause & cphy_cause_link_change) 619 if (phy_cause & cphy_cause_link_change)
620 t1_link_changed(adapter, i); 620 t1_link_changed(adapter, i);
621 } 621 }
622 break; 622 break;
623 } 623 }
624 case CHBT_BOARD_CHT101: 624 case CHBT_BOARD_CHT101:
625 if (cause & ELMER0_GP_BIT1) { /* Marvell 88E1111 interrupt */ 625 if (cause & ELMER0_GP_BIT1) { /* Marvell 88E1111 interrupt */
626 phy = adapter->port[0].phy; 626 phy = adapter->port[0].phy;
@@ -631,13 +631,13 @@ int t1_elmer0_ext_intr_handler(adapter_t *adapter)
631 break; 631 break;
632 case CHBT_BOARD_7500: { 632 case CHBT_BOARD_7500: {
633 int p; 633 int p;
634 /* 634 /*
635 * Elmer0's interrupt cause isn't useful here because there is 635 * Elmer0's interrupt cause isn't useful here because there is
636 * only one bit that can be set for all 4 ports. This means 636 * only one bit that can be set for all 4 ports. This means
637 * we are forced to check every PHY's interrupt status 637 * we are forced to check every PHY's interrupt status
638 * register to see who initiated the interrupt. 638 * register to see who initiated the interrupt.
639 */ 639 */
640 for_each_port(adapter, p) { 640 for_each_port(adapter, p) {
641 phy = adapter->port[p].phy; 641 phy = adapter->port[p].phy;
642 phy_cause = phy->ops->interrupt_handler(phy); 642 phy_cause = phy->ops->interrupt_handler(phy);
643 if (phy_cause & cphy_cause_link_change) 643 if (phy_cause & cphy_cause_link_change)
@@ -658,7 +658,7 @@ int t1_elmer0_ext_intr_handler(adapter_t *adapter)
658 break; 658 break;
659 case CHBT_BOARD_8000: 659 case CHBT_BOARD_8000:
660 case CHBT_BOARD_CHT110: 660 case CHBT_BOARD_CHT110:
661 CH_DBG(adapter, INTR, "External interrupt cause 0x%x\n", 661 CH_DBG(adapter, INTR, "External interrupt cause 0x%x\n",
662 cause); 662 cause);
663 if (cause & ELMER0_GP_BIT1) { /* PMC3393 INTB */ 663 if (cause & ELMER0_GP_BIT1) { /* PMC3393 INTB */
664 struct cmac *mac = adapter->port[0].mac; 664 struct cmac *mac = adapter->port[0].mac;
@@ -670,9 +670,9 @@ int t1_elmer0_ext_intr_handler(adapter_t *adapter)
670 670
671 t1_tpi_read(adapter, 671 t1_tpi_read(adapter,
672 A_ELMER0_GPI_STAT, &mod_detect); 672 A_ELMER0_GPI_STAT, &mod_detect);
673 CH_MSG(adapter, INFO, LINK, "XPAK %s\n", 673 CH_MSG(adapter, INFO, LINK, "XPAK %s\n",
674 mod_detect ? "removed" : "inserted"); 674 mod_detect ? "removed" : "inserted");
675 } 675 }
676 break; 676 break;
677#ifdef CONFIG_CHELSIO_T1_COUGAR 677#ifdef CONFIG_CHELSIO_T1_COUGAR
678 case CHBT_BOARD_COUGAR: 678 case CHBT_BOARD_COUGAR:
@@ -688,7 +688,8 @@ int t1_elmer0_ext_intr_handler(adapter_t *adapter)
688 688
689 for_each_port(adapter, i) { 689 for_each_port(adapter, i) {
690 port_bit = i ? i + 1 : 0; 690 port_bit = i ? i + 1 : 0;
691 if (!(cause & (1 << port_bit))) continue; 691 if (!(cause & (1 << port_bit)))
692 continue;
692 693
693 phy = adapter->port[i].phy; 694 phy = adapter->port[i].phy;
694 phy_cause = phy->ops->interrupt_handler(phy); 695 phy_cause = phy->ops->interrupt_handler(phy);
@@ -755,7 +756,7 @@ void t1_interrupts_disable(adapter_t* adapter)
755 756
756 /* Disable PCIX & external chip interrupts. */ 757 /* Disable PCIX & external chip interrupts. */
757 if (t1_is_asic(adapter)) 758 if (t1_is_asic(adapter))
758 writel(0, adapter->regs + A_PL_ENABLE); 759 writel(0, adapter->regs + A_PL_ENABLE);
759 760
760 /* PCI-X interrupts */ 761 /* PCI-X interrupts */
761 pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_ENABLE, 0); 762 pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_ENABLE, 0);
@@ -830,11 +831,11 @@ int t1_slow_intr_handler(adapter_t *adapter)
830/* Power sequencing is a work-around for Intel's XPAKs. */ 831/* Power sequencing is a work-around for Intel's XPAKs. */
831static void power_sequence_xpak(adapter_t* adapter) 832static void power_sequence_xpak(adapter_t* adapter)
832{ 833{
833 u32 mod_detect; 834 u32 mod_detect;
834 u32 gpo; 835 u32 gpo;
835 836
836 /* Check for XPAK */ 837 /* Check for XPAK */
837 t1_tpi_read(adapter, A_ELMER0_GPI_STAT, &mod_detect); 838 t1_tpi_read(adapter, A_ELMER0_GPI_STAT, &mod_detect);
838 if (!(ELMER0_GP_BIT5 & mod_detect)) { 839 if (!(ELMER0_GP_BIT5 & mod_detect)) {
839 /* XPAK is present */ 840 /* XPAK is present */
840 t1_tpi_read(adapter, A_ELMER0_GPO, &gpo); 841 t1_tpi_read(adapter, A_ELMER0_GPO, &gpo);
@@ -877,31 +878,31 @@ static int board_init(adapter_t *adapter, const struct board_info *bi)
877 case CHBT_BOARD_N210: 878 case CHBT_BOARD_N210:
878 case CHBT_BOARD_CHT210: 879 case CHBT_BOARD_CHT210:
879 case CHBT_BOARD_COUGAR: 880 case CHBT_BOARD_COUGAR:
880 t1_tpi_par(adapter, 0xf); 881 t1_tpi_par(adapter, 0xf);
881 t1_tpi_write(adapter, A_ELMER0_GPO, 0x800); 882 t1_tpi_write(adapter, A_ELMER0_GPO, 0x800);
882 break; 883 break;
883 case CHBT_BOARD_CHT110: 884 case CHBT_BOARD_CHT110:
884 t1_tpi_par(adapter, 0xf); 885 t1_tpi_par(adapter, 0xf);
885 t1_tpi_write(adapter, A_ELMER0_GPO, 0x1800); 886 t1_tpi_write(adapter, A_ELMER0_GPO, 0x1800);
886 887
887 /* TBD XXX Might not need. This fixes a problem 888 /* TBD XXX Might not need. This fixes a problem
888 * described in the Intel SR XPAK errata. 889 * described in the Intel SR XPAK errata.
889 */ 890 */
890 power_sequence_xpak(adapter); 891 power_sequence_xpak(adapter);
891 break; 892 break;
892#ifdef CONFIG_CHELSIO_T1_1G 893#ifdef CONFIG_CHELSIO_T1_1G
893 case CHBT_BOARD_CHT204E: 894 case CHBT_BOARD_CHT204E:
894 /* add config space write here */ 895 /* add config space write here */
895 case CHBT_BOARD_CHT204: 896 case CHBT_BOARD_CHT204:
896 case CHBT_BOARD_CHT204V: 897 case CHBT_BOARD_CHT204V:
897 case CHBT_BOARD_CHN204: 898 case CHBT_BOARD_CHN204:
898 t1_tpi_par(adapter, 0xf); 899 t1_tpi_par(adapter, 0xf);
899 t1_tpi_write(adapter, A_ELMER0_GPO, 0x804); 900 t1_tpi_write(adapter, A_ELMER0_GPO, 0x804);
900 break; 901 break;
901 case CHBT_BOARD_CHT101: 902 case CHBT_BOARD_CHT101:
902 case CHBT_BOARD_7500: 903 case CHBT_BOARD_7500:
903 t1_tpi_par(adapter, 0xf); 904 t1_tpi_par(adapter, 0xf);
904 t1_tpi_write(adapter, A_ELMER0_GPO, 0x1804); 905 t1_tpi_write(adapter, A_ELMER0_GPO, 0x1804);
905 break; 906 break;
906#endif 907#endif
907 } 908 }
@@ -941,7 +942,7 @@ int t1_init_hw_modules(adapter_t *adapter)
941 goto out_err; 942 goto out_err;
942 943
943 err = 0; 944 err = 0;
944 out_err: 945out_err:
945 return err; 946 return err;
946} 947}
947 948
@@ -983,7 +984,7 @@ void t1_free_sw_modules(adapter_t *adapter)
983 if (adapter->espi) 984 if (adapter->espi)
984 t1_espi_destroy(adapter->espi); 985 t1_espi_destroy(adapter->espi);
985#ifdef CONFIG_CHELSIO_T1_COUGAR 986#ifdef CONFIG_CHELSIO_T1_COUGAR
986 if (adapter->cspi) 987 if (adapter->cspi)
987 t1_cspi_destroy(adapter->cspi); 988 t1_cspi_destroy(adapter->cspi);
988#endif 989#endif
989} 990}
@@ -1010,7 +1011,7 @@ static void __devinit init_link_config(struct link_config *lc,
1010 CH_ERR("%s: CSPI initialization failed\n", 1011 CH_ERR("%s: CSPI initialization failed\n",
1011 adapter->name); 1012 adapter->name);
1012 goto error; 1013 goto error;
1013 } 1014 }
1014#endif 1015#endif
1015 1016
1016/* 1017/*
diff --git a/drivers/net/chelsio/tp.c b/drivers/net/chelsio/tp.c
index 0ca0b6e19e43..6222d585e447 100644
--- a/drivers/net/chelsio/tp.c
+++ b/drivers/net/chelsio/tp.c
@@ -17,39 +17,36 @@ struct petp {
17static void tp_init(adapter_t * ap, const struct tp_params *p, 17static void tp_init(adapter_t * ap, const struct tp_params *p,
18 unsigned int tp_clk) 18 unsigned int tp_clk)
19{ 19{
20 if (t1_is_asic(ap)) { 20 u32 val;
21 u32 val;
22
23 val = F_TP_IN_CSPI_CPL | F_TP_IN_CSPI_CHECK_IP_CSUM |
24 F_TP_IN_CSPI_CHECK_TCP_CSUM | F_TP_IN_ESPI_ETHERNET;
25 if (!p->pm_size)
26 val |= F_OFFLOAD_DISABLE;
27 else
28 val |= F_TP_IN_ESPI_CHECK_IP_CSUM |
29 F_TP_IN_ESPI_CHECK_TCP_CSUM;
30 writel(val, ap->regs + A_TP_IN_CONFIG);
31 writel(F_TP_OUT_CSPI_CPL |
32 F_TP_OUT_ESPI_ETHERNET |
33 F_TP_OUT_ESPI_GENERATE_IP_CSUM |
34 F_TP_OUT_ESPI_GENERATE_TCP_CSUM,
35 ap->regs + A_TP_OUT_CONFIG);
36 writel(V_IP_TTL(64) |
37 F_PATH_MTU /* IP DF bit */ |
38 V_5TUPLE_LOOKUP(p->use_5tuple_mode) |
39 V_SYN_COOKIE_PARAMETER(29),
40 ap->regs + A_TP_GLOBAL_CONFIG);
41 /*
42 * Enable pause frame deadlock prevention.
43 */
44 if (is_T2(ap) && ap->params.nports > 1) {
45 u32 drop_ticks = DROP_MSEC * (tp_clk / 1000);
46
47 writel(F_ENABLE_TX_DROP | F_ENABLE_TX_ERROR |
48 V_DROP_TICKS_CNT(drop_ticks) |
49 V_NUM_PKTS_DROPPED(DROP_PKTS_CNT),
50 ap->regs + A_TP_TX_DROP_CONFIG);
51 }
52 21
22 if (!t1_is_asic(ap))
23 return;
24
25 val = F_TP_IN_CSPI_CPL | F_TP_IN_CSPI_CHECK_IP_CSUM |
26 F_TP_IN_CSPI_CHECK_TCP_CSUM | F_TP_IN_ESPI_ETHERNET;
27 if (!p->pm_size)
28 val |= F_OFFLOAD_DISABLE;
29 else
30 val |= F_TP_IN_ESPI_CHECK_IP_CSUM | F_TP_IN_ESPI_CHECK_TCP_CSUM;
31 writel(val, ap->regs + A_TP_IN_CONFIG);
32 writel(F_TP_OUT_CSPI_CPL |
33 F_TP_OUT_ESPI_ETHERNET |
34 F_TP_OUT_ESPI_GENERATE_IP_CSUM |
35 F_TP_OUT_ESPI_GENERATE_TCP_CSUM, ap->regs + A_TP_OUT_CONFIG);
36 writel(V_IP_TTL(64) |
37 F_PATH_MTU /* IP DF bit */ |
38 V_5TUPLE_LOOKUP(p->use_5tuple_mode) |
39 V_SYN_COOKIE_PARAMETER(29), ap->regs + A_TP_GLOBAL_CONFIG);
40 /*
41 * Enable pause frame deadlock prevention.
42 */
43 if (is_T2(ap) && ap->params.nports > 1) {
44 u32 drop_ticks = DROP_MSEC * (tp_clk / 1000);
45
46 writel(F_ENABLE_TX_DROP | F_ENABLE_TX_ERROR |
47 V_DROP_TICKS_CNT(drop_ticks) |
48 V_NUM_PKTS_DROPPED(DROP_PKTS_CNT),
49 ap->regs + A_TP_TX_DROP_CONFIG);
53 } 50 }
54} 51}
55 52
@@ -61,6 +58,7 @@ void t1_tp_destroy(struct petp *tp)
61struct petp *__devinit t1_tp_create(adapter_t * adapter, struct tp_params *p) 58struct petp *__devinit t1_tp_create(adapter_t * adapter, struct tp_params *p)
62{ 59{
63 struct petp *tp = kzalloc(sizeof(*tp), GFP_KERNEL); 60 struct petp *tp = kzalloc(sizeof(*tp), GFP_KERNEL);
61
64 if (!tp) 62 if (!tp)
65 return NULL; 63 return NULL;
66 64
diff --git a/drivers/net/chelsio/vsc7326.c b/drivers/net/chelsio/vsc7326.c
index 85dc3b1dc309..534ffa0f616e 100644
--- a/drivers/net/chelsio/vsc7326.c
+++ b/drivers/net/chelsio/vsc7326.c
@@ -226,22 +226,21 @@ static void run_table(adapter_t *adapter, struct init_table *ib, int len)
226 if (ib[i].addr == INITBLOCK_SLEEP) { 226 if (ib[i].addr == INITBLOCK_SLEEP) {
227 udelay( ib[i].data ); 227 udelay( ib[i].data );
228 CH_ERR("sleep %d us\n",ib[i].data); 228 CH_ERR("sleep %d us\n",ib[i].data);
229 } else { 229 } else
230 vsc_write( adapter, ib[i].addr, ib[i].data ); 230 vsc_write( adapter, ib[i].addr, ib[i].data );
231 }
232 } 231 }
233} 232}
234 233
235static int bist_rd(adapter_t *adapter, int moduleid, int address) 234static int bist_rd(adapter_t *adapter, int moduleid, int address)
236{ 235{
237 int data=0; 236 int data = 0;
238 u32 result=0; 237 u32 result = 0;
239 238
240 if( (address != 0x0) && 239 if ((address != 0x0) &&
241 (address != 0x1) && 240 (address != 0x1) &&
242 (address != 0x2) && 241 (address != 0x2) &&
243 (address != 0xd) && 242 (address != 0xd) &&
244 (address != 0xe)) 243 (address != 0xe))
245 CH_ERR("No bist address: 0x%x\n", address); 244 CH_ERR("No bist address: 0x%x\n", address);
246 245
247 data = ((0x00 << 24) | ((address & 0xff) << 16) | (0x00 << 8) | 246 data = ((0x00 << 24) | ((address & 0xff) << 16) | (0x00 << 8) |
@@ -251,27 +250,27 @@ static int bist_rd(adapter_t *adapter, int moduleid, int address)
251 udelay(10); 250 udelay(10);
252 251
253 vsc_read(adapter, REG_RAM_BIST_RESULT, &result); 252 vsc_read(adapter, REG_RAM_BIST_RESULT, &result);
254 if((result & (1<<9)) != 0x0) 253 if ((result & (1 << 9)) != 0x0)
255 CH_ERR("Still in bist read: 0x%x\n", result); 254 CH_ERR("Still in bist read: 0x%x\n", result);
256 else if((result & (1<<8)) != 0x0) 255 else if ((result & (1 << 8)) != 0x0)
257 CH_ERR("bist read error: 0x%x\n", result); 256 CH_ERR("bist read error: 0x%x\n", result);
258 257
259 return(result & 0xff); 258 return (result & 0xff);
260} 259}
261 260
262static int bist_wr(adapter_t *adapter, int moduleid, int address, int value) 261static int bist_wr(adapter_t *adapter, int moduleid, int address, int value)
263{ 262{
264 int data=0; 263 int data = 0;
265 u32 result=0; 264 u32 result = 0;
266 265
267 if( (address != 0x0) && 266 if ((address != 0x0) &&
268 (address != 0x1) && 267 (address != 0x1) &&
269 (address != 0x2) && 268 (address != 0x2) &&
270 (address != 0xd) && 269 (address != 0xd) &&
271 (address != 0xe)) 270 (address != 0xe))
272 CH_ERR("No bist address: 0x%x\n", address); 271 CH_ERR("No bist address: 0x%x\n", address);
273 272
274 if( value>255 ) 273 if (value > 255)
275 CH_ERR("Suspicious write out of range value: 0x%x\n", value); 274 CH_ERR("Suspicious write out of range value: 0x%x\n", value);
276 275
277 data = ((0x01 << 24) | ((address & 0xff) << 16) | (value << 8) | 276 data = ((0x01 << 24) | ((address & 0xff) << 16) | (value << 8) |
@@ -281,12 +280,12 @@ static int bist_wr(adapter_t *adapter, int moduleid, int address, int value)
281 udelay(5); 280 udelay(5);
282 281
283 vsc_read(adapter, REG_RAM_BIST_CMD, &result); 282 vsc_read(adapter, REG_RAM_BIST_CMD, &result);
284 if((result & (1<<27)) != 0x0) 283 if ((result & (1 << 27)) != 0x0)
285 CH_ERR("Still in bist write: 0x%x\n", result); 284 CH_ERR("Still in bist write: 0x%x\n", result);
286 else if((result & (1<<26)) != 0x0) 285 else if ((result & (1 << 26)) != 0x0)
287 CH_ERR("bist write error: 0x%x\n", result); 286 CH_ERR("bist write error: 0x%x\n", result);
288 287
289 return(0); 288 return 0;
290} 289}
291 290
292static int run_bist(adapter_t *adapter, int moduleid) 291static int run_bist(adapter_t *adapter, int moduleid)
@@ -295,7 +294,7 @@ static int run_bist(adapter_t *adapter, int moduleid)
295 (void) bist_wr(adapter,moduleid, 0x00, 0x02); 294 (void) bist_wr(adapter,moduleid, 0x00, 0x02);
296 (void) bist_wr(adapter,moduleid, 0x01, 0x01); 295 (void) bist_wr(adapter,moduleid, 0x01, 0x01);
297 296
298 return(0); 297 return 0;
299} 298}
300 299
301static int check_bist(adapter_t *adapter, int moduleid) 300static int check_bist(adapter_t *adapter, int moduleid)
@@ -309,27 +308,26 @@ static int check_bist(adapter_t *adapter, int moduleid)
309 if ((result & 3) != 0x3) 308 if ((result & 3) != 0x3)
310 CH_ERR("Result: 0x%x BIST error in ram %d, column: 0x%04x\n", 309 CH_ERR("Result: 0x%x BIST error in ram %d, column: 0x%04x\n",
311 result, moduleid, column); 310 result, moduleid, column);
312 return(0); 311 return 0;
313} 312}
314 313
315static int enable_mem(adapter_t *adapter, int moduleid) 314static int enable_mem(adapter_t *adapter, int moduleid)
316{ 315{
317 /*enable mem*/ 316 /*enable mem*/
318 (void) bist_wr(adapter,moduleid, 0x00, 0x00); 317 (void) bist_wr(adapter,moduleid, 0x00, 0x00);
319 return(0); 318 return 0;
320} 319}
321 320
322static int run_bist_all(adapter_t *adapter) 321static int run_bist_all(adapter_t *adapter)
323{ 322{
324 int port=0; 323 int port = 0;
325 u32 val=0; 324 u32 val = 0;
326 325
327 vsc_write(adapter, REG_MEM_BIST, 0x5); 326 vsc_write(adapter, REG_MEM_BIST, 0x5);
328 vsc_read(adapter, REG_MEM_BIST, &val); 327 vsc_read(adapter, REG_MEM_BIST, &val);
329 328
330 for(port=0; port<12; port++){ 329 for (port = 0; port < 12; port++)
331 vsc_write(adapter, REG_DEV_SETUP(port), 0x0); 330 vsc_write(adapter, REG_DEV_SETUP(port), 0x0);
332 }
333 331
334 udelay(300); 332 udelay(300);
335 vsc_write(adapter, REG_SPI4_MISC, 0x00040409); 333 vsc_write(adapter, REG_SPI4_MISC, 0x00040409);
@@ -352,13 +350,13 @@ static int run_bist_all(adapter_t *adapter)
352 udelay(300); 350 udelay(300);
353 vsc_write(adapter, REG_SPI4_MISC, 0x60040400); 351 vsc_write(adapter, REG_SPI4_MISC, 0x60040400);
354 udelay(300); 352 udelay(300);
355 for(port=0; port<12; port++){ 353 for (port = 0; port < 12; port++)
356 vsc_write(adapter, REG_DEV_SETUP(port), 0x1); 354 vsc_write(adapter, REG_DEV_SETUP(port), 0x1);
357 } 355
358 udelay(300); 356 udelay(300);
359 vsc_write(adapter, REG_MEM_BIST, 0x0); 357 vsc_write(adapter, REG_MEM_BIST, 0x0);
360 mdelay(10); 358 mdelay(10);
361 return(0); 359 return 0;
362} 360}
363 361
364static int mac_intr_handler(struct cmac *mac) 362static int mac_intr_handler(struct cmac *mac)
@@ -591,40 +589,46 @@ static void rmon_update(struct cmac *mac, unsigned int addr, u64 *stat)
591 589
592static void port_stats_update(struct cmac *mac) 590static void port_stats_update(struct cmac *mac)
593{ 591{
594 int port = mac->instance->index; 592 struct {
593 unsigned int reg;
594 unsigned int offset;
595 } hw_stats[] = {
596
597#define HW_STAT(reg, stat_name) \
598 { reg, (&((struct cmac_statistics *)NULL)->stat_name) - (u64 *)NULL }
599
600 /* Rx stats */
601 HW_STAT(RxUnicast, RxUnicastFramesOK),
602 HW_STAT(RxMulticast, RxMulticastFramesOK),
603 HW_STAT(RxBroadcast, RxBroadcastFramesOK),
604 HW_STAT(Crc, RxFCSErrors),
605 HW_STAT(RxAlignment, RxAlignErrors),
606 HW_STAT(RxOversize, RxFrameTooLongErrors),
607 HW_STAT(RxPause, RxPauseFrames),
608 HW_STAT(RxJabbers, RxJabberErrors),
609 HW_STAT(RxFragments, RxRuntErrors),
610 HW_STAT(RxUndersize, RxRuntErrors),
611 HW_STAT(RxSymbolCarrier, RxSymbolErrors),
612 HW_STAT(RxSize1519ToMax, RxJumboFramesOK),
613
614 /* Tx stats (skip collision stats as we are full-duplex only) */
615 HW_STAT(TxUnicast, TxUnicastFramesOK),
616 HW_STAT(TxMulticast, TxMulticastFramesOK),
617 HW_STAT(TxBroadcast, TxBroadcastFramesOK),
618 HW_STAT(TxPause, TxPauseFrames),
619 HW_STAT(TxUnderrun, TxUnderrun),
620 HW_STAT(TxSize1519ToMax, TxJumboFramesOK),
621 }, *p = hw_stats;
622 unsigned int port = mac->instance->index;
623 u64 *stats = (u64 *)&mac->stats;
624 unsigned int i;
625
626 for (i = 0; i < ARRAY_SIZE(hw_stats); i++)
627 rmon_update(mac, CRA(0x4, port, p->reg), stats + p->offset);
595 628
596 /* Rx stats */ 629 rmon_update(mac, REG_TX_OK_BYTES(port), &mac->stats.TxOctetsOK);
597 rmon_update(mac, REG_RX_OK_BYTES(port), &mac->stats.RxOctetsOK); 630 rmon_update(mac, REG_RX_OK_BYTES(port), &mac->stats.RxOctetsOK);
598 rmon_update(mac, REG_RX_BAD_BYTES(port), &mac->stats.RxOctetsBad); 631 rmon_update(mac, REG_RX_BAD_BYTES(port), &mac->stats.RxOctetsBad);
599 rmon_update(mac, REG_RX_UNICAST(port), &mac->stats.RxUnicastFramesOK);
600 rmon_update(mac, REG_RX_MULTICAST(port),
601 &mac->stats.RxMulticastFramesOK);
602 rmon_update(mac, REG_RX_BROADCAST(port),
603 &mac->stats.RxBroadcastFramesOK);
604 rmon_update(mac, REG_CRC(port), &mac->stats.RxFCSErrors);
605 rmon_update(mac, REG_RX_ALIGNMENT(port), &mac->stats.RxAlignErrors);
606 rmon_update(mac, REG_RX_OVERSIZE(port),
607 &mac->stats.RxFrameTooLongErrors);
608 rmon_update(mac, REG_RX_PAUSE(port), &mac->stats.RxPauseFrames);
609 rmon_update(mac, REG_RX_JABBERS(port), &mac->stats.RxJabberErrors);
610 rmon_update(mac, REG_RX_FRAGMENTS(port), &mac->stats.RxRuntErrors);
611 rmon_update(mac, REG_RX_UNDERSIZE(port), &mac->stats.RxRuntErrors);
612 rmon_update(mac, REG_RX_SYMBOL_CARRIER(port),
613 &mac->stats.RxSymbolErrors);
614 rmon_update(mac, REG_RX_SIZE_1519_TO_MAX(port),
615 &mac->stats.RxJumboFramesOK);
616
617 /* Tx stats (skip collision stats as we are full-duplex only) */
618 rmon_update(mac, REG_TX_OK_BYTES(port), &mac->stats.TxOctetsOK);
619 rmon_update(mac, REG_TX_UNICAST(port), &mac->stats.TxUnicastFramesOK);
620 rmon_update(mac, REG_TX_MULTICAST(port),
621 &mac->stats.TxMulticastFramesOK);
622 rmon_update(mac, REG_TX_BROADCAST(port),
623 &mac->stats.TxBroadcastFramesOK);
624 rmon_update(mac, REG_TX_PAUSE(port), &mac->stats.TxPauseFrames);
625 rmon_update(mac, REG_TX_UNDERRUN(port), &mac->stats.TxUnderrun);
626 rmon_update(mac, REG_TX_SIZE_1519_TO_MAX(port),
627 &mac->stats.TxJumboFramesOK);
628} 632}
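HW_STAT's (&((struct cmac_statistics *)NULL)->stat_name) - (u64 *)NULL is a hand-rolled field index: the member's byte offset divided by sizeof(u64), computed by pointer arithmetic on a NULL base. The conventional spelling uses offsetof():

	#include <linux/stddef.h>	/* offsetof() */

	#define HW_STAT_IDX(field) \
		(offsetof(struct cmac_statistics, field) / sizeof(u64))

	/* e.g. stats[HW_STAT_IDX(RxUnicastFramesOK)] is the same slot the
	 * table-driven loop above updates through stats + p->offset. */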
629 633
630/* 634/*
@@ -686,7 +690,8 @@ static struct cmac *vsc7326_mac_create(adapter_t *adapter, int index)
686 int i; 690 int i;
687 691
688 mac = kzalloc(sizeof(*mac) + sizeof(cmac_instance), GFP_KERNEL); 692 mac = kzalloc(sizeof(*mac) + sizeof(cmac_instance), GFP_KERNEL);
689 if (!mac) return NULL; 693 if (!mac)
694 return NULL;
690 695
691 mac->ops = &vsc7326_ops; 696 mac->ops = &vsc7326_ops;
692 mac->instance = (cmac_instance *)(mac + 1); 697 mac->instance = (cmac_instance *)(mac + 1);
diff --git a/drivers/net/chelsio/vsc7326_reg.h b/drivers/net/chelsio/vsc7326_reg.h
index 491bcf75c4fb..479edbcabe68 100644
--- a/drivers/net/chelsio/vsc7326_reg.h
+++ b/drivers/net/chelsio/vsc7326_reg.h
@@ -192,73 +192,84 @@
192#define REG_HDX(pn) CRA(0x1,pn,0x19) /* Half-duplex config */ 192#define REG_HDX(pn) CRA(0x1,pn,0x19) /* Half-duplex config */
193 193
194/* Statistics */ 194/* Statistics */
195/* CRA(0x4,pn,reg) */
196/* reg below */
195/* pn = port number, 0-a, a = 10GbE */ 197/* pn = port number, 0-a, a = 10GbE */
196#define REG_RX_IN_BYTES(pn) CRA(0x4,pn,0x00) /* # Rx in octets */
197#define REG_RX_SYMBOL_CARRIER(pn) CRA(0x4,pn,0x01) /* Frames w/ symbol errors */
198#define REG_RX_PAUSE(pn) CRA(0x4,pn,0x02) /* # pause frames received */
199#define REG_RX_UNSUP_OPCODE(pn) CRA(0x4,pn,0x03) /* # control frames with unsupported opcode */
200#define REG_RX_OK_BYTES(pn) CRA(0x4,pn,0x04) /* # octets in good frames */
201#define REG_RX_BAD_BYTES(pn) CRA(0x4,pn,0x05) /* # octets in bad frames */
202#define REG_RX_UNICAST(pn) CRA(0x4,pn,0x06) /* # good unicast frames */
203#define REG_RX_MULTICAST(pn) CRA(0x4,pn,0x07) /* # good multicast frames */
204#define REG_RX_BROADCAST(pn) CRA(0x4,pn,0x08) /* # good broadcast frames */
205#define REG_CRC(pn) CRA(0x4,pn,0x09) /* # frames w/ bad CRC only */
206#define REG_RX_ALIGNMENT(pn) CRA(0x4,pn,0x0a) /* # frames w/ alignment err */
207#define REG_RX_UNDERSIZE(pn) CRA(0x4,pn,0x0b) /* # frames undersize */
208#define REG_RX_FRAGMENTS(pn) CRA(0x4,pn,0x0c) /* # frames undersize w/ crc err */
209#define REG_RX_IN_RANGE_LENGTH_ERROR(pn) CRA(0x4,pn,0x0d) /* # frames with length error */
210#define REG_RX_OUT_OF_RANGE_ERROR(pn) CRA(0x4,pn,0x0e) /* # frames with illegal length field */
211#define REG_RX_OVERSIZE(pn) CRA(0x4,pn,0x0f) /* # frames oversize */
212#define REG_RX_JABBERS(pn) CRA(0x4,pn,0x10) /* # frames oversize w/ crc err */
213#define REG_RX_SIZE_64(pn) CRA(0x4,pn,0x11) /* # frames 64 octets long */
214#define REG_RX_SIZE_65_TO_127(pn) CRA(0x4,pn,0x12) /* # frames 65-127 octets */
215#define REG_RX_SIZE_128_TO_255(pn) CRA(0x4,pn,0x13) /* # frames 128-255 */
216#define REG_RX_SIZE_256_TO_511(pn) CRA(0x4,pn,0x14) /* # frames 256-511 */
217#define REG_RX_SIZE_512_TO_1023(pn) CRA(0x4,pn,0x15) /* # frames 512-1023 */
218#define REG_RX_SIZE_1024_TO_1518(pn) CRA(0x4,pn,0x16) /* # frames 1024-1518 */
219#define REG_RX_SIZE_1519_TO_MAX(pn) CRA(0x4,pn,0x17) /* # frames 1519-max */
220 198
221#define REG_TX_OUT_BYTES(pn) CRA(0x4,pn,0x18) /* # octets tx */
222#define REG_TX_PAUSE(pn) CRA(0x4,pn,0x19) /* # pause frames sent */
223#define REG_TX_OK_BYTES(pn) CRA(0x4,pn,0x1a) /* # octets tx OK */
224#define REG_TX_UNICAST(pn) CRA(0x4,pn,0x1b) /* # frames unicast */
225#define REG_TX_MULTICAST(pn) CRA(0x4,pn,0x1c) /* # frames multicast */
226#define REG_TX_BROADCAST(pn) CRA(0x4,pn,0x1d) /* # frames broadcast */
227#define REG_TX_MULTIPLE_COLL(pn) CRA(0x4,pn,0x1e) /* # frames tx after multiple collisions */
228#define REG_TX_LATE_COLL(pn) CRA(0x4,pn,0x1f) /* # late collisions detected */
229#define REG_TX_XCOLL(pn) CRA(0x4,pn,0x20) /* # frames lost, excessive collisions */
230#define REG_TX_DEFER(pn) CRA(0x4,pn,0x21) /* # frames deferred on first tx attempt */
231#define REG_TX_XDEFER(pn) CRA(0x4,pn,0x22) /* # frames excessively deferred */
232#define REG_TX_CSENSE(pn) CRA(0x4,pn,0x23) /* carrier sense errors at frame end */
233#define REG_TX_SIZE_64(pn) CRA(0x4,pn,0x24) /* # frames 64 octets long */
234#define REG_TX_SIZE_65_TO_127(pn) CRA(0x4,pn,0x25) /* # frames 65-127 octets */
235#define REG_TX_SIZE_128_TO_255(pn) CRA(0x4,pn,0x26) /* # frames 128-255 */
236#define REG_TX_SIZE_256_TO_511(pn) CRA(0x4,pn,0x27) /* # frames 256-511 */
237#define REG_TX_SIZE_512_TO_1023(pn) CRA(0x4,pn,0x28) /* # frames 512-1023 */
238#define REG_TX_SIZE_1024_TO_1518(pn) CRA(0x4,pn,0x29) /* # frames 1024-1518 */
239#define REG_TX_SIZE_1519_TO_MAX(pn) CRA(0x4,pn,0x2a) /* # frames 1519-max */
240#define REG_TX_SINGLE_COLL(pn) CRA(0x4,pn,0x2b) /* # frames tx after single collision */
241#define REG_TX_BACKOFF2(pn) CRA(0x4,pn,0x2c) /* # frames tx ok after 2 backoffs/collisions */
242#define REG_TX_BACKOFF3(pn) CRA(0x4,pn,0x2d) /* after 3 backoffs/collisions */
243#define REG_TX_BACKOFF4(pn) CRA(0x4,pn,0x2e) /* after 4 */
244#define REG_TX_BACKOFF5(pn) CRA(0x4,pn,0x2f) /* after 5 */
245#define REG_TX_BACKOFF6(pn) CRA(0x4,pn,0x30) /* after 6 */
246#define REG_TX_BACKOFF7(pn) CRA(0x4,pn,0x31) /* after 7 */
247#define REG_TX_BACKOFF8(pn) CRA(0x4,pn,0x32) /* after 8 */
248#define REG_TX_BACKOFF9(pn) CRA(0x4,pn,0x33) /* after 9 */
249#define REG_TX_BACKOFF10(pn) CRA(0x4,pn,0x34) /* after 10 */
250#define REG_TX_BACKOFF11(pn) CRA(0x4,pn,0x35) /* after 11 */
251#define REG_TX_BACKOFF12(pn) CRA(0x4,pn,0x36) /* after 12 */
252#define REG_TX_BACKOFF13(pn) CRA(0x4,pn,0x37) /* after 13 */
253#define REG_TX_BACKOFF14(pn) CRA(0x4,pn,0x38) /* after 14 */
254#define REG_TX_BACKOFF15(pn) CRA(0x4,pn,0x39) /* after 15 */
255#define REG_TX_UNDERRUN(pn) CRA(0x4,pn,0x3a) /* # frames dropped from underrun */
256#define REG_RX_XGMII_PROT_ERR CRA(0x4,0xa,0x3b) /* # protocol errors detected on XGMII interface */
257#define REG_RX_IPG_SHRINK(pn) CRA(0x4,pn,0x3c) /* # of IPG shrinks detected */
258
259#define REG_STAT_STICKY1G(pn) CRA(0x4,pn,0x3e) /* tri-speed sticky bits */
260#define REG_STAT_STICKY10G CRA(0x4,0xa,0x3e) /* 10GbE sticky bits */
261#define REG_STAT_INIT(pn) CRA(0x4,pn,0x3f) /* Clear all statistics */
 199enum {
 200 RxInBytes = 0x00, // # Rx in octets
 201 RxSymbolCarrier = 0x01, // Frames w/ symbol errors
 202 RxPause = 0x02, // # pause frames received
 203 RxUnsupOpcode = 0x03, // # control frames with unsupported opcode
 204 RxOkBytes = 0x04, // # octets in good frames
 205 RxBadBytes = 0x05, // # octets in bad frames
 206 RxUnicast = 0x06, // # good unicast frames
 207 RxMulticast = 0x07, // # good multicast frames
 208 RxBroadcast = 0x08, // # good broadcast frames
 209 Crc = 0x09, // # frames w/ bad CRC only
 210 RxAlignment = 0x0a, // # frames w/ alignment err
 211 RxUndersize = 0x0b, // # frames undersize
 212 RxFragments = 0x0c, // # frames undersize w/ crc err
 213 RxInRangeLengthError = 0x0d, // # frames with length error
 214 RxOutOfRangeError = 0x0e, // # frames with illegal length field
 215 RxOversize = 0x0f, // # frames oversize
 216 RxJabbers = 0x10, // # frames oversize w/ crc err
 217 RxSize64 = 0x11, // # frames 64 octets long
 218 RxSize65To127 = 0x12, // # frames 65-127 octets
 219 RxSize128To255 = 0x13, // # frames 128-255
 220 RxSize256To511 = 0x14, // # frames 256-511
 221 RxSize512To1023 = 0x15, // # frames 512-1023
 222 RxSize1024To1518 = 0x16, // # frames 1024-1518
 223 RxSize1519ToMax = 0x17, // # frames 1519-max
 224
 225 TxOutBytes = 0x18, // # octets tx
 226 TxPause = 0x19, // # pause frames sent
 227 TxOkBytes = 0x1a, // # octets tx OK
 228 TxUnicast = 0x1b, // # frames unicast
 229 TxMulticast = 0x1c, // # frames multicast
 230 TxBroadcast = 0x1d, // # frames broadcast
 231 TxMultipleColl = 0x1e, // # frames tx after multiple collisions
 232 TxLateColl = 0x1f, // # late collisions detected
 233 TxXcoll = 0x20, // # frames lost, excessive collisions
 234 TxDefer = 0x21, // # frames deferred on first tx attempt
 235 TxXdefer = 0x22, // # frames excessively deferred
 236 TxCsense = 0x23, // carrier sense errors at frame end
 237 TxSize64 = 0x24, // # frames 64 octets long
 238 TxSize65To127 = 0x25, // # frames 65-127 octets
 239 TxSize128To255 = 0x26, // # frames 128-255
 240 TxSize256To511 = 0x27, // # frames 256-511
241 TxSize512To1023 = 0x28, // # frames 512-1023
242 TxSize1024To1518 = 0x29, // # frames 1024-1518
243 TxSize1519ToMax = 0x2a, // # frames 1519-max
244 TxSingleColl = 0x2b, // # frames tx after single collision
245 TxBackoff2 = 0x2c, // # frames tx ok after 2 backoffs/collisions
246 TxBackoff3 = 0x2d, // after 3 backoffs/collisions
247 TxBackoff4 = 0x2e, // after 4
248 TxBackoff5 = 0x2f, // after 5
249 TxBackoff6 = 0x30, // after 6
250 TxBackoff7 = 0x31, // after 7
251 TxBackoff8 = 0x32, // after 8
252 TxBackoff9 = 0x33, // after 9
253 TxBackoff10 = 0x34, // after 10
254 TxBackoff11 = 0x35, // after 11
255 TxBackoff12 = 0x36, // after 12
256 TxBackoff13 = 0x37, // after 13
257 TxBackoff14 = 0x38, // after 14
258 TxBackoff15 = 0x39, // after 15
259 TxUnderrun = 0x3a, // # frames dropped from underrun
260 // Hole. See REG_RX_XGMII_PROT_ERR below.
261 RxIpgShrink = 0x3c, // # of IPG shrinks detected
262 // Duplicate. See REG_STAT_STICKY10G below.
263 StatSticky1G = 0x3e, // tri-speed sticky bits
264 StatInit = 0x3f // Clear all statistics
265};
266
267#define REG_RX_XGMII_PROT_ERR CRA(0x4,0xa,0x3b) /* # protocol errors detected on XGMII interface */
268#define REG_STAT_STICKY10G CRA(0x4,0xa,StatSticky1G) /* 10GbE sticky bits */
269
270#define REG_RX_OK_BYTES(pn) CRA(0x4,pn,RxOkBytes)
271#define REG_RX_BAD_BYTES(pn) CRA(0x4,pn,RxBadBytes)
272#define REG_TX_OK_BYTES(pn) CRA(0x4,pn,TxOkBytes)
262 273
263/* MII-Management Block registers */ 274/* MII-Management Block registers */
264/* These are for MII-M interface 0, which is the bidirectional LVTTL one. If 275/* These are for MII-M interface 0, which is the bidirectional LVTTL one. If
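The hunk above replaces the per-statistic REG_* macros with a single enum of counter offsets, so one offset table serves the tri-speed ports and the 10GbE port (pn == 0xa) alike, and only the three byte counters keep convenience macros. Below is a small user-space sketch of the idea; cra() here is a hypothetical stand-in for the CRA() macro defined earlier in vsc7326_reg.h, with purely illustrative field widths.

#include <stdio.h>

enum { RxOkBytes = 0x04, TxOkBytes = 0x1a, StatSticky1G = 0x3e };

/* Hypothetical stand-in for the driver's CRA() macro: pack a
 * (block, subblock/port, address) triple into one register address.
 * The shifts below are illustrative, not the chip's real layout. */
static unsigned int cra(unsigned int blk, unsigned int pn, unsigned int adr)
{
	return (blk << 12) | (pn << 8) | adr;
}

int main(void)
{
	/* One offset table serves every port, including 10GbE at pn == 0xa. */
	for (unsigned int pn = 0; pn <= 0xa; pn++)
		printf("port %x: RX_OK_BYTES at %#x, TX_OK_BYTES at %#x\n",
		       pn, cra(0x4, pn, RxOkBytes), cra(0x4, pn, TxOkBytes));
	return 0;
}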
diff --git a/drivers/net/chelsio/vsc8244.c b/drivers/net/chelsio/vsc8244.c
index c493e783d459..251d4859c91d 100644
--- a/drivers/net/chelsio/vsc8244.c
+++ b/drivers/net/chelsio/vsc8244.c
@@ -54,7 +54,7 @@ enum {
 };
 
 #define CFG_CHG_INTR_MASK (VSC_INTR_LINK_CHG | VSC_INTR_NEG_ERR | \
                            VSC_INTR_NEG_DONE)
 #define INTR_MASK (CFG_CHG_INTR_MASK | VSC_INTR_TX_FIFO | VSC_INTR_RX_FIFO | \
                    VSC_INTR_ENABLE)
 
@@ -94,19 +94,18 @@ static int vsc8244_intr_enable(struct cphy *cphy)
 {
 	simple_mdio_write(cphy, VSC8244_INTR_ENABLE, INTR_MASK);
 
 	/* Enable interrupts through Elmer */
 	if (t1_is_asic(cphy->adapter)) {
 		u32 elmer;
 
 		t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
 		elmer |= ELMER0_GP_BIT1;
-		if (is_T2(cphy->adapter)) {
+		if (is_T2(cphy->adapter))
 			elmer |= ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4;
-		}
 		t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
 	}
 
 	return 0;
 }
 
 static int vsc8244_intr_disable(struct cphy *cphy)
@@ -118,19 +117,18 @@ static int vsc8244_intr_disable(struct cphy *cphy)
 
 		t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
 		elmer &= ~ELMER0_GP_BIT1;
-		if (is_T2(cphy->adapter)) {
+		if (is_T2(cphy->adapter))
 			elmer &= ~(ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4);
-		}
 		t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
 	}
 
 	return 0;
 }
 
 static int vsc8244_intr_clear(struct cphy *cphy)
 {
 	u32 val;
 	u32 elmer;
 
 	/* Clear PHY interrupts by reading the register. */
 	simple_mdio_read(cphy, VSC8244_INTR_ENABLE, &val);
@@ -138,13 +136,12 @@ static int vsc8244_intr_clear(struct cphy *cphy)
 	if (t1_is_asic(cphy->adapter)) {
 		t1_tpi_read(cphy->adapter, A_ELMER0_INT_CAUSE, &elmer);
 		elmer |= ELMER0_GP_BIT1;
-		if (is_T2(cphy->adapter)) {
+		if (is_T2(cphy->adapter))
 			elmer |= ELMER0_GP_BIT2|ELMER0_GP_BIT3|ELMER0_GP_BIT4;
-		}
 		t1_tpi_write(cphy->adapter, A_ELMER0_INT_CAUSE, elmer);
 	}
 
 	return 0;
 }
 
 /*
@@ -179,13 +176,13 @@ static int vsc8244_set_speed_duplex(struct cphy *phy, int speed, int duplex)
 
 int t1_mdio_set_bits(struct cphy *phy, int mmd, int reg, unsigned int bits)
 {
 	int ret;
 	unsigned int val;
 
 	ret = mdio_read(phy, mmd, reg, &val);
 	if (!ret)
 		ret = mdio_write(phy, mmd, reg, val | bits);
 	return ret;
 }
 
 static int vsc8244_autoneg_enable(struct cphy *cphy)
@@ -235,7 +232,7 @@ static int vsc8244_advertise(struct cphy *phy, unsigned int advertise_map)
 }
 
 static int vsc8244_get_link_status(struct cphy *cphy, int *link_ok,
 				   int *speed, int *duplex, int *fc)
 {
 	unsigned int bmcr, status, lpa, adv;
 	int err, sp = -1, dplx = -1, pause = 0;
@@ -343,11 +340,13 @@ static struct cphy_ops vsc8244_ops = {
 	.get_link_status = vsc8244_get_link_status
 };
 
-static struct cphy* vsc8244_phy_create(adapter_t *adapter, int phy_addr, struct mdio_ops *mdio_ops)
+static struct cphy* vsc8244_phy_create(adapter_t *adapter, int phy_addr,
+				       struct mdio_ops *mdio_ops)
 {
 	struct cphy *cphy = kzalloc(sizeof(*cphy), GFP_KERNEL);
 
-	if (!cphy) return NULL;
+	if (!cphy)
+		return NULL;
 
 	cphy_init(cphy, adapter, phy_addr, &vsc8244_ops, mdio_ops);
 
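The three interrupt hunks above drop the braces around the T2 special-casing but keep the same read-modify-write idiom on the Elmer interrupt-enable word: fetch the word, set or clear the extra GP bits only when the adapter is a T2, and write it back. A stand-alone sketch of that idiom follows; the bit values are invented for illustration and are not the driver's ELMER0_GP_* definitions.

#include <stdio.h>

#define GP_BIT1 (1u << 1)	/* illustrative values, not the driver's */
#define GP_BIT2 (1u << 2)
#define GP_BIT3 (1u << 3)
#define GP_BIT4 (1u << 4)

/* Compute the new enable word from the current one. */
static unsigned int elmer_enable(unsigned int word, int is_t2, int enable)
{
	unsigned int t2_bits = GP_BIT2 | GP_BIT3 | GP_BIT4;

	if (enable) {
		word |= GP_BIT1;
		if (is_t2)
			word |= t2_bits;
	} else {
		word &= ~GP_BIT1;
		if (is_t2)
			word &= ~t2_bits;
	}
	return word;
}

int main(void)
{
	printf("%#x\n", elmer_enable(0, 1, 1));	/* 0x1e */
	printf("%#x\n", elmer_enable(0x1e, 1, 0));	/* 0 */
	return 0;
}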
diff --git a/drivers/net/cxgb3/Makefile b/drivers/net/cxgb3/Makefile
new file mode 100644
index 000000000000..343467985321
--- /dev/null
+++ b/drivers/net/cxgb3/Makefile
@@ -0,0 +1,8 @@
1#
2# Chelsio T3 driver
3#
4
5obj-$(CONFIG_CHELSIO_T3) += cxgb3.o
6
7cxgb3-objs := cxgb3_main.o ael1002.o vsc8211.o t3_hw.o mc5.o \
8 xgmac.o sge.o l2t.o cxgb3_offload.o
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
new file mode 100644
index 000000000000..5c97a64451ce
--- /dev/null
+++ b/drivers/net/cxgb3/adapter.h
@@ -0,0 +1,279 @@
1/*
2 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33/* This file should not be included directly. Include common.h instead. */
34
35#ifndef __T3_ADAPTER_H__
36#define __T3_ADAPTER_H__
37
38#include <linux/pci.h>
39#include <linux/spinlock.h>
40#include <linux/interrupt.h>
41#include <linux/timer.h>
42#include <linux/cache.h>
43#include <linux/mutex.h>
44#include "t3cdev.h"
45#include <asm/semaphore.h>
46#include <asm/bitops.h>
47#include <asm/io.h>
48
49typedef irqreturn_t(*intr_handler_t) (int, void *);
50
51struct vlan_group;
52
53struct port_info {
54 struct vlan_group *vlan_grp;
55 const struct port_type_info *port_type;
56 u8 port_id;
57 u8 rx_csum_offload;
58 u8 nqsets;
59 u8 first_qset;
60 struct cphy phy;
61 struct cmac mac;
62 struct link_config link_config;
63 struct net_device_stats netstats;
64 int activity;
65};
66
67enum { /* adapter flags */
68 FULL_INIT_DONE = (1 << 0),
69 USING_MSI = (1 << 1),
70 USING_MSIX = (1 << 2),
71 QUEUES_BOUND = (1 << 3),
72};
73
74struct rx_desc;
75struct rx_sw_desc;
76
77struct sge_fl { /* SGE per free-buffer list state */
78 unsigned int buf_size; /* size of each Rx buffer */
79 unsigned int credits; /* # of available Rx buffers */
80 unsigned int size; /* capacity of free list */
81 unsigned int cidx; /* consumer index */
82 unsigned int pidx; /* producer index */
83 unsigned int gen; /* free list generation */
84 struct rx_desc *desc; /* address of HW Rx descriptor ring */
85 struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */
86 dma_addr_t phys_addr; /* physical address of HW ring start */
87 unsigned int cntxt_id; /* SGE context id for the free list */
88 unsigned long empty; /* # of times queue ran out of buffers */
89};
90
91/*
92 * Bundle size for grouping offload RX packets for delivery to the stack.
93 * Don't make this too big as we do prefetch on each packet in a bundle.
94 */
95# define RX_BUNDLE_SIZE 8
96
97struct rsp_desc;
98
99struct sge_rspq { /* state for an SGE response queue */
100 unsigned int credits; /* # of pending response credits */
101 unsigned int size; /* capacity of response queue */
102 unsigned int cidx; /* consumer index */
103 unsigned int gen; /* current generation bit */
104 unsigned int polling; /* is the queue serviced through NAPI? */
105 unsigned int holdoff_tmr; /* interrupt holdoff timer in 100ns */
106 unsigned int next_holdoff; /* holdoff time for next interrupt */
107 struct rsp_desc *desc; /* address of HW response ring */
108 dma_addr_t phys_addr; /* physical address of the ring */
109 unsigned int cntxt_id; /* SGE context id for the response q */
110 spinlock_t lock; /* guards response processing */
111 struct sk_buff *rx_head; /* offload packet receive queue head */
112 struct sk_buff *rx_tail; /* offload packet receive queue tail */
113
114 unsigned long offload_pkts;
115 unsigned long offload_bundles;
116 unsigned long eth_pkts; /* # of ethernet packets */
117 unsigned long pure_rsps; /* # of pure (non-data) responses */
118 unsigned long imm_data; /* responses with immediate data */
119 unsigned long rx_drops; /* # of packets dropped due to no mem */
120 unsigned long async_notif; /* # of asynchronous notification events */
121 unsigned long empty; /* # of times queue ran out of credits */
122 unsigned long nomem; /* # of responses deferred due to no mem */
123 unsigned long unhandled_irqs; /* # of spurious intrs */
124};
125
126struct tx_desc;
127struct tx_sw_desc;
128
129struct sge_txq { /* state for an SGE Tx queue */
130 unsigned long flags; /* HW DMA fetch status */
131 unsigned int in_use; /* # of in-use Tx descriptors */
132 unsigned int size; /* # of descriptors */
133 unsigned int processed; /* total # of descs HW has processed */
134 unsigned int cleaned; /* total # of descs SW has reclaimed */
135 unsigned int stop_thres; /* SW TX queue suspend threshold */
136 unsigned int cidx; /* consumer index */
137 unsigned int pidx; /* producer index */
138 unsigned int gen; /* current value of generation bit */
139 unsigned int unacked; /* Tx descriptors used since last COMPL */
140 struct tx_desc *desc; /* address of HW Tx descriptor ring */
141 struct tx_sw_desc *sdesc; /* address of SW Tx descriptor ring */
142 spinlock_t lock; /* guards enqueueing of new packets */
143 unsigned int token; /* WR token */
144 dma_addr_t phys_addr; /* physical address of the ring */
145 struct sk_buff_head sendq; /* List of backpressured offload packets */
146 struct tasklet_struct qresume_tsk; /* restarts the queue */
147 unsigned int cntxt_id; /* SGE context id for the Tx q */
148 unsigned long stops; /* # of times q has been stopped */
149 unsigned long restarts; /* # of queue restarts */
150};
151
152enum { /* per port SGE statistics */
153 SGE_PSTAT_TSO, /* # of TSO requests */
154 SGE_PSTAT_RX_CSUM_GOOD, /* # of successful RX csum offloads */
155 SGE_PSTAT_TX_CSUM, /* # of TX checksum offloads */
156 SGE_PSTAT_VLANEX, /* # of VLAN tag extractions */
157 SGE_PSTAT_VLANINS, /* # of VLAN tag insertions */
158
159 SGE_PSTAT_MAX /* must be last */
160};
161
162struct sge_qset { /* an SGE queue set */
163 struct sge_rspq rspq;
164 struct sge_fl fl[SGE_RXQ_PER_SET];
165 struct sge_txq txq[SGE_TXQ_PER_SET];
166 struct net_device *netdev; /* associated net device */
167 unsigned long txq_stopped; /* which Tx queues are stopped */
168 struct timer_list tx_reclaim_timer; /* reclaims TX buffers */
169 unsigned long port_stats[SGE_PSTAT_MAX];
170} ____cacheline_aligned;
171
172struct sge {
173 struct sge_qset qs[SGE_QSETS];
174 spinlock_t reg_lock; /* guards non-atomic SGE registers (eg context) */
175};
176
177struct adapter {
178 struct t3cdev tdev;
179 struct list_head adapter_list;
180 void __iomem *regs;
181 struct pci_dev *pdev;
182 unsigned long registered_device_map;
183 unsigned long open_device_map;
184 unsigned long flags;
185
186 const char *name;
187 int msg_enable;
188 unsigned int mmio_len;
189
190 struct adapter_params params;
191 unsigned int slow_intr_mask;
192 unsigned long irq_stats[IRQ_NUM_STATS];
193
194 struct {
195 unsigned short vec;
196 char desc[22];
197 } msix_info[SGE_QSETS + 1];
198
199 /* T3 modules */
200 struct sge sge;
201 struct mc7 pmrx;
202 struct mc7 pmtx;
203 struct mc7 cm;
204 struct mc5 mc5;
205
206 struct net_device *port[MAX_NPORTS];
207 unsigned int check_task_cnt;
208 struct delayed_work adap_check_task;
209 struct work_struct ext_intr_handler_task;
210
211 /*
212 * Dummy netdevices are needed when using multiple receive queues with
213 * NAPI as each netdevice can service only one queue.
214 */
215 struct net_device *dummy_netdev[SGE_QSETS - 1];
216
217 struct dentry *debugfs_root;
218
219 struct mutex mdio_lock;
220 spinlock_t stats_lock;
221 spinlock_t work_lock;
222};
223
224static inline u32 t3_read_reg(struct adapter *adapter, u32 reg_addr)
225{
226 u32 val = readl(adapter->regs + reg_addr);
227
228 CH_DBG(adapter, MMIO, "read register 0x%x value 0x%x\n", reg_addr, val);
229 return val;
230}
231
232static inline void t3_write_reg(struct adapter *adapter, u32 reg_addr, u32 val)
233{
234 CH_DBG(adapter, MMIO, "setting register 0x%x to 0x%x\n", reg_addr, val);
235 writel(val, adapter->regs + reg_addr);
236}
237
238static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
239{
240 return netdev_priv(adap->port[idx]);
241}
242
243/*
244 * We use the spare atalk_ptr to map a net device to its SGE queue set.
245 * This is a macro so it can be used as an l-value.
246 */
247#define dev2qset(netdev) ((netdev)->atalk_ptr)
248
249#define OFFLOAD_DEVMAP_BIT 15
250
251#define tdev2adap(d) container_of(d, struct adapter, tdev)
252
253static inline int offload_running(struct adapter *adapter)
254{
255 return test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
256}
257
258int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb);
259
260void t3_os_ext_intr_handler(struct adapter *adapter);
261void t3_os_link_changed(struct adapter *adapter, int port_id, int link_status,
262 int speed, int duplex, int fc);
263
264void t3_sge_start(struct adapter *adap);
265void t3_sge_stop(struct adapter *adap);
266void t3_free_sge_resources(struct adapter *adap);
267void t3_sge_err_intr_handler(struct adapter *adapter);
268intr_handler_t t3_intr_handler(struct adapter *adap, int polling);
269int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev);
270int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
271void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);
272int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
273 int irq_vec_idx, const struct qset_params *p,
274 int ntxq, struct net_device *netdev);
275int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
276 unsigned char *data);
277irqreturn_t t3_sge_intr_msix(int irq, void *cookie);
278
279#endif /* __T3_ADAPTER_H__ */
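adapter.h embeds struct t3cdev directly inside struct adapter and recovers the outer structure with container_of(), which is what the tdev2adap() macro above does. Here is a self-contained sketch of that pattern, with a local container_of definition so it compiles outside the kernel; the struct members are trimmed to the minimum.

#include <stdio.h>
#include <stddef.h>

/* Minimal container_of: step back from a member to its enclosing struct. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct t3cdev { int dummy; };

struct adapter {
	struct t3cdev tdev;	/* embedded, as in the driver */
	const char *name;
};

#define tdev2adap(d) container_of(d, struct adapter, tdev)

int main(void)
{
	struct adapter adap = { .name = "cxgb3-0" };
	struct t3cdev *tdev = &adap.tdev;

	/* Recover the enclosing adapter from the embedded member. */
	printf("%s\n", tdev2adap(tdev)->name);
	return 0;
}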
diff --git a/drivers/net/cxgb3/ael1002.c b/drivers/net/cxgb3/ael1002.c
new file mode 100644
index 000000000000..73a41e6a5bfc
--- /dev/null
+++ b/drivers/net/cxgb3/ael1002.c
@@ -0,0 +1,251 @@
1/*
2 * Copyright (c) 2005-2007 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include "common.h"
33#include "regs.h"
34
35enum {
36 AEL100X_TX_DISABLE = 9,
37 AEL100X_TX_CONFIG1 = 0xc002,
38 AEL1002_PWR_DOWN_HI = 0xc011,
39 AEL1002_PWR_DOWN_LO = 0xc012,
40 AEL1002_XFI_EQL = 0xc015,
41 AEL1002_LB_EN = 0xc017,
42
43 LASI_CTRL = 0x9002,
44 LASI_STAT = 0x9005
45};
46
47static void ael100x_txon(struct cphy *phy)
48{
49 int tx_on_gpio = phy->addr == 0 ? F_GPIO7_OUT_VAL : F_GPIO2_OUT_VAL;
50
51 msleep(100);
52 t3_set_reg_field(phy->adapter, A_T3DBG_GPIO_EN, 0, tx_on_gpio);
53 msleep(30);
54}
55
56static int ael1002_power_down(struct cphy *phy, int enable)
57{
58 int err;
59
60 err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL100X_TX_DISABLE, !!enable);
61 if (!err)
62 err = t3_mdio_change_bits(phy, MDIO_DEV_PMA_PMD, MII_BMCR,
63 BMCR_PDOWN, enable ? BMCR_PDOWN : 0);
64 return err;
65}
66
67static int ael1002_reset(struct cphy *phy, int wait)
68{
69 int err;
70
71 if ((err = ael1002_power_down(phy, 0)) ||
72 (err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL100X_TX_CONFIG1, 1)) ||
73 (err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL1002_PWR_DOWN_HI, 0)) ||
74 (err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL1002_PWR_DOWN_LO, 0)) ||
75 (err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL1002_XFI_EQL, 0x18)) ||
76 (err = t3_mdio_change_bits(phy, MDIO_DEV_PMA_PMD, AEL1002_LB_EN,
77 0, 1 << 5)))
78 return err;
79 return 0;
80}
81
82static int ael1002_intr_noop(struct cphy *phy)
83{
84 return 0;
85}
86
87static int ael100x_get_link_status(struct cphy *phy, int *link_ok,
88 int *speed, int *duplex, int *fc)
89{
90 if (link_ok) {
91 unsigned int status;
92 int err = mdio_read(phy, MDIO_DEV_PMA_PMD, MII_BMSR, &status);
93
94 /*
95 * BMSR_LSTATUS is latch-low, so if it is 0 we need to read it
96 * once more to get the current link state.
97 */
98 if (!err && !(status & BMSR_LSTATUS))
99 err = mdio_read(phy, MDIO_DEV_PMA_PMD, MII_BMSR,
100 &status);
101 if (err)
102 return err;
103 *link_ok = !!(status & BMSR_LSTATUS);
104 }
105 if (speed)
106 *speed = SPEED_10000;
107 if (duplex)
108 *duplex = DUPLEX_FULL;
109 return 0;
110}
111
112static struct cphy_ops ael1002_ops = {
113 .reset = ael1002_reset,
114 .intr_enable = ael1002_intr_noop,
115 .intr_disable = ael1002_intr_noop,
116 .intr_clear = ael1002_intr_noop,
117 .intr_handler = ael1002_intr_noop,
118 .get_link_status = ael100x_get_link_status,
119 .power_down = ael1002_power_down,
120};
121
122void t3_ael1002_phy_prep(struct cphy *phy, struct adapter *adapter,
123 int phy_addr, const struct mdio_ops *mdio_ops)
124{
125 cphy_init(phy, adapter, phy_addr, &ael1002_ops, mdio_ops);
126 ael100x_txon(phy);
127}
128
129static int ael1006_reset(struct cphy *phy, int wait)
130{
131 return t3_phy_reset(phy, MDIO_DEV_PMA_PMD, wait);
132}
133
134static int ael1006_intr_enable(struct cphy *phy)
135{
136 return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 1);
137}
138
139static int ael1006_intr_disable(struct cphy *phy)
140{
141 return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 0);
142}
143
144static int ael1006_intr_clear(struct cphy *phy)
145{
146 u32 val;
147
148 return mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &val);
149}
150
151static int ael1006_intr_handler(struct cphy *phy)
152{
153 unsigned int status;
154 int err = mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &status);
155
156 if (err)
157 return err;
158 return (status & 1) ? cphy_cause_link_change : 0;
159}
160
161static int ael1006_power_down(struct cphy *phy, int enable)
162{
163 return t3_mdio_change_bits(phy, MDIO_DEV_PMA_PMD, MII_BMCR,
164 BMCR_PDOWN, enable ? BMCR_PDOWN : 0);
165}
166
167static struct cphy_ops ael1006_ops = {
168 .reset = ael1006_reset,
169 .intr_enable = ael1006_intr_enable,
170 .intr_disable = ael1006_intr_disable,
171 .intr_clear = ael1006_intr_clear,
172 .intr_handler = ael1006_intr_handler,
173 .get_link_status = ael100x_get_link_status,
174 .power_down = ael1006_power_down,
175};
176
177void t3_ael1006_phy_prep(struct cphy *phy, struct adapter *adapter,
178 int phy_addr, const struct mdio_ops *mdio_ops)
179{
180 cphy_init(phy, adapter, phy_addr, &ael1006_ops, mdio_ops);
181 ael100x_txon(phy);
182}
183
184static struct cphy_ops qt2045_ops = {
185 .reset = ael1006_reset,
186 .intr_enable = ael1006_intr_enable,
187 .intr_disable = ael1006_intr_disable,
188 .intr_clear = ael1006_intr_clear,
189 .intr_handler = ael1006_intr_handler,
190 .get_link_status = ael100x_get_link_status,
191 .power_down = ael1006_power_down,
192};
193
194void t3_qt2045_phy_prep(struct cphy *phy, struct adapter *adapter,
195 int phy_addr, const struct mdio_ops *mdio_ops)
196{
197 unsigned int stat;
198
199 cphy_init(phy, adapter, phy_addr, &qt2045_ops, mdio_ops);
200
201 /*
202 * On some cards the PHY that is supposed to be at address 0 is
203 * actually at address 1.
204 */
205 if (!phy_addr && !mdio_read(phy, MDIO_DEV_PMA_PMD, MII_BMSR, &stat) &&
206 stat == 0xffff)
207 phy->addr = 1;
208}
209
210static int xaui_direct_reset(struct cphy *phy, int wait)
211{
212 return 0;
213}
214
215static int xaui_direct_get_link_status(struct cphy *phy, int *link_ok,
216 int *speed, int *duplex, int *fc)
217{
218 if (link_ok) {
219 unsigned int status;
220
221 status = t3_read_reg(phy->adapter,
222 XGM_REG(A_XGM_SERDES_STAT0, phy->addr));
223 *link_ok = !(status & F_LOWSIG0);
224 }
225 if (speed)
226 *speed = SPEED_10000;
227 if (duplex)
228 *duplex = DUPLEX_FULL;
229 return 0;
230}
231
232static int xaui_direct_power_down(struct cphy *phy, int enable)
233{
234 return 0;
235}
236
237static struct cphy_ops xaui_direct_ops = {
238 .reset = xaui_direct_reset,
239 .intr_enable = ael1002_intr_noop,
240 .intr_disable = ael1002_intr_noop,
241 .intr_clear = ael1002_intr_noop,
242 .intr_handler = ael1002_intr_noop,
243 .get_link_status = xaui_direct_get_link_status,
244 .power_down = xaui_direct_power_down,
245};
246
247void t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter,
248 int phy_addr, const struct mdio_ops *mdio_ops)
249{
250 cphy_init(phy, adapter, 1, &xaui_direct_ops, mdio_ops);
251}
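ael100x_get_link_status() above reads BMSR twice because BMSR_LSTATUS is latch-low: after a link flap the bit reads 0 once even if the link has come back, and only the second read reflects the live state. A minimal sketch of that behavior with a simulated latch-low register:

#include <stdio.h>

#define BMSR_LSTATUS 0x0004	/* standard MII BMSR link-status bit */

static unsigned int bmsr;		/* latched value */
static unsigned int link_up = 1;	/* live link state */

/* Simulated latch-low read: return the latched value, then re-arm it
 * from the live link state, as the PHY hardware does. */
static unsigned int read_bmsr(void)
{
	unsigned int v = bmsr;

	bmsr = link_up ? BMSR_LSTATUS : 0;
	return v;
}

int main(void)
{
	/* A past link flap left the latch cleared although the link is up. */
	unsigned int status = read_bmsr();

	if (!(status & BMSR_LSTATUS))
		status = read_bmsr();	/* second read sees the live state */
	printf("link_ok = %d\n", !!(status & BMSR_LSTATUS));
	return 0;
}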
diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
new file mode 100644
index 000000000000..e23deeb7d06d
--- /dev/null
+++ b/drivers/net/cxgb3/common.h
@@ -0,0 +1,729 @@
1/*
2 * Copyright (c) 2005-2007 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#ifndef __CHELSIO_COMMON_H
33#define __CHELSIO_COMMON_H
34
35#include <linux/kernel.h>
36#include <linux/types.h>
37#include <linux/ctype.h>
38#include <linux/delay.h>
39#include <linux/init.h>
40#include <linux/netdevice.h>
41#include <linux/ethtool.h>
42#include <linux/mii.h>
43#include "version.h"
44
45#define CH_ERR(adap, fmt, ...) dev_err(&adap->pdev->dev, fmt, ## __VA_ARGS__)
46#define CH_WARN(adap, fmt, ...) dev_warn(&adap->pdev->dev, fmt, ## __VA_ARGS__)
47#define CH_ALERT(adap, fmt, ...) \
48 dev_printk(KERN_ALERT, &adap->pdev->dev, fmt, ## __VA_ARGS__)
49
50/*
51 * More powerful macro that selectively prints messages based on msg_enable.
52 * For info and debugging messages.
53 */
54#define CH_MSG(adapter, level, category, fmt, ...) do { \
55 if ((adapter)->msg_enable & NETIF_MSG_##category) \
56 dev_printk(KERN_##level, &adapter->pdev->dev, fmt, \
57 ## __VA_ARGS__); \
58} while (0)
59
60#ifdef DEBUG
61# define CH_DBG(adapter, category, fmt, ...) \
62 CH_MSG(adapter, DEBUG, category, fmt, ## __VA_ARGS__)
63#else
64# define CH_DBG(adapter, category, fmt, ...)
65#endif
66
67/* Additional NETIF_MSG_* categories */
68#define NETIF_MSG_MMIO 0x8000000
69
70struct t3_rx_mode {
71 struct net_device *dev;
72 struct dev_mc_list *mclist;
73 unsigned int idx;
74};
75
76static inline void init_rx_mode(struct t3_rx_mode *p, struct net_device *dev,
77 struct dev_mc_list *mclist)
78{
79 p->dev = dev;
80 p->mclist = mclist;
81 p->idx = 0;
82}
83
84static inline u8 *t3_get_next_mcaddr(struct t3_rx_mode *rm)
85{
86 u8 *addr = NULL;
87
88 if (rm->mclist && rm->idx < rm->dev->mc_count) {
89 addr = rm->mclist->dmi_addr;
90 rm->mclist = rm->mclist->next;
91 rm->idx++;
92 }
93 return addr;
94}
95
96enum {
97 MAX_NPORTS = 2, /* max # of ports */
98 MAX_FRAME_SIZE = 10240, /* max MAC frame size, including header + FCS */
99 EEPROMSIZE = 8192, /* Serial EEPROM size */
100 RSS_TABLE_SIZE = 64, /* size of RSS lookup and mapping tables */
101 TCB_SIZE = 128, /* TCB size */
102 NMTUS = 16, /* size of MTU table */
103 NCCTRL_WIN = 32, /* # of congestion control windows */
104};
105
106#define MAX_RX_COALESCING_LEN 16224U
107
108enum {
109 PAUSE_RX = 1 << 0,
110 PAUSE_TX = 1 << 1,
111 PAUSE_AUTONEG = 1 << 2
112};
113
114enum {
115 SUPPORTED_OFFLOAD = 1 << 24,
116 SUPPORTED_IRQ = 1 << 25
117};
118
119enum { /* adapter interrupt-maintained statistics */
120 STAT_ULP_CH0_PBL_OOB,
121 STAT_ULP_CH1_PBL_OOB,
122 STAT_PCI_CORR_ECC,
123
124 IRQ_NUM_STATS /* keep last */
125};
126
127enum {
128 SGE_QSETS = 8, /* # of SGE Tx/Rx/RspQ sets */
129 SGE_RXQ_PER_SET = 2, /* # of Rx queues per set */
130 SGE_TXQ_PER_SET = 3 /* # of Tx queues per set */
131};
132
133enum sge_context_type { /* SGE egress context types */
134 SGE_CNTXT_RDMA = 0,
135 SGE_CNTXT_ETH = 2,
136 SGE_CNTXT_OFLD = 4,
137 SGE_CNTXT_CTRL = 5
138};
139
140enum {
141 AN_PKT_SIZE = 32, /* async notification packet size */
142 IMMED_PKT_SIZE = 48 /* packet size for immediate data */
143};
144
145struct sg_ent { /* SGE scatter/gather entry */
146 u32 len[2];
147 u64 addr[2];
148};
149
150#ifndef SGE_NUM_GENBITS
151/* Must be 1 or 2 */
152# define SGE_NUM_GENBITS 2
153#endif
154
155#define TX_DESC_FLITS 16U
156#define WR_FLITS (TX_DESC_FLITS + 1 - SGE_NUM_GENBITS)
157
158struct cphy;
159struct adapter;
160
161struct mdio_ops {
162 int (*read)(struct adapter *adapter, int phy_addr, int mmd_addr,
163 int reg_addr, unsigned int *val);
164 int (*write)(struct adapter *adapter, int phy_addr, int mmd_addr,
165 int reg_addr, unsigned int val);
166};
167
168struct adapter_info {
169 unsigned char nports; /* # of ports */
170 unsigned char phy_base_addr; /* MDIO PHY base address */
171 unsigned char mdien;
172 unsigned char mdiinv;
173 unsigned int gpio_out; /* GPIO output settings */
174 unsigned int gpio_intr; /* GPIO IRQ enable mask */
175 unsigned long caps; /* adapter capabilities */
176 const struct mdio_ops *mdio_ops; /* MDIO operations */
177 const char *desc; /* product description */
178};
179
180struct port_type_info {
181 void (*phy_prep)(struct cphy *phy, struct adapter *adapter,
182 int phy_addr, const struct mdio_ops *ops);
183 unsigned int caps;
184 const char *desc;
185};
186
187struct mc5_stats {
188 unsigned long parity_err;
189 unsigned long active_rgn_full;
190 unsigned long nfa_srch_err;
191 unsigned long unknown_cmd;
192 unsigned long reqq_parity_err;
193 unsigned long dispq_parity_err;
194 unsigned long del_act_empty;
195};
196
197struct mc7_stats {
198 unsigned long corr_err;
199 unsigned long uncorr_err;
200 unsigned long parity_err;
201 unsigned long addr_err;
202};
203
204struct mac_stats {
205 u64 tx_octets; /* total # of octets in good frames */
206 u64 tx_octets_bad; /* total # of octets in error frames */
207 u64 tx_frames; /* all good frames */
208 u64 tx_mcast_frames; /* good multicast frames */
209 u64 tx_bcast_frames; /* good broadcast frames */
210 u64 tx_pause; /* # of transmitted pause frames */
211 u64 tx_deferred; /* frames with deferred transmissions */
212 u64 tx_late_collisions; /* # of late collisions */
213 u64 tx_total_collisions; /* # of total collisions */
214 u64 tx_excess_collisions; /* frame errors from excessive collisions */
215 u64 tx_underrun; /* # of Tx FIFO underruns */
216 u64 tx_len_errs; /* # of Tx length errors */
217 u64 tx_mac_internal_errs; /* # of internal MAC errors on Tx */
218 u64 tx_excess_deferral; /* # of frames with excessive deferral */
219 u64 tx_fcs_errs; /* # of frames with bad FCS */
220
221 u64 tx_frames_64; /* # of Tx frames in a particular range */
222 u64 tx_frames_65_127;
223 u64 tx_frames_128_255;
224 u64 tx_frames_256_511;
225 u64 tx_frames_512_1023;
226 u64 tx_frames_1024_1518;
227 u64 tx_frames_1519_max;
228
229 u64 rx_octets; /* total # of octets in good frames */
230 u64 rx_octets_bad; /* total # of octets in error frames */
231 u64 rx_frames; /* all good frames */
232 u64 rx_mcast_frames; /* good multicast frames */
233 u64 rx_bcast_frames; /* good broadcast frames */
234 u64 rx_pause; /* # of received pause frames */
235 u64 rx_fcs_errs; /* # of received frames with bad FCS */
236 u64 rx_align_errs; /* alignment errors */
237 u64 rx_symbol_errs; /* symbol errors */
238 u64 rx_data_errs; /* data errors */
239 u64 rx_sequence_errs; /* sequence errors */
240 u64 rx_runt; /* # of runt frames */
241 u64 rx_jabber; /* # of jabber frames */
242 u64 rx_short; /* # of short frames */
243 u64 rx_too_long; /* # of oversized frames */
244 u64 rx_mac_internal_errs; /* # of internal MAC errors on Rx */
245
246 u64 rx_frames_64; /* # of Rx frames in a particular range */
247 u64 rx_frames_65_127;
248 u64 rx_frames_128_255;
249 u64 rx_frames_256_511;
250 u64 rx_frames_512_1023;
251 u64 rx_frames_1024_1518;
252 u64 rx_frames_1519_max;
253
254 u64 rx_cong_drops; /* # of Rx drops due to SGE congestion */
255
256 unsigned long tx_fifo_parity_err;
257 unsigned long rx_fifo_parity_err;
258 unsigned long tx_fifo_urun;
259 unsigned long rx_fifo_ovfl;
260 unsigned long serdes_signal_loss;
261 unsigned long xaui_pcs_ctc_err;
262 unsigned long xaui_pcs_align_change;
263};
264
265struct tp_mib_stats {
266 u32 ipInReceive_hi;
267 u32 ipInReceive_lo;
268 u32 ipInHdrErrors_hi;
269 u32 ipInHdrErrors_lo;
270 u32 ipInAddrErrors_hi;
271 u32 ipInAddrErrors_lo;
272 u32 ipInUnknownProtos_hi;
273 u32 ipInUnknownProtos_lo;
274 u32 ipInDiscards_hi;
275 u32 ipInDiscards_lo;
276 u32 ipInDelivers_hi;
277 u32 ipInDelivers_lo;
278 u32 ipOutRequests_hi;
279 u32 ipOutRequests_lo;
280 u32 ipOutDiscards_hi;
281 u32 ipOutDiscards_lo;
282 u32 ipOutNoRoutes_hi;
283 u32 ipOutNoRoutes_lo;
284 u32 ipReasmTimeout;
285 u32 ipReasmReqds;
286 u32 ipReasmOKs;
287 u32 ipReasmFails;
288
289 u32 reserved[8];
290
291 u32 tcpActiveOpens;
292 u32 tcpPassiveOpens;
293 u32 tcpAttemptFails;
294 u32 tcpEstabResets;
295 u32 tcpOutRsts;
296 u32 tcpCurrEstab;
297 u32 tcpInSegs_hi;
298 u32 tcpInSegs_lo;
299 u32 tcpOutSegs_hi;
300 u32 tcpOutSegs_lo;
301 u32 tcpRetransSeg_hi;
302 u32 tcpRetransSeg_lo;
303 u32 tcpInErrs_hi;
304 u32 tcpInErrs_lo;
305 u32 tcpRtoMin;
306 u32 tcpRtoMax;
307};
308
309struct tp_params {
310 unsigned int nchan; /* # of channels */
311 unsigned int pmrx_size; /* total PMRX capacity */
312 unsigned int pmtx_size; /* total PMTX capacity */
313 unsigned int cm_size; /* total CM capacity */
314 unsigned int chan_rx_size; /* per channel Rx size */
315 unsigned int chan_tx_size; /* per channel Tx size */
316 unsigned int rx_pg_size; /* Rx page size */
317 unsigned int tx_pg_size; /* Tx page size */
318 unsigned int rx_num_pgs; /* # of Rx pages */
319 unsigned int tx_num_pgs; /* # of Tx pages */
320 unsigned int ntimer_qs; /* # of timer queues */
321};
322
323struct qset_params { /* SGE queue set parameters */
324 unsigned int polling; /* polling/interrupt service for rspq */
325 unsigned int coalesce_usecs; /* irq coalescing timer */
326 unsigned int rspq_size; /* # of entries in response queue */
327 unsigned int fl_size; /* # of entries in regular free list */
328 unsigned int jumbo_size; /* # of entries in jumbo free list */
329 unsigned int txq_size[SGE_TXQ_PER_SET]; /* Tx queue sizes */
330 unsigned int cong_thres; /* FL congestion threshold */
331};
332
333struct sge_params {
334 unsigned int max_pkt_size; /* max offload pkt size */
335 struct qset_params qset[SGE_QSETS];
336};
337
338struct mc5_params {
339 unsigned int mode; /* selects MC5 width */
340 unsigned int nservers; /* size of server region */
341 unsigned int nfilters; /* size of filter region */
342 unsigned int nroutes; /* size of routing region */
343};
344
345/* Default MC5 region sizes */
346enum {
347 DEFAULT_NSERVERS = 512,
348 DEFAULT_NFILTERS = 128
349};
350
351/* MC5 modes, these must be non-0 */
352enum {
353 MC5_MODE_144_BIT = 1,
354 MC5_MODE_72_BIT = 2
355};
356
357struct vpd_params {
358 unsigned int cclk;
359 unsigned int mclk;
360 unsigned int uclk;
361 unsigned int mdc;
362 unsigned int mem_timing;
363 u8 eth_base[6];
364 u8 port_type[MAX_NPORTS];
365 unsigned short xauicfg[2];
366};
367
368struct pci_params {
369 unsigned int vpd_cap_addr;
370 unsigned int pcie_cap_addr;
371 unsigned short speed;
372 unsigned char width;
373 unsigned char variant;
374};
375
376enum {
377 PCI_VARIANT_PCI,
378 PCI_VARIANT_PCIX_MODE1_PARITY,
379 PCI_VARIANT_PCIX_MODE1_ECC,
380 PCI_VARIANT_PCIX_266_MODE2,
381 PCI_VARIANT_PCIE
382};
383
384struct adapter_params {
385 struct sge_params sge;
386 struct mc5_params mc5;
387 struct tp_params tp;
388 struct vpd_params vpd;
389 struct pci_params pci;
390
391 const struct adapter_info *info;
392
393 unsigned short mtus[NMTUS];
394 unsigned short a_wnd[NCCTRL_WIN];
395 unsigned short b_wnd[NCCTRL_WIN];
396
397 unsigned int nports; /* # of ethernet ports */
398 unsigned int stats_update_period; /* MAC stats accumulation period */
399 unsigned int linkpoll_period; /* link poll period in 0.1s */
400 unsigned int rev; /* chip revision */
401};
402
403struct trace_params {
404 u32 sip;
405 u32 sip_mask;
406 u32 dip;
407 u32 dip_mask;
408 u16 sport;
409 u16 sport_mask;
410 u16 dport;
411 u16 dport_mask;
412 u32 vlan:12;
413 u32 vlan_mask:12;
414 u32 intf:4;
415 u32 intf_mask:4;
416 u8 proto;
417 u8 proto_mask;
418};
419
420struct link_config {
421 unsigned int supported; /* link capabilities */
422 unsigned int advertising; /* advertised capabilities */
423 unsigned short requested_speed; /* speed user has requested */
424 unsigned short speed; /* actual link speed */
425 unsigned char requested_duplex; /* duplex user has requested */
426 unsigned char duplex; /* actual link duplex */
427 unsigned char requested_fc; /* flow control user has requested */
428 unsigned char fc; /* actual link flow control */
429 unsigned char autoneg; /* autonegotiating? */
430 unsigned int link_ok; /* link up? */
431};
432
433#define SPEED_INVALID 0xffff
434#define DUPLEX_INVALID 0xff
435
436struct mc5 {
437 struct adapter *adapter;
438 unsigned int tcam_size;
439 unsigned char part_type;
440 unsigned char parity_enabled;
441 unsigned char mode;
442 struct mc5_stats stats;
443};
444
445static inline unsigned int t3_mc5_size(const struct mc5 *p)
446{
447 return p->tcam_size;
448}
449
450struct mc7 {
451 struct adapter *adapter; /* backpointer to adapter */
452 unsigned int size; /* memory size in bytes */
453 unsigned int width; /* MC7 interface width */
454 unsigned int offset; /* register address offset for MC7 instance */
455 const char *name; /* name of MC7 instance */
456 struct mc7_stats stats; /* MC7 statistics */
457};
458
459static inline unsigned int t3_mc7_size(const struct mc7 *p)
460{
461 return p->size;
462}
463
464struct cmac {
465 struct adapter *adapter;
466 unsigned int offset;
467 unsigned int nucast; /* # of address filters for unicast MACs */
468 struct mac_stats stats;
469};
470
471enum {
472 MAC_DIRECTION_RX = 1,
473 MAC_DIRECTION_TX = 2,
474 MAC_RXFIFO_SIZE = 32768
475};
476
477/* IEEE 802.3ae specified MDIO devices */
478enum {
479 MDIO_DEV_PMA_PMD = 1,
480 MDIO_DEV_WIS = 2,
481 MDIO_DEV_PCS = 3,
482 MDIO_DEV_XGXS = 4
483};
484
485/* PHY loopback direction */
486enum {
487 PHY_LOOPBACK_TX = 1,
488 PHY_LOOPBACK_RX = 2
489};
490
491/* PHY interrupt types */
492enum {
493 cphy_cause_link_change = 1,
494 cphy_cause_fifo_error = 2
495};
496
497/* PHY operations */
498struct cphy_ops {
499 void (*destroy)(struct cphy *phy);
500 int (*reset)(struct cphy *phy, int wait);
501
502 int (*intr_enable)(struct cphy *phy);
503 int (*intr_disable)(struct cphy *phy);
504 int (*intr_clear)(struct cphy *phy);
505 int (*intr_handler)(struct cphy *phy);
506
507 int (*autoneg_enable)(struct cphy *phy);
508 int (*autoneg_restart)(struct cphy *phy);
509
510 int (*advertise)(struct cphy *phy, unsigned int advertise_map);
511 int (*set_loopback)(struct cphy *phy, int mmd, int dir, int enable);
512 int (*set_speed_duplex)(struct cphy *phy, int speed, int duplex);
513 int (*get_link_status)(struct cphy *phy, int *link_ok, int *speed,
514 int *duplex, int *fc);
515 int (*power_down)(struct cphy *phy, int enable);
516};
517
518/* A PHY instance */
519struct cphy {
520 int addr; /* PHY address */
521 struct adapter *adapter; /* associated adapter */
522 unsigned long fifo_errors; /* FIFO over/under-flows */
523 const struct cphy_ops *ops; /* PHY operations */
524 int (*mdio_read)(struct adapter *adapter, int phy_addr, int mmd_addr,
525 int reg_addr, unsigned int *val);
526 int (*mdio_write)(struct adapter *adapter, int phy_addr, int mmd_addr,
527 int reg_addr, unsigned int val);
528};
529
530/* Convenience MDIO read/write wrappers */
531static inline int mdio_read(struct cphy *phy, int mmd, int reg,
532 unsigned int *valp)
533{
534 return phy->mdio_read(phy->adapter, phy->addr, mmd, reg, valp);
535}
536
537static inline int mdio_write(struct cphy *phy, int mmd, int reg,
538 unsigned int val)
539{
540 return phy->mdio_write(phy->adapter, phy->addr, mmd, reg, val);
541}
542
543/* Convenience initializer */
544static inline void cphy_init(struct cphy *phy, struct adapter *adapter,
545 int phy_addr, struct cphy_ops *phy_ops,
546 const struct mdio_ops *mdio_ops)
547{
548 phy->adapter = adapter;
549 phy->addr = phy_addr;
550 phy->ops = phy_ops;
551 if (mdio_ops) {
552 phy->mdio_read = mdio_ops->read;
553 phy->mdio_write = mdio_ops->write;
554 }
555}
556
557/* Accumulate MAC statistics every 180 seconds. For 1G we multiply by 10. */
558#define MAC_STATS_ACCUM_SECS 180
559
560#define XGM_REG(reg_addr, idx) \
561 ((reg_addr) + (idx) * (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR))
562
563struct addr_val_pair {
564 unsigned int reg_addr;
565 unsigned int val;
566};
567
568#include "adapter.h"
569
570#ifndef PCI_VENDOR_ID_CHELSIO
571# define PCI_VENDOR_ID_CHELSIO 0x1425
572#endif
573
574#define for_each_port(adapter, iter) \
575 for (iter = 0; iter < (adapter)->params.nports; ++iter)
576
577#define adapter_info(adap) ((adap)->params.info)
578
579static inline int uses_xaui(const struct adapter *adap)
580{
581 return adapter_info(adap)->caps & SUPPORTED_AUI;
582}
583
584static inline int is_10G(const struct adapter *adap)
585{
586 return adapter_info(adap)->caps & SUPPORTED_10000baseT_Full;
587}
588
589static inline int is_offload(const struct adapter *adap)
590{
591 return adapter_info(adap)->caps & SUPPORTED_OFFLOAD;
592}
593
594static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
595{
596 return adap->params.vpd.cclk / 1000;
597}
598
599static inline unsigned int is_pcie(const struct adapter *adap)
600{
601 return adap->params.pci.variant == PCI_VARIANT_PCIE;
602}
603
604void t3_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask,
605 u32 val);
606void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
607 int n, unsigned int offset);
608int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
609 int polarity, int attempts, int delay, u32 *valp);
610static inline int t3_wait_op_done(struct adapter *adapter, int reg, u32 mask,
611 int polarity, int attempts, int delay)
612{
613 return t3_wait_op_done_val(adapter, reg, mask, polarity, attempts,
614 delay, NULL);
615}
616int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
617 unsigned int set);
618int t3_phy_reset(struct cphy *phy, int mmd, int wait);
619int t3_phy_advertise(struct cphy *phy, unsigned int advert);
620int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex);
621
622void t3_intr_enable(struct adapter *adapter);
623void t3_intr_disable(struct adapter *adapter);
624void t3_intr_clear(struct adapter *adapter);
625void t3_port_intr_enable(struct adapter *adapter, int idx);
626void t3_port_intr_disable(struct adapter *adapter, int idx);
627void t3_port_intr_clear(struct adapter *adapter, int idx);
628int t3_slow_intr_handler(struct adapter *adapter);
629int t3_phy_intr_handler(struct adapter *adapter);
630
631void t3_link_changed(struct adapter *adapter, int port_id);
632int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
633const struct adapter_info *t3_get_adapter_info(unsigned int board_id);
634int t3_seeprom_read(struct adapter *adapter, u32 addr, u32 *data);
635int t3_seeprom_write(struct adapter *adapter, u32 addr, u32 data);
636int t3_seeprom_wp(struct adapter *adapter, int enable);
637int t3_read_flash(struct adapter *adapter, unsigned int addr,
638 unsigned int nwords, u32 *data, int byte_oriented);
639int t3_load_fw(struct adapter *adapter, const u8 * fw_data, unsigned int size);
640int t3_get_fw_version(struct adapter *adapter, u32 *vers);
641int t3_check_fw_version(struct adapter *adapter);
642int t3_init_hw(struct adapter *adapter, u32 fw_params);
643void mac_prep(struct cmac *mac, struct adapter *adapter, int index);
644void early_hw_init(struct adapter *adapter, const struct adapter_info *ai);
645int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
646 int reset);
647void t3_led_ready(struct adapter *adapter);
648void t3_fatal_err(struct adapter *adapter);
649void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on);
650void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
651 const u8 * cpus, const u16 *rspq);
652int t3_read_rss(struct adapter *adapter, u8 * lkup, u16 *map);
653int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask);
654int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
655 unsigned int n, unsigned int *valp);
656int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
657 u64 *buf);
658
659int t3_mac_reset(struct cmac *mac);
660void t3b_pcs_reset(struct cmac *mac);
661int t3_mac_enable(struct cmac *mac, int which);
662int t3_mac_disable(struct cmac *mac, int which);
663int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu);
664int t3_mac_set_rx_mode(struct cmac *mac, struct t3_rx_mode *rm);
665int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6]);
666int t3_mac_set_num_ucast(struct cmac *mac, int n);
667const struct mac_stats *t3_mac_update_stats(struct cmac *mac);
668int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc);
669
670void t3_mc5_prep(struct adapter *adapter, struct mc5 *mc5, int mode);
671int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters,
672 unsigned int nroutes);
673void t3_mc5_intr_handler(struct mc5 *mc5);
674int t3_read_mc5_range(const struct mc5 *mc5, unsigned int start, unsigned int n,
675 u32 *buf);
676
677int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh);
678void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size);
679void t3_tp_set_offload_mode(struct adapter *adap, int enable);
680void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps);
681void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
682 unsigned short alpha[NCCTRL_WIN],
683 unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap);
684void t3_read_hw_mtus(struct adapter *adap, unsigned short mtus[NMTUS]);
685void t3_get_cong_cntl_tab(struct adapter *adap,
686 unsigned short incr[NMTUS][NCCTRL_WIN]);
687void t3_config_trace_filter(struct adapter *adapter,
688 const struct trace_params *tp, int filter_index,
689 int invert, int enable);
690int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched);
691
692void t3_sge_prep(struct adapter *adap, struct sge_params *p);
693void t3_sge_init(struct adapter *adap, struct sge_params *p);
694int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
695 enum sge_context_type type, int respq, u64 base_addr,
696 unsigned int size, unsigned int token, int gen,
697 unsigned int cidx);
698int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
699 int gts_enable, u64 base_addr, unsigned int size,
700 unsigned int esize, unsigned int cong_thres, int gen,
701 unsigned int cidx);
702int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
703 int irq_vec_idx, u64 base_addr, unsigned int size,
704 unsigned int fl_thres, int gen, unsigned int cidx);
705int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
706 unsigned int size, int rspq, int ovfl_mode,
707 unsigned int credits, unsigned int credit_thres);
708int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable);
709int t3_sge_disable_fl(struct adapter *adapter, unsigned int id);
710int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id);
711int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id);
712int t3_sge_read_ecntxt(struct adapter *adapter, unsigned int id, u32 data[4]);
713int t3_sge_read_fl(struct adapter *adapter, unsigned int id, u32 data[4]);
714int t3_sge_read_cq(struct adapter *adapter, unsigned int id, u32 data[4]);
715int t3_sge_read_rspq(struct adapter *adapter, unsigned int id, u32 data[4]);
716int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
717 unsigned int credits);
718
719void t3_vsc8211_phy_prep(struct cphy *phy, struct adapter *adapter,
720 int phy_addr, const struct mdio_ops *mdio_ops);
721void t3_ael1002_phy_prep(struct cphy *phy, struct adapter *adapter,
722 int phy_addr, const struct mdio_ops *mdio_ops);
723void t3_ael1006_phy_prep(struct cphy *phy, struct adapter *adapter,
724 int phy_addr, const struct mdio_ops *mdio_ops);
725void t3_qt2045_phy_prep(struct cphy *phy, struct adapter *adapter, int phy_addr,
726 const struct mdio_ops *mdio_ops);
727void t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter,
728 int phy_addr, const struct mdio_ops *mdio_ops);
729#endif /* __CHELSIO_COMMON_H */
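common.h funnels every PHY register access through the adapter's mdio_ops, which cphy_init() copies into the cphy so the mdio_read()/mdio_write() wrappers can supply the PHY address and MMD plumbing automatically. A trimmed stand-alone sketch of that indirection follows; the fake backend below only logs and returns a made-up value, and stands in for the real MDIO bus access.

#include <stdio.h>

struct adapter { int id; };

struct cphy {
	int addr;
	struct adapter *adapter;
	int (*mdio_read)(struct adapter *adap, int phy_addr, int mmd_addr,
			 int reg_addr, unsigned int *val);
};

/* Fake backend standing in for the adapter's real MDIO access routine. */
static int fake_read(struct adapter *adap, int phy_addr, int mmd_addr,
		     int reg_addr, unsigned int *val)
{
	printf("adap %d: read phy %d mmd %d reg %#x\n",
	       adap->id, phy_addr, mmd_addr, (unsigned int)reg_addr);
	*val = 0x796d;	/* arbitrary */
	return 0;
}

/* Wrapper in the style of common.h: callers never pass the PHY address. */
static int mdio_read(struct cphy *phy, int mmd, int reg, unsigned int *valp)
{
	return phy->mdio_read(phy->adapter, phy->addr, mmd, reg, valp);
}

int main(void)
{
	struct adapter adap = { .id = 0 };
	struct cphy phy = { .addr = 5, .adapter = &adap,
			    .mdio_read = fake_read };
	unsigned int v;

	mdio_read(&phy, 1, 0x9005, &v);	/* MMD 1 = PMA/PMD, as in the driver */
	return 0;
}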
diff --git a/drivers/net/cxgb3/cxgb3_ctl_defs.h b/drivers/net/cxgb3/cxgb3_ctl_defs.h
new file mode 100644
index 000000000000..2095ddacff78
--- /dev/null
+++ b/drivers/net/cxgb3/cxgb3_ctl_defs.h
@@ -0,0 +1,164 @@
1/*
2 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#ifndef _CXGB3_OFFLOAD_CTL_DEFS_H
33#define _CXGB3_OFFLOAD_CTL_DEFS_H
34
35enum {
36 GET_MAX_OUTSTANDING_WR,
37 GET_TX_MAX_CHUNK,
38 GET_TID_RANGE,
39 GET_STID_RANGE,
40 GET_RTBL_RANGE,
41 GET_L2T_CAPACITY,
42 GET_MTUS,
43 GET_WR_LEN,
44 GET_IFF_FROM_MAC,
45 GET_DDP_PARAMS,
46 GET_PORTS,
47
48 ULP_ISCSI_GET_PARAMS,
49 ULP_ISCSI_SET_PARAMS,
50
51 RDMA_GET_PARAMS,
52 RDMA_CQ_OP,
53 RDMA_CQ_SETUP,
54 RDMA_CQ_DISABLE,
55 RDMA_CTRL_QP_SETUP,
56 RDMA_GET_MEM,
57};
58
59/*
60 * Structure used to describe a TID range. Valid TIDs are [base, base+num).
61 */
62struct tid_range {
63 unsigned int base; /* first TID */
64 unsigned int num; /* number of TIDs in range */
65};
66
67/*
68 * Structure used to request the size and contents of the MTU table.
69 */
70struct mtutab {
71 unsigned int size; /* # of entries in the MTU table */
72 const unsigned short *mtus; /* the MTU table values */
73};
74
75struct net_device;
76
77/*
78 * Structure used to request the adapter net_device owning a given MAC address.
79 */
80struct iff_mac {
81 struct net_device *dev; /* the net_device */
82 const unsigned char *mac_addr; /* MAC address to lookup */
83 u16 vlan_tag;
84};
85
86struct pci_dev;
87
88/*
89 * Structure used to request the TCP DDP parameters.
90 */
91struct ddp_params {
92 unsigned int llimit; /* TDDP region start address */
93 unsigned int ulimit; /* TDDP region end address */
94 unsigned int tag_mask; /* TDDP tag mask */
95 struct pci_dev *pdev;
96};
97
98struct adap_ports {
99 unsigned int nports; /* number of ports on this adapter */
100 struct net_device *lldevs[2];
101};
102
103/*
104 * Structure used to return information to the iSCSI layer.
105 */
106struct ulp_iscsi_info {
107 unsigned int offset;
108 unsigned int llimit;
109 unsigned int ulimit;
110 unsigned int tagmask;
111 unsigned int pgsz3;
112 unsigned int pgsz2;
113 unsigned int pgsz1;
114 unsigned int pgsz0;
115 unsigned int max_rxsz;
116 unsigned int max_txsz;
117 struct pci_dev *pdev;
118};
119
120/*
121 * Structure used to return information to the RDMA layer.
122 */
123struct rdma_info {
124 unsigned int tpt_base; /* TPT base address */
125 unsigned int tpt_top; /* TPT last entry address */
126 unsigned int pbl_base; /* PBL base address */
127 unsigned int pbl_top; /* PBL last entry address */
128 unsigned int rqt_base; /* RQT base address */
129 unsigned int rqt_top; /* RQT last entry address */
130 unsigned int udbell_len; /* user doorbell region length */
131 unsigned long udbell_physbase; /* user doorbell physical start addr */
132 void __iomem *kdb_addr; /* kernel doorbell register address */
133 struct pci_dev *pdev; /* associated PCI device */
134};
135
136/*
137 * Structure used to request an operation on an RDMA completion queue.
138 */
139struct rdma_cq_op {
140 unsigned int id;
141 unsigned int op;
142 unsigned int credits;
143};
144
145/*
146 * Structure used to setup RDMA completion queues.
147 */
148struct rdma_cq_setup {
149 unsigned int id;
150 unsigned long long base_addr;
151 unsigned int size;
152 unsigned int credits;
153 unsigned int credit_thres;
154 unsigned int ovfl_mode;
155};
156
157/*
158 * Structure used to setup the RDMA control egress context.
159 */
160struct rdma_ctrlqp_setup {
161 unsigned long long base_addr;
162 unsigned int size;
163};
164#endif /* _CXGB3_OFFLOAD_CTL_DEFS_H */
diff --git a/drivers/net/cxgb3/cxgb3_defs.h b/drivers/net/cxgb3/cxgb3_defs.h
new file mode 100644
index 000000000000..16e004990c59
--- /dev/null
+++ b/drivers/net/cxgb3/cxgb3_defs.h
@@ -0,0 +1,99 @@
1/*
2 * Copyright (c) 2006-2007 Chelsio, Inc. All rights reserved.
3 * Copyright (c) 2006-2007 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33#ifndef _CHELSIO_DEFS_H
34#define _CHELSIO_DEFS_H
35
36#include <linux/skbuff.h>
37#include <net/tcp.h>
38
39#include "t3cdev.h"
40
41#include "cxgb3_offload.h"
42
43#define VALIDATE_TID 1
44
45void *cxgb_alloc_mem(unsigned long size);
46void cxgb_free_mem(void *addr);
47void cxgb_neigh_update(struct neighbour *neigh);
48void cxgb_redirect(struct dst_entry *old, struct dst_entry *new);
49
50/*
51 * Map an ATID or STID to its entry in the corresponding TID table.
52 */
53static inline union active_open_entry *atid2entry(const struct tid_info *t,
54 unsigned int atid)
55{
56 return &t->atid_tab[atid - t->atid_base];
57}
58
59static inline union listen_entry *stid2entry(const struct tid_info *t,
60 unsigned int stid)
61{
62 return &t->stid_tab[stid - t->stid_base];
63}
64
65/*
66 * Find the connection corresponding to a TID.
67 */
68static inline struct t3c_tid_entry *lookup_tid(const struct tid_info *t,
69 unsigned int tid)
70{
71 return tid < t->ntids ? &(t->tid_tab[tid]) : NULL;
72}
73
74/*
75 * Find the connection corresponding to a server TID.
76 */
77static inline struct t3c_tid_entry *lookup_stid(const struct tid_info *t,
78 unsigned int tid)
79{
80 if (tid < t->stid_base || tid >= t->stid_base + t->nstids)
81 return NULL;
82 return &(stid2entry(t, tid)->t3c_tid);
83}
84
85/*
86 * Find the connection corresponding to an active-open TID.
87 */
88static inline struct t3c_tid_entry *lookup_atid(const struct tid_info *t,
89 unsigned int tid)
90{
91 if (tid < t->atid_base || tid >= t->atid_base + t->natids)
92 return NULL;
93 return &(atid2entry(t, tid)->t3c_tid);
94}
95
96int process_rx(struct t3cdev *dev, struct sk_buff **skbs, int n);
97int attach_t3cdev(struct t3cdev *dev);
98void detach_t3cdev(struct t3cdev *dev);
99#endif
diff --git a/drivers/net/cxgb3/cxgb3_ioctl.h b/drivers/net/cxgb3/cxgb3_ioctl.h
new file mode 100644
index 000000000000..a94281861a66
--- /dev/null
+++ b/drivers/net/cxgb3/cxgb3_ioctl.h
@@ -0,0 +1,185 @@
1/*
2 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#ifndef __CHIOCTL_H__
33#define __CHIOCTL_H__
34
35/*
36 * Ioctl commands specific to this driver.
37 */
38enum {
39 CHELSIO_SETREG = 1024,
40 CHELSIO_GETREG,
41 CHELSIO_SETTPI,
42 CHELSIO_GETTPI,
43 CHELSIO_GETMTUTAB,
44 CHELSIO_SETMTUTAB,
45 CHELSIO_GETMTU,
46 CHELSIO_SET_PM,
47 CHELSIO_GET_PM,
48 CHELSIO_GET_TCAM,
49 CHELSIO_SET_TCAM,
50 CHELSIO_GET_TCB,
51 CHELSIO_GET_MEM,
52 CHELSIO_LOAD_FW,
53 CHELSIO_GET_PROTO,
54 CHELSIO_SET_PROTO,
55 CHELSIO_SET_TRACE_FILTER,
56 CHELSIO_SET_QSET_PARAMS,
57 CHELSIO_GET_QSET_PARAMS,
58 CHELSIO_SET_QSET_NUM,
59 CHELSIO_GET_QSET_NUM,
60 CHELSIO_SET_PKTSCHED,
61};
62
63struct ch_reg {
64 uint32_t cmd;
65 uint32_t addr;
66 uint32_t val;
67};
68
69struct ch_cntxt {
70 uint32_t cmd;
71 uint32_t cntxt_type;
72 uint32_t cntxt_id;
73 uint32_t data[4];
74};
75
76/* context types */
77enum { CNTXT_TYPE_EGRESS, CNTXT_TYPE_FL, CNTXT_TYPE_RSP, CNTXT_TYPE_CQ };
78
79struct ch_desc {
80 uint32_t cmd;
81 uint32_t queue_num;
82 uint32_t idx;
83 uint32_t size;
84 uint8_t data[128];
85};
86
87struct ch_mem_range {
88 uint32_t cmd;
89 uint32_t mem_id;
90 uint32_t addr;
91 uint32_t len;
92 uint32_t version;
93 uint8_t buf[0];
94};
95
96struct ch_qset_params {
97 uint32_t cmd;
98 uint32_t qset_idx;
99 int32_t txq_size[3];
100 int32_t rspq_size;
101 int32_t fl_size[2];
102 int32_t intr_lat;
103 int32_t polling;
104 int32_t cong_thres;
105};
106
107struct ch_pktsched_params {
108 uint32_t cmd;
109 uint8_t sched;
110 uint8_t idx;
111 uint8_t min;
112 uint8_t max;
113 uint8_t binding;
114};
115
116#ifndef TCB_SIZE
117# define TCB_SIZE 128
118#endif
119
120/* TCB size in 32-bit words */
121#define TCB_WORDS (TCB_SIZE / 4)
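/* With the default TCB_SIZE of 128 bytes, TCB_WORDS is 32, matching the
 * tcb_data[] array in struct ch_tcb below. */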
122
123enum { MEM_CM, MEM_PMRX, MEM_PMTX }; /* ch_mem_range.mem_id values */
124
125struct ch_mtus {
126 uint32_t cmd;
127 uint32_t nmtus;
128 uint16_t mtus[NMTUS];
129};
130
131struct ch_pm {
132 uint32_t cmd;
133 uint32_t tx_pg_sz;
134 uint32_t tx_num_pg;
135 uint32_t rx_pg_sz;
136 uint32_t rx_num_pg;
137 uint32_t pm_total;
138};
139
140struct ch_tcam {
141 uint32_t cmd;
142 uint32_t tcam_size;
143 uint32_t nservers;
144 uint32_t nroutes;
145 uint32_t nfilters;
146};
147
148struct ch_tcb {
149 uint32_t cmd;
150 uint32_t tcb_index;
151 uint32_t tcb_data[TCB_WORDS];
152};
153
154struct ch_tcam_word {
155 uint32_t cmd;
156 uint32_t addr;
157 uint32_t buf[3];
158};
159
160struct ch_trace {
161 uint32_t cmd;
162 uint32_t sip;
163 uint32_t sip_mask;
164 uint32_t dip;
165 uint32_t dip_mask;
166 uint16_t sport;
167 uint16_t sport_mask;
168 uint16_t dport;
169 uint16_t dport_mask;
170 uint32_t vlan:12;
171 uint32_t vlan_mask:12;
172 uint32_t intf:4;
173 uint32_t intf_mask:4;
174 uint8_t proto;
175 uint8_t proto_mask;
176 uint8_t invert_match:1;
177 uint8_t config_tx:1;
178 uint8_t config_rx:1;
179 uint8_t trace_tx:1;
180 uint8_t trace_rx:1;
181};
182
183#define SIOCCHIOCTL SIOCDEVPRIVATE
184
185#endif
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
new file mode 100644
index 000000000000..c67f7d3c2f92
--- /dev/null
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -0,0 +1,2519 @@
1/*
2 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/module.h>
33#include <linux/moduleparam.h>
34#include <linux/init.h>
35#include <linux/pci.h>
36#include <linux/dma-mapping.h>
37#include <linux/netdevice.h>
38#include <linux/etherdevice.h>
39#include <linux/if_vlan.h>
40#include <linux/mii.h>
41#include <linux/sockios.h>
42#include <linux/workqueue.h>
43#include <linux/proc_fs.h>
44#include <linux/rtnetlink.h>
45#include <asm/uaccess.h>
46
47#include "common.h"
48#include "cxgb3_ioctl.h"
49#include "regs.h"
50#include "cxgb3_offload.h"
51#include "version.h"
52
53#include "cxgb3_ctl_defs.h"
54#include "t3_cpl.h"
55#include "firmware_exports.h"
56
57enum {
58 MAX_TXQ_ENTRIES = 16384,
59 MAX_CTRL_TXQ_ENTRIES = 1024,
60 MAX_RSPQ_ENTRIES = 16384,
61 MAX_RX_BUFFERS = 16384,
62 MAX_RX_JUMBO_BUFFERS = 16384,
63 MIN_TXQ_ENTRIES = 4,
64 MIN_CTRL_TXQ_ENTRIES = 4,
65 MIN_RSPQ_ENTRIES = 32,
66 MIN_FL_ENTRIES = 32
67};
68
69#define PORT_MASK ((1 << MAX_NPORTS) - 1)
70
71#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
72 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
73 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
74
75#define EEPROM_MAGIC 0x38E2F10C
76
77#define CH_DEVICE(devid, ssid, idx) \
78 { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }
79
80static const struct pci_device_id cxgb3_pci_tbl[] = {
81 CH_DEVICE(0x20, 1, 0), /* PE9000 */
82 CH_DEVICE(0x21, 1, 1), /* T302E */
83 CH_DEVICE(0x22, 1, 2), /* T310E */
84 CH_DEVICE(0x23, 1, 3), /* T320X */
85 CH_DEVICE(0x24, 1, 1), /* T302X */
86 CH_DEVICE(0x25, 1, 3), /* T320E */
87 CH_DEVICE(0x26, 1, 2), /* T310X */
88 CH_DEVICE(0x30, 1, 2), /* T3B10 */
89 CH_DEVICE(0x31, 1, 3), /* T3B20 */
90 CH_DEVICE(0x32, 1, 1), /* T3B02 */
91 {0,}
92};
93
94MODULE_DESCRIPTION(DRV_DESC);
95MODULE_AUTHOR("Chelsio Communications");
96MODULE_LICENSE("Dual BSD/GPL");
97MODULE_VERSION(DRV_VERSION);
98MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);
99
100static int dflt_msg_enable = DFLT_MSG_ENABLE;
101
102module_param(dflt_msg_enable, int, 0644);
103MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
104
105/*
106 * The driver uses the best interrupt scheme available on a platform in the
107 * order MSI-X, MSI, legacy pin interrupts. This parameter determines which
108 * of these schemes the driver may consider as follows:
109 *
110 * msi = 2: choose from among all three options
111 * msi = 1: only consider MSI and pin interrupts
112 * msi = 0: force pin interrupts
113 */
114static int msi = 2;
115
116module_param(msi, int, 0644);
117MODULE_PARM_DESC(msi, "interrupt scheme: 0 = INTx only, 1 = MSI or INTx, 2 = MSI-X, MSI, or INTx");
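/* Example: "modprobe cxgb3 msi=1" permits MSI and pin interrupts but
 * never MSI-X; "msi=0" forces legacy pin interrupts. */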
118
119/*
120 * The driver enables offload by default.
121 * To disable it, set ofld_disable = 1.
122 */
123
124static int ofld_disable = 0;
125
126module_param(ofld_disable, int, 0644);
127MODULE_PARM_DESC(ofld_disable, "set to 1 to disable offload at init time");
128
129/*
130 * We have work elements that we need to cancel when an interface is taken
131 * down. Normally the work elements would be executed by keventd but that
132 * can deadlock because of linkwatch. If our close method takes the rtnl
133 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
134 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
135 * for our work to complete. Get our own work queue to solve this.
136 */
137static struct workqueue_struct *cxgb3_wq;
138
139/**
140 * link_report - show link status and link speed/duplex
141 * @dev: the net device whose link state is to be reported
142 *
143 * Shows the link status, speed, and duplex of a port.
144 */
145static void link_report(struct net_device *dev)
146{
147 if (!netif_carrier_ok(dev))
148 printk(KERN_INFO "%s: link down\n", dev->name);
149 else {
150 const char *s = "10Mbps";
151 const struct port_info *p = netdev_priv(dev);
152
153 switch (p->link_config.speed) {
154 case SPEED_10000:
155 s = "10Gbps";
156 break;
157 case SPEED_1000:
158 s = "1000Mbps";
159 break;
160 case SPEED_100:
161 s = "100Mbps";
162 break;
163 }
164
165 printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
166 p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
167 }
168}
169
170/**
171 * t3_os_link_changed - handle link status changes
172 * @adapter: the adapter associated with the link change
173 * @port_id: the port index whose link status has changed
174 * @link_stat: the new status of the link
175 * @speed: the new speed setting
176 * @duplex: the new duplex setting
177 * @pause: the new flow-control setting
178 *
179 * This is the OS-dependent handler for link status changes. The OS
180 * neutral handler takes care of most of the processing for these events,
181 * then calls this handler for any OS-specific processing.
182 */
183void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
184 int speed, int duplex, int pause)
185{
186 struct net_device *dev = adapter->port[port_id];
187
188 /* Skip changes from disabled ports. */
189 if (!netif_running(dev))
190 return;
191
192 if (link_stat != netif_carrier_ok(dev)) {
193 if (link_stat)
194 netif_carrier_on(dev);
195 else
196 netif_carrier_off(dev);
197 link_report(dev);
198 }
199}
200
201static void cxgb_set_rxmode(struct net_device *dev)
202{
203 struct t3_rx_mode rm;
204 struct port_info *pi = netdev_priv(dev);
205
206 init_rx_mode(&rm, dev, dev->mc_list);
207 t3_mac_set_rx_mode(&pi->mac, &rm);
208}
209
210/**
211 * link_start - enable a port
212 * @dev: the device to enable
213 *
214 * Performs the MAC and PHY actions needed to enable a port.
215 */
216static void link_start(struct net_device *dev)
217{
218 struct t3_rx_mode rm;
219 struct port_info *pi = netdev_priv(dev);
220 struct cmac *mac = &pi->mac;
221
222 init_rx_mode(&rm, dev, dev->mc_list);
223 t3_mac_reset(mac);
224 t3_mac_set_mtu(mac, dev->mtu);
225 t3_mac_set_address(mac, 0, dev->dev_addr);
226 t3_mac_set_rx_mode(mac, &rm);
227 t3_link_start(&pi->phy, mac, &pi->link_config);
228 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
229}
230
231static inline void cxgb_disable_msi(struct adapter *adapter)
232{
233 if (adapter->flags & USING_MSIX) {
234 pci_disable_msix(adapter->pdev);
235 adapter->flags &= ~USING_MSIX;
236 } else if (adapter->flags & USING_MSI) {
237 pci_disable_msi(adapter->pdev);
238 adapter->flags &= ~USING_MSI;
239 }
240}
241
242/*
243 * Interrupt handler for asynchronous events used with MSI-X.
244 */
245static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
246{
247 t3_slow_intr_handler(cookie);
248 return IRQ_HANDLED;
249}
250
251/*
252 * Name the MSI-X interrupts.
253 */
254static void name_msix_vecs(struct adapter *adap)
255{
256 int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;
257
258 snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
259 adap->msix_info[0].desc[n] = 0;
260
261 for_each_port(adap, j) {
262 struct net_device *d = adap->port[j];
263 const struct port_info *pi = netdev_priv(d);
264
265 for (i = 0; i < pi->nqsets; i++, msi_idx++) {
266 snprintf(adap->msix_info[msi_idx].desc, n,
267 "%s (queue %d)", d->name, i);
268 adap->msix_info[msi_idx].desc[n] = 0;
269 }
270 }
271}
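/*
 * Example (hypothetical device names): on an adapter with two ports of two
 * queue sets each, vector 0 keeps the adapter name while vectors 1-4 become
 * "eth0 (queue 0)", "eth0 (queue 1)", "eth1 (queue 0)", "eth1 (queue 1)".
 */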
272
273static int request_msix_data_irqs(struct adapter *adap)
274{
275 int i, j, err, qidx = 0;
276
277 for_each_port(adap, i) {
278 int nqsets = adap2pinfo(adap, i)->nqsets;
279
280 for (j = 0; j < nqsets; ++j) {
281 err = request_irq(adap->msix_info[qidx + 1].vec,
282 t3_intr_handler(adap,
283 adap->sge.qs[qidx].
284 rspq.polling), 0,
285 adap->msix_info[qidx + 1].desc,
286 &adap->sge.qs[qidx]);
287 if (err) {
288 while (--qidx >= 0)
289 free_irq(adap->msix_info[qidx + 1].vec,
290 &adap->sge.qs[qidx]);
291 return err;
292 }
293 qidx++;
294 }
295 }
296 return 0;
297}
298
299/**
300 * setup_rss - configure RSS
301 * @adap: the adapter
302 *
303 * Sets up RSS to distribute packets to multiple receive queues. We
304 * configure the RSS CPU lookup table to distribute to the number of HW
305 * receive queues, and the response queue lookup table to narrow that
306 * down to the response queues actually configured for each port.
307 * We always configure the RSS mapping for two ports since the mapping
308 * table has plenty of entries.
309 */
310static void setup_rss(struct adapter *adap)
311{
312 int i;
313 unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
314 unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
315 u8 cpus[SGE_QSETS + 1];
316 u16 rspq_map[RSS_TABLE_SIZE];
317
318 for (i = 0; i < SGE_QSETS; ++i)
319 cpus[i] = i;
320 cpus[SGE_QSETS] = 0xff; /* terminator */
321
322 for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
323 rspq_map[i] = i % nq0;
324 rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
325 }
326
327 t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
328 F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
329 V_RRCPLCPUSIZE(6), cpus, rspq_map);
330}
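/*
 * Worked example: with nq0 = 2 and nq1 = 2, the first half of rspq_map
 * alternates 0, 1, 0, 1, ... (port 0's queue sets) and the second half
 * alternates 2, 3, 2, 3, ... (port 1's queue sets, offset by nq0).
 */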
331
332/*
333 * If we have multiple receive queues per port serviced by NAPI we need one
334 * netdevice per queue as NAPI operates on netdevices. We already have one
335 * netdevice, namely the one associated with the interface, so we use dummy
336 * ones for any additional queues. Note that these netdevices exist purely
337 * so that NAPI has something to work with; they do not represent network
338 * ports and are not registered.
339 */
340static int init_dummy_netdevs(struct adapter *adap)
341{
342 int i, j, dummy_idx = 0;
343 struct net_device *nd;
344
345 for_each_port(adap, i) {
346 struct net_device *dev = adap->port[i];
347 const struct port_info *pi = netdev_priv(dev);
348
349 for (j = 0; j < pi->nqsets - 1; j++) {
350 if (!adap->dummy_netdev[dummy_idx]) {
351 nd = alloc_netdev(0, "", ether_setup);
352 if (!nd)
353 goto free_all;
354
355 nd->priv = adap;
356 nd->weight = 64;
357 set_bit(__LINK_STATE_START, &nd->state);
358 adap->dummy_netdev[dummy_idx] = nd;
359 }
360 strcpy(adap->dummy_netdev[dummy_idx]->name, dev->name);
361 dummy_idx++;
362 }
363 }
364 return 0;
365
366free_all:
367 while (--dummy_idx >= 0) {
368 free_netdev(adap->dummy_netdev[dummy_idx]);
369 adap->dummy_netdev[dummy_idx] = NULL;
370 }
371 return -ENOMEM;
372}
373
374/*
375 * Wait until all NAPI handlers are descheduled. This includes the handlers of
376 * both netdevices representing interfaces and the dummy ones for the extra
377 * queues.
378 */
379static void quiesce_rx(struct adapter *adap)
380{
381 int i;
382 struct net_device *dev;
383
384 for_each_port(adap, i) {
385 dev = adap->port[i];
386 while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
387 msleep(1);
388 }
389
390 for (i = 0; i < ARRAY_SIZE(adap->dummy_netdev); i++) {
391 dev = adap->dummy_netdev[i];
392 if (dev)
393 while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
394 msleep(1);
395 }
396}
397
398/**
399 * setup_sge_qsets - configure SGE Tx/Rx/response queues
400 * @adap: the adapter
401 *
402 * Determines how many sets of SGE queues to use and initializes them.
403 * We support multiple queue sets per port if we have MSI-X, otherwise
404 * just one queue set per port.
405 */
406static int setup_sge_qsets(struct adapter *adap)
407{
408 int i, j, err, irq_idx = 0, qset_idx = 0, dummy_dev_idx = 0;
409 unsigned int ntxq = is_offload(adap) ? SGE_TXQ_PER_SET : 1;
410
411 if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
412 irq_idx = -1;
413
414 for_each_port(adap, i) {
415 struct net_device *dev = adap->port[i];
416 const struct port_info *pi = netdev_priv(dev);
417
418 for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
419 err = t3_sge_alloc_qset(adap, qset_idx, 1,
420 (adap->flags & USING_MSIX) ? qset_idx + 1 :
421 irq_idx,
422 &adap->params.sge.qset[qset_idx], ntxq,
423 j == 0 ? dev :
424 adap->dummy_netdev[dummy_dev_idx++]);
425 if (err) {
426 t3_free_sge_resources(adap);
427 return err;
428 }
429 }
430 }
431
432 return 0;
433}
434
435static ssize_t attr_show(struct device *d, struct device_attribute *attr,
436 char *buf,
437 ssize_t(*format) (struct adapter *, char *))
438{
439 ssize_t len;
440 struct adapter *adap = to_net_dev(d)->priv;
441
442 /* Synchronize with ioctls that may shut down the device */
443 rtnl_lock();
444 len = (*format) (adap, buf);
445 rtnl_unlock();
446 return len;
447}
448
449static ssize_t attr_store(struct device *d, struct device_attribute *attr,
450 const char *buf, size_t len,
451 ssize_t(*set) (struct adapter *, unsigned int),
452 unsigned int min_val, unsigned int max_val)
453{
454 char *endp;
455 ssize_t ret;
456 unsigned int val;
457 struct adapter *adap = to_net_dev(d)->priv;
458
459 if (!capable(CAP_NET_ADMIN))
460 return -EPERM;
461
462 val = simple_strtoul(buf, &endp, 0);
463 if (endp == buf || val < min_val || val > max_val)
464 return -EINVAL;
465
466 rtnl_lock();
467 ret = (*set) (adap, val);
468 if (!ret)
469 ret = len;
470 rtnl_unlock();
471 return ret;
472}
473
474#define CXGB3_SHOW(name, val_expr) \
475static ssize_t format_##name(struct adapter *adap, char *buf) \
476{ \
477 return sprintf(buf, "%u\n", val_expr); \
478} \
479static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
480 char *buf) \
481{ \
482 return attr_show(d, attr, buf, format_##name); \
483}
484
485static ssize_t set_nfilters(struct adapter *adap, unsigned int val)
486{
487 if (adap->flags & FULL_INIT_DONE)
488 return -EBUSY;
489 if (val && adap->params.rev == 0)
490 return -EINVAL;
491 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers)
492 return -EINVAL;
493 adap->params.mc5.nfilters = val;
494 return 0;
495}
496
497static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
498 const char *buf, size_t len)
499{
500 return attr_store(d, attr, buf, len, set_nfilters, 0, ~0);
501}
502
503static ssize_t set_nservers(struct adapter *adap, unsigned int val)
504{
505 if (adap->flags & FULL_INIT_DONE)
506 return -EBUSY;
507 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters)
508 return -EINVAL;
509 adap->params.mc5.nservers = val;
510 return 0;
511}
512
513static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
514 const char *buf, size_t len)
515{
516 return attr_store(d, attr, buf, len, set_nservers, 0, ~0);
517}
518
519#define CXGB3_ATTR_R(name, val_expr) \
520CXGB3_SHOW(name, val_expr) \
521static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
522
523#define CXGB3_ATTR_RW(name, val_expr, store_method) \
524CXGB3_SHOW(name, val_expr) \
525static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)
526
527CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
528CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
529CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);
530
531static struct attribute *cxgb3_attrs[] = {
532 &dev_attr_cam_size.attr,
533 &dev_attr_nfilters.attr,
534 &dev_attr_nservers.attr,
535 NULL
536};
537
538static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
539
540static ssize_t tm_attr_show(struct device *d, struct device_attribute *attr,
541 char *buf, int sched)
542{
543 ssize_t len;
544 unsigned int v, addr, bpt, cpt;
545 struct adapter *adap = to_net_dev(d)->priv;
546
547 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
548 rtnl_lock();
549 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
550 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
551 if (sched & 1)
552 v >>= 16;
553 bpt = (v >> 8) & 0xff;
554 cpt = v & 0xff;
555 if (!cpt)
556 len = sprintf(buf, "disabled\n");
557 else {
558 v = (adap->params.vpd.cclk * 1000) / cpt;
559 len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
560 }
561 rtnl_unlock();
562 return len;
563}
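/*
 * Illustrative numbers, assuming vpd.cclk is in kHz (so cclk * 1000 is Hz):
 * with cclk = 125000 (125 MHz), cpt = 100 and bpt = 64,
 * v = 125000 * 1000 / 100 = 1,250,000 ticks/s, so the reported rate is
 * 1,250,000 * 64 / 125 = 640,000 Kbps (the / 125 converts bytes/s to Kbps:
 * * 8 bits, / 1000).
 */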
564
565static ssize_t tm_attr_store(struct device *d, struct device_attribute *attr,
566 const char *buf, size_t len, int sched)
567{
568 char *endp;
569 ssize_t ret;
570 unsigned int val;
571 struct adapter *adap = to_net_dev(d)->priv;
572
573 if (!capable(CAP_NET_ADMIN))
574 return -EPERM;
575
576 val = simple_strtoul(buf, &endp, 0);
577 if (endp == buf || val > 10000000)
578 return -EINVAL;
579
580 rtnl_lock();
581 ret = t3_config_sched(adap, val, sched);
582 if (!ret)
583 ret = len;
584 rtnl_unlock();
585 return ret;
586}
587
588#define TM_ATTR(name, sched) \
589static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
590 char *buf) \
591{ \
592 return tm_attr_show(d, attr, buf, sched); \
593} \
594static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
595 const char *buf, size_t len) \
596{ \
597 return tm_attr_store(d, attr, buf, len, sched); \
598} \
599static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
600
601TM_ATTR(sched0, 0);
602TM_ATTR(sched1, 1);
603TM_ATTR(sched2, 2);
604TM_ATTR(sched3, 3);
605TM_ATTR(sched4, 4);
606TM_ATTR(sched5, 5);
607TM_ATTR(sched6, 6);
608TM_ATTR(sched7, 7);
609
610static struct attribute *offload_attrs[] = {
611 &dev_attr_sched0.attr,
612 &dev_attr_sched1.attr,
613 &dev_attr_sched2.attr,
614 &dev_attr_sched3.attr,
615 &dev_attr_sched4.attr,
616 &dev_attr_sched5.attr,
617 &dev_attr_sched6.attr,
618 &dev_attr_sched7.attr,
619 NULL
620};
621
622static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
623
624/*
625 * Sends an sk_buff to an offload queue driver
626 * after dealing with any active network taps.
627 */
628static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
629{
630 int ret;
631
632 local_bh_disable();
633 ret = t3_offload_tx(tdev, skb);
634 local_bh_enable();
635 return ret;
636}
637
638static int write_smt_entry(struct adapter *adapter, int idx)
639{
640 struct cpl_smt_write_req *req;
641 struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);
642
643 if (!skb)
644 return -ENOMEM;
645
646 req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
647 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
648 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
649 req->mtu_idx = NMTUS - 1; /* should be 0 but there's a T3 bug */
650 req->iff = idx;
651 memset(req->src_mac1, 0, sizeof(req->src_mac1));
652 memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
653 skb->priority = 1;
654 offload_tx(&adapter->tdev, skb);
655 return 0;
656}
657
658static int init_smt(struct adapter *adapter)
659{
660 int i;
661
662 for_each_port(adapter, i)
663 write_smt_entry(adapter, i);
664 return 0;
665}
666
667static void init_port_mtus(struct adapter *adapter)
668{
669 unsigned int mtus = adapter->port[0]->mtu;
670
671 if (adapter->port[1])
672 mtus |= adapter->port[1]->mtu << 16;
673 t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
674}
675
676static void send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
677 int hi, int port)
678{
679 struct sk_buff *skb;
680 struct mngt_pktsched_wr *req;
681
682 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
683 req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
684 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
685 req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
686 req->sched = sched;
687 req->idx = qidx;
688 req->min = lo;
689 req->max = hi;
690 req->binding = port;
691 t3_mgmt_tx(adap, skb);
692}
693
694static void bind_qsets(struct adapter *adap)
695{
696 int i, j;
697
698 for_each_port(adap, i) {
699 const struct port_info *pi = adap2pinfo(adap, i);
700
701 for (j = 0; j < pi->nqsets; ++j)
702 send_pktsched_cmd(adap, 1, pi->first_qset + j, -1,
703 -1, i);
704 }
705}
706
707/**
708 * cxgb_up - enable the adapter
709 * @adap: the adapter being enabled
710 *
711 * Called when the first port is enabled, this function performs the
712 * actions necessary to make an adapter operational, such as completing
713 * the initialization of HW modules, and enabling interrupts.
714 *
715 * Must be called with the rtnl lock held.
716 */
717static int cxgb_up(struct adapter *adap)
718{
719 int err = 0;
720
721 if (!(adap->flags & FULL_INIT_DONE)) {
722 err = t3_check_fw_version(adap);
723 if (err)
724 goto out;
725
726 err = init_dummy_netdevs(adap);
727 if (err)
728 goto out;
729
730 err = t3_init_hw(adap, 0);
731 if (err)
732 goto out;
733
734 err = setup_sge_qsets(adap);
735 if (err)
736 goto out;
737
738 setup_rss(adap);
739 adap->flags |= FULL_INIT_DONE;
740 }
741
742 t3_intr_clear(adap);
743
744 if (adap->flags & USING_MSIX) {
745 name_msix_vecs(adap);
746 err = request_irq(adap->msix_info[0].vec,
747 t3_async_intr_handler, 0,
748 adap->msix_info[0].desc, adap);
749 if (err)
750 goto irq_err;
751
752 if (request_msix_data_irqs(adap)) {
753 free_irq(adap->msix_info[0].vec, adap);
754 goto irq_err;
755 }
756 } else if ((err = request_irq(adap->pdev->irq,
757 t3_intr_handler(adap,
758 adap->sge.qs[0].rspq.
759 polling),
760 (adap->flags & USING_MSI) ? 0 : SA_SHIRQ,
761 adap->name, adap)))
762 goto irq_err;
763
764 t3_sge_start(adap);
765 t3_intr_enable(adap);
766
767 if ((adap->flags & (USING_MSIX | QUEUES_BOUND)) == USING_MSIX)
768 bind_qsets(adap);
769 adap->flags |= QUEUES_BOUND;
770
771out:
772 return err;
773irq_err:
774 CH_ERR(adap, "request_irq failed, err %d\n", err);
775 goto out;
776}
777
778/*
779 * Release resources when all the ports and offloading have been stopped.
780 */
781static void cxgb_down(struct adapter *adapter)
782{
783 t3_sge_stop(adapter);
784 spin_lock_irq(&adapter->work_lock); /* sync with PHY intr task */
785 t3_intr_disable(adapter);
786 spin_unlock_irq(&adapter->work_lock);
787
788 if (adapter->flags & USING_MSIX) {
789 int i, n = 0;
790
791 free_irq(adapter->msix_info[0].vec, adapter);
792 for_each_port(adapter, i)
793 n += adap2pinfo(adapter, i)->nqsets;
794
795 for (i = 0; i < n; ++i)
796 free_irq(adapter->msix_info[i + 1].vec,
797 &adapter->sge.qs[i]);
798 } else
799 free_irq(adapter->pdev->irq, adapter);
800
801 flush_workqueue(cxgb3_wq); /* wait for external IRQ handler */
802 quiesce_rx(adapter);
803}
804
805static void schedule_chk_task(struct adapter *adap)
806{
807 unsigned int timeo;
808
809 timeo = adap->params.linkpoll_period ?
810 (HZ * adap->params.linkpoll_period) / 10 :
811 adap->params.stats_update_period * HZ;
812 if (timeo)
813 queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
814}
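/*
 * linkpoll_period is thus in tenths of a second: a value of 5 reschedules
 * the check task every HZ / 2 jiffies, i.e. every 500 ms; with link polling
 * disabled the task runs at the stats update period (in seconds).
 */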
815
816static int offload_open(struct net_device *dev)
817{
818 struct adapter *adapter = dev->priv;
819 struct t3cdev *tdev = T3CDEV(dev);
820 int adap_up = adapter->open_device_map & PORT_MASK;
821 int err = 0;
822
823 if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
824 return 0;
825
826 if (!adap_up && (err = cxgb_up(adapter)) < 0)
827 return err;
828
829 t3_tp_set_offload_mode(adapter, 1);
830 tdev->lldev = adapter->port[0];
831 err = cxgb3_offload_activate(adapter);
832 if (err)
833 goto out;
834
835 init_port_mtus(adapter);
836 t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
837 adapter->params.b_wnd,
838 adapter->params.rev == 0 ?
839 adapter->port[0]->mtu : 0xffff);
840 init_smt(adapter);
841
842 /* Never mind if the next step fails */
843 sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group);
844
845 /* Call back all registered clients */
846 cxgb3_add_clients(tdev);
847
848out:
849 /* back out the offload setup if activation failed */
850 if (err) {
851 t3_tp_set_offload_mode(adapter, 0);
852 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
853 cxgb3_set_dummy_ops(tdev);
854 }
855 return err;
856}
857
858static int offload_close(struct t3cdev *tdev)
859{
860 struct adapter *adapter = tdev2adap(tdev);
861
862 if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
863 return 0;
864
865 /* Call back all registered clients */
866 cxgb3_remove_clients(tdev);
867
868 sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);
869
870 tdev->lldev = NULL;
871 cxgb3_set_dummy_ops(tdev);
872 t3_tp_set_offload_mode(adapter, 0);
873 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
874
875 if (!adapter->open_device_map)
876 cxgb_down(adapter);
877
878 cxgb3_offload_deactivate(adapter);
879 return 0;
880}
881
882static int cxgb_open(struct net_device *dev)
883{
884 int err;
885 struct adapter *adapter = dev->priv;
886 struct port_info *pi = netdev_priv(dev);
887 int other_ports = adapter->open_device_map & PORT_MASK;
888
889 if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
890 return err;
891
892 set_bit(pi->port_id, &adapter->open_device_map);
893 if (!ofld_disable) {
894 err = offload_open(dev);
895 if (err)
896 printk(KERN_WARNING
897 "Could not initialize offload capabilities\n");
898 }
899
900 link_start(dev);
901 t3_port_intr_enable(adapter, pi->port_id);
902 netif_start_queue(dev);
903 if (!other_ports)
904 schedule_chk_task(adapter);
905
906 return 0;
907}
908
909static int cxgb_close(struct net_device *dev)
910{
911 struct adapter *adapter = dev->priv;
912 struct port_info *p = netdev_priv(dev);
913
914 t3_port_intr_disable(adapter, p->port_id);
915 netif_stop_queue(dev);
916 p->phy.ops->power_down(&p->phy, 1);
917 netif_carrier_off(dev);
918 t3_mac_disable(&p->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
919
920 spin_lock(&adapter->work_lock); /* sync with update task */
921 clear_bit(p->port_id, &adapter->open_device_map);
922 spin_unlock(&adapter->work_lock);
923
924 if (!(adapter->open_device_map & PORT_MASK))
925 cancel_rearming_delayed_workqueue(cxgb3_wq,
926 &adapter->adap_check_task);
927
928 if (!adapter->open_device_map)
929 cxgb_down(adapter);
930
931 return 0;
932}
933
934static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
935{
936 struct adapter *adapter = dev->priv;
937 struct port_info *p = netdev_priv(dev);
938 struct net_device_stats *ns = &p->netstats;
939 const struct mac_stats *pstats;
940
941 spin_lock(&adapter->stats_lock);
942 pstats = t3_mac_update_stats(&p->mac);
943 spin_unlock(&adapter->stats_lock);
944
945 ns->tx_bytes = pstats->tx_octets;
946 ns->tx_packets = pstats->tx_frames;
947 ns->rx_bytes = pstats->rx_octets;
948 ns->rx_packets = pstats->rx_frames;
949 ns->multicast = pstats->rx_mcast_frames;
950
951 ns->tx_errors = pstats->tx_underrun;
952 ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
953 pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
954 pstats->rx_fifo_ovfl;
955
956 /* detailed rx_errors */
957 ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
958 ns->rx_over_errors = 0;
959 ns->rx_crc_errors = pstats->rx_fcs_errs;
960 ns->rx_frame_errors = pstats->rx_symbol_errs;
961 ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
962 ns->rx_missed_errors = pstats->rx_cong_drops;
963
964 /* detailed tx_errors */
965 ns->tx_aborted_errors = 0;
966 ns->tx_carrier_errors = 0;
967 ns->tx_fifo_errors = pstats->tx_underrun;
968 ns->tx_heartbeat_errors = 0;
969 ns->tx_window_errors = 0;
970 return ns;
971}
972
973static u32 get_msglevel(struct net_device *dev)
974{
975 struct adapter *adapter = dev->priv;
976
977 return adapter->msg_enable;
978}
979
980static void set_msglevel(struct net_device *dev, u32 val)
981{
982 struct adapter *adapter = dev->priv;
983
984 adapter->msg_enable = val;
985}
986
987static char stats_strings[][ETH_GSTRING_LEN] = {
988 "TxOctetsOK ",
989 "TxFramesOK ",
990 "TxMulticastFramesOK",
991 "TxBroadcastFramesOK",
992 "TxPauseFrames ",
993 "TxUnderrun ",
994 "TxExtUnderrun ",
995
996 "TxFrames64 ",
997 "TxFrames65To127 ",
998 "TxFrames128To255 ",
999 "TxFrames256To511 ",
1000 "TxFrames512To1023 ",
1001 "TxFrames1024To1518 ",
1002 "TxFrames1519ToMax ",
1003
1004 "RxOctetsOK ",
1005 "RxFramesOK ",
1006 "RxMulticastFramesOK",
1007 "RxBroadcastFramesOK",
1008 "RxPauseFrames ",
1009 "RxFCSErrors ",
1010 "RxSymbolErrors ",
1011 "RxShortErrors ",
1012 "RxJabberErrors ",
1013 "RxLengthErrors ",
1014 "RxFIFOoverflow ",
1015
1016 "RxFrames64 ",
1017 "RxFrames65To127 ",
1018 "RxFrames128To255 ",
1019 "RxFrames256To511 ",
1020 "RxFrames512To1023 ",
1021 "RxFrames1024To1518 ",
1022 "RxFrames1519ToMax ",
1023
1024 "PhyFIFOErrors ",
1025 "TSO ",
1026 "VLANextractions ",
1027 "VLANinsertions ",
1028 "TxCsumOffload ",
1029 "RxCsumGood ",
1030 "RxDrops "
1031};
1032
1033static int get_stats_count(struct net_device *dev)
1034{
1035 return ARRAY_SIZE(stats_strings);
1036}
1037
1038#define T3_REGMAP_SIZE (3 * 1024)
1039
1040static int get_regs_len(struct net_device *dev)
1041{
1042 return T3_REGMAP_SIZE;
1043}
1044
1045static int get_eeprom_len(struct net_device *dev)
1046{
1047 return EEPROMSIZE;
1048}
1049
1050static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1051{
1052 u32 fw_vers = 0;
1053 struct adapter *adapter = dev->priv;
1054
1055 t3_get_fw_version(adapter, &fw_vers);
1056
1057 strcpy(info->driver, DRV_NAME);
1058 strcpy(info->version, DRV_VERSION);
1059 strcpy(info->bus_info, pci_name(adapter->pdev));
1060 if (!fw_vers)
1061 strcpy(info->fw_version, "N/A");
1062 else {
1063 snprintf(info->fw_version, sizeof(info->fw_version),
1064 "%s %u.%u.%u",
1065 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
1066 G_FW_VERSION_MAJOR(fw_vers),
1067 G_FW_VERSION_MINOR(fw_vers),
1068 G_FW_VERSION_MICRO(fw_vers));
1069 }
1070}
1071
1072static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
1073{
1074 if (stringset == ETH_SS_STATS)
1075 memcpy(data, stats_strings, sizeof(stats_strings));
1076}
1077
1078static unsigned long collect_sge_port_stats(struct adapter *adapter,
1079 struct port_info *p, int idx)
1080{
1081 int i;
1082 unsigned long tot = 0;
1083
1084 for (i = 0; i < p->nqsets; ++i)
1085 tot += adapter->sge.qs[i + p->first_qset].port_stats[idx];
1086 return tot;
1087}
1088
1089static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1090 u64 *data)
1091{
1092 struct adapter *adapter = dev->priv;
1093 struct port_info *pi = netdev_priv(dev);
1094 const struct mac_stats *s;
1095
1096 spin_lock(&adapter->stats_lock);
1097 s = t3_mac_update_stats(&pi->mac);
1098 spin_unlock(&adapter->stats_lock);
1099
1100 *data++ = s->tx_octets;
1101 *data++ = s->tx_frames;
1102 *data++ = s->tx_mcast_frames;
1103 *data++ = s->tx_bcast_frames;
1104 *data++ = s->tx_pause;
1105 *data++ = s->tx_underrun;
1106 *data++ = s->tx_fifo_urun;
1107
1108 *data++ = s->tx_frames_64;
1109 *data++ = s->tx_frames_65_127;
1110 *data++ = s->tx_frames_128_255;
1111 *data++ = s->tx_frames_256_511;
1112 *data++ = s->tx_frames_512_1023;
1113 *data++ = s->tx_frames_1024_1518;
1114 *data++ = s->tx_frames_1519_max;
1115
1116 *data++ = s->rx_octets;
1117 *data++ = s->rx_frames;
1118 *data++ = s->rx_mcast_frames;
1119 *data++ = s->rx_bcast_frames;
1120 *data++ = s->rx_pause;
1121 *data++ = s->rx_fcs_errs;
1122 *data++ = s->rx_symbol_errs;
1123 *data++ = s->rx_short;
1124 *data++ = s->rx_jabber;
1125 *data++ = s->rx_too_long;
1126 *data++ = s->rx_fifo_ovfl;
1127
1128 *data++ = s->rx_frames_64;
1129 *data++ = s->rx_frames_65_127;
1130 *data++ = s->rx_frames_128_255;
1131 *data++ = s->rx_frames_256_511;
1132 *data++ = s->rx_frames_512_1023;
1133 *data++ = s->rx_frames_1024_1518;
1134 *data++ = s->rx_frames_1519_max;
1135
1136 *data++ = pi->phy.fifo_errors;
1137
1138 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1139 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1140 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1141 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1142 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
1143 *data++ = s->rx_cong_drops;
1144}
1145
1146static inline void reg_block_dump(struct adapter *ap, void *buf,
1147 unsigned int start, unsigned int end)
1148{
1149 u32 *p = buf + start;
1150
1151 for (; start <= end; start += sizeof(u32))
1152 *p++ = t3_read_reg(ap, start);
1153}
1154
1155static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1156 void *buf)
1157{
1158 struct adapter *ap = dev->priv;
1159
1160 /*
1161 * Version scheme:
1162 * bits 0..9: chip version
1163 * bits 10..15: chip revision
1164 * bit 31: set for PCIe cards
1165 */
1166 regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
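/*
 * For example, a rev-2 (T3B) part on PCIe would report
 * 3 | (2 << 10) | (1 << 31) = 0x80000803.
 */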
1167
1168 /*
1169 * We skip the MAC statistics registers because they are clear-on-read.
1170 * Also reading multi-register stats would need to synchronize with the
1171 * periodic mac stats accumulation. Hard to justify the complexity.
1172 */
1173 memset(buf, 0, T3_REGMAP_SIZE);
1174 reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1175 reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1176 reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1177 reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1178 reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1179 reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1180 XGM_REG(A_XGM_SERDES_STAT3, 1));
1181 reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1182 XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1183}
1184
1185static int restart_autoneg(struct net_device *dev)
1186{
1187 struct port_info *p = netdev_priv(dev);
1188
1189 if (!netif_running(dev))
1190 return -EAGAIN;
1191 if (p->link_config.autoneg != AUTONEG_ENABLE)
1192 return -EINVAL;
1193 p->phy.ops->autoneg_restart(&p->phy);
1194 return 0;
1195}
1196
1197static int cxgb3_phys_id(struct net_device *dev, u32 data)
1198{
1199 int i;
1200 struct adapter *adapter = dev->priv;
1201
1202 if (data == 0)
1203 data = 2;
1204
1205 for (i = 0; i < data * 2; i++) {
1206 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1207 (i & 1) ? F_GPIO0_OUT_VAL : 0);
1208 if (msleep_interruptible(500))
1209 break;
1210 }
1211 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1212 F_GPIO0_OUT_VAL);
1213 return 0;
1214}
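/*
 * This implements ETHTOOL_PHYS_ID, e.g. "ethtool -p eth0 5" (device name
 * illustrative): GPIO0 toggles every 500 ms for 5 seconds; a duration of
 * 0 defaults to 2 seconds.
 */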
1215
1216static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1217{
1218 struct port_info *p = netdev_priv(dev);
1219
1220 cmd->supported = p->link_config.supported;
1221 cmd->advertising = p->link_config.advertising;
1222
1223 if (netif_carrier_ok(dev)) {
1224 cmd->speed = p->link_config.speed;
1225 cmd->duplex = p->link_config.duplex;
1226 } else {
1227 cmd->speed = -1;
1228 cmd->duplex = -1;
1229 }
1230
1231 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1232 cmd->phy_address = p->phy.addr;
1233 cmd->transceiver = XCVR_EXTERNAL;
1234 cmd->autoneg = p->link_config.autoneg;
1235 cmd->maxtxpkt = 0;
1236 cmd->maxrxpkt = 0;
1237 return 0;
1238}
1239
1240static int speed_duplex_to_caps(int speed, int duplex)
1241{
1242 int cap = 0;
1243
1244 switch (speed) {
1245 case SPEED_10:
1246 if (duplex == DUPLEX_FULL)
1247 cap = SUPPORTED_10baseT_Full;
1248 else
1249 cap = SUPPORTED_10baseT_Half;
1250 break;
1251 case SPEED_100:
1252 if (duplex == DUPLEX_FULL)
1253 cap = SUPPORTED_100baseT_Full;
1254 else
1255 cap = SUPPORTED_100baseT_Half;
1256 break;
1257 case SPEED_1000:
1258 if (duplex == DUPLEX_FULL)
1259 cap = SUPPORTED_1000baseT_Full;
1260 else
1261 cap = SUPPORTED_1000baseT_Half;
1262 break;
1263 case SPEED_10000:
1264 if (duplex == DUPLEX_FULL)
1265 cap = SUPPORTED_10000baseT_Full;
1266 }
1267 return cap;
1268}
1269
1270#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1271 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1272 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1273 ADVERTISED_10000baseT_Full)
1274
1275static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1276{
1277 struct port_info *p = netdev_priv(dev);
1278 struct link_config *lc = &p->link_config;
1279
1280 if (!(lc->supported & SUPPORTED_Autoneg))
1281 return -EOPNOTSUPP; /* can't change speed/duplex */
1282
1283 if (cmd->autoneg == AUTONEG_DISABLE) {
1284 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1285
1286 if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
1287 return -EINVAL;
1288 lc->requested_speed = cmd->speed;
1289 lc->requested_duplex = cmd->duplex;
1290 lc->advertising = 0;
1291 } else {
1292 cmd->advertising &= ADVERTISED_MASK;
1293 cmd->advertising &= lc->supported;
1294 if (!cmd->advertising)
1295 return -EINVAL;
1296 lc->requested_speed = SPEED_INVALID;
1297 lc->requested_duplex = DUPLEX_INVALID;
1298 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
1299 }
1300 lc->autoneg = cmd->autoneg;
1301 if (netif_running(dev))
1302 t3_link_start(&p->phy, &p->mac, lc);
1303 return 0;
1304}
1305
1306static void get_pauseparam(struct net_device *dev,
1307 struct ethtool_pauseparam *epause)
1308{
1309 struct port_info *p = netdev_priv(dev);
1310
1311 epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1312 epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1313 epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1314}
1315
1316static int set_pauseparam(struct net_device *dev,
1317 struct ethtool_pauseparam *epause)
1318{
1319 struct port_info *p = netdev_priv(dev);
1320 struct link_config *lc = &p->link_config;
1321
1322 if (epause->autoneg == AUTONEG_DISABLE)
1323 lc->requested_fc = 0;
1324 else if (lc->supported & SUPPORTED_Autoneg)
1325 lc->requested_fc = PAUSE_AUTONEG;
1326 else
1327 return -EINVAL;
1328
1329 if (epause->rx_pause)
1330 lc->requested_fc |= PAUSE_RX;
1331 if (epause->tx_pause)
1332 lc->requested_fc |= PAUSE_TX;
1333 if (lc->autoneg == AUTONEG_ENABLE) {
1334 if (netif_running(dev))
1335 t3_link_start(&p->phy, &p->mac, lc);
1336 } else {
1337 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1338 if (netif_running(dev))
1339 t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1340 }
1341 return 0;
1342}
1343
1344static u32 get_rx_csum(struct net_device *dev)
1345{
1346 struct port_info *p = netdev_priv(dev);
1347
1348 return p->rx_csum_offload;
1349}
1350
1351static int set_rx_csum(struct net_device *dev, u32 data)
1352{
1353 struct port_info *p = netdev_priv(dev);
1354
1355 p->rx_csum_offload = data;
1356 return 0;
1357}
1358
1359static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1360{
1361 struct adapter *adapter = dev->priv;
1362
1363 e->rx_max_pending = MAX_RX_BUFFERS;
1364 e->rx_mini_max_pending = 0;
1365 e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1366 e->tx_max_pending = MAX_TXQ_ENTRIES;
1367
1368 e->rx_pending = adapter->params.sge.qset[0].fl_size;
1369 e->rx_mini_pending = adapter->params.sge.qset[0].rspq_size;
1370 e->rx_jumbo_pending = adapter->params.sge.qset[0].jumbo_size;
1371 e->tx_pending = adapter->params.sge.qset[0].txq_size[0];
1372}
1373
1374static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1375{
1376 int i;
1377 struct adapter *adapter = dev->priv;
1378
1379 if (e->rx_pending > MAX_RX_BUFFERS ||
1380 e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1381 e->tx_pending > MAX_TXQ_ENTRIES ||
1382 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1383 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1384 e->rx_pending < MIN_FL_ENTRIES ||
1385 e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1386 e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1387 return -EINVAL;
1388
1389 if (adapter->flags & FULL_INIT_DONE)
1390 return -EBUSY;
1391
1392 for (i = 0; i < SGE_QSETS; ++i) {
1393 struct qset_params *q = &adapter->params.sge.qset[i];
1394
1395 q->rspq_size = e->rx_mini_pending;
1396 q->fl_size = e->rx_pending;
1397 q->jumbo_size = e->rx_jumbo_pending;
1398 q->txq_size[0] = e->tx_pending;
1399 q->txq_size[1] = e->tx_pending;
1400 q->txq_size[2] = e->tx_pending;
1401 }
1402 return 0;
1403}
1404
1405static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1406{
1407 struct adapter *adapter = dev->priv;
1408 struct qset_params *qsp = &adapter->params.sge.qset[0];
1409 struct sge_qset *qs = &adapter->sge.qs[0];
1410
1411 if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
1412 return -EINVAL;
1413
1414 qsp->coalesce_usecs = c->rx_coalesce_usecs;
1415 t3_update_qset_coalesce(qs, qsp);
1416 return 0;
1417}
1418
1419static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1420{
1421 struct adapter *adapter = dev->priv;
1422 struct qset_params *q = adapter->params.sge.qset;
1423
1424 c->rx_coalesce_usecs = q->coalesce_usecs;
1425 return 0;
1426}
1427
1428static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1429 u8 * data)
1430{
1431 int i, err = 0;
1432 struct adapter *adapter = dev->priv;
1433
1434 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1435 if (!buf)
1436 return -ENOMEM;
1437
1438 e->magic = EEPROM_MAGIC;
1439 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1440 err = t3_seeprom_read(adapter, i, (u32 *) & buf[i]);
1441
1442 if (!err)
1443 memcpy(data, buf + e->offset, e->len);
1444 kfree(buf);
1445 return err;
1446}
1447
1448static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1449 u8 * data)
1450{
1451 u8 *buf;
1452 int err = 0;
1453 u32 aligned_offset, aligned_len, *p;
1454 struct adapter *adapter = dev->priv;
1455
1456 if (eeprom->magic != EEPROM_MAGIC)
1457 return -EINVAL;
1458
1459 aligned_offset = eeprom->offset & ~3;
1460 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
1461
1462 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1463 buf = kmalloc(aligned_len, GFP_KERNEL);
1464 if (!buf)
1465 return -ENOMEM;
1466 err = t3_seeprom_read(adapter, aligned_offset, (u32 *) buf);
1467 if (!err && aligned_len > 4)
1468 err = t3_seeprom_read(adapter,
1469 aligned_offset + aligned_len - 4,
1470 (u32 *) & buf[aligned_len - 4]);
1471 if (err)
1472 goto out;
1473 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
1474 } else
1475 buf = data;
1476
1477 err = t3_seeprom_wp(adapter, 0);
1478 if (err)
1479 goto out;
1480
1481 for (p = (u32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
1482 err = t3_seeprom_write(adapter, aligned_offset, *p);
1483 aligned_offset += 4;
1484 }
1485
1486 if (!err)
1487 err = t3_seeprom_wp(adapter, 1);
1488out:
1489 if (buf != data)
1490 kfree(buf);
1491 return err;
1492}
1493
1494static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1495{
1496 wol->supported = 0;
1497 wol->wolopts = 0;
1498 memset(&wol->sopass, 0, sizeof(wol->sopass));
1499}
1500
1501static const struct ethtool_ops cxgb_ethtool_ops = {
1502 .get_settings = get_settings,
1503 .set_settings = set_settings,
1504 .get_drvinfo = get_drvinfo,
1505 .get_msglevel = get_msglevel,
1506 .set_msglevel = set_msglevel,
1507 .get_ringparam = get_sge_param,
1508 .set_ringparam = set_sge_param,
1509 .get_coalesce = get_coalesce,
1510 .set_coalesce = set_coalesce,
1511 .get_eeprom_len = get_eeprom_len,
1512 .get_eeprom = get_eeprom,
1513 .set_eeprom = set_eeprom,
1514 .get_pauseparam = get_pauseparam,
1515 .set_pauseparam = set_pauseparam,
1516 .get_rx_csum = get_rx_csum,
1517 .set_rx_csum = set_rx_csum,
1518 .get_tx_csum = ethtool_op_get_tx_csum,
1519 .set_tx_csum = ethtool_op_set_tx_csum,
1520 .get_sg = ethtool_op_get_sg,
1521 .set_sg = ethtool_op_set_sg,
1522 .get_link = ethtool_op_get_link,
1523 .get_strings = get_strings,
1524 .phys_id = cxgb3_phys_id,
1525 .nway_reset = restart_autoneg,
1526 .get_stats_count = get_stats_count,
1527 .get_ethtool_stats = get_stats,
1528 .get_regs_len = get_regs_len,
1529 .get_regs = get_regs,
1530 .get_wol = get_wol,
1531 .get_tso = ethtool_op_get_tso,
1532 .set_tso = ethtool_op_set_tso,
1533 .get_perm_addr = ethtool_op_get_perm_addr
1534};
1535
1536static int in_range(int val, int lo, int hi)
1537{
1538 return val < 0 || (val <= hi && val >= lo);
1539}
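/*
 * Negative values pass the check because the extension ioctls below use -1
 * to mean "leave this parameter unchanged"; e.g. in_range(-1, 32, 1024) and
 * in_range(64, 32, 1024) are both true.
 */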
1540
1541static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1542{
1543 int ret;
1544 u32 cmd;
1545 struct adapter *adapter = dev->priv;
1546
1547 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
1548 return -EFAULT;
1549
1550 switch (cmd) {
1551 case CHELSIO_SETREG:{
1552 struct ch_reg edata;
1553
1554 if (!capable(CAP_NET_ADMIN))
1555 return -EPERM;
1556 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1557 return -EFAULT;
1558 if ((edata.addr & 3) != 0
1559 || edata.addr >= adapter->mmio_len)
1560 return -EINVAL;
1561 writel(edata.val, adapter->regs + edata.addr);
1562 break;
1563 }
1564 case CHELSIO_GETREG:{
1565 struct ch_reg edata;
1566
1567 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1568 return -EFAULT;
1569 if ((edata.addr & 3) != 0
1570 || edata.addr >= adapter->mmio_len)
1571 return -EINVAL;
1572 edata.val = readl(adapter->regs + edata.addr);
1573 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1574 return -EFAULT;
1575 break;
1576 }
1577 case CHELSIO_SET_QSET_PARAMS:{
1578 int i;
1579 struct qset_params *q;
1580 struct ch_qset_params t;
1581
1582 if (!capable(CAP_NET_ADMIN))
1583 return -EPERM;
1584 if (copy_from_user(&t, useraddr, sizeof(t)))
1585 return -EFAULT;
1586 if (t.qset_idx >= SGE_QSETS)
1587 return -EINVAL;
1588 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
1589 !in_range(t.cong_thres, 0, 255) ||
1590 !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
1591 MAX_TXQ_ENTRIES) ||
1592 !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
1593 MAX_TXQ_ENTRIES) ||
1594 !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
1595 MAX_CTRL_TXQ_ENTRIES) ||
1596 !in_range(t.fl_size[0], MIN_FL_ENTRIES,
1597 MAX_RX_BUFFERS)
1598 || !in_range(t.fl_size[1], MIN_FL_ENTRIES,
1599 MAX_RX_JUMBO_BUFFERS)
1600 || !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
1601 MAX_RSPQ_ENTRIES))
1602 return -EINVAL;
1603 if ((adapter->flags & FULL_INIT_DONE) &&
1604 (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
1605 t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
1606 t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
1607 t.polling >= 0 || t.cong_thres >= 0))
1608 return -EBUSY;
1609
1610 q = &adapter->params.sge.qset[t.qset_idx];
1611
1612 if (t.rspq_size >= 0)
1613 q->rspq_size = t.rspq_size;
1614 if (t.fl_size[0] >= 0)
1615 q->fl_size = t.fl_size[0];
1616 if (t.fl_size[1] >= 0)
1617 q->jumbo_size = t.fl_size[1];
1618 if (t.txq_size[0] >= 0)
1619 q->txq_size[0] = t.txq_size[0];
1620 if (t.txq_size[1] >= 0)
1621 q->txq_size[1] = t.txq_size[1];
1622 if (t.txq_size[2] >= 0)
1623 q->txq_size[2] = t.txq_size[2];
1624 if (t.cong_thres >= 0)
1625 q->cong_thres = t.cong_thres;
1626 if (t.intr_lat >= 0) {
1627 struct sge_qset *qs =
1628 &adapter->sge.qs[t.qset_idx];
1629
1630 q->coalesce_usecs = t.intr_lat;
1631 t3_update_qset_coalesce(qs, q);
1632 }
1633 if (t.polling >= 0) {
1634 if (adapter->flags & USING_MSIX)
1635 q->polling = t.polling;
1636 else {
1637 /* No polling with INTx for T3A */
1638 if (adapter->params.rev == 0 &&
1639 !(adapter->flags & USING_MSI))
1640 t.polling = 0;
1641
1642 for (i = 0; i < SGE_QSETS; i++) {
 1643 q = &adapter->params.sge.qset[i];
1645 q->polling = t.polling;
1646 }
1647 }
1648 }
1649 break;
1650 }
1651 case CHELSIO_GET_QSET_PARAMS:{
1652 struct qset_params *q;
1653 struct ch_qset_params t;
1654
1655 if (copy_from_user(&t, useraddr, sizeof(t)))
1656 return -EFAULT;
1657 if (t.qset_idx >= SGE_QSETS)
1658 return -EINVAL;
1659
1660 q = &adapter->params.sge.qset[t.qset_idx];
1661 t.rspq_size = q->rspq_size;
1662 t.txq_size[0] = q->txq_size[0];
1663 t.txq_size[1] = q->txq_size[1];
1664 t.txq_size[2] = q->txq_size[2];
1665 t.fl_size[0] = q->fl_size;
1666 t.fl_size[1] = q->jumbo_size;
1667 t.polling = q->polling;
1668 t.intr_lat = q->coalesce_usecs;
1669 t.cong_thres = q->cong_thres;
1670
1671 if (copy_to_user(useraddr, &t, sizeof(t)))
1672 return -EFAULT;
1673 break;
1674 }
1675 case CHELSIO_SET_QSET_NUM:{
1676 struct ch_reg edata;
1677 struct port_info *pi = netdev_priv(dev);
1678 unsigned int i, first_qset = 0, other_qsets = 0;
1679
1680 if (!capable(CAP_NET_ADMIN))
1681 return -EPERM;
1682 if (adapter->flags & FULL_INIT_DONE)
1683 return -EBUSY;
1684 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1685 return -EFAULT;
1686 if (edata.val < 1 ||
1687 (edata.val > 1 && !(adapter->flags & USING_MSIX)))
1688 return -EINVAL;
1689
1690 for_each_port(adapter, i)
1691 if (adapter->port[i] && adapter->port[i] != dev)
1692 other_qsets += adap2pinfo(adapter, i)->nqsets;
1693
1694 if (edata.val + other_qsets > SGE_QSETS)
1695 return -EINVAL;
1696
1697 pi->nqsets = edata.val;
1698
1699 for_each_port(adapter, i)
1700 if (adapter->port[i]) {
1701 pi = adap2pinfo(adapter, i);
1702 pi->first_qset = first_qset;
1703 first_qset += pi->nqsets;
1704 }
1705 break;
1706 }
1707 case CHELSIO_GET_QSET_NUM:{
1708 struct ch_reg edata;
1709 struct port_info *pi = netdev_priv(dev);
1710
1711 edata.cmd = CHELSIO_GET_QSET_NUM;
1712 edata.val = pi->nqsets;
1713 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1714 return -EFAULT;
1715 break;
1716 }
1717 case CHELSIO_LOAD_FW:{
1718 u8 *fw_data;
1719 struct ch_mem_range t;
1720
1721 if (!capable(CAP_NET_ADMIN))
1722 return -EPERM;
1723 if (copy_from_user(&t, useraddr, sizeof(t)))
1724 return -EFAULT;
1725
1726 fw_data = kmalloc(t.len, GFP_KERNEL);
1727 if (!fw_data)
1728 return -ENOMEM;
1729
 1730 if (copy_from_user(fw_data, useraddr + sizeof(t), t.len)) {
1732 kfree(fw_data);
1733 return -EFAULT;
1734 }
1735
1736 ret = t3_load_fw(adapter, fw_data, t.len);
1737 kfree(fw_data);
1738 if (ret)
1739 return ret;
1740 break;
1741 }
1742 case CHELSIO_SETMTUTAB:{
1743 struct ch_mtus m;
1744 int i;
1745
1746 if (!is_offload(adapter))
1747 return -EOPNOTSUPP;
1748 if (!capable(CAP_NET_ADMIN))
1749 return -EPERM;
1750 if (offload_running(adapter))
1751 return -EBUSY;
1752 if (copy_from_user(&m, useraddr, sizeof(m)))
1753 return -EFAULT;
1754 if (m.nmtus != NMTUS)
1755 return -EINVAL;
1756 if (m.mtus[0] < 81) /* accommodate SACK */
1757 return -EINVAL;
1758
1759 /* MTUs must be in ascending order */
1760 for (i = 1; i < NMTUS; ++i)
1761 if (m.mtus[i] < m.mtus[i - 1])
1762 return -EINVAL;
1763
1764 memcpy(adapter->params.mtus, m.mtus,
1765 sizeof(adapter->params.mtus));
1766 break;
1767 }
1768 case CHELSIO_GET_PM:{
1769 struct tp_params *p = &adapter->params.tp;
1770 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
1771
1772 if (!is_offload(adapter))
1773 return -EOPNOTSUPP;
1774 m.tx_pg_sz = p->tx_pg_size;
1775 m.tx_num_pg = p->tx_num_pgs;
1776 m.rx_pg_sz = p->rx_pg_size;
1777 m.rx_num_pg = p->rx_num_pgs;
1778 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
1779 if (copy_to_user(useraddr, &m, sizeof(m)))
1780 return -EFAULT;
1781 break;
1782 }
1783 case CHELSIO_SET_PM:{
1784 struct ch_pm m;
1785 struct tp_params *p = &adapter->params.tp;
1786
1787 if (!is_offload(adapter))
1788 return -EOPNOTSUPP;
1789 if (!capable(CAP_NET_ADMIN))
1790 return -EPERM;
1791 if (adapter->flags & FULL_INIT_DONE)
1792 return -EBUSY;
1793 if (copy_from_user(&m, useraddr, sizeof(m)))
1794 return -EFAULT;
1795 if (!m.rx_pg_sz || (m.rx_pg_sz & (m.rx_pg_sz - 1)) ||
1796 !m.tx_pg_sz || (m.tx_pg_sz & (m.tx_pg_sz - 1)))
1797 return -EINVAL; /* not power of 2 */
1798 if (!(m.rx_pg_sz & 0x14000))
1799 return -EINVAL; /* not 16KB or 64KB */
1800 if (!(m.tx_pg_sz & 0x1554000))
1801 return -EINVAL;
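 /*
 * The two masks above encode the supported page sizes: 0x14000 is
 * (1 << 14) | (1 << 16), i.e. 16KB or 64KB for Rx, while 0x1554000
 * sets bits 14, 16, 18, 20, 22 and 24, i.e. 16KB up to 16MB in
 * powers of 4 for Tx.
 */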
1802 if (m.tx_num_pg == -1)
1803 m.tx_num_pg = p->tx_num_pgs;
1804 if (m.rx_num_pg == -1)
1805 m.rx_num_pg = p->rx_num_pgs;
1806 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
1807 return -EINVAL;
1808 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
1809 m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
1810 return -EINVAL;
1811 p->rx_pg_size = m.rx_pg_sz;
1812 p->tx_pg_size = m.tx_pg_sz;
1813 p->rx_num_pgs = m.rx_num_pg;
1814 p->tx_num_pgs = m.tx_num_pg;
1815 break;
1816 }
1817 case CHELSIO_GET_MEM:{
1818 struct ch_mem_range t;
1819 struct mc7 *mem;
1820 u64 buf[32];
1821
1822 if (!is_offload(adapter))
1823 return -EOPNOTSUPP;
1824 if (!(adapter->flags & FULL_INIT_DONE))
1825 return -EIO; /* need the memory controllers */
1826 if (copy_from_user(&t, useraddr, sizeof(t)))
1827 return -EFAULT;
1828 if ((t.addr & 7) || (t.len & 7))
1829 return -EINVAL;
1830 if (t.mem_id == MEM_CM)
1831 mem = &adapter->cm;
1832 else if (t.mem_id == MEM_PMRX)
1833 mem = &adapter->pmrx;
1834 else if (t.mem_id == MEM_PMTX)
1835 mem = &adapter->pmtx;
1836 else
1837 return -EINVAL;
1838
1839 /*
1840 * Version scheme:
1841 * bits 0..9: chip version
1842 * bits 10..15: chip revision
1843 */
1844 t.version = 3 | (adapter->params.rev << 10);
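 /*
 * Example: a rev-2 adapter reports 3 | (2 << 10) = 0x803, i.e. chip
 * version 3, chip revision 2.
 */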
1845 if (copy_to_user(useraddr, &t, sizeof(t)))
1846 return -EFAULT;
1847
1848 /*
1849 * Read 256 bytes at a time as len can be large and we don't
1850 * want to use huge intermediate buffers.
1851 */
1852 useraddr += sizeof(t); /* advance to start of buffer */
1853 while (t.len) {
1854 unsigned int chunk =
1855 min_t(unsigned int, t.len, sizeof(buf));
1856
 1857 ret = t3_mc7_bd_read(mem, t.addr / 8, chunk / 8, buf);
1860 if (ret)
1861 return ret;
1862 if (copy_to_user(useraddr, buf, chunk))
1863 return -EFAULT;
1864 useraddr += chunk;
1865 t.addr += chunk;
1866 t.len -= chunk;
1867 }
1868 break;
1869 }
1870 case CHELSIO_SET_TRACE_FILTER:{
1871 struct ch_trace t;
1872 const struct trace_params *tp;
1873
1874 if (!capable(CAP_NET_ADMIN))
1875 return -EPERM;
1876 if (!offload_running(adapter))
1877 return -EAGAIN;
1878 if (copy_from_user(&t, useraddr, sizeof(t)))
1879 return -EFAULT;
1880
1881 tp = (const struct trace_params *)&t.sip;
1882 if (t.config_tx)
1883 t3_config_trace_filter(adapter, tp, 0,
1884 t.invert_match,
1885 t.trace_tx);
1886 if (t.config_rx)
1887 t3_config_trace_filter(adapter, tp, 1,
1888 t.invert_match,
1889 t.trace_rx);
1890 break;
1891 }
1892 case CHELSIO_SET_PKTSCHED:{
1893 struct ch_pktsched_params p;
1894
1895 if (!capable(CAP_NET_ADMIN))
1896 return -EPERM;
1897 if (!adapter->open_device_map)
1898 return -EAGAIN; /* uP and SGE must be running */
1899 if (copy_from_user(&p, useraddr, sizeof(p)))
1900 return -EFAULT;
1901 send_pktsched_cmd(adapter, p.sched, p.idx, p.min, p.max,
1902 p.binding);
1903 break;
 1905 }
1906 default:
1907 return -EOPNOTSUPP;
1908 }
1909 return 0;
1910}
1911
1912static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
1913{
1914 int ret, mmd;
1915 struct adapter *adapter = dev->priv;
1916 struct port_info *pi = netdev_priv(dev);
1917 struct mii_ioctl_data *data = if_mii(req);
1918
1919 switch (cmd) {
1920 case SIOCGMIIPHY:
1921 data->phy_id = pi->phy.addr;
1922 /* FALLTHRU */
1923 case SIOCGMIIREG:{
1924 u32 val;
1925 struct cphy *phy = &pi->phy;
1926
1927 if (!phy->mdio_read)
1928 return -EOPNOTSUPP;
1929 if (is_10G(adapter)) {
1930 mmd = data->phy_id >> 8;
1931 if (!mmd)
1932 mmd = MDIO_DEV_PCS;
1933 else if (mmd > MDIO_DEV_XGXS)
1934 return -EINVAL;
1935
 1936 ret = phy->mdio_read(adapter, data->phy_id & 0x1f,
 1938 mmd, data->reg_num, &val);
1939 } else
 1940 ret = phy->mdio_read(adapter, data->phy_id & 0x1f,
 1942 0, data->reg_num & 0x1f, &val);
1944 if (!ret)
1945 data->val_out = val;
1946 break;
1947 }
1948 case SIOCSMIIREG:{
1949 struct cphy *phy = &pi->phy;
1950
1951 if (!capable(CAP_NET_ADMIN))
1952 return -EPERM;
1953 if (!phy->mdio_write)
1954 return -EOPNOTSUPP;
1955 if (is_10G(adapter)) {
1956 mmd = data->phy_id >> 8;
1957 if (!mmd)
1958 mmd = MDIO_DEV_PCS;
1959 else if (mmd > MDIO_DEV_XGXS)
1960 return -EINVAL;
1961
 1962 ret = phy->mdio_write(adapter, data->phy_id & 0x1f,
 1964 mmd, data->reg_num, data->val_in);
1967 } else
 1968 ret = phy->mdio_write(adapter, data->phy_id & 0x1f,
 1970 0, data->reg_num & 0x1f, data->val_in);
1973 break;
1974 }
1975 case SIOCCHIOCTL:
1976 return cxgb_extension_ioctl(dev, req->ifr_data);
1977 default:
1978 return -EOPNOTSUPP;
1979 }
1980 return ret;
1981}
1982
1983static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
1984{
1985 int ret;
1986 struct adapter *adapter = dev->priv;
1987 struct port_info *pi = netdev_priv(dev);
1988
1989 if (new_mtu < 81) /* accommodate SACK */
1990 return -EINVAL;
1991 if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
1992 return ret;
1993 dev->mtu = new_mtu;
1994 init_port_mtus(adapter);
1995 if (adapter->params.rev == 0 && offload_running(adapter))
1996 t3_load_mtus(adapter, adapter->params.mtus,
1997 adapter->params.a_wnd, adapter->params.b_wnd,
1998 adapter->port[0]->mtu);
1999 return 0;
2000}
2001
2002static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2003{
2004 struct adapter *adapter = dev->priv;
2005 struct port_info *pi = netdev_priv(dev);
2006 struct sockaddr *addr = p;
2007
2008 if (!is_valid_ether_addr(addr->sa_data))
2009 return -EINVAL;
2010
2011 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2012 t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
2013 if (offload_running(adapter))
2014 write_smt_entry(adapter, pi->port_id);
2015 return 0;
2016}
2017
2018/**
2019 * t3_synchronize_rx - wait for current Rx processing on a port to complete
2020 * @adap: the adapter
2021 * @p: the port
2022 *
2023 * Ensures that current Rx processing on any of the queues associated with
2024 * the given port completes before returning. We do this by acquiring and
2025 * releasing the locks of the response queues associated with the port.
2026 */
2027static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2028{
2029 int i;
2030
2031 for (i = 0; i < p->nqsets; i++) {
2032 struct sge_rspq *q = &adap->sge.qs[i + p->first_qset].rspq;
2033
2034 spin_lock_irq(&q->lock);
2035 spin_unlock_irq(&q->lock);
2036 }
2037}
2038
2039static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2040{
2041 struct adapter *adapter = dev->priv;
2042 struct port_info *pi = netdev_priv(dev);
2043
2044 pi->vlan_grp = grp;
2045 if (adapter->params.rev > 0)
2046 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2047 else {
2048 /* single control for all ports */
2049 unsigned int i, have_vlans = 0;
2050 for_each_port(adapter, i)
2051 have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2052
2053 t3_set_vlan_accel(adapter, 1, have_vlans);
2054 }
2055 t3_synchronize_rx(adapter, pi);
2056}
2057
2058static void vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
2059{
2060 /* nothing */
2061}
2062
2063#ifdef CONFIG_NET_POLL_CONTROLLER
2064static void cxgb_netpoll(struct net_device *dev)
2065{
2066 struct adapter *adapter = dev->priv;
2067 struct sge_qset *qs = dev2qset(dev);
2068
2069 t3_intr_handler(adapter, qs->rspq.polling) (adapter->pdev->irq,
2070 adapter);
2071}
2072#endif
2073
2074/*
2075 * Periodic accumulation of MAC statistics.
2076 */
2077static void mac_stats_update(struct adapter *adapter)
2078{
2079 int i;
2080
2081 for_each_port(adapter, i) {
2082 struct net_device *dev = adapter->port[i];
2083 struct port_info *p = netdev_priv(dev);
2084
2085 if (netif_running(dev)) {
2086 spin_lock(&adapter->stats_lock);
2087 t3_mac_update_stats(&p->mac);
2088 spin_unlock(&adapter->stats_lock);
2089 }
2090 }
2091}
2092
2093static void check_link_status(struct adapter *adapter)
2094{
2095 int i;
2096
2097 for_each_port(adapter, i) {
2098 struct net_device *dev = adapter->port[i];
2099 struct port_info *p = netdev_priv(dev);
2100
2101 if (!(p->port_type->caps & SUPPORTED_IRQ) && netif_running(dev))
2102 t3_link_changed(adapter, i);
2103 }
2104}
2105
2106static void t3_adap_check_task(struct work_struct *work)
2107{
2108 struct adapter *adapter = container_of(work, struct adapter,
2109 adap_check_task.work);
2110 const struct adapter_params *p = &adapter->params;
2111
2112 adapter->check_task_cnt++;
2113
2114 /* Check link status for PHYs without interrupts */
2115 if (p->linkpoll_period)
2116 check_link_status(adapter);
2117
2118 /* Accumulate MAC stats if needed */
2119 if (!p->linkpoll_period ||
2120 (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2121 p->stats_update_period) {
2122 mac_stats_update(adapter);
2123 adapter->check_task_cnt = 0;
2124 }
2125
2126 /* Schedule the next check update if any port is active. */
2127 spin_lock(&adapter->work_lock);
2128 if (adapter->open_device_map & PORT_MASK)
2129 schedule_chk_task(adapter);
2130 spin_unlock(&adapter->work_lock);
2131}
2132
2133/*
2134 * Processes external (PHY) interrupts in process context.
2135 */
2136static void ext_intr_task(struct work_struct *work)
2137{
2138 struct adapter *adapter = container_of(work, struct adapter,
2139 ext_intr_handler_task);
2140
2141 t3_phy_intr_handler(adapter);
2142
2143 /* Now reenable external interrupts */
2144 spin_lock_irq(&adapter->work_lock);
2145 if (adapter->slow_intr_mask) {
2146 adapter->slow_intr_mask |= F_T3DBG;
2147 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2148 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2149 adapter->slow_intr_mask);
2150 }
2151 spin_unlock_irq(&adapter->work_lock);
2152}
2153
2154/*
2155 * Interrupt-context handler for external (PHY) interrupts.
2156 */
2157void t3_os_ext_intr_handler(struct adapter *adapter)
2158{
2159 /*
2160 * Schedule a task to handle external interrupts as they may be slow
2161 * and we use a mutex to protect MDIO registers. We disable PHY
2162 * interrupts in the meantime and let the task reenable them when
2163 * it's done.
2164 */
2165 spin_lock(&adapter->work_lock);
2166 if (adapter->slow_intr_mask) {
2167 adapter->slow_intr_mask &= ~F_T3DBG;
2168 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2169 adapter->slow_intr_mask);
2170 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2171 }
2172 spin_unlock(&adapter->work_lock);
2173}
2174
2175void t3_fatal_err(struct adapter *adapter)
2176{
2177 unsigned int fw_status[4];
2178
2179 if (adapter->flags & FULL_INIT_DONE) {
2180 t3_sge_stop(adapter);
2181 t3_intr_disable(adapter);
2182 }
2183 CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
2184 if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2185 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2186 fw_status[0], fw_status[1],
2187 fw_status[2], fw_status[3]);
2189}
2190
2191static int __devinit cxgb_enable_msix(struct adapter *adap)
2192{
2193 struct msix_entry entries[SGE_QSETS + 1];
2194 int i, err;
2195
2196 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2197 entries[i].entry = i;
2198
2199 err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
2200 if (!err) {
2201 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2202 adap->msix_info[i].vec = entries[i].vector;
2203 } else if (err > 0)
2204 dev_info(&adap->pdev->dev,
2205 "only %d MSI-X vectors left, not using MSI-X\n", err);
2206 return err;
2207}
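/*
 * Note the return convention of this pci_enable_msix() API: 0 means all
 * requested vectors were allocated, a negative value is an error, and a
 * positive value is the number of vectors that could have been allocated,
 * which is why the err > 0 case above only logs and leaves MSI-X off.
 */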
2208
2209static void __devinit print_port_info(struct adapter *adap,
2210 const struct adapter_info *ai)
2211{
2212 static const char *pci_variant[] = {
2213 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
2214 };
2215
2216 int i;
2217 char buf[80];
2218
2219 if (is_pcie(adap))
2220 snprintf(buf, sizeof(buf), "%s x%d",
2221 pci_variant[adap->params.pci.variant],
2222 adap->params.pci.width);
2223 else
2224 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
2225 pci_variant[adap->params.pci.variant],
2226 adap->params.pci.speed, adap->params.pci.width);
2227
2228 for_each_port(adap, i) {
2229 struct net_device *dev = adap->port[i];
2230 const struct port_info *pi = netdev_priv(dev);
2231
2232 if (!test_bit(i, &adap->registered_device_map))
2233 continue;
2234 printk(KERN_INFO "%s: %s %s RNIC (rev %d) %s%s\n",
2235 dev->name, ai->desc, pi->port_type->desc,
2236 adap->params.rev, buf,
2237 (adap->flags & USING_MSIX) ? " MSI-X" :
2238 (adap->flags & USING_MSI) ? " MSI" : "");
2239 if (adap->name == dev->name && adap->params.vpd.mclk)
2240 printk(KERN_INFO "%s: %uMB CM, %uMB PMTX, %uMB PMRX\n",
2241 adap->name, t3_mc7_size(&adap->cm) >> 20,
2242 t3_mc7_size(&adap->pmtx) >> 20,
2243 t3_mc7_size(&adap->pmrx) >> 20);
2244 }
2245}
2246
2247static int __devinit init_one(struct pci_dev *pdev,
2248 const struct pci_device_id *ent)
2249{
2250 static int version_printed;
2251
2252 int i, err, pci_using_dac = 0;
2253 unsigned long mmio_start, mmio_len;
2254 const struct adapter_info *ai;
2255 struct adapter *adapter = NULL;
2256 struct port_info *pi;
2257
2258 if (!version_printed) {
2259 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
2260 ++version_printed;
2261 }
2262
2263 if (!cxgb3_wq) {
2264 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
2265 if (!cxgb3_wq) {
2266 printk(KERN_ERR DRV_NAME
2267 ": cannot initialize work queue\n");
2268 return -ENOMEM;
2269 }
2270 }
2271
2272 err = pci_request_regions(pdev, DRV_NAME);
2273 if (err) {
2274 /* Just info, some other driver may have claimed the device. */
2275 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
2276 return err;
2277 }
2278
2279 err = pci_enable_device(pdev);
2280 if (err) {
2281 dev_err(&pdev->dev, "cannot enable PCI device\n");
2282 goto out_release_regions;
2283 }
2284
2285 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2286 pci_using_dac = 1;
2287 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2288 if (err) {
2289 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
2290 "coherent allocations\n");
2291 goto out_disable_device;
2292 }
2293 } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
2294 dev_err(&pdev->dev, "no usable DMA configuration\n");
2295 goto out_disable_device;
2296 }
2297
2298 pci_set_master(pdev);
2299
2300 mmio_start = pci_resource_start(pdev, 0);
2301 mmio_len = pci_resource_len(pdev, 0);
2302 ai = t3_get_adapter_info(ent->driver_data);
2303
2304 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2305 if (!adapter) {
2306 err = -ENOMEM;
2307 goto out_disable_device;
2308 }
2309
2310 adapter->regs = ioremap_nocache(mmio_start, mmio_len);
2311 if (!adapter->regs) {
2312 dev_err(&pdev->dev, "cannot map device registers\n");
2313 err = -ENOMEM;
2314 goto out_free_adapter;
2315 }
2316
2317 adapter->pdev = pdev;
2318 adapter->name = pci_name(pdev);
2319 adapter->msg_enable = dflt_msg_enable;
2320 adapter->mmio_len = mmio_len;
2321
2322 mutex_init(&adapter->mdio_lock);
2323 spin_lock_init(&adapter->work_lock);
2324 spin_lock_init(&adapter->stats_lock);
2325
2326 INIT_LIST_HEAD(&adapter->adapter_list);
2327 INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
2328 INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
2329
2330 for (i = 0; i < ai->nports; ++i) {
2331 struct net_device *netdev;
2332
2333 netdev = alloc_etherdev(sizeof(struct port_info));
2334 if (!netdev) {
2335 err = -ENOMEM;
2336 goto out_free_dev;
2337 }
2338
2339 SET_MODULE_OWNER(netdev);
2340 SET_NETDEV_DEV(netdev, &pdev->dev);
2341
2342 adapter->port[i] = netdev;
2343 pi = netdev_priv(netdev);
2344 pi->rx_csum_offload = 1;
2345 pi->nqsets = 1;
2346 pi->first_qset = i;
2347 pi->activity = 0;
2348 pi->port_id = i;
2349 netif_carrier_off(netdev);
2350 netdev->irq = pdev->irq;
2351 netdev->mem_start = mmio_start;
2352 netdev->mem_end = mmio_start + mmio_len - 1;
2353 netdev->priv = adapter;
2354 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
2355 netdev->features |= NETIF_F_LLTX;
2356 if (pci_using_dac)
2357 netdev->features |= NETIF_F_HIGHDMA;
2358
2359 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2360 netdev->vlan_rx_register = vlan_rx_register;
2361 netdev->vlan_rx_kill_vid = vlan_rx_kill_vid;
2362
2363 netdev->open = cxgb_open;
2364 netdev->stop = cxgb_close;
2365 netdev->hard_start_xmit = t3_eth_xmit;
2366 netdev->get_stats = cxgb_get_stats;
2367 netdev->set_multicast_list = cxgb_set_rxmode;
2368 netdev->do_ioctl = cxgb_ioctl;
2369 netdev->change_mtu = cxgb_change_mtu;
2370 netdev->set_mac_address = cxgb_set_mac_addr;
2371#ifdef CONFIG_NET_POLL_CONTROLLER
2372 netdev->poll_controller = cxgb_netpoll;
2373#endif
2374 netdev->weight = 64;
2375
2376 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
2377 }
2378
2379 pci_set_drvdata(pdev, adapter->port[0]);
2380 if (t3_prep_adapter(adapter, ai, 1) < 0) {
2381 err = -ENODEV;
2382 goto out_free_dev;
2383 }
2384
2385 /*
2386 * The card is now ready to go. If any errors occur during device
2387 * registration we do not fail the whole card but rather proceed only
2388 * with the ports we manage to register successfully. However we must
2389 * register at least one net device.
2390 */
2391 for_each_port(adapter, i) {
2392 err = register_netdev(adapter->port[i]);
2393 if (err)
2394 dev_warn(&pdev->dev,
2395 "cannot register net device %s, skipping\n",
2396 adapter->port[i]->name);
2397 else {
2398 /*
2399 * Change the name we use for messages to the name of
2400 * the first successfully registered interface.
2401 */
2402 if (!adapter->registered_device_map)
2403 adapter->name = adapter->port[i]->name;
2404
2405 __set_bit(i, &adapter->registered_device_map);
2406 }
2407 }
2408 if (!adapter->registered_device_map) {
2409 dev_err(&pdev->dev, "could not register any net devices\n");
2410 goto out_free_dev;
2411 }
2412
2413 /* Driver's ready. Reflect it on LEDs */
2414 t3_led_ready(adapter);
2415
2416 if (is_offload(adapter)) {
2417 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
2418 cxgb3_adapter_ofld(adapter);
2419 }
2420
2421 /* See what interrupts we'll be using */
2422 if (msi > 1 && cxgb_enable_msix(adapter) == 0)
2423 adapter->flags |= USING_MSIX;
2424 else if (msi > 0 && pci_enable_msi(pdev) == 0)
2425 adapter->flags |= USING_MSI;
2426
2427 err = sysfs_create_group(&adapter->port[0]->dev.kobj,
2428 &cxgb3_attr_group);
2429
2430 print_port_info(adapter, ai);
2431 return 0;
2432
2433out_free_dev:
2434 iounmap(adapter->regs);
2435 for (i = ai->nports - 1; i >= 0; --i)
2436 if (adapter->port[i])
2437 free_netdev(adapter->port[i]);
2438
2439out_free_adapter:
2440 kfree(adapter);
2441
2442out_disable_device:
2443 pci_disable_device(pdev);
2444out_release_regions:
2445 pci_release_regions(pdev);
2446 pci_set_drvdata(pdev, NULL);
2447 return err;
2448}
2449
2450static void __devexit remove_one(struct pci_dev *pdev)
2451{
2452 struct net_device *dev = pci_get_drvdata(pdev);
2453
2454 if (dev) {
2455 int i;
2456 struct adapter *adapter = dev->priv;
2457
2458 t3_sge_stop(adapter);
2459 sysfs_remove_group(&adapter->port[0]->dev.kobj,
2460 &cxgb3_attr_group);
2461
2462 for_each_port(adapter, i)
2463 if (test_bit(i, &adapter->registered_device_map))
2464 unregister_netdev(adapter->port[i]);
2465
2466 if (is_offload(adapter)) {
2467 cxgb3_adapter_unofld(adapter);
2468 if (test_bit(OFFLOAD_DEVMAP_BIT,
2469 &adapter->open_device_map))
2470 offload_close(&adapter->tdev);
2471 }
2472
2473 t3_free_sge_resources(adapter);
2474 cxgb_disable_msi(adapter);
2475
2476 for (i = 0; i < ARRAY_SIZE(adapter->dummy_netdev); i++)
2477 if (adapter->dummy_netdev[i]) {
2478 free_netdev(adapter->dummy_netdev[i]);
2479 adapter->dummy_netdev[i] = NULL;
2480 }
2481
2482 for_each_port(adapter, i)
2483 if (adapter->port[i])
2484 free_netdev(adapter->port[i]);
2485
2486 iounmap(adapter->regs);
2487 kfree(adapter);
2488 pci_release_regions(pdev);
2489 pci_disable_device(pdev);
2490 pci_set_drvdata(pdev, NULL);
2491 }
2492}
2493
2494static struct pci_driver driver = {
2495 .name = DRV_NAME,
2496 .id_table = cxgb3_pci_tbl,
2497 .probe = init_one,
2498 .remove = __devexit_p(remove_one),
2499};
2500
2501static int __init cxgb3_init_module(void)
2502{
2503 int ret;
2504
2505 cxgb3_offload_init();
2506
2507 ret = pci_register_driver(&driver);
2508 return ret;
2509}
2510
2511static void __exit cxgb3_cleanup_module(void)
2512{
2513 pci_unregister_driver(&driver);
2514 if (cxgb3_wq)
2515 destroy_workqueue(cxgb3_wq);
2516}
2517
2518module_init(cxgb3_init_module);
2519module_exit(cxgb3_cleanup_module);
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
new file mode 100644
index 000000000000..c6b726643185
--- /dev/null
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -0,0 +1,1222 @@
1/*
2 * Copyright (c) 2006-2007 Chelsio, Inc. All rights reserved.
3 * Copyright (c) 2006-2007 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <linux/list.h>
35#include <net/neighbour.h>
36#include <linux/notifier.h>
37#include <asm/atomic.h>
38#include <linux/proc_fs.h>
39#include <linux/if_vlan.h>
40#include <net/netevent.h>
41#include <linux/highmem.h>
42#include <linux/vmalloc.h>
43
44#include "common.h"
45#include "regs.h"
46#include "cxgb3_ioctl.h"
47#include "cxgb3_ctl_defs.h"
48#include "cxgb3_defs.h"
49#include "l2t.h"
50#include "firmware_exports.h"
51#include "cxgb3_offload.h"
52
53static LIST_HEAD(client_list);
54static LIST_HEAD(ofld_dev_list);
55static DEFINE_MUTEX(cxgb3_db_lock);
56
57static DEFINE_RWLOCK(adapter_list_lock);
58static LIST_HEAD(adapter_list);
59
60static const unsigned int MAX_ATIDS = 64 * 1024;
61static const unsigned int ATID_BASE = 0x100000;
62
63static inline int offload_activated(struct t3cdev *tdev)
64{
65 const struct adapter *adapter = tdev2adap(tdev);
66
67 return (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map));
68}
69
70/**
71 * cxgb3_register_client - register an offload client
72 * @client: the client
73 *
 74 * Add the client to the client list,
 75 * and call back the client for each activated offload device.
76 */
77void cxgb3_register_client(struct cxgb3_client *client)
78{
79 struct t3cdev *tdev;
80
81 mutex_lock(&cxgb3_db_lock);
82 list_add_tail(&client->client_list, &client_list);
83
84 if (client->add) {
85 list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
86 if (offload_activated(tdev))
87 client->add(tdev);
88 }
89 }
90 mutex_unlock(&cxgb3_db_lock);
91}
92
93EXPORT_SYMBOL(cxgb3_register_client);
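/*
 * Usage sketch with hypothetical client names: an upper-layer driver
 * fills in a struct cxgb3_client and registers it once, e.g.
 *
 *	static struct cxgb3_client my_client = {
 *		.add	= my_add,
 *		.remove	= my_remove,
 *	};
 *	cxgb3_register_client(&my_client);
 *
 * my_add() then runs for every already-activated offload device, and
 * again for devices activated later via cxgb3_add_clients().
 */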
94
95/**
96 * cxgb3_unregister_client - unregister an offload client
97 * @client: the client
98 *
99 * Remove the client to the client list,
100 * and call backs the client for each activated offload device.
101 */
102void cxgb3_unregister_client(struct cxgb3_client *client)
103{
104 struct t3cdev *tdev;
105
106 mutex_lock(&cxgb3_db_lock);
107 list_del(&client->client_list);
108
109 if (client->remove) {
110 list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
111 if (offload_activated(tdev))
112 client->remove(tdev);
113 }
114 }
115 mutex_unlock(&cxgb3_db_lock);
116}
117
118EXPORT_SYMBOL(cxgb3_unregister_client);
119
120/**
121 * cxgb3_add_clients - activate registered clients for an offload device
122 * @tdev: the offload device
123 *
 124 * Calls back all registered clients once an offload device is activated
125 */
126void cxgb3_add_clients(struct t3cdev *tdev)
127{
128 struct cxgb3_client *client;
129
130 mutex_lock(&cxgb3_db_lock);
131 list_for_each_entry(client, &client_list, client_list) {
132 if (client->add)
133 client->add(tdev);
134 }
135 mutex_unlock(&cxgb3_db_lock);
136}
137
138/**
139 * cxgb3_remove_clients - deactivates registered clients
140 * for an offload device
141 * @tdev: the offload device
142 *
 143 * Calls back all registered clients once an offload device is deactivated
144 */
145void cxgb3_remove_clients(struct t3cdev *tdev)
146{
147 struct cxgb3_client *client;
148
149 mutex_lock(&cxgb3_db_lock);
150 list_for_each_entry(client, &client_list, client_list) {
151 if (client->remove)
152 client->remove(tdev);
153 }
154 mutex_unlock(&cxgb3_db_lock);
155}
156
157static struct net_device *get_iff_from_mac(struct adapter *adapter,
158 const unsigned char *mac,
159 unsigned int vlan)
160{
161 int i;
162
163 for_each_port(adapter, i) {
164 const struct vlan_group *grp;
165 struct net_device *dev = adapter->port[i];
166 const struct port_info *p = netdev_priv(dev);
167
168 if (!memcmp(dev->dev_addr, mac, ETH_ALEN)) {
169 if (vlan && vlan != VLAN_VID_MASK) {
170 grp = p->vlan_grp;
171 dev = grp ? grp->vlan_devices[vlan] : NULL;
172 } else
173 while (dev->master)
174 dev = dev->master;
175 return dev;
176 }
177 }
178 return NULL;
179}
180
181static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req,
182 void *data)
183{
184 int ret = 0;
185 struct ulp_iscsi_info *uiip = data;
186
187 switch (req) {
188 case ULP_ISCSI_GET_PARAMS:
189 uiip->pdev = adapter->pdev;
190 uiip->llimit = t3_read_reg(adapter, A_ULPRX_ISCSI_LLIMIT);
191 uiip->ulimit = t3_read_reg(adapter, A_ULPRX_ISCSI_ULIMIT);
192 uiip->tagmask = t3_read_reg(adapter, A_ULPRX_ISCSI_TAGMASK);
193 /*
194 * On tx, the iscsi pdu has to be <= tx page size and has to
195 * fit into the Tx PM FIFO.
196 */
197 uiip->max_txsz = min(adapter->params.tp.tx_pg_size,
198 t3_read_reg(adapter, A_PM1_TX_CFG) >> 17);
 199 /* On rx, the iscsi pdu has to be < rx page size and the
 200 * whole pdu + cpl headers has to fit into one sge buffer. */
201 uiip->max_rxsz = min_t(unsigned int,
202 adapter->params.tp.rx_pg_size,
203 (adapter->sge.qs[0].fl[1].buf_size -
204 sizeof(struct cpl_rx_data) * 2 -
205 sizeof(struct cpl_rx_data_ddp)));
206 break;
207 case ULP_ISCSI_SET_PARAMS:
208 t3_write_reg(adapter, A_ULPRX_ISCSI_TAGMASK, uiip->tagmask);
209 break;
210 default:
211 ret = -EOPNOTSUPP;
212 }
213 return ret;
214}
215
216/* Response queue used for RDMA events. */
217#define ASYNC_NOTIF_RSPQ 0
218
219static int cxgb_rdma_ctl(struct adapter *adapter, unsigned int req, void *data)
220{
221 int ret = 0;
222
223 switch (req) {
224 case RDMA_GET_PARAMS:{
225 struct rdma_info *req = data;
226 struct pci_dev *pdev = adapter->pdev;
227
228 req->udbell_physbase = pci_resource_start(pdev, 2);
229 req->udbell_len = pci_resource_len(pdev, 2);
230 req->tpt_base =
231 t3_read_reg(adapter, A_ULPTX_TPT_LLIMIT);
232 req->tpt_top = t3_read_reg(adapter, A_ULPTX_TPT_ULIMIT);
233 req->pbl_base =
234 t3_read_reg(adapter, A_ULPTX_PBL_LLIMIT);
235 req->pbl_top = t3_read_reg(adapter, A_ULPTX_PBL_ULIMIT);
236 req->rqt_base = t3_read_reg(adapter, A_ULPRX_RQ_LLIMIT);
237 req->rqt_top = t3_read_reg(adapter, A_ULPRX_RQ_ULIMIT);
238 req->kdb_addr = adapter->regs + A_SG_KDOORBELL;
239 req->pdev = pdev;
240 break;
241 }
242 case RDMA_CQ_OP:{
243 unsigned long flags;
244 struct rdma_cq_op *req = data;
245
246 /* may be called in any context */
247 spin_lock_irqsave(&adapter->sge.reg_lock, flags);
248 ret = t3_sge_cqcntxt_op(adapter, req->id, req->op,
249 req->credits);
250 spin_unlock_irqrestore(&adapter->sge.reg_lock, flags);
251 break;
252 }
253 case RDMA_GET_MEM:{
254 struct ch_mem_range *t = data;
255 struct mc7 *mem;
256
257 if ((t->addr & 7) || (t->len & 7))
258 return -EINVAL;
259 if (t->mem_id == MEM_CM)
260 mem = &adapter->cm;
261 else if (t->mem_id == MEM_PMRX)
262 mem = &adapter->pmrx;
263 else if (t->mem_id == MEM_PMTX)
264 mem = &adapter->pmtx;
265 else
266 return -EINVAL;
267
 268 ret = t3_mc7_bd_read(mem, t->addr / 8, t->len / 8,
 270 (u64 *) t->buf);
271 if (ret)
272 return ret;
273 break;
274 }
275 case RDMA_CQ_SETUP:{
276 struct rdma_cq_setup *req = data;
277
278 spin_lock_irq(&adapter->sge.reg_lock);
 279 ret = t3_sge_init_cqcntxt(adapter, req->id,
281 req->base_addr, req->size,
282 ASYNC_NOTIF_RSPQ,
283 req->ovfl_mode, req->credits,
284 req->credit_thres);
285 spin_unlock_irq(&adapter->sge.reg_lock);
286 break;
287 }
288 case RDMA_CQ_DISABLE:
289 spin_lock_irq(&adapter->sge.reg_lock);
290 ret = t3_sge_disable_cqcntxt(adapter, *(unsigned int *)data);
291 spin_unlock_irq(&adapter->sge.reg_lock);
292 break;
293 case RDMA_CTRL_QP_SETUP:{
294 struct rdma_ctrlqp_setup *req = data;
295
296 spin_lock_irq(&adapter->sge.reg_lock);
297 ret = t3_sge_init_ecntxt(adapter, FW_RI_SGEEC_START, 0,
298 SGE_CNTXT_RDMA,
299 ASYNC_NOTIF_RSPQ,
300 req->base_addr, req->size,
301 FW_RI_TID_START, 1, 0);
302 spin_unlock_irq(&adapter->sge.reg_lock);
303 break;
304 }
305 default:
306 ret = -EOPNOTSUPP;
307 }
308 return ret;
309}
310
311static int cxgb_offload_ctl(struct t3cdev *tdev, unsigned int req, void *data)
312{
313 struct adapter *adapter = tdev2adap(tdev);
314 struct tid_range *tid;
315 struct mtutab *mtup;
316 struct iff_mac *iffmacp;
317 struct ddp_params *ddpp;
318 struct adap_ports *ports;
319 int i;
320
321 switch (req) {
322 case GET_MAX_OUTSTANDING_WR:
323 *(unsigned int *)data = FW_WR_NUM;
324 break;
325 case GET_WR_LEN:
326 *(unsigned int *)data = WR_FLITS;
327 break;
328 case GET_TX_MAX_CHUNK:
329 *(unsigned int *)data = 1 << 20; /* 1MB */
330 break;
331 case GET_TID_RANGE:
332 tid = data;
333 tid->num = t3_mc5_size(&adapter->mc5) -
334 adapter->params.mc5.nroutes -
335 adapter->params.mc5.nfilters - adapter->params.mc5.nservers;
336 tid->base = 0;
337 break;
338 case GET_STID_RANGE:
339 tid = data;
340 tid->num = adapter->params.mc5.nservers;
341 tid->base = t3_mc5_size(&adapter->mc5) - tid->num -
342 adapter->params.mc5.nfilters - adapter->params.mc5.nroutes;
343 break;
344 case GET_L2T_CAPACITY:
345 *(unsigned int *)data = 2048;
346 break;
347 case GET_MTUS:
348 mtup = data;
349 mtup->size = NMTUS;
350 mtup->mtus = adapter->params.mtus;
351 break;
352 case GET_IFF_FROM_MAC:
353 iffmacp = data;
354 iffmacp->dev = get_iff_from_mac(adapter, iffmacp->mac_addr,
355 iffmacp->vlan_tag &
356 VLAN_VID_MASK);
357 break;
358 case GET_DDP_PARAMS:
359 ddpp = data;
360 ddpp->llimit = t3_read_reg(adapter, A_ULPRX_TDDP_LLIMIT);
361 ddpp->ulimit = t3_read_reg(adapter, A_ULPRX_TDDP_ULIMIT);
362 ddpp->tag_mask = t3_read_reg(adapter, A_ULPRX_TDDP_TAGMASK);
363 break;
364 case GET_PORTS:
365 ports = data;
366 ports->nports = adapter->params.nports;
367 for_each_port(adapter, i)
368 ports->lldevs[i] = adapter->port[i];
369 break;
370 case ULP_ISCSI_GET_PARAMS:
371 case ULP_ISCSI_SET_PARAMS:
372 if (!offload_running(adapter))
373 return -EAGAIN;
374 return cxgb_ulp_iscsi_ctl(adapter, req, data);
375 case RDMA_GET_PARAMS:
376 case RDMA_CQ_OP:
377 case RDMA_CQ_SETUP:
378 case RDMA_CQ_DISABLE:
379 case RDMA_CTRL_QP_SETUP:
380 case RDMA_GET_MEM:
381 if (!offload_running(adapter))
382 return -EAGAIN;
383 return cxgb_rdma_ctl(adapter, req, data);
384 default:
385 return -EOPNOTSUPP;
386 }
387 return 0;
388}
389
390/*
391 * Dummy handler for Rx offload packets in case we get an offload packet before
 392 * proper processing is set up. This complains and drops the packet as it isn't
393 * normal to get offload packets at this stage.
394 */
395static int rx_offload_blackhole(struct t3cdev *dev, struct sk_buff **skbs,
396 int n)
397{
398 CH_ERR(tdev2adap(dev), "%d unexpected offload packets, first data %u\n",
399 n, ntohl(*(__be32 *)skbs[0]->data));
400 while (n--)
401 dev_kfree_skb_any(skbs[n]);
402 return 0;
403}
404
405static void dummy_neigh_update(struct t3cdev *dev, struct neighbour *neigh)
406{
407}
408
409void cxgb3_set_dummy_ops(struct t3cdev *dev)
410{
411 dev->recv = rx_offload_blackhole;
412 dev->neigh_update = dummy_neigh_update;
413}
414
415/*
416 * Free an active-open TID.
417 */
418void *cxgb3_free_atid(struct t3cdev *tdev, int atid)
419{
420 struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
421 union active_open_entry *p = atid2entry(t, atid);
422 void *ctx = p->t3c_tid.ctx;
423
424 spin_lock_bh(&t->atid_lock);
425 p->next = t->afree;
426 t->afree = p;
427 t->atids_in_use--;
428 spin_unlock_bh(&t->atid_lock);
429
430 return ctx;
431}
432
433EXPORT_SYMBOL(cxgb3_free_atid);
434
435/*
436 * Free a server TID and return it to the free pool.
437 */
438void cxgb3_free_stid(struct t3cdev *tdev, int stid)
439{
440 struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
441 union listen_entry *p = stid2entry(t, stid);
442
443 spin_lock_bh(&t->stid_lock);
444 p->next = t->sfree;
445 t->sfree = p;
446 t->stids_in_use--;
447 spin_unlock_bh(&t->stid_lock);
448}
449
450EXPORT_SYMBOL(cxgb3_free_stid);
451
452void cxgb3_insert_tid(struct t3cdev *tdev, struct cxgb3_client *client,
453 void *ctx, unsigned int tid)
454{
455 struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
456
457 t->tid_tab[tid].client = client;
458 t->tid_tab[tid].ctx = ctx;
459 atomic_inc(&t->tids_in_use);
460}
461
462EXPORT_SYMBOL(cxgb3_insert_tid);
463
464/*
 465 * Populate a TID_RELEASE WR. The skb must already be properly sized.
466 */
467static inline void mk_tid_release(struct sk_buff *skb, unsigned int tid)
468{
469 struct cpl_tid_release *req;
470
471 skb->priority = CPL_PRIORITY_SETUP;
472 req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
473 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
474 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
475}
476
477static void t3_process_tid_release_list(struct work_struct *work)
478{
479 struct t3c_data *td = container_of(work, struct t3c_data,
480 tid_release_task);
481 struct sk_buff *skb;
482 struct t3cdev *tdev = td->dev;
 483
485 spin_lock_bh(&td->tid_release_lock);
486 while (td->tid_release_list) {
487 struct t3c_tid_entry *p = td->tid_release_list;
488
489 td->tid_release_list = (struct t3c_tid_entry *)p->ctx;
490 spin_unlock_bh(&td->tid_release_lock);
491
492 skb = alloc_skb(sizeof(struct cpl_tid_release),
493 GFP_KERNEL | __GFP_NOFAIL);
494 mk_tid_release(skb, p - td->tid_maps.tid_tab);
495 cxgb3_ofld_send(tdev, skb);
496 p->ctx = NULL;
497 spin_lock_bh(&td->tid_release_lock);
498 }
499 spin_unlock_bh(&td->tid_release_lock);
500}
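/*
 * Note that the release lock is dropped around the blocking allocation
 * above; __GFP_NOFAIL guarantees the skb eventually arrives, so a queued
 * TID release is never silently lost.
 */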
501
502/* use ctx as a next pointer in the tid release list */
503void cxgb3_queue_tid_release(struct t3cdev *tdev, unsigned int tid)
504{
505 struct t3c_data *td = T3C_DATA(tdev);
506 struct t3c_tid_entry *p = &td->tid_maps.tid_tab[tid];
507
508 spin_lock_bh(&td->tid_release_lock);
509 p->ctx = (void *)td->tid_release_list;
510 td->tid_release_list = p;
511 if (!p->ctx)
512 schedule_work(&td->tid_release_task);
513 spin_unlock_bh(&td->tid_release_lock);
514}
515
516EXPORT_SYMBOL(cxgb3_queue_tid_release);
517
518/*
519 * Remove a tid from the TID table. A client may defer processing its last
520 * CPL message if it is locked at the time it arrives, and while the message
521 * sits in the client's backlog the TID may be reused for another connection.
522 * To handle this we atomically switch the TID association if it still points
523 * to the original client context.
524 */
525void cxgb3_remove_tid(struct t3cdev *tdev, void *ctx, unsigned int tid)
526{
527 struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
528
529 BUG_ON(tid >= t->ntids);
530 if (tdev->type == T3A)
531 (void)cmpxchg(&t->tid_tab[tid].ctx, ctx, NULL);
532 else {
533 struct sk_buff *skb;
534
535 skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
536 if (likely(skb)) {
537 mk_tid_release(skb, tid);
538 cxgb3_ofld_send(tdev, skb);
539 t->tid_tab[tid].ctx = NULL;
540 } else
541 cxgb3_queue_tid_release(tdev, tid);
542 }
543 atomic_dec(&t->tids_in_use);
544}
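/*
 * The cmpxchg() in the T3A path above clears the context only if it
 * still belongs to this connection; if the TID has already been reused
 * the compare fails and the new owner's context is left intact.
 */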
545
546EXPORT_SYMBOL(cxgb3_remove_tid);
547
548int cxgb3_alloc_atid(struct t3cdev *tdev, struct cxgb3_client *client,
549 void *ctx)
550{
551 int atid = -1;
552 struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
553
554 spin_lock_bh(&t->atid_lock);
555 if (t->afree) {
556 union active_open_entry *p = t->afree;
557
558 atid = (p - t->atid_tab) + t->atid_base;
559 t->afree = p->next;
560 p->t3c_tid.ctx = ctx;
561 p->t3c_tid.client = client;
562 t->atids_in_use++;
563 }
564 spin_unlock_bh(&t->atid_lock);
565 return atid;
566}
567
568EXPORT_SYMBOL(cxgb3_alloc_atid);
569
570int cxgb3_alloc_stid(struct t3cdev *tdev, struct cxgb3_client *client,
571 void *ctx)
572{
573 int stid = -1;
574 struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
575
576 spin_lock_bh(&t->stid_lock);
577 if (t->sfree) {
578 union listen_entry *p = t->sfree;
579
580 stid = (p - t->stid_tab) + t->stid_base;
581 t->sfree = p->next;
582 p->t3c_tid.ctx = ctx;
583 p->t3c_tid.client = client;
584 t->stids_in_use++;
585 }
586 spin_unlock_bh(&t->stid_lock);
587 return stid;
588}
589
590EXPORT_SYMBOL(cxgb3_alloc_stid);
591
592static int do_smt_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
593{
594 struct cpl_smt_write_rpl *rpl = cplhdr(skb);
595
596 if (rpl->status != CPL_ERR_NONE)
597 printk(KERN_ERR
598 "Unexpected SMT_WRITE_RPL status %u for entry %u\n",
599 rpl->status, GET_TID(rpl));
600
601 return CPL_RET_BUF_DONE;
602}
603
604static int do_l2t_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
605{
606 struct cpl_l2t_write_rpl *rpl = cplhdr(skb);
607
608 if (rpl->status != CPL_ERR_NONE)
609 printk(KERN_ERR
610 "Unexpected L2T_WRITE_RPL status %u for entry %u\n",
611 rpl->status, GET_TID(rpl));
612
613 return CPL_RET_BUF_DONE;
614}
615
616static int do_act_open_rpl(struct t3cdev *dev, struct sk_buff *skb)
617{
618 struct cpl_act_open_rpl *rpl = cplhdr(skb);
619 unsigned int atid = G_TID(ntohl(rpl->atid));
620 struct t3c_tid_entry *t3c_tid;
621
622 t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid);
623 if (t3c_tid->ctx && t3c_tid->client && t3c_tid->client->handlers &&
624 t3c_tid->client->handlers[CPL_ACT_OPEN_RPL]) {
 625 return t3c_tid->client->handlers[CPL_ACT_OPEN_RPL] (dev, skb,
 627 t3c_tid->ctx);
628 } else {
629 printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
630 dev->name, CPL_ACT_OPEN_RPL);
631 return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
632 }
633}
634
635static int do_stid_rpl(struct t3cdev *dev, struct sk_buff *skb)
636{
637 union opcode_tid *p = cplhdr(skb);
638 unsigned int stid = G_TID(ntohl(p->opcode_tid));
639 struct t3c_tid_entry *t3c_tid;
640
641 t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid);
642 if (t3c_tid->ctx && t3c_tid->client->handlers &&
643 t3c_tid->client->handlers[p->opcode]) {
644 return t3c_tid->client->handlers[p->opcode] (dev, skb,
645 t3c_tid->ctx);
646 } else {
647 printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
648 dev->name, p->opcode);
649 return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
650 }
651}
652
653static int do_hwtid_rpl(struct t3cdev *dev, struct sk_buff *skb)
654{
655 union opcode_tid *p = cplhdr(skb);
656 unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
657 struct t3c_tid_entry *t3c_tid;
658
659 t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
660 if (t3c_tid->ctx && t3c_tid->client->handlers &&
661 t3c_tid->client->handlers[p->opcode]) {
662 return t3c_tid->client->handlers[p->opcode]
663 (dev, skb, t3c_tid->ctx);
664 } else {
665 printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
666 dev->name, p->opcode);
667 return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
668 }
669}
670
671static int do_cr(struct t3cdev *dev, struct sk_buff *skb)
672{
673 struct cpl_pass_accept_req *req = cplhdr(skb);
674 unsigned int stid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
675 struct t3c_tid_entry *t3c_tid;
676
677 t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid);
678 if (t3c_tid->ctx && t3c_tid->client->handlers &&
679 t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]) {
680 return t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]
681 (dev, skb, t3c_tid->ctx);
682 } else {
683 printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
684 dev->name, CPL_PASS_ACCEPT_REQ);
685 return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
686 }
687}
688
689static int do_abort_req_rss(struct t3cdev *dev, struct sk_buff *skb)
690{
691 union opcode_tid *p = cplhdr(skb);
692 unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
693 struct t3c_tid_entry *t3c_tid;
694
695 t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
696 if (t3c_tid->ctx && t3c_tid->client->handlers &&
697 t3c_tid->client->handlers[p->opcode]) {
698 return t3c_tid->client->handlers[p->opcode]
699 (dev, skb, t3c_tid->ctx);
700 } else {
701 struct cpl_abort_req_rss *req = cplhdr(skb);
702 struct cpl_abort_rpl *rpl;
703
704 struct sk_buff *skb =
705 alloc_skb(sizeof(struct cpl_abort_rpl), GFP_ATOMIC);
706 if (!skb) {
 707 printk(KERN_ERR "do_abort_req_rss: couldn't get skb!\n");
708 goto out;
709 }
710 skb->priority = CPL_PRIORITY_DATA;
711 __skb_put(skb, sizeof(struct cpl_abort_rpl));
712 rpl = cplhdr(skb);
713 rpl->wr.wr_hi =
714 htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
715 rpl->wr.wr_lo = htonl(V_WR_TID(GET_TID(req)));
716 OPCODE_TID(rpl) =
717 htonl(MK_OPCODE_TID(CPL_ABORT_RPL, GET_TID(req)));
718 rpl->cmd = req->status;
719 cxgb3_ofld_send(dev, skb);
720out:
721 return CPL_RET_BUF_DONE;
722 }
723}
724
725static int do_act_establish(struct t3cdev *dev, struct sk_buff *skb)
726{
727 struct cpl_act_establish *req = cplhdr(skb);
728 unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
729 struct t3c_tid_entry *t3c_tid;
730
731 t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid);
732 if (t3c_tid->ctx && t3c_tid->client->handlers &&
733 t3c_tid->client->handlers[CPL_ACT_ESTABLISH]) {
734 return t3c_tid->client->handlers[CPL_ACT_ESTABLISH]
735 (dev, skb, t3c_tid->ctx);
736 } else {
737 printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
 738 dev->name, CPL_ACT_ESTABLISH);
739 return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
740 }
741}
742
743static int do_set_tcb_rpl(struct t3cdev *dev, struct sk_buff *skb)
744{
745 struct cpl_set_tcb_rpl *rpl = cplhdr(skb);
746
747 if (rpl->status != CPL_ERR_NONE)
748 printk(KERN_ERR
749 "Unexpected SET_TCB_RPL status %u for tid %u\n",
750 rpl->status, GET_TID(rpl));
751 return CPL_RET_BUF_DONE;
752}
753
754static int do_trace(struct t3cdev *dev, struct sk_buff *skb)
755{
756 struct cpl_trace_pkt *p = cplhdr(skb);
757
758 skb->protocol = htons(0xffff);
759 skb->dev = dev->lldev;
760 skb_pull(skb, sizeof(*p));
761 skb->mac.raw = skb->data;
762 netif_receive_skb(skb);
763 return 0;
764}
765
766static int do_term(struct t3cdev *dev, struct sk_buff *skb)
767{
768 unsigned int hwtid = ntohl(skb->priority) >> 8 & 0xfffff;
769 unsigned int opcode = G_OPCODE(ntohl(skb->csum));
770 struct t3c_tid_entry *t3c_tid;
771
772 t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
773 if (t3c_tid->ctx && t3c_tid->client->handlers &&
774 t3c_tid->client->handlers[opcode]) {
775 return t3c_tid->client->handlers[opcode] (dev, skb,
776 t3c_tid->ctx);
777 } else {
778 printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
779 dev->name, opcode);
780 return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
781 }
782}
783
784static int nb_callback(struct notifier_block *self, unsigned long event,
785 void *ctx)
786{
787 switch (event) {
788 case (NETEVENT_NEIGH_UPDATE):{
789 cxgb_neigh_update((struct neighbour *)ctx);
790 break;
791 }
792 case (NETEVENT_PMTU_UPDATE):
793 break;
794 case (NETEVENT_REDIRECT):{
795 struct netevent_redirect *nr = ctx;
796 cxgb_redirect(nr->old, nr->new);
797 cxgb_neigh_update(nr->new->neighbour);
798 break;
799 }
800 default:
801 break;
802 }
803 return 0;
804}
805
806static struct notifier_block nb = {
807 .notifier_call = nb_callback
808};
809
810/*
811 * Process a received packet with an unknown/unexpected CPL opcode.
812 */
813static int do_bad_cpl(struct t3cdev *dev, struct sk_buff *skb)
814{
815 printk(KERN_ERR "%s: received bad CPL command 0x%x\n", dev->name,
816 *skb->data);
817 return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
818}
819
820/*
821 * Handlers for each CPL opcode
822 */
823static cpl_handler_func cpl_handlers[NUM_CPL_CMDS];
824
825/*
826 * Add a new handler to the CPL dispatch table. A NULL handler may be supplied
827 * to unregister an existing handler.
828 */
829void t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h)
830{
831 if (opcode < NUM_CPL_CMDS)
832 cpl_handlers[opcode] = h ? h : do_bad_cpl;
833 else
834 printk(KERN_ERR "T3C: handler registration for "
835 "opcode %x failed\n", opcode);
836}
837
838EXPORT_SYMBOL(t3_register_cpl_handler);
839
840/*
841 * T3CDEV's receive method.
842 */
843int process_rx(struct t3cdev *dev, struct sk_buff **skbs, int n)
844{
845 while (n--) {
846 struct sk_buff *skb = *skbs++;
847 unsigned int opcode = G_OPCODE(ntohl(skb->csum));
848 int ret = cpl_handlers[opcode] (dev, skb);
849
850#if VALIDATE_TID
851 if (ret & CPL_RET_UNKNOWN_TID) {
852 union opcode_tid *p = cplhdr(skb);
853
854 printk(KERN_ERR "%s: CPL message (opcode %u) had "
855 "unknown TID %u\n", dev->name, opcode,
856 G_TID(ntohl(p->opcode_tid)));
857 }
858#endif
859 if (ret & CPL_RET_BUF_DONE)
860 kfree_skb(skb);
861 }
862 return 0;
863}
864
865/*
866 * Sends an sk_buff to a T3C driver after dealing with any active network taps.
867 */
868int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb)
869{
870 int r;
871
872 local_bh_disable();
873 r = dev->send(dev, skb);
874 local_bh_enable();
875 return r;
876}
877
878EXPORT_SYMBOL(cxgb3_ofld_send);
879
880static int is_offloading(struct net_device *dev)
881{
882 struct adapter *adapter;
883 int i;
884
885 read_lock_bh(&adapter_list_lock);
886 list_for_each_entry(adapter, &adapter_list, adapter_list) {
887 for_each_port(adapter, i) {
888 if (dev == adapter->port[i]) {
889 read_unlock_bh(&adapter_list_lock);
890 return 1;
891 }
892 }
893 }
894 read_unlock_bh(&adapter_list_lock);
895 return 0;
896}
897
898void cxgb_neigh_update(struct neighbour *neigh)
899{
900 struct net_device *dev = neigh->dev;
901
902 if (dev && (is_offloading(dev))) {
903 struct t3cdev *tdev = T3CDEV(dev);
904
905 BUG_ON(!tdev);
906 t3_l2t_update(tdev, neigh);
907 }
908}
909
910static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e)
911{
912 struct sk_buff *skb;
913 struct cpl_set_tcb_field *req;
914
915 skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
916 if (!skb) {
917 printk(KERN_ERR "%s: cannot allocate skb!\n", __FUNCTION__);
918 return;
919 }
920 skb->priority = CPL_PRIORITY_CONTROL;
921 req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
922 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
923 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
924 req->reply = 0;
925 req->cpu_idx = 0;
926 req->word = htons(W_TCB_L2T_IX);
927 req->mask = cpu_to_be64(V_TCB_L2T_IX(M_TCB_L2T_IX));
928 req->val = cpu_to_be64(V_TCB_L2T_IX(e->idx));
929 tdev->send(tdev, skb);
930}
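/*
 * The CPL_SET_TCB_FIELD built above rewrites a single TCB field:
 * req->word selects the word holding the L2T index, req->mask covers
 * exactly the V_TCB_L2T_IX bits, and req->val supplies the new index
 * e->idx.
 */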
931
932void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
933{
934 struct net_device *olddev, *newdev;
935 struct tid_info *ti;
936 struct t3cdev *tdev;
937 u32 tid;
938 int update_tcb;
939 struct l2t_entry *e;
940 struct t3c_tid_entry *te;
941
942 olddev = old->neighbour->dev;
943 newdev = new->neighbour->dev;
944 if (!is_offloading(olddev))
945 return;
946 if (!is_offloading(newdev)) {
 947 printk(KERN_WARNING "%s: Redirect to non-offload "
 948 "device ignored.\n", __FUNCTION__);
949 return;
950 }
951 tdev = T3CDEV(olddev);
952 BUG_ON(!tdev);
953 if (tdev != T3CDEV(newdev)) {
954 printk(KERN_WARNING "%s: Redirect to different "
955 "offload device ignored.\n", __FUNCTION__);
956 return;
957 }
958
959 /* Add new L2T entry */
960 e = t3_l2t_get(tdev, new->neighbour, newdev);
961 if (!e) {
962 printk(KERN_ERR "%s: couldn't allocate new l2t entry!\n",
963 __FUNCTION__);
964 return;
965 }
966
967 /* Walk tid table and notify clients of dst change. */
968 ti = &(T3C_DATA(tdev))->tid_maps;
969 for (tid = 0; tid < ti->ntids; tid++) {
970 te = lookup_tid(ti, tid);
971 BUG_ON(!te);
972 if (te->ctx && te->client && te->client->redirect) {
973 update_tcb = te->client->redirect(te->ctx, old, new, e);
974 if (update_tcb) {
975 l2t_hold(L2DATA(tdev), e);
976 set_l2t_ix(tdev, tid, e);
977 }
978 }
979 }
980 l2t_release(L2DATA(tdev), e);
981}
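/*
 * Reference counting in the redirect path: t3_l2t_get() returns e with
 * one reference held for the lookup, each client that commits to the
 * new entry adds one via l2t_hold(), and the final l2t_release() drops
 * the lookup reference.
 */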
982
983/*
984 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
985 * The allocated memory is cleared.
986 */
987void *cxgb_alloc_mem(unsigned long size)
988{
989 void *p = kmalloc(size, GFP_KERNEL);
990
991 if (!p)
992 p = vmalloc(size);
993 if (p)
994 memset(p, 0, size);
995 return p;
996}
997
998/*
 999 * Free memory allocated through cxgb_alloc_mem().
1000 */
1001void cxgb_free_mem(void *addr)
1002{
1003 unsigned long p = (unsigned long)addr;
1004
1005 if (p >= VMALLOC_START && p < VMALLOC_END)
1006 vfree(addr);
1007 else
1008 kfree(addr);
1009}
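/*
 * cxgb_free_mem() relies on the address range to tell vmalloc memory
 * apart from kmalloc memory, so it must only be passed pointers from
 * cxgb_alloc_mem(), e.g. the TID tables allocated in init_tid_tabs()
 * below and released in free_tid_maps().
 */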
1010
1011/*
1012 * Allocate and initialize the TID tables. Returns 0 on success.
1013 */
1014static int init_tid_tabs(struct tid_info *t, unsigned int ntids,
1015 unsigned int natids, unsigned int nstids,
1016 unsigned int atid_base, unsigned int stid_base)
1017{
1018 unsigned long size = ntids * sizeof(*t->tid_tab) +
1019 natids * sizeof(*t->atid_tab) + nstids * sizeof(*t->stid_tab);
1020
1021 t->tid_tab = cxgb_alloc_mem(size);
1022 if (!t->tid_tab)
1023 return -ENOMEM;
1024
1025 t->stid_tab = (union listen_entry *)&t->tid_tab[ntids];
1026 t->atid_tab = (union active_open_entry *)&t->stid_tab[nstids];
1027 t->ntids = ntids;
1028 t->nstids = nstids;
1029 t->stid_base = stid_base;
1030 t->sfree = NULL;
1031 t->natids = natids;
1032 t->atid_base = atid_base;
1033 t->afree = NULL;
1034 t->stids_in_use = t->atids_in_use = 0;
1035 atomic_set(&t->tids_in_use, 0);
1036 spin_lock_init(&t->stid_lock);
1037 spin_lock_init(&t->atid_lock);
1038
 1039 /*
 1040 * Set up the free lists for stid_tab and atid_tab. The final entry's
 1041 * next pointer is left NULL by the zeroed cxgb_alloc_mem() allocation.
 */
1042 if (nstids) {
1043 while (--nstids)
1044 t->stid_tab[nstids - 1].next = &t->stid_tab[nstids];
1045 t->sfree = t->stid_tab;
1046 }
1047 if (natids) {
1048 while (--natids)
1049 t->atid_tab[natids - 1].next = &t->atid_tab[natids];
1050 t->afree = t->atid_tab;
1051 }
1052 return 0;
1053}
1054
1055static void free_tid_maps(struct tid_info *t)
1056{
1057 cxgb_free_mem(t->tid_tab);
1058}
1059
1060static inline void add_adapter(struct adapter *adap)
1061{
1062 write_lock_bh(&adapter_list_lock);
1063 list_add_tail(&adap->adapter_list, &adapter_list);
1064 write_unlock_bh(&adapter_list_lock);
1065}
1066
1067static inline void remove_adapter(struct adapter *adap)
1068{
1069 write_lock_bh(&adapter_list_lock);
1070 list_del(&adap->adapter_list);
1071 write_unlock_bh(&adapter_list_lock);
1072}
1073
1074int cxgb3_offload_activate(struct adapter *adapter)
1075{
1076 struct t3cdev *dev = &adapter->tdev;
1077 int natids, err;
1078 struct t3c_data *t;
1079 struct tid_range stid_range, tid_range;
1080 struct mtutab mtutab;
1081 unsigned int l2t_capacity;
1082
 1083 t = kzalloc(sizeof(*t), GFP_KERNEL);
1084 if (!t)
1085 return -ENOMEM;
1086
1087 err = -EOPNOTSUPP;
1088 if (dev->ctl(dev, GET_TX_MAX_CHUNK, &t->tx_max_chunk) < 0 ||
1089 dev->ctl(dev, GET_MAX_OUTSTANDING_WR, &t->max_wrs) < 0 ||
1090 dev->ctl(dev, GET_L2T_CAPACITY, &l2t_capacity) < 0 ||
1091 dev->ctl(dev, GET_MTUS, &mtutab) < 0 ||
1092 dev->ctl(dev, GET_TID_RANGE, &tid_range) < 0 ||
1093 dev->ctl(dev, GET_STID_RANGE, &stid_range) < 0)
1094 goto out_free;
1095
1096 err = -ENOMEM;
1097 L2DATA(dev) = t3_init_l2t(l2t_capacity);
1098 if (!L2DATA(dev))
1099 goto out_free;
1100
1101 natids = min(tid_range.num / 2, MAX_ATIDS);
1102 err = init_tid_tabs(&t->tid_maps, tid_range.num, natids,
1103 stid_range.num, ATID_BASE, stid_range.base);
1104 if (err)
1105 goto out_free_l2t;
1106
1107 t->mtus = mtutab.mtus;
1108 t->nmtus = mtutab.size;
1109
1110 INIT_WORK(&t->tid_release_task, t3_process_tid_release_list);
1111 spin_lock_init(&t->tid_release_lock);
1112 INIT_LIST_HEAD(&t->list_node);
1113 t->dev = dev;
1114
1115 T3C_DATA(dev) = t;
1116 dev->recv = process_rx;
1117 dev->neigh_update = t3_l2t_update;
1118
1119 /* Register netevent handler once */
1120 if (list_empty(&adapter_list))
1121 register_netevent_notifier(&nb);
1122
1123 add_adapter(adapter);
1124 return 0;
1125
1126out_free_l2t:
1127 t3_free_l2t(L2DATA(dev));
1128 L2DATA(dev) = NULL;
1129out_free:
1130 kfree(t);
1131 return err;
1132}
1133
1134void cxgb3_offload_deactivate(struct adapter *adapter)
1135{
1136 struct t3cdev *tdev = &adapter->tdev;
1137 struct t3c_data *t = T3C_DATA(tdev);
1138
1139 remove_adapter(adapter);
1140 if (list_empty(&adapter_list))
1141 unregister_netevent_notifier(&nb);
1142
1143 free_tid_maps(&t->tid_maps);
1144 T3C_DATA(tdev) = NULL;
1145 t3_free_l2t(L2DATA(tdev));
1146 L2DATA(tdev) = NULL;
1147 kfree(t);
1148}
1149
1150static inline void register_tdev(struct t3cdev *tdev)
1151{
1152 static int unit;
1153
1154 mutex_lock(&cxgb3_db_lock);
1155 snprintf(tdev->name, sizeof(tdev->name), "ofld_dev%d", unit++);
1156 list_add_tail(&tdev->ofld_dev_list, &ofld_dev_list);
1157 mutex_unlock(&cxgb3_db_lock);
1158}
1159
1160static inline void unregister_tdev(struct t3cdev *tdev)
1161{
1162 mutex_lock(&cxgb3_db_lock);
1163 list_del(&tdev->ofld_dev_list);
1164 mutex_unlock(&cxgb3_db_lock);
1165}
1166
1167void __devinit cxgb3_adapter_ofld(struct adapter *adapter)
1168{
1169 struct t3cdev *tdev = &adapter->tdev;
1170
1171 INIT_LIST_HEAD(&tdev->ofld_dev_list);
1172
1173 cxgb3_set_dummy_ops(tdev);
1174 tdev->send = t3_offload_tx;
1175 tdev->ctl = cxgb_offload_ctl;
1176 tdev->type = adapter->params.rev == 0 ? T3A : T3B;
1177
1178 register_tdev(tdev);
1179}
1180
1181void __devexit cxgb3_adapter_unofld(struct adapter *adapter)
1182{
1183 struct t3cdev *tdev = &adapter->tdev;
1184
1185 tdev->recv = NULL;
1186 tdev->neigh_update = NULL;
1187
1188 unregister_tdev(tdev);
1189}
1190
1191void __init cxgb3_offload_init(void)
1192{
1193 int i;
1194
1195 for (i = 0; i < NUM_CPL_CMDS; ++i)
1196 cpl_handlers[i] = do_bad_cpl;
1197
1198 t3_register_cpl_handler(CPL_SMT_WRITE_RPL, do_smt_write_rpl);
1199 t3_register_cpl_handler(CPL_L2T_WRITE_RPL, do_l2t_write_rpl);
1200 t3_register_cpl_handler(CPL_PASS_OPEN_RPL, do_stid_rpl);
1201 t3_register_cpl_handler(CPL_CLOSE_LISTSRV_RPL, do_stid_rpl);
1202 t3_register_cpl_handler(CPL_PASS_ACCEPT_REQ, do_cr);
1203 t3_register_cpl_handler(CPL_PASS_ESTABLISH, do_hwtid_rpl);
1204 t3_register_cpl_handler(CPL_ABORT_RPL_RSS, do_hwtid_rpl);
1205 t3_register_cpl_handler(CPL_ABORT_RPL, do_hwtid_rpl);
1206 t3_register_cpl_handler(CPL_RX_URG_NOTIFY, do_hwtid_rpl);
1207 t3_register_cpl_handler(CPL_RX_DATA, do_hwtid_rpl);
1208 t3_register_cpl_handler(CPL_TX_DATA_ACK, do_hwtid_rpl);
1209 t3_register_cpl_handler(CPL_TX_DMA_ACK, do_hwtid_rpl);
1210 t3_register_cpl_handler(CPL_ACT_OPEN_RPL, do_act_open_rpl);
1211 t3_register_cpl_handler(CPL_PEER_CLOSE, do_hwtid_rpl);
1212 t3_register_cpl_handler(CPL_CLOSE_CON_RPL, do_hwtid_rpl);
1213 t3_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req_rss);
1214 t3_register_cpl_handler(CPL_ACT_ESTABLISH, do_act_establish);
1215 t3_register_cpl_handler(CPL_SET_TCB_RPL, do_set_tcb_rpl);
1216 t3_register_cpl_handler(CPL_RDMA_TERMINATE, do_term);
1217 t3_register_cpl_handler(CPL_RDMA_EC_STATUS, do_hwtid_rpl);
1218 t3_register_cpl_handler(CPL_TRACE_PKT, do_trace);
1219 t3_register_cpl_handler(CPL_RX_DATA_DDP, do_hwtid_rpl);
1220 t3_register_cpl_handler(CPL_RX_DDP_COMPLETE, do_hwtid_rpl);
1221 t3_register_cpl_handler(CPL_ISCSI_HDR, do_hwtid_rpl);
1222}
diff --git a/drivers/net/cxgb3/cxgb3_offload.h b/drivers/net/cxgb3/cxgb3_offload.h
new file mode 100644
index 000000000000..0e6beb69ba17
--- /dev/null
+++ b/drivers/net/cxgb3/cxgb3_offload.h
@@ -0,0 +1,193 @@
1/*
2 * Copyright (c) 2006-2007 Chelsio, Inc. All rights reserved.
3 * Copyright (c) 2006-2007 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33#ifndef _CXGB3_OFFLOAD_H
34#define _CXGB3_OFFLOAD_H
35
36#include <linux/list.h>
37#include <linux/skbuff.h>
38
39#include "l2t.h"
40
41#include "t3cdev.h"
42#include "t3_cpl.h"
43
44struct adapter;
45
46void cxgb3_offload_init(void);
47
48void cxgb3_adapter_ofld(struct adapter *adapter);
49void cxgb3_adapter_unofld(struct adapter *adapter);
50int cxgb3_offload_activate(struct adapter *adapter);
51void cxgb3_offload_deactivate(struct adapter *adapter);
52
53void cxgb3_set_dummy_ops(struct t3cdev *dev);
54
55/*
56 * Client registration. Users of the T3 driver must register themselves.
57 * The T3 driver will call the add function of every client for each T3
58 * adapter activated, passing up the t3cdev ptr. Each client fills out an
59 * array of callback functions to process CPL messages.
60 */
61
62void cxgb3_register_client(struct cxgb3_client *client);
63void cxgb3_unregister_client(struct cxgb3_client *client);
64void cxgb3_add_clients(struct t3cdev *tdev);
65void cxgb3_remove_clients(struct t3cdev *tdev);
66
67typedef int (*cxgb3_cpl_handler_func)(struct t3cdev *dev,
68 struct sk_buff *skb, void *ctx);
69
70struct cxgb3_client {
71 char *name;
72 void (*add) (struct t3cdev *);
73 void (*remove) (struct t3cdev *);
74 cxgb3_cpl_handler_func *handlers;
75 int (*redirect)(void *ctx, struct dst_entry *old,
76 struct dst_entry *new, struct l2t_entry *l2t);
77 struct list_head client_list;
78};
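
/*
 * Editor's note: a sketch of a hypothetical client using the interface
 * above; the names and callbacks are illustrative, not part of the
 * driver. NUM_CPL_CMDS comes from t3_cpl.h.
 */
static cxgb3_cpl_handler_func my_handlers[NUM_CPL_CMDS];

static void my_add(struct t3cdev *tdev)
{
	/* called once per active adapter: set up per-device state here */
}

static void my_remove(struct t3cdev *tdev)
{
	/* tear down whatever my_add() created */
}

static struct cxgb3_client my_client = {
	.name     = "example_client",
	.add      = my_add,
	.remove   = my_remove,
	.handlers = my_handlers,
};

/*
 * cxgb3_register_client(&my_client) invokes ->add for every adapter
 * already activated; cxgb3_unregister_client(&my_client) undoes it.
 */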
79
80/*
81 * TID allocation services.
82 */
83int cxgb3_alloc_atid(struct t3cdev *dev, struct cxgb3_client *client,
84 void *ctx);
85int cxgb3_alloc_stid(struct t3cdev *dev, struct cxgb3_client *client,
86 void *ctx);
87void *cxgb3_free_atid(struct t3cdev *dev, int atid);
88void cxgb3_free_stid(struct t3cdev *dev, int stid);
89void cxgb3_insert_tid(struct t3cdev *dev, struct cxgb3_client *client,
90 void *ctx, unsigned int tid);
91void cxgb3_queue_tid_release(struct t3cdev *dev, unsigned int tid);
92void cxgb3_remove_tid(struct t3cdev *dev, void *ctx, unsigned int tid);
93
94struct t3c_tid_entry {
95 struct cxgb3_client *client;
96 void *ctx;
97};
98
99/* CPL message priority levels */
100enum {
101 CPL_PRIORITY_DATA = 0, /* data messages */
102 CPL_PRIORITY_SETUP = 1, /* connection setup messages */
103 CPL_PRIORITY_TEARDOWN = 0, /* connection teardown messages */
104 CPL_PRIORITY_LISTEN = 1, /* listen start/stop messages */
105 CPL_PRIORITY_ACK = 1, /* RX ACK messages */
106 CPL_PRIORITY_CONTROL = 1 /* offload control messages */
107};
108
109/* Flags for return value of CPL message handlers */
110enum {
111 CPL_RET_BUF_DONE = 1, /* buffer processing done, buffer may be freed */
112 CPL_RET_BAD_MSG = 2, /* bad CPL message (e.g., unknown opcode) */
113 CPL_RET_UNKNOWN_TID = 4 /* unexpected unknown TID */
114};
115
116typedef int (*cpl_handler_func)(struct t3cdev *dev, struct sk_buff *skb);
117
118/*
119 * Returns a pointer to the first byte of the CPL header in an sk_buff that
120 * contains a CPL message.
121 */
122static inline void *cplhdr(struct sk_buff *skb)
123{
124 return skb->data;
125}
126
127void t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h);
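
/*
 * Editor's note: a sketch of a hypothetical per-opcode handler matching
 * the typedef above, assuming the CPL message layouts from t3_cpl.h.
 * Returning CPL_RET_BUF_DONE tells the dispatcher the skb may be freed;
 * CPL_RET_BAD_MSG flags a malformed message.
 */
static int my_rx_data_handler(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_rx_data *req = cplhdr(skb);	/* CPL header at skb->data */

	if (skb->len < sizeof(*req))
		return CPL_RET_BAD_MSG | CPL_RET_BUF_DONE;
	/* ... consume req ... */
	return CPL_RET_BUF_DONE;
}

/* installed at init time with:
 *	t3_register_cpl_handler(CPL_RX_DATA, my_rx_data_handler);
 */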
128
129union listen_entry {
130 struct t3c_tid_entry t3c_tid;
131 union listen_entry *next;
132};
133
134union active_open_entry {
135 struct t3c_tid_entry t3c_tid;
136 union active_open_entry *next;
137};
138
139/*
140 * Holds the size, base address, free-list start, etc. of the TID, server TID,
141 * and active-open TID tables for an offload device.
142 * The tables themselves are allocated dynamically.
143 */
144struct tid_info {
145 struct t3c_tid_entry *tid_tab;
146 unsigned int ntids;
147 atomic_t tids_in_use;
148
149 union listen_entry *stid_tab;
150 unsigned int nstids;
151 unsigned int stid_base;
152
153 union active_open_entry *atid_tab;
154 unsigned int natids;
155 unsigned int atid_base;
156
157 /*
158 * The following members are accessed R/W so we put them in their own
159 * cache lines.
160 *
161 * XXX We could combine the atid fields above with the lock here since
162	 * atids are used once (unlike other tids). OTOH the above fields are
163 * usually in cache due to tid_tab.
164 */
165 spinlock_t atid_lock ____cacheline_aligned_in_smp;
166 union active_open_entry *afree;
167 unsigned int atids_in_use;
168
169 spinlock_t stid_lock ____cacheline_aligned;
170 union listen_entry *sfree;
171 unsigned int stids_in_use;
172};
173
174struct t3c_data {
175 struct list_head list_node;
176 struct t3cdev *dev;
177 unsigned int tx_max_chunk; /* max payload for TX_DATA */
178 unsigned int max_wrs; /* max in-flight WRs per connection */
179 unsigned int nmtus;
180 const unsigned short *mtus;
181 struct tid_info tid_maps;
182
183 struct t3c_tid_entry *tid_release_list;
184 spinlock_t tid_release_lock;
185 struct work_struct tid_release_task;
186};
187
188/*
189 * t3cdev -> t3c_data accessor
190 */
191#define T3C_DATA(dev) (*(struct t3c_data **)&(dev)->l4opt)
192
193#endif
diff --git a/drivers/net/cxgb3/firmware_exports.h b/drivers/net/cxgb3/firmware_exports.h
new file mode 100644
index 000000000000..6a835f6a262a
--- /dev/null
+++ b/drivers/net/cxgb3/firmware_exports.h
@@ -0,0 +1,177 @@
1/*
2 * Copyright (c) 2004-2007 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#ifndef _FIRMWARE_EXPORTS_H_
33#define _FIRMWARE_EXPORTS_H_
34
35/* WR OPCODES supported by the firmware.
36 */
37#define FW_WROPCODE_FORWARD 0x01
38#define FW_WROPCODE_BYPASS 0x05
39
40#define FW_WROPCODE_TUNNEL_TX_PKT 0x03
41
42#define FW_WROPOCDE_ULPTX_DATA_SGL 0x00
43#define FW_WROPCODE_ULPTX_MEM_READ 0x02
44#define FW_WROPCODE_ULPTX_PKT 0x04
45#define FW_WROPCODE_ULPTX_INVALIDATE 0x06
46
47#define FW_WROPCODE_TUNNEL_RX_PKT 0x07
48
49#define FW_WROPCODE_OFLD_GETTCB_RPL 0x08
50#define FW_WROPCODE_OFLD_CLOSE_CON 0x09
51#define FW_WROPCODE_OFLD_TP_ABORT_CON_REQ 0x0A
52#define FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL 0x0F
53#define FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ 0x0B
54#define FW_WROPCODE_OFLD_TP_ABORT_CON_RPL 0x0C
55#define FW_WROPCODE_OFLD_TX_DATA 0x0D
56#define FW_WROPCODE_OFLD_TX_DATA_ACK 0x0E
57
58#define FW_WROPCODE_RI_RDMA_INIT 0x10
59#define FW_WROPCODE_RI_RDMA_WRITE 0x11
60#define FW_WROPCODE_RI_RDMA_READ_REQ 0x12
61#define FW_WROPCODE_RI_RDMA_READ_RESP 0x13
62#define FW_WROPCODE_RI_SEND 0x14
63#define FW_WROPCODE_RI_TERMINATE 0x15
64#define FW_WROPCODE_RI_RDMA_READ 0x16
65#define FW_WROPCODE_RI_RECEIVE 0x17
66#define FW_WROPCODE_RI_BIND_MW 0x18
67#define FW_WROPCODE_RI_FASTREGISTER_MR 0x19
68#define FW_WROPCODE_RI_LOCAL_INV 0x1A
69#define FW_WROPCODE_RI_MODIFY_QP 0x1B
70#define FW_WROPCODE_RI_BYPASS 0x1C
71
72#define FW_WROPOCDE_RSVD 0x1E
73
74#define FW_WROPCODE_SGE_EGRESSCONTEXT_RR 0x1F
75
76#define FW_WROPCODE_MNGT 0x1D
77#define FW_MNGTOPCODE_PKTSCHED_SET 0x00
78
79/* Maximum size of a WR sent from the host, limited by the SGE.
80 *
81 * Note: WRs coming from ULP or TP are only limited by CIM.
82 */
83#define FW_WR_SIZE 128
84
85/* Maximum number of outstanding WRs sent from the host. Value must be
86 * programmed in the CTRL/TUNNEL/QP SGE Egress Context and used by
87 * offload modules to limit the number of WRs per connection.
88 */
89#define FW_T3_WR_NUM 16
90#define FW_N3_WR_NUM 7
91
92#ifndef N3
93# define FW_WR_NUM FW_T3_WR_NUM
94#else
95# define FW_WR_NUM FW_N3_WR_NUM
96#endif
97
98/* FW_TUNNEL_NUM corresponds to the number of supported TUNNEL Queues. These
99 * queues must start at SGE Egress Context FW_TUNNEL_SGEEC_START and must
100 * start at 'TID' (or 'uP Token') FW_TUNNEL_TID_START.
101 *
102 * Ingress Traffic (e.g. DMA completion credit) for TUNNEL Queue[i] is sent
103 * to RESP Queue[i].
104 */
105#define FW_TUNNEL_NUM 8
106#define FW_TUNNEL_SGEEC_START 8
107#define FW_TUNNEL_TID_START 65544
108
109/* FW_CTRL_NUM corresponds to the number of supported CTRL Queues. These queues
110 * must start at SGE Egress Context FW_CTRL_SGEEC_START and must start at 'TID'
111 * (or 'uP Token') FW_CTRL_TID_START.
112 *
113 * Ingress Traffic for CTRL Queue[i] is sent to RESP Queue[i].
114 */
115#define FW_CTRL_NUM 8
116#define FW_CTRL_SGEEC_START 65528
117#define FW_CTRL_TID_START 65536
118
119/* FW_OFLD_NUM corresponds to the number of supported OFFLOAD Queues. These
120 * queues must start at SGE Egress Context FW_OFLD_SGEEC_START.
121 *
122 * Note: the 'uP Token' in the SGE Egress Context fields is irrelevant for
123 * OFFLOAD Queues, as the host is responsible for providing the correct TID in
124 * every WR.
125 *
126 * Ingress Traffic for OFFLOAD Queue[i] is sent to RESP Queue[i].
127 */
128#define FW_OFLD_NUM 8
129#define FW_OFLD_SGEEC_START 0
130
131/*
132 * FW_RI_NUM corresponds to the number of supported RI (RDMA) Queues.
133 */
134#define FW_RI_NUM 1
135#define FW_RI_SGEEC_START 65527
136#define FW_RI_TID_START 65552
137
138/*
139 * The RX_PKT_TID
140 */
141#define FW_RX_PKT_NUM 1
142#define FW_RX_PKT_TID_START 65553
143
144/* FW_WRC_NUM corresponds to the number of Work Request Contexts supported
145 * by the firmware.
146 */
147#define FW_WRC_NUM \
148 (65536 + FW_TUNNEL_NUM + FW_CTRL_NUM + FW_RI_NUM + FW_RX_PKT_NUM)
149
150/*
151 * FW type and version.
152 */
153#define S_FW_VERSION_TYPE 28
154#define M_FW_VERSION_TYPE 0xF
155#define V_FW_VERSION_TYPE(x) ((x) << S_FW_VERSION_TYPE)
156#define G_FW_VERSION_TYPE(x) \
157 (((x) >> S_FW_VERSION_TYPE) & M_FW_VERSION_TYPE)
158
159#define S_FW_VERSION_MAJOR 16
160#define M_FW_VERSION_MAJOR 0xFFF
161#define V_FW_VERSION_MAJOR(x) ((x) << S_FW_VERSION_MAJOR)
162#define G_FW_VERSION_MAJOR(x) \
163 (((x) >> S_FW_VERSION_MAJOR) & M_FW_VERSION_MAJOR)
164
165#define S_FW_VERSION_MINOR 8
166#define M_FW_VERSION_MINOR 0xFF
167#define V_FW_VERSION_MINOR(x) ((x) << S_FW_VERSION_MINOR)
168#define G_FW_VERSION_MINOR(x) \
169 (((x) >> S_FW_VERSION_MINOR) & M_FW_VERSION_MINOR)
170
171#define S_FW_VERSION_MICRO 0
172#define M_FW_VERSION_MICRO 0xFF
173#define V_FW_VERSION_MICRO(x) ((x) << S_FW_VERSION_MICRO)
174#define G_FW_VERSION_MICRO(x) \
175 (((x) >> S_FW_VERSION_MICRO) & M_FW_VERSION_MICRO)
176
177#endif /* _FIRMWARE_EXPORTS_H_ */
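
/*
 * Editor's note: a worked example of the S_/M_/V_/G_ accessor pattern
 * above. S_* is a field's bit offset, M_* its mask after shifting, V_*
 * places a value into the field, and G_* extracts it. Composing a
 * hypothetical version word and taking it apart again:
 *
 *	u32 vers = V_FW_VERSION_MAJOR(3) | V_FW_VERSION_MINOR(2) |
 *		   V_FW_VERSION_MICRO(1);
 *
 *	G_FW_VERSION_MAJOR(vers)	== 3	((vers >> 16) & 0xFFF)
 *	G_FW_VERSION_MINOR(vers)	== 2	((vers >> 8) & 0xFF)
 *	G_FW_VERSION_MICRO(vers)	== 1	((vers >> 0) & 0xFF)
 */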
diff --git a/drivers/net/cxgb3/l2t.c b/drivers/net/cxgb3/l2t.c
new file mode 100644
index 000000000000..3c0cb8557058
--- /dev/null
+++ b/drivers/net/cxgb3/l2t.c
@@ -0,0 +1,450 @@
1/*
2 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
3 * Copyright (c) 2006-2007 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33#include <linux/skbuff.h>
34#include <linux/netdevice.h>
35#include <linux/if.h>
36#include <linux/if_vlan.h>
37#include <linux/jhash.h>
38#include <net/neighbour.h>
39#include "common.h"
40#include "t3cdev.h"
41#include "cxgb3_defs.h"
42#include "l2t.h"
43#include "t3_cpl.h"
44#include "firmware_exports.h"
45
46#define VLAN_NONE 0xfff
47
48/*
49 * Module locking notes: There is an RW lock protecting the L2 table as a
50 * whole plus a spinlock per L2T entry. Entry lookups and allocations happen
51 * under the protection of the table lock, individual entry changes happen
52 * while holding that entry's spinlock. The table lock nests outside the
53 * entry locks. Allocations of new entries take the table lock as writers so
54 * no other lookups can happen while allocating new entries. Entry updates
55 * take the table lock as readers so multiple entries can be updated in
56 * parallel. Dropping an L2T entry only decrements its reference count
57 * and so can happen in parallel with entry allocation, but no entry can
58 * change state or increment its ref count during allocation, as both of
59 * those operations perform lookups.
60 */
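
/*
 * Editor's note: a compact sketch of the nesting just described, with a
 * hypothetical update function. The table lock is taken as a reader so
 * updates can proceed in parallel; the entry's own spinlock serializes
 * the actual change.
 */
static void example_touch_entry(struct l2t_data *d, struct l2t_entry *e)
{
	read_lock_bh(&d->lock);		/* table lock, shared */
	spin_lock(&e->lock);		/* entry lock nests inside it */
	/* ... modify e here ... */
	spin_unlock(&e->lock);
	read_unlock_bh(&d->lock);
}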
61
62static inline unsigned int vlan_prio(const struct l2t_entry *e)
63{
64 return e->vlan >> 13;
65}
66
67static inline unsigned int arp_hash(u32 key, int ifindex,
68 const struct l2t_data *d)
69{
70 return jhash_2words(key, ifindex, 0) & (d->nentries - 1);
71}
72
73static inline void neigh_replace(struct l2t_entry *e, struct neighbour *n)
74{
75 neigh_hold(n);
76 if (e->neigh)
77 neigh_release(e->neigh);
78 e->neigh = n;
79}
80
81/*
82 * Set up an L2T entry and send any packets waiting in the arp queue. The
83 * supplied skb is used for the CPL_L2T_WRITE_REQ. Must be called with the
84 * entry locked.
85 */
86static int setup_l2e_send_pending(struct t3cdev *dev, struct sk_buff *skb,
87 struct l2t_entry *e)
88{
89 struct cpl_l2t_write_req *req;
90
91 if (!skb) {
92 skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
93 if (!skb)
94 return -ENOMEM;
95 }
96
97 req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
98 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
99 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, e->idx));
100 req->params = htonl(V_L2T_W_IDX(e->idx) | V_L2T_W_IFF(e->smt_idx) |
101 V_L2T_W_VLAN(e->vlan & VLAN_VID_MASK) |
102 V_L2T_W_PRIO(vlan_prio(e)));
103 memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
104 memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
105 skb->priority = CPL_PRIORITY_CONTROL;
106 cxgb3_ofld_send(dev, skb);
107 while (e->arpq_head) {
108 skb = e->arpq_head;
109 e->arpq_head = skb->next;
110 skb->next = NULL;
111 cxgb3_ofld_send(dev, skb);
112 }
113 e->arpq_tail = NULL;
114 e->state = L2T_STATE_VALID;
115
116 return 0;
117}
118
119/*
120 * Add a packet to an L2T entry's queue of packets awaiting resolution.
121 * Must be called with the entry's lock held.
122 */
123static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb)
124{
125 skb->next = NULL;
126 if (e->arpq_head)
127 e->arpq_tail->next = skb;
128 else
129 e->arpq_head = skb;
130 e->arpq_tail = skb;
131}
132
133int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
134 struct l2t_entry *e)
135{
136again:
137 switch (e->state) {
138 case L2T_STATE_STALE: /* entry is stale, kick off revalidation */
139 neigh_event_send(e->neigh, NULL);
140 spin_lock_bh(&e->lock);
141 if (e->state == L2T_STATE_STALE)
142 e->state = L2T_STATE_VALID;
143		spin_unlock_bh(&e->lock);	/* fall through */
144 case L2T_STATE_VALID: /* fast-path, send the packet on */
145 return cxgb3_ofld_send(dev, skb);
146 case L2T_STATE_RESOLVING:
147 spin_lock_bh(&e->lock);
148 if (e->state != L2T_STATE_RESOLVING) {
149 /* ARP already completed */
150 spin_unlock_bh(&e->lock);
151 goto again;
152 }
153 arpq_enqueue(e, skb);
154 spin_unlock_bh(&e->lock);
155
156 /*
157 * Only the first packet added to the arpq should kick off
158 * resolution. However, because the alloc_skb below can fail,
159 * we allow each packet added to the arpq to retry resolution
160 * as a way of recovering from transient memory exhaustion.
161 * A better way would be to use a work request to retry L2T
162 * entries when there's no memory.
163 */
164 if (!neigh_event_send(e->neigh, NULL)) {
165 skb = alloc_skb(sizeof(struct cpl_l2t_write_req),
166 GFP_ATOMIC);
167 if (!skb)
168 break;
169
170 spin_lock_bh(&e->lock);
171 if (e->arpq_head)
172 setup_l2e_send_pending(dev, skb, e);
173 else /* we lost the race */
174 __kfree_skb(skb);
175 spin_unlock_bh(&e->lock);
176 }
177 }
178 return 0;
179}
180
181EXPORT_SYMBOL(t3_l2t_send_slow);
182
183void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e)
184{
185again:
186 switch (e->state) {
187 case L2T_STATE_STALE: /* entry is stale, kick off revalidation */
188 neigh_event_send(e->neigh, NULL);
189 spin_lock_bh(&e->lock);
190 if (e->state == L2T_STATE_STALE) {
191 e->state = L2T_STATE_VALID;
192 }
193 spin_unlock_bh(&e->lock);
194 return;
195	case L2T_STATE_VALID:	/* fast-path, nothing to do */
196 return;
197 case L2T_STATE_RESOLVING:
198 spin_lock_bh(&e->lock);
199 if (e->state != L2T_STATE_RESOLVING) {
200 /* ARP already completed */
201 spin_unlock_bh(&e->lock);
202 goto again;
203 }
204 spin_unlock_bh(&e->lock);
205
206		/*
207		 * Only the first event should kick off resolution, but we
208		 * let every caller retry it, mirroring the logic in
209		 * t3_l2t_send_slow() above (note there is no alloc_skb
210		 * here that can fail). A better way would be to use a
211		 * work request to retry L2T entries when there's no
212		 * memory.
213		 */
214 neigh_event_send(e->neigh, NULL);
215 }
216 return;
217}
218
219EXPORT_SYMBOL(t3_l2t_send_event);
220
221/*
222 * Allocate a free L2T entry. Must be called with l2t_data.lock held.
223 */
224static struct l2t_entry *alloc_l2e(struct l2t_data *d)
225{
226 struct l2t_entry *end, *e, **p;
227
228 if (!atomic_read(&d->nfree))
229 return NULL;
230
231 /* there's definitely a free entry */
232 for (e = d->rover, end = &d->l2tab[d->nentries]; e != end; ++e)
233 if (atomic_read(&e->refcnt) == 0)
234 goto found;
235
236 for (e = &d->l2tab[1]; atomic_read(&e->refcnt); ++e) ;
237found:
238 d->rover = e + 1;
239 atomic_dec(&d->nfree);
240
241 /*
242 * The entry we found may be an inactive entry that is
243 * presently in the hash table. We need to remove it.
244 */
245 if (e->state != L2T_STATE_UNUSED) {
246 int hash = arp_hash(e->addr, e->ifindex, d);
247
248 for (p = &d->l2tab[hash].first; *p; p = &(*p)->next)
249 if (*p == e) {
250 *p = e->next;
251 break;
252 }
253 e->state = L2T_STATE_UNUSED;
254 }
255 return e;
256}
257
258/*
259 * Called when an L2T entry has no more users. The entry is left in the hash
260 * table since it is likely to be reused but we also bump nfree to indicate
261 * that the entry can be reallocated for a different neighbor. We also drop
262 * the existing neighbor reference in case the neighbor is going away and is
263 * waiting on our reference.
264 *
265 * Because entries can be reallocated to other neighbors once their ref count
266 * drops to 0 we need to take the entry's lock to avoid races with a new
267 * incarnation.
268 */
269void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e)
270{
271 spin_lock_bh(&e->lock);
272 if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */
273 if (e->neigh) {
274 neigh_release(e->neigh);
275 e->neigh = NULL;
276 }
277 }
278 spin_unlock_bh(&e->lock);
279 atomic_inc(&d->nfree);
280}
281
282EXPORT_SYMBOL(t3_l2e_free);
283
284/*
285 * Update an L2T entry that was previously used for the same next hop as neigh.
286 * Must be called with softirqs disabled.
287 */
288static inline void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
289{
290 unsigned int nud_state;
291
292	spin_lock(&e->lock);	/* avoid race with t3_l2e_free() */
293
294 if (neigh != e->neigh)
295 neigh_replace(e, neigh);
296 nud_state = neigh->nud_state;
297 if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)) ||
298 !(nud_state & NUD_VALID))
299 e->state = L2T_STATE_RESOLVING;
300 else if (nud_state & NUD_CONNECTED)
301 e->state = L2T_STATE_VALID;
302 else
303 e->state = L2T_STATE_STALE;
304 spin_unlock(&e->lock);
305}
306
307struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
308 struct net_device *dev)
309{
310 struct l2t_entry *e;
311 struct l2t_data *d = L2DATA(cdev);
312 u32 addr = *(u32 *) neigh->primary_key;
313 int ifidx = neigh->dev->ifindex;
314 int hash = arp_hash(addr, ifidx, d);
315 struct port_info *p = netdev_priv(dev);
316 int smt_idx = p->port_id;
317
318 write_lock_bh(&d->lock);
319 for (e = d->l2tab[hash].first; e; e = e->next)
320 if (e->addr == addr && e->ifindex == ifidx &&
321 e->smt_idx == smt_idx) {
322 l2t_hold(d, e);
323 if (atomic_read(&e->refcnt) == 1)
324 reuse_entry(e, neigh);
325 goto done;
326 }
327
328 /* Need to allocate a new entry */
329 e = alloc_l2e(d);
330 if (e) {
331		spin_lock(&e->lock);	/* avoid race with t3_l2e_free() */
332 e->next = d->l2tab[hash].first;
333 d->l2tab[hash].first = e;
334 e->state = L2T_STATE_RESOLVING;
335 e->addr = addr;
336 e->ifindex = ifidx;
337 e->smt_idx = smt_idx;
338 atomic_set(&e->refcnt, 1);
339 neigh_replace(e, neigh);
340 if (neigh->dev->priv_flags & IFF_802_1Q_VLAN)
341 e->vlan = VLAN_DEV_INFO(neigh->dev)->vlan_id;
342 else
343 e->vlan = VLAN_NONE;
344 spin_unlock(&e->lock);
345 }
346done:
347 write_unlock_bh(&d->lock);
348 return e;
349}
350
351EXPORT_SYMBOL(t3_l2t_get);
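
/*
 * Editor's note: a sketch of the typical entry lifecycle from a
 * client's point of view; names are illustrative and error handling is
 * simplified. t3_l2t_get() returns the entry with a reference held, so
 * the caller keeps it until connection teardown.
 */
static struct l2t_entry *example_setup_tx(struct t3cdev *tdev,
					  struct neighbour *neigh,
					  struct net_device *dev,
					  struct sk_buff *skb)
{
	struct l2t_entry *e = t3_l2t_get(tdev, neigh, dev);	/* takes a ref */

	if (!e)
		return NULL;
	l2t_send(tdev, skb, e);	/* fast path if VALID, slow path otherwise */
	return e;		/* later: l2t_release(L2DATA(tdev), e) */
}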
352
353/*
354 * Called when address resolution fails for an L2T entry to handle packets
355 * on the arpq head. If a packet specifies a failure handler it is invoked,
356 * otherwise the packet is sent to the offload device.
357 *
358 * XXX: maybe we should abandon the latter behavior and just require a failure
359 * handler.
360 */
361static void handle_failed_resolution(struct t3cdev *dev, struct sk_buff *arpq)
362{
363 while (arpq) {
364 struct sk_buff *skb = arpq;
365 struct l2t_skb_cb *cb = L2T_SKB_CB(skb);
366
367 arpq = skb->next;
368 skb->next = NULL;
369 if (cb->arp_failure_handler)
370 cb->arp_failure_handler(dev, skb);
371 else
372 cxgb3_ofld_send(dev, skb);
373 }
374}
375
376/*
377 * Called when the host's ARP layer makes a change to some entry that is
378 * loaded into the HW L2 table.
379 */
380void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh)
381{
382 struct l2t_entry *e;
383 struct sk_buff *arpq = NULL;
384 struct l2t_data *d = L2DATA(dev);
385 u32 addr = *(u32 *) neigh->primary_key;
386 int ifidx = neigh->dev->ifindex;
387 int hash = arp_hash(addr, ifidx, d);
388
389 read_lock_bh(&d->lock);
390 for (e = d->l2tab[hash].first; e; e = e->next)
391 if (e->addr == addr && e->ifindex == ifidx) {
392 spin_lock(&e->lock);
393 goto found;
394 }
395 read_unlock_bh(&d->lock);
396 return;
397
398found:
399	read_unlock(&d->lock);	/* BH stays disabled; e->lock still held */
400 if (atomic_read(&e->refcnt)) {
401 if (neigh != e->neigh)
402 neigh_replace(e, neigh);
403
404 if (e->state == L2T_STATE_RESOLVING) {
405 if (neigh->nud_state & NUD_FAILED) {
406 arpq = e->arpq_head;
407 e->arpq_head = e->arpq_tail = NULL;
408 } else if (neigh_is_connected(neigh))
409 setup_l2e_send_pending(dev, NULL, e);
410 } else {
411 e->state = neigh_is_connected(neigh) ?
412 L2T_STATE_VALID : L2T_STATE_STALE;
413			if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)))
414 setup_l2e_send_pending(dev, NULL, e);
415 }
416 }
417 spin_unlock_bh(&e->lock);
418
419 if (arpq)
420 handle_failed_resolution(dev, arpq);
421}
422
423struct l2t_data *t3_init_l2t(unsigned int l2t_capacity)
424{
425 struct l2t_data *d;
426 int i, size = sizeof(*d) + l2t_capacity * sizeof(struct l2t_entry);
427
428 d = cxgb_alloc_mem(size);
429 if (!d)
430 return NULL;
431
432 d->nentries = l2t_capacity;
433 d->rover = &d->l2tab[1]; /* entry 0 is not used */
434 atomic_set(&d->nfree, l2t_capacity - 1);
435 rwlock_init(&d->lock);
436
437 for (i = 0; i < l2t_capacity; ++i) {
438 d->l2tab[i].idx = i;
439 d->l2tab[i].state = L2T_STATE_UNUSED;
440 spin_lock_init(&d->l2tab[i].lock);
441 atomic_set(&d->l2tab[i].refcnt, 0);
442 }
443 return d;
444}
445
446void t3_free_l2t(struct l2t_data *d)
447{
448 cxgb_free_mem(d);
449}
450
diff --git a/drivers/net/cxgb3/l2t.h b/drivers/net/cxgb3/l2t.h
new file mode 100644
index 000000000000..ba5d2cbd7241
--- /dev/null
+++ b/drivers/net/cxgb3/l2t.h
@@ -0,0 +1,143 @@
1/*
2 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
3 * Copyright (c) 2006-2007 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33#ifndef _CHELSIO_L2T_H
34#define _CHELSIO_L2T_H
35
36#include <linux/spinlock.h>
37#include "t3cdev.h"
38#include <asm/atomic.h>
39
40enum {
41 L2T_STATE_VALID, /* entry is up to date */
42 L2T_STATE_STALE, /* entry may be used but needs revalidation */
43 L2T_STATE_RESOLVING, /* entry needs address resolution */
44 L2T_STATE_UNUSED /* entry not in use */
45};
46
47struct neighbour;
48struct sk_buff;
49
50/*
51 * Each L2T entry plays multiple roles. First of all, it keeps state for the
52 * corresponding entry of the HW L2 table and maintains a queue of offload
53 * packets awaiting address resolution. Second, it is a node of a hash table
54 * chain, where the nodes of the chain are linked together through their next
55 * pointer. Finally, each node is a bucket of a hash table, pointing to the
56 * first element in its chain through its first pointer.
57 */
58struct l2t_entry {
59 u16 state; /* entry state */
60 u16 idx; /* entry index */
61 u32 addr; /* dest IP address */
62 int ifindex; /* neighbor's net_device's ifindex */
63 u16 smt_idx; /* SMT index */
64	u16 vlan;		/* VLAN TCI (id: bits 0-11, prio: bits 13-15) */
65 struct neighbour *neigh; /* associated neighbour */
66 struct l2t_entry *first; /* start of hash chain */
67 struct l2t_entry *next; /* next l2t_entry on chain */
68 struct sk_buff *arpq_head; /* queue of packets awaiting resolution */
69 struct sk_buff *arpq_tail;
70 spinlock_t lock;
71 atomic_t refcnt; /* entry reference count */
72 u8 dmac[6]; /* neighbour's MAC address */
73};
74
75struct l2t_data {
76 unsigned int nentries; /* number of entries */
77 struct l2t_entry *rover; /* starting point for next allocation */
78 atomic_t nfree; /* number of free entries */
79 rwlock_t lock;
80 struct l2t_entry l2tab[0];
81};
82
83typedef void (*arp_failure_handler_func)(struct t3cdev *dev,
84					   struct sk_buff *skb);
85
86/*
87 * Callback stored in an skb to handle address resolution failure.
88 */
89struct l2t_skb_cb {
90 arp_failure_handler_func arp_failure_handler;
91};
92
93#define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
94
95static inline void set_arp_failure_handler(struct sk_buff *skb,
96 arp_failure_handler_func hnd)
97{
98 L2T_SKB_CB(skb)->arp_failure_handler = hnd;
99}
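
/*
 * Editor's note: the handler pointer lives in the skb's control buffer
 * (skb->cb), so it travels with the packet onto an entry's arp queue.
 * A sketch of a hypothetical failure handler that just reclaims the
 * skb; a real client would also tear down its connection state.
 */
static void my_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
{
	kfree_skb(skb);		/* resolution failed, drop the packet */
}

/* before handing the skb to l2t_send():
 *	set_arp_failure_handler(skb, my_arp_failure);
 */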
100
101/*
102 * Getting to the L2 data from an offload device.
103 */
104#define L2DATA(dev) ((dev)->l2opt)
105
106#define W_TCB_L2T_IX 0
107#define S_TCB_L2T_IX 7
108#define M_TCB_L2T_IX 0x7ffULL
109#define V_TCB_L2T_IX(x) ((x) << S_TCB_L2T_IX)
110
111void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e);
112void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh);
113struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
114 struct net_device *dev);
115int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
116 struct l2t_entry *e);
117void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e);
118struct l2t_data *t3_init_l2t(unsigned int l2t_capacity);
119void t3_free_l2t(struct l2t_data *d);
120
121int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb);
122
123static inline int l2t_send(struct t3cdev *dev, struct sk_buff *skb,
124 struct l2t_entry *e)
125{
126 if (likely(e->state == L2T_STATE_VALID))
127 return cxgb3_ofld_send(dev, skb);
128 return t3_l2t_send_slow(dev, skb, e);
129}
130
131static inline void l2t_release(struct l2t_data *d, struct l2t_entry *e)
132{
133 if (atomic_dec_and_test(&e->refcnt))
134 t3_l2e_free(d, e);
135}
136
137static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e)
138{
139 if (atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */
140 atomic_dec(&d->nfree);
141}
142
143#endif
diff --git a/drivers/net/cxgb3/mc5.c b/drivers/net/cxgb3/mc5.c
new file mode 100644
index 000000000000..644d62ea86a6
--- /dev/null
+++ b/drivers/net/cxgb3/mc5.c
@@ -0,0 +1,473 @@
1/*
2 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include "common.h"
33#include "regs.h"
34
35enum {
36 IDT75P52100 = 4,
37 IDT75N43102 = 5
38};
39
40/* DBGI command mode */
41enum {
42 DBGI_MODE_MBUS = 0,
43 DBGI_MODE_IDT52100 = 5
44};
45
46/* IDT 75P52100 commands */
47#define IDT_CMD_READ 0
48#define IDT_CMD_WRITE 1
49#define IDT_CMD_SEARCH 2
50#define IDT_CMD_LEARN 3
51
52/* IDT LAR register address and value for 144-bit mode (low 32 bits) */
53#define IDT_LAR_ADR0 0x180006
54#define IDT_LAR_MODE144 0xffff0000
55
56/* IDT SCR and SSR addresses (low 32 bits) */
57#define IDT_SCR_ADR0 0x180000
58#define IDT_SSR0_ADR0 0x180002
59#define IDT_SSR1_ADR0 0x180004
60
61/* IDT GMR base address (low 32 bits) */
62#define IDT_GMR_BASE_ADR0 0x180020
63
64/* IDT data and mask array base addresses (low 32 bits) */
65#define IDT_DATARY_BASE_ADR0 0
66#define IDT_MSKARY_BASE_ADR0 0x80000
67
68/* IDT 75N43102 commands */
69#define IDT4_CMD_SEARCH144 3
70#define IDT4_CMD_WRITE 4
71#define IDT4_CMD_READ 5
72
73/* IDT 75N43102 SCR address (low 32 bits) */
74#define IDT4_SCR_ADR0 0x3
75
76/* IDT 75N43102 GMR base addresses (low 32 bits) */
77#define IDT4_GMR_BASE0 0x10
78#define IDT4_GMR_BASE1 0x20
79#define IDT4_GMR_BASE2 0x30
80
81/* IDT 75N43102 data and mask array base addresses (low 32 bits) */
82#define IDT4_DATARY_BASE_ADR0 0x1000000
83#define IDT4_MSKARY_BASE_ADR0 0x2000000
84
85#define MAX_WRITE_ATTEMPTS 5
86
87#define MAX_ROUTES 2048
88
89/*
90 * Issue a command to the TCAM and wait for its completion. The address and
91 * any data required by the command must have been set up by the caller.
92 */
93static int mc5_cmd_write(struct adapter *adapter, u32 cmd)
94{
95 t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_CMD, cmd);
96 return t3_wait_op_done(adapter, A_MC5_DB_DBGI_RSP_STATUS,
97 F_DBGIRSPVALID, 1, MAX_WRITE_ATTEMPTS, 1);
98}
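
/*
 * Editor's note: t3_wait_op_done() is the driver's generic poll-for-bit
 * helper, defined elsewhere in the driver. A sketch of the pattern it
 * is assumed to implement: poll a register until the masked bits reach
 * the wanted polarity, giving up after a bounded number of attempts.
 */
static int example_wait_op_done(struct adapter *adap, int reg, u32 mask,
				int polarity, int attempts, int delay)
{
	while (1) {
		u32 val = t3_read_reg(adap, reg);

		if (!!(val & mask) == polarity)
			return 0;		/* bits reached wanted state */
		if (--attempts == 0)
			return -EAGAIN;		/* timed out */
		if (delay)
			udelay(delay);
	}
}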
99
100static inline void dbgi_wr_addr3(struct adapter *adapter, u32 v1, u32 v2,
101 u32 v3)
102{
103 t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR0, v1);
104 t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR1, v2);
105 t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR2, v3);
106}
107
108static inline void dbgi_wr_data3(struct adapter *adapter, u32 v1, u32 v2,
109 u32 v3)
110{
111 t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA0, v1);
112 t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA1, v2);
113 t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA2, v3);
114}
115
116static inline void dbgi_rd_rsp3(struct adapter *adapter, u32 *v1, u32 *v2,
117 u32 *v3)
118{
119 *v1 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA0);
120 *v2 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA1);
121 *v3 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA2);
122}
123
124/*
125 * Write data to the TCAM register at address (0, 0, addr_lo) using the TCAM
126 * command cmd. The data to be written must have been set up by the caller.
127 * Returns -1 on failure, 0 on success.
128 */
129static int mc5_write(struct adapter *adapter, u32 addr_lo, u32 cmd)
130{
131 t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR0, addr_lo);
132 if (mc5_cmd_write(adapter, cmd) == 0)
133 return 0;
134 CH_ERR(adapter, "MC5 timeout writing to TCAM address 0x%x\n",
135 addr_lo);
136 return -1;
137}
138
139static int init_mask_data_array(struct mc5 *mc5, u32 mask_array_base,
140 u32 data_array_base, u32 write_cmd,
141 int addr_shift)
142{
143 unsigned int i;
144 struct adapter *adap = mc5->adapter;
145
146 /*
147 * We need the size of the TCAM data and mask arrays in terms of
148 * 72-bit entries.
149 */
150 unsigned int size72 = mc5->tcam_size;
151 unsigned int server_base = t3_read_reg(adap, A_MC5_DB_SERVER_INDEX);
152
153 if (mc5->mode == MC5_MODE_144_BIT) {
154 size72 *= 2; /* 1 144-bit entry is 2 72-bit entries */
155 server_base *= 2;
156 }
157
158 /* Clear the data array */
159 dbgi_wr_data3(adap, 0, 0, 0);
160 for (i = 0; i < size72; i++)
161 if (mc5_write(adap, data_array_base + (i << addr_shift),
162 write_cmd))
163 return -1;
164
165 /* Initialize the mask array. */
166 dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff);
167 for (i = 0; i < size72; i++) {
168 if (i == server_base) /* entering server or routing region */
169 t3_write_reg(adap, A_MC5_DB_DBGI_REQ_DATA0,
170 mc5->mode == MC5_MODE_144_BIT ?
171 0xfffffff9 : 0xfffffffd);
172 if (mc5_write(adap, mask_array_base + (i << addr_shift),
173 write_cmd))
174 return -1;
175 }
176 return 0;
177}
178
179static int init_idt52100(struct mc5 *mc5)
180{
181 int i;
182 struct adapter *adap = mc5->adapter;
183
184 t3_write_reg(adap, A_MC5_DB_RSP_LATENCY,
185 V_RDLAT(0x15) | V_LRNLAT(0x15) | V_SRCHLAT(0x15));
186 t3_write_reg(adap, A_MC5_DB_PART_ID_INDEX, 2);
187
188 /*
189 * Use GMRs 14-15 for ELOOKUP, GMRs 12-13 for SYN lookups, and
190 * GMRs 8-9 for ACK- and AOPEN searches.
191 */
192 t3_write_reg(adap, A_MC5_DB_POPEN_DATA_WR_CMD, IDT_CMD_WRITE);
193 t3_write_reg(adap, A_MC5_DB_POPEN_MASK_WR_CMD, IDT_CMD_WRITE);
194 t3_write_reg(adap, A_MC5_DB_AOPEN_SRCH_CMD, IDT_CMD_SEARCH);
195 t3_write_reg(adap, A_MC5_DB_AOPEN_LRN_CMD, IDT_CMD_LEARN);
196 t3_write_reg(adap, A_MC5_DB_SYN_SRCH_CMD, IDT_CMD_SEARCH | 0x6000);
197 t3_write_reg(adap, A_MC5_DB_SYN_LRN_CMD, IDT_CMD_LEARN);
198 t3_write_reg(adap, A_MC5_DB_ACK_SRCH_CMD, IDT_CMD_SEARCH);
199 t3_write_reg(adap, A_MC5_DB_ACK_LRN_CMD, IDT_CMD_LEARN);
200 t3_write_reg(adap, A_MC5_DB_ILOOKUP_CMD, IDT_CMD_SEARCH);
201 t3_write_reg(adap, A_MC5_DB_ELOOKUP_CMD, IDT_CMD_SEARCH | 0x7000);
202 t3_write_reg(adap, A_MC5_DB_DATA_WRITE_CMD, IDT_CMD_WRITE);
203 t3_write_reg(adap, A_MC5_DB_DATA_READ_CMD, IDT_CMD_READ);
204
205 /* Set DBGI command mode for IDT TCAM. */
206 t3_write_reg(adap, A_MC5_DB_DBGI_CONFIG, DBGI_MODE_IDT52100);
207
208 /* Set up LAR */
209 dbgi_wr_data3(adap, IDT_LAR_MODE144, 0, 0);
210 if (mc5_write(adap, IDT_LAR_ADR0, IDT_CMD_WRITE))
211 goto err;
212
213 /* Set up SSRs */
214 dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0);
215 if (mc5_write(adap, IDT_SSR0_ADR0, IDT_CMD_WRITE) ||
216 mc5_write(adap, IDT_SSR1_ADR0, IDT_CMD_WRITE))
217 goto err;
218
219 /* Set up GMRs */
220 for (i = 0; i < 32; ++i) {
221 if (i >= 12 && i < 15)
222 dbgi_wr_data3(adap, 0xfffffff9, 0xffffffff, 0xff);
223 else if (i == 15)
224 dbgi_wr_data3(adap, 0xfffffff9, 0xffff8007, 0xff);
225 else
226 dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff);
227
228 if (mc5_write(adap, IDT_GMR_BASE_ADR0 + i, IDT_CMD_WRITE))
229 goto err;
230 }
231
232 /* Set up SCR */
233 dbgi_wr_data3(adap, 1, 0, 0);
234 if (mc5_write(adap, IDT_SCR_ADR0, IDT_CMD_WRITE))
235 goto err;
236
237 return init_mask_data_array(mc5, IDT_MSKARY_BASE_ADR0,
238 IDT_DATARY_BASE_ADR0, IDT_CMD_WRITE, 0);
239err:
240 return -EIO;
241}
242
243static int init_idt43102(struct mc5 *mc5)
244{
245 int i;
246 struct adapter *adap = mc5->adapter;
247
248 t3_write_reg(adap, A_MC5_DB_RSP_LATENCY,
249 adap->params.rev == 0 ? V_RDLAT(0xd) | V_SRCHLAT(0x11) :
250 V_RDLAT(0xd) | V_SRCHLAT(0x12));
251
252 /*
253 * Use GMRs 24-25 for ELOOKUP, GMRs 20-21 for SYN lookups, and no mask
254 * for ACK- and AOPEN searches.
255 */
256 t3_write_reg(adap, A_MC5_DB_POPEN_DATA_WR_CMD, IDT4_CMD_WRITE);
257 t3_write_reg(adap, A_MC5_DB_POPEN_MASK_WR_CMD, IDT4_CMD_WRITE);
258 t3_write_reg(adap, A_MC5_DB_AOPEN_SRCH_CMD,
259 IDT4_CMD_SEARCH144 | 0x3800);
260 t3_write_reg(adap, A_MC5_DB_SYN_SRCH_CMD, IDT4_CMD_SEARCH144);
261 t3_write_reg(adap, A_MC5_DB_ACK_SRCH_CMD, IDT4_CMD_SEARCH144 | 0x3800);
262 t3_write_reg(adap, A_MC5_DB_ILOOKUP_CMD, IDT4_CMD_SEARCH144 | 0x3800);
263 t3_write_reg(adap, A_MC5_DB_ELOOKUP_CMD, IDT4_CMD_SEARCH144 | 0x800);
264 t3_write_reg(adap, A_MC5_DB_DATA_WRITE_CMD, IDT4_CMD_WRITE);
265 t3_write_reg(adap, A_MC5_DB_DATA_READ_CMD, IDT4_CMD_READ);
266
267 t3_write_reg(adap, A_MC5_DB_PART_ID_INDEX, 3);
268
269 /* Set DBGI command mode for IDT TCAM. */
270 t3_write_reg(adap, A_MC5_DB_DBGI_CONFIG, DBGI_MODE_IDT52100);
271
272 /* Set up GMRs */
273 dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff);
274 for (i = 0; i < 7; ++i)
275 if (mc5_write(adap, IDT4_GMR_BASE0 + i, IDT4_CMD_WRITE))
276 goto err;
277
278 for (i = 0; i < 4; ++i)
279 if (mc5_write(adap, IDT4_GMR_BASE2 + i, IDT4_CMD_WRITE))
280 goto err;
281
282 dbgi_wr_data3(adap, 0xfffffff9, 0xffffffff, 0xff);
283 if (mc5_write(adap, IDT4_GMR_BASE1, IDT4_CMD_WRITE) ||
284 mc5_write(adap, IDT4_GMR_BASE1 + 1, IDT4_CMD_WRITE) ||
285 mc5_write(adap, IDT4_GMR_BASE1 + 4, IDT4_CMD_WRITE))
286 goto err;
287
288 dbgi_wr_data3(adap, 0xfffffff9, 0xffff8007, 0xff);
289 if (mc5_write(adap, IDT4_GMR_BASE1 + 5, IDT4_CMD_WRITE))
290 goto err;
291
292 /* Set up SCR */
293 dbgi_wr_data3(adap, 0xf0000000, 0, 0);
294 if (mc5_write(adap, IDT4_SCR_ADR0, IDT4_CMD_WRITE))
295 goto err;
296
297 return init_mask_data_array(mc5, IDT4_MSKARY_BASE_ADR0,
298 IDT4_DATARY_BASE_ADR0, IDT4_CMD_WRITE, 1);
299err:
300 return -EIO;
301}
302
303/* Put MC5 in DBGI mode. */
304static inline void mc5_dbgi_mode_enable(const struct mc5 *mc5)
305{
306 t3_write_reg(mc5->adapter, A_MC5_DB_CONFIG,
307 V_TMMODE(mc5->mode == MC5_MODE_72_BIT) | F_DBGIEN);
308}
309
310/* Put MC5 in M-Bus mode. */
311static void mc5_dbgi_mode_disable(const struct mc5 *mc5)
312{
313 t3_write_reg(mc5->adapter, A_MC5_DB_CONFIG,
314 V_TMMODE(mc5->mode == MC5_MODE_72_BIT) |
315 V_COMPEN(mc5->mode == MC5_MODE_72_BIT) |
316 V_PRTYEN(mc5->parity_enabled) | F_MBUSEN);
317}
318
319/*
320 * Initialization that requires the OS and protocol layers to already
321 * be intialized goes here.
322 */
323int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters,
324 unsigned int nroutes)
325{
326 u32 cfg;
327 int err;
328 unsigned int tcam_size = mc5->tcam_size;
329 struct adapter *adap = mc5->adapter;
330
331 if (nroutes > MAX_ROUTES || nroutes + nservers + nfilters > tcam_size)
332 return -EINVAL;
333
334 /* Reset the TCAM */
335 cfg = t3_read_reg(adap, A_MC5_DB_CONFIG) & ~F_TMMODE;
336 cfg |= V_TMMODE(mc5->mode == MC5_MODE_72_BIT) | F_TMRST;
337 t3_write_reg(adap, A_MC5_DB_CONFIG, cfg);
338 if (t3_wait_op_done(adap, A_MC5_DB_CONFIG, F_TMRDY, 1, 500, 0)) {
339 CH_ERR(adap, "TCAM reset timed out\n");
340 return -1;
341 }
342
343 t3_write_reg(adap, A_MC5_DB_ROUTING_TABLE_INDEX, tcam_size - nroutes);
344 t3_write_reg(adap, A_MC5_DB_FILTER_TABLE,
345 tcam_size - nroutes - nfilters);
346 t3_write_reg(adap, A_MC5_DB_SERVER_INDEX,
347 tcam_size - nroutes - nfilters - nservers);
348
349 mc5->parity_enabled = 1;
350
351 /* All the TCAM addresses we access have only the low 32 bits non 0 */
352 t3_write_reg(adap, A_MC5_DB_DBGI_REQ_ADDR1, 0);
353 t3_write_reg(adap, A_MC5_DB_DBGI_REQ_ADDR2, 0);
354
355 mc5_dbgi_mode_enable(mc5);
356
357 switch (mc5->part_type) {
358 case IDT75P52100:
359 err = init_idt52100(mc5);
360 break;
361 case IDT75N43102:
362 err = init_idt43102(mc5);
363 break;
364 default:
365 CH_ERR(adap, "Unsupported TCAM type %d\n", mc5->part_type);
366 err = -EINVAL;
367 break;
368 }
369
370 mc5_dbgi_mode_disable(mc5);
371 return err;
372}
373
374/**
375 * t3_read_mc5_range - dump a part of the memory managed by MC5
376 * @mc5: the MC5 handle
377 * @start: the start address for the dump
378 * @n: number of 72-bit words to read
379 * @buf: result buffer
380 *
381 * Read n 72-bit words from MC5 memory from the given start location.
382 */
383int t3_read_mc5_range(const struct mc5 *mc5, unsigned int start,
384 unsigned int n, u32 *buf)
385{
386 u32 read_cmd;
387 int err = 0;
388 struct adapter *adap = mc5->adapter;
389
390 if (mc5->part_type == IDT75P52100)
391 read_cmd = IDT_CMD_READ;
392 else if (mc5->part_type == IDT75N43102)
393 read_cmd = IDT4_CMD_READ;
394 else
395 return -EINVAL;
396
397 mc5_dbgi_mode_enable(mc5);
398
399 while (n--) {
400 t3_write_reg(adap, A_MC5_DB_DBGI_REQ_ADDR0, start++);
401 if (mc5_cmd_write(adap, read_cmd)) {
402 err = -EIO;
403 break;
404 }
405 dbgi_rd_rsp3(adap, buf + 2, buf + 1, buf);
406 buf += 3;
407 }
408
409 mc5_dbgi_mode_disable(mc5);
410	return err;
411}
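
/*
 * Editor's note: each 72-bit TCAM word comes back as three u32s, so the
 * result buffer must hold 3 * n words. A minimal usage sketch with a
 * hypothetical word count:
 */
static int example_dump_tcam(const struct mc5 *mc5)
{
	unsigned int n = 16;		/* hypothetical number of 72-bit words */
	u32 *buf = kcalloc(3 * n, sizeof(u32), GFP_KERNEL);
	int ret;

	if (!buf)
		return -ENOMEM;
	ret = t3_read_mc5_range(mc5, 0, n, buf);
	/* ... on success, inspect buf[0 .. 3 * n - 1] ... */
	kfree(buf);
	return ret;
}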
412
413#define MC5_INT_FATAL (F_PARITYERR | F_REQQPARERR | F_DISPQPARERR)
414
415/*
416 * MC5 interrupt handler
417 */
418void t3_mc5_intr_handler(struct mc5 *mc5)
419{
420 struct adapter *adap = mc5->adapter;
421 u32 cause = t3_read_reg(adap, A_MC5_DB_INT_CAUSE);
422
423 if ((cause & F_PARITYERR) && mc5->parity_enabled) {
424 CH_ALERT(adap, "MC5 parity error\n");
425 mc5->stats.parity_err++;
426 }
427
428 if (cause & F_REQQPARERR) {
429 CH_ALERT(adap, "MC5 request queue parity error\n");
430 mc5->stats.reqq_parity_err++;
431 }
432
433 if (cause & F_DISPQPARERR) {
434 CH_ALERT(adap, "MC5 dispatch queue parity error\n");
435 mc5->stats.dispq_parity_err++;
436 }
437
438 if (cause & F_ACTRGNFULL)
439 mc5->stats.active_rgn_full++;
440 if (cause & F_NFASRCHFAIL)
441 mc5->stats.nfa_srch_err++;
442 if (cause & F_UNKNOWNCMD)
443 mc5->stats.unknown_cmd++;
444 if (cause & F_DELACTEMPTY)
445 mc5->stats.del_act_empty++;
446 if (cause & MC5_INT_FATAL)
447 t3_fatal_err(adap);
448
449 t3_write_reg(adap, A_MC5_DB_INT_CAUSE, cause);
450}
451
452void __devinit t3_mc5_prep(struct adapter *adapter, struct mc5 *mc5, int mode)
453{
454#define K * 1024
455
456 static unsigned int tcam_part_size[] = { /* in K 72-bit entries */
457 64 K, 128 K, 256 K, 32 K
458 };
459
460#undef K
461
462 u32 cfg = t3_read_reg(adapter, A_MC5_DB_CONFIG);
463
464 mc5->adapter = adapter;
465 mc5->mode = (unsigned char)mode;
466 mc5->part_type = (unsigned char)G_TMTYPE(cfg);
467 if (cfg & F_TMTYPEHI)
468 mc5->part_type |= 4;
469
470 mc5->tcam_size = tcam_part_size[G_TMPARTSIZE(cfg)];
471 if (mode == MC5_MODE_144_BIT)
472 mc5->tcam_size /= 2;
473}
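
/*
 * Editor's note: the "#define K * 1024" above is a macro-expansion
 * trick, not a constant: "64 K" in the initializer expands to
 * "64 * 1024", so the table reads as 64K/128K/256K/32K 72-bit entries,
 * indexed by the two-bit G_TMPARTSIZE(cfg) field. The macro is #undef'd
 * immediately so it cannot leak into other code.
 */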
diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h
new file mode 100644
index 000000000000..b56c5f52bcdc
--- /dev/null
+++ b/drivers/net/cxgb3/regs.h
@@ -0,0 +1,2195 @@
1#define A_SG_CONTROL 0x0
2
3#define S_DROPPKT 20
4#define V_DROPPKT(x) ((x) << S_DROPPKT)
5#define F_DROPPKT V_DROPPKT(1U)
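
/*
 * Editor's note on the naming convention used throughout this file:
 * A_<reg> is a register address, S_<field> a field's bit offset,
 * M_<field> its width mask, V_<field>(x) places a value at the right
 * offset, F_<field> is the single-bit form, and G_<field>(x) extracts
 * the field. A hypothetical read-modify-write using the SG_CONTROL
 * fields defined here (M_PKTSHIFT appears just below):
 *
 *	u32 v = t3_read_reg(adapter, A_SG_CONTROL);
 *	v &= ~V_PKTSHIFT(M_PKTSHIFT);		   clear the field
 *	v |= V_PKTSHIFT(2) | F_DROPPKT;		   new value plus a flag
 *	t3_write_reg(adapter, A_SG_CONTROL, v);
 */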
6
7#define S_EGRGENCTRL 19
8#define V_EGRGENCTRL(x) ((x) << S_EGRGENCTRL)
9#define F_EGRGENCTRL V_EGRGENCTRL(1U)
10
11#define S_USERSPACESIZE 14
12#define M_USERSPACESIZE 0x1f
13#define V_USERSPACESIZE(x) ((x) << S_USERSPACESIZE)
14
15#define S_HOSTPAGESIZE 11
16#define M_HOSTPAGESIZE 0x7
17#define V_HOSTPAGESIZE(x) ((x) << S_HOSTPAGESIZE)
18
19#define S_FLMODE 9
20#define V_FLMODE(x) ((x) << S_FLMODE)
21#define F_FLMODE V_FLMODE(1U)
22
23#define S_PKTSHIFT 6
24#define M_PKTSHIFT 0x7
25#define V_PKTSHIFT(x) ((x) << S_PKTSHIFT)
26
27#define S_ONEINTMULTQ 5
28#define V_ONEINTMULTQ(x) ((x) << S_ONEINTMULTQ)
29#define F_ONEINTMULTQ V_ONEINTMULTQ(1U)
30
31#define S_BIGENDIANINGRESS 2
32#define V_BIGENDIANINGRESS(x) ((x) << S_BIGENDIANINGRESS)
33#define F_BIGENDIANINGRESS V_BIGENDIANINGRESS(1U)
34
35#define S_ISCSICOALESCING 1
36#define V_ISCSICOALESCING(x) ((x) << S_ISCSICOALESCING)
37#define F_ISCSICOALESCING V_ISCSICOALESCING(1U)
38
39#define S_GLOBALENABLE 0
40#define V_GLOBALENABLE(x) ((x) << S_GLOBALENABLE)
41#define F_GLOBALENABLE V_GLOBALENABLE(1U)
42
43#define S_AVOIDCQOVFL 24
44#define V_AVOIDCQOVFL(x) ((x) << S_AVOIDCQOVFL)
45#define F_AVOIDCQOVFL V_AVOIDCQOVFL(1U)
46
47#define S_OPTONEINTMULTQ 23
48#define V_OPTONEINTMULTQ(x) ((x) << S_OPTONEINTMULTQ)
49#define F_OPTONEINTMULTQ V_OPTONEINTMULTQ(1U)
50
51#define S_CQCRDTCTRL 22
52#define V_CQCRDTCTRL(x) ((x) << S_CQCRDTCTRL)
53#define F_CQCRDTCTRL V_CQCRDTCTRL(1U)
54
55#define A_SG_KDOORBELL 0x4
56
57#define S_SELEGRCNTX 31
58#define V_SELEGRCNTX(x) ((x) << S_SELEGRCNTX)
59#define F_SELEGRCNTX V_SELEGRCNTX(1U)
60
61#define S_EGRCNTX 0
62#define M_EGRCNTX 0xffff
63#define V_EGRCNTX(x) ((x) << S_EGRCNTX)
64
65#define A_SG_GTS 0x8
66
67#define S_RSPQ 29
68#define M_RSPQ 0x7
69#define V_RSPQ(x) ((x) << S_RSPQ)
70#define G_RSPQ(x) (((x) >> S_RSPQ) & M_RSPQ)
71
72#define S_NEWTIMER 16
73#define M_NEWTIMER 0x1fff
74#define V_NEWTIMER(x) ((x) << S_NEWTIMER)
75
76#define S_NEWINDEX 0
77#define M_NEWINDEX 0xffff
78#define V_NEWINDEX(x) ((x) << S_NEWINDEX)
79
80#define A_SG_CONTEXT_CMD 0xc
81
82#define S_CONTEXT_CMD_OPCODE 28
83#define M_CONTEXT_CMD_OPCODE 0xf
84#define V_CONTEXT_CMD_OPCODE(x) ((x) << S_CONTEXT_CMD_OPCODE)
85
86#define S_CONTEXT_CMD_BUSY 27
87#define V_CONTEXT_CMD_BUSY(x) ((x) << S_CONTEXT_CMD_BUSY)
88#define F_CONTEXT_CMD_BUSY V_CONTEXT_CMD_BUSY(1U)
89
90#define S_CQ_CREDIT 20
91
92#define M_CQ_CREDIT 0x7f
93
94#define V_CQ_CREDIT(x) ((x) << S_CQ_CREDIT)
95
96#define G_CQ_CREDIT(x) (((x) >> S_CQ_CREDIT) & M_CQ_CREDIT)
97
98#define S_CQ 19
99
100#define V_CQ(x) ((x) << S_CQ)
101#define F_CQ V_CQ(1U)
102
103#define S_RESPONSEQ 18
104#define V_RESPONSEQ(x) ((x) << S_RESPONSEQ)
105#define F_RESPONSEQ V_RESPONSEQ(1U)
106
107#define S_EGRESS 17
108#define V_EGRESS(x) ((x) << S_EGRESS)
109#define F_EGRESS V_EGRESS(1U)
110
111#define S_FREELIST 16
112#define V_FREELIST(x) ((x) << S_FREELIST)
113#define F_FREELIST V_FREELIST(1U)
114
115#define S_CONTEXT 0
116#define M_CONTEXT 0xffff
117#define V_CONTEXT(x) ((x) << S_CONTEXT)
118
119#define G_CONTEXT(x) (((x) >> S_CONTEXT) & M_CONTEXT)
120
121#define A_SG_CONTEXT_DATA0 0x10
122
123#define A_SG_CONTEXT_DATA1 0x14
124
125#define A_SG_CONTEXT_DATA2 0x18
126
127#define A_SG_CONTEXT_DATA3 0x1c
128
129#define A_SG_CONTEXT_MASK0 0x20
130
131#define A_SG_CONTEXT_MASK1 0x24
132
133#define A_SG_CONTEXT_MASK2 0x28
134
135#define A_SG_CONTEXT_MASK3 0x2c
136
137#define A_SG_RSPQ_CREDIT_RETURN 0x30
138
139#define S_CREDITS 0
140#define M_CREDITS 0xffff
141#define V_CREDITS(x) ((x) << S_CREDITS)
142
143#define A_SG_DATA_INTR 0x34
144
145#define S_ERRINTR 31
146#define V_ERRINTR(x) ((x) << S_ERRINTR)
147#define F_ERRINTR V_ERRINTR(1U)
148
149#define A_SG_HI_DRB_HI_THRSH 0x38
150
151#define A_SG_HI_DRB_LO_THRSH 0x3c
152
153#define A_SG_LO_DRB_HI_THRSH 0x40
154
155#define A_SG_LO_DRB_LO_THRSH 0x44
156
157#define A_SG_RSPQ_FL_STATUS 0x4c
158
159#define S_RSPQ0DISABLED 8
160
161#define A_SG_EGR_RCQ_DRB_THRSH 0x54
162
163#define S_HIRCQDRBTHRSH 16
164#define M_HIRCQDRBTHRSH 0x7ff
165#define V_HIRCQDRBTHRSH(x) ((x) << S_HIRCQDRBTHRSH)
166
167#define S_LORCQDRBTHRSH 0
168#define M_LORCQDRBTHRSH 0x7ff
169#define V_LORCQDRBTHRSH(x) ((x) << S_LORCQDRBTHRSH)
170
171#define A_SG_EGR_CNTX_BADDR 0x58
172
173#define A_SG_INT_CAUSE 0x5c
174
175#define S_RSPQDISABLED 3
176#define V_RSPQDISABLED(x) ((x) << S_RSPQDISABLED)
177#define F_RSPQDISABLED V_RSPQDISABLED(1U)
178
179#define S_RSPQCREDITOVERFOW 2
180#define V_RSPQCREDITOVERFOW(x) ((x) << S_RSPQCREDITOVERFOW)
181#define F_RSPQCREDITOVERFOW V_RSPQCREDITOVERFOW(1U)
182
183#define A_SG_INT_ENABLE 0x60
184
185#define A_SG_CMDQ_CREDIT_TH 0x64
186
187#define S_TIMEOUT 8
188#define M_TIMEOUT 0xffffff
189#define V_TIMEOUT(x) ((x) << S_TIMEOUT)
190
191#define S_THRESHOLD 0
192#define M_THRESHOLD 0xff
193#define V_THRESHOLD(x) ((x) << S_THRESHOLD)
194
195#define A_SG_TIMER_TICK 0x68
196
197#define A_SG_CQ_CONTEXT_BADDR 0x6c
198
199#define A_SG_OCO_BASE 0x70
200
201#define S_BASE1 16
202#define M_BASE1 0xffff
203#define V_BASE1(x) ((x) << S_BASE1)
204
205#define A_SG_DRB_PRI_THRESH 0x74
206
207#define A_PCIX_INT_ENABLE 0x80
208
209#define S_MSIXPARERR 22
210#define M_MSIXPARERR 0x7
212#define V_MSIXPARERR(x) ((x) << S_MSIXPARERR)
213
214#define S_CFPARERR 18
215#define M_CFPARERR 0xf
217#define V_CFPARERR(x) ((x) << S_CFPARERR)
218
219#define S_RFPARERR 14
220#define M_RFPARERR 0xf
222#define V_RFPARERR(x) ((x) << S_RFPARERR)
223
224#define S_WFPARERR 12
225#define M_WFPARERR 0x3
227#define V_WFPARERR(x) ((x) << S_WFPARERR)
228
229#define S_PIOPARERR 11
230#define V_PIOPARERR(x) ((x) << S_PIOPARERR)
231#define F_PIOPARERR V_PIOPARERR(1U)
232
233#define S_DETUNCECCERR 10
234#define V_DETUNCECCERR(x) ((x) << S_DETUNCECCERR)
235#define F_DETUNCECCERR V_DETUNCECCERR(1U)
236
237#define S_DETCORECCERR 9
238#define V_DETCORECCERR(x) ((x) << S_DETCORECCERR)
239#define F_DETCORECCERR V_DETCORECCERR(1U)
240
241#define S_RCVSPLCMPERR 8
242#define V_RCVSPLCMPERR(x) ((x) << S_RCVSPLCMPERR)
243#define F_RCVSPLCMPERR V_RCVSPLCMPERR(1U)
244
245#define S_UNXSPLCMP 7
246#define V_UNXSPLCMP(x) ((x) << S_UNXSPLCMP)
247#define F_UNXSPLCMP V_UNXSPLCMP(1U)
248
249#define S_SPLCMPDIS 6
250#define V_SPLCMPDIS(x) ((x) << S_SPLCMPDIS)
251#define F_SPLCMPDIS V_SPLCMPDIS(1U)
252
253#define S_DETPARERR 5
254#define V_DETPARERR(x) ((x) << S_DETPARERR)
255#define F_DETPARERR V_DETPARERR(1U)
256
257#define S_SIGSYSERR 4
258#define V_SIGSYSERR(x) ((x) << S_SIGSYSERR)
259#define F_SIGSYSERR V_SIGSYSERR(1U)
260
261#define S_RCVMSTABT 3
262#define V_RCVMSTABT(x) ((x) << S_RCVMSTABT)
263#define F_RCVMSTABT V_RCVMSTABT(1U)
264
265#define S_RCVTARABT 2
266#define V_RCVTARABT(x) ((x) << S_RCVTARABT)
267#define F_RCVTARABT V_RCVTARABT(1U)
268
269#define S_SIGTARABT 1
270#define V_SIGTARABT(x) ((x) << S_SIGTARABT)
271#define F_SIGTARABT V_SIGTARABT(1U)
272
273#define S_MSTDETPARERR 0
274#define V_MSTDETPARERR(x) ((x) << S_MSTDETPARERR)
275#define F_MSTDETPARERR V_MSTDETPARERR(1U)
276
277#define A_PCIX_INT_CAUSE 0x84
278
279#define A_PCIX_CFG 0x88
280
281#define S_CLIDECEN 18
282#define V_CLIDECEN(x) ((x) << S_CLIDECEN)
283#define F_CLIDECEN V_CLIDECEN(1U)
284
285#define A_PCIX_MODE 0x8c
286
287#define S_PCLKRANGE 6
288#define M_PCLKRANGE 0x3
289#define V_PCLKRANGE(x) ((x) << S_PCLKRANGE)
290#define G_PCLKRANGE(x) (((x) >> S_PCLKRANGE) & M_PCLKRANGE)
291
292#define S_PCIXINITPAT 2
293#define M_PCIXINITPAT 0xf
294#define V_PCIXINITPAT(x) ((x) << S_PCIXINITPAT)
295#define G_PCIXINITPAT(x) (((x) >> S_PCIXINITPAT) & M_PCIXINITPAT)
296
297#define S_64BIT 0
298#define V_64BIT(x) ((x) << S_64BIT)
299#define F_64BIT V_64BIT(1U)
300
301#define A_PCIE_INT_ENABLE 0x80
302
303#define S_BISTERR 15
304#define M_BISTERR 0xff
306#define V_BISTERR(x) ((x) << S_BISTERR)
307
308#define S_PCIE_MSIXPARERR 12
309#define M_PCIE_MSIXPARERR 0x7
311#define V_PCIE_MSIXPARERR(x) ((x) << S_PCIE_MSIXPARERR)
312
313#define S_PCIE_CFPARERR 11
314#define V_PCIE_CFPARERR(x) ((x) << S_PCIE_CFPARERR)
315#define F_PCIE_CFPARERR V_PCIE_CFPARERR(1U)
316
317#define S_PCIE_RFPARERR 10
318#define V_PCIE_RFPARERR(x) ((x) << S_PCIE_RFPARERR)
319#define F_PCIE_RFPARERR V_PCIE_RFPARERR(1U)
320
321#define S_PCIE_WFPARERR 9
322#define V_PCIE_WFPARERR(x) ((x) << S_PCIE_WFPARERR)
323#define F_PCIE_WFPARERR V_PCIE_WFPARERR(1U)
324
325#define S_PCIE_PIOPARERR 8
326#define V_PCIE_PIOPARERR(x) ((x) << S_PCIE_PIOPARERR)
327#define F_PCIE_PIOPARERR V_PCIE_PIOPARERR(1U)
328
329#define S_UNXSPLCPLERRC 7
330#define V_UNXSPLCPLERRC(x) ((x) << S_UNXSPLCPLERRC)
331#define F_UNXSPLCPLERRC V_UNXSPLCPLERRC(1U)
332
333#define S_UNXSPLCPLERRR 6
334#define V_UNXSPLCPLERRR(x) ((x) << S_UNXSPLCPLERRR)
335#define F_UNXSPLCPLERRR V_UNXSPLCPLERRR(1U)
336
337#define S_PEXERR 0
338#define V_PEXERR(x) ((x) << S_PEXERR)
339#define F_PEXERR V_PEXERR(1U)
340
341#define A_PCIE_INT_CAUSE 0x84
342
343#define A_PCIE_CFG 0x88
344
345#define S_PCIE_CLIDECEN 16
346#define V_PCIE_CLIDECEN(x) ((x) << S_PCIE_CLIDECEN)
347#define F_PCIE_CLIDECEN V_PCIE_CLIDECEN(1U)
348
349#define S_CRSTWRMMODE 0
350#define V_CRSTWRMMODE(x) ((x) << S_CRSTWRMMODE)
351#define F_CRSTWRMMODE V_CRSTWRMMODE(1U)
352
353#define A_PCIE_MODE 0x8c
354
355#define S_NUMFSTTRNSEQRX 10
356#define M_NUMFSTTRNSEQRX 0xff
357#define V_NUMFSTTRNSEQRX(x) ((x) << S_NUMFSTTRNSEQRX)
358#define G_NUMFSTTRNSEQRX(x) (((x) >> S_NUMFSTTRNSEQRX) & M_NUMFSTTRNSEQRX)
359
360#define A_PCIE_PEX_CTRL0 0x98
361
362#define S_NUMFSTTRNSEQ 22
363#define M_NUMFSTTRNSEQ 0xff
364#define V_NUMFSTTRNSEQ(x) ((x) << S_NUMFSTTRNSEQ)
365#define G_NUMFSTTRNSEQ(x) (((x) >> S_NUMFSTTRNSEQ) & M_NUMFSTTRNSEQ)
366
367#define S_REPLAYLMT 2
368#define M_REPLAYLMT 0xfffff
370#define V_REPLAYLMT(x) ((x) << S_REPLAYLMT)
371
372#define A_PCIE_PEX_CTRL1 0x9c
373
374#define S_T3A_ACKLAT 0
375#define M_T3A_ACKLAT 0x7ff
377#define V_T3A_ACKLAT(x) ((x) << S_T3A_ACKLAT)
378
379#define S_ACKLAT 0
380#define M_ACKLAT 0x1fff
382#define V_ACKLAT(x) ((x) << S_ACKLAT)
383
384#define A_PCIE_PEX_ERR 0xa4
385
386#define A_T3DBG_GPIO_EN 0xd0
387
388#define S_GPIO11_OEN 27
389#define V_GPIO11_OEN(x) ((x) << S_GPIO11_OEN)
390#define F_GPIO11_OEN V_GPIO11_OEN(1U)
391
392#define S_GPIO10_OEN 26
393#define V_GPIO10_OEN(x) ((x) << S_GPIO10_OEN)
394#define F_GPIO10_OEN V_GPIO10_OEN(1U)
395
396#define S_GPIO7_OEN 23
397#define V_GPIO7_OEN(x) ((x) << S_GPIO7_OEN)
398#define F_GPIO7_OEN V_GPIO7_OEN(1U)
399
400#define S_GPIO6_OEN 22
401#define V_GPIO6_OEN(x) ((x) << S_GPIO6_OEN)
402#define F_GPIO6_OEN V_GPIO6_OEN(1U)
403
404#define S_GPIO5_OEN 21
405#define V_GPIO5_OEN(x) ((x) << S_GPIO5_OEN)
406#define F_GPIO5_OEN V_GPIO5_OEN(1U)
407
408#define S_GPIO4_OEN 20
409#define V_GPIO4_OEN(x) ((x) << S_GPIO4_OEN)
410#define F_GPIO4_OEN V_GPIO4_OEN(1U)
411
412#define S_GPIO2_OEN 18
413#define V_GPIO2_OEN(x) ((x) << S_GPIO2_OEN)
414#define F_GPIO2_OEN V_GPIO2_OEN(1U)
415
416#define S_GPIO1_OEN 17
417#define V_GPIO1_OEN(x) ((x) << S_GPIO1_OEN)
418#define F_GPIO1_OEN V_GPIO1_OEN(1U)
419
420#define S_GPIO0_OEN 16
421#define V_GPIO0_OEN(x) ((x) << S_GPIO0_OEN)
422#define F_GPIO0_OEN V_GPIO0_OEN(1U)
423
424#define S_GPIO10_OUT_VAL 10
425#define V_GPIO10_OUT_VAL(x) ((x) << S_GPIO10_OUT_VAL)
426#define F_GPIO10_OUT_VAL V_GPIO10_OUT_VAL(1U)
427
428#define S_GPIO7_OUT_VAL 7
429#define V_GPIO7_OUT_VAL(x) ((x) << S_GPIO7_OUT_VAL)
430#define F_GPIO7_OUT_VAL V_GPIO7_OUT_VAL(1U)
431
432#define S_GPIO6_OUT_VAL 6
433#define V_GPIO6_OUT_VAL(x) ((x) << S_GPIO6_OUT_VAL)
434#define F_GPIO6_OUT_VAL V_GPIO6_OUT_VAL(1U)
435
436#define S_GPIO5_OUT_VAL 5
437#define V_GPIO5_OUT_VAL(x) ((x) << S_GPIO5_OUT_VAL)
438#define F_GPIO5_OUT_VAL V_GPIO5_OUT_VAL(1U)
439
440#define S_GPIO4_OUT_VAL 4
441#define V_GPIO4_OUT_VAL(x) ((x) << S_GPIO4_OUT_VAL)
442#define F_GPIO4_OUT_VAL V_GPIO4_OUT_VAL(1U)
443
444#define S_GPIO2_OUT_VAL 2
445#define V_GPIO2_OUT_VAL(x) ((x) << S_GPIO2_OUT_VAL)
446#define F_GPIO2_OUT_VAL V_GPIO2_OUT_VAL(1U)
447
448#define S_GPIO1_OUT_VAL 1
449#define V_GPIO1_OUT_VAL(x) ((x) << S_GPIO1_OUT_VAL)
450#define F_GPIO1_OUT_VAL V_GPIO1_OUT_VAL(1U)
451
452#define S_GPIO0_OUT_VAL 0
453#define V_GPIO0_OUT_VAL(x) ((x) << S_GPIO0_OUT_VAL)
454#define F_GPIO0_OUT_VAL V_GPIO0_OUT_VAL(1U)
455
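Driving a GPIO through A_T3DBG_GPIO_EN takes two bits per pin: the output enable and the output value. A sketch using the t3_set_reg_field() read-modify-write helper the driver declares in common.h (the pin choice is arbitrary):

static void gpio5_drive(struct adapter *adap, int high)
{
	t3_set_reg_field(adap, A_T3DBG_GPIO_EN,
			 F_GPIO5_OEN | F_GPIO5_OUT_VAL,
			 F_GPIO5_OEN | (high ? F_GPIO5_OUT_VAL : 0));
}
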
456#define A_T3DBG_INT_ENABLE 0xd8
457
458#define S_GPIO11 11
459#define V_GPIO11(x) ((x) << S_GPIO11)
460#define F_GPIO11 V_GPIO11(1U)
461
462#define S_GPIO10 10
463#define V_GPIO10(x) ((x) << S_GPIO10)
464#define F_GPIO10 V_GPIO10(1U)
465
466#define S_GPIO7 7
467#define V_GPIO7(x) ((x) << S_GPIO7)
468#define F_GPIO7 V_GPIO7(1U)
469
470#define S_GPIO6 6
471#define V_GPIO6(x) ((x) << S_GPIO6)
472#define F_GPIO6 V_GPIO6(1U)
473
474#define S_GPIO5 5
475#define V_GPIO5(x) ((x) << S_GPIO5)
476#define F_GPIO5 V_GPIO5(1U)
477
478#define S_GPIO4 4
479#define V_GPIO4(x) ((x) << S_GPIO4)
480#define F_GPIO4 V_GPIO4(1U)
481
482#define S_GPIO3 3
483#define V_GPIO3(x) ((x) << S_GPIO3)
484#define F_GPIO3 V_GPIO3(1U)
485
486#define S_GPIO2 2
487#define V_GPIO2(x) ((x) << S_GPIO2)
488#define F_GPIO2 V_GPIO2(1U)
489
490#define S_GPIO1 1
491#define V_GPIO1(x) ((x) << S_GPIO1)
492#define F_GPIO1 V_GPIO1(1U)
493
494#define S_GPIO0 0
495#define V_GPIO0(x) ((x) << S_GPIO0)
496#define F_GPIO0 V_GPIO0(1U)
497
498#define A_T3DBG_INT_CAUSE 0xdc
499
500#define A_T3DBG_GPIO_ACT_LOW 0xf0
501
502#define MC7_PMRX_BASE_ADDR 0x100
503
504#define A_MC7_CFG 0x100
505
506#define S_IFEN 13
507#define V_IFEN(x) ((x) << S_IFEN)
508#define F_IFEN V_IFEN(1U)
509
510#define S_TERM150 11
511#define V_TERM150(x) ((x) << S_TERM150)
512#define F_TERM150 V_TERM150(1U)
513
514#define S_SLOW 10
515#define V_SLOW(x) ((x) << S_SLOW)
516#define F_SLOW V_SLOW(1U)
517
518#define S_WIDTH 8
519#define M_WIDTH 0x3
520#define V_WIDTH(x) ((x) << S_WIDTH)
521#define G_WIDTH(x) (((x) >> S_WIDTH) & M_WIDTH)
522
523#define S_BKS 6
524#define V_BKS(x) ((x) << S_BKS)
525#define F_BKS V_BKS(1U)
526
527#define S_ORG 5
528#define V_ORG(x) ((x) << S_ORG)
529#define F_ORG V_ORG(1U)
530
531#define S_DEN 2
532#define M_DEN 0x7
533#define V_DEN(x) ((x) << S_DEN)
534#define G_DEN(x) (((x) >> S_DEN) & M_DEN)
535
536#define S_RDY 1
537#define V_RDY(x) ((x) << S_RDY)
538#define F_RDY V_RDY(1U)
539
540#define S_CLKEN 0
541#define V_CLKEN(x) ((x) << S_CLKEN)
542#define F_CLKEN V_CLKEN(1U)
543
544#define A_MC7_MODE 0x104
545
546#define S_BUSY 31
547#define V_BUSY(x) ((x) << S_BUSY)
548#define F_BUSY V_BUSY(1U)
549
554#define A_MC7_EXT_MODE1 0x108
555
556#define A_MC7_EXT_MODE2 0x10c
557
558#define A_MC7_EXT_MODE3 0x110
559
560#define A_MC7_PRE 0x114
561
562#define A_MC7_REF 0x118
563
564#define S_PREREFDIV 1
565#define M_PREREFDIV 0x3fff
566#define V_PREREFDIV(x) ((x) << S_PREREFDIV)
567
568#define S_PERREFEN 0
569#define V_PERREFEN(x) ((x) << S_PERREFEN)
570#define F_PERREFEN V_PERREFEN(1U)
571
572#define A_MC7_DLL 0x11c
573
574#define S_DLLENB 1
575#define V_DLLENB(x) ((x) << S_DLLENB)
576#define F_DLLENB V_DLLENB(1U)
577
578#define S_DLLRST 0
579#define V_DLLRST(x) ((x) << S_DLLRST)
580#define F_DLLRST V_DLLRST(1U)
581
582#define A_MC7_PARM 0x120
583
584#define S_ACTTOPREDLY 26
585#define M_ACTTOPREDLY 0xf
586#define V_ACTTOPREDLY(x) ((x) << S_ACTTOPREDLY)
587
588#define S_ACTTORDWRDLY 23
589#define M_ACTTORDWRDLY 0x7
590#define V_ACTTORDWRDLY(x) ((x) << S_ACTTORDWRDLY)
591
592#define S_PRECYC 20
593#define M_PRECYC 0x7
594#define V_PRECYC(x) ((x) << S_PRECYC)
595
596#define S_REFCYC 13
597#define M_REFCYC 0x7f
598#define V_REFCYC(x) ((x) << S_REFCYC)
599
600#define S_BKCYC 8
601#define M_BKCYC 0x1f
602#define V_BKCYC(x) ((x) << S_BKCYC)
603
604#define S_WRTORDDLY 4
605#define M_WRTORDDLY 0xf
606#define V_WRTORDDLY(x) ((x) << S_WRTORDDLY)
607
608#define S_RDTOWRDLY 0
609#define M_RDTOWRDLY 0xf
610#define V_RDTOWRDLY(x) ((x) << S_RDTOWRDLY)
611
612#define A_MC7_CAL 0x128
613
614#define S_BUSY 31
615#define V_BUSY(x) ((x) << S_BUSY)
616#define F_BUSY V_BUSY(1U)
617
622#define S_CAL_FAULT 30
623#define V_CAL_FAULT(x) ((x) << S_CAL_FAULT)
624#define F_CAL_FAULT V_CAL_FAULT(1U)
625
626#define S_SGL_CAL_EN 20
627#define V_SGL_CAL_EN(x) ((x) << S_SGL_CAL_EN)
628#define F_SGL_CAL_EN V_SGL_CAL_EN(1U)
629
630#define A_MC7_ERR_ADDR 0x12c
631
632#define A_MC7_ECC 0x130
633
634#define S_ECCCHKEN 1
635#define V_ECCCHKEN(x) ((x) << S_ECCCHKEN)
636#define F_ECCCHKEN V_ECCCHKEN(1U)
637
638#define S_ECCGENEN 0
639#define V_ECCGENEN(x) ((x) << S_ECCGENEN)
640#define F_ECCGENEN V_ECCGENEN(1U)
641
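Enabling ECC on an MC7 memory instance means setting both the generate bit (write path) and the check bit (read path). A sketch; the mc7_offset parameter reflects the assumption that each MC7 instance (the PMRX, PMTX, and CM base addresses in this section) exposes the same register layout at its own base:

static void mc7_ecc_enable(struct adapter *adap, unsigned int mc7_offset)
{
	/* mc7_offset is 0 for PMRX, or the instance's distance from it */
	t3_set_reg_field(adap, A_MC7_ECC + mc7_offset,
			 F_ECCGENEN | F_ECCCHKEN,
			 F_ECCGENEN | F_ECCCHKEN);
}
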
642#define A_MC7_CE_ADDR 0x134
643
644#define A_MC7_CE_DATA0 0x138
645
646#define A_MC7_CE_DATA1 0x13c
647
648#define A_MC7_CE_DATA2 0x140
649
650#define S_DATA 0
651#define M_DATA 0xff
653#define G_DATA(x) (((x) >> S_DATA) & M_DATA)
654
655#define A_MC7_UE_ADDR 0x144
656
657#define A_MC7_UE_DATA0 0x148
658
659#define A_MC7_UE_DATA1 0x14c
660
661#define A_MC7_UE_DATA2 0x150
662
663#define A_MC7_BD_ADDR 0x154
664
665#define S_ADDR 3
667#define M_ADDR 0x1fffffff
668
669#define A_MC7_BD_DATA0 0x158
670
671#define A_MC7_BD_DATA1 0x15c
672
673#define A_MC7_BD_OP 0x164
674
675#define S_OP 0
677#define V_OP(x) ((x) << S_OP)
678#define F_OP V_OP(1U)
679
683#define A_MC7_BIST_ADDR_BEG 0x168
684
685#define A_MC7_BIST_ADDR_END 0x16c
686
687#define A_MC7_BIST_DATA 0x170
688
689#define A_MC7_BIST_OP 0x174
690
691#define S_CONT 3
692#define V_CONT(x) ((x) << S_CONT)
693#define F_CONT V_CONT(1U)
694
697#define A_MC7_INT_ENABLE 0x178
698
699#define S_AE 17
700#define V_AE(x) ((x) << S_AE)
701#define F_AE V_AE(1U)
702
703#define S_PE 2
704#define M_PE 0x7fff
706#define V_PE(x) ((x) << S_PE)
708#define G_PE(x) (((x) >> S_PE) & M_PE)
709
710#define S_UE 1
711#define V_UE(x) ((x) << S_UE)
712#define F_UE V_UE(1U)
713
714#define S_CE 0
715#define V_CE(x) ((x) << S_CE)
716#define F_CE V_CE(1U)
717
718#define A_MC7_INT_CAUSE 0x17c
719
720#define MC7_PMTX_BASE_ADDR 0x180
721
722#define MC7_CM_BASE_ADDR 0x200
723
724#define A_CIM_BOOT_CFG 0x280
725
726#define S_BOOTADDR 2
727#define M_BOOTADDR 0x3fffffff
728#define V_BOOTADDR(x) ((x) << S_BOOTADDR)
729
730#define A_CIM_SDRAM_BASE_ADDR 0x28c
731
732#define A_CIM_SDRAM_ADDR_SIZE 0x290
733
734#define A_CIM_HOST_INT_ENABLE 0x298
735
736#define A_CIM_HOST_INT_CAUSE 0x29c
737
738#define S_BLKWRPLINT 12
739#define V_BLKWRPLINT(x) ((x) << S_BLKWRPLINT)
740#define F_BLKWRPLINT V_BLKWRPLINT(1U)
741
742#define S_BLKRDPLINT 11
743#define V_BLKRDPLINT(x) ((x) << S_BLKRDPLINT)
744#define F_BLKRDPLINT V_BLKRDPLINT(1U)
745
746#define S_BLKWRCTLINT 10
747#define V_BLKWRCTLINT(x) ((x) << S_BLKWRCTLINT)
748#define F_BLKWRCTLINT V_BLKWRCTLINT(1U)
749
750#define S_BLKRDCTLINT 9
751#define V_BLKRDCTLINT(x) ((x) << S_BLKRDCTLINT)
752#define F_BLKRDCTLINT V_BLKRDCTLINT(1U)
753
754#define S_BLKWRFLASHINT 8
755#define V_BLKWRFLASHINT(x) ((x) << S_BLKWRFLASHINT)
756#define F_BLKWRFLASHINT V_BLKWRFLASHINT(1U)
757
758#define S_BLKRDFLASHINT 7
759#define V_BLKRDFLASHINT(x) ((x) << S_BLKRDFLASHINT)
760#define F_BLKRDFLASHINT V_BLKRDFLASHINT(1U)
761
762#define S_SGLWRFLASHINT 6
763#define V_SGLWRFLASHINT(x) ((x) << S_SGLWRFLASHINT)
764#define F_SGLWRFLASHINT V_SGLWRFLASHINT(1U)
765
766#define S_WRBLKFLASHINT 5
767#define V_WRBLKFLASHINT(x) ((x) << S_WRBLKFLASHINT)
768#define F_WRBLKFLASHINT V_WRBLKFLASHINT(1U)
769
770#define S_BLKWRBOOTINT 4
771#define V_BLKWRBOOTINT(x) ((x) << S_BLKWRBOOTINT)
772#define F_BLKWRBOOTINT V_BLKWRBOOTINT(1U)
773
774#define S_FLASHRANGEINT 2
775#define V_FLASHRANGEINT(x) ((x) << S_FLASHRANGEINT)
776#define F_FLASHRANGEINT V_FLASHRANGEINT(1U)
777
778#define S_SDRAMRANGEINT 1
779#define V_SDRAMRANGEINT(x) ((x) << S_SDRAMRANGEINT)
780#define F_SDRAMRANGEINT V_SDRAMRANGEINT(1U)
781
782#define S_RSVDSPACEINT 0
783#define V_RSVDSPACEINT(x) ((x) << S_RSVDSPACEINT)
784#define F_RSVDSPACEINT V_RSVDSPACEINT(1U)
785
786#define A_CIM_HOST_ACC_CTRL 0x2b0
787
788#define S_HOSTBUSY 17
789#define V_HOSTBUSY(x) ((x) << S_HOSTBUSY)
790#define F_HOSTBUSY V_HOSTBUSY(1U)
791
792#define A_CIM_HOST_ACC_DATA 0x2b4
793
794#define A_TP_IN_CONFIG 0x300
795
796#define S_NICMODE 14
797#define V_NICMODE(x) ((x) << S_NICMODE)
798#define F_NICMODE V_NICMODE(1U)
799
802#define S_IPV6ENABLE 15
803#define V_IPV6ENABLE(x) ((x) << S_IPV6ENABLE)
804#define F_IPV6ENABLE V_IPV6ENABLE(1U)
805
806#define A_TP_OUT_CONFIG 0x304
807
808#define S_VLANEXTRACTIONENABLE 12
809
810#define A_TP_GLOBAL_CONFIG 0x308
811
812#define S_TXPACINGENABLE 24
813#define V_TXPACINGENABLE(x) ((x) << S_TXPACINGENABLE)
814#define F_TXPACINGENABLE V_TXPACINGENABLE(1U)
815
816#define S_PATHMTU 15
817#define V_PATHMTU(x) ((x) << S_PATHMTU)
818#define F_PATHMTU V_PATHMTU(1U)
819
820#define S_IPCHECKSUMOFFLOAD 13
821#define V_IPCHECKSUMOFFLOAD(x) ((x) << S_IPCHECKSUMOFFLOAD)
822#define F_IPCHECKSUMOFFLOAD V_IPCHECKSUMOFFLOAD(1U)
823
824#define S_UDPCHECKSUMOFFLOAD 12
825#define V_UDPCHECKSUMOFFLOAD(x) ((x) << S_UDPCHECKSUMOFFLOAD)
826#define F_UDPCHECKSUMOFFLOAD V_UDPCHECKSUMOFFLOAD(1U)
827
828#define S_TCPCHECKSUMOFFLOAD 11
829#define V_TCPCHECKSUMOFFLOAD(x) ((x) << S_TCPCHECKSUMOFFLOAD)
830#define F_TCPCHECKSUMOFFLOAD V_TCPCHECKSUMOFFLOAD(1U)
831
832#define S_IPTTL 0
833#define M_IPTTL 0xff
834#define V_IPTTL(x) ((x) << S_IPTTL)
835
836#define A_TP_CMM_MM_BASE 0x314
837
838#define A_TP_CMM_TIMER_BASE 0x318
839
840#define S_CMTIMERMAXNUM 28
841#define M_CMTIMERMAXNUM 0x3
842#define V_CMTIMERMAXNUM(x) ((x) << S_CMTIMERMAXNUM)
843
844#define A_TP_PMM_SIZE 0x31c
845
846#define A_TP_PMM_TX_BASE 0x320
847
848#define A_TP_PMM_RX_BASE 0x328
849
850#define A_TP_PMM_RX_PAGE_SIZE 0x32c
851
852#define A_TP_PMM_RX_MAX_PAGE 0x330
853
854#define A_TP_PMM_TX_PAGE_SIZE 0x334
855
856#define A_TP_PMM_TX_MAX_PAGE 0x338
857
858#define A_TP_TCP_OPTIONS 0x340
859
860#define S_MTUDEFAULT 16
861#define M_MTUDEFAULT 0xffff
862#define V_MTUDEFAULT(x) ((x) << S_MTUDEFAULT)
863
864#define S_MTUENABLE 10
865#define V_MTUENABLE(x) ((x) << S_MTUENABLE)
866#define F_MTUENABLE V_MTUENABLE(1U)
867
868#define S_SACKRX 8
869#define V_SACKRX(x) ((x) << S_SACKRX)
870#define F_SACKRX V_SACKRX(1U)
871
872#define S_SACKMODE 4
874#define M_SACKMODE 0x3
876#define V_SACKMODE(x) ((x) << S_SACKMODE)
877
878#define S_WINDOWSCALEMODE 2
879#define M_WINDOWSCALEMODE 0x3
880#define V_WINDOWSCALEMODE(x) ((x) << S_WINDOWSCALEMODE)
881
882#define S_TIMESTAMPSMODE 0
884#define M_TIMESTAMPSMODE 0x3
886#define V_TIMESTAMPSMODE(x) ((x) << S_TIMESTAMPSMODE)
887
888#define A_TP_DACK_CONFIG 0x344
889
890#define S_AUTOSTATE3 30
891#define M_AUTOSTATE3 0x3
892#define V_AUTOSTATE3(x) ((x) << S_AUTOSTATE3)
893
894#define S_AUTOSTATE2 28
895#define M_AUTOSTATE2 0x3
896#define V_AUTOSTATE2(x) ((x) << S_AUTOSTATE2)
897
898#define S_AUTOSTATE1 26
899#define M_AUTOSTATE1 0x3
900#define V_AUTOSTATE1(x) ((x) << S_AUTOSTATE1)
901
902#define S_BYTETHRESHOLD 5
903#define M_BYTETHRESHOLD 0xfffff
904#define V_BYTETHRESHOLD(x) ((x) << S_BYTETHRESHOLD)
905
906#define S_MSSTHRESHOLD 3
907#define M_MSSTHRESHOLD 0x3
908#define V_MSSTHRESHOLD(x) ((x) << S_MSSTHRESHOLD)
909
910#define S_AUTOCAREFUL 2
911#define V_AUTOCAREFUL(x) ((x) << S_AUTOCAREFUL)
912#define F_AUTOCAREFUL V_AUTOCAREFUL(1U)
913
914#define S_AUTOENABLE 1
915#define V_AUTOENABLE(x) ((x) << S_AUTOENABLE)
916#define F_AUTOENABLE V_AUTOENABLE(1U)
917
918#define S_DACK_MODE 0
919#define V_DACK_MODE(x) ((x) << S_DACK_MODE)
920#define F_DACK_MODE V_DACK_MODE(1U)
921
922#define A_TP_PC_CONFIG 0x348
923
924#define S_TXTOSQUEUEMAPMODE 26
925#define V_TXTOSQUEUEMAPMODE(x) ((x) << S_TXTOSQUEUEMAPMODE)
926#define F_TXTOSQUEUEMAPMODE V_TXTOSQUEUEMAPMODE(1U)
927
928#define S_ENABLEEPCMDAFULL 23
929#define V_ENABLEEPCMDAFULL(x) ((x) << S_ENABLEEPCMDAFULL)
930#define F_ENABLEEPCMDAFULL V_ENABLEEPCMDAFULL(1U)
931
932#define S_MODULATEUNIONMODE 22
933#define V_MODULATEUNIONMODE(x) ((x) << S_MODULATEUNIONMODE)
934#define F_MODULATEUNIONMODE V_MODULATEUNIONMODE(1U)
935
936#define S_TXDEFERENABLE 20
937#define V_TXDEFERENABLE(x) ((x) << S_TXDEFERENABLE)
938#define F_TXDEFERENABLE V_TXDEFERENABLE(1U)
939
940#define S_RXCONGESTIONMODE 19
941#define V_RXCONGESTIONMODE(x) ((x) << S_RXCONGESTIONMODE)
942#define F_RXCONGESTIONMODE V_RXCONGESTIONMODE(1U)
943
944#define S_HEARBEATDACK 16
945#define V_HEARBEATDACK(x) ((x) << S_HEARBEATDACK)
946#define F_HEARBEATDACK V_HEARBEATDACK(1U)
947
948#define S_TXCONGESTIONMODE 15
949#define V_TXCONGESTIONMODE(x) ((x) << S_TXCONGESTIONMODE)
950#define F_TXCONGESTIONMODE V_TXCONGESTIONMODE(1U)
951
952#define S_ENABLEOCSPIFULL 30
953#define V_ENABLEOCSPIFULL(x) ((x) << S_ENABLEOCSPIFULL)
954#define F_ENABLEOCSPIFULL V_ENABLEOCSPIFULL(1U)
955
956#define S_LOCKTID 28
957#define V_LOCKTID(x) ((x) << S_LOCKTID)
958#define F_LOCKTID V_LOCKTID(1U)
959
960#define A_TP_PC_CONFIG2 0x34c
961
962#define S_CHDRAFULL 4
963#define V_CHDRAFULL(x) ((x) << S_CHDRAFULL)
964#define F_CHDRAFULL V_CHDRAFULL(1U)
965
966#define A_TP_TCP_BACKOFF_REG0 0x350
967
968#define A_TP_TCP_BACKOFF_REG1 0x354
969
970#define A_TP_TCP_BACKOFF_REG2 0x358
971
972#define A_TP_TCP_BACKOFF_REG3 0x35c
973
974#define A_TP_PARA_REG2 0x368
975
976#define S_MAXRXDATA 16
977#define M_MAXRXDATA 0xffff
978#define V_MAXRXDATA(x) ((x) << S_MAXRXDATA)
979
980#define S_RXCOALESCESIZE 0
981#define M_RXCOALESCESIZE 0xffff
982#define V_RXCOALESCESIZE(x) ((x) << S_RXCOALESCESIZE)
983
984#define A_TP_PARA_REG3 0x36c
985
986#define S_TXDATAACKIDX 16
987#define M_TXDATAACKIDX 0xf
989#define V_TXDATAACKIDX(x) ((x) << S_TXDATAACKIDX)
990
991#define S_TXPACEAUTOSTRICT 10
992#define V_TXPACEAUTOSTRICT(x) ((x) << S_TXPACEAUTOSTRICT)
993#define F_TXPACEAUTOSTRICT V_TXPACEAUTOSTRICT(1U)
994
995#define S_TXPACEFIXED 9
996#define V_TXPACEFIXED(x) ((x) << S_TXPACEFIXED)
997#define F_TXPACEFIXED V_TXPACEFIXED(1U)
998
999#define S_TXPACEAUTO 8
1000#define V_TXPACEAUTO(x) ((x) << S_TXPACEAUTO)
1001#define F_TXPACEAUTO V_TXPACEAUTO(1U)
1002
1003#define S_RXCOALESCEENABLE 1
1004#define V_RXCOALESCEENABLE(x) ((x) << S_RXCOALESCEENABLE)
1005#define F_RXCOALESCEENABLE V_RXCOALESCEENABLE(1U)
1006
1007#define S_RXCOALESCEPSHEN 0
1008#define V_RXCOALESCEPSHEN(x) ((x) << S_RXCOALESCEPSHEN)
1009#define F_RXCOALESCEPSHEN V_RXCOALESCEPSHEN(1U)
1010
1011#define A_TP_PARA_REG4 0x370
1012
1013#define A_TP_PARA_REG6 0x378
1014
1015#define S_T3A_ENABLEESND 13
1016#define V_T3A_ENABLEESND(x) ((x) << S_T3A_ENABLEESND)
1017#define F_T3A_ENABLEESND V_T3A_ENABLEESND(1U)
1018
1019#define S_ENABLEESND 11
1020#define V_ENABLEESND(x) ((x) << S_ENABLEESND)
1021#define F_ENABLEESND V_ENABLEESND(1U)
1022
1023#define A_TP_PARA_REG7 0x37c
1024
1025#define S_PMMAXXFERLEN1 16
1026#define M_PMMAXXFERLEN1 0xffff
1027#define V_PMMAXXFERLEN1(x) ((x) << S_PMMAXXFERLEN1)
1028
1029#define S_PMMAXXFERLEN0 0
1030#define M_PMMAXXFERLEN0 0xffff
1031#define V_PMMAXXFERLEN0(x) ((x) << S_PMMAXXFERLEN0)
1032
1033#define A_TP_TIMER_RESOLUTION 0x390
1034
1035#define S_TIMERRESOLUTION 16
1036#define M_TIMERRESOLUTION 0xff
1037#define V_TIMERRESOLUTION(x) ((x) << S_TIMERRESOLUTION)
1038
1039#define S_TIMESTAMPRESOLUTION 8
1040#define M_TIMESTAMPRESOLUTION 0xff
1041#define V_TIMESTAMPRESOLUTION(x) ((x) << S_TIMESTAMPRESOLUTION)
1042
1043#define S_DELAYEDACKRESOLUTION 0
1044#define M_DELAYEDACKRESOLUTION 0xff
1045#define V_DELAYEDACKRESOLUTION(x) ((x) << S_DELAYEDACKRESOLUTION)
1046
1047#define A_TP_MSL 0x394
1048
1049#define A_TP_RXT_MIN 0x398
1050
1051#define A_TP_RXT_MAX 0x39c
1052
1053#define A_TP_PERS_MIN 0x3a0
1054
1055#define A_TP_PERS_MAX 0x3a4
1056
1057#define A_TP_KEEP_IDLE 0x3a8
1058
1059#define A_TP_KEEP_INTVL 0x3ac
1060
1061#define A_TP_INIT_SRTT 0x3b0
1062
1063#define A_TP_DACK_TIMER 0x3b4
1064
1065#define A_TP_FINWAIT2_TIMER 0x3b8
1066
1067#define A_TP_SHIFT_CNT 0x3c0
1068
1069#define S_SYNSHIFTMAX 24
1071#define M_SYNSHIFTMAX 0xff
1073#define V_SYNSHIFTMAX(x) ((x) << S_SYNSHIFTMAX)
1074
1075#define S_RXTSHIFTMAXR1 20
1077#define M_RXTSHIFTMAXR1 0xf
1079#define V_RXTSHIFTMAXR1(x) ((x) << S_RXTSHIFTMAXR1)
1080
1081#define S_RXTSHIFTMAXR2 16
1083#define M_RXTSHIFTMAXR2 0xf
1085#define V_RXTSHIFTMAXR2(x) ((x) << S_RXTSHIFTMAXR2)
1086
1087#define S_PERSHIFTBACKOFFMAX 12
1088#define M_PERSHIFTBACKOFFMAX 0xf
1089#define V_PERSHIFTBACKOFFMAX(x) ((x) << S_PERSHIFTBACKOFFMAX)
1090
1091#define S_PERSHIFTMAX 8
1092#define M_PERSHIFTMAX 0xf
1093#define V_PERSHIFTMAX(x) ((x) << S_PERSHIFTMAX)
1094
1095#define S_KEEPALIVEMAX 0
1097#define M_KEEPALIVEMAX 0xff
1099#define V_KEEPALIVEMAX(x) ((x) << S_KEEPALIVEMAX)
1100
1101#define A_TP_MTU_PORT_TABLE 0x3d0
1102
1103#define A_TP_CCTRL_TABLE 0x3dc
1104
1105#define A_TP_MTU_TABLE 0x3e4
1106
1107#define A_TP_RSS_MAP_TABLE 0x3e8
1108
1109#define A_TP_RSS_LKP_TABLE 0x3ec
1110
1111#define A_TP_RSS_CONFIG 0x3f0
1112
1113#define S_TNL4TUPEN 29
1114#define V_TNL4TUPEN(x) ((x) << S_TNL4TUPEN)
1115#define F_TNL4TUPEN V_TNL4TUPEN(1U)
1116
1117#define S_TNL2TUPEN 28
1118#define V_TNL2TUPEN(x) ((x) << S_TNL2TUPEN)
1119#define F_TNL2TUPEN V_TNL2TUPEN(1U)
1120
1121#define S_TNLPRTEN 26
1122#define V_TNLPRTEN(x) ((x) << S_TNLPRTEN)
1123#define F_TNLPRTEN V_TNLPRTEN(1U)
1124
1125#define S_TNLMAPEN 25
1126#define V_TNLMAPEN(x) ((x) << S_TNLMAPEN)
1127#define F_TNLMAPEN V_TNLMAPEN(1U)
1128
1129#define S_TNLLKPEN 24
1130#define V_TNLLKPEN(x) ((x) << S_TNLLKPEN)
1131#define F_TNLLKPEN V_TNLLKPEN(1U)
1132
1133#define S_RRCPLCPUSIZE 4
1134#define M_RRCPLCPUSIZE 0x7
1135#define V_RRCPLCPUSIZE(x) ((x) << S_RRCPLCPUSIZE)
1136
1137#define S_RQFEEDBACKENABLE 3
1138#define V_RQFEEDBACKENABLE(x) ((x) << S_RQFEEDBACKENABLE)
1139#define F_RQFEEDBACKENABLE V_RQFEEDBACKENABLE(1U)
1140
1141#define S_DISABLE 0
1142
1143#define A_TP_TM_PIO_ADDR 0x418
1144
1145#define A_TP_TM_PIO_DATA 0x41c
1146
1147#define A_TP_TX_MOD_QUE_TABLE 0x420
1148
1149#define A_TP_TX_RESOURCE_LIMIT 0x424
1150
1151#define A_TP_TX_MOD_QUEUE_REQ_MAP 0x428
1152
1153#define S_TX_MOD_QUEUE_REQ_MAP 0
1154#define M_TX_MOD_QUEUE_REQ_MAP 0xff
1155#define V_TX_MOD_QUEUE_REQ_MAP(x) ((x) << S_TX_MOD_QUEUE_REQ_MAP)
1156
1157#define A_TP_TX_MOD_QUEUE_WEIGHT1 0x42c
1158
1159#define A_TP_TX_MOD_QUEUE_WEIGHT0 0x430
1160
1161#define A_TP_MOD_CHANNEL_WEIGHT 0x434
1162
1163#define A_TP_PIO_ADDR 0x440
1164
1165#define A_TP_PIO_DATA 0x444
1166
1167#define A_TP_RESET 0x44c
1168
1169#define S_FLSTINITENABLE 1
1170#define V_FLSTINITENABLE(x) ((x) << S_FLSTINITENABLE)
1171#define F_FLSTINITENABLE V_FLSTINITENABLE(1U)
1172
1173#define S_TPRESET 0
1174#define V_TPRESET(x) ((x) << S_TPRESET)
1175#define F_TPRESET V_TPRESET(1U)
1176
1177#define A_TP_CMM_MM_RX_FLST_BASE 0x460
1178
1179#define A_TP_CMM_MM_TX_FLST_BASE 0x464
1180
1181#define A_TP_CMM_MM_PS_FLST_BASE 0x468
1182
1183#define A_TP_MIB_INDEX 0x450
1184
1185#define A_TP_MIB_RDATA 0x454
1186
1187#define A_TP_CMM_MM_MAX_PSTRUCT 0x46c
1188
1189#define A_TP_INT_ENABLE 0x470
1190
1191#define A_TP_INT_CAUSE 0x474
1192
1193#define A_TP_TX_MOD_Q1_Q0_RATE_LIMIT 0x8
1194
1195#define A_TP_TX_DROP_CFG_CH0 0x12b
1196
1197#define A_TP_TX_DROP_MODE 0x12f
1198
1199#define A_TP_EGRESS_CONFIG 0x145
1200
1201#define S_REWRITEFORCETOSIZE 0
1202#define V_REWRITEFORCETOSIZE(x) ((x) << S_REWRITEFORCETOSIZE)
1203#define F_REWRITEFORCETOSIZE V_REWRITEFORCETOSIZE(1U)
1204
1205#define A_TP_TX_TRC_KEY0 0x20
1206
1207#define A_TP_RX_TRC_KEY0 0x120
1208
1209#define A_ULPRX_CTL 0x500
1210
1211#define S_ROUND_ROBIN 4
1212#define V_ROUND_ROBIN(x) ((x) << S_ROUND_ROBIN)
1213#define F_ROUND_ROBIN V_ROUND_ROBIN(1U)
1214
1215#define A_ULPRX_INT_ENABLE 0x504
1216
1217#define S_PARERR 0
1218#define V_PARERR(x) ((x) << S_PARERR)
1219#define F_PARERR V_PARERR(1U)
1220
1221#define A_ULPRX_INT_CAUSE 0x508
1222
1223#define A_ULPRX_ISCSI_LLIMIT 0x50c
1224
1225#define A_ULPRX_ISCSI_ULIMIT 0x510
1226
1227#define A_ULPRX_ISCSI_TAGMASK 0x514
1228
1229#define A_ULPRX_TDDP_LLIMIT 0x51c
1230
1231#define A_ULPRX_TDDP_ULIMIT 0x520
1232
1233#define A_ULPRX_STAG_LLIMIT 0x52c
1234
1235#define A_ULPRX_STAG_ULIMIT 0x530
1236
1237#define A_ULPRX_RQ_LLIMIT 0x534
1239
1240#define A_ULPRX_RQ_ULIMIT 0x538
1242
1243#define A_ULPRX_PBL_LLIMIT 0x53c
1244
1245#define A_ULPRX_PBL_ULIMIT 0x540
1247
1248#define A_ULPRX_TDDP_TAGMASK 0x524
1249
1259#define A_ULPTX_CONFIG 0x580
1260
1261#define S_CFG_RR_ARB 0
1262#define V_CFG_RR_ARB(x) ((x) << S_CFG_RR_ARB)
1263#define F_CFG_RR_ARB V_CFG_RR_ARB(1U)
1264
1265#define A_ULPTX_INT_ENABLE 0x584
1266
1267#define S_PBL_BOUND_ERR_CH1 1
1268#define V_PBL_BOUND_ERR_CH1(x) ((x) << S_PBL_BOUND_ERR_CH1)
1269#define F_PBL_BOUND_ERR_CH1 V_PBL_BOUND_ERR_CH1(1U)
1270
1271#define S_PBL_BOUND_ERR_CH0 0
1272#define V_PBL_BOUND_ERR_CH0(x) ((x) << S_PBL_BOUND_ERR_CH0)
1273#define F_PBL_BOUND_ERR_CH0 V_PBL_BOUND_ERR_CH0(1U)
1274
1275#define A_ULPTX_INT_CAUSE 0x588
1276
1277#define A_ULPTX_TPT_LLIMIT 0x58c
1278
1279#define A_ULPTX_TPT_ULIMIT 0x590
1280
1281#define A_ULPTX_PBL_LLIMIT 0x594
1282
1283#define A_ULPTX_PBL_ULIMIT 0x598
1284
1285#define A_ULPTX_DMA_WEIGHT 0x5ac
1286
1287#define S_D1_WEIGHT 16
1288#define M_D1_WEIGHT 0xffff
1289#define V_D1_WEIGHT(x) ((x) << S_D1_WEIGHT)
1290
1291#define S_D0_WEIGHT 0
1292#define M_D0_WEIGHT 0xffff
1293#define V_D0_WEIGHT(x) ((x) << S_D0_WEIGHT)
1294
1295#define A_PM1_RX_CFG 0x5c0
1296
1297#define A_PM1_RX_INT_ENABLE 0x5d8
1298
1299#define S_ZERO_E_CMD_ERROR 18
1300#define V_ZERO_E_CMD_ERROR(x) ((x) << S_ZERO_E_CMD_ERROR)
1301#define F_ZERO_E_CMD_ERROR V_ZERO_E_CMD_ERROR(1U)
1302
1303#define S_IESPI0_FIFO2X_RX_FRAMING_ERROR 17
1304#define V_IESPI0_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_IESPI0_FIFO2X_RX_FRAMING_ERROR)
1305#define F_IESPI0_FIFO2X_RX_FRAMING_ERROR V_IESPI0_FIFO2X_RX_FRAMING_ERROR(1U)
1306
1307#define S_IESPI1_FIFO2X_RX_FRAMING_ERROR 16
1308#define V_IESPI1_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_IESPI1_FIFO2X_RX_FRAMING_ERROR)
1309#define F_IESPI1_FIFO2X_RX_FRAMING_ERROR V_IESPI1_FIFO2X_RX_FRAMING_ERROR(1U)
1310
1311#define S_IESPI0_RX_FRAMING_ERROR 15
1312#define V_IESPI0_RX_FRAMING_ERROR(x) ((x) << S_IESPI0_RX_FRAMING_ERROR)
1313#define F_IESPI0_RX_FRAMING_ERROR V_IESPI0_RX_FRAMING_ERROR(1U)
1314
1315#define S_IESPI1_RX_FRAMING_ERROR 14
1316#define V_IESPI1_RX_FRAMING_ERROR(x) ((x) << S_IESPI1_RX_FRAMING_ERROR)
1317#define F_IESPI1_RX_FRAMING_ERROR V_IESPI1_RX_FRAMING_ERROR(1U)
1318
1319#define S_IESPI0_TX_FRAMING_ERROR 13
1320#define V_IESPI0_TX_FRAMING_ERROR(x) ((x) << S_IESPI0_TX_FRAMING_ERROR)
1321#define F_IESPI0_TX_FRAMING_ERROR V_IESPI0_TX_FRAMING_ERROR(1U)
1322
1323#define S_IESPI1_TX_FRAMING_ERROR 12
1324#define V_IESPI1_TX_FRAMING_ERROR(x) ((x) << S_IESPI1_TX_FRAMING_ERROR)
1325#define F_IESPI1_TX_FRAMING_ERROR V_IESPI1_TX_FRAMING_ERROR(1U)
1326
1327#define S_OCSPI0_RX_FRAMING_ERROR 11
1328#define V_OCSPI0_RX_FRAMING_ERROR(x) ((x) << S_OCSPI0_RX_FRAMING_ERROR)
1329#define F_OCSPI0_RX_FRAMING_ERROR V_OCSPI0_RX_FRAMING_ERROR(1U)
1330
1331#define S_OCSPI1_RX_FRAMING_ERROR 10
1332#define V_OCSPI1_RX_FRAMING_ERROR(x) ((x) << S_OCSPI1_RX_FRAMING_ERROR)
1333#define F_OCSPI1_RX_FRAMING_ERROR V_OCSPI1_RX_FRAMING_ERROR(1U)
1334
1335#define S_OCSPI0_TX_FRAMING_ERROR 9
1336#define V_OCSPI0_TX_FRAMING_ERROR(x) ((x) << S_OCSPI0_TX_FRAMING_ERROR)
1337#define F_OCSPI0_TX_FRAMING_ERROR V_OCSPI0_TX_FRAMING_ERROR(1U)
1338
1339#define S_OCSPI1_TX_FRAMING_ERROR 8
1340#define V_OCSPI1_TX_FRAMING_ERROR(x) ((x) << S_OCSPI1_TX_FRAMING_ERROR)
1341#define F_OCSPI1_TX_FRAMING_ERROR V_OCSPI1_TX_FRAMING_ERROR(1U)
1342
1343#define S_OCSPI0_OFIFO2X_TX_FRAMING_ERROR 7
1344#define V_OCSPI0_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OCSPI0_OFIFO2X_TX_FRAMING_ERROR)
1345#define F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR V_OCSPI0_OFIFO2X_TX_FRAMING_ERROR(1U)
1346
1347#define S_OCSPI1_OFIFO2X_TX_FRAMING_ERROR 6
1348#define V_OCSPI1_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
1349#define F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR V_OCSPI1_OFIFO2X_TX_FRAMING_ERROR(1U)
1350
1351#define S_IESPI_PAR_ERROR 3
1352#define M_IESPI_PAR_ERROR 0x7
1354#define V_IESPI_PAR_ERROR(x) ((x) << S_IESPI_PAR_ERROR)
1355
1356#define S_OCSPI_PAR_ERROR 0
1357#define M_OCSPI_PAR_ERROR 0x7
1359#define V_OCSPI_PAR_ERROR(x) ((x) << S_OCSPI_PAR_ERROR)
1360
1361#define A_PM1_RX_INT_CAUSE 0x5dc
1362
1363#define A_PM1_TX_CFG 0x5e0
1364
1365#define A_PM1_TX_INT_ENABLE 0x5f8
1366
1367#define S_ZERO_C_CMD_ERROR 18
1368#define V_ZERO_C_CMD_ERROR(x) ((x) << S_ZERO_C_CMD_ERROR)
1369#define F_ZERO_C_CMD_ERROR V_ZERO_C_CMD_ERROR(1U)
1370
1371#define S_ICSPI0_FIFO2X_RX_FRAMING_ERROR 17
1372#define V_ICSPI0_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_ICSPI0_FIFO2X_RX_FRAMING_ERROR)
1373#define F_ICSPI0_FIFO2X_RX_FRAMING_ERROR V_ICSPI0_FIFO2X_RX_FRAMING_ERROR(1U)
1374
1375#define S_ICSPI1_FIFO2X_RX_FRAMING_ERROR 16
1376#define V_ICSPI1_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_ICSPI1_FIFO2X_RX_FRAMING_ERROR)
1377#define F_ICSPI1_FIFO2X_RX_FRAMING_ERROR V_ICSPI1_FIFO2X_RX_FRAMING_ERROR(1U)
1378
1379#define S_ICSPI0_RX_FRAMING_ERROR 15
1380#define V_ICSPI0_RX_FRAMING_ERROR(x) ((x) << S_ICSPI0_RX_FRAMING_ERROR)
1381#define F_ICSPI0_RX_FRAMING_ERROR V_ICSPI0_RX_FRAMING_ERROR(1U)
1382
1383#define S_ICSPI1_RX_FRAMING_ERROR 14
1384#define V_ICSPI1_RX_FRAMING_ERROR(x) ((x) << S_ICSPI1_RX_FRAMING_ERROR)
1385#define F_ICSPI1_RX_FRAMING_ERROR V_ICSPI1_RX_FRAMING_ERROR(1U)
1386
1387#define S_ICSPI0_TX_FRAMING_ERROR 13
1388#define V_ICSPI0_TX_FRAMING_ERROR(x) ((x) << S_ICSPI0_TX_FRAMING_ERROR)
1389#define F_ICSPI0_TX_FRAMING_ERROR V_ICSPI0_TX_FRAMING_ERROR(1U)
1390
1391#define S_ICSPI1_TX_FRAMING_ERROR 12
1392#define V_ICSPI1_TX_FRAMING_ERROR(x) ((x) << S_ICSPI1_TX_FRAMING_ERROR)
1393#define F_ICSPI1_TX_FRAMING_ERROR V_ICSPI1_TX_FRAMING_ERROR(1U)
1394
1395#define S_OESPI0_RX_FRAMING_ERROR 11
1396#define V_OESPI0_RX_FRAMING_ERROR(x) ((x) << S_OESPI0_RX_FRAMING_ERROR)
1397#define F_OESPI0_RX_FRAMING_ERROR V_OESPI0_RX_FRAMING_ERROR(1U)
1398
1399#define S_OESPI1_RX_FRAMING_ERROR 10
1400#define V_OESPI1_RX_FRAMING_ERROR(x) ((x) << S_OESPI1_RX_FRAMING_ERROR)
1401#define F_OESPI1_RX_FRAMING_ERROR V_OESPI1_RX_FRAMING_ERROR(1U)
1402
1403#define S_OESPI0_TX_FRAMING_ERROR 9
1404#define V_OESPI0_TX_FRAMING_ERROR(x) ((x) << S_OESPI0_TX_FRAMING_ERROR)
1405#define F_OESPI0_TX_FRAMING_ERROR V_OESPI0_TX_FRAMING_ERROR(1U)
1406
1407#define S_OESPI1_TX_FRAMING_ERROR 8
1408#define V_OESPI1_TX_FRAMING_ERROR(x) ((x) << S_OESPI1_TX_FRAMING_ERROR)
1409#define F_OESPI1_TX_FRAMING_ERROR V_OESPI1_TX_FRAMING_ERROR(1U)
1410
1411#define S_OESPI0_OFIFO2X_TX_FRAMING_ERROR 7
1412#define V_OESPI0_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OESPI0_OFIFO2X_TX_FRAMING_ERROR)
1413#define F_OESPI0_OFIFO2X_TX_FRAMING_ERROR V_OESPI0_OFIFO2X_TX_FRAMING_ERROR(1U)
1414
1415#define S_OESPI1_OFIFO2X_TX_FRAMING_ERROR 6
1416#define V_OESPI1_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
1417#define F_OESPI1_OFIFO2X_TX_FRAMING_ERROR V_OESPI1_OFIFO2X_TX_FRAMING_ERROR(1U)
1418
1419#define S_ICSPI_PAR_ERROR 3
1420#define M_ICSPI_PAR_ERROR 0x7
1422#define V_ICSPI_PAR_ERROR(x) ((x) << S_ICSPI_PAR_ERROR)
1423
1424#define S_OESPI_PAR_ERROR 0
1425#define M_OESPI_PAR_ERROR 0x7
1427#define V_OESPI_PAR_ERROR(x) ((x) << S_OESPI_PAR_ERROR)
1428
1429#define A_PM1_TX_INT_CAUSE 0x5fc
1430
1431#define A_MPS_CFG 0x600
1432
1433#define S_TPRXPORTEN 4
1434#define V_TPRXPORTEN(x) ((x) << S_TPRXPORTEN)
1435#define F_TPRXPORTEN V_TPRXPORTEN(1U)
1436
1437#define S_TPTXPORT1EN 3
1438#define V_TPTXPORT1EN(x) ((x) << S_TPTXPORT1EN)
1439#define F_TPTXPORT1EN V_TPTXPORT1EN(1U)
1440
1441#define S_TPTXPORT0EN 2
1442#define V_TPTXPORT0EN(x) ((x) << S_TPTXPORT0EN)
1443#define F_TPTXPORT0EN V_TPTXPORT0EN(1U)
1444
1445#define S_PORT1ACTIVE 1
1446#define V_PORT1ACTIVE(x) ((x) << S_PORT1ACTIVE)
1447#define F_PORT1ACTIVE V_PORT1ACTIVE(1U)
1448
1449#define S_PORT0ACTIVE 0
1450#define V_PORT0ACTIVE(x) ((x) << S_PORT0ACTIVE)
1451#define F_PORT0ACTIVE V_PORT0ACTIVE(1U)
1452
1453#define S_ENFORCEPKT 11
1454#define V_ENFORCEPKT(x) ((x) << S_ENFORCEPKT)
1455#define F_ENFORCEPKT V_ENFORCEPKT(1U)
1456
1457#define A_MPS_INT_ENABLE 0x61c
1458
1459#define S_MCAPARERRENB 6
1460#define M_MCAPARERRENB 0x7
1462#define V_MCAPARERRENB(x) ((x) << S_MCAPARERRENB)
1463
1464#define S_RXTPPARERRENB 4
1465#define M_RXTPPARERRENB 0x3
1467#define V_RXTPPARERRENB(x) ((x) << S_RXTPPARERRENB)
1468
1469#define S_TX1TPPARERRENB 2
1470#define M_TX1TPPARERRENB 0x3
1472#define V_TX1TPPARERRENB(x) ((x) << S_TX1TPPARERRENB)
1473
1474#define S_TX0TPPARERRENB 0
1475#define M_TX0TPPARERRENB 0x3
1477#define V_TX0TPPARERRENB(x) ((x) << S_TX0TPPARERRENB)
1478
1479#define A_MPS_INT_CAUSE 0x620
1480
1481#define S_MCAPARERR 6
1482#define M_MCAPARERR 0x7
1484#define V_MCAPARERR(x) ((x) << S_MCAPARERR)
1485
1486#define S_RXTPPARERR 4
1487#define M_RXTPPARERR 0x3
1489#define V_RXTPPARERR(x) ((x) << S_RXTPPARERR)
1490
1491#define S_TX1TPPARERR 2
1492#define M_TX1TPPARERR 0x3
1494#define V_TX1TPPARERR(x) ((x) << S_TX1TPPARERR)
1495
1496#define S_TX0TPPARERR 0
1497#define M_TX0TPPARERR 0x3
1499#define V_TX0TPPARERR(x) ((x) << S_TX0TPPARERR)
1500
1501#define A_CPL_SWITCH_CNTRL 0x640
1502
1503#define A_CPL_INTR_ENABLE 0x650
1504
1505#define S_CIM_OVFL_ERROR 4
1506#define V_CIM_OVFL_ERROR(x) ((x) << S_CIM_OVFL_ERROR)
1507#define F_CIM_OVFL_ERROR V_CIM_OVFL_ERROR(1U)
1508
1509#define S_TP_FRAMING_ERROR 3
1510#define V_TP_FRAMING_ERROR(x) ((x) << S_TP_FRAMING_ERROR)
1511#define F_TP_FRAMING_ERROR V_TP_FRAMING_ERROR(1U)
1512
1513#define S_SGE_FRAMING_ERROR 2
1514#define V_SGE_FRAMING_ERROR(x) ((x) << S_SGE_FRAMING_ERROR)
1515#define F_SGE_FRAMING_ERROR V_SGE_FRAMING_ERROR(1U)
1516
1517#define S_CIM_FRAMING_ERROR 1
1518#define V_CIM_FRAMING_ERROR(x) ((x) << S_CIM_FRAMING_ERROR)
1519#define F_CIM_FRAMING_ERROR V_CIM_FRAMING_ERROR(1U)
1520
1521#define S_ZERO_SWITCH_ERROR 0
1522#define V_ZERO_SWITCH_ERROR(x) ((x) << S_ZERO_SWITCH_ERROR)
1523#define F_ZERO_SWITCH_ERROR V_ZERO_SWITCH_ERROR(1U)
1524
1525#define A_CPL_INTR_CAUSE 0x654
1526
1527#define A_CPL_MAP_TBL_DATA 0x65c
1528
1529#define A_SMB_GLOBAL_TIME_CFG 0x660
1530
1531#define A_I2C_CFG 0x6a0
1532
1533#define S_I2C_CLKDIV 0
1534#define M_I2C_CLKDIV 0xfff
1535#define V_I2C_CLKDIV(x) ((x) << S_I2C_CLKDIV)
1536
1537#define A_MI1_CFG 0x6b0
1538
1539#define S_CLKDIV 5
1540#define M_CLKDIV 0xff
1541#define V_CLKDIV(x) ((x) << S_CLKDIV)
1542
1543#define S_ST 3
1545#define M_ST 0x3
1547#define V_ST(x) ((x) << S_ST)
1549#define G_ST(x) (((x) >> S_ST) & M_ST)
1550
1551#define S_PREEN 2
1552#define V_PREEN(x) ((x) << S_PREEN)
1553#define F_PREEN V_PREEN(1U)
1554
1555#define S_MDIINV 1
1556#define V_MDIINV(x) ((x) << S_MDIINV)
1557#define F_MDIINV V_MDIINV(1U)
1558
1559#define S_MDIEN 0
1560#define V_MDIEN(x) ((x) << S_MDIEN)
1561#define F_MDIEN V_MDIEN(1U)
1562
1563#define A_MI1_ADDR 0x6b4
1564
1565#define S_PHYADDR 5
1566#define M_PHYADDR 0x1f
1567#define V_PHYADDR(x) ((x) << S_PHYADDR)
1568
1569#define S_REGADDR 0
1570#define M_REGADDR 0x1f
1571#define V_REGADDR(x) ((x) << S_REGADDR)
1572
1573#define A_MI1_DATA 0x6b8
1574
1575#define A_MI1_OP 0x6bc
1576
1577#define S_MDI_OP 0
1578#define M_MDI_OP 0x3
1579#define V_MDI_OP(x) ((x) << S_MDI_OP)
1580
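The MI1 registers implement the MDIO management interface: load the PHY and register addresses, start an operation via A_MI1_OP, and poll the BUSY flag (bit 31, the same S_BUSY position used elsewhere in this header) until the hardware finishes. A sketch of a read, assuming MDI_OP value 2 selects a clause-22 read; the opcode encoding is not given in this header:

static unsigned int mi1_read_sketch(struct adapter *adap, int phy, int reg)
{
	t3_write_reg(adap, A_MI1_ADDR, V_PHYADDR(phy) | V_REGADDR(reg));
	t3_write_reg(adap, A_MI1_OP, V_MDI_OP(2));
	while (t3_read_reg(adap, A_MI1_OP) & F_BUSY)
		cpu_relax();	/* unbounded poll; real code would time out */
	return t3_read_reg(adap, A_MI1_DATA);
}
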
1581#define A_SF_DATA 0x6d8
1582
1583#define A_SF_OP 0x6dc
1584
1585#define S_BYTECNT 1
1586#define M_BYTECNT 0x3
1587#define V_BYTECNT(x) ((x) << S_BYTECNT)
1588
1589#define A_PL_INT_ENABLE0 0x6e0
1590
1591#define S_T3DBG 23
1592#define V_T3DBG(x) ((x) << S_T3DBG)
1593#define F_T3DBG V_T3DBG(1U)
1594
1595#define S_XGMAC0_1 20
1596#define V_XGMAC0_1(x) ((x) << S_XGMAC0_1)
1597#define F_XGMAC0_1 V_XGMAC0_1(1U)
1598
1599#define S_XGMAC0_0 19
1600#define V_XGMAC0_0(x) ((x) << S_XGMAC0_0)
1601#define F_XGMAC0_0 V_XGMAC0_0(1U)
1602
1603#define S_MC5A 18
1604#define V_MC5A(x) ((x) << S_MC5A)
1605#define F_MC5A V_MC5A(1U)
1606
1607#define S_CPL_SWITCH 12
1608#define V_CPL_SWITCH(x) ((x) << S_CPL_SWITCH)
1609#define F_CPL_SWITCH V_CPL_SWITCH(1U)
1610
1611#define S_MPS0 11
1612#define V_MPS0(x) ((x) << S_MPS0)
1613#define F_MPS0 V_MPS0(1U)
1614
1615#define S_PM1_TX 10
1616#define V_PM1_TX(x) ((x) << S_PM1_TX)
1617#define F_PM1_TX V_PM1_TX(1U)
1618
1619#define S_PM1_RX 9
1620#define V_PM1_RX(x) ((x) << S_PM1_RX)
1621#define F_PM1_RX V_PM1_RX(1U)
1622
1623#define S_ULP2_TX 8
1624#define V_ULP2_TX(x) ((x) << S_ULP2_TX)
1625#define F_ULP2_TX V_ULP2_TX(1U)
1626
1627#define S_ULP2_RX 7
1628#define V_ULP2_RX(x) ((x) << S_ULP2_RX)
1629#define F_ULP2_RX V_ULP2_RX(1U)
1630
1631#define S_TP1 6
1632#define V_TP1(x) ((x) << S_TP1)
1633#define F_TP1 V_TP1(1U)
1634
1635#define S_CIM 5
1636#define V_CIM(x) ((x) << S_CIM)
1637#define F_CIM V_CIM(1U)
1638
1639#define S_MC7_CM 4
1640#define V_MC7_CM(x) ((x) << S_MC7_CM)
1641#define F_MC7_CM V_MC7_CM(1U)
1642
1643#define S_MC7_PMTX 3
1644#define V_MC7_PMTX(x) ((x) << S_MC7_PMTX)
1645#define F_MC7_PMTX V_MC7_PMTX(1U)
1646
1647#define S_MC7_PMRX 2
1648#define V_MC7_PMRX(x) ((x) << S_MC7_PMRX)
1649#define F_MC7_PMRX V_MC7_PMRX(1U)
1650
1651#define S_PCIM0 1
1652#define V_PCIM0(x) ((x) << S_PCIM0)
1653#define F_PCIM0 V_PCIM0(1U)
1654
1655#define S_SGE3 0
1656#define V_SGE3(x) ((x) << S_SGE3)
1657#define F_SGE3 V_SGE3(1U)
1658
1659#define A_PL_INT_CAUSE0 0x6e4
1660
1661#define A_PL_RST 0x6f0
1662
1663#define S_CRSTWRM 1
1664#define V_CRSTWRM(x) ((x) << S_CRSTWRM)
1665#define F_CRSTWRM V_CRSTWRM(1U)
1666
1667#define A_PL_REV 0x6f4
1668
1669#define A_PL_CLI 0x6f8
1670
1671#define A_MC5_DB_CONFIG 0x704
1672
1673#define S_TMTYPEHI 30
1674#define V_TMTYPEHI(x) ((x) << S_TMTYPEHI)
1675#define F_TMTYPEHI V_TMTYPEHI(1U)
1676
1677#define S_TMPARTSIZE 28
1678#define M_TMPARTSIZE 0x3
1679#define V_TMPARTSIZE(x) ((x) << S_TMPARTSIZE)
1680#define G_TMPARTSIZE(x) (((x) >> S_TMPARTSIZE) & M_TMPARTSIZE)
1681
1682#define S_TMTYPE 26
1683#define M_TMTYPE 0x3
1684#define V_TMTYPE(x) ((x) << S_TMTYPE)
1685#define G_TMTYPE(x) (((x) >> S_TMTYPE) & M_TMTYPE)
1686
1687#define S_COMPEN 17
1688#define V_COMPEN(x) ((x) << S_COMPEN)
1689#define F_COMPEN V_COMPEN(1U)
1690
1691#define S_PRTYEN 6
1692#define V_PRTYEN(x) ((x) << S_PRTYEN)
1693#define F_PRTYEN V_PRTYEN(1U)
1694
1695#define S_MBUSEN 5
1696#define V_MBUSEN(x) ((x) << S_MBUSEN)
1697#define F_MBUSEN V_MBUSEN(1U)
1698
1699#define S_DBGIEN 4
1700#define V_DBGIEN(x) ((x) << S_DBGIEN)
1701#define F_DBGIEN V_DBGIEN(1U)
1702
1703#define S_TMRDY 2
1704#define V_TMRDY(x) ((x) << S_TMRDY)
1705#define F_TMRDY V_TMRDY(1U)
1706
1707#define S_TMRST 1
1708#define V_TMRST(x) ((x) << S_TMRST)
1709#define F_TMRST V_TMRST(1U)
1710
1711#define S_TMMODE 0
1712#define V_TMMODE(x) ((x) << S_TMMODE)
1713#define F_TMMODE V_TMMODE(1U)
1714
1715#define F_TMMODE V_TMMODE(1U)
1716
1717#define A_MC5_DB_ROUTING_TABLE_INDEX 0x70c
1718
1719#define A_MC5_DB_FILTER_TABLE 0x710
1720
1721#define A_MC5_DB_SERVER_INDEX 0x714
1722
1723#define A_MC5_DB_RSP_LATENCY 0x720
1724
1725#define S_RDLAT 16
1726#define M_RDLAT 0x1f
1727#define V_RDLAT(x) ((x) << S_RDLAT)
1728
1729#define S_LRNLAT 8
1730#define M_LRNLAT 0x1f
1731#define V_LRNLAT(x) ((x) << S_LRNLAT)
1732
1733#define S_SRCHLAT 0
1734#define M_SRCHLAT 0x1f
1735#define V_SRCHLAT(x) ((x) << S_SRCHLAT)
1736
1737#define A_MC5_DB_PART_ID_INDEX 0x72c
1738
1739#define A_MC5_DB_INT_ENABLE 0x740
1740
1741#define S_DELACTEMPTY 18
1742#define V_DELACTEMPTY(x) ((x) << S_DELACTEMPTY)
1743#define F_DELACTEMPTY V_DELACTEMPTY(1U)
1744
1745#define S_DISPQPARERR 17
1746#define V_DISPQPARERR(x) ((x) << S_DISPQPARERR)
1747#define F_DISPQPARERR V_DISPQPARERR(1U)
1748
1749#define S_REQQPARERR 16
1750#define V_REQQPARERR(x) ((x) << S_REQQPARERR)
1751#define F_REQQPARERR V_REQQPARERR(1U)
1752
1753#define S_UNKNOWNCMD 15
1754#define V_UNKNOWNCMD(x) ((x) << S_UNKNOWNCMD)
1755#define F_UNKNOWNCMD V_UNKNOWNCMD(1U)
1756
1757#define S_NFASRCHFAIL 8
1758#define V_NFASRCHFAIL(x) ((x) << S_NFASRCHFAIL)
1759#define F_NFASRCHFAIL V_NFASRCHFAIL(1U)
1760
1761#define S_ACTRGNFULL 7
1762#define V_ACTRGNFULL(x) ((x) << S_ACTRGNFULL)
1763#define F_ACTRGNFULL V_ACTRGNFULL(1U)
1764
1765#define S_PARITYERR 6
1766#define V_PARITYERR(x) ((x) << S_PARITYERR)
1767#define F_PARITYERR V_PARITYERR(1U)
1768
1769#define A_MC5_DB_INT_CAUSE 0x744
1770
1771#define A_MC5_DB_DBGI_CONFIG 0x774
1772
1773#define A_MC5_DB_DBGI_REQ_CMD 0x778
1774
1775#define A_MC5_DB_DBGI_REQ_ADDR0 0x77c
1776
1777#define A_MC5_DB_DBGI_REQ_ADDR1 0x780
1778
1779#define A_MC5_DB_DBGI_REQ_ADDR2 0x784
1780
1781#define A_MC5_DB_DBGI_REQ_DATA0 0x788
1782
1783#define A_MC5_DB_DBGI_REQ_DATA1 0x78c
1784
1785#define A_MC5_DB_DBGI_REQ_DATA2 0x790
1786
1787#define A_MC5_DB_DBGI_RSP_STATUS 0x7b0
1788
1789#define S_DBGIRSPVALID 0
1790#define V_DBGIRSPVALID(x) ((x) << S_DBGIRSPVALID)
1791#define F_DBGIRSPVALID V_DBGIRSPVALID(1U)
1792
1793#define A_MC5_DB_DBGI_RSP_DATA0 0x7b4
1794
1795#define A_MC5_DB_DBGI_RSP_DATA1 0x7b8
1796
1797#define A_MC5_DB_DBGI_RSP_DATA2 0x7bc
1798
1799#define A_MC5_DB_POPEN_DATA_WR_CMD 0x7cc
1800
1801#define A_MC5_DB_POPEN_MASK_WR_CMD 0x7d0
1802
1803#define A_MC5_DB_AOPEN_SRCH_CMD 0x7d4
1804
1805#define A_MC5_DB_AOPEN_LRN_CMD 0x7d8
1806
1807#define A_MC5_DB_SYN_SRCH_CMD 0x7dc
1808
1809#define A_MC5_DB_SYN_LRN_CMD 0x7e0
1810
1811#define A_MC5_DB_ACK_SRCH_CMD 0x7e4
1812
1813#define A_MC5_DB_ACK_LRN_CMD 0x7e8
1814
1815#define A_MC5_DB_ILOOKUP_CMD 0x7ec
1816
1817#define A_MC5_DB_ELOOKUP_CMD 0x7f0
1818
1819#define A_MC5_DB_DATA_WRITE_CMD 0x7f4
1820
1821#define A_MC5_DB_DATA_READ_CMD 0x7f8
1822
1823#define XGMAC0_0_BASE_ADDR 0x800
1824
1825#define A_XGM_TX_CTRL 0x800
1826
1827#define S_TXEN 0
1828#define V_TXEN(x) ((x) << S_TXEN)
1829#define F_TXEN V_TXEN(1U)
1830
1831#define A_XGM_TX_CFG 0x804
1832
1833#define S_TXPAUSEEN 0
1834#define V_TXPAUSEEN(x) ((x) << S_TXPAUSEEN)
1835#define F_TXPAUSEEN V_TXPAUSEEN(1U)
1836
1837#define A_XGM_RX_CTRL 0x80c
1838
1839#define S_RXEN 0
1840#define V_RXEN(x) ((x) << S_RXEN)
1841#define F_RXEN V_RXEN(1U)
1842
1843#define A_XGM_RX_CFG 0x810
1844
1845#define S_DISPAUSEFRAMES 9
1846#define V_DISPAUSEFRAMES(x) ((x) << S_DISPAUSEFRAMES)
1847#define F_DISPAUSEFRAMES V_DISPAUSEFRAMES(1U)
1848
1849#define S_EN1536BFRAMES 8
1850#define V_EN1536BFRAMES(x) ((x) << S_EN1536BFRAMES)
1851#define F_EN1536BFRAMES V_EN1536BFRAMES(1U)
1852
1853#define S_ENJUMBO 7
1854#define V_ENJUMBO(x) ((x) << S_ENJUMBO)
1855#define F_ENJUMBO V_ENJUMBO(1U)
1856
1857#define S_RMFCS 6
1858#define V_RMFCS(x) ((x) << S_RMFCS)
1859#define F_RMFCS V_RMFCS(1U)
1860
1861#define S_ENHASHMCAST 2
1862#define V_ENHASHMCAST(x) ((x) << S_ENHASHMCAST)
1863#define F_ENHASHMCAST V_ENHASHMCAST(1U)
1864
1865#define S_COPYALLFRAMES 0
1866#define V_COPYALLFRAMES(x) ((x) << S_COPYALLFRAMES)
1867#define F_COPYALLFRAMES V_COPYALLFRAMES(1U)
1868
1869#define A_XGM_RX_HASH_LOW 0x814
1870
1871#define A_XGM_RX_HASH_HIGH 0x818
1872
1873#define A_XGM_RX_EXACT_MATCH_LOW_1 0x81c
1874
1875#define A_XGM_RX_EXACT_MATCH_HIGH_1 0x820
1876
1877#define A_XGM_RX_EXACT_MATCH_LOW_2 0x824
1878
1879#define A_XGM_RX_EXACT_MATCH_LOW_3 0x82c
1880
1881#define A_XGM_RX_EXACT_MATCH_LOW_4 0x834
1882
1883#define A_XGM_RX_EXACT_MATCH_LOW_5 0x83c
1884
1885#define A_XGM_RX_EXACT_MATCH_LOW_6 0x844
1886
1887#define A_XGM_RX_EXACT_MATCH_LOW_7 0x84c
1888
1889#define A_XGM_RX_EXACT_MATCH_LOW_8 0x854
1890
1891#define A_XGM_STAT_CTRL 0x880
1892
1893#define S_CLRSTATS 2
1894#define V_CLRSTATS(x) ((x) << S_CLRSTATS)
1895#define F_CLRSTATS V_CLRSTATS(1U)
1896
1897#define A_XGM_RXFIFO_CFG 0x884
1898
1899#define S_RXFIFOPAUSEHWM 17
1900#define M_RXFIFOPAUSEHWM 0xfff
1902#define V_RXFIFOPAUSEHWM(x) ((x) << S_RXFIFOPAUSEHWM)
1904#define G_RXFIFOPAUSEHWM(x) (((x) >> S_RXFIFOPAUSEHWM) & M_RXFIFOPAUSEHWM)
1905
1906#define S_RXFIFOPAUSELWM 5
1907#define M_RXFIFOPAUSELWM 0xfff
1909#define V_RXFIFOPAUSELWM(x) ((x) << S_RXFIFOPAUSELWM)
1911#define G_RXFIFOPAUSELWM(x) (((x) >> S_RXFIFOPAUSELWM) & M_RXFIFOPAUSELWM)
1912
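The HWM/LWM fields above set the Rx FIFO occupancy levels at which the MAC asserts and releases pause. A sketch that programs both watermarks in one read-modify-write; the per-port mac_offset banking is an assumption, as is the unit of the watermark values:

static void set_rx_pause_wm(struct adapter *adap, unsigned int mac_offset,
			    unsigned int hwm, unsigned int lwm)
{
	t3_set_reg_field(adap, A_XGM_RXFIFO_CFG + mac_offset,
			 V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM) |
			 V_RXFIFOPAUSELWM(M_RXFIFOPAUSELWM),
			 V_RXFIFOPAUSEHWM(hwm) | V_RXFIFOPAUSELWM(lwm));
}
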
1913#define S_RXSTRFRWRD 1
1914#define V_RXSTRFRWRD(x) ((x) << S_RXSTRFRWRD)
1915#define F_RXSTRFRWRD V_RXSTRFRWRD(1U)
1916
1917#define S_DISERRFRAMES 0
1918#define V_DISERRFRAMES(x) ((x) << S_DISERRFRAMES)
1919#define F_DISERRFRAMES V_DISERRFRAMES(1U)
1920
1921#define A_XGM_TXFIFO_CFG 0x888
1922
1923#define S_TXFIFOTHRESH 4
1924#define M_TXFIFOTHRESH 0x1ff
1926#define V_TXFIFOTHRESH(x) ((x) << S_TXFIFOTHRESH)
1927
1928#define A_XGM_SERDES_CTRL 0x890
1929#define A_XGM_SERDES_CTRL0 0x8e0
1930
1931#define S_SERDESRESET_ 24
1932#define V_SERDESRESET_(x) ((x) << S_SERDESRESET_)
1933#define F_SERDESRESET_ V_SERDESRESET_(1U)
1934
1935#define S_RXENABLE 4
1936#define V_RXENABLE(x) ((x) << S_RXENABLE)
1937#define F_RXENABLE V_RXENABLE(1U)
1938
1939#define S_TXENABLE 3
1940#define V_TXENABLE(x) ((x) << S_TXENABLE)
1941#define F_TXENABLE V_TXENABLE(1U)
1942
1943#define A_XGM_PAUSE_TIMER 0x890
1944
1945#define A_XGM_RGMII_IMP 0x89c
1946
1947#define S_XGM_IMPSETUPDATE 6
1948#define V_XGM_IMPSETUPDATE(x) ((x) << S_XGM_IMPSETUPDATE)
1949#define F_XGM_IMPSETUPDATE V_XGM_IMPSETUPDATE(1U)
1950
1951#define S_RGMIIIMPPD 3
1952#define M_RGMIIIMPPD 0x7
1953#define V_RGMIIIMPPD(x) ((x) << S_RGMIIIMPPD)
1954
1955#define S_RGMIIIMPPU 0
1956#define M_RGMIIIMPPU 0x7
1957#define V_RGMIIIMPPU(x) ((x) << S_RGMIIIMPPU)
1958
1959#define S_CALRESET 8
1960#define V_CALRESET(x) ((x) << S_CALRESET)
1961#define F_CALRESET V_CALRESET(1U)
1962
1963#define S_CALUPDATE 7
1964#define V_CALUPDATE(x) ((x) << S_CALUPDATE)
1965#define F_CALUPDATE V_CALUPDATE(1U)
1966
1967#define A_XGM_XAUI_IMP 0x8a0
1968
1969#define S_CALBUSY 31
1970#define V_CALBUSY(x) ((x) << S_CALBUSY)
1971#define F_CALBUSY V_CALBUSY(1U)
1972
1973#define S_XGM_CALFAULT 29
1974#define V_XGM_CALFAULT(x) ((x) << S_XGM_CALFAULT)
1975#define F_XGM_CALFAULT V_XGM_CALFAULT(1U)
1976
1977#define S_CALIMP 24
1978#define M_CALIMP 0x1f
1979#define V_CALIMP(x) ((x) << S_CALIMP)
1980#define G_CALIMP(x) (((x) >> S_CALIMP) & M_CALIMP)
1981
1982#define S_XAUIIMP 0
1983#define M_XAUIIMP 0x7
1984#define V_XAUIIMP(x) ((x) << S_XAUIIMP)
1985
1986#define A_XGM_RX_MAX_PKT_SIZE 0x8a8
1987#define A_XGM_RX_MAX_PKT_SIZE_ERR_CNT 0x9a4
1988
1989#define A_XGM_RESET_CTRL 0x8ac
1990
1991#define S_XG2G_RESET_ 3
1992#define V_XG2G_RESET_(x) ((x) << S_XG2G_RESET_)
1993#define F_XG2G_RESET_ V_XG2G_RESET_(1U)
1994
1995#define S_RGMII_RESET_ 2
1996#define V_RGMII_RESET_(x) ((x) << S_RGMII_RESET_)
1997#define F_RGMII_RESET_ V_RGMII_RESET_(1U)
1998
1999#define S_PCS_RESET_ 1
2000#define V_PCS_RESET_(x) ((x) << S_PCS_RESET_)
2001#define F_PCS_RESET_ V_PCS_RESET_(1U)
2002
2003#define S_MAC_RESET_ 0
2004#define V_MAC_RESET_(x) ((x) << S_MAC_RESET_)
2005#define F_MAC_RESET_ V_MAC_RESET_(1U)
2006
2007#define A_XGM_PORT_CFG 0x8b8
2008
2009#define S_CLKDIVRESET_ 3
2010#define V_CLKDIVRESET_(x) ((x) << S_CLKDIVRESET_)
2011#define F_CLKDIVRESET_ V_CLKDIVRESET_(1U)
2012
2013#define S_PORTSPEED 1
2014#define M_PORTSPEED 0x3
2016#define V_PORTSPEED(x) ((x) << S_PORTSPEED)
2017
2018#define S_ENRGMII 0
2019#define V_ENRGMII(x) ((x) << S_ENRGMII)
2020#define F_ENRGMII V_ENRGMII(1U)
2021
2022#define A_XGM_INT_ENABLE 0x8d4
2023
2024#define S_TXFIFO_PRTY_ERR 17
2025#define M_TXFIFO_PRTY_ERR 0x7
2027#define V_TXFIFO_PRTY_ERR(x) ((x) << S_TXFIFO_PRTY_ERR)
2028
2029#define S_RXFIFO_PRTY_ERR 14
2030#define M_RXFIFO_PRTY_ERR 0x7
2032#define V_RXFIFO_PRTY_ERR(x) ((x) << S_RXFIFO_PRTY_ERR)
2033
2034#define S_TXFIFO_UNDERRUN 13
2035#define V_TXFIFO_UNDERRUN(x) ((x) << S_TXFIFO_UNDERRUN)
2036#define F_TXFIFO_UNDERRUN V_TXFIFO_UNDERRUN(1U)
2037
2038#define S_RXFIFO_OVERFLOW 12
2039#define V_RXFIFO_OVERFLOW(x) ((x) << S_RXFIFO_OVERFLOW)
2040#define F_RXFIFO_OVERFLOW V_RXFIFO_OVERFLOW(1U)
2041
2042#define S_SERDES_LOS 4
2043#define M_SERDES_LOS 0xf
2045#define V_SERDES_LOS(x) ((x) << S_SERDES_LOS)
2046
2047#define S_XAUIPCSCTCERR 3
2048#define V_XAUIPCSCTCERR(x) ((x) << S_XAUIPCSCTCERR)
2049#define F_XAUIPCSCTCERR V_XAUIPCSCTCERR(1U)
2050
2051#define S_XAUIPCSALIGNCHANGE 2
2052#define V_XAUIPCSALIGNCHANGE(x) ((x) << S_XAUIPCSALIGNCHANGE)
2053#define F_XAUIPCSALIGNCHANGE V_XAUIPCSALIGNCHANGE(1U)
2054
2055#define A_XGM_INT_CAUSE 0x8d8
2056
2057#define A_XGM_XAUI_ACT_CTRL 0x8dc
2058
2059#define S_TXACTENABLE 1
2060#define V_TXACTENABLE(x) ((x) << S_TXACTENABLE)
2061#define F_TXACTENABLE V_TXACTENABLE(1U)
2062
2063#define A_XGM_SERDES_CTRL0 0x8e0
2064
2065#define S_RESET3 23
2066#define V_RESET3(x) ((x) << S_RESET3)
2067#define F_RESET3 V_RESET3(1U)
2068
2069#define S_RESET2 22
2070#define V_RESET2(x) ((x) << S_RESET2)
2071#define F_RESET2 V_RESET2(1U)
2072
2073#define S_RESET1 21
2074#define V_RESET1(x) ((x) << S_RESET1)
2075#define F_RESET1 V_RESET1(1U)
2076
2077#define S_RESET0 20
2078#define V_RESET0(x) ((x) << S_RESET0)
2079#define F_RESET0 V_RESET0(1U)
2080
2081#define S_PWRDN3 19
2082#define V_PWRDN3(x) ((x) << S_PWRDN3)
2083#define F_PWRDN3 V_PWRDN3(1U)
2084
2085#define S_PWRDN2 18
2086#define V_PWRDN2(x) ((x) << S_PWRDN2)
2087#define F_PWRDN2 V_PWRDN2(1U)
2088
2089#define S_PWRDN1 17
2090#define V_PWRDN1(x) ((x) << S_PWRDN1)
2091#define F_PWRDN1 V_PWRDN1(1U)
2092
2093#define S_PWRDN0 16
2094#define V_PWRDN0(x) ((x) << S_PWRDN0)
2095#define F_PWRDN0 V_PWRDN0(1U)
2096
2097#define S_RESETPLL23 15
2098#define V_RESETPLL23(x) ((x) << S_RESETPLL23)
2099#define F_RESETPLL23 V_RESETPLL23(1U)
2100
2101#define S_RESETPLL01 14
2102#define V_RESETPLL01(x) ((x) << S_RESETPLL01)
2103#define F_RESETPLL01 V_RESETPLL01(1U)
2104
2105#define A_XGM_SERDES_STAT0 0x8f0
2106
2107#define S_LOWSIG0 0
2108#define V_LOWSIG0(x) ((x) << S_LOWSIG0)
2109#define F_LOWSIG0 V_LOWSIG0(1U)
2110
2111#define A_XGM_SERDES_STAT3 0x8fc
2112
2113#define A_XGM_STAT_TX_BYTE_LOW 0x900
2114
2115#define A_XGM_STAT_TX_BYTE_HIGH 0x904
2116
2117#define A_XGM_STAT_TX_FRAME_LOW 0x908
2118
2119#define A_XGM_STAT_TX_FRAME_HIGH 0x90c
2120
2121#define A_XGM_STAT_TX_BCAST 0x910
2122
2123#define A_XGM_STAT_TX_MCAST 0x914
2124
2125#define A_XGM_STAT_TX_PAUSE 0x918
2126
2127#define A_XGM_STAT_TX_64B_FRAMES 0x91c
2128
2129#define A_XGM_STAT_TX_65_127B_FRAMES 0x920
2130
2131#define A_XGM_STAT_TX_128_255B_FRAMES 0x924
2132
2133#define A_XGM_STAT_TX_256_511B_FRAMES 0x928
2134
2135#define A_XGM_STAT_TX_512_1023B_FRAMES 0x92c
2136
2137#define A_XGM_STAT_TX_1024_1518B_FRAMES 0x930
2138
2139#define A_XGM_STAT_TX_1519_MAXB_FRAMES 0x934
2140
2141#define A_XGM_STAT_TX_ERR_FRAMES 0x938
2142
2143#define A_XGM_STAT_RX_BYTES_LOW 0x93c
2144
2145#define A_XGM_STAT_RX_BYTES_HIGH 0x940
2146
2147#define A_XGM_STAT_RX_FRAMES_LOW 0x944
2148
2149#define A_XGM_STAT_RX_FRAMES_HIGH 0x948
2150
2151#define A_XGM_STAT_RX_BCAST_FRAMES 0x94c
2152
2153#define A_XGM_STAT_RX_MCAST_FRAMES 0x950
2154
2155#define A_XGM_STAT_RX_PAUSE_FRAMES 0x954
2156
2157#define A_XGM_STAT_RX_64B_FRAMES 0x958
2158
2159#define A_XGM_STAT_RX_65_127B_FRAMES 0x95c
2160
2161#define A_XGM_STAT_RX_128_255B_FRAMES 0x960
2162
2163#define A_XGM_STAT_RX_256_511B_FRAMES 0x964
2164
2165#define A_XGM_STAT_RX_512_1023B_FRAMES 0x968
2166
2167#define A_XGM_STAT_RX_1024_1518B_FRAMES 0x96c
2168
2169#define A_XGM_STAT_RX_1519_MAXB_FRAMES 0x970
2170
2171#define A_XGM_STAT_RX_SHORT_FRAMES 0x974
2172
2173#define A_XGM_STAT_RX_OVERSIZE_FRAMES 0x978
2174
2175#define A_XGM_STAT_RX_JABBER_FRAMES 0x97c
2176
2177#define A_XGM_STAT_RX_CRC_ERR_FRAMES 0x980
2178
2179#define A_XGM_STAT_RX_LENGTH_ERR_FRAMES 0x984
2180
2181#define A_XGM_STAT_RX_SYM_CODE_ERR_FRAMES 0x988
2182
2183#define A_XGM_SERDES_STATUS0 0x98c
2184
2185#define A_XGM_SERDES_STATUS1 0x990
2186
2187#define S_CMULOCK 31
2188#define V_CMULOCK(x) ((x) << S_CMULOCK)
2189#define F_CMULOCK V_CMULOCK(1U)
2190
2193#define A_XGM_RX_SPI4_SOP_EOP_CNT 0x9ac
2194
2195#define XGMAC0_1_BASE_ADDR 0xa00
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
new file mode 100644
index 000000000000..3f2cf8a07c61
--- /dev/null
+++ b/drivers/net/cxgb3/sge.c
@@ -0,0 +1,2681 @@
1/*
2 * Copyright (c) 2005-2007 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/skbuff.h>
33#include <linux/netdevice.h>
34#include <linux/etherdevice.h>
35#include <linux/if_vlan.h>
36#include <linux/ip.h>
37#include <linux/tcp.h>
38#include <linux/dma-mapping.h>
39#include "common.h"
40#include "regs.h"
41#include "sge_defs.h"
42#include "t3_cpl.h"
43#include "firmware_exports.h"
44
45#define USE_GTS 0
46
47#define SGE_RX_SM_BUF_SIZE 1536
48#define SGE_RX_COPY_THRES 256
49
50#define SGE_RX_DROP_THRES 16
51
52/*
53 * Period of the Tx buffer reclaim timer. This timer does not need to run
54 * frequently as Tx buffers are usually reclaimed by new Tx packets.
55 */
56#define TX_RECLAIM_PERIOD (HZ / 4)
57
58/* WR size in bytes */
59#define WR_LEN (WR_FLITS * 8)
60
61/*
62 * Types of Tx queues in each queue set. Order here matters, do not change.
63 */
64enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };
65
66/* Values for sge_txq.flags */
67enum {
68 TXQ_RUNNING = 1 << 0, /* fetch engine is running */
69 TXQ_LAST_PKT_DB = 1 << 1, /* last packet rang the doorbell */
70};
71
72struct tx_desc {
73 u64 flit[TX_DESC_FLITS];
74};
75
76struct rx_desc {
77 __be32 addr_lo;
78 __be32 len_gen;
79 __be32 gen2;
80 __be32 addr_hi;
81};
82
83struct tx_sw_desc { /* SW state per Tx descriptor */
84 struct sk_buff *skb;
85};
86
87struct rx_sw_desc { /* SW state per Rx descriptor */
88 struct sk_buff *skb;
89 DECLARE_PCI_UNMAP_ADDR(dma_addr);
90};
91
92struct rsp_desc { /* response queue descriptor */
93 struct rss_header rss_hdr;
94 __be32 flags;
95 __be32 len_cq;
96 u8 imm_data[47];
97 u8 intr_gen;
98};
99
100struct unmap_info { /* packet unmapping info, overlays skb->cb */
101 int sflit; /* start flit of first SGL entry in Tx descriptor */
102 u16 fragidx; /* first page fragment in current Tx descriptor */
103 u16 addr_idx; /* buffer index of first SGL entry in descriptor */
104 u32 len; /* mapped length of skb main body */
105};
106
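/*
 * Editor's aside, a sketch rather than driver code: unmap_info overlays
 * skb->cb, so it must never outgrow that array.  A compile-time check of
 * this assumption could look like:
 */
static inline void unmap_info_check(void)
{
	BUILD_BUG_ON(sizeof(struct unmap_info) >
		     sizeof(((struct sk_buff *)0)->cb));
}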
107/*
108 * Maps a number of flits to the number of Tx descriptors that can hold them.
109 * The formula is
110 *
111 * desc = 1 + (flits - 2) / (WR_FLITS - 1).
112 *
113 * HW allows up to 4 descriptors to be combined into a WR.
114 */
115static u8 flit_desc_map[] = {
116 0,
117#if SGE_NUM_GENBITS == 1
118 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
119 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
120 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
121 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
122#elif SGE_NUM_GENBITS == 2
123 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
124 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
125 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
126 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
127#else
128# error "SGE_NUM_GENBITS must be 1 or 2"
129#endif
130};
131
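/*
 * Editor's sketch: the closed form that the table above encodes,
 * desc = 1 + (flits - 2) / (WR_FLITS - 1); the lookup table simply
 * trades the division for an array access on the fast path.
 */
static inline unsigned int flits_to_desc_closed_form(unsigned int flits)
{
	if (!flits)
		return 0;
	return flits <= 2 ? 1 : 1 + (flits - 2) / (WR_FLITS - 1);
}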
132static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
133{
134 return container_of(q, struct sge_qset, fl[qidx]);
135}
136
137static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
138{
139 return container_of(q, struct sge_qset, rspq);
140}
141
142static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
143{
144 return container_of(q, struct sge_qset, txq[qidx]);
145}
146
147/**
148 * refill_rspq - replenish an SGE response queue
149 * @adapter: the adapter
150 * @q: the response queue to replenish
151 * @credits: how many new responses to make available
152 *
153 * Replenishes a response queue by making the supplied number of responses
154 * available to HW.
155 */
156static inline void refill_rspq(struct adapter *adapter,
157 const struct sge_rspq *q, unsigned int credits)
158{
159 t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
160 V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
161}
162
163/**
164 * need_skb_unmap - does the platform need unmapping of sk_buffs?
165 *
166 * Returns true if the platform needs sk_buff unmapping. The compiler
167 * optimizes away the unnecessary unmapping code when this is false.
168 */
169static inline int need_skb_unmap(void)
170{
171 /*
172 * This structure is used to tell if the platform needs buffer
173 * unmapping by checking if DECLARE_PCI_UNMAP_ADDR defines anything.
174 */
175 struct dummy {
176 DECLARE_PCI_UNMAP_ADDR(addr);
177 };
178
179 return sizeof(struct dummy) != 0;
180}
181
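/*
 * Editor's note: DECLARE_PCI_UNMAP_ADDR(x) expands roughly as follows on
 * the two kinds of platforms, which is what makes the sizeof() test above
 * a compile-time constant:
 *
 *	#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME;
 *	#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
 */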
182/**
183 * unmap_skb - unmap a packet main body and its page fragments
184 * @skb: the packet
185 * @q: the Tx queue containing Tx descriptors for the packet
186 * @cidx: index of Tx descriptor
187 * @pdev: the PCI device
188 *
189 * Unmap the main body of an sk_buff and its page fragments, if any.
190 * Because of the fairly complicated structure of our SGLs and the desire
191 * to conserve space for metadata, we keep the information necessary to
192 * unmap an sk_buff partly in the sk_buff itself (in its cb), and partly
193 * in the Tx descriptors (the physical addresses of the various data
194 * buffers). The send functions initialize the state in skb->cb so we
195 * can unmap the buffers held in the first Tx descriptor here, and we
196 * have enough information at this point to update the state for the next
197 * Tx descriptor.
198 */
199static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
200 unsigned int cidx, struct pci_dev *pdev)
201{
202 const struct sg_ent *sgp;
203 struct unmap_info *ui = (struct unmap_info *)skb->cb;
204 int nfrags, frag_idx, curflit, j = ui->addr_idx;
205
206 sgp = (struct sg_ent *)&q->desc[cidx].flit[ui->sflit];
207
208 if (ui->len) {
209 pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]), ui->len,
210 PCI_DMA_TODEVICE);
211 ui->len = 0; /* so we know for next descriptor for this skb */
212 j = 1;
213 }
214
215 frag_idx = ui->fragidx;
216 curflit = ui->sflit + 1 + j;
217 nfrags = skb_shinfo(skb)->nr_frags;
218
219 while (frag_idx < nfrags && curflit < WR_FLITS) {
220 pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]),
221 skb_shinfo(skb)->frags[frag_idx].size,
222 PCI_DMA_TODEVICE);
223 j ^= 1;
224 if (j == 0) {
225 sgp++;
226 curflit++;
227 }
228 curflit++;
229 frag_idx++;
230 }
231
232 if (frag_idx < nfrags) { /* SGL continues into next Tx descriptor */
233 ui->fragidx = frag_idx;
234 ui->addr_idx = j;
235 ui->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */
236 }
237}
238
239/**
240 * free_tx_desc - reclaims Tx descriptors and their buffers
241 * @adapter: the adapter
242 * @q: the Tx queue to reclaim descriptors from
243 * @n: the number of descriptors to reclaim
244 *
245 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
246 * Tx buffers. Called with the Tx queue lock held.
247 */
248static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
249 unsigned int n)
250{
251 struct tx_sw_desc *d;
252 struct pci_dev *pdev = adapter->pdev;
253 unsigned int cidx = q->cidx;
254
255 d = &q->sdesc[cidx];
256 while (n--) {
257 if (d->skb) { /* an SGL is present */
258 if (need_skb_unmap())
259 unmap_skb(d->skb, q, cidx, pdev);
260 if (d->skb->priority == cidx)
261 kfree_skb(d->skb);
262 }
263 ++d;
264 if (++cidx == q->size) {
265 cidx = 0;
266 d = q->sdesc;
267 }
268 }
269 q->cidx = cidx;
270}
271
272/**
273 * reclaim_completed_tx - reclaims completed Tx descriptors
274 * @adapter: the adapter
275 * @q: the Tx queue to reclaim completed descriptors from
276 *
277 * Reclaims Tx descriptors that the SGE has indicated it has processed,
278 * and frees the associated buffers if possible. Called with the Tx
279 * queue's lock held.
280 */
281static inline void reclaim_completed_tx(struct adapter *adapter,
282 struct sge_txq *q)
283{
284 unsigned int reclaim = q->processed - q->cleaned;
285
286 if (reclaim) {
287 free_tx_desc(adapter, q, reclaim);
288 q->cleaned += reclaim;
289 q->in_use -= reclaim;
290 }
291}
292
293/**
294 * should_restart_tx - are there enough resources to restart a Tx queue?
295 * @q: the Tx queue
296 *
297 * Checks if there are enough descriptors to restart a suspended Tx queue.
298 */
299static inline int should_restart_tx(const struct sge_txq *q)
300{
301 unsigned int r = q->processed - q->cleaned;
302
303 return q->in_use - r < (q->size >> 1);
304}
305
306/**
307 * free_rx_bufs - free the Rx buffers on an SGE free list
308 * @pdev: the PCI device associated with the adapter
309 * @rxq: the SGE free list to clean up
310 *
311 * Release the buffers on an SGE free-buffer Rx queue. HW fetching from
312 * this queue should be stopped before calling this function.
313 */
314static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
315{
316 unsigned int cidx = q->cidx;
317
318 while (q->credits--) {
319 struct rx_sw_desc *d = &q->sdesc[cidx];
320
321 pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
322 q->buf_size, PCI_DMA_FROMDEVICE);
323 kfree_skb(d->skb);
324 d->skb = NULL;
325 if (++cidx == q->size)
326 cidx = 0;
327 }
328}
329
330/**
331 * add_one_rx_buf - add a packet buffer to a free-buffer list
332 * @skb: the buffer to add
333 * @len: the buffer length
334 * @d: the HW Rx descriptor to write
335 * @sd: the SW Rx descriptor to write
336 * @gen: the generation bit value
337 * @pdev: the PCI device associated with the adapter
338 *
339 * Add a buffer of the given length to the supplied HW and SW Rx
340 * descriptors.
341 */
342static inline void add_one_rx_buf(struct sk_buff *skb, unsigned int len,
343 struct rx_desc *d, struct rx_sw_desc *sd,
344 unsigned int gen, struct pci_dev *pdev)
345{
346 dma_addr_t mapping;
347
348 sd->skb = skb;
349 mapping = pci_map_single(pdev, skb->data, len, PCI_DMA_FROMDEVICE);
350 pci_unmap_addr_set(sd, dma_addr, mapping);
351
352 d->addr_lo = cpu_to_be32(mapping);
353 d->addr_hi = cpu_to_be32((u64) mapping >> 32);
354 wmb();
355 d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
356 d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
357}
358
359/**
360 * refill_fl - refill an SGE free-buffer list
361 * @adapter: the adapter
362 * @q: the free-list to refill
363 * @n: the number of new buffers to allocate
364 * @gfp: the gfp flags for allocating new buffers
365 *
366 * (Re)populate an SGE free-buffer list with up to @n new packet buffers,
367 * allocated with the supplied gfp flags. The caller must ensure that
368 * @n does not exceed the queue's capacity.
369 */
370static void refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
371{
372 struct rx_sw_desc *sd = &q->sdesc[q->pidx];
373 struct rx_desc *d = &q->desc[q->pidx];
374
375 while (n--) {
376 struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
377
378 if (!skb)
379 break;
380
381 add_one_rx_buf(skb, q->buf_size, d, sd, q->gen, adap->pdev);
382 d++;
383 sd++;
384 if (++q->pidx == q->size) {
385 q->pidx = 0;
386 q->gen ^= 1;
387 sd = q->sdesc;
388 d = q->desc;
389 }
390 q->credits++;
391 }
392
393 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
394}
395
396static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
397{
398 refill_fl(adap, fl, min(16U, fl->size - fl->credits), GFP_ATOMIC);
399}
400
401/**
402 * recycle_rx_buf - recycle a receive buffer
403 * @adapter: the adapter
404 * @q: the SGE free list
405 * @idx: index of buffer to recycle
406 *
407 * Recycles the specified buffer on the given free list by adding it at
408 * the next available slot on the list.
409 */
410static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
411 unsigned int idx)
412{
413 struct rx_desc *from = &q->desc[idx];
414 struct rx_desc *to = &q->desc[q->pidx];
415
416 q->sdesc[q->pidx] = q->sdesc[idx];
417 to->addr_lo = from->addr_lo; /* already big endian */
418 to->addr_hi = from->addr_hi; /* likewise */
419 wmb();
420 to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
421 to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));
422 q->credits++;
423
424 if (++q->pidx == q->size) {
425 q->pidx = 0;
426 q->gen ^= 1;
427 }
428 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
429}
430
431/**
432 * alloc_ring - allocate resources for an SGE descriptor ring
433 * @pdev: the PCI device
434 * @nelem: the number of descriptors
435 * @elem_size: the size of each descriptor
436 * @sw_size: the size of the SW state associated with each ring element
437 * @phys: the physical address of the allocated ring
438 * @metadata: address of the array holding the SW state for the ring
439 *
440 * Allocates resources for an SGE descriptor ring, such as Tx queues,
441 * free buffer lists, or response queues. Each SGE ring requires
442 * space for its HW descriptors plus, optionally, space for the SW state
443 * associated with each HW entry (the metadata). The function returns
444 * three values: the virtual address for the HW ring (the return value
445 * of the function), the physical address of the HW ring, and the address
446 * of the SW ring.
447 */
448static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
449 size_t sw_size, dma_addr_t *phys, void *metadata)
450{
451 size_t len = nelem * elem_size;
452 void *s = NULL;
453 void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);
454
455 if (!p)
456 return NULL;
457 if (sw_size) {
458 s = kcalloc(nelem, sw_size, GFP_KERNEL);
459
460 if (!s) {
461 dma_free_coherent(&pdev->dev, len, p, *phys);
462 return NULL;
463 }
464 }
465 if (metadata)
466 *(void **)metadata = s;
467 memset(p, 0, len);
468 return p;
469}
470
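/*
 * Editor's sketch of the intended calling pattern for alloc_ring(),
 * modeled on the queue-set setup later in this file; the helper name is
 * hypothetical and error handling is reduced to the return code.
 */
static int example_alloc_txq(struct pci_dev *pdev, struct sge_txq *q)
{
	q->desc = alloc_ring(pdev, q->size, sizeof(struct tx_desc),
			     sizeof(struct tx_sw_desc), &q->phys_addr,
			     &q->sdesc);
	return q->desc ? 0 : -ENOMEM;
}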
471/**
472 * t3_free_qset - free the resources of an SGE queue set
473 * @adapter: the adapter owning the queue set
474 * @q: the queue set
475 *
476 * Release the HW and SW resources associated with an SGE queue set, such
477 * as HW contexts, packet buffers, and descriptor rings. Traffic to the
478 * queue set must be quiesced prior to calling this.
479 */
480void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
481{
482 int i;
483 struct pci_dev *pdev = adapter->pdev;
484
485 if (q->tx_reclaim_timer.function)
486 del_timer_sync(&q->tx_reclaim_timer);
487
488 for (i = 0; i < SGE_RXQ_PER_SET; ++i)
489 if (q->fl[i].desc) {
490 spin_lock(&adapter->sge.reg_lock);
491 t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
492 spin_unlock(&adapter->sge.reg_lock);
493 free_rx_bufs(pdev, &q->fl[i]);
494 kfree(q->fl[i].sdesc);
495 dma_free_coherent(&pdev->dev,
496 q->fl[i].size *
497 sizeof(struct rx_desc), q->fl[i].desc,
498 q->fl[i].phys_addr);
499 }
500
501 for (i = 0; i < SGE_TXQ_PER_SET; ++i)
502 if (q->txq[i].desc) {
503 spin_lock(&adapter->sge.reg_lock);
504 t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
505 spin_unlock(&adapter->sge.reg_lock);
506 if (q->txq[i].sdesc) {
507 free_tx_desc(adapter, &q->txq[i],
508 q->txq[i].in_use);
509 kfree(q->txq[i].sdesc);
510 }
511 dma_free_coherent(&pdev->dev,
512 q->txq[i].size *
513 sizeof(struct tx_desc),
514 q->txq[i].desc, q->txq[i].phys_addr);
515 __skb_queue_purge(&q->txq[i].sendq);
516 }
517
518 if (q->rspq.desc) {
519 spin_lock(&adapter->sge.reg_lock);
520 t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
521 spin_unlock(&adapter->sge.reg_lock);
522 dma_free_coherent(&pdev->dev,
523 q->rspq.size * sizeof(struct rsp_desc),
524 q->rspq.desc, q->rspq.phys_addr);
525 }
526
527 if (q->netdev)
528 q->netdev->atalk_ptr = NULL;
529
530 memset(q, 0, sizeof(*q));
531}
532
533/**
534 * init_qset_cntxt - initialize an SGE queue set context info
535 * @qs: the queue set
536 * @id: the queue set id
537 *
538 * Initializes the TIDs and context ids for the queues of a queue set.
539 */
540static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)
541{
542 qs->rspq.cntxt_id = id;
543 qs->fl[0].cntxt_id = 2 * id;
544 qs->fl[1].cntxt_id = 2 * id + 1;
545 qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
546 qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
547 qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
548 qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
549 qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
550}
551
552/**
553 * sgl_len - calculates the size of an SGL of the given capacity
554 * @n: the number of SGL entries
555 *
556 * Calculates the number of flits needed for a scatter/gather list that
557 * can hold the given number of entries.
558 */
559static inline unsigned int sgl_len(unsigned int n)
560{
561 /* alternatively: 3 * (n / 2) + 2 * (n & 1) */
562 return (3 * n) / 2 + (n & 1);
563}
564
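/*
 * Editor's note: each struct sg_ent carries two buffers in three flits
 * (two 32-bit lengths plus two 64-bit addresses, 24 bytes), which is
 * where the 3n/2 term comes from.  Worked examples:
 * sgl_len(1) == 2, sgl_len(2) == 3, sgl_len(3) == 5, sgl_len(4) == 6.
 */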
565/**
566 * flits_to_desc - returns the num of Tx descriptors for the given flits
567 * @n: the number of flits
568 *
569 * Calculates the number of Tx descriptors needed for the supplied number
570 * of flits.
571 */
572static inline unsigned int flits_to_desc(unsigned int n)
573{
574 BUG_ON(n >= ARRAY_SIZE(flit_desc_map));
575 return flit_desc_map[n];
576}
577
578/**
579 * get_packet - return the next ingress packet buffer from a free list
580 * @adap: the adapter that received the packet
581 * @fl: the SGE free list holding the packet
582 * @len: the packet length including any SGE padding
583 * @drop_thres: # of remaining buffers before we start dropping packets
584 *
585 * Get the next packet from a free list and complete setup of the
586 * sk_buff. If the packet is small we make a copy and recycle the
587 * original buffer, otherwise we use the original buffer itself. If a
588 * positive drop threshold is supplied, packets are dropped and their
589 * buffers recycled if (a) the number of remaining buffers is under the
590 * threshold and the packet is too big to copy, or (b) the packet should
591 * be copied but there is no memory for the copy.
592 */
593static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
594 unsigned int len, unsigned int drop_thres)
595{
596 struct sk_buff *skb = NULL;
597 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
598
599 prefetch(sd->skb->data);
600
601 if (len <= SGE_RX_COPY_THRES) {
602 skb = alloc_skb(len, GFP_ATOMIC);
603 if (likely(skb != NULL)) {
604 __skb_put(skb, len);
605 pci_dma_sync_single_for_cpu(adap->pdev,
606 pci_unmap_addr(sd,
607 dma_addr),
608 len, PCI_DMA_FROMDEVICE);
609 memcpy(skb->data, sd->skb->data, len);
610 pci_dma_sync_single_for_device(adap->pdev,
611 pci_unmap_addr(sd,
612 dma_addr),
613 len, PCI_DMA_FROMDEVICE);
614 } else if (!drop_thres)
615 goto use_orig_buf;
616 recycle:
617 recycle_rx_buf(adap, fl, fl->cidx);
618 return skb;
619 }
620
621 if (unlikely(fl->credits < drop_thres))
622 goto recycle;
623
624 use_orig_buf:
625 pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
626 fl->buf_size, PCI_DMA_FROMDEVICE);
627 skb = sd->skb;
628 skb_put(skb, len);
629 __refill_fl(adap, fl);
630 return skb;
631}
632
633/**
634 * get_imm_packet - return the next ingress packet buffer from a response
635 * @resp: the response descriptor containing the packet data
636 *
637 * Return a packet containing the immediate data of the given response.
638 */
639static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
640{
641 struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC);
642
643 if (skb) {
644 __skb_put(skb, IMMED_PKT_SIZE);
645 memcpy(skb->data, resp->imm_data, IMMED_PKT_SIZE);
646 }
647 return skb;
648}
649
650/**
651 * calc_tx_descs - calculate the number of Tx descriptors for a packet
652 * @skb: the packet
653 *
654 * Returns the number of Tx descriptors needed for the given Ethernet
655 * packet. Ethernet packets require addition of WR and CPL headers.
656 */
657static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
658{
659 unsigned int flits;
660
661 if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt))
662 return 1;
663
664 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2;
665 if (skb_shinfo(skb)->gso_size)
666 flits++;
667 return flits_to_desc(flits);
668}
669
670/**
671 * make_sgl - populate a scatter/gather list for a packet
672 * @skb: the packet
673 * @sgp: the SGL to populate
674 * @start: start address of skb main body data to include in the SGL
675 * @len: length of skb main body data to include in the SGL
676 * @pdev: the PCI device
677 *
678 * Generates a scatter/gather list for the buffers that make up a packet
679 * and returns the SGL size in 8-byte words. The caller must size the SGL
680 * appropriately.
681 */
682static inline unsigned int make_sgl(const struct sk_buff *skb,
683 struct sg_ent *sgp, unsigned char *start,
684 unsigned int len, struct pci_dev *pdev)
685{
686 dma_addr_t mapping;
687 unsigned int i, j = 0, nfrags;
688
689 if (len) {
690 mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
691 sgp->len[0] = cpu_to_be32(len);
692 sgp->addr[0] = cpu_to_be64(mapping);
693 j = 1;
694 }
695
696 nfrags = skb_shinfo(skb)->nr_frags;
697 for (i = 0; i < nfrags; i++) {
698 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
699
700 mapping = pci_map_page(pdev, frag->page, frag->page_offset,
701 frag->size, PCI_DMA_TODEVICE);
702 sgp->len[j] = cpu_to_be32(frag->size);
703 sgp->addr[j] = cpu_to_be64(mapping);
704 j ^= 1;
705 if (j == 0)
706 ++sgp;
707 }
708 if (j)
709 sgp->len[j] = 0;
710 return ((nfrags + (len != 0)) * 3) / 2 + j;
711}
712
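/*
 * Editor's note: the value returned above is sgl_len() applied to the
 * number of buffers actually mapped, i.e. sgl_len(nfrags + (len != 0)),
 * since j ends up as the parity of that count.
 */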
713/**
714 * check_ring_tx_db - check and potentially ring a Tx queue's doorbell
715 * @adap: the adapter
716 * @q: the Tx queue
717 *
718 * Ring the doorbell if a Tx queue is asleep. There is a natural race
719 * where the HW may go to sleep just after we checked; in that case
720 * the interrupt handler will detect the outstanding Tx packet
721 * and ring the doorbell for us.
722 *
723 * When GTS is disabled we unconditionally ring the doorbell.
724 */
725static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
726{
727#if USE_GTS
728 clear_bit(TXQ_LAST_PKT_DB, &q->flags);
729 if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
730 set_bit(TXQ_LAST_PKT_DB, &q->flags);
731 t3_write_reg(adap, A_SG_KDOORBELL,
732 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
733 }
734#else
735 wmb(); /* write descriptors before telling HW */
736 t3_write_reg(adap, A_SG_KDOORBELL,
737 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
738#endif
739}
740
741static inline void wr_gen2(struct tx_desc *d, unsigned int gen)
742{
743#if SGE_NUM_GENBITS == 2
744 d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen);
745#endif
746}
747
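/*
 * Editor's note: with SGE_NUM_GENBITS == 2 the generation value appears
 * twice, once in the WR header and once in the descriptor's final flit
 * written above, presumably so the HW can tell a completely written
 * descriptor from one it caught mid-update.
 */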
748/**
749 * write_wr_hdr_sgl - write a WR header and, optionally, SGL
750 * @ndesc: number of Tx descriptors spanned by the SGL
751 * @skb: the packet corresponding to the WR
752 * @d: first Tx descriptor to be written
753 * @pidx: index of above descriptors
754 * @q: the SGE Tx queue
755 * @sgl: the SGL
756 * @flits: number of flits to the start of the SGL in the first descriptor
757 * @sgl_flits: the SGL size in flits
758 * @gen: the Tx descriptor generation
759 * @wr_hi: top 32 bits of WR header based on WR type (big endian)
760 * @wr_lo: low 32 bits of WR header based on WR type (big endian)
761 *
762 * Write a work request header and an associated SGL. If the SGL is
763 * small enough to fit into one Tx descriptor it has already been written
764 * and we just need to write the WR header. Otherwise we distribute the
765 * SGL across the number of descriptors it spans.
766 */
767static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
768 struct tx_desc *d, unsigned int pidx,
769 const struct sge_txq *q,
770 const struct sg_ent *sgl,
771 unsigned int flits, unsigned int sgl_flits,
772 unsigned int gen, unsigned int wr_hi,
773 unsigned int wr_lo)
774{
775 struct work_request_hdr *wrp = (struct work_request_hdr *)d;
776 struct tx_sw_desc *sd = &q->sdesc[pidx];
777
778 sd->skb = skb;
779 if (need_skb_unmap()) {
780 struct unmap_info *ui = (struct unmap_info *)skb->cb;
781
782 ui->fragidx = 0;
783 ui->addr_idx = 0;
784 ui->sflit = flits;
785 }
786
787 if (likely(ndesc == 1)) {
788 skb->priority = pidx;
789 wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
790 V_WR_SGLSFLT(flits)) | wr_hi;
791 wmb();
792 wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
793 V_WR_GEN(gen)) | wr_lo;
794 wr_gen2(d, gen);
795 } else {
796 unsigned int ogen = gen;
797 const u64 *fp = (const u64 *)sgl;
798 struct work_request_hdr *wp = wrp;
799
800 wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
801 V_WR_SGLSFLT(flits)) | wr_hi;
802
803 while (sgl_flits) {
804 unsigned int avail = WR_FLITS - flits;
805
806 if (avail > sgl_flits)
807 avail = sgl_flits;
808 memcpy(&d->flit[flits], fp, avail * sizeof(*fp));
809 sgl_flits -= avail;
810 ndesc--;
811 if (!sgl_flits)
812 break;
813
814 fp += avail;
815 d++;
816 sd++;
817 if (++pidx == q->size) {
818 pidx = 0;
819 gen ^= 1;
820 d = q->desc;
821 sd = q->sdesc;
822 }
823
824 sd->skb = skb;
825 wrp = (struct work_request_hdr *)d;
826 wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
827 V_WR_SGLSFLT(1)) | wr_hi;
828 wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS,
829 sgl_flits + 1)) |
830 V_WR_GEN(gen)) | wr_lo;
831 wr_gen2(d, gen);
832 flits = 1;
833 }
834 skb->priority = pidx;
835 wrp->wr_hi |= htonl(F_WR_EOP);
836 wmb();
837 wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
838 wr_gen2((struct tx_desc *)wp, ogen);
839 WARN_ON(ndesc != 0);
840 }
841}
842
843/**
844 * write_tx_pkt_wr - write a TX_PKT work request
845 * @adap: the adapter
846 * @skb: the packet to send
847 * @pi: the egress interface
848 * @pidx: index of the first Tx descriptor to write
849 * @gen: the generation value to use
850 * @q: the Tx queue
851 * @ndesc: number of descriptors the packet will occupy
852 * @compl: the value of the COMPL bit to use
853 *
854 * Generate a TX_PKT work request to send the supplied packet.
855 */
856static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
857 const struct port_info *pi,
858 unsigned int pidx, unsigned int gen,
859 struct sge_txq *q, unsigned int ndesc,
860 unsigned int compl)
861{
862 unsigned int flits, sgl_flits, cntrl, tso_info;
863 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
864 struct tx_desc *d = &q->desc[pidx];
865 struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;
866
867 cpl->len = htonl(skb->len | 0x80000000);
868 cntrl = V_TXPKT_INTF(pi->port_id);
869
870 if (vlan_tx_tag_present(skb) && pi->vlan_grp)
871 cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb));
872
873 tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
874 if (tso_info) {
875 int eth_type;
876 struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl;
877
878 d->flit[2] = 0;
879 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
880 hdr->cntrl = htonl(cntrl);
881 eth_type = skb->nh.raw - skb->data == ETH_HLEN ?
882 CPL_ETH_II : CPL_ETH_II_VLAN;
883 tso_info |= V_LSO_ETH_TYPE(eth_type) |
884 V_LSO_IPHDR_WORDS(skb->nh.iph->ihl) |
885 V_LSO_TCPHDR_WORDS(skb->h.th->doff);
886 hdr->lso_info = htonl(tso_info);
887 flits = 3;
888 } else {
889 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
890 cntrl |= F_TXPKT_IPCSUM_DIS; /* SW calculates IP csum */
891 cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL);
892 cpl->cntrl = htonl(cntrl);
893
894 if (skb->len <= WR_LEN - sizeof(*cpl)) {
895 q->sdesc[pidx].skb = NULL;
896 if (!skb->data_len)
897 memcpy(&d->flit[2], skb->data, skb->len);
898 else
899 skb_copy_bits(skb, 0, &d->flit[2], skb->len);
900
901 flits = (skb->len + 7) / 8 + 2;
902 cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
903 V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT)
904 | F_WR_SOP | F_WR_EOP | compl);
905 wmb();
906 cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
907 V_WR_TID(q->token));
908 wr_gen2(d, gen);
909 kfree_skb(skb);
910 return;
911 }
912
913 flits = 2;
914 }
915
916 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
917 sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);
918 if (need_skb_unmap())
919 ((struct unmap_info *)skb->cb)->len = skb_headlen(skb);
920
921 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
922 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
923 htonl(V_WR_TID(q->token)));
924}
925
926/**
927 * t3_eth_xmit - add a packet to the Ethernet Tx queue
928 * @skb: the packet
929 * @dev: the egress net device
930 *
931 * Add a packet to an SGE Tx queue. Runs with softirqs disabled.
932 */
933int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
934{
935 unsigned int ndesc, pidx, credits, gen, compl;
936 const struct port_info *pi = netdev_priv(dev);
937 struct adapter *adap = dev->priv;
938 struct sge_qset *qs = dev2qset(dev);
939 struct sge_txq *q = &qs->txq[TXQ_ETH];
940
941 /*
942 * The chip's minimum packet length is 9 octets, but play it safe and reject
943 * anything shorter than an Ethernet header.
944 */
945 if (unlikely(skb->len < ETH_HLEN)) {
946 dev_kfree_skb(skb);
947 return NETDEV_TX_OK;
948 }
949
950 spin_lock(&q->lock);
951 reclaim_completed_tx(adap, q);
952
953 credits = q->size - q->in_use;
954 ndesc = calc_tx_descs(skb);
955
956 if (unlikely(credits < ndesc)) {
957 if (!netif_queue_stopped(dev)) {
958 netif_stop_queue(dev);
959 set_bit(TXQ_ETH, &qs->txq_stopped);
960 q->stops++;
961 dev_err(&adap->pdev->dev,
962 "%s: Tx ring %u full while queue awake!\n",
963 dev->name, q->cntxt_id & 7);
964 }
965 spin_unlock(&q->lock);
966 return NETDEV_TX_BUSY;
967 }
968
969 q->in_use += ndesc;
970 if (unlikely(credits - ndesc < q->stop_thres)) {
971 q->stops++;
972 netif_stop_queue(dev);
973 set_bit(TXQ_ETH, &qs->txq_stopped);
974#if !USE_GTS
975 if (should_restart_tx(q) &&
976 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
977 q->restarts++;
978 netif_wake_queue(dev);
979 }
980#endif
981 }
982
983 gen = q->gen;
984 q->unacked += ndesc;
985 compl = (q->unacked & 8) << (S_WR_COMPL - 3);
986 q->unacked &= 7;
987 pidx = q->pidx;
988 q->pidx += ndesc;
989 if (q->pidx >= q->size) {
990 q->pidx -= q->size;
991 q->gen ^= 1;
992 }
993
994 /* update port statistics */
995 if (skb->ip_summed == CHECKSUM_COMPLETE)
996 qs->port_stats[SGE_PSTAT_TX_CSUM]++;
997 if (skb_shinfo(skb)->gso_size)
998 qs->port_stats[SGE_PSTAT_TSO]++;
999 if (vlan_tx_tag_present(skb) && pi->vlan_grp)
1000 qs->port_stats[SGE_PSTAT_VLANINS]++;
1001
1002 dev->trans_start = jiffies;
1003 spin_unlock(&q->lock);
1004
1005 /*
1006 * We do not use Tx completion interrupts to free DMAd Tx packets.
1007 * This is good for performance but means that we rely on new Tx
1008 * packets arriving to run the destructors of completed packets,
1009 * which open up space in their sockets' send queues. Sometimes
1010 * we do not get such new packets, causing Tx to stall. A single
1011 * UDP transmitter is a good example of this situation. We have
1012 * a clean up timer that periodically reclaims completed packets
1013 * but it doesn't run often enough (nor do we want it to) to prevent
1014 * lengthy stalls. A solution to this problem is to run the
1015 * destructor early, after the packet is queued but before it's DMAd.
1016 * A con is that we lie to socket memory accounting, but the amount
1017 * of extra memory is reasonable (limited by the number of Tx
1018 * descriptors), the packets do actually get freed quickly by new
1019 * packets almost always, and for protocols like TCP that wait for
1020 * acks to really free up the data the extra memory is even less.
1021 * On the positive side we run the destructors on the sending CPU
1022 * rather than on a potentially different completing CPU, usually a
1023 * good thing. We also run them without holding our Tx queue lock,
1024 * unlike what reclaim_completed_tx() would otherwise do.
1025 *
1026 * Run the destructor before telling the DMA engine about the packet
1027 * to make sure it doesn't complete and get freed prematurely.
1028 */
1029 if (likely(!skb_shared(skb)))
1030 skb_orphan(skb);
1031
1032 write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl);
1033 check_ring_tx_db(adap, q);
1034 return NETDEV_TX_OK;
1035}
1036
1037/**
1038 * write_imm - write a packet into a Tx descriptor as immediate data
1039 * @d: the Tx descriptor to write
1040 * @skb: the packet
1041 * @len: the length of packet data to write as immediate data
1042 * @gen: the generation bit value to write
1043 *
1044 * Writes a packet as immediate data into a Tx descriptor. The packet
1045 * contains a work request at its beginning. We must write the packet
1046 * carefully so the SGE doesn't read accidentally before it's written in
1047 * its entirety.
1048 */
1049static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
1050 unsigned int len, unsigned int gen)
1051{
1052 struct work_request_hdr *from = (struct work_request_hdr *)skb->data;
1053 struct work_request_hdr *to = (struct work_request_hdr *)d;
1054
1055 memcpy(&to[1], &from[1], len - sizeof(*from));
1056 to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
1057 V_WR_BCNTLFLT(len & 7));
1058 wmb();
1059 to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
1060 V_WR_LEN((len + 7) / 8));
1061 wr_gen2(d, gen);
1062 kfree_skb(skb);
1063}
1064
1065/**
1066 * check_desc_avail - check descriptor availability on a send queue
1067 * @adap: the adapter
1068 * @q: the send queue
1069 * @skb: the packet needing the descriptors
1070 * @ndesc: the number of Tx descriptors needed
1071 * @qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
1072 *
1073 * Checks if the requested number of Tx descriptors is available on an
1074 * SGE send queue. If the queue is already suspended or not enough
1075 * descriptors are available the packet is queued for later transmission.
1076 * Must be called with the Tx queue locked.
1077 *
1078 * Returns 0 if enough descriptors are available, 1 if there aren't
1079 * enough descriptors and the packet has been queued, and 2 if the caller
1080 * needs to retry because there weren't enough descriptors at the
1081 * beginning of the call but some freed up in the mean time.
1082 */
1083static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
1084 struct sk_buff *skb, unsigned int ndesc,
1085 unsigned int qid)
1086{
1087 if (unlikely(!skb_queue_empty(&q->sendq))) {
1088 addq_exit:__skb_queue_tail(&q->sendq, skb);
1089 return 1;
1090 }
1091 if (unlikely(q->size - q->in_use < ndesc)) {
1092 struct sge_qset *qs = txq_to_qset(q, qid);
1093
1094 set_bit(qid, &qs->txq_stopped);
1095 smp_mb__after_clear_bit();
1096
1097 if (should_restart_tx(q) &&
1098 test_and_clear_bit(qid, &qs->txq_stopped))
1099 return 2;
1100
1101 q->stops++;
1102 goto addq_exit;
1103 }
1104 return 0;
1105}
1106
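/*
 * Editor's sketch (hypothetical helper) of the retry convention around
 * check_desc_avail(); the real callers, ctrl_xmit() and ofld_xmit()
 * below, also re-run their reclaim step before each retry.
 */
static int example_reserve_desc(struct adapter *adap, struct sge_txq *q,
				struct sk_buff *skb, unsigned int ndesc,
				unsigned int qid)
{
	int ret;

	do {
		ret = check_desc_avail(adap, q, skb, ndesc, qid);
	} while (ret == 2);	/* descriptors freed up, check again */
	return ret;		/* 0: proceed, 1: skb queued on q->sendq */
}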
1107/**
1108 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1109 * @q: the SGE control Tx queue
1110 *
1111 * This is a variant of reclaim_completed_tx() that is used for Tx queues
1112 * that send only immediate data (presently just the control queues) and
1113 * thus do not have any sk_buffs to release.
1114 */
1115static inline void reclaim_completed_tx_imm(struct sge_txq *q)
1116{
1117 unsigned int reclaim = q->processed - q->cleaned;
1118
1119 q->in_use -= reclaim;
1120 q->cleaned += reclaim;
1121}
1122
1123static inline int immediate(const struct sk_buff *skb)
1124{
1125 return skb->len <= WR_LEN && !skb->data_len;
1126}
1127
1128/**
1129 * ctrl_xmit - send a packet through an SGE control Tx queue
1130 * @adap: the adapter
1131 * @q: the control queue
1132 * @skb: the packet
1133 *
1134 * Send a packet through an SGE control Tx queue. Packets sent through
1135 * a control queue must fit entirely as immediate data in a single Tx
1136 * descriptor and have no page fragments.
1137 */
1138static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
1139 struct sk_buff *skb)
1140{
1141 int ret;
1142 struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data;
1143
1144 if (unlikely(!immediate(skb))) {
1145 WARN_ON(1);
1146 dev_kfree_skb(skb);
1147 return NET_XMIT_SUCCESS;
1148 }
1149
1150 wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
1151 wrp->wr_lo = htonl(V_WR_TID(q->token));
1152
1153 spin_lock(&q->lock);
1154 again:reclaim_completed_tx_imm(q);
1155
1156 ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
1157 if (unlikely(ret)) {
1158 if (ret == 1) {
1159 spin_unlock(&q->lock);
1160 return NET_XMIT_CN;
1161 }
1162 goto again;
1163 }
1164
1165 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1166
1167 q->in_use++;
1168 if (++q->pidx >= q->size) {
1169 q->pidx = 0;
1170 q->gen ^= 1;
1171 }
1172 spin_unlock(&q->lock);
1173 wmb();
1174 t3_write_reg(adap, A_SG_KDOORBELL,
1175 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1176 return NET_XMIT_SUCCESS;
1177}
1178
1179/**
1180 * restart_ctrlq - restart a suspended control queue
1181 * @qs: the queue set containing the control queue
1182 *
1183 * Resumes transmission on a suspended Tx control queue.
1184 */
1185static void restart_ctrlq(unsigned long data)
1186{
1187 struct sk_buff *skb;
1188 struct sge_qset *qs = (struct sge_qset *)data;
1189 struct sge_txq *q = &qs->txq[TXQ_CTRL];
1190 struct adapter *adap = qs->netdev->priv;
1191
1192 spin_lock(&q->lock);
1193 again:reclaim_completed_tx_imm(q);
1194
1195 while (q->in_use < q->size && (skb = __skb_dequeue(&q->sendq)) != NULL) {
1196
1197 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1198
1199 if (++q->pidx >= q->size) {
1200 q->pidx = 0;
1201 q->gen ^= 1;
1202 }
1203 q->in_use++;
1204 }
1205
1206 if (!skb_queue_empty(&q->sendq)) {
1207 set_bit(TXQ_CTRL, &qs->txq_stopped);
1208 smp_mb__after_clear_bit();
1209
1210 if (should_restart_tx(q) &&
1211 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
1212 goto again;
1213 q->stops++;
1214 }
1215
1216 spin_unlock(&q->lock);
1217 t3_write_reg(adap, A_SG_KDOORBELL,
1218 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1219}
1220
1221/*
1222 * Send a management message through control queue 0
1223 */
1224int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
1225{
1226 return ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
1227}
1228
1229/**
1230 * write_ofld_wr - write an offload work request
1231 * @adap: the adapter
1232 * @skb: the packet to send
1233 * @q: the Tx queue
1234 * @pidx: index of the first Tx descriptor to write
1235 * @gen: the generation value to use
1236 * @ndesc: number of descriptors the packet will occupy
1237 *
1238 * Write an offload work request to send the supplied packet. The packet
1239 * data already carry the work request with most fields populated.
1240 */
1241static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
1242 struct sge_txq *q, unsigned int pidx,
1243 unsigned int gen, unsigned int ndesc)
1244{
1245 unsigned int sgl_flits, flits;
1246 struct work_request_hdr *from;
1247 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
1248 struct tx_desc *d = &q->desc[pidx];
1249
1250 if (immediate(skb)) {
1251 q->sdesc[pidx].skb = NULL;
1252 write_imm(d, skb, skb->len, gen);
1253 return;
1254 }
1255
1256 /* Only TX_DATA builds SGLs */
1257
1258 from = (struct work_request_hdr *)skb->data;
1259 memcpy(&d->flit[1], &from[1], skb->h.raw - skb->data - sizeof(*from));
1260
1261 flits = (skb->h.raw - skb->data) / 8;
1262 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1263 sgl_flits = make_sgl(skb, sgp, skb->h.raw, skb->tail - skb->h.raw,
1264 adap->pdev);
1265 if (need_skb_unmap())
1266 ((struct unmap_info *)skb->cb)->len = skb->tail - skb->h.raw;
1267
1268 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
1269 gen, from->wr_hi, from->wr_lo);
1270}
1271
1272/**
1273 * calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet
1274 * @skb: the packet
1275 *
1276 * Returns the number of Tx descriptors needed for the given offload
1277 * packet. These packets are already fully constructed.
1278 */
1279static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
1280{
1281 unsigned int flits, cnt = skb_shinfo(skb)->nr_frags;
1282
1283 if (skb->len <= WR_LEN && cnt == 0)
1284 return 1; /* packet fits as immediate data */
1285
1286 flits = (skb->h.raw - skb->data) / 8; /* headers */
1287 if (skb->tail != skb->h.raw)
1288 cnt++;
1289 return flits_to_desc(flits + sgl_len(cnt));
1290}
1291
1292/**
1293 * ofld_xmit - send a packet through an offload queue
1294 * @adap: the adapter
1295 * @q: the Tx offload queue
1296 * @skb: the packet
1297 *
1298 * Send an offload packet through an SGE offload queue.
1299 */
1300static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
1301 struct sk_buff *skb)
1302{
1303 int ret;
1304 unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;
1305
1306 spin_lock(&q->lock);
1307 again:reclaim_completed_tx(adap, q);
1308
1309 ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
1310 if (unlikely(ret)) {
1311 if (ret == 1) {
1312 skb->priority = ndesc; /* save for restart */
1313 spin_unlock(&q->lock);
1314 return NET_XMIT_CN;
1315 }
1316 goto again;
1317 }
1318
1319 gen = q->gen;
1320 q->in_use += ndesc;
1321 pidx = q->pidx;
1322 q->pidx += ndesc;
1323 if (q->pidx >= q->size) {
1324 q->pidx -= q->size;
1325 q->gen ^= 1;
1326 }
1327 spin_unlock(&q->lock);
1328
1329 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1330 check_ring_tx_db(adap, q);
1331 return NET_XMIT_SUCCESS;
1332}
1333
1334/**
1335 * restart_offloadq - restart a suspended offload queue
1336 * @qs: the queue set containing the offload queue
1337 *
1338 * Resumes transmission on a suspended Tx offload queue.
1339 */
1340static void restart_offloadq(unsigned long data)
1341{
1342 struct sk_buff *skb;
1343 struct sge_qset *qs = (struct sge_qset *)data;
1344 struct sge_txq *q = &qs->txq[TXQ_OFLD];
1345 struct adapter *adap = qs->netdev->priv;
1346
1347 spin_lock(&q->lock);
1348 again:reclaim_completed_tx(adap, q);
1349
1350 while ((skb = skb_peek(&q->sendq)) != NULL) {
1351 unsigned int gen, pidx;
1352 unsigned int ndesc = skb->priority;
1353
1354 if (unlikely(q->size - q->in_use < ndesc)) {
1355 set_bit(TXQ_OFLD, &qs->txq_stopped);
1356 smp_mb__after_clear_bit();
1357
1358 if (should_restart_tx(q) &&
1359 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
1360 goto again;
1361 q->stops++;
1362 break;
1363 }
1364
1365 gen = q->gen;
1366 q->in_use += ndesc;
1367 pidx = q->pidx;
1368 q->pidx += ndesc;
1369 if (q->pidx >= q->size) {
1370 q->pidx -= q->size;
1371 q->gen ^= 1;
1372 }
1373 __skb_unlink(skb, &q->sendq);
1374 spin_unlock(&q->lock);
1375
1376 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1377 spin_lock(&q->lock);
1378 }
1379 spin_unlock(&q->lock);
1380
1381#if USE_GTS
1382 set_bit(TXQ_RUNNING, &q->flags);
1383 set_bit(TXQ_LAST_PKT_DB, &q->flags);
1384#endif
1385 t3_write_reg(adap, A_SG_KDOORBELL,
1386 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1387}
1388
1389/**
1390 * queue_set - return the queue set a packet should use
1391 * @skb: the packet
1392 *
1393 * Maps a packet to the SGE queue set it should use. The desired queue
1394 * set is carried in bits 1-3 of the packet's priority.
1395 */
1396static inline int queue_set(const struct sk_buff *skb)
1397{
1398 return skb->priority >> 1;
1399}
1400
1401/**
1402 * is_ctrl_pkt - return whether an offload packet is a control packet
1403 * @skb: the packet
1404 *
1405 * Determines whether an offload packet should use an OFLD or a CTRL
1406 * Tx queue. This is indicated by bit 0 in the packet's priority.
1407 */
1408static inline int is_ctrl_pkt(const struct sk_buff *skb)
1409{
1410 return skb->priority & 1;
1411}
1412
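/*
 * Editor's sketch (hypothetical helper): how a sender would encode its
 * queue choice in skb->priority before calling t3_offload_tx() below.
 */
static inline void encode_offload_priority(struct sk_buff *skb,
					   unsigned int qset, int is_ctrl)
{
	skb->priority = (qset << 1) | (is_ctrl & 1);
}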
1413/**
1414 * t3_offload_tx - send an offload packet
1415 * @tdev: the offload device to send to
1416 * @skb: the packet
1417 *
1418 * Sends an offload packet. We use the packet priority to select the
1419 * appropriate Tx queue as follows: bit 0 indicates whether the packet
1420 * should be sent as regular or control, bits 1-3 select the queue set.
1421 */
1422int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
1423{
1424 struct adapter *adap = tdev2adap(tdev);
1425 struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];
1426
1427 if (unlikely(is_ctrl_pkt(skb)))
1428 return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);
1429
1430 return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
1431}
1432
1433/**
1434 * offload_enqueue - add an offload packet to an SGE offload receive queue
1435 * @q: the SGE response queue
1436 * @skb: the packet
1437 *
1438 * Add a new offload packet to an SGE response queue's offload packet
1439 * queue. If the packet is the first on the queue it schedules the RX
1440 * softirq to process the queue.
1441 */
1442static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
1443{
1444 skb->next = skb->prev = NULL;
1445 if (q->rx_tail)
1446 q->rx_tail->next = skb;
1447 else {
1448 struct sge_qset *qs = rspq_to_qset(q);
1449
1450 if (__netif_rx_schedule_prep(qs->netdev))
1451 __netif_rx_schedule(qs->netdev);
1452 q->rx_head = skb;
1453 }
1454 q->rx_tail = skb;
1455}
1456
1457/**
1458 * deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts
1459 * @tdev: the offload device that will be receiving the packets
1460 * @q: the SGE response queue that assembled the bundle
1461 * @skbs: the partial bundle
1462 * @n: the number of packets in the bundle
1463 *
1464 * Delivers a (partial) bundle of Rx offload packets to an offload device.
1465 */
1466static inline void deliver_partial_bundle(struct t3cdev *tdev,
1467 struct sge_rspq *q,
1468 struct sk_buff *skbs[], int n)
1469{
1470 if (n) {
1471 q->offload_bundles++;
1472 tdev->recv(tdev, skbs, n);
1473 }
1474}
1475
1476/**
1477 * ofld_poll - NAPI handler for offload packets in interrupt mode
1478 * @dev: the network device doing the polling
1479 * @budget: polling budget
1480 *
1481 * The NAPI handler for offload packets when a response queue is serviced
1482 * by the hard interrupt handler, i.e., when it's operating in non-polling
1483 * mode. Creates small packet batches and sends them through the offload
1484 * receive handler. Batches need to be of modest size as we do prefetches
1485 * on the packets in each.
1486 */
1487static int ofld_poll(struct net_device *dev, int *budget)
1488{
1489 struct adapter *adapter = dev->priv;
1490 struct sge_qset *qs = dev2qset(dev);
1491 struct sge_rspq *q = &qs->rspq;
1492 int work_done, limit = min(*budget, dev->quota), avail = limit;
1493
1494 while (avail) {
1495 struct sk_buff *head, *tail, *skbs[RX_BUNDLE_SIZE];
1496 int ngathered;
1497
1498 spin_lock_irq(&q->lock);
1499 head = q->rx_head;
1500 if (!head) {
1501 work_done = limit - avail;
1502 *budget -= work_done;
1503 dev->quota -= work_done;
1504 __netif_rx_complete(dev);
1505 spin_unlock_irq(&q->lock);
1506 return 0;
1507 }
1508
1509 tail = q->rx_tail;
1510 q->rx_head = q->rx_tail = NULL;
1511 spin_unlock_irq(&q->lock);
1512
1513 for (ngathered = 0; avail && head; avail--) {
1514 prefetch(head->data);
1515 skbs[ngathered] = head;
1516 head = head->next;
1517 skbs[ngathered]->next = NULL;
1518 if (++ngathered == RX_BUNDLE_SIZE) {
1519 q->offload_bundles++;
1520 adapter->tdev.recv(&adapter->tdev, skbs,
1521 ngathered);
1522 ngathered = 0;
1523 }
1524 }
1525 if (head) { /* splice remaining packets back onto Rx queue */
1526 spin_lock_irq(&q->lock);
1527 tail->next = q->rx_head;
1528 if (!q->rx_head)
1529 q->rx_tail = tail;
1530 q->rx_head = head;
1531 spin_unlock_irq(&q->lock);
1532 }
1533 deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
1534 }
1535 work_done = limit - avail;
1536 *budget -= work_done;
1537 dev->quota -= work_done;
1538 return 1;
1539}
1540
1541/**
1542 * rx_offload - process a received offload packet
1543 * @tdev: the offload device receiving the packet
1544 * @rq: the response queue that received the packet
1545 * @skb: the packet
1546 * @rx_gather: a gather list of packets if we are building a bundle
1547 * @gather_idx: index of the next available slot in the bundle
1548 *
1549 * Process an ingress offload packet and add it to the offload ingress
1550 * queue. Returns the index of the next available slot in the bundle.
1551 */
1552static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
1553 struct sk_buff *skb, struct sk_buff *rx_gather[],
1554 unsigned int gather_idx)
1555{
1556 rq->offload_pkts++;
1557 skb->mac.raw = skb->nh.raw = skb->h.raw = skb->data;
1558
1559 if (rq->polling) {
1560 rx_gather[gather_idx++] = skb;
1561 if (gather_idx == RX_BUNDLE_SIZE) {
1562 tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE);
1563 gather_idx = 0;
1564 rq->offload_bundles++;
1565 }
1566 } else
1567 offload_enqueue(rq, skb);
1568
1569 return gather_idx;
1570}
1571
1572/**
1573 * restart_tx - check whether to restart suspended Tx queues
1574 * @qs: the queue set to resume
1575 *
1576 * Restarts suspended Tx queues of an SGE queue set if they have enough
1577 * free resources to resume operation.
1578 */
1579static void restart_tx(struct sge_qset *qs)
1580{
1581 if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
1582 should_restart_tx(&qs->txq[TXQ_ETH]) &&
1583 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1584 qs->txq[TXQ_ETH].restarts++;
1585 if (netif_running(qs->netdev))
1586 netif_wake_queue(qs->netdev);
1587 }
1588
1589 if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
1590 should_restart_tx(&qs->txq[TXQ_OFLD]) &&
1591 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
1592 qs->txq[TXQ_OFLD].restarts++;
1593 tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk);
1594 }
1595 if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
1596 should_restart_tx(&qs->txq[TXQ_CTRL]) &&
1597 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
1598 qs->txq[TXQ_CTRL].restarts++;
1599 tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk);
1600 }
1601}
1602
1603/**
1604 * rx_eth - process an ingress ethernet packet
1605 * @adap: the adapter
1606 * @rq: the response queue that received the packet
1607 * @skb: the packet
1608 * @pad: amount of padding at the start of the buffer
1609 *
1610 * Process an ingress ethernet packet and deliver it to the stack.
1611 * The padding is 2 if the packet was delivered in an Rx buffer and 0
1612 * if it was immediate data in a response.
1613 */
1614static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
1615 struct sk_buff *skb, int pad)
1616{
1617 struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
1618 struct port_info *pi;
1619
1620 rq->eth_pkts++;
1621 skb_pull(skb, sizeof(*p) + pad);
1622 skb->dev = adap->port[p->iff];
1623 skb->dev->last_rx = jiffies;
1624 skb->protocol = eth_type_trans(skb, skb->dev);
1625 pi = netdev_priv(skb->dev);
1626 if (pi->rx_csum_offload && p->csum_valid && p->csum == 0xffff &&
1627 !p->fragment) {
1628 rspq_to_qset(rq)->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
1629 skb->ip_summed = CHECKSUM_UNNECESSARY;
1630 } else
1631 skb->ip_summed = CHECKSUM_NONE;
1632
1633 if (unlikely(p->vlan_valid)) {
1634 struct vlan_group *grp = pi->vlan_grp;
1635
1636 rspq_to_qset(rq)->port_stats[SGE_PSTAT_VLANEX]++;
1637 if (likely(grp))
1638 __vlan_hwaccel_rx(skb, grp, ntohs(p->vlan),
1639 rq->polling);
1640 else
1641 dev_kfree_skb_any(skb);
1642 } else if (rq->polling)
1643 netif_receive_skb(skb);
1644 else
1645 netif_rx(skb);
1646}
1647
1648/**
1649 * handle_rsp_cntrl_info - handles control information in a response
1650 * @qs: the queue set corresponding to the response
1651 * @flags: the response control flags
1652 *
1653 * Handles the control information of an SGE response, such as GTS
1654 * indications and completion credits for the queue set's Tx queues.
1655 * HW coalesces credits, we don't do any extra SW coalescing.
1656 */
1657static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags)
1658{
1659 unsigned int credits;
1660
1661#if USE_GTS
1662 if (flags & F_RSPD_TXQ0_GTS)
1663 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
1664#endif
1665
1666 credits = G_RSPD_TXQ0_CR(flags);
1667 if (credits)
1668 qs->txq[TXQ_ETH].processed += credits;
1669
1670 credits = G_RSPD_TXQ2_CR(flags);
1671 if (credits)
1672 qs->txq[TXQ_CTRL].processed += credits;
1673
1674# if USE_GTS
1675 if (flags & F_RSPD_TXQ1_GTS)
1676 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
1677# endif
1678 credits = G_RSPD_TXQ1_CR(flags);
1679 if (credits)
1680 qs->txq[TXQ_OFLD].processed += credits;
1681}
1682
1683/**
1684 * check_ring_db - check if we need to ring any doorbells
1685 * @adapter: the adapter
1686 * @qs: the queue set whose Tx queues are to be examined
1687 * @sleeping: indicates which Tx queue sent GTS
1688 *
1689 * Checks if some of a queue set's Tx queues need to ring their doorbells
1690 * to resume transmission after idling while they still have unprocessed
1691 * descriptors.
1692 */
1693static void check_ring_db(struct adapter *adap, struct sge_qset *qs,
1694 unsigned int sleeping)
1695{
1696 if (sleeping & F_RSPD_TXQ0_GTS) {
1697 struct sge_txq *txq = &qs->txq[TXQ_ETH];
1698
1699 if (txq->cleaned + txq->in_use != txq->processed &&
1700 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
1701 set_bit(TXQ_RUNNING, &txq->flags);
1702 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
1703 V_EGRCNTX(txq->cntxt_id));
1704 }
1705 }
1706
1707 if (sleeping & F_RSPD_TXQ1_GTS) {
1708 struct sge_txq *txq = &qs->txq[TXQ_OFLD];
1709
1710 if (txq->cleaned + txq->in_use != txq->processed &&
1711 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
1712 set_bit(TXQ_RUNNING, &txq->flags);
1713 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
1714 V_EGRCNTX(txq->cntxt_id));
1715 }
1716 }
1717}
1718
1719/**
1720 * is_new_response - check if a response is newly written
1721 * @r: the response descriptor
1722 * @q: the response queue
1723 *
1724 * Returns true if a response descriptor contains an as yet unprocessed
1725 * response.
1726 */
1727static inline int is_new_response(const struct rsp_desc *r,
1728 const struct sge_rspq *q)
1729{
1730 return (r->intr_gen & F_RSPD_GEN2) == q->gen;
1731}
1732
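/*
 * Editor's note: q->gen is toggled by SW each time cidx wraps around the
 * ring (see process_responses() below), so a descriptor left over from
 * the previous lap still carries the stale generation bit and compares
 * unequal here until the HW rewrites it.
 */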
1733#define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
1734#define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
1735 V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
1736 V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
1737 V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
1738
1739/* How long to delay the next interrupt in case of memory shortage, in units of 0.1 us (2500 == 250 us). */
1740#define NOMEM_INTR_DELAY 2500
1741
1742/**
1743 * process_responses - process responses from an SGE response queue
1744 * @adap: the adapter
1745 * @qs: the queue set to which the response queue belongs
1746 * @budget: how many responses can be processed in this round
1747 *
1748 * Process responses from an SGE response queue up to the supplied budget.
1749 * Responses include received packets as well as credits and other events
1750 * for the queues that belong to the response queue's queue set.
1751 * A negative budget is effectively unlimited.
1752 *
1753 * Additionally, choose the interrupt holdoff time for the next interrupt
1754 * on this queue. If the system is under memory shortage use a fairly
1755 * long delay to help recovery.
1756 */
1757static int process_responses(struct adapter *adap, struct sge_qset *qs,
1758 int budget)
1759{
1760 struct sge_rspq *q = &qs->rspq;
1761 struct rsp_desc *r = &q->desc[q->cidx];
1762 int budget_left = budget;
1763 unsigned int sleeping = 0;
1764 struct sk_buff *offload_skbs[RX_BUNDLE_SIZE];
1765 int ngathered = 0;
1766
1767 q->next_holdoff = q->holdoff_tmr;
1768
1769 while (likely(budget_left && is_new_response(r, q))) {
1770 int eth, ethpad = 0;
1771 struct sk_buff *skb = NULL;
1772 u32 len, flags = ntohl(r->flags);
1773 u32 rss_hi = *(const u32 *)r, rss_lo = r->rss_hdr.rss_hash_val;
1774
1775 eth = r->rss_hdr.opcode == CPL_RX_PKT;
1776
1777 if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
1778 skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
1779 if (!skb)
1780 goto no_mem;
1781
1782 memcpy(__skb_put(skb, AN_PKT_SIZE), r, AN_PKT_SIZE);
1783 skb->data[0] = CPL_ASYNC_NOTIF;
1784 rss_hi = htonl(CPL_ASYNC_NOTIF << 24);
1785 q->async_notif++;
1786 } else if (flags & F_RSPD_IMM_DATA_VALID) {
1787 skb = get_imm_packet(r);
1788 if (unlikely(!skb)) {
1789 no_mem:
1790 q->next_holdoff = NOMEM_INTR_DELAY;
1791 q->nomem++;
1792 /* consume one credit since we tried */
1793 budget_left--;
1794 break;
1795 }
1796 q->imm_data++;
1797 } else if ((len = ntohl(r->len_cq)) != 0) {
1798 struct sge_fl *fl;
1799
1800 fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
1801 fl->credits--;
1802 skb = get_packet(adap, fl, G_RSPD_LEN(len),
1803 eth ? SGE_RX_DROP_THRES : 0);
1804 if (!skb)
1805 q->rx_drops++;
1806 else if (r->rss_hdr.opcode == CPL_TRACE_PKT)
1807 __skb_pull(skb, 2);
1808 ethpad = 2;
1809 if (++fl->cidx == fl->size)
1810 fl->cidx = 0;
1811 } else
1812 q->pure_rsps++;
1813
1814 if (flags & RSPD_CTRL_MASK) {
1815 sleeping |= flags & RSPD_GTS_MASK;
1816 handle_rsp_cntrl_info(qs, flags);
1817 }
1818
1819 r++;
1820 if (unlikely(++q->cidx == q->size)) {
1821 q->cidx = 0;
1822 q->gen ^= 1;
1823 r = q->desc;
1824 }
1825 prefetch(r);
1826
1827 if (++q->credits >= (q->size / 4)) {
1828 refill_rspq(adap, q, q->credits);
1829 q->credits = 0;
1830 }
1831
1832 if (likely(skb != NULL)) {
1833 if (eth)
1834 rx_eth(adap, q, skb, ethpad);
1835 else {
1836 /* Preserve the RSS info in csum & priority */
1837 skb->csum = rss_hi;
1838 skb->priority = rss_lo;
1839 ngathered = rx_offload(&adap->tdev, q, skb,
1840 offload_skbs, ngathered);
1841 }
1842 }
1843
1844 --budget_left;
1845 }
1846
1847 deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
1848 if (sleeping)
1849 check_ring_db(adap, qs, sleeping);
1850
1851 smp_mb(); /* commit Tx queue .processed updates */
1852 if (unlikely(qs->txq_stopped != 0))
1853 restart_tx(qs);
1854
1855 budget -= budget_left;
1856 return budget;
1857}
1858
1859static inline int is_pure_response(const struct rsp_desc *r)
1860{
1861 u32 n = ntohl(r->flags) & (F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID);
1862
1863 return (n | r->len_cq) == 0;
1864}
1865
1866/**
1867 * napi_rx_handler - the NAPI handler for Rx processing
1868 * @dev: the net device
1869 * @budget: how many packets we can process in this round
1870 *
1871 * Handler for new data events when using NAPI.
1872 */
1873static int napi_rx_handler(struct net_device *dev, int *budget)
1874{
1875 struct adapter *adap = dev->priv;
1876 struct sge_qset *qs = dev2qset(dev);
1877 int effective_budget = min(*budget, dev->quota);
1878
1879 int work_done = process_responses(adap, qs, effective_budget);
1880 *budget -= work_done;
1881 dev->quota -= work_done;
1882
1883 if (work_done >= effective_budget)
1884 return 1;
1885
1886 netif_rx_complete(dev);
1887
1888 /*
1889 * Because we don't atomically flush the following write, it is
1890 * possible in very rare cases that it reaches the device in a way
1891 * that races with a new response being written plus an error interrupt,
1892 * causing the NAPI interrupt handler below to return unhandled status
1893 * to the OS. Protecting against this would require flushing the write
1894 * and doing both the write and the flush with interrupts off -- way too
1895 * expensive and unjustifiable given the rarity of the race.
1896 *
1897 * The race cannot happen at all with MSI-X.
1898 */
1899 t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
1900 V_NEWTIMER(qs->rspq.next_holdoff) |
1901 V_NEWINDEX(qs->rspq.cidx));
1902 return 0;
1903}
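napi_rx_handler() follows the pre-napi_struct NAPI contract: ->poll receives a
pointer to the global budget, must decrement both *budget and dev->quota by the
work done, and returns 1 to stay on the poll list or 0 after completing with
netif_rx_complete(). A hedged illustration of a single invocation (the budget
value is made up; this is not this driver's code):

	int budget = 64;                   /* softirq's remaining budget */
	int again = dev->poll(dev, &budget);
	/* afterwards budget and dev->quota have both dropped by the number
	 * of responses processed; again == 1 means poll this device again */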
1904
1905/*
1906 * Returns true if the device is already scheduled for polling.
1907 */
1908static inline int napi_is_scheduled(struct net_device *dev)
1909{
1910 return test_bit(__LINK_STATE_RX_SCHED, &dev->state);
1911}
1912
1913/**
1914 * process_pure_responses - process pure responses from a response queue
1915 * @adap: the adapter
1916 * @qs: the queue set owning the response queue
1917 * @r: the first pure response to process
1918 *
1919 * A simpler version of process_responses() that handles only pure (i.e.,
1920 * non-data-carrying) responses. Such responses are too light-weight to
1921 * justify calling a softirq under NAPI, so we handle them specially in
1922 * the interrupt handler. The function is called with a pointer to a
1923 * response, which the caller must ensure is a valid pure response.
1924 *
1925 * Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
1926 */
1927static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
1928 struct rsp_desc *r)
1929{
1930 struct sge_rspq *q = &qs->rspq;
1931 unsigned int sleeping = 0;
1932
1933 do {
1934 u32 flags = ntohl(r->flags);
1935
1936 r++;
1937 if (unlikely(++q->cidx == q->size)) {
1938 q->cidx = 0;
1939 q->gen ^= 1;
1940 r = q->desc;
1941 }
1942 prefetch(r);
1943
1944 if (flags & RSPD_CTRL_MASK) {
1945 sleeping |= flags & RSPD_GTS_MASK;
1946 handle_rsp_cntrl_info(qs, flags);
1947 }
1948
1949 q->pure_rsps++;
1950 if (++q->credits >= (q->size / 4)) {
1951 refill_rspq(adap, q, q->credits);
1952 q->credits = 0;
1953 }
1954 } while (is_new_response(r, q) && is_pure_response(r));
1955
1956 if (sleeping)
1957 check_ring_db(adap, qs, sleeping);
1958
1959 smp_mb(); /* commit Tx queue .processed updates */
1960 if (unlikely(qs->txq_stopped != 0))
1961 restart_tx(qs);
1962
1963 return is_new_response(r, q);
1964}
1965
1966/**
1967 * handle_responses - decide what to do with new responses in NAPI mode
1968 * @adap: the adapter
1969 * @q: the response queue
1970 *
1971 * This is used by the NAPI interrupt handlers to decide what to do with
1972 * new SGE responses. If there are no new responses it returns -1. If
1973 * there are new responses and they are pure (i.e., non-data-carrying)
1974 * it handles them straight in hard interrupt context as they are very
1975 * cheap and don't deliver any packets. Finally, if there are any data
1976 * signaling responses it schedules the NAPI handler. Returns 1 if it
1977 * schedules NAPI, 0 if all new responses were pure.
1978 *
1979 * The caller must ascertain NAPI is not already running.
1980 */
1981static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
1982{
1983 struct sge_qset *qs = rspq_to_qset(q);
1984 struct rsp_desc *r = &q->desc[q->cidx];
1985
1986 if (!is_new_response(r, q))
1987 return -1;
1988 if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
1989 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
1990 V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
1991 return 0;
1992 }
1993 if (likely(__netif_rx_schedule_prep(qs->netdev)))
1994 __netif_rx_schedule(qs->netdev);
1995 return 1;
1996}
1997
1998/*
1999 * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case
2000 * (i.e., response queue serviced in hard interrupt).
2001 */
2002irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
2003{
2004 struct sge_qset *qs = cookie;
2005 struct adapter *adap = qs->netdev->priv;
2006 struct sge_rspq *q = &qs->rspq;
2007
2008 spin_lock(&q->lock);
2009 if (process_responses(adap, qs, -1) == 0)
2010 q->unhandled_irqs++;
2011 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2012 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2013 spin_unlock(&q->lock);
2014 return IRQ_HANDLED;
2015}
2016
2017/*
2018 * The MSI-X interrupt handler for an SGE response queue for the NAPI case
2019 * (i.e., response queue serviced by NAPI polling).
2020 */
2021irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
2022{
2023 struct sge_qset *qs = cookie;
2024 struct adapter *adap = qs->netdev->priv;
2025 struct sge_rspq *q = &qs->rspq;
2026
2027 spin_lock(&q->lock);
2028 BUG_ON(napi_is_scheduled(qs->netdev));
2029
2030 if (handle_responses(adap, q) < 0)
2031 q->unhandled_irqs++;
2032 spin_unlock(&q->lock);
2033 return IRQ_HANDLED;
2034}
2035
2036/*
2037 * The non-NAPI MSI interrupt handler. This needs to handle data events from
2038 * SGE response queues as well as error and other async events as they all use
2039 * the same MSI vector. We use one SGE response queue per port in this mode
2040 * and protect all response queues with queue 0's lock.
2041 */
2042static irqreturn_t t3_intr_msi(int irq, void *cookie)
2043{
2044 int new_packets = 0;
2045 struct adapter *adap = cookie;
2046 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2047
2048 spin_lock(&q->lock);
2049
2050 if (process_responses(adap, &adap->sge.qs[0], -1)) {
2051 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2052 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2053 new_packets = 1;
2054 }
2055
2056 if (adap->params.nports == 2 &&
2057 process_responses(adap, &adap->sge.qs[1], -1)) {
2058 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2059
2060 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) |
2061 V_NEWTIMER(q1->next_holdoff) |
2062 V_NEWINDEX(q1->cidx));
2063 new_packets = 1;
2064 }
2065
2066 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2067 q->unhandled_irqs++;
2068
2069 spin_unlock(&q->lock);
2070 return IRQ_HANDLED;
2071}
2072
2073static int rspq_check_napi(struct net_device *dev, struct sge_rspq *q)
2074{
2075 if (!napi_is_scheduled(dev) && is_new_response(&q->desc[q->cidx], q)) {
2076 if (likely(__netif_rx_schedule_prep(dev)))
2077 __netif_rx_schedule(dev);
2078 return 1;
2079 }
2080 return 0;
2081}
2082
2083/*
2084 * The MSI interrupt handler for the NAPI case (i.e., response queues serviced
2085 * by NAPI polling). Handles data events from SGE response queues as well as
2086 * error and other async events as they all use the same MSI vector. We use
2087 * one SGE response queue per port in this mode and protect all response
2088 * queues with queue 0's lock.
2089 */
2090irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
2091{
2092 int new_packets;
2093 struct adapter *adap = cookie;
2094 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2095
2096 spin_lock(&q->lock);
2097
2098 new_packets = rspq_check_napi(adap->sge.qs[0].netdev, q);
2099 if (adap->params.nports == 2)
2100 new_packets += rspq_check_napi(adap->sge.qs[1].netdev,
2101 &adap->sge.qs[1].rspq);
2102 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2103 q->unhandled_irqs++;
2104
2105 spin_unlock(&q->lock);
2106 return IRQ_HANDLED;
2107}
2108
2109/*
2110 * A helper function that processes responses and issues GTS.
2111 */
2112static inline int process_responses_gts(struct adapter *adap,
2113 struct sge_rspq *rq)
2114{
2115 int work;
2116
2117 work = process_responses(adap, rspq_to_qset(rq), -1);
2118 t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
2119 V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
2120 return work;
2121}
2122
2123/*
2124 * The legacy INTx interrupt handler. This needs to handle data events from
2125 * SGE response queues as well as error and other async events as they all use
2126 * the same interrupt pin. We use one SGE response queue per port in this mode
2127 * and protect all response queues with queue 0's lock.
2128 */
2129static irqreturn_t t3_intr(int irq, void *cookie)
2130{
2131 int work_done, w0, w1;
2132 struct adapter *adap = cookie;
2133 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2134 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2135
2136 spin_lock(&q0->lock);
2137
2138 w0 = is_new_response(&q0->desc[q0->cidx], q0);
2139 w1 = adap->params.nports == 2 &&
2140 is_new_response(&q1->desc[q1->cidx], q1);
2141
2142 if (likely(w0 | w1)) {
2143 t3_write_reg(adap, A_PL_CLI, 0);
2144 t3_read_reg(adap, A_PL_CLI); /* flush */
2145
2146 if (likely(w0))
2147 process_responses_gts(adap, q0);
2148
2149 if (w1)
2150 process_responses_gts(adap, q1);
2151
2152 work_done = w0 | w1;
2153 } else
2154 work_done = t3_slow_intr_handler(adap);
2155
2156 spin_unlock(&q0->lock);
2157 return IRQ_RETVAL(work_done != 0);
2158}
2159
2160/*
2161 * Interrupt handler for legacy INTx interrupts for T3B-based cards.
2162 * Handles data events from SGE response queues as well as error and other
2163 * async events as they all use the same interrupt pin. We use one SGE
2164 * response queue per port in this mode and protect all response queues with
2165 * queue 0's lock.
2166 */
2167static irqreturn_t t3b_intr(int irq, void *cookie)
2168{
2169 u32 map;
2170 struct adapter *adap = cookie;
2171 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2172
2173 t3_write_reg(adap, A_PL_CLI, 0);
2174 map = t3_read_reg(adap, A_SG_DATA_INTR);
2175
2176 if (unlikely(!map)) /* shared interrupt, most likely */
2177 return IRQ_NONE;
2178
2179 spin_lock(&q0->lock);
2180
2181 if (unlikely(map & F_ERRINTR))
2182 t3_slow_intr_handler(adap);
2183
2184 if (likely(map & 1))
2185 process_responses_gts(adap, q0);
2186
2187 if (map & 2)
2188 process_responses_gts(adap, &adap->sge.qs[1].rspq);
2189
2190 spin_unlock(&q0->lock);
2191 return IRQ_HANDLED;
2192}
2193
2194/*
2195 * NAPI interrupt handler for legacy INTx interrupts for T3B-based cards.
2196 * Handles data events from SGE response queues as well as error and other
2197 * async events as they all use the same interrupt pin. We use one SGE
2198 * response queue per port in this mode and protect all response queues with
2199 * queue 0's lock.
2200 */
2201static irqreturn_t t3b_intr_napi(int irq, void *cookie)
2202{
2203 u32 map;
2204 struct net_device *dev;
2205 struct adapter *adap = cookie;
2206 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2207
2208 t3_write_reg(adap, A_PL_CLI, 0);
2209 map = t3_read_reg(adap, A_SG_DATA_INTR);
2210
2211 if (unlikely(!map)) /* shared interrupt, most likely */
2212 return IRQ_NONE;
2213
2214 spin_lock(&q0->lock);
2215
2216 if (unlikely(map & F_ERRINTR))
2217 t3_slow_intr_handler(adap);
2218
2219 if (likely(map & 1)) {
2220 dev = adap->sge.qs[0].netdev;
2221
2222 if (likely(__netif_rx_schedule_prep(dev)))
2223 __netif_rx_schedule(dev);
2224 }
2225 if (map & 2) {
2226 dev = adap->sge.qs[1].netdev;
2227
2228 if (likely(__netif_rx_schedule_prep(dev)))
2229 __netif_rx_schedule(dev);
2230 }
2231
2232 spin_unlock(&q0->lock);
2233 return IRQ_HANDLED;
2234}
2235
2236/**
2237 * t3_intr_handler - select the top-level interrupt handler
2238 * @adap: the adapter
2239 * @polling: whether using NAPI to service response queues
2240 *
2241 * Selects the top-level interrupt handler based on the type of interrupts
2242 * (MSI-X, MSI, or legacy) and whether NAPI will be used to service the
2243 * response queues.
2244 */
2245intr_handler_t t3_intr_handler(struct adapter *adap, int polling)
2246{
2247 if (adap->flags & USING_MSIX)
2248 return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix;
2249 if (adap->flags & USING_MSI)
2250 return polling ? t3_intr_msi_napi : t3_intr_msi;
2251 if (adap->params.rev > 0)
2252 return polling ? t3b_intr_napi : t3b_intr;
2253 return t3_intr;
2254}
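A hedged usage sketch: the driver's IRQ setup path would typically hand the
selected handler straight to request_irq(); the name string and flag choice
below are illustrative, not taken from this patch:

	err = request_irq(adap->pdev->irq,
			  t3_intr_handler(adap, adap->sge.qs[0].rspq.polling),
			  (adap->flags & (USING_MSIX | USING_MSI)) ?
			  0 : IRQF_SHARED,
			  "cxgb3", adap);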
2255
2256/**
2257 * t3_sge_err_intr_handler - SGE async event interrupt handler
2258 * @adapter: the adapter
2259 *
2260 * Interrupt handler for SGE asynchronous (non-data) events.
2261 */
2262void t3_sge_err_intr_handler(struct adapter *adapter)
2263{
2264 unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2265
2266 if (status & F_RSPQCREDITOVERFOW)
2267 CH_ALERT(adapter, "SGE response queue credit overflow\n");
2268
2269 if (status & F_RSPQDISABLED) {
2270 v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
2271
2272 CH_ALERT(adapter,
2273 "packet delivered to disabled response queue "
2274 "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff);
2275 }
2276
2277 t3_write_reg(adapter, A_SG_INT_CAUSE, status);
2278 if (status & (F_RSPQCREDITOVERFOW | F_RSPQDISABLED))
2279 t3_fatal_err(adapter);
2280}
2281
2282/**
2283 * sge_timer_cb - perform periodic maintenance of an SGE qset
2284 * @data: the SGE queue set to maintain
2285 *
2286 * Runs periodically from a timer to perform maintenance of an SGE queue
2287 * set. It performs two tasks:
2288 *
2289 * a) Cleans up any completed Tx descriptors that may still be pending.
2290 * Normal descriptor cleanup happens when new packets are added to a Tx
2291 * queue so this timer is relatively infrequent and does any cleanup only
2292 * if the Tx queue has not seen any new packets in a while. We make a
2293 * best effort attempt to reclaim descriptors, in that we don't wait
2294 * around if we cannot get a queue's lock (which most likely is because
2295 * someone else is queueing new packets and so will also handle the clean
2296 * up). Since control queues use immediate data exclusively we don't
2297 * bother cleaning them up here.
2298 *
2299 * b) Replenishes Rx queues that have run out due to memory shortage.
2300 * Normally new Rx buffers are added when existing ones are consumed but
2301 * when out of memory a queue can become empty. We try to add only a few
2302 * buffers here, the queue will be replenished fully as these new buffers
2303 * are used up if memory shortage has subsided.
2304 */
2305static void sge_timer_cb(unsigned long data)
2306{
2307 spinlock_t *lock;
2308 struct sge_qset *qs = (struct sge_qset *)data;
2309 struct adapter *adap = qs->netdev->priv;
2310
2311 if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
2312 reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]);
2313 spin_unlock(&qs->txq[TXQ_ETH].lock);
2314 }
2315 if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
2316 reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD]);
2317 spin_unlock(&qs->txq[TXQ_OFLD].lock);
2318 }
2319 lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock :
2320 &adap->sge.qs[0].rspq.lock;
2321 if (spin_trylock_irq(lock)) {
2322 if (!napi_is_scheduled(qs->netdev)) {
2323 if (qs->fl[0].credits < qs->fl[0].size)
2324 __refill_fl(adap, &qs->fl[0]);
2325 if (qs->fl[1].credits < qs->fl[1].size)
2326 __refill_fl(adap, &qs->fl[1]);
2327 }
2328 spin_unlock_irq(lock);
2329 }
2330 mod_timer(&qs->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
2331}
2332
2333/**
2334 * t3_update_qset_coalesce - update coalescing settings for a queue set
2335 * @qs: the SGE queue set
2336 * @p: new queue set parameters
2337 *
2338 * Update the coalescing settings for an SGE queue set. Nothing is done
2339 * if the queue set is not initialized yet.
2340 */
2341void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
2342{
2343 if (!qs->netdev)
2344 return;
2345
2346 qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);/* can't be 0 */
2347 qs->rspq.polling = p->polling;
2348 qs->netdev->poll = p->polling ? napi_rx_handler : ofld_poll;
2349}
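The hold-off value is expressed in SGE timer ticks. Since t3_sge_init() below
programs A_SG_TIMER_TICK to core_ticks_per_usec(adap) / 10, one tick is 0.1 us,
which is where the factor of 10 comes from. A worked example with the default
from t3_sge_prep():

	/* coalesce_usecs = 5 (the default set in t3_sge_prep() below) */
	qs->rspq.holdoff_tmr = max(5U * 10, 1U);   /* 50 ticks == 5 us */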
2350
2351/**
2352 * t3_sge_alloc_qset - initialize an SGE queue set
2353 * @adapter: the adapter
2354 * @id: the queue set id
2355 * @nports: how many Ethernet ports will be using this queue set
2356 * @irq_vec_idx: the IRQ vector index for response queue interrupts
2357 * @p: configuration parameters for this queue set
2358 * @ntxq: number of Tx queues for the queue set
2359 * @netdev: net device associated with this queue set
2360 *
2361 * Allocate resources and initialize an SGE queue set. A queue set
2362 * comprises a response queue, two Rx free-buffer queues, and up to 3
2363 * Tx queues. The Tx queues are assigned roles in the order Ethernet
2364 * queue, offload queue, and control queue.
2365 */
2366int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
2367 int irq_vec_idx, const struct qset_params *p,
2368 int ntxq, struct net_device *netdev)
2369{
2370 int i, ret = -ENOMEM;
2371 struct sge_qset *q = &adapter->sge.qs[id];
2372
2373 init_qset_cntxt(q, id);
2374 init_timer(&q->tx_reclaim_timer);
2375 q->tx_reclaim_timer.data = (unsigned long)q;
2376 q->tx_reclaim_timer.function = sge_timer_cb;
2377
2378 q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
2379 sizeof(struct rx_desc),
2380 sizeof(struct rx_sw_desc),
2381 &q->fl[0].phys_addr, &q->fl[0].sdesc);
2382 if (!q->fl[0].desc)
2383 goto err;
2384
2385 q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size,
2386 sizeof(struct rx_desc),
2387 sizeof(struct rx_sw_desc),
2388 &q->fl[1].phys_addr, &q->fl[1].sdesc);
2389 if (!q->fl[1].desc)
2390 goto err;
2391
2392 q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size,
2393 sizeof(struct rsp_desc), 0,
2394 &q->rspq.phys_addr, NULL);
2395 if (!q->rspq.desc)
2396 goto err;
2397
2398 for (i = 0; i < ntxq; ++i) {
2399 /*
2400 * The control queue always uses immediate data so does not
2401 * need to keep track of any sk_buffs.
2402 */
2403 size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);
2404
2405 q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i],
2406 sizeof(struct tx_desc), sz,
2407 &q->txq[i].phys_addr,
2408 &q->txq[i].sdesc);
2409 if (!q->txq[i].desc)
2410 goto err;
2411
2412 q->txq[i].gen = 1;
2413 q->txq[i].size = p->txq_size[i];
2414 spin_lock_init(&q->txq[i].lock);
2415 skb_queue_head_init(&q->txq[i].sendq);
2416 }
2417
2418 tasklet_init(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq,
2419 (unsigned long)q);
2420 tasklet_init(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq,
2421 (unsigned long)q);
2422
2423 q->fl[0].gen = q->fl[1].gen = 1;
2424 q->fl[0].size = p->fl_size;
2425 q->fl[1].size = p->jumbo_size;
2426
2427 q->rspq.gen = 1;
2428 q->rspq.size = p->rspq_size;
2429 spin_lock_init(&q->rspq.lock);
2430
2431 q->txq[TXQ_ETH].stop_thres = nports *
2432 flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
2433
2434 if (ntxq == 1) {
2435 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + 2 +
2436 sizeof(struct cpl_rx_pkt);
2437 q->fl[1].buf_size = MAX_FRAME_SIZE + 2 +
2438 sizeof(struct cpl_rx_pkt);
2439 } else {
2440 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE +
2441 sizeof(struct cpl_rx_data);
2442 q->fl[1].buf_size = (16 * 1024) -
2443 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2444 }
2445
2446 spin_lock(&adapter->sge.reg_lock);
2447
2448 /* FL threshold comparison uses < */
2449 ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
2450 q->rspq.phys_addr, q->rspq.size,
2451 q->fl[0].buf_size, 1, 0);
2452 if (ret)
2453 goto err_unlock;
2454
2455 for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
2456 ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
2457 q->fl[i].phys_addr, q->fl[i].size,
2458 q->fl[i].buf_size, p->cong_thres, 1,
2459 0);
2460 if (ret)
2461 goto err_unlock;
2462 }
2463
2464 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
2465 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
2466 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
2467 1, 0);
2468 if (ret)
2469 goto err_unlock;
2470
2471 if (ntxq > 1) {
2472 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id,
2473 USE_GTS, SGE_CNTXT_OFLD, id,
2474 q->txq[TXQ_OFLD].phys_addr,
2475 q->txq[TXQ_OFLD].size, 0, 1, 0);
2476 if (ret)
2477 goto err_unlock;
2478 }
2479
2480 if (ntxq > 2) {
2481 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0,
2482 SGE_CNTXT_CTRL, id,
2483 q->txq[TXQ_CTRL].phys_addr,
2484 q->txq[TXQ_CTRL].size,
2485 q->txq[TXQ_CTRL].token, 1, 0);
2486 if (ret)
2487 goto err_unlock;
2488 }
2489
2490 spin_unlock(&adapter->sge.reg_lock);
2491 q->netdev = netdev;
2492 t3_update_qset_coalesce(q, p);
2493
2494 /*
2495 * We use atalk_ptr as a backpointer to a qset. In case a device is
2496 * associated with multiple queue sets only the first one sets
2497 * atalk_ptr.
2498 */
2499 if (netdev->atalk_ptr == NULL)
2500 netdev->atalk_ptr = q;
2501
2502 refill_fl(adapter, &q->fl[0], q->fl[0].size, GFP_KERNEL);
2503 refill_fl(adapter, &q->fl[1], q->fl[1].size, GFP_KERNEL);
2504 refill_rspq(adapter, &q->rspq, q->rspq.size - 1);
2505
2506 t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
2507 V_NEWTIMER(q->rspq.holdoff_tmr));
2508
2509 mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
2510 return 0;
2511
2512 err_unlock:
2513 spin_unlock(&adapter->sge.reg_lock);
2514 err:
2515 t3_free_qset(adapter, q);
2516 return ret;
2517}
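A hedged sketch of how a setup path might instantiate one queue set per port
with the defaults from t3_sge_prep(); the port_dev array, the use of the qset
index as the IRQ vector index, the SGE_TXQ_PER_SET count (assumed to be the
full three-queue set), and the error label are illustrative, not part of this
patch:

	for (i = 0; i < adap->params.nports; ++i) {
		err = t3_sge_alloc_qset(adap, i, 1, i,
					&adap->params.sge.qset[i],
					SGE_TXQ_PER_SET, port_dev[i]);
		if (err)
			goto free_queues;   /* t3_free_sge_resources() */
	}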
2518
2519/**
2520 * t3_free_sge_resources - free SGE resources
2521 * @adap: the adapter
2522 *
2523 * Frees resources used by the SGE queue sets.
2524 */
2525void t3_free_sge_resources(struct adapter *adap)
2526{
2527 int i;
2528
2529 for (i = 0; i < SGE_QSETS; ++i)
2530 t3_free_qset(adap, &adap->sge.qs[i]);
2531}
2532
2533/**
2534 * t3_sge_start - enable SGE
2535 * @adap: the adapter
2536 *
2537 * Enables the SGE for DMAs. This is the last step in starting packet
2538 * transfers.
2539 */
2540void t3_sge_start(struct adapter *adap)
2541{
2542 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
2543}
2544
2545/**
2546 * t3_sge_stop - disable SGE operation
2547 * @adap: the adapter
2548 *
2549 * Disables the DMA engine. This can be called in emergencies (e.g.,
2550 * from error interrupts) or from normal process context. In the latter
2551 * case it also disables any pending queue restart tasklets. Note that
2552 * if it is called in interrupt context it cannot disable the restart
2553 * tasklets, as it cannot wait; however, the tasklets will have no effect
2554 * since the doorbells are disabled, and the driver will call this again
2555 * later from process context, at which time any tasklets still running
2556 * will be stopped.
2557 */
2558void t3_sge_stop(struct adapter *adap)
2559{
2560 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
2561 if (!in_interrupt()) {
2562 int i;
2563
2564 for (i = 0; i < SGE_QSETS; ++i) {
2565 struct sge_qset *qs = &adap->sge.qs[i];
2566
2567 tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
2568 tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
2569 }
2570 }
2571}
2572
2573/**
2574 * t3_sge_init - initialize SGE
2575 * @adap: the adapter
2576 * @p: the SGE parameters
2577 *
2578 * Performs SGE initialization needed every time after a chip reset.
2579 * We do not initialize any of the queue sets here, instead the driver
2580 * top-level must request those individually. We also do not enable DMA
2581 * here, that should be done after the queues have been set up.
2582 */
2583void t3_sge_init(struct adapter *adap, struct sge_params *p)
2584{
2585 unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);
2586
2587 ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
2588 F_CQCRDTCTRL |
2589 V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
2590 V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
2591#if SGE_NUM_GENBITS == 1
2592 ctrl |= F_EGRGENCTRL;
2593#endif
2594 if (adap->params.rev > 0) {
2595 if (!(adap->flags & (USING_MSIX | USING_MSI)))
2596 ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
2597 ctrl |= F_CQCRDTCTRL | F_AVOIDCQOVFL;
2598 }
2599 t3_write_reg(adap, A_SG_CONTROL, ctrl);
2600 t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
2601 V_LORCQDRBTHRSH(512));
2602 t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
2603 t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
2604 V_TIMEOUT(200 * core_ticks_per_usec(adap)));
2605 t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH, 1000);
2606 t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
2607 t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
2608 t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
2609 t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
2610 t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
2611}
2612
2613/**
2614 * t3_sge_prep - one-time SGE initialization
2615 * @adap: the associated adapter
2616 * @p: SGE parameters
2617 *
2618 * Performs one-time initialization of SGE SW state. Includes determining
2619 * defaults for the assorted SGE parameters, which admins can change until
2620 * they are used to initialize the SGE.
2621 */
2622void __devinit t3_sge_prep(struct adapter *adap, struct sge_params *p)
2623{
2624 int i;
2625
2626 p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) -
2627 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2628
2629 for (i = 0; i < SGE_QSETS; ++i) {
2630 struct qset_params *q = p->qset + i;
2631
2632 q->polling = adap->params.rev > 0;
2633 q->coalesce_usecs = 5;
2634 q->rspq_size = 1024;
2635 q->fl_size = 4096;
2636 q->jumbo_size = 512;
2637 q->txq_size[TXQ_ETH] = 1024;
2638 q->txq_size[TXQ_OFLD] = 1024;
2639 q->txq_size[TXQ_CTRL] = 256;
2640 q->cong_thres = 0;
2641 }
2642
2643 spin_lock_init(&adap->sge.reg_lock);
2644}
2645
2646/**
2647 * t3_get_desc - dump an SGE descriptor for debugging purposes
2648 * @qs: the queue set
2649 * @qnum: identifies the specific queue (0..2: Tx, 3: response, 4..5: Rx)
2650 * @idx: the descriptor index in the queue
2651 * @data: where to dump the descriptor contents
2652 *
2653 * Dumps the contents of a HW descriptor of an SGE queue. Returns the
2654 * size of the descriptor.
2655 */
2656int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
2657 unsigned char *data)
2658{
2659 if (qnum >= 6)
2660 return -EINVAL;
2661
2662 if (qnum < 3) {
2663 if (!qs->txq[qnum].desc || idx >= qs->txq[qnum].size)
2664 return -EINVAL;
2665 memcpy(data, &qs->txq[qnum].desc[idx], sizeof(struct tx_desc));
2666 return sizeof(struct tx_desc);
2667 }
2668
2669 if (qnum == 3) {
2670 if (!qs->rspq.desc || idx >= qs->rspq.size)
2671 return -EINVAL;
2672 memcpy(data, &qs->rspq.desc[idx], sizeof(struct rsp_desc));
2673 return sizeof(struct rsp_desc);
2674 }
2675
2676 qnum -= 4;
2677 if (!qs->fl[qnum].desc || idx >= qs->fl[qnum].size)
2678 return -EINVAL;
2679 memcpy(data, &qs->fl[qnum].desc[idx], sizeof(struct rx_desc));
2680 return sizeof(struct rx_desc);
2681}
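A hedged usage sketch for the debug hook above; per the @qnum mapping, 0..2
select the Tx queues, 3 the response queue, and 4..5 the free lists:

	unsigned char buf[128];    /* assumption: larger than any descriptor */
	int len = t3_get_desc(&adap->sge.qs[0], 3, 0, buf);
	/* len is now sizeof(struct rsp_desc), or -EINVAL on a bad index */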
diff --git a/drivers/net/cxgb3/sge_defs.h b/drivers/net/cxgb3/sge_defs.h
new file mode 100644
index 000000000000..514869e26a76
--- /dev/null
+++ b/drivers/net/cxgb3/sge_defs.h
@@ -0,0 +1,251 @@
1/*
2 * This file is automatically generated --- any changes will be lost.
3 */
4
5#ifndef _SGE_DEFS_H
6#define _SGE_DEFS_H
7
8#define S_EC_CREDITS 0
9#define M_EC_CREDITS 0x7FFF
10#define V_EC_CREDITS(x) ((x) << S_EC_CREDITS)
11#define G_EC_CREDITS(x) (((x) >> S_EC_CREDITS) & M_EC_CREDITS)
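The whole file follows one naming scheme: S_FOO is a field's bit offset, M_FOO
its width mask, V_FOO(x) shifts a value into position, G_FOO(x) extracts it,
and F_FOO is the V_FOO(1U) shorthand for single-bit fields. A worked
illustration using EC_CREDITS above and F_EC_GTS just below (the values are
made up):

	u32 w = V_EC_CREDITS(100) | F_EC_GTS;   /* 100 credits, GTS bit set */
	unsigned int credits = G_EC_CREDITS(w); /* (w >> 0) & 0x7FFF == 100 */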
12
13#define S_EC_GTS 15
14#define V_EC_GTS(x) ((x) << S_EC_GTS)
15#define F_EC_GTS V_EC_GTS(1U)
16
17#define S_EC_INDEX 16
18#define M_EC_INDEX 0xFFFF
19#define V_EC_INDEX(x) ((x) << S_EC_INDEX)
20#define G_EC_INDEX(x) (((x) >> S_EC_INDEX) & M_EC_INDEX)
21
22#define S_EC_SIZE 0
23#define M_EC_SIZE 0xFFFF
24#define V_EC_SIZE(x) ((x) << S_EC_SIZE)
25#define G_EC_SIZE(x) (((x) >> S_EC_SIZE) & M_EC_SIZE)
26
27#define S_EC_BASE_LO 16
28#define M_EC_BASE_LO 0xFFFF
29#define V_EC_BASE_LO(x) ((x) << S_EC_BASE_LO)
30#define G_EC_BASE_LO(x) (((x) >> S_EC_BASE_LO) & M_EC_BASE_LO)
31
32#define S_EC_BASE_HI 0
33#define M_EC_BASE_HI 0xF
34#define V_EC_BASE_HI(x) ((x) << S_EC_BASE_HI)
35#define G_EC_BASE_HI(x) (((x) >> S_EC_BASE_HI) & M_EC_BASE_HI)
36
37#define S_EC_RESPQ 4
38#define M_EC_RESPQ 0x7
39#define V_EC_RESPQ(x) ((x) << S_EC_RESPQ)
40#define G_EC_RESPQ(x) (((x) >> S_EC_RESPQ) & M_EC_RESPQ)
41
42#define S_EC_TYPE 7
43#define M_EC_TYPE 0x7
44#define V_EC_TYPE(x) ((x) << S_EC_TYPE)
45#define G_EC_TYPE(x) (((x) >> S_EC_TYPE) & M_EC_TYPE)
46
47#define S_EC_GEN 10
48#define V_EC_GEN(x) ((x) << S_EC_GEN)
49#define F_EC_GEN V_EC_GEN(1U)
50
51#define S_EC_UP_TOKEN 11
52#define M_EC_UP_TOKEN 0xFFFFF
53#define V_EC_UP_TOKEN(x) ((x) << S_EC_UP_TOKEN)
54#define G_EC_UP_TOKEN(x) (((x) >> S_EC_UP_TOKEN) & M_EC_UP_TOKEN)
55
56#define S_EC_VALID 31
57#define V_EC_VALID(x) ((x) << S_EC_VALID)
58#define F_EC_VALID V_EC_VALID(1U)
59
60#define S_RQ_MSI_VEC 20
61#define M_RQ_MSI_VEC 0x3F
62#define V_RQ_MSI_VEC(x) ((x) << S_RQ_MSI_VEC)
63#define G_RQ_MSI_VEC(x) (((x) >> S_RQ_MSI_VEC) & M_RQ_MSI_VEC)
64
65#define S_RQ_INTR_EN 26
66#define V_RQ_INTR_EN(x) ((x) << S_RQ_INTR_EN)
67#define F_RQ_INTR_EN V_RQ_INTR_EN(1U)
68
69#define S_RQ_GEN 28
70#define V_RQ_GEN(x) ((x) << S_RQ_GEN)
71#define F_RQ_GEN V_RQ_GEN(1U)
72
73#define S_CQ_INDEX 0
74#define M_CQ_INDEX 0xFFFF
75#define V_CQ_INDEX(x) ((x) << S_CQ_INDEX)
76#define G_CQ_INDEX(x) (((x) >> S_CQ_INDEX) & M_CQ_INDEX)
77
78#define S_CQ_SIZE 16
79#define M_CQ_SIZE 0xFFFF
80#define V_CQ_SIZE(x) ((x) << S_CQ_SIZE)
81#define G_CQ_SIZE(x) (((x) >> S_CQ_SIZE) & M_CQ_SIZE)
82
83#define S_CQ_BASE_HI 0
84#define M_CQ_BASE_HI 0xFFFFF
85#define V_CQ_BASE_HI(x) ((x) << S_CQ_BASE_HI)
86#define G_CQ_BASE_HI(x) (((x) >> S_CQ_BASE_HI) & M_CQ_BASE_HI)
87
88#define S_CQ_RSPQ 20
89#define M_CQ_RSPQ 0x3F
90#define V_CQ_RSPQ(x) ((x) << S_CQ_RSPQ)
91#define G_CQ_RSPQ(x) (((x) >> S_CQ_RSPQ) & M_CQ_RSPQ)
92
93#define S_CQ_ASYNC_NOTIF 26
94#define V_CQ_ASYNC_NOTIF(x) ((x) << S_CQ_ASYNC_NOTIF)
95#define F_CQ_ASYNC_NOTIF V_CQ_ASYNC_NOTIF(1U)
96
97#define S_CQ_ARMED 27
98#define V_CQ_ARMED(x) ((x) << S_CQ_ARMED)
99#define F_CQ_ARMED V_CQ_ARMED(1U)
100
101#define S_CQ_ASYNC_NOTIF_SOL 28
102#define V_CQ_ASYNC_NOTIF_SOL(x) ((x) << S_CQ_ASYNC_NOTIF_SOL)
103#define F_CQ_ASYNC_NOTIF_SOL V_CQ_ASYNC_NOTIF_SOL(1U)
104
105#define S_CQ_GEN 29
106#define V_CQ_GEN(x) ((x) << S_CQ_GEN)
107#define F_CQ_GEN V_CQ_GEN(1U)
108
109#define S_CQ_OVERFLOW_MODE 31
110#define V_CQ_OVERFLOW_MODE(x) ((x) << S_CQ_OVERFLOW_MODE)
111#define F_CQ_OVERFLOW_MODE V_CQ_OVERFLOW_MODE(1U)
112
113#define S_CQ_CREDITS 0
114#define M_CQ_CREDITS 0xFFFF
115#define V_CQ_CREDITS(x) ((x) << S_CQ_CREDITS)
116#define G_CQ_CREDITS(x) (((x) >> S_CQ_CREDITS) & M_CQ_CREDITS)
117
118#define S_CQ_CREDIT_THRES 16
119#define M_CQ_CREDIT_THRES 0x1FFF
120#define V_CQ_CREDIT_THRES(x) ((x) << S_CQ_CREDIT_THRES)
121#define G_CQ_CREDIT_THRES(x) (((x) >> S_CQ_CREDIT_THRES) & M_CQ_CREDIT_THRES)
122
123#define S_FL_BASE_HI 0
124#define M_FL_BASE_HI 0xFFFFF
125#define V_FL_BASE_HI(x) ((x) << S_FL_BASE_HI)
126#define G_FL_BASE_HI(x) (((x) >> S_FL_BASE_HI) & M_FL_BASE_HI)
127
128#define S_FL_INDEX_LO 20
129#define M_FL_INDEX_LO 0xFFF
130#define V_FL_INDEX_LO(x) ((x) << S_FL_INDEX_LO)
131#define G_FL_INDEX_LO(x) (((x) >> S_FL_INDEX_LO) & M_FL_INDEX_LO)
132
133#define S_FL_INDEX_HI 0
134#define M_FL_INDEX_HI 0xF
135#define V_FL_INDEX_HI(x) ((x) << S_FL_INDEX_HI)
136#define G_FL_INDEX_HI(x) (((x) >> S_FL_INDEX_HI) & M_FL_INDEX_HI)
137
138#define S_FL_SIZE 4
139#define M_FL_SIZE 0xFFFF
140#define V_FL_SIZE(x) ((x) << S_FL_SIZE)
141#define G_FL_SIZE(x) (((x) >> S_FL_SIZE) & M_FL_SIZE)
142
143#define S_FL_GEN 20
144#define V_FL_GEN(x) ((x) << S_FL_GEN)
145#define F_FL_GEN V_FL_GEN(1U)
146
147#define S_FL_ENTRY_SIZE_LO 21
148#define M_FL_ENTRY_SIZE_LO 0x7FF
149#define V_FL_ENTRY_SIZE_LO(x) ((x) << S_FL_ENTRY_SIZE_LO)
150#define G_FL_ENTRY_SIZE_LO(x) (((x) >> S_FL_ENTRY_SIZE_LO) & M_FL_ENTRY_SIZE_LO)
151
152#define S_FL_ENTRY_SIZE_HI 0
153#define M_FL_ENTRY_SIZE_HI 0x1FFFFF
154#define V_FL_ENTRY_SIZE_HI(x) ((x) << S_FL_ENTRY_SIZE_HI)
155#define G_FL_ENTRY_SIZE_HI(x) (((x) >> S_FL_ENTRY_SIZE_HI) & M_FL_ENTRY_SIZE_HI)
156
157#define S_FL_CONG_THRES 21
158#define M_FL_CONG_THRES 0x3FF
159#define V_FL_CONG_THRES(x) ((x) << S_FL_CONG_THRES)
160#define G_FL_CONG_THRES(x) (((x) >> S_FL_CONG_THRES) & M_FL_CONG_THRES)
161
162#define S_FL_GTS 31
163#define V_FL_GTS(x) ((x) << S_FL_GTS)
164#define F_FL_GTS V_FL_GTS(1U)
165
166#define S_FLD_GEN1 31
167#define V_FLD_GEN1(x) ((x) << S_FLD_GEN1)
168#define F_FLD_GEN1 V_FLD_GEN1(1U)
169
170#define S_FLD_GEN2 0
171#define V_FLD_GEN2(x) ((x) << S_FLD_GEN2)
172#define F_FLD_GEN2 V_FLD_GEN2(1U)
173
174#define S_RSPD_TXQ1_CR 0
175#define M_RSPD_TXQ1_CR 0x7F
176#define V_RSPD_TXQ1_CR(x) ((x) << S_RSPD_TXQ1_CR)
177#define G_RSPD_TXQ1_CR(x) (((x) >> S_RSPD_TXQ1_CR) & M_RSPD_TXQ1_CR)
178
179#define S_RSPD_TXQ1_GTS 7
180#define V_RSPD_TXQ1_GTS(x) ((x) << S_RSPD_TXQ1_GTS)
181#define F_RSPD_TXQ1_GTS V_RSPD_TXQ1_GTS(1U)
182
183#define S_RSPD_TXQ2_CR 8
184#define M_RSPD_TXQ2_CR 0x7F
185#define V_RSPD_TXQ2_CR(x) ((x) << S_RSPD_TXQ2_CR)
186#define G_RSPD_TXQ2_CR(x) (((x) >> S_RSPD_TXQ2_CR) & M_RSPD_TXQ2_CR)
187
188#define S_RSPD_TXQ2_GTS 15
189#define V_RSPD_TXQ2_GTS(x) ((x) << S_RSPD_TXQ2_GTS)
190#define F_RSPD_TXQ2_GTS V_RSPD_TXQ2_GTS(1U)
191
192#define S_RSPD_TXQ0_CR 16
193#define M_RSPD_TXQ0_CR 0x7F
194#define V_RSPD_TXQ0_CR(x) ((x) << S_RSPD_TXQ0_CR)
195#define G_RSPD_TXQ0_CR(x) (((x) >> S_RSPD_TXQ0_CR) & M_RSPD_TXQ0_CR)
196
197#define S_RSPD_TXQ0_GTS 23
198#define V_RSPD_TXQ0_GTS(x) ((x) << S_RSPD_TXQ0_GTS)
199#define F_RSPD_TXQ0_GTS V_RSPD_TXQ0_GTS(1U)
200
201#define S_RSPD_EOP 24
202#define V_RSPD_EOP(x) ((x) << S_RSPD_EOP)
203#define F_RSPD_EOP V_RSPD_EOP(1U)
204
205#define S_RSPD_SOP 25
206#define V_RSPD_SOP(x) ((x) << S_RSPD_SOP)
207#define F_RSPD_SOP V_RSPD_SOP(1U)
208
209#define S_RSPD_ASYNC_NOTIF 26
210#define V_RSPD_ASYNC_NOTIF(x) ((x) << S_RSPD_ASYNC_NOTIF)
211#define F_RSPD_ASYNC_NOTIF V_RSPD_ASYNC_NOTIF(1U)
212
213#define S_RSPD_FL0_GTS 27
214#define V_RSPD_FL0_GTS(x) ((x) << S_RSPD_FL0_GTS)
215#define F_RSPD_FL0_GTS V_RSPD_FL0_GTS(1U)
216
217#define S_RSPD_FL1_GTS 28
218#define V_RSPD_FL1_GTS(x) ((x) << S_RSPD_FL1_GTS)
219#define F_RSPD_FL1_GTS V_RSPD_FL1_GTS(1U)
220
221#define S_RSPD_IMM_DATA_VALID 29
222#define V_RSPD_IMM_DATA_VALID(x) ((x) << S_RSPD_IMM_DATA_VALID)
223#define F_RSPD_IMM_DATA_VALID V_RSPD_IMM_DATA_VALID(1U)
224
225#define S_RSPD_OFFLOAD 30
226#define V_RSPD_OFFLOAD(x) ((x) << S_RSPD_OFFLOAD)
227#define F_RSPD_OFFLOAD V_RSPD_OFFLOAD(1U)
228
229#define S_RSPD_GEN1 31
230#define V_RSPD_GEN1(x) ((x) << S_RSPD_GEN1)
231#define F_RSPD_GEN1 V_RSPD_GEN1(1U)
232
233#define S_RSPD_LEN 0
234#define M_RSPD_LEN 0x7FFFFFFF
235#define V_RSPD_LEN(x) ((x) << S_RSPD_LEN)
236#define G_RSPD_LEN(x) (((x) >> S_RSPD_LEN) & M_RSPD_LEN)
237
238#define S_RSPD_FLQ 31
239#define V_RSPD_FLQ(x) ((x) << S_RSPD_FLQ)
240#define F_RSPD_FLQ V_RSPD_FLQ(1U)
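These two fields share the rsp_desc len_cq word consumed by process_responses()
in sge.c above: bit 31 selects which free list the buffer came from and the low
31 bits carry the length. The driver's own usage, restated as a fragment:

	u32 len = ntohl(r->len_cq);
	struct sge_fl *fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
	unsigned int pktlen = G_RSPD_LEN(len);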
241
242#define S_RSPD_GEN2 0
243#define V_RSPD_GEN2(x) ((x) << S_RSPD_GEN2)
244#define F_RSPD_GEN2 V_RSPD_GEN2(1U)
245
246#define S_RSPD_INR_VEC 1
247#define M_RSPD_INR_VEC 0x7F
248#define V_RSPD_INR_VEC(x) ((x) << S_RSPD_INR_VEC)
249#define G_RSPD_INR_VEC(x) (((x) >> S_RSPD_INR_VEC) & M_RSPD_INR_VEC)
250
251#endif /* _SGE_DEFS_H */
diff --git a/drivers/net/cxgb3/t3_cpl.h b/drivers/net/cxgb3/t3_cpl.h
new file mode 100644
index 000000000000..b7a1a310dfd4
--- /dev/null
+++ b/drivers/net/cxgb3/t3_cpl.h
@@ -0,0 +1,1444 @@
1/*
2 * Copyright (c) 2004-2007 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#ifndef T3_CPL_H
33#define T3_CPL_H
34
35#if !defined(__LITTLE_ENDIAN_BITFIELD) && !defined(__BIG_ENDIAN_BITFIELD)
36# include <asm/byteorder.h>
37#endif
38
39enum CPL_opcode {
40 CPL_PASS_OPEN_REQ = 0x1,
41 CPL_PASS_ACCEPT_RPL = 0x2,
42 CPL_ACT_OPEN_REQ = 0x3,
43 CPL_SET_TCB = 0x4,
44 CPL_SET_TCB_FIELD = 0x5,
45 CPL_GET_TCB = 0x6,
46 CPL_PCMD = 0x7,
47 CPL_CLOSE_CON_REQ = 0x8,
48 CPL_CLOSE_LISTSRV_REQ = 0x9,
49 CPL_ABORT_REQ = 0xA,
50 CPL_ABORT_RPL = 0xB,
51 CPL_TX_DATA = 0xC,
52 CPL_RX_DATA_ACK = 0xD,
53 CPL_TX_PKT = 0xE,
54 CPL_RTE_DELETE_REQ = 0xF,
55 CPL_RTE_WRITE_REQ = 0x10,
56 CPL_RTE_READ_REQ = 0x11,
57 CPL_L2T_WRITE_REQ = 0x12,
58 CPL_L2T_READ_REQ = 0x13,
59 CPL_SMT_WRITE_REQ = 0x14,
60 CPL_SMT_READ_REQ = 0x15,
61 CPL_TX_PKT_LSO = 0x16,
62 CPL_PCMD_READ = 0x17,
63 CPL_BARRIER = 0x18,
64 CPL_TID_RELEASE = 0x1A,
65
66 CPL_CLOSE_LISTSRV_RPL = 0x20,
67 CPL_ERROR = 0x21,
68 CPL_GET_TCB_RPL = 0x22,
69 CPL_L2T_WRITE_RPL = 0x23,
70 CPL_PCMD_READ_RPL = 0x24,
71 CPL_PCMD_RPL = 0x25,
72 CPL_PEER_CLOSE = 0x26,
73 CPL_RTE_DELETE_RPL = 0x27,
74 CPL_RTE_WRITE_RPL = 0x28,
75 CPL_RX_DDP_COMPLETE = 0x29,
76 CPL_RX_PHYS_ADDR = 0x2A,
77 CPL_RX_PKT = 0x2B,
78 CPL_RX_URG_NOTIFY = 0x2C,
79 CPL_SET_TCB_RPL = 0x2D,
80 CPL_SMT_WRITE_RPL = 0x2E,
81 CPL_TX_DATA_ACK = 0x2F,
82
83 CPL_ABORT_REQ_RSS = 0x30,
84 CPL_ABORT_RPL_RSS = 0x31,
85 CPL_CLOSE_CON_RPL = 0x32,
86 CPL_ISCSI_HDR = 0x33,
87 CPL_L2T_READ_RPL = 0x34,
88 CPL_RDMA_CQE = 0x35,
89 CPL_RDMA_CQE_READ_RSP = 0x36,
90 CPL_RDMA_CQE_ERR = 0x37,
91 CPL_RTE_READ_RPL = 0x38,
92 CPL_RX_DATA = 0x39,
93
94 CPL_ACT_OPEN_RPL = 0x40,
95 CPL_PASS_OPEN_RPL = 0x41,
96 CPL_RX_DATA_DDP = 0x42,
97 CPL_SMT_READ_RPL = 0x43,
98
99 CPL_ACT_ESTABLISH = 0x50,
100 CPL_PASS_ESTABLISH = 0x51,
101
102 CPL_PASS_ACCEPT_REQ = 0x70,
103
104 CPL_ASYNC_NOTIF = 0x80, /* fake opcode for async notifications */
105
106 CPL_TX_DMA_ACK = 0xA0,
107 CPL_RDMA_READ_REQ = 0xA1,
108 CPL_RDMA_TERMINATE = 0xA2,
109 CPL_TRACE_PKT = 0xA3,
110 CPL_RDMA_EC_STATUS = 0xA5,
111
112 NUM_CPL_CMDS /* must be last and previous entries must be sorted */
113};
114
115enum CPL_error {
116 CPL_ERR_NONE = 0,
117 CPL_ERR_TCAM_PARITY = 1,
118 CPL_ERR_TCAM_FULL = 3,
119 CPL_ERR_CONN_RESET = 20,
120 CPL_ERR_CONN_EXIST = 22,
121 CPL_ERR_ARP_MISS = 23,
122 CPL_ERR_BAD_SYN = 24,
123 CPL_ERR_CONN_TIMEDOUT = 30,
124 CPL_ERR_XMIT_TIMEDOUT = 31,
125 CPL_ERR_PERSIST_TIMEDOUT = 32,
126 CPL_ERR_FINWAIT2_TIMEDOUT = 33,
127 CPL_ERR_KEEPALIVE_TIMEDOUT = 34,
128 CPL_ERR_RTX_NEG_ADVICE = 35,
129 CPL_ERR_PERSIST_NEG_ADVICE = 36,
130 CPL_ERR_ABORT_FAILED = 42,
131 CPL_ERR_GENERAL = 99
132};
133
134enum {
135 CPL_CONN_POLICY_AUTO = 0,
136 CPL_CONN_POLICY_ASK = 1,
137 CPL_CONN_POLICY_DENY = 3
138};
139
140enum {
141 ULP_MODE_NONE = 0,
142 ULP_MODE_ISCSI = 2,
143 ULP_MODE_RDMA = 4,
144 ULP_MODE_TCPDDP = 5
145};
146
147enum {
148 ULP_CRC_HEADER = 1 << 0,
149 ULP_CRC_DATA = 1 << 1
150};
151
152enum {
153 CPL_PASS_OPEN_ACCEPT,
154 CPL_PASS_OPEN_REJECT
155};
156
157enum {
158 CPL_ABORT_SEND_RST = 0,
159 CPL_ABORT_NO_RST,
160 CPL_ABORT_POST_CLOSE_REQ = 2
161};
162
163enum { /* TX_PKT_LSO ethernet types */
164 CPL_ETH_II,
165 CPL_ETH_II_VLAN,
166 CPL_ETH_802_3,
167 CPL_ETH_802_3_VLAN
168};
169
170enum { /* TCP congestion control algorithms */
171 CONG_ALG_RENO,
172 CONG_ALG_TAHOE,
173 CONG_ALG_NEWRENO,
174 CONG_ALG_HIGHSPEED
175};
176
177union opcode_tid {
178 __be32 opcode_tid;
179 __u8 opcode;
180};
181
182#define S_OPCODE 24
183#define V_OPCODE(x) ((x) << S_OPCODE)
184#define G_OPCODE(x) (((x) >> S_OPCODE) & 0xFF)
185#define G_TID(x) ((x) & 0xFFFFFF)
186
187/* tid is assumed to be 24-bits */
188#define MK_OPCODE_TID(opcode, tid) (V_OPCODE(opcode) | (tid))
189
190#define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid)
191
192/* extract the TID from a CPL command */
193#define GET_TID(cmd) (G_TID(ntohl(OPCODE_TID(cmd))))
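A worked illustration of the opcode/TID packing (the helper is hypothetical and
the TID value made up): the opcode occupies the top byte and the TID the low
24 bits of the big-endian word, so a round trip looks like:

	/* cpl_close_con_req is defined later in this file */
	static void mk_close_req(struct cpl_close_con_req *req, unsigned int tid)
	{
		req->ot.opcode_tid = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
		/* the receive side recovers both halves:
		 *   G_OPCODE(ntohl(OPCODE_TID(req))) == CPL_CLOSE_CON_REQ
		 *   GET_TID(req) == tid */
	}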
194
195struct tcp_options {
196 __be16 mss;
197 __u8 wsf;
198#if defined(__LITTLE_ENDIAN_BITFIELD)
199 __u8:5;
200 __u8 ecn:1;
201 __u8 sack:1;
202 __u8 tstamp:1;
203#else
204 __u8 tstamp:1;
205 __u8 sack:1;
206 __u8 ecn:1;
207 __u8:5;
208#endif
209};
210
211struct rss_header {
212 __u8 opcode;
213#if defined(__LITTLE_ENDIAN_BITFIELD)
214 __u8 cpu_idx:6;
215 __u8 hash_type:2;
216#else
217 __u8 hash_type:2;
218 __u8 cpu_idx:6;
219#endif
220 __be16 cq_idx;
221 __be32 rss_hash_val;
222};
223
224#ifndef CHELSIO_FW
225struct work_request_hdr {
226 __be32 wr_hi;
227 __be32 wr_lo;
228};
229
230/* wr_hi fields */
231#define S_WR_SGE_CREDITS 0
232#define M_WR_SGE_CREDITS 0xFF
233#define V_WR_SGE_CREDITS(x) ((x) << S_WR_SGE_CREDITS)
234#define G_WR_SGE_CREDITS(x) (((x) >> S_WR_SGE_CREDITS) & M_WR_SGE_CREDITS)
235
236#define S_WR_SGLSFLT 8
237#define M_WR_SGLSFLT 0xFF
238#define V_WR_SGLSFLT(x) ((x) << S_WR_SGLSFLT)
239#define G_WR_SGLSFLT(x) (((x) >> S_WR_SGLSFLT) & M_WR_SGLSFLT)
240
241#define S_WR_BCNTLFLT 16
242#define M_WR_BCNTLFLT 0xF
243#define V_WR_BCNTLFLT(x) ((x) << S_WR_BCNTLFLT)
244#define G_WR_BCNTLFLT(x) (((x) >> S_WR_BCNTLFLT) & M_WR_BCNTLFLT)
245
246#define S_WR_DATATYPE 20
247#define V_WR_DATATYPE(x) ((x) << S_WR_DATATYPE)
248#define F_WR_DATATYPE V_WR_DATATYPE(1U)
249
250#define S_WR_COMPL 21
251#define V_WR_COMPL(x) ((x) << S_WR_COMPL)
252#define F_WR_COMPL V_WR_COMPL(1U)
253
254#define S_WR_EOP 22
255#define V_WR_EOP(x) ((x) << S_WR_EOP)
256#define F_WR_EOP V_WR_EOP(1U)
257
258#define S_WR_SOP 23
259#define V_WR_SOP(x) ((x) << S_WR_SOP)
260#define F_WR_SOP V_WR_SOP(1U)
261
262#define S_WR_OP 24
263#define M_WR_OP 0xFF
264#define V_WR_OP(x) ((x) << S_WR_OP)
265#define G_WR_OP(x) (((x) >> S_WR_OP) & M_WR_OP)
266
267/* wr_lo fields */
268#define S_WR_LEN 0
269#define M_WR_LEN 0xFF
270#define V_WR_LEN(x) ((x) << S_WR_LEN)
271#define G_WR_LEN(x) (((x) >> S_WR_LEN) & M_WR_LEN)
272
273#define S_WR_TID 8
274#define M_WR_TID 0xFFFFF
275#define V_WR_TID(x) ((x) << S_WR_TID)
276#define G_WR_TID(x) (((x) >> S_WR_TID) & M_WR_TID)
277
278#define S_WR_CR_FLUSH 30
279#define V_WR_CR_FLUSH(x) ((x) << S_WR_CR_FLUSH)
280#define F_WR_CR_FLUSH V_WR_CR_FLUSH(1U)
281
282#define S_WR_GEN 31
283#define V_WR_GEN(x) ((x) << S_WR_GEN)
284#define F_WR_GEN V_WR_GEN(1U)
285
286# define WR_HDR struct work_request_hdr wr
287# define RSS_HDR
288#else
289# define WR_HDR
290# define RSS_HDR struct rss_header rss_hdr;
291#endif
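In the driver build (CHELSIO_FW undefined, the first branch above) WR_HDR
expands to a leading struct work_request_hdr named wr, while RSS_HDR expands to
nothing, since the RSS information arrives in the response descriptor's
rss_hdr (see process_responses() in sge.c above) rather than in the CPL message
itself. Illustrative expansion for the first command below:

	/* struct cpl_pass_open_req { WR_HDR; union opcode_tid ot; ... }
	 * effectively becomes: */
	struct cpl_pass_open_req {
		struct work_request_hdr wr;	/* from WR_HDR */
		union opcode_tid ot;
		/* ... */
	};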
292
293/* option 0 lower-half fields */
294#define S_CPL_STATUS 0
295#define M_CPL_STATUS 0xFF
296#define V_CPL_STATUS(x) ((x) << S_CPL_STATUS)
297#define G_CPL_STATUS(x) (((x) >> S_CPL_STATUS) & M_CPL_STATUS)
298
299#define S_INJECT_TIMER 6
300#define V_INJECT_TIMER(x) ((x) << S_INJECT_TIMER)
301#define F_INJECT_TIMER V_INJECT_TIMER(1U)
302
303#define S_NO_OFFLOAD 7
304#define V_NO_OFFLOAD(x) ((x) << S_NO_OFFLOAD)
305#define F_NO_OFFLOAD V_NO_OFFLOAD(1U)
306
307#define S_ULP_MODE 8
308#define M_ULP_MODE 0xF
309#define V_ULP_MODE(x) ((x) << S_ULP_MODE)
310#define G_ULP_MODE(x) (((x) >> S_ULP_MODE) & M_ULP_MODE)
311
312#define S_RCV_BUFSIZ 12
313#define M_RCV_BUFSIZ 0x3FFF
314#define V_RCV_BUFSIZ(x) ((x) << S_RCV_BUFSIZ)
315#define G_RCV_BUFSIZ(x) (((x) >> S_RCV_BUFSIZ) & M_RCV_BUFSIZ)
316
317#define S_TOS 26
318#define M_TOS 0x3F
319#define V_TOS(x) ((x) << S_TOS)
320#define G_TOS(x) (((x) >> S_TOS) & M_TOS)
321
322/* option 0 upper-half fields */
323#define S_DELACK 0
324#define V_DELACK(x) ((x) << S_DELACK)
325#define F_DELACK V_DELACK(1U)
326
327#define S_NO_CONG 1
328#define V_NO_CONG(x) ((x) << S_NO_CONG)
329#define F_NO_CONG V_NO_CONG(1U)
330
331#define S_SRC_MAC_SEL 2
332#define M_SRC_MAC_SEL 0x3
333#define V_SRC_MAC_SEL(x) ((x) << S_SRC_MAC_SEL)
334#define G_SRC_MAC_SEL(x) (((x) >> S_SRC_MAC_SEL) & M_SRC_MAC_SEL)
335
336#define S_L2T_IDX 4
337#define M_L2T_IDX 0x7FF
338#define V_L2T_IDX(x) ((x) << S_L2T_IDX)
339#define G_L2T_IDX(x) (((x) >> S_L2T_IDX) & M_L2T_IDX)
340
341#define S_TX_CHANNEL 15
342#define V_TX_CHANNEL(x) ((x) << S_TX_CHANNEL)
343#define F_TX_CHANNEL V_TX_CHANNEL(1U)
344
345#define S_TCAM_BYPASS 16
346#define V_TCAM_BYPASS(x) ((x) << S_TCAM_BYPASS)
347#define F_TCAM_BYPASS V_TCAM_BYPASS(1U)
348
349#define S_NAGLE 17
350#define V_NAGLE(x) ((x) << S_NAGLE)
351#define F_NAGLE V_NAGLE(1U)
352
353#define S_WND_SCALE 18
354#define M_WND_SCALE 0xF
355#define V_WND_SCALE(x) ((x) << S_WND_SCALE)
356#define G_WND_SCALE(x) (((x) >> S_WND_SCALE) & M_WND_SCALE)
357
358#define S_KEEP_ALIVE 22
359#define V_KEEP_ALIVE(x) ((x) << S_KEEP_ALIVE)
360#define F_KEEP_ALIVE V_KEEP_ALIVE(1U)
361
362#define S_MAX_RETRANS 23
363#define M_MAX_RETRANS 0xF
364#define V_MAX_RETRANS(x) ((x) << S_MAX_RETRANS)
365#define G_MAX_RETRANS(x) (((x) >> S_MAX_RETRANS) & M_MAX_RETRANS)
366
367#define S_MAX_RETRANS_OVERRIDE 27
368#define V_MAX_RETRANS_OVERRIDE(x) ((x) << S_MAX_RETRANS_OVERRIDE)
369#define F_MAX_RETRANS_OVERRIDE V_MAX_RETRANS_OVERRIDE(1U)
370
371#define S_MSS_IDX 28
372#define M_MSS_IDX 0xF
373#define V_MSS_IDX(x) ((x) << S_MSS_IDX)
374#define G_MSS_IDX(x) (((x) >> S_MSS_IDX) & M_MSS_IDX)
375
376/* option 1 fields */
377#define S_RSS_ENABLE 0
378#define V_RSS_ENABLE(x) ((x) << S_RSS_ENABLE)
379#define F_RSS_ENABLE V_RSS_ENABLE(1U)
380
381#define S_RSS_MASK_LEN 1
382#define M_RSS_MASK_LEN 0x7
383#define V_RSS_MASK_LEN(x) ((x) << S_RSS_MASK_LEN)
384#define G_RSS_MASK_LEN(x) (((x) >> S_RSS_MASK_LEN) & M_RSS_MASK_LEN)
385
386#define S_CPU_IDX 4
387#define M_CPU_IDX 0x3F
388#define V_CPU_IDX(x) ((x) << S_CPU_IDX)
389#define G_CPU_IDX(x) (((x) >> S_CPU_IDX) & M_CPU_IDX)
390
391#define S_MAC_MATCH_VALID 18
392#define V_MAC_MATCH_VALID(x) ((x) << S_MAC_MATCH_VALID)
393#define F_MAC_MATCH_VALID V_MAC_MATCH_VALID(1U)
394
395#define S_CONN_POLICY 19
396#define M_CONN_POLICY 0x3
397#define V_CONN_POLICY(x) ((x) << S_CONN_POLICY)
398#define G_CONN_POLICY(x) (((x) >> S_CONN_POLICY) & M_CONN_POLICY)
399
400#define S_SYN_DEFENSE 21
401#define V_SYN_DEFENSE(x) ((x) << S_SYN_DEFENSE)
402#define F_SYN_DEFENSE V_SYN_DEFENSE(1U)
403
404#define S_VLAN_PRI 22
405#define M_VLAN_PRI 0x3
406#define V_VLAN_PRI(x) ((x) << S_VLAN_PRI)
407#define G_VLAN_PRI(x) (((x) >> S_VLAN_PRI) & M_VLAN_PRI)
408
409#define S_VLAN_PRI_VALID 24
410#define V_VLAN_PRI_VALID(x) ((x) << S_VLAN_PRI_VALID)
411#define F_VLAN_PRI_VALID V_VLAN_PRI_VALID(1U)
412
413#define S_PKT_TYPE 25
414#define M_PKT_TYPE 0x3
415#define V_PKT_TYPE(x) ((x) << S_PKT_TYPE)
416#define G_PKT_TYPE(x) (((x) >> S_PKT_TYPE) & M_PKT_TYPE)
417
418#define S_MAC_MATCH 27
419#define M_MAC_MATCH 0x1F
420#define V_MAC_MATCH(x) ((x) << S_MAC_MATCH)
421#define G_MAC_MATCH(x) (((x) >> S_MAC_MATCH) & M_MAC_MATCH)
422
423/* option 2 fields */
424#define S_CPU_INDEX 0
425#define M_CPU_INDEX 0x7F
426#define V_CPU_INDEX(x) ((x) << S_CPU_INDEX)
427#define G_CPU_INDEX(x) (((x) >> S_CPU_INDEX) & M_CPU_INDEX)
428
429#define S_CPU_INDEX_VALID 7
430#define V_CPU_INDEX_VALID(x) ((x) << S_CPU_INDEX_VALID)
431#define F_CPU_INDEX_VALID V_CPU_INDEX_VALID(1U)
432
433#define S_RX_COALESCE 8
434#define M_RX_COALESCE 0x3
435#define V_RX_COALESCE(x) ((x) << S_RX_COALESCE)
436#define G_RX_COALESCE(x) (((x) >> S_RX_COALESCE) & M_RX_COALESCE)
437
438#define S_RX_COALESCE_VALID 10
439#define V_RX_COALESCE_VALID(x) ((x) << S_RX_COALESCE_VALID)
440#define F_RX_COALESCE_VALID V_RX_COALESCE_VALID(1U)
441
442#define S_CONG_CONTROL_FLAVOR 11
443#define M_CONG_CONTROL_FLAVOR 0x3
444#define V_CONG_CONTROL_FLAVOR(x) ((x) << S_CONG_CONTROL_FLAVOR)
445#define G_CONG_CONTROL_FLAVOR(x) (((x) >> S_CONG_CONTROL_FLAVOR) & M_CONG_CONTROL_FLAVOR)
446
447#define S_PACING_FLAVOR 13
448#define M_PACING_FLAVOR 0x3
449#define V_PACING_FLAVOR(x) ((x) << S_PACING_FLAVOR)
450#define G_PACING_FLAVOR(x) (((x) >> S_PACING_FLAVOR) & M_PACING_FLAVOR)
451
452#define S_FLAVORS_VALID 15
453#define V_FLAVORS_VALID(x) ((x) << S_FLAVORS_VALID)
454#define F_FLAVORS_VALID V_FLAVORS_VALID(1U)
455
456#define S_RX_FC_DISABLE 16
457#define V_RX_FC_DISABLE(x) ((x) << S_RX_FC_DISABLE)
458#define F_RX_FC_DISABLE V_RX_FC_DISABLE(1U)
459
460#define S_RX_FC_VALID 17
461#define V_RX_FC_VALID(x) ((x) << S_RX_FC_VALID)
462#define F_RX_FC_VALID V_RX_FC_VALID(1U)
463
464struct cpl_pass_open_req {
465 WR_HDR;
466 union opcode_tid ot;
467 __be16 local_port;
468 __be16 peer_port;
469 __be32 local_ip;
470 __be32 peer_ip;
471 __be32 opt0h;
472 __be32 opt0l;
473 __be32 peer_netmask;
474 __be32 opt1;
475};
476
477struct cpl_pass_open_rpl {
478 RSS_HDR union opcode_tid ot;
479 __be16 local_port;
480 __be16 peer_port;
481 __be32 local_ip;
482 __be32 peer_ip;
483 __u8 resvd[7];
484 __u8 status;
485};
486
487struct cpl_pass_establish {
488 RSS_HDR union opcode_tid ot;
489 __be16 local_port;
490 __be16 peer_port;
491 __be32 local_ip;
492 __be32 peer_ip;
493 __be32 tos_tid;
494 __be16 l2t_idx;
495 __be16 tcp_opt;
496 __be32 snd_isn;
497 __be32 rcv_isn;
498};
499
500/* cpl_pass_establish.tos_tid fields */
501#define S_PASS_OPEN_TID 0
502#define M_PASS_OPEN_TID 0xFFFFFF
503#define V_PASS_OPEN_TID(x) ((x) << S_PASS_OPEN_TID)
504#define G_PASS_OPEN_TID(x) (((x) >> S_PASS_OPEN_TID) & M_PASS_OPEN_TID)
505
506#define S_PASS_OPEN_TOS 24
507#define M_PASS_OPEN_TOS 0xFF
508#define V_PASS_OPEN_TOS(x) ((x) << S_PASS_OPEN_TOS)
509#define G_PASS_OPEN_TOS(x) (((x) >> S_PASS_OPEN_TOS) & M_PASS_OPEN_TOS)
510
511/* cpl_pass_establish.l2t_idx fields */
512#define S_L2T_IDX16 5
513#define M_L2T_IDX16 0x7FF
514#define V_L2T_IDX16(x) ((x) << S_L2T_IDX16)
515#define G_L2T_IDX16(x) (((x) >> S_L2T_IDX16) & M_L2T_IDX16)
516
517/* cpl_pass_establish.tcp_opt fields (also apply to cpl_act_establish) */
518#define G_TCPOPT_WSCALE_OK(x) (((x) >> 5) & 1)
519#define G_TCPOPT_SACK(x) (((x) >> 6) & 1)
520#define G_TCPOPT_TSTAMP(x) (((x) >> 7) & 1)
521#define G_TCPOPT_SND_WSCALE(x) (((x) >> 8) & 0xf)
522#define G_TCPOPT_MSS(x) (((x) >> 12) & 0xf)
523
524struct cpl_pass_accept_req {
525 RSS_HDR union opcode_tid ot;
526 __be16 local_port;
527 __be16 peer_port;
528 __be32 local_ip;
529 __be32 peer_ip;
530 __be32 tos_tid;
531 struct tcp_options tcp_options;
532 __u8 dst_mac[6];
533 __be16 vlan_tag;
534 __u8 src_mac[6];
535#if defined(__LITTLE_ENDIAN_BITFIELD)
536 __u8:3;
537 __u8 addr_idx:3;
538 __u8 port_idx:1;
539 __u8 exact_match:1;
540#else
541 __u8 exact_match:1;
542 __u8 port_idx:1;
543 __u8 addr_idx:3;
544 __u8:3;
545#endif
546 __u8 rsvd;
547 __be32 rcv_isn;
548 __be32 rsvd2;
549};
550
551struct cpl_pass_accept_rpl {
552 WR_HDR;
553 union opcode_tid ot;
554 __be32 opt2;
555 __be32 rsvd;
556 __be32 peer_ip;
557 __be32 opt0h;
558 __be32 opt0l_status;
559};
560
561struct cpl_act_open_req {
562 WR_HDR;
563 union opcode_tid ot;
564 __be16 local_port;
565 __be16 peer_port;
566 __be32 local_ip;
567 __be32 peer_ip;
568 __be32 opt0h;
569 __be32 opt0l;
570 __be32 params;
571 __be32 opt2;
572};
573
574/* cpl_act_open_req.params fields */
575#define S_AOPEN_VLAN_PRI 9
576#define M_AOPEN_VLAN_PRI 0x3
577#define V_AOPEN_VLAN_PRI(x) ((x) << S_AOPEN_VLAN_PRI)
578#define G_AOPEN_VLAN_PRI(x) (((x) >> S_AOPEN_VLAN_PRI) & M_AOPEN_VLAN_PRI)
579
580#define S_AOPEN_VLAN_PRI_VALID 11
581#define V_AOPEN_VLAN_PRI_VALID(x) ((x) << S_AOPEN_VLAN_PRI_VALID)
582#define F_AOPEN_VLAN_PRI_VALID V_AOPEN_VLAN_PRI_VALID(1U)
583
584#define S_AOPEN_PKT_TYPE 12
585#define M_AOPEN_PKT_TYPE 0x3
586#define V_AOPEN_PKT_TYPE(x) ((x) << S_AOPEN_PKT_TYPE)
587#define G_AOPEN_PKT_TYPE(x) (((x) >> S_AOPEN_PKT_TYPE) & M_AOPEN_PKT_TYPE)
588
589#define S_AOPEN_MAC_MATCH 14
590#define M_AOPEN_MAC_MATCH 0x1F
591#define V_AOPEN_MAC_MATCH(x) ((x) << S_AOPEN_MAC_MATCH)
592#define G_AOPEN_MAC_MATCH(x) (((x) >> S_AOPEN_MAC_MATCH) & M_AOPEN_MAC_MATCH)
593
594#define S_AOPEN_MAC_MATCH_VALID 19
595#define V_AOPEN_MAC_MATCH_VALID(x) ((x) << S_AOPEN_MAC_MATCH_VALID)
596#define F_AOPEN_MAC_MATCH_VALID V_AOPEN_MAC_MATCH_VALID(1U)
597
598#define S_AOPEN_IFF_VLAN 20
599#define M_AOPEN_IFF_VLAN 0xFFF
600#define V_AOPEN_IFF_VLAN(x) ((x) << S_AOPEN_IFF_VLAN)
601#define G_AOPEN_IFF_VLAN(x) (((x) >> S_AOPEN_IFF_VLAN) & M_AOPEN_IFF_VLAN)
602
603struct cpl_act_open_rpl {
604 RSS_HDR union opcode_tid ot;
605 __be16 local_port;
606 __be16 peer_port;
607 __be32 local_ip;
608 __be32 peer_ip;
609 __be32 atid;
610 __u8 rsvd[3];
611 __u8 status;
612};
613
614struct cpl_act_establish {
615 RSS_HDR union opcode_tid ot;
616 __be16 local_port;
617 __be16 peer_port;
618 __be32 local_ip;
619 __be32 peer_ip;
620 __be32 tos_tid;
621 __be16 l2t_idx;
622 __be16 tcp_opt;
623 __be32 snd_isn;
624 __be32 rcv_isn;
625};
626
627struct cpl_get_tcb {
628 WR_HDR;
629 union opcode_tid ot;
630 __be16 cpuno;
631 __be16 rsvd;
632};
633
634struct cpl_get_tcb_rpl {
635 RSS_HDR union opcode_tid ot;
636 __u8 rsvd;
637 __u8 status;
638 __be16 len;
639};
640
641struct cpl_set_tcb {
642 WR_HDR;
643 union opcode_tid ot;
644 __u8 reply;
645 __u8 cpu_idx;
646 __be16 len;
647};
648
649/* cpl_set_tcb.reply fields */
650#define S_NO_REPLY 7
651#define V_NO_REPLY(x) ((x) << S_NO_REPLY)
652#define F_NO_REPLY V_NO_REPLY(1U)
653
654struct cpl_set_tcb_field {
655 WR_HDR;
656 union opcode_tid ot;
657 __u8 reply;
658 __u8 cpu_idx;
659 __be16 word;
660 __be64 mask;
661 __be64 val;
662};
663
664struct cpl_set_tcb_rpl {
665 RSS_HDR union opcode_tid ot;
666 __u8 rsvd[3];
667 __u8 status;
668};
669
670struct cpl_pcmd {
671 WR_HDR;
672 union opcode_tid ot;
673 __u8 rsvd[3];
674#if defined(__LITTLE_ENDIAN_BITFIELD)
675 __u8 src:1;
676 __u8 bundle:1;
677 __u8 channel:1;
678 __u8:5;
679#else
680 __u8:5;
681 __u8 channel:1;
682 __u8 bundle:1;
683 __u8 src:1;
684#endif
685 __be32 pcmd_parm[2];
686};
687
688struct cpl_pcmd_reply {
689 RSS_HDR union opcode_tid ot;
690 __u8 status;
691 __u8 rsvd;
692 __be16 len;
693};
694
695struct cpl_close_con_req {
696 WR_HDR;
697 union opcode_tid ot;
698 __be32 rsvd;
699};
700
701struct cpl_close_con_rpl {
702 RSS_HDR union opcode_tid ot;
703 __u8 rsvd[3];
704 __u8 status;
705 __be32 snd_nxt;
706 __be32 rcv_nxt;
707};
708
709struct cpl_close_listserv_req {
710 WR_HDR;
711 union opcode_tid ot;
712 __u8 rsvd0;
713 __u8 cpu_idx;
714 __be16 rsvd1;
715};
716
717struct cpl_close_listserv_rpl {
718 RSS_HDR union opcode_tid ot;
719 __u8 rsvd[3];
720 __u8 status;
721};
722
723struct cpl_abort_req_rss {
724 RSS_HDR union opcode_tid ot;
725 __be32 rsvd0;
726 __u8 rsvd1;
727 __u8 status;
728 __u8 rsvd2[6];
729};
730
731struct cpl_abort_req {
732 WR_HDR;
733 union opcode_tid ot;
734 __be32 rsvd0;
735 __u8 rsvd1;
736 __u8 cmd;
737 __u8 rsvd2[6];
738};
739
740struct cpl_abort_rpl_rss {
741 RSS_HDR union opcode_tid ot;
742 __be32 rsvd0;
743 __u8 rsvd1;
744 __u8 status;
745 __u8 rsvd2[6];
746};
747
748struct cpl_abort_rpl {
749 WR_HDR;
750 union opcode_tid ot;
751 __be32 rsvd0;
752 __u8 rsvd1;
753 __u8 cmd;
754 __u8 rsvd2[6];
755};
756
757struct cpl_peer_close {
758 RSS_HDR union opcode_tid ot;
759 __be32 rcv_nxt;
760};
761
762struct tx_data_wr {
763 __be32 wr_hi;
764 __be32 wr_lo;
765 __be32 len;
766 __be32 flags;
767 __be32 sndseq;
768 __be32 param;
769};
770
771/* tx_data_wr.param fields */
772#define S_TX_PORT 0
773#define M_TX_PORT 0x7
774#define V_TX_PORT(x) ((x) << S_TX_PORT)
775#define G_TX_PORT(x) (((x) >> S_TX_PORT) & M_TX_PORT)
776
777#define S_TX_MSS 4
778#define M_TX_MSS 0xF
779#define V_TX_MSS(x) ((x) << S_TX_MSS)
780#define G_TX_MSS(x) (((x) >> S_TX_MSS) & M_TX_MSS)
781
782#define S_TX_QOS 8
783#define M_TX_QOS 0xFF
784#define V_TX_QOS(x) ((x) << S_TX_QOS)
785#define G_TX_QOS(x) (((x) >> S_TX_QOS) & M_TX_QOS)
786
787#define S_TX_SNDBUF 16
788#define M_TX_SNDBUF 0xFFFF
789#define V_TX_SNDBUF(x) ((x) << S_TX_SNDBUF)
790#define G_TX_SNDBUF(x) (((x) >> S_TX_SNDBUF) & M_TX_SNDBUF)
791
792struct cpl_tx_data {
793 union opcode_tid ot;
794 __be32 len;
795 __be32 rsvd;
796 __be16 urg;
797 __be16 flags;
798};
799
800/* cpl_tx_data.flags fields */
801#define S_TX_ULP_SUBMODE 6
802#define M_TX_ULP_SUBMODE 0xF
803#define V_TX_ULP_SUBMODE(x) ((x) << S_TX_ULP_SUBMODE)
804#define G_TX_ULP_SUBMODE(x) (((x) >> S_TX_ULP_SUBMODE) & M_TX_ULP_SUBMODE)
805
806#define S_TX_ULP_MODE 10
807#define M_TX_ULP_MODE 0xF
808#define V_TX_ULP_MODE(x) ((x) << S_TX_ULP_MODE)
809#define G_TX_ULP_MODE(x) (((x) >> S_TX_ULP_MODE) & M_TX_ULP_MODE)
810
811#define S_TX_SHOVE 14
812#define V_TX_SHOVE(x) ((x) << S_TX_SHOVE)
813#define F_TX_SHOVE V_TX_SHOVE(1U)
814
815#define S_TX_MORE 15
816#define V_TX_MORE(x) ((x) << S_TX_MORE)
817#define F_TX_MORE V_TX_MORE(1U)
818
819/* additional tx_data_wr.flags fields */
820#define S_TX_CPU_IDX 0
821#define M_TX_CPU_IDX 0x3F
822#define V_TX_CPU_IDX(x) ((x) << S_TX_CPU_IDX)
823#define G_TX_CPU_IDX(x) (((x) >> S_TX_CPU_IDX) & M_TX_CPU_IDX)
824
825#define S_TX_URG 16
826#define V_TX_URG(x) ((x) << S_TX_URG)
827#define F_TX_URG V_TX_URG(1U)
828
829#define S_TX_CLOSE 17
830#define V_TX_CLOSE(x) ((x) << S_TX_CLOSE)
831#define F_TX_CLOSE V_TX_CLOSE(1U)
832
833#define S_TX_INIT 18
834#define V_TX_INIT(x) ((x) << S_TX_INIT)
835#define F_TX_INIT V_TX_INIT(1U)
836
837#define S_TX_IMM_ACK 19
838#define V_TX_IMM_ACK(x) ((x) << S_TX_IMM_ACK)
839#define F_TX_IMM_ACK V_TX_IMM_ACK(1U)
840
841#define S_TX_IMM_DMA 20
842#define V_TX_IMM_DMA(x) ((x) << S_TX_IMM_DMA)
843#define F_TX_IMM_DMA V_TX_IMM_DMA(1U)
844
845struct cpl_tx_data_ack {
846 RSS_HDR union opcode_tid ot;
847 __be32 ack_seq;
848};
849
850struct cpl_wr_ack {
851 RSS_HDR union opcode_tid ot;
852 __be16 credits;
853 __be16 rsvd;
854 __be32 snd_nxt;
855 __be32 snd_una;
856};
857
858struct cpl_rdma_ec_status {
859 RSS_HDR union opcode_tid ot;
860 __u8 rsvd[3];
861 __u8 status;
862};
863
864struct mngt_pktsched_wr {
865 __be32 wr_hi;
866 __be32 wr_lo;
867 __u8 mngt_opcode;
868 __u8 rsvd[7];
869 __u8 sched;
870 __u8 idx;
871 __u8 min;
872 __u8 max;
873 __u8 binding;
874 __u8 rsvd1[3];
875};
876
877struct cpl_iscsi_hdr {
878 RSS_HDR union opcode_tid ot;
879 __be16 pdu_len_ddp;
880 __be16 len;
881 __be32 seq;
882 __be16 urg;
883 __u8 rsvd;
884 __u8 status;
885};
886
887/* cpl_iscsi_hdr.pdu_len_ddp fields */
888#define S_ISCSI_PDU_LEN 0
889#define M_ISCSI_PDU_LEN 0x7FFF
890#define V_ISCSI_PDU_LEN(x) ((x) << S_ISCSI_PDU_LEN)
891#define G_ISCSI_PDU_LEN(x) (((x) >> S_ISCSI_PDU_LEN) & M_ISCSI_PDU_LEN)
892
893#define S_ISCSI_DDP 15
894#define V_ISCSI_DDP(x) ((x) << S_ISCSI_DDP)
895#define F_ISCSI_DDP V_ISCSI_DDP(1U)
896
897struct cpl_rx_data {
898 RSS_HDR union opcode_tid ot;
899 __be16 rsvd;
900 __be16 len;
901 __be32 seq;
902 __be16 urg;
903#if defined(__LITTLE_ENDIAN_BITFIELD)
904 __u8 dack_mode:2;
905 __u8 psh:1;
906 __u8 heartbeat:1;
907 __u8:4;
908#else
909 __u8:4;
910 __u8 heartbeat:1;
911 __u8 psh:1;
912 __u8 dack_mode:2;
913#endif
914 __u8 status;
915};
916
917struct cpl_rx_data_ack {
918 WR_HDR;
919 union opcode_tid ot;
920 __be32 credit_dack;
921};
922
 923/* cpl_rx_data_ack.credit_dack fields */
924#define S_RX_CREDITS 0
925#define M_RX_CREDITS 0x7FFFFFF
926#define V_RX_CREDITS(x) ((x) << S_RX_CREDITS)
927#define G_RX_CREDITS(x) (((x) >> S_RX_CREDITS) & M_RX_CREDITS)
928
929#define S_RX_MODULATE 27
930#define V_RX_MODULATE(x) ((x) << S_RX_MODULATE)
931#define F_RX_MODULATE V_RX_MODULATE(1U)
932
933#define S_RX_FORCE_ACK 28
934#define V_RX_FORCE_ACK(x) ((x) << S_RX_FORCE_ACK)
935#define F_RX_FORCE_ACK V_RX_FORCE_ACK(1U)
936
937#define S_RX_DACK_MODE 29
938#define M_RX_DACK_MODE 0x3
939#define V_RX_DACK_MODE(x) ((x) << S_RX_DACK_MODE)
940#define G_RX_DACK_MODE(x) (((x) >> S_RX_DACK_MODE) & M_RX_DACK_MODE)
941
942#define S_RX_DACK_CHANGE 31
943#define V_RX_DACK_CHANGE(x) ((x) << S_RX_DACK_CHANGE)
944#define F_RX_DACK_CHANGE V_RX_DACK_CHANGE(1U)
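/*
 * Sketch (not in the original header): a driver typically returns RX
 * credits by packing these fields into cpl_rx_data_ack.credit_dack,
 * e.g. granting 'credits' while requesting a delayed-ACK mode change.
 */
static inline u32 example_credit_dack(unsigned int credits)
{
	return V_RX_CREDITS(credits) | F_RX_DACK_CHANGE | V_RX_DACK_MODE(1);
}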
945
946struct cpl_rx_urg_notify {
947 RSS_HDR union opcode_tid ot;
948 __be32 seq;
949};
950
951struct cpl_rx_ddp_complete {
952 RSS_HDR union opcode_tid ot;
953 __be32 ddp_report;
954};
955
956struct cpl_rx_data_ddp {
957 RSS_HDR union opcode_tid ot;
958 __be16 urg;
959 __be16 len;
960 __be32 seq;
961 union {
962 __be32 nxt_seq;
963 __be32 ddp_report;
964 };
965 __be32 ulp_crc;
966 __be32 ddpvld_status;
967};
968
969/* cpl_rx_data_ddp.ddpvld_status fields */
970#define S_DDP_STATUS 0
971#define M_DDP_STATUS 0xFF
972#define V_DDP_STATUS(x) ((x) << S_DDP_STATUS)
973#define G_DDP_STATUS(x) (((x) >> S_DDP_STATUS) & M_DDP_STATUS)
974
975#define S_DDP_VALID 15
976#define M_DDP_VALID 0x1FFFF
977#define V_DDP_VALID(x) ((x) << S_DDP_VALID)
978#define G_DDP_VALID(x) (((x) >> S_DDP_VALID) & M_DDP_VALID)
979
980#define S_DDP_PPOD_MISMATCH 15
981#define V_DDP_PPOD_MISMATCH(x) ((x) << S_DDP_PPOD_MISMATCH)
982#define F_DDP_PPOD_MISMATCH V_DDP_PPOD_MISMATCH(1U)
983
984#define S_DDP_PDU 16
985#define V_DDP_PDU(x) ((x) << S_DDP_PDU)
986#define F_DDP_PDU V_DDP_PDU(1U)
987
988#define S_DDP_LLIMIT_ERR 17
989#define V_DDP_LLIMIT_ERR(x) ((x) << S_DDP_LLIMIT_ERR)
990#define F_DDP_LLIMIT_ERR V_DDP_LLIMIT_ERR(1U)
991
992#define S_DDP_PPOD_PARITY_ERR 18
993#define V_DDP_PPOD_PARITY_ERR(x) ((x) << S_DDP_PPOD_PARITY_ERR)
994#define F_DDP_PPOD_PARITY_ERR V_DDP_PPOD_PARITY_ERR(1U)
995
996#define S_DDP_PADDING_ERR 19
997#define V_DDP_PADDING_ERR(x) ((x) << S_DDP_PADDING_ERR)
998#define F_DDP_PADDING_ERR V_DDP_PADDING_ERR(1U)
999
1000#define S_DDP_HDRCRC_ERR 20
1001#define V_DDP_HDRCRC_ERR(x) ((x) << S_DDP_HDRCRC_ERR)
1002#define F_DDP_HDRCRC_ERR V_DDP_HDRCRC_ERR(1U)
1003
1004#define S_DDP_DATACRC_ERR 21
1005#define V_DDP_DATACRC_ERR(x) ((x) << S_DDP_DATACRC_ERR)
1006#define F_DDP_DATACRC_ERR V_DDP_DATACRC_ERR(1U)
1007
1008#define S_DDP_INVALID_TAG 22
1009#define V_DDP_INVALID_TAG(x) ((x) << S_DDP_INVALID_TAG)
1010#define F_DDP_INVALID_TAG V_DDP_INVALID_TAG(1U)
1011
1012#define S_DDP_ULIMIT_ERR 23
1013#define V_DDP_ULIMIT_ERR(x) ((x) << S_DDP_ULIMIT_ERR)
1014#define F_DDP_ULIMIT_ERR V_DDP_ULIMIT_ERR(1U)
1015
1016#define S_DDP_OFFSET_ERR 24
1017#define V_DDP_OFFSET_ERR(x) ((x) << S_DDP_OFFSET_ERR)
1018#define F_DDP_OFFSET_ERR V_DDP_OFFSET_ERR(1U)
1019
1020#define S_DDP_COLOR_ERR 25
1021#define V_DDP_COLOR_ERR(x) ((x) << S_DDP_COLOR_ERR)
1022#define F_DDP_COLOR_ERR V_DDP_COLOR_ERR(1U)
1023
1024#define S_DDP_TID_MISMATCH 26
1025#define V_DDP_TID_MISMATCH(x) ((x) << S_DDP_TID_MISMATCH)
1026#define F_DDP_TID_MISMATCH V_DDP_TID_MISMATCH(1U)
1027
1028#define S_DDP_INVALID_PPOD 27
1029#define V_DDP_INVALID_PPOD(x) ((x) << S_DDP_INVALID_PPOD)
1030#define F_DDP_INVALID_PPOD V_DDP_INVALID_PPOD(1U)
1031
1032#define S_DDP_ULP_MODE 28
1033#define M_DDP_ULP_MODE 0xF
1034#define V_DDP_ULP_MODE(x) ((x) << S_DDP_ULP_MODE)
1035#define G_DDP_ULP_MODE(x) (((x) >> S_DDP_ULP_MODE) & M_DDP_ULP_MODE)
1036
1037/* cpl_rx_data_ddp.ddp_report fields */
1038#define S_DDP_OFFSET 0
1039#define M_DDP_OFFSET 0x3FFFFF
1040#define V_DDP_OFFSET(x) ((x) << S_DDP_OFFSET)
1041#define G_DDP_OFFSET(x) (((x) >> S_DDP_OFFSET) & M_DDP_OFFSET)
1042
1043#define S_DDP_URG 24
1044#define V_DDP_URG(x) ((x) << S_DDP_URG)
1045#define F_DDP_URG V_DDP_URG(1U)
1046
1047#define S_DDP_PSH 25
1048#define V_DDP_PSH(x) ((x) << S_DDP_PSH)
1049#define F_DDP_PSH V_DDP_PSH(1U)
1050
1051#define S_DDP_BUF_COMPLETE 26
1052#define V_DDP_BUF_COMPLETE(x) ((x) << S_DDP_BUF_COMPLETE)
1053#define F_DDP_BUF_COMPLETE V_DDP_BUF_COMPLETE(1U)
1054
1055#define S_DDP_BUF_TIMED_OUT 27
1056#define V_DDP_BUF_TIMED_OUT(x) ((x) << S_DDP_BUF_TIMED_OUT)
1057#define F_DDP_BUF_TIMED_OUT V_DDP_BUF_TIMED_OUT(1U)
1058
1059#define S_DDP_BUF_IDX 28
1060#define V_DDP_BUF_IDX(x) ((x) << S_DDP_BUF_IDX)
1061#define F_DDP_BUF_IDX V_DDP_BUF_IDX(1U)
1062
1063struct cpl_tx_pkt {
1064 WR_HDR;
1065 __be32 cntrl;
1066 __be32 len;
1067};
1068
1069struct cpl_tx_pkt_lso {
1070 WR_HDR;
1071 __be32 cntrl;
1072 __be32 len;
1073
1074 __be32 rsvd;
1075 __be32 lso_info;
1076};
1077
1078/* cpl_tx_pkt*.cntrl fields */
1079#define S_TXPKT_VLAN 0
1080#define M_TXPKT_VLAN 0xFFFF
1081#define V_TXPKT_VLAN(x) ((x) << S_TXPKT_VLAN)
1082#define G_TXPKT_VLAN(x) (((x) >> S_TXPKT_VLAN) & M_TXPKT_VLAN)
1083
1084#define S_TXPKT_INTF 16
1085#define M_TXPKT_INTF 0xF
1086#define V_TXPKT_INTF(x) ((x) << S_TXPKT_INTF)
1087#define G_TXPKT_INTF(x) (((x) >> S_TXPKT_INTF) & M_TXPKT_INTF)
1088
1089#define S_TXPKT_IPCSUM_DIS 20
1090#define V_TXPKT_IPCSUM_DIS(x) ((x) << S_TXPKT_IPCSUM_DIS)
1091#define F_TXPKT_IPCSUM_DIS V_TXPKT_IPCSUM_DIS(1U)
1092
1093#define S_TXPKT_L4CSUM_DIS 21
1094#define V_TXPKT_L4CSUM_DIS(x) ((x) << S_TXPKT_L4CSUM_DIS)
1095#define F_TXPKT_L4CSUM_DIS V_TXPKT_L4CSUM_DIS(1U)
1096
1097#define S_TXPKT_VLAN_VLD 22
1098#define V_TXPKT_VLAN_VLD(x) ((x) << S_TXPKT_VLAN_VLD)
1099#define F_TXPKT_VLAN_VLD V_TXPKT_VLAN_VLD(1U)
1100
1101#define S_TXPKT_LOOPBACK 23
1102#define V_TXPKT_LOOPBACK(x) ((x) << S_TXPKT_LOOPBACK)
1103#define F_TXPKT_LOOPBACK V_TXPKT_LOOPBACK(1U)
1104
1105#define S_TXPKT_OPCODE 24
1106#define M_TXPKT_OPCODE 0xFF
1107#define V_TXPKT_OPCODE(x) ((x) << S_TXPKT_OPCODE)
1108#define G_TXPKT_OPCODE(x) (((x) >> S_TXPKT_OPCODE) & M_TXPKT_OPCODE)
1109
1110/* cpl_tx_pkt_lso.lso_info fields */
1111#define S_LSO_MSS 0
1112#define M_LSO_MSS 0x3FFF
1113#define V_LSO_MSS(x) ((x) << S_LSO_MSS)
1114#define G_LSO_MSS(x) (((x) >> S_LSO_MSS) & M_LSO_MSS)
1115
1116#define S_LSO_ETH_TYPE 14
1117#define M_LSO_ETH_TYPE 0x3
1118#define V_LSO_ETH_TYPE(x) ((x) << S_LSO_ETH_TYPE)
1119#define G_LSO_ETH_TYPE(x) (((x) >> S_LSO_ETH_TYPE) & M_LSO_ETH_TYPE)
1120
1121#define S_LSO_TCPHDR_WORDS 16
1122#define M_LSO_TCPHDR_WORDS 0xF
1123#define V_LSO_TCPHDR_WORDS(x) ((x) << S_LSO_TCPHDR_WORDS)
1124#define G_LSO_TCPHDR_WORDS(x) (((x) >> S_LSO_TCPHDR_WORDS) & M_LSO_TCPHDR_WORDS)
1125
1126#define S_LSO_IPHDR_WORDS 20
1127#define M_LSO_IPHDR_WORDS 0xF
1128#define V_LSO_IPHDR_WORDS(x) ((x) << S_LSO_IPHDR_WORDS)
1129#define G_LSO_IPHDR_WORDS(x) (((x) >> S_LSO_IPHDR_WORDS) & M_LSO_IPHDR_WORDS)
1130
1131#define S_LSO_IPV6 24
1132#define V_LSO_IPV6(x) ((x) << S_LSO_IPV6)
1133#define F_LSO_IPV6 V_LSO_IPV6(1U)
1134
1135struct cpl_trace_pkt {
1136#ifdef CHELSIO_FW
1137 __u8 rss_opcode;
1138#if defined(__LITTLE_ENDIAN_BITFIELD)
1139 __u8 err:1;
1140 __u8:7;
1141#else
1142 __u8:7;
1143 __u8 err:1;
1144#endif
1145 __u8 rsvd0;
1146#if defined(__LITTLE_ENDIAN_BITFIELD)
1147 __u8 qid:4;
1148 __u8:4;
1149#else
1150 __u8:4;
1151 __u8 qid:4;
1152#endif
1153 __be32 tstamp;
1154#endif /* CHELSIO_FW */
1155
1156 __u8 opcode;
1157#if defined(__LITTLE_ENDIAN_BITFIELD)
1158 __u8 iff:4;
1159 __u8:4;
1160#else
1161 __u8:4;
1162 __u8 iff:4;
1163#endif
1164 __u8 rsvd[4];
1165 __be16 len;
1166};
1167
1168struct cpl_rx_pkt {
1169 RSS_HDR __u8 opcode;
1170#if defined(__LITTLE_ENDIAN_BITFIELD)
1171 __u8 iff:4;
1172 __u8 csum_valid:1;
1173 __u8 ipmi_pkt:1;
1174 __u8 vlan_valid:1;
1175 __u8 fragment:1;
1176#else
1177 __u8 fragment:1;
1178 __u8 vlan_valid:1;
1179 __u8 ipmi_pkt:1;
1180 __u8 csum_valid:1;
1181 __u8 iff:4;
1182#endif
1183 __be16 csum;
1184 __be16 vlan;
1185 __be16 len;
1186};
1187
1188struct cpl_l2t_write_req {
1189 WR_HDR;
1190 union opcode_tid ot;
1191 __be32 params;
1192 __u8 rsvd[2];
1193 __u8 dst_mac[6];
1194};
1195
1196/* cpl_l2t_write_req.params fields */
1197#define S_L2T_W_IDX 0
1198#define M_L2T_W_IDX 0x7FF
1199#define V_L2T_W_IDX(x) ((x) << S_L2T_W_IDX)
1200#define G_L2T_W_IDX(x) (((x) >> S_L2T_W_IDX) & M_L2T_W_IDX)
1201
1202#define S_L2T_W_VLAN 11
1203#define M_L2T_W_VLAN 0xFFF
1204#define V_L2T_W_VLAN(x) ((x) << S_L2T_W_VLAN)
1205#define G_L2T_W_VLAN(x) (((x) >> S_L2T_W_VLAN) & M_L2T_W_VLAN)
1206
1207#define S_L2T_W_IFF 23
1208#define M_L2T_W_IFF 0xF
1209#define V_L2T_W_IFF(x) ((x) << S_L2T_W_IFF)
1210#define G_L2T_W_IFF(x) (((x) >> S_L2T_W_IFF) & M_L2T_W_IFF)
1211
1212#define S_L2T_W_PRIO 27
1213#define M_L2T_W_PRIO 0x7
1214#define V_L2T_W_PRIO(x) ((x) << S_L2T_W_PRIO)
1215#define G_L2T_W_PRIO(x) (((x) >> S_L2T_W_PRIO) & M_L2T_W_PRIO)
1216
1217struct cpl_l2t_write_rpl {
1218 RSS_HDR union opcode_tid ot;
1219 __u8 status;
1220 __u8 rsvd[3];
1221};
1222
1223struct cpl_l2t_read_req {
1224 WR_HDR;
1225 union opcode_tid ot;
1226 __be16 rsvd;
1227 __be16 l2t_idx;
1228};
1229
1230struct cpl_l2t_read_rpl {
1231 RSS_HDR union opcode_tid ot;
1232 __be32 params;
1233 __u8 rsvd[2];
1234 __u8 dst_mac[6];
1235};
1236
1237/* cpl_l2t_read_rpl.params fields */
1238#define S_L2T_R_PRIO 0
1239#define M_L2T_R_PRIO 0x7
1240#define V_L2T_R_PRIO(x) ((x) << S_L2T_R_PRIO)
1241#define G_L2T_R_PRIO(x) (((x) >> S_L2T_R_PRIO) & M_L2T_R_PRIO)
1242
1243#define S_L2T_R_VLAN 8
1244#define M_L2T_R_VLAN 0xFFF
1245#define V_L2T_R_VLAN(x) ((x) << S_L2T_R_VLAN)
1246#define G_L2T_R_VLAN(x) (((x) >> S_L2T_R_VLAN) & M_L2T_R_VLAN)
1247
1248#define S_L2T_R_IFF 20
1249#define M_L2T_R_IFF 0xF
1250#define V_L2T_R_IFF(x) ((x) << S_L2T_R_IFF)
1251#define G_L2T_R_IFF(x) (((x) >> S_L2T_R_IFF) & M_L2T_R_IFF)
1252
1253#define S_L2T_STATUS 24
1254#define M_L2T_STATUS 0xFF
1255#define V_L2T_STATUS(x) ((x) << S_L2T_STATUS)
1256#define G_L2T_STATUS(x) (((x) >> S_L2T_STATUS) & M_L2T_STATUS)
1257
1258struct cpl_smt_write_req {
1259 WR_HDR;
1260 union opcode_tid ot;
1261 __u8 rsvd0;
1262#if defined(__LITTLE_ENDIAN_BITFIELD)
1263 __u8 mtu_idx:4;
1264 __u8 iff:4;
1265#else
1266 __u8 iff:4;
1267 __u8 mtu_idx:4;
1268#endif
1269 __be16 rsvd2;
1270 __be16 rsvd3;
1271 __u8 src_mac1[6];
1272 __be16 rsvd4;
1273 __u8 src_mac0[6];
1274};
1275
1276struct cpl_smt_write_rpl {
1277 RSS_HDR union opcode_tid ot;
1278 __u8 status;
1279 __u8 rsvd[3];
1280};
1281
1282struct cpl_smt_read_req {
1283 WR_HDR;
1284 union opcode_tid ot;
1285 __u8 rsvd0;
1286#if defined(__LITTLE_ENDIAN_BITFIELD)
1287 __u8:4;
1288 __u8 iff:4;
1289#else
1290 __u8 iff:4;
1291 __u8:4;
1292#endif
1293 __be16 rsvd2;
1294};
1295
1296struct cpl_smt_read_rpl {
1297 RSS_HDR union opcode_tid ot;
1298 __u8 status;
1299#if defined(__LITTLE_ENDIAN_BITFIELD)
1300 __u8 mtu_idx:4;
1301 __u8:4;
1302#else
1303 __u8:4;
1304 __u8 mtu_idx:4;
1305#endif
1306 __be16 rsvd2;
1307 __be16 rsvd3;
1308 __u8 src_mac1[6];
1309 __be16 rsvd4;
1310 __u8 src_mac0[6];
1311};
1312
1313struct cpl_rte_delete_req {
1314 WR_HDR;
1315 union opcode_tid ot;
1316 __be32 params;
1317};
1318
1319/* { cpl_rte_delete_req, cpl_rte_read_req }.params fields */
1320#define S_RTE_REQ_LUT_IX 8
1321#define M_RTE_REQ_LUT_IX 0x7FF
1322#define V_RTE_REQ_LUT_IX(x) ((x) << S_RTE_REQ_LUT_IX)
1323#define G_RTE_REQ_LUT_IX(x) (((x) >> S_RTE_REQ_LUT_IX) & M_RTE_REQ_LUT_IX)
1324
1325#define S_RTE_REQ_LUT_BASE 19
1326#define M_RTE_REQ_LUT_BASE 0x7FF
1327#define V_RTE_REQ_LUT_BASE(x) ((x) << S_RTE_REQ_LUT_BASE)
1328#define G_RTE_REQ_LUT_BASE(x) (((x) >> S_RTE_REQ_LUT_BASE) & M_RTE_REQ_LUT_BASE)
1329
1330#define S_RTE_READ_REQ_SELECT 31
1331#define V_RTE_READ_REQ_SELECT(x) ((x) << S_RTE_READ_REQ_SELECT)
1332#define F_RTE_READ_REQ_SELECT V_RTE_READ_REQ_SELECT(1U)
1333
1334struct cpl_rte_delete_rpl {
1335 RSS_HDR union opcode_tid ot;
1336 __u8 status;
1337 __u8 rsvd[3];
1338};
1339
1340struct cpl_rte_write_req {
1341 WR_HDR;
1342 union opcode_tid ot;
1343#if defined(__LITTLE_ENDIAN_BITFIELD)
1344 __u8:6;
1345 __u8 write_tcam:1;
1346 __u8 write_l2t_lut:1;
1347#else
1348 __u8 write_l2t_lut:1;
1349 __u8 write_tcam:1;
1350 __u8:6;
1351#endif
1352 __u8 rsvd[3];
1353 __be32 lut_params;
1354 __be16 rsvd2;
1355 __be16 l2t_idx;
1356 __be32 netmask;
1357 __be32 faddr;
1358};
1359
1360/* cpl_rte_write_req.lut_params fields */
1361#define S_RTE_WRITE_REQ_LUT_IX 10
1362#define M_RTE_WRITE_REQ_LUT_IX 0x7FF
1363#define V_RTE_WRITE_REQ_LUT_IX(x) ((x) << S_RTE_WRITE_REQ_LUT_IX)
1364#define G_RTE_WRITE_REQ_LUT_IX(x) (((x) >> S_RTE_WRITE_REQ_LUT_IX) & M_RTE_WRITE_REQ_LUT_IX)
1365
1366#define S_RTE_WRITE_REQ_LUT_BASE 21
1367#define M_RTE_WRITE_REQ_LUT_BASE 0x7FF
1368#define V_RTE_WRITE_REQ_LUT_BASE(x) ((x) << S_RTE_WRITE_REQ_LUT_BASE)
1369#define G_RTE_WRITE_REQ_LUT_BASE(x) (((x) >> S_RTE_WRITE_REQ_LUT_BASE) & M_RTE_WRITE_REQ_LUT_BASE)
1370
1371struct cpl_rte_write_rpl {
1372 RSS_HDR union opcode_tid ot;
1373 __u8 status;
1374 __u8 rsvd[3];
1375};
1376
1377struct cpl_rte_read_req {
1378 WR_HDR;
1379 union opcode_tid ot;
1380 __be32 params;
1381};
1382
1383struct cpl_rte_read_rpl {
1384 RSS_HDR union opcode_tid ot;
1385 __u8 status;
1386 __u8 rsvd0;
1387 __be16 l2t_idx;
1388#if defined(__LITTLE_ENDIAN_BITFIELD)
1389 __u8:7;
1390 __u8 select:1;
1391#else
1392 __u8 select:1;
1393 __u8:7;
1394#endif
1395 __u8 rsvd2[3];
1396 __be32 addr;
1397};
1398
1399struct cpl_tid_release {
1400 WR_HDR;
1401 union opcode_tid ot;
1402 __be32 rsvd;
1403};
1404
1405struct cpl_barrier {
1406 WR_HDR;
1407 __u8 opcode;
1408 __u8 rsvd[7];
1409};
1410
1411struct cpl_rdma_read_req {
1412 __u8 opcode;
1413 __u8 rsvd[15];
1414};
1415
1416struct cpl_rdma_terminate {
1417#ifdef CHELSIO_FW
1418 __u8 opcode;
1419 __u8 rsvd[2];
1420#if defined(__LITTLE_ENDIAN_BITFIELD)
1421 __u8 rspq:3;
1422 __u8:5;
1423#else
1424 __u8:5;
1425 __u8 rspq:3;
1426#endif
1427 __be32 tid_len;
1428#endif
1429 __be32 msn;
1430 __be32 mo;
1431 __u8 data[0];
1432};
1433
1434/* cpl_rdma_terminate.tid_len fields */
1435#define S_FLIT_CNT 0
1436#define M_FLIT_CNT 0xFF
1437#define V_FLIT_CNT(x) ((x) << S_FLIT_CNT)
1438#define G_FLIT_CNT(x) (((x) >> S_FLIT_CNT) & M_FLIT_CNT)
1439
1440#define S_TERM_TID 8
1441#define M_TERM_TID 0xFFFFF
1442#define V_TERM_TID(x) ((x) << S_TERM_TID)
1443#define G_TERM_TID(x) (((x) >> S_TERM_TID) & M_TERM_TID)
1444#endif /* T3_CPL_H */
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
new file mode 100644
index 000000000000..365a7f5b1f94
--- /dev/null
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -0,0 +1,3375 @@
1/*
2 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include "common.h"
33#include "regs.h"
34#include "sge_defs.h"
35#include "firmware_exports.h"
36
37/**
38 * t3_wait_op_done_val - wait until an operation is completed
39 * @adapter: the adapter performing the operation
40 * @reg: the register to check for completion
41 * @mask: a single-bit field within @reg that indicates completion
42 * @polarity: the value of the field when the operation is completed
43 * @attempts: number of check iterations
44 * @delay: delay in usecs between iterations
45 * @valp: where to store the value of the register at completion time
46 *
47 * Wait until an operation is completed by checking a bit in a register
48 * up to @attempts times. If @valp is not NULL the value of the register
49 * at the time it indicated completion is stored there. Returns 0 if the
50 * operation completes and -EAGAIN otherwise.
51 */
53int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
54 int polarity, int attempts, int delay, u32 *valp)
55{
56 while (1) {
57 u32 val = t3_read_reg(adapter, reg);
58
59 if (!!(val & mask) == polarity) {
60 if (valp)
61 *valp = val;
62 return 0;
63 }
64 if (--attempts == 0)
65 return -EAGAIN;
66 if (delay)
67 udelay(delay);
68 }
69}
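/*
 * Context sketch: the t3_wait_op_done() used by the MDIO and flash code
 * below is assumed to be a common.h wrapper that discards the final
 * register value, roughly:
 */
static inline int example_wait_op_done(struct adapter *adapter, int reg,
				       u32 mask, int polarity, int attempts,
				       int delay)
{
	return t3_wait_op_done_val(adapter, reg, mask, polarity, attempts,
				   delay, NULL);
}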
70
71/**
72 * t3_write_regs - write a bunch of registers
73 * @adapter: the adapter to program
74 * @p: an array of register address/register value pairs
75 * @n: the number of address/value pairs
76 * @offset: register address offset
77 *
78 * Takes an array of register address/register value pairs and writes each
79 * value to the corresponding register. Register addresses are adjusted
80 * by the supplied offset.
81 */
82void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
83 int n, unsigned int offset)
84{
85 while (n--) {
86 t3_write_reg(adapter, p->reg_addr + offset, p->val);
87 p++;
88 }
89}
90
91/**
92 * t3_set_reg_field - set a register field to a value
93 * @adapter: the adapter to program
94 * @addr: the register address
95 * @mask: specifies the portion of the register to modify
96 * @val: the new value for the register field
97 *
98 * Sets a register field specified by the supplied mask to the
99 * given value.
100 */
101void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
102 u32 val)
103{
104 u32 v = t3_read_reg(adapter, addr) & ~mask;
105
106 t3_write_reg(adapter, addr, v | val);
107 t3_read_reg(adapter, addr); /* flush */
108}
109
110/**
111 * t3_read_indirect - read indirectly addressed registers
112 * @adap: the adapter
113 * @addr_reg: register holding the indirect address
114 * @data_reg: register holding the value of the indirect register
115 * @vals: where the read register values are stored
 116 * @nregs: how many indirect registers to read
 117 * @start_idx: index of first indirect register to read
118 *
119 * Reads registers that are accessed indirectly through an address/data
120 * register pair.
121 */
122void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
123 unsigned int data_reg, u32 *vals, unsigned int nregs,
124 unsigned int start_idx)
125{
126 while (nregs--) {
127 t3_write_reg(adap, addr_reg, start_idx);
128 *vals++ = t3_read_reg(adap, data_reg);
129 start_idx++;
130 }
131}
132
133/**
134 * t3_mc7_bd_read - read from MC7 through backdoor accesses
135 * @mc7: identifies MC7 to read from
136 * @start: index of first 64-bit word to read
137 * @n: number of 64-bit words to read
138 * @buf: where to store the read result
139 *
 140 * Read @n 64-bit words from MC7 starting at word @start, using backdoor
141 * accesses.
142 */
143int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
144 u64 *buf)
145{
146 static const int shift[] = { 0, 0, 16, 24 };
147 static const int step[] = { 0, 32, 16, 8 };
148
149 unsigned int size64 = mc7->size / 8; /* # of 64-bit words */
150 struct adapter *adap = mc7->adapter;
151
152 if (start >= size64 || start + n > size64)
153 return -EINVAL;
154
155 start *= (8 << mc7->width);
156 while (n--) {
157 int i;
158 u64 val64 = 0;
159
160 for (i = (1 << mc7->width) - 1; i >= 0; --i) {
161 int attempts = 10;
162 u32 val;
163
164 t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start);
165 t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
166 val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
167 while ((val & F_BUSY) && attempts--)
168 val = t3_read_reg(adap,
169 mc7->offset + A_MC7_BD_OP);
170 if (val & F_BUSY)
171 return -EIO;
172
173 val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
174 if (mc7->width == 0) {
175 val64 = t3_read_reg(adap,
176 mc7->offset +
177 A_MC7_BD_DATA0);
178 val64 |= (u64) val << 32;
179 } else {
180 if (mc7->width > 1)
181 val >>= shift[mc7->width];
182 val64 |= (u64) val << (step[mc7->width] * i);
183 }
184 start += 8;
185 }
186 *buf++ = val64;
187 }
188 return 0;
189}
190
191/*
192 * Initialize MI1.
193 */
194static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
195{
196 u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
197 u32 val = F_PREEN | V_MDIINV(ai->mdiinv) | V_MDIEN(ai->mdien) |
198 V_CLKDIV(clkdiv);
199
200 if (!(ai->caps & SUPPORTED_10000baseT_Full))
201 val |= V_ST(1);
202 t3_write_reg(adap, A_MI1_CFG, val);
203}
204
205#define MDIO_ATTEMPTS 10
206
207/*
208 * MI1 read/write operations for direct-addressed PHYs.
209 */
210static int mi1_read(struct adapter *adapter, int phy_addr, int mmd_addr,
211 int reg_addr, unsigned int *valp)
212{
213 int ret;
214 u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
215
216 if (mmd_addr)
217 return -EINVAL;
218
219 mutex_lock(&adapter->mdio_lock);
220 t3_write_reg(adapter, A_MI1_ADDR, addr);
221 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
222 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
223 if (!ret)
224 *valp = t3_read_reg(adapter, A_MI1_DATA);
225 mutex_unlock(&adapter->mdio_lock);
226 return ret;
227}
228
229static int mi1_write(struct adapter *adapter, int phy_addr, int mmd_addr,
230 int reg_addr, unsigned int val)
231{
232 int ret;
233 u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
234
235 if (mmd_addr)
236 return -EINVAL;
237
238 mutex_lock(&adapter->mdio_lock);
239 t3_write_reg(adapter, A_MI1_ADDR, addr);
240 t3_write_reg(adapter, A_MI1_DATA, val);
241 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
242 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
243 mutex_unlock(&adapter->mdio_lock);
244 return ret;
245}
246
247static const struct mdio_ops mi1_mdio_ops = {
248 mi1_read,
249 mi1_write
250};
251
252/*
253 * MI1 read/write operations for indirect-addressed PHYs.
254 */
255static int mi1_ext_read(struct adapter *adapter, int phy_addr, int mmd_addr,
256 int reg_addr, unsigned int *valp)
257{
258 int ret;
259 u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
260
261 mutex_lock(&adapter->mdio_lock);
262 t3_write_reg(adapter, A_MI1_ADDR, addr);
263 t3_write_reg(adapter, A_MI1_DATA, reg_addr);
264 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
265 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
266 if (!ret) {
267 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
268 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
269 MDIO_ATTEMPTS, 20);
270 if (!ret)
271 *valp = t3_read_reg(adapter, A_MI1_DATA);
272 }
273 mutex_unlock(&adapter->mdio_lock);
274 return ret;
275}
276
277static int mi1_ext_write(struct adapter *adapter, int phy_addr, int mmd_addr,
278 int reg_addr, unsigned int val)
279{
280 int ret;
281 u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
282
283 mutex_lock(&adapter->mdio_lock);
284 t3_write_reg(adapter, A_MI1_ADDR, addr);
285 t3_write_reg(adapter, A_MI1_DATA, reg_addr);
286 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
287 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
288 if (!ret) {
289 t3_write_reg(adapter, A_MI1_DATA, val);
290 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
291 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
292 MDIO_ATTEMPTS, 20);
293 }
294 mutex_unlock(&adapter->mdio_lock);
295 return ret;
296}
297
298static const struct mdio_ops mi1_mdio_ext_ops = {
299 mi1_ext_read,
300 mi1_ext_write
301};
302
303/**
304 * t3_mdio_change_bits - modify the value of a PHY register
305 * @phy: the PHY to operate on
306 * @mmd: the device address
307 * @reg: the register address
308 * @clear: what part of the register value to mask off
309 * @set: what part of the register value to set
310 *
311 * Changes the value of a PHY register by applying a mask to its current
312 * value and ORing the result with a new value.
313 */
314int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
315 unsigned int set)
316{
317 int ret;
318 unsigned int val;
319
320 ret = mdio_read(phy, mmd, reg, &val);
321 if (!ret) {
322 val &= ~clear;
323 ret = mdio_write(phy, mmd, reg, val | set);
324 }
325 return ret;
326}
327
328/**
329 * t3_phy_reset - reset a PHY block
330 * @phy: the PHY to operate on
331 * @mmd: the device address of the PHY block to reset
332 * @wait: how long to wait for the reset to complete in 1ms increments
333 *
334 * Resets a PHY block and optionally waits for the reset to complete.
335 * @mmd should be 0 for 10/100/1000 PHYs and the device address to reset
336 * for 10G PHYs.
337 */
338int t3_phy_reset(struct cphy *phy, int mmd, int wait)
339{
340 int err;
341 unsigned int ctl;
342
343 err = t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
344 if (err || !wait)
345 return err;
346
347 do {
348 err = mdio_read(phy, mmd, MII_BMCR, &ctl);
349 if (err)
350 return err;
351 ctl &= BMCR_RESET;
352 if (ctl)
353 msleep(1);
354 } while (ctl && --wait);
355
356 return ctl ? -1 : 0;
357}
358
359/**
360 * t3_phy_advertise - set the PHY advertisement registers for autoneg
361 * @phy: the PHY to operate on
362 * @advert: bitmap of capabilities the PHY should advertise
363 *
364 * Sets a 10/100/1000 PHY's advertisement registers to advertise the
365 * requested capabilities.
366 */
367int t3_phy_advertise(struct cphy *phy, unsigned int advert)
368{
369 int err;
370 unsigned int val = 0;
371
372 err = mdio_read(phy, 0, MII_CTRL1000, &val);
373 if (err)
374 return err;
375
376 val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
377 if (advert & ADVERTISED_1000baseT_Half)
378 val |= ADVERTISE_1000HALF;
379 if (advert & ADVERTISED_1000baseT_Full)
380 val |= ADVERTISE_1000FULL;
381
382 err = mdio_write(phy, 0, MII_CTRL1000, val);
383 if (err)
384 return err;
385
386 val = 1;
387 if (advert & ADVERTISED_10baseT_Half)
388 val |= ADVERTISE_10HALF;
389 if (advert & ADVERTISED_10baseT_Full)
390 val |= ADVERTISE_10FULL;
391 if (advert & ADVERTISED_100baseT_Half)
392 val |= ADVERTISE_100HALF;
393 if (advert & ADVERTISED_100baseT_Full)
394 val |= ADVERTISE_100FULL;
395 if (advert & ADVERTISED_Pause)
396 val |= ADVERTISE_PAUSE_CAP;
397 if (advert & ADVERTISED_Asym_Pause)
398 val |= ADVERTISE_PAUSE_ASYM;
399 return mdio_write(phy, 0, MII_ADVERTISE, val);
400}
401
402/**
403 * t3_set_phy_speed_duplex - force PHY speed and duplex
404 * @phy: the PHY to operate on
405 * @speed: requested PHY speed
406 * @duplex: requested PHY duplex
407 *
408 * Force a 10/100/1000 PHY's speed and duplex. This also disables
409 * auto-negotiation except for GigE, where auto-negotiation is mandatory.
410 */
411int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
412{
413 int err;
414 unsigned int ctl;
415
416 err = mdio_read(phy, 0, MII_BMCR, &ctl);
417 if (err)
418 return err;
419
420 if (speed >= 0) {
421 ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
422 if (speed == SPEED_100)
423 ctl |= BMCR_SPEED100;
424 else if (speed == SPEED_1000)
425 ctl |= BMCR_SPEED1000;
426 }
427 if (duplex >= 0) {
428 ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
429 if (duplex == DUPLEX_FULL)
430 ctl |= BMCR_FULLDPLX;
431 }
432 if (ctl & BMCR_SPEED1000) /* auto-negotiation required for GigE */
433 ctl |= BMCR_ANENABLE;
434 return mdio_write(phy, 0, MII_BMCR, ctl);
435}
436
437static const struct adapter_info t3_adap_info[] = {
438 {2, 0, 0, 0,
439 F_GPIO2_OEN | F_GPIO4_OEN |
440 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
441 SUPPORTED_OFFLOAD,
442 &mi1_mdio_ops, "Chelsio PE9000"},
443 {2, 0, 0, 0,
444 F_GPIO2_OEN | F_GPIO4_OEN |
445 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
446 SUPPORTED_OFFLOAD,
447 &mi1_mdio_ops, "Chelsio T302"},
448 {1, 0, 0, 0,
449 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
450 F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
451 SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_OFFLOAD,
452 &mi1_mdio_ext_ops, "Chelsio T310"},
453 {2, 0, 0, 0,
454 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
455 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
456 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
457 SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_OFFLOAD,
458 &mi1_mdio_ext_ops, "Chelsio T320"},
459};
460
461/*
462 * Return the adapter_info structure with a given index. Out-of-range indices
463 * return NULL.
464 */
465const struct adapter_info *t3_get_adapter_info(unsigned int id)
466{
467 return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
468}
469
470#define CAPS_1G (SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full | \
471 SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII)
472#define CAPS_10G (SUPPORTED_10000baseT_Full | SUPPORTED_AUI)
473
474static const struct port_type_info port_types[] = {
475 {NULL},
476 {t3_ael1002_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
477 "10GBASE-XR"},
478 {t3_vsc8211_phy_prep, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
479 "10/100/1000BASE-T"},
480 {NULL, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
481 "10/100/1000BASE-T"},
482 {t3_xaui_direct_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
483 {NULL, CAPS_10G, "10GBASE-KX4"},
484 {t3_qt2045_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
485 {t3_ael1006_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
486 "10GBASE-SR"},
487 {NULL, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
488};
489
490#undef CAPS_1G
491#undef CAPS_10G
492
493#define VPD_ENTRY(name, len) \
494 u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]
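/*
 * Expansion sketch (for reference, not in the original source):
 * VPD_ENTRY(sn, 16) expands to
 *	u8 sn_kword[2]; u8 sn_len; u8 sn_data[16];
 * i.e. a two-byte VPD keyword, a length byte, and the payload bytes.
 */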
495
496/*
497 * Partial EEPROM Vital Product Data structure. Includes only the ID and
498 * VPD-R sections.
499 */
500struct t3_vpd {
501 u8 id_tag;
502 u8 id_len[2];
503 u8 id_data[16];
504 u8 vpdr_tag;
505 u8 vpdr_len[2];
506 VPD_ENTRY(pn, 16); /* part number */
507 VPD_ENTRY(ec, 16); /* EC level */
508 VPD_ENTRY(sn, 16); /* serial number */
509 VPD_ENTRY(na, 12); /* MAC address base */
510 VPD_ENTRY(cclk, 6); /* core clock */
511 VPD_ENTRY(mclk, 6); /* mem clock */
512 VPD_ENTRY(uclk, 6); /* uP clk */
513 VPD_ENTRY(mdc, 6); /* MDIO clk */
514 VPD_ENTRY(mt, 2); /* mem timing */
515 VPD_ENTRY(xaui0cfg, 6); /* XAUI0 config */
516 VPD_ENTRY(xaui1cfg, 6); /* XAUI1 config */
517 VPD_ENTRY(port0, 2); /* PHY0 complex */
518 VPD_ENTRY(port1, 2); /* PHY1 complex */
519 VPD_ENTRY(port2, 2); /* PHY2 complex */
520 VPD_ENTRY(port3, 2); /* PHY3 complex */
521 VPD_ENTRY(rv, 1); /* csum */
522 u32 pad; /* for multiple-of-4 sizing and alignment */
523};
524
525#define EEPROM_MAX_POLL 4
526#define EEPROM_STAT_ADDR 0x4000
527#define VPD_BASE 0xc00
528
529/**
530 * t3_seeprom_read - read a VPD EEPROM location
531 * @adapter: adapter to read
532 * @addr: EEPROM address
533 * @data: where to store the read data
534 *
535 * Read a 32-bit word from a location in VPD EEPROM using the card's PCI
536 * VPD ROM capability. A zero is written to the flag bit when the
 537 * address is written to the control register. The hardware device will
538 * set the flag to 1 when 4 bytes have been read into the data register.
539 */
540int t3_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
541{
542 u16 val;
543 int attempts = EEPROM_MAX_POLL;
544 unsigned int base = adapter->params.pci.vpd_cap_addr;
545
546 if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
547 return -EINVAL;
548
549 pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr);
550 do {
551 udelay(10);
552 pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
553 } while (!(val & PCI_VPD_ADDR_F) && --attempts);
554
555 if (!(val & PCI_VPD_ADDR_F)) {
556 CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
557 return -EIO;
558 }
559 pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, data);
560 *data = le32_to_cpu(*data);
561 return 0;
562}
563
564/**
565 * t3_seeprom_write - write a VPD EEPROM location
566 * @adapter: adapter to write
567 * @addr: EEPROM address
568 * @data: value to write
569 *
570 * Write a 32-bit word to a location in VPD EEPROM using the card's PCI
571 * VPD ROM capability.
572 */
573int t3_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
574{
575 u16 val;
576 int attempts = EEPROM_MAX_POLL;
577 unsigned int base = adapter->params.pci.vpd_cap_addr;
578
579 if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
580 return -EINVAL;
581
582 pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
583 cpu_to_le32(data));
 584	pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR,
585 addr | PCI_VPD_ADDR_F);
586 do {
587 msleep(1);
588 pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
589 } while ((val & PCI_VPD_ADDR_F) && --attempts);
590
591 if (val & PCI_VPD_ADDR_F) {
592 CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
593 return -EIO;
594 }
595 return 0;
596}
597
598/**
599 * t3_seeprom_wp - enable/disable EEPROM write protection
600 * @adapter: the adapter
601 * @enable: 1 to enable write protection, 0 to disable it
602 *
603 * Enables or disables write protection on the serial EEPROM.
604 */
605int t3_seeprom_wp(struct adapter *adapter, int enable)
606{
607 return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
608}
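/*
 * (Annotation, presumed: 0xc sets the block-protect bits in the serial
 * EEPROM's status word, reached through the magic EEPROM_STAT_ADDR
 * offset, while 0 clears them to re-enable writes.)
 */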
609
610/*
611 * Convert a character holding a hex digit to a number.
612 */
613static unsigned int hex2int(unsigned char c)
614{
615 return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
616}
617
618/**
619 * get_vpd_params - read VPD parameters from VPD EEPROM
620 * @adapter: adapter to read
621 * @p: where to store the parameters
622 *
623 * Reads card parameters stored in VPD EEPROM.
624 */
625static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
626{
627 int i, addr, ret;
628 struct t3_vpd vpd;
629
630 /*
631 * Card information is normally at VPD_BASE but some early cards had
632 * it at 0.
633 */
634 ret = t3_seeprom_read(adapter, VPD_BASE, (u32 *)&vpd);
635 if (ret)
636 return ret;
637 addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;
638
639 for (i = 0; i < sizeof(vpd); i += 4) {
640 ret = t3_seeprom_read(adapter, addr + i,
641 (u32 *)((u8 *)&vpd + i));
642 if (ret)
643 return ret;
644 }
645
646 p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
647 p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
648 p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
649 p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
650 p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
651
652 /* Old eeproms didn't have port information */
653 if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
654 p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
655 p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
656 } else {
657 p->port_type[0] = hex2int(vpd.port0_data[0]);
658 p->port_type[1] = hex2int(vpd.port1_data[0]);
659 p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
660 p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
661 }
662
663 for (i = 0; i < 6; i++)
664 p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
665 hex2int(vpd.na_data[2 * i + 1]);
666 return 0;
667}
668
669/* serial flash and firmware constants */
670enum {
671 SF_ATTEMPTS = 5, /* max retries for SF1 operations */
672 SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */
673 SF_SIZE = SF_SEC_SIZE * 8, /* serial flash size */
674
675 /* flash command opcodes */
676 SF_PROG_PAGE = 2, /* program page */
677 SF_WR_DISABLE = 4, /* disable writes */
678 SF_RD_STATUS = 5, /* read status register */
679 SF_WR_ENABLE = 6, /* enable writes */
680 SF_RD_DATA_FAST = 0xb, /* read flash */
681 SF_ERASE_SECTOR = 0xd8, /* erase sector */
682
683 FW_FLASH_BOOT_ADDR = 0x70000, /* start address of FW in flash */
684 FW_VERS_ADDR = 0x77ffc /* flash address holding FW version */
685};
686
687/**
688 * sf1_read - read data from the serial flash
689 * @adapter: the adapter
690 * @byte_cnt: number of bytes to read
691 * @cont: whether another operation will be chained
692 * @valp: where to store the read data
693 *
694 * Reads up to 4 bytes of data from the serial flash. The location of
695 * the read needs to be specified prior to calling this by issuing the
696 * appropriate commands to the serial flash.
697 */
698static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
699 u32 *valp)
700{
701 int ret;
702
703 if (!byte_cnt || byte_cnt > 4)
704 return -EINVAL;
705 if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
706 return -EBUSY;
707 t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
708 ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
709 if (!ret)
710 *valp = t3_read_reg(adapter, A_SF_DATA);
711 return ret;
712}
713
714/**
715 * sf1_write - write data to the serial flash
716 * @adapter: the adapter
717 * @byte_cnt: number of bytes to write
718 * @cont: whether another operation will be chained
719 * @val: value to write
720 *
721 * Writes up to 4 bytes of data to the serial flash. The location of
722 * the write needs to be specified prior to calling this by issuing the
723 * appropriate commands to the serial flash.
724 */
725static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
726 u32 val)
727{
728 if (!byte_cnt || byte_cnt > 4)
729 return -EINVAL;
730 if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
731 return -EBUSY;
732 t3_write_reg(adapter, A_SF_DATA, val);
733 t3_write_reg(adapter, A_SF_OP,
734 V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
735 return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
736}
737
738/**
739 * flash_wait_op - wait for a flash operation to complete
740 * @adapter: the adapter
741 * @attempts: max number of polls of the status register
742 * @delay: delay between polls in ms
743 *
744 * Wait for a flash operation to complete by polling the status register.
745 */
746static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
747{
748 int ret;
749 u32 status;
750
751 while (1) {
752 if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
753 (ret = sf1_read(adapter, 1, 0, &status)) != 0)
754 return ret;
755 if (!(status & 1))
756 return 0;
757 if (--attempts == 0)
758 return -EAGAIN;
759 if (delay)
760 msleep(delay);
761 }
762}
763
764/**
765 * t3_read_flash - read words from serial flash
766 * @adapter: the adapter
767 * @addr: the start address for the read
768 * @nwords: how many 32-bit words to read
769 * @data: where to store the read data
770 * @byte_oriented: whether to store data as bytes or as words
771 *
772 * Read the specified number of 32-bit words from the serial flash.
773 * If @byte_oriented is set the read data is stored as a byte array
774 * (i.e., big-endian), otherwise as 32-bit words in the platform's
 775 * natural endianness.
776 */
777int t3_read_flash(struct adapter *adapter, unsigned int addr,
778 unsigned int nwords, u32 *data, int byte_oriented)
779{
780 int ret;
781
782 if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
783 return -EINVAL;
784
785 addr = swab32(addr) | SF_RD_DATA_FAST;
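	/*
	 * (Annotation, presumed: the SF controller shifts A_SF_DATA out
	 * low byte first, so swab32() pre-reverses the address bytes and
	 * the flash sees the classic sequence: opcode 0x0b, addr[23:16],
	 * addr[15:8], addr[7:0].)
	 */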
786
787 if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
788 (ret = sf1_read(adapter, 1, 1, data)) != 0)
789 return ret;
790
791 for (; nwords; nwords--, data++) {
792 ret = sf1_read(adapter, 4, nwords > 1, data);
793 if (ret)
794 return ret;
795 if (byte_oriented)
796 *data = htonl(*data);
797 }
798 return 0;
799}
800
801/**
802 * t3_write_flash - write up to a page of data to the serial flash
803 * @adapter: the adapter
804 * @addr: the start address to write
805 * @n: length of data to write
806 * @data: the data to write
807 *
808 * Writes up to a page of data (256 bytes) to the serial flash starting
809 * at the given address.
810 */
811static int t3_write_flash(struct adapter *adapter, unsigned int addr,
812 unsigned int n, const u8 *data)
813{
814 int ret;
815 u32 buf[64];
816 unsigned int i, c, left, val, offset = addr & 0xff;
817
818 if (addr + n > SF_SIZE || offset + n > 256)
819 return -EINVAL;
820
821 val = swab32(addr) | SF_PROG_PAGE;
822
823 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
824 (ret = sf1_write(adapter, 4, 1, val)) != 0)
825 return ret;
826
827 for (left = n; left; left -= c) {
828 c = min(left, 4U);
829 for (val = 0, i = 0; i < c; ++i)
830 val = (val << 8) + *data++;
831
832 ret = sf1_write(adapter, c, c != left, val);
833 if (ret)
834 return ret;
835 }
836 if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
837 return ret;
838
839 /* Read the page to verify the write succeeded */
840 ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
841 if (ret)
842 return ret;
843
844 if (memcmp(data - n, (u8 *) buf + offset, n))
845 return -EIO;
846 return 0;
847}
848
849enum fw_version_type {
850 FW_VERSION_N3,
851 FW_VERSION_T3
852};
853
854/**
855 * t3_get_fw_version - read the firmware version
856 * @adapter: the adapter
857 * @vers: where to place the version
858 *
859 * Reads the FW version from flash.
860 */
861int t3_get_fw_version(struct adapter *adapter, u32 *vers)
862{
863 return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
864}
865
866/**
867 * t3_check_fw_version - check if the FW is compatible with this driver
868 * @adapter: the adapter
869 *
870 * Checks if an adapter's FW is compatible with the driver. Returns 0
871 * if the versions are compatible, a negative error otherwise.
872 */
873int t3_check_fw_version(struct adapter *adapter)
874{
875 int ret;
876 u32 vers;
877 unsigned int type, major, minor;
878
879 ret = t3_get_fw_version(adapter, &vers);
880 if (ret)
881 return ret;
882
883 type = G_FW_VERSION_TYPE(vers);
884 major = G_FW_VERSION_MAJOR(vers);
885 minor = G_FW_VERSION_MINOR(vers);
886
887 if (type == FW_VERSION_T3 && major == 3 && minor == 1)
888 return 0;
889
 890	CH_ERR(adapter, "found wrong FW version (%u.%u), "
891 "driver needs version 3.1\n", major, minor);
892 return -EINVAL;
893}
894
895/**
896 * t3_flash_erase_sectors - erase a range of flash sectors
897 * @adapter: the adapter
898 * @start: the first sector to erase
899 * @end: the last sector to erase
900 *
901 * Erases the sectors in the given range.
902 */
903static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
904{
905 while (start <= end) {
906 int ret;
907
908 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
909 (ret = sf1_write(adapter, 4, 0,
910 SF_ERASE_SECTOR | (start << 8))) != 0 ||
911 (ret = flash_wait_op(adapter, 5, 500)) != 0)
912 return ret;
913 start++;
914 }
915 return 0;
916}
917
 918/**
919 * t3_load_fw - download firmware
920 * @adapter: the adapter
 921 * @fw_data: the firmware image to write
922 * @size: image size
923 *
924 * Write the supplied firmware image to the card's serial flash.
925 * The FW image has the following sections: @size - 8 bytes of code and
926 * data, followed by 4 bytes of FW version, followed by the 32-bit
927 * 1's complement checksum of the whole image.
928 */
929int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
930{
931 u32 csum;
932 unsigned int i;
933 const u32 *p = (const u32 *)fw_data;
934 int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;
935
936 if (size & 3)
937 return -EINVAL;
938 if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
939 return -EFBIG;
940
941 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
942 csum += ntohl(p[i]);
943 if (csum != 0xffffffff) {
944 CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
945 csum);
946 return -EINVAL;
947 }
948
949 ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
950 if (ret)
951 goto out;
952
953 size -= 8; /* trim off version and checksum */
954 for (addr = FW_FLASH_BOOT_ADDR; size;) {
955 unsigned int chunk_size = min(size, 256U);
956
957 ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
958 if (ret)
959 goto out;
960
961 addr += chunk_size;
962 fw_data += chunk_size;
963 size -= chunk_size;
964 }
965
966 ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
967out:
968 if (ret)
969 CH_ERR(adapter, "firmware download failed, error %d\n", ret);
970 return ret;
971}
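/*
 * Host-side sketch (not part of the driver): building the trailing
 * checksum word for an image so that the mod-2^32 sum of all big-endian
 * words, trailer included, equals 0xffffffff as verified above.
 */
static u32 example_fw_trailer_csum(const __be32 *img, unsigned int nwords)
{
	u32 sum = 0;
	unsigned int i;

	for (i = 0; i < nwords; i++)	/* every word before the trailer */
		sum += ntohl(img[i]);
	return 0xffffffffU - sum;	/* image stores htonl() of this */
}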
972
973#define CIM_CTL_BASE 0x2000
974
975/**
976 * t3_cim_ctl_blk_read - read a block from CIM control region
977 *
978 * @adap: the adapter
979 * @addr: the start address within the CIM control region
980 * @n: number of words to read
981 * @valp: where to store the result
982 *
983 * Reads a block of 4-byte words from the CIM control region.
984 */
985int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
986 unsigned int n, unsigned int *valp)
987{
988 int ret = 0;
989
990 if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
991 return -EBUSY;
992
993 for ( ; !ret && n--; addr += 4) {
994 t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
995 ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
996 0, 5, 2);
997 if (!ret)
998 *valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
999 }
1000 return ret;
1001}
1002
1003
1004/**
1005 * t3_link_changed - handle interface link changes
1006 * @adapter: the adapter
1007 * @port_id: the port index that changed link state
1008 *
1009 * Called when a port's link settings change to propagate the new values
1010 * to the associated PHY and MAC. After performing the common tasks it
1011 * invokes an OS-specific handler.
1012 */
1013void t3_link_changed(struct adapter *adapter, int port_id)
1014{
1015 int link_ok, speed, duplex, fc;
1016 struct port_info *pi = adap2pinfo(adapter, port_id);
1017 struct cphy *phy = &pi->phy;
1018 struct cmac *mac = &pi->mac;
1019 struct link_config *lc = &pi->link_config;
1020
1021 phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
1022
1023 if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
1024 uses_xaui(adapter)) {
1025 if (link_ok)
1026 t3b_pcs_reset(mac);
1027 t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
1028 link_ok ? F_TXACTENABLE | F_RXEN : 0);
1029 }
1030 lc->link_ok = link_ok;
1031 lc->speed = speed < 0 ? SPEED_INVALID : speed;
1032 lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
1033 if (lc->requested_fc & PAUSE_AUTONEG)
1034 fc &= lc->requested_fc;
1035 else
1036 fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1037
1038 if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
1039 /* Set MAC speed, duplex, and flow control to match PHY. */
1040 t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
1041 lc->fc = fc;
1042 }
1043
1044 t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
1045}
1046
1047/**
1048 * t3_link_start - apply link configuration to MAC/PHY
1049 * @phy: the PHY to setup
1050 * @mac: the MAC to setup
1051 * @lc: the requested link configuration
1052 *
1053 * Set up a port's MAC and PHY according to a desired link configuration.
1054 * - If the PHY can auto-negotiate first decide what to advertise, then
1055 * enable/disable auto-negotiation as desired, and reset.
1056 * - If the PHY does not auto-negotiate just reset it.
1057 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1058 * otherwise do it later based on the outcome of auto-negotiation.
1059 */
1060int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
1061{
1062 unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1063
1064 lc->link_ok = 0;
1065 if (lc->supported & SUPPORTED_Autoneg) {
1066 lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
1067 if (fc) {
1068 lc->advertising |= ADVERTISED_Asym_Pause;
1069 if (fc & PAUSE_RX)
1070 lc->advertising |= ADVERTISED_Pause;
1071 }
1072 phy->ops->advertise(phy, lc->advertising);
1073
1074 if (lc->autoneg == AUTONEG_DISABLE) {
1075 lc->speed = lc->requested_speed;
1076 lc->duplex = lc->requested_duplex;
1077 lc->fc = (unsigned char)fc;
1078 t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
1079 fc);
1080 /* Also disables autoneg */
1081 phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
1082 phy->ops->reset(phy, 0);
1083 } else
1084 phy->ops->autoneg_enable(phy);
1085 } else {
1086 t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
1087 lc->fc = (unsigned char)fc;
1088 phy->ops->reset(phy, 0);
1089 }
1090 return 0;
1091}
1092
1093/**
1094 * t3_set_vlan_accel - control HW VLAN extraction
1095 * @adapter: the adapter
1096 * @ports: bitmap of adapter ports to operate on
1097 * @on: enable (1) or disable (0) HW VLAN extraction
1098 *
 1099 * Enables or disables HW extraction of VLAN tags for the given ports.
1100 */
1101void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on)
1102{
1103 t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
1104 ports << S_VLANEXTRACTIONENABLE,
1105 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
1106}
1107
1108struct intr_info {
1109 unsigned int mask; /* bits to check in interrupt status */
1110 const char *msg; /* message to print or NULL */
1111 short stat_idx; /* stat counter to increment or -1 */
1112 unsigned short fatal:1; /* whether the condition reported is fatal */
1113};
1114
1115/**
 1116 * t3_handle_intr_status - table-driven interrupt handler
1117 * @adapter: the adapter that generated the interrupt
1118 * @reg: the interrupt status register to process
1119 * @mask: a mask to apply to the interrupt status
1120 * @acts: table of interrupt actions
 1121 * @stats: statistics counters tracking interrupt occurrences
1122 *
 1123 * A table-driven interrupt handler that applies a set of masks to an
 1124 * interrupt status word and performs the corresponding actions if the
 1125 * interrupts described by the mask have occurred. The actions include
1126 * optionally printing a warning or alert message, and optionally
1127 * incrementing a stat counter. The table is terminated by an entry
1128 * specifying mask 0. Returns the number of fatal interrupt conditions.
1129 */
1130static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
1131 unsigned int mask,
1132 const struct intr_info *acts,
1133 unsigned long *stats)
1134{
1135 int fatal = 0;
1136 unsigned int status = t3_read_reg(adapter, reg) & mask;
1137
1138 for (; acts->mask; ++acts) {
1139 if (!(status & acts->mask))
1140 continue;
1141 if (acts->fatal) {
1142 fatal++;
1143 CH_ALERT(adapter, "%s (0x%x)\n",
1144 acts->msg, status & acts->mask);
1145 } else if (acts->msg)
1146 CH_WARN(adapter, "%s (0x%x)\n",
1147 acts->msg, status & acts->mask);
1148 if (acts->stat_idx >= 0)
1149 stats[acts->stat_idx]++;
1150 }
1151 if (status) /* clear processed interrupts */
1152 t3_write_reg(adapter, reg, status);
1153 return fatal;
1154}
1155
1156#define SGE_INTR_MASK (F_RSPQDISABLED)
1157#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
1158 F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
1159 F_NFASRCHFAIL)
1160#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
1161#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1162 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
1163 F_TXFIFO_UNDERRUN | F_RXFIFO_OVERFLOW)
1164#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
1165 F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
1166 F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
1167 F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
1168 V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
1169 V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
1170#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
1171 F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
1172 /* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
1173 V_BISTERR(M_BISTERR) | F_PEXERR)
1174#define ULPRX_INTR_MASK F_PARERR
1175#define ULPTX_INTR_MASK 0
1176#define CPLSW_INTR_MASK (F_TP_FRAMING_ERROR | \
1177 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
1178 F_ZERO_SWITCH_ERROR)
1179#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
1180 F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
1181 F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
1182 F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT)
1183#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
1184 V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
1185 V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
1186#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
1187 V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
1188 V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
1189#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
1190 V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
1191 V_RXTPPARERRENB(M_RXTPPARERRENB) | \
1192 V_MCAPARERRENB(M_MCAPARERRENB))
1193#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
1194 F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
1195 F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
1196 F_MPS0 | F_CPL_SWITCH)
1197
1198/*
1199 * Interrupt handler for the PCIX1 module.
1200 */
1201static void pci_intr_handler(struct adapter *adapter)
1202{
1203 static const struct intr_info pcix1_intr_info[] = {
1204 {F_MSTDETPARERR, "PCI master detected parity error", -1, 1},
1205 {F_SIGTARABT, "PCI signaled target abort", -1, 1},
1206 {F_RCVTARABT, "PCI received target abort", -1, 1},
1207 {F_RCVMSTABT, "PCI received master abort", -1, 1},
1208 {F_SIGSYSERR, "PCI signaled system error", -1, 1},
1209 {F_DETPARERR, "PCI detected parity error", -1, 1},
1210 {F_SPLCMPDIS, "PCI split completion discarded", -1, 1},
1211 {F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1},
1212 {F_RCVSPLCMPERR, "PCI received split completion error", -1,
1213 1},
1214 {F_DETCORECCERR, "PCI correctable ECC error",
1215 STAT_PCI_CORR_ECC, 0},
1216 {F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1},
1217 {F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
1218 {V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
1219 1},
1220 {V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
1221 1},
1222 {V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
1223 1},
1224 {V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
1225 "error", -1, 1},
1226 {0}
1227 };
1228
1229 if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
1230 pcix1_intr_info, adapter->irq_stats))
1231 t3_fatal_err(adapter);
1232}
1233
1234/*
1235 * Interrupt handler for the PCIE module.
1236 */
1237static void pcie_intr_handler(struct adapter *adapter)
1238{
1239 static const struct intr_info pcie_intr_info[] = {
1240 {F_PEXERR, "PCI PEX error", -1, 1},
1241 {F_UNXSPLCPLERRR,
1242 "PCI unexpected split completion DMA read error", -1, 1},
1243 {F_UNXSPLCPLERRC,
1244 "PCI unexpected split completion DMA command error", -1, 1},
1245 {F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
1246 {F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1},
1247 {F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1},
1248 {F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
1249 {V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
1250 "PCI MSI-X table/PBA parity error", -1, 1},
1251 {V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
1252 {0}
1253 };
1254
1255 if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
1256 pcie_intr_info, adapter->irq_stats))
1257 t3_fatal_err(adapter);
1258}
1259
1260/*
1261 * TP interrupt handler.
1262 */
1263static void tp_intr_handler(struct adapter *adapter)
1264{
1265 static const struct intr_info tp_intr_info[] = {
1266 {0xffffff, "TP parity error", -1, 1},
1267 {0x1000000, "TP out of Rx pages", -1, 1},
1268 {0x2000000, "TP out of Tx pages", -1, 1},
1269 {0}
1270 };
1271
1272 if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
1273 tp_intr_info, NULL))
1274 t3_fatal_err(adapter);
1275}
1276
1277/*
1278 * CIM interrupt handler.
1279 */
1280static void cim_intr_handler(struct adapter *adapter)
1281{
1282 static const struct intr_info cim_intr_info[] = {
1283 {F_RSVDSPACEINT, "CIM reserved space write", -1, 1},
1284 {F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1},
1285 {F_FLASHRANGEINT, "CIM flash address out of range", -1, 1},
1286 {F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1},
1287 {F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1},
1288 {F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1},
1289 {F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1},
1290 {F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1},
1291 {F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1},
1292 {F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
1293 {F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
1294 {F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
1295 {0}
1296 };
1297
1298 if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
1299 cim_intr_info, NULL))
1300 t3_fatal_err(adapter);
1301}
1302
1303/*
1304 * ULP RX interrupt handler.
1305 */
1306static void ulprx_intr_handler(struct adapter *adapter)
1307{
1308 static const struct intr_info ulprx_intr_info[] = {
1309 {F_PARERR, "ULP RX parity error", -1, 1},
1310 {0}
1311 };
1312
1313 if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
1314 ulprx_intr_info, NULL))
1315 t3_fatal_err(adapter);
1316}
1317
1318/*
1319 * ULP TX interrupt handler.
1320 */
1321static void ulptx_intr_handler(struct adapter *adapter)
1322{
1323 static const struct intr_info ulptx_intr_info[] = {
1324 {F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
1325 STAT_ULP_CH0_PBL_OOB, 0},
1326 {F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
1327 STAT_ULP_CH1_PBL_OOB, 0},
1328 {0}
1329 };
1330
1331 if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
1332 ulptx_intr_info, adapter->irq_stats))
1333 t3_fatal_err(adapter);
1334}
1335
1336#define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
1337 F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
1338 F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
1339 F_ICSPI1_TX_FRAMING_ERROR)
1340#define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
1341 F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
1342 F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
1343 F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
1344
1345/*
1346 * PM TX interrupt handler.
1347 */
1348static void pmtx_intr_handler(struct adapter *adapter)
1349{
1350 static const struct intr_info pmtx_intr_info[] = {
1351 {F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1},
1352 {ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1},
1353 {OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1},
1354 {V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
1355 "PMTX ispi parity error", -1, 1},
1356 {V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
1357 "PMTX ospi parity error", -1, 1},
1358 {0}
1359 };
1360
1361 if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
1362 pmtx_intr_info, NULL))
1363 t3_fatal_err(adapter);
1364}
1365
1366#define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
1367 F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
1368 F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
1369 F_IESPI1_TX_FRAMING_ERROR)
1370#define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
1371 F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
1372 F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
1373 F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
1374
1375/*
1376 * PM RX interrupt handler.
1377 */
1378static void pmrx_intr_handler(struct adapter *adapter)
1379{
1380 static const struct intr_info pmrx_intr_info[] = {
1381 {F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1},
1382 {IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1},
1383 {OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1},
1384 {V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
1385 "PMRX ispi parity error", -1, 1},
1386 {V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
1387 "PMRX ospi parity error", -1, 1},
1388 {0}
1389 };
1390
1391 if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
1392 pmrx_intr_info, NULL))
1393 t3_fatal_err(adapter);
1394}
1395
1396/*
1397 * CPL switch interrupt handler.
1398 */
1399static void cplsw_intr_handler(struct adapter *adapter)
1400{
1401 static const struct intr_info cplsw_intr_info[] = {
1402/* { F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1 }, */
1403 {F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
1404 {F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
1405 {F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
1406 {F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1},
1407 {0}
1408 };
1409
1410 if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
1411 cplsw_intr_info, NULL))
1412 t3_fatal_err(adapter);
1413}
1414
1415/*
1416 * MPS interrupt handler.
1417 */
1418static void mps_intr_handler(struct adapter *adapter)
1419{
1420 static const struct intr_info mps_intr_info[] = {
1421 {0x1ff, "MPS parity error", -1, 1},
1422 {0}
1423 };
1424
1425 if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
1426 mps_intr_info, NULL))
1427 t3_fatal_err(adapter);
1428}
1429
1430#define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)
1431
1432/*
1433 * MC7 interrupt handler.
1434 */
1435static void mc7_intr_handler(struct mc7 *mc7)
1436{
1437 struct adapter *adapter = mc7->adapter;
1438 u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);
1439
1440 if (cause & F_CE) {
1441 mc7->stats.corr_err++;
1442 CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
1443 "data 0x%x 0x%x 0x%x\n", mc7->name,
1444 t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
1445 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
1446 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
1447 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
1448 }
1449
1450 if (cause & F_UE) {
1451 mc7->stats.uncorr_err++;
1452 CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
1453 "data 0x%x 0x%x 0x%x\n", mc7->name,
1454 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
1455 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
1456 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
1457 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
1458 }
1459
1460 if (G_PE(cause)) {
1461 mc7->stats.parity_err++;
1462 CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
1463 mc7->name, G_PE(cause));
1464 }
1465
1466 if (cause & F_AE) {
1467 u32 addr = 0;
1468
1469 if (adapter->params.rev > 0)
1470 addr = t3_read_reg(adapter,
1471 mc7->offset + A_MC7_ERR_ADDR);
1472 mc7->stats.addr_err++;
1473 CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
1474 mc7->name, addr);
1475 }
1476
1477 if (cause & MC7_INTR_FATAL)
1478 t3_fatal_err(adapter);
1479
1480 t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
1481}
1482
1483#define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1484 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
1485/*
1486 * XGMAC interrupt handler.
1487 */
1488static int mac_intr_handler(struct adapter *adap, unsigned int idx)
1489{
1490 struct cmac *mac = &adap2pinfo(adap, idx)->mac;
1491 u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset);
1492
1493 if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
1494 mac->stats.tx_fifo_parity_err++;
1495 CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
1496 }
1497 if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
1498 mac->stats.rx_fifo_parity_err++;
1499 CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
1500 }
1501 if (cause & F_TXFIFO_UNDERRUN)
1502 mac->stats.tx_fifo_urun++;
1503 if (cause & F_RXFIFO_OVERFLOW)
1504 mac->stats.rx_fifo_ovfl++;
1505 if (cause & V_SERDES_LOS(M_SERDES_LOS))
1506 mac->stats.serdes_signal_loss++;
1507 if (cause & F_XAUIPCSCTCERR)
1508 mac->stats.xaui_pcs_ctc_err++;
1509 if (cause & F_XAUIPCSALIGNCHANGE)
1510 mac->stats.xaui_pcs_align_change++;
1511
1512 t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
1513 if (cause & XGM_INTR_FATAL)
1514 t3_fatal_err(adap);
1515 return cause != 0;
1516}
1517
1518/*
1519 * Interrupt handler for PHY events.
1520 */
1521int t3_phy_intr_handler(struct adapter *adapter)
1522{
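	/*
	 * Bits within A_T3DBG_INT_CAUSE that signal a PHY interrupt for each
	 * port: bit 3 (0x8) for port 0 and bit 5 (0x20) for port 1,
	 * apparently the GPIO lines the PHYs are wired to.
	 */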
1523 static const int intr_gpio_bits[] = { 8, 0x20 };
1524
1525 u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
1526
1527 for_each_port(adapter, i) {
1528 if (cause & intr_gpio_bits[i]) {
1529 struct cphy *phy = &adap2pinfo(adapter, i)->phy;
1530 int phy_cause = phy->ops->intr_handler(phy);
1531
1532 if (phy_cause & cphy_cause_link_change)
1533 t3_link_changed(adapter, i);
1534 if (phy_cause & cphy_cause_fifo_error)
1535 phy->fifo_errors++;
1536 }
1537 }
1538
1539 t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
1540 return 0;
1541}
1542
1543/*
1544 * T3 slow path (non-data) interrupt handler.
1545 */
1546int t3_slow_intr_handler(struct adapter *adapter)
1547{
1548 u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);
1549
1550 cause &= adapter->slow_intr_mask;
1551 if (!cause)
1552 return 0;
1553 if (cause & F_PCIM0) {
1554 if (is_pcie(adapter))
1555 pcie_intr_handler(adapter);
1556 else
1557 pci_intr_handler(adapter);
1558 }
1559 if (cause & F_SGE3)
1560 t3_sge_err_intr_handler(adapter);
1561 if (cause & F_MC7_PMRX)
1562 mc7_intr_handler(&adapter->pmrx);
1563 if (cause & F_MC7_PMTX)
1564 mc7_intr_handler(&adapter->pmtx);
1565 if (cause & F_MC7_CM)
1566 mc7_intr_handler(&adapter->cm);
1567 if (cause & F_CIM)
1568 cim_intr_handler(adapter);
1569 if (cause & F_TP1)
1570 tp_intr_handler(adapter);
1571 if (cause & F_ULP2_RX)
1572 ulprx_intr_handler(adapter);
1573 if (cause & F_ULP2_TX)
1574 ulptx_intr_handler(adapter);
1575 if (cause & F_PM1_RX)
1576 pmrx_intr_handler(adapter);
1577 if (cause & F_PM1_TX)
1578 pmtx_intr_handler(adapter);
1579 if (cause & F_CPL_SWITCH)
1580 cplsw_intr_handler(adapter);
1581 if (cause & F_MPS0)
1582 mps_intr_handler(adapter);
1583 if (cause & F_MC5A)
1584 t3_mc5_intr_handler(&adapter->mc5);
1585 if (cause & F_XGMAC0_0)
1586 mac_intr_handler(adapter, 0);
1587 if (cause & F_XGMAC0_1)
1588 mac_intr_handler(adapter, 1);
1589 if (cause & F_T3DBG)
1590 t3_os_ext_intr_handler(adapter);
1591
1592 /* Clear the interrupts just processed. */
1593 t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
1594 t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
1595 return 1;
1596}
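/*
 * Illustrative sketch only (hypothetical function, guarded out): a minimal
 * top-level interrupt handler that defers all slow-path work to
 * t3_slow_intr_handler().  The driver's real ISRs, which also service the
 * data-path queues, live elsewhere in the driver.
 */
#if 0	/* example */
static irqreturn_t t3_example_intr(int irq, void *cookie)
{
	struct adapter *adapter = cookie;

	/* t3_slow_intr_handler() returns 1 if any cause bit was handled. */
	return IRQ_RETVAL(t3_slow_intr_handler(adapter));
}
#endif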
1597
1598/**
1599 * t3_intr_enable - enable interrupts
1600 * @adapter: the adapter whose interrupts should be enabled
1601 *
1602 * Enable interrupts by setting the interrupt enable registers of the
1603 * various HW modules and then enabling the top-level interrupt
1604 * concentrator.
1605 */
1606void t3_intr_enable(struct adapter *adapter)
1607{
1608 static const struct addr_val_pair intr_en_avp[] = {
1609 {A_SG_INT_ENABLE, SGE_INTR_MASK},
1610 {A_MC7_INT_ENABLE, MC7_INTR_MASK},
1611 {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1612 MC7_INTR_MASK},
1613 {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1614 MC7_INTR_MASK},
1615 {A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
1616 {A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
1617 {A_TP_INT_ENABLE, 0x3bfffff},
1618 {A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
1619 {A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
1620 {A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
1621 {A_MPS_INT_ENABLE, MPS_INTR_MASK},
1622 };
1623
1624 adapter->slow_intr_mask = PL_INTR_MASK;
1625
1626 t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
1627
1628 if (adapter->params.rev > 0) {
1629 t3_write_reg(adapter, A_CPL_INTR_ENABLE,
1630 CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
1631 t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
1632 ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
1633 F_PBL_BOUND_ERR_CH1);
1634 } else {
1635 t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
1636 t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
1637 }
1638
1639 t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW,
1640 adapter_info(adapter)->gpio_intr);
1641 t3_write_reg(adapter, A_T3DBG_INT_ENABLE,
1642 adapter_info(adapter)->gpio_intr);
1643 if (is_pcie(adapter))
1644 t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
1645 else
1646 t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
1647 t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
1648 t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
1649}
1650
1651/**
1652 * t3_intr_disable - disable a card's interrupts
1653 * @adapter: the adapter whose interrupts should be disabled
1654 *
1655 * Disable interrupts. We only disable the top-level interrupt
1656 * concentrator and the SGE data interrupts.
1657 */
1658void t3_intr_disable(struct adapter *adapter)
1659{
1660 t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
1661 t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
1662 adapter->slow_intr_mask = 0;
1663}
1664
1665/**
1666 * t3_intr_clear - clear all interrupts
1667 * @adapter: the adapter whose interrupts should be cleared
1668 *
1669 * Clears all interrupts.
1670 */
1671void t3_intr_clear(struct adapter *adapter)
1672{
1673 static const unsigned int cause_reg_addr[] = {
1674 A_SG_INT_CAUSE,
1675 A_SG_RSPQ_FL_STATUS,
1676 A_PCIX_INT_CAUSE,
1677 A_MC7_INT_CAUSE,
1678 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1679 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1680 A_CIM_HOST_INT_CAUSE,
1681 A_TP_INT_CAUSE,
1682 A_MC5_DB_INT_CAUSE,
1683 A_ULPRX_INT_CAUSE,
1684 A_ULPTX_INT_CAUSE,
1685 A_CPL_INTR_CAUSE,
1686 A_PM1_TX_INT_CAUSE,
1687 A_PM1_RX_INT_CAUSE,
1688 A_MPS_INT_CAUSE,
1689 A_T3DBG_INT_CAUSE,
1690 };
1691 unsigned int i;
1692
1693 /* Clear PHY and MAC interrupts for each port. */
1694 for_each_port(adapter, i)
1695 t3_port_intr_clear(adapter, i);
1696
1697 for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
1698 t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);
1699
1700 t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
1701 t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
1702}
1703
1704/**
1705 * t3_port_intr_enable - enable port-specific interrupts
1706 * @adapter: associated adapter
1707 * @idx: index of port whose interrupts should be enabled
1708 *
1709 * Enable port-specific (i.e., MAC and PHY) interrupts for the given
1710 * adapter port.
1711 */
1712void t3_port_intr_enable(struct adapter *adapter, int idx)
1713{
1714 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
1715
1716 t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
1717 t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
1718 phy->ops->intr_enable(phy);
1719}
1720
1721/**
1722 * t3_port_intr_disable - disable port-specific interrupts
1723 * @adapter: associated adapter
1724 * @idx: index of port whose interrupts should be disabled
1725 *
1726 * Disable port-specific (i.e., MAC and PHY) interrupts for the given
1727 * adapter port.
1728 */
1729void t3_port_intr_disable(struct adapter *adapter, int idx)
1730{
1731 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
1732
1733 t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
1734 t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
1735 phy->ops->intr_disable(phy);
1736}
1737
1738/**
1739 * t3_port_intr_clear - clear port-specific interrupts
1740 * @adapter: associated adapter
1741 * @idx: index of port whose interrupts to clear
1742 *
1743 * Clear port-specific (i.e., MAC and PHY) interrupts for the given
1744 * adapter port.
1745 */
1746void t3_port_intr_clear(struct adapter *adapter, int idx)
1747{
1748 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
1749
1750 t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
1751 t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */
1752 phy->ops->intr_clear(phy);
1753}
1754
1755/**
1756 * t3_sge_write_context - write an SGE context
1757 * @adapter: the adapter
1758 * @id: the context id
1759 * @type: the context type
1760 *
1761 * Program an SGE context with the values already loaded in the
1762 * CONTEXT_DATA0-3 registers.
1763 */
1764static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
1765 unsigned int type)
1766{
1767 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
1768 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
1769 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
1770 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
1771 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
1772 V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
1773 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
1774 0, 5, 1);
1775}
1776
1777/**
1778 * t3_sge_init_ecntxt - initialize an SGE egress context
1779 * @adapter: the adapter to configure
1780 * @id: the context id
1781 * @gts_enable: whether to enable GTS for the context
1782 * @type: the egress context type
1783 * @respq: associated response queue
1784 * @base_addr: base address of queue
1785 * @size: number of queue entries
1786 * @token: uP token
1787 * @gen: initial generation value for the context
1788 * @cidx: consumer pointer
1789 *
1790 * Initialize an SGE egress context and make it ready for use. If the
1791 * platform allows concurrent context operations, the caller is
1792 * responsible for appropriate locking.
1793 */
1794int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
1795 enum sge_context_type type, int respq, u64 base_addr,
1796 unsigned int size, unsigned int token, int gen,
1797 unsigned int cidx)
1798{
1799 unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;
1800
1801 if (base_addr & 0xfff) /* must be 4K aligned */
1802 return -EINVAL;
1803 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1804 return -EBUSY;
1805
1806 base_addr >>= 12;
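	/* The 4 KB-aligned base address is stored as a page number split
	 * across DATA1 (low 16 bits), DATA2 (next 32) and DATA3 (top 4). */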
1807 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
1808 V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
1809 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
1810 V_EC_BASE_LO(base_addr & 0xffff));
1811 base_addr >>= 16;
1812 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr);
1813 base_addr >>= 32;
1814 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
1815 V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) |
1816 V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
1817 F_EC_VALID);
1818 return t3_sge_write_context(adapter, id, F_EGRESS);
1819}
1820
1821/**
1822 * t3_sge_init_flcntxt - initialize an SGE free-buffer list context
1823 * @adapter: the adapter to configure
1824 * @id: the context id
1825 * @gts_enable: whether to enable GTS for the context
1826 * @base_addr: base address of queue
1827 * @size: number of queue entries
1828 * @bsize: size of each buffer for this queue
1829 * @cong_thres: threshold to signal congestion to upstream producers
1830 * @gen: initial generation value for the context
1831 * @cidx: consumer pointer
1832 *
1833 * Initialize an SGE free list context and make it ready for use. The
1834 * caller is responsible for ensuring only one context operation occurs
1835 * at a time.
1836 */
1837int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
1838 int gts_enable, u64 base_addr, unsigned int size,
1839 unsigned int bsize, unsigned int cong_thres, int gen,
1840 unsigned int cidx)
1841{
1842 if (base_addr & 0xfff) /* must be 4K aligned */
1843 return -EINVAL;
1844 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1845 return -EBUSY;
1846
1847 base_addr >>= 12;
1848 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr);
1849 base_addr >>= 32;
1850 t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
1851 V_FL_BASE_HI((u32) base_addr) |
1852 V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
1853 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
1854 V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
1855 V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
1856 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
1857 V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
1858 V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
1859 return t3_sge_write_context(adapter, id, F_FREELIST);
1860}
1861
1862/**
1863 * t3_sge_init_rspcntxt - initialize an SGE response queue context
1864 * @adapter: the adapter to configure
1865 * @id: the context id
1866 * @irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
1867 * @base_addr: base address of queue
1868 * @size: number of queue entries
1869 * @fl_thres: threshold for selecting the normal or jumbo free list
1870 * @gen: initial generation value for the context
1871 * @cidx: consumer pointer
1872 *
1873 * Initialize an SGE response queue context and make it ready for use.
1874 * The caller is responsible for ensuring only one context operation
1875 * occurs at a time.
1876 */
1877int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
1878 int irq_vec_idx, u64 base_addr, unsigned int size,
1879 unsigned int fl_thres, int gen, unsigned int cidx)
1880{
1881 unsigned int intr = 0;
1882
1883 if (base_addr & 0xfff) /* must be 4K aligned */
1884 return -EINVAL;
1885 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1886 return -EBUSY;
1887
1888 base_addr >>= 12;
1889 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
1890 V_CQ_INDEX(cidx));
1891 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
1892 base_addr >>= 32;
1893 if (irq_vec_idx >= 0)
1894 intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
1895 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
1896 V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen));
1897 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
1898 return t3_sge_write_context(adapter, id, F_RESPONSEQ);
1899}
1900
1901/**
1902 * t3_sge_init_cqcntxt - initialize an SGE completion queue context
1903 * @adapter: the adapter to configure
1904 * @id: the context id
1905 * @base_addr: base address of queue
1906 * @size: number of queue entries
1907 * @rspq: response queue for async notifications
1908 * @ovfl_mode: CQ overflow mode
1909 * @credits: completion queue credits
1910 * @credit_thres: the credit threshold
1911 *
1912 * Initialize an SGE completion queue context and make it ready for use.
1913 * The caller is responsible for ensuring only one context operation
1914 * occurs at a time.
1915 */
1916int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
1917 unsigned int size, int rspq, int ovfl_mode,
1918 unsigned int credits, unsigned int credit_thres)
1919{
1920 if (base_addr & 0xfff) /* must be 4K aligned */
1921 return -EINVAL;
1922 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1923 return -EBUSY;
1924
1925 base_addr >>= 12;
1926 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
1927 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
1928 base_addr >>= 32;
1929 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
1930 V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
1931 V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode));
1932 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
1933 V_CQ_CREDIT_THRES(credit_thres));
1934 return t3_sge_write_context(adapter, id, F_CQ);
1935}
1936
1937/**
1938 * t3_sge_enable_ecntxt - enable/disable an SGE egress context
1939 * @adapter: the adapter
1940 * @id: the egress context id
1941 * @enable: enable (1) or disable (0) the context
1942 *
1943 * Enable or disable an SGE egress context. The caller is responsible for
1944 * ensuring only one context operation occurs at a time.
1945 */
1946int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
1947{
1948 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1949 return -EBUSY;
1950
1951 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
1952 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
1953 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
1954 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
1955 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
1956 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
1957 V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
1958 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
1959 0, 5, 1);
1960}
1961
1962/**
1963 * t3_sge_disable_fl - disable an SGE free-buffer list
1964 * @adapter: the adapter
1965 * @id: the free list context id
1966 *
1967 * Disable an SGE free-buffer list. The caller is responsible for
1968 * ensuring only one context operation occurs at a time.
1969 */
1970int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
1971{
1972 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1973 return -EBUSY;
1974
1975 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
1976 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
1977 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
1978 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
1979 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
1980 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
1981 V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
1982 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
1983 0, 5, 1);
1984}
1985
1986/**
1987 * t3_sge_disable_rspcntxt - disable an SGE response queue
1988 * @adapter: the adapter
1989 * @id: the response queue context id
1990 *
1991 * Disable an SGE response queue. The caller is responsible for
1992 * ensuring only one context operation occurs at a time.
1993 */
1994int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
1995{
1996 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1997 return -EBUSY;
1998
1999 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2000 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2001 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2002 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2003 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2004 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2005 V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
2006 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2007 0, 5, 1);
2008}
2009
2010/**
2011 * t3_sge_disable_cqcntxt - disable an SGE completion queue
2012 * @adapter: the adapter
2013 * @id: the completion queue context id
2014 *
2015 * Disable an SGE completion queue. The caller is responsible for
2016 * ensuring only one context operation occurs at a time.
2017 */
2018int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
2019{
2020 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2021 return -EBUSY;
2022
2023 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2024 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2025 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2026 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2027 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2028 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2029 V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
2030 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2031 0, 5, 1);
2032}
2033
2034/**
2035 * t3_sge_cqcntxt_op - perform an operation on a completion queue context
2036 * @adapter: the adapter
2037 * @id: the context id
2038 * @op: the operation to perform
2039 *
2040 * Perform the selected operation on an SGE completion queue context.
2041 * The caller is responsible for ensuring only one context operation
2042 * occurs at a time.
2043 */
2044int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
2045 unsigned int credits)
2046{
2047 u32 val;
2048
2049 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2050 return -EBUSY;
2051
2052 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
2053 t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
2054 V_CONTEXT(id) | F_CQ);
2055 if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2056 0, 5, 1, &val))
2057 return -EIO;
2058
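	/* Ops 2-6 also report the current CQ index: rev > 0 parts return it
	 * directly in the command status, while rev-0 parts need an explicit
	 * context read to fetch it. */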
2059 if (op >= 2 && op < 7) {
2060 if (adapter->params.rev > 0)
2061 return G_CQ_INDEX(val);
2062
2063 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2064 V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
2065 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
2066 F_CONTEXT_CMD_BUSY, 0, 5, 1))
2067 return -EIO;
2068 return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
2069 }
2070 return 0;
2071}
2072
2073/**
2074 * t3_sge_read_context - read an SGE context
2075 * @type: the context type
2076 * @adapter: the adapter
2077 * @id: the context id
2078 * @data: holds the retrieved context
2079 *
2080 * Read an SGE egress context. The caller is responsible for ensuring
2081 * only one context operation occurs at a time.
2082 */
2083static int t3_sge_read_context(unsigned int type, struct adapter *adapter,
2084 unsigned int id, u32 data[4])
2085{
2086 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2087 return -EBUSY;
2088
2089 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2090 V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
2091 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
2092 5, 1))
2093 return -EIO;
2094 data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
2095 data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
2096 data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
2097 data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
2098 return 0;
2099}
2100
2101/**
2102 * t3_sge_read_ecntxt - read an SGE egress context
2103 * @adapter: the adapter
2104 * @id: the context id
2105 * @data: holds the retrieved context
2106 *
2107 * Read an SGE egress context. The caller is responsible for ensuring
2108 * only one context operation occurs at a time.
2109 */
2110int t3_sge_read_ecntxt(struct adapter *adapter, unsigned int id, u32 data[4])
2111{
2112 if (id >= 65536)
2113 return -EINVAL;
2114 return t3_sge_read_context(F_EGRESS, adapter, id, data);
2115}
2116
2117/**
2118 * t3_sge_read_cq - read an SGE CQ context
2119 * @adapter: the adapter
2120 * @id: the context id
2121 * @data: holds the retrieved context
2122 *
2123 * Read an SGE CQ context. The caller is responsible for ensuring
2124 * only one context operation occurs at a time.
2125 */
2126int t3_sge_read_cq(struct adapter *adapter, unsigned int id, u32 data[4])
2127{
2128 if (id >= 65536)
2129 return -EINVAL;
2130 return t3_sge_read_context(F_CQ, adapter, id, data);
2131}
2132
2133/**
2134 * t3_sge_read_fl - read an SGE free-list context
2135 * @adapter: the adapter
2136 * @id: the context id
2137 * @data: holds the retrieved context
2138 *
2139 * Read an SGE free-list context. The caller is responsible for ensuring
2140 * only one context operation occurs at a time.
2141 */
2142int t3_sge_read_fl(struct adapter *adapter, unsigned int id, u32 data[4])
2143{
2144 if (id >= SGE_QSETS * 2)
2145 return -EINVAL;
2146 return t3_sge_read_context(F_FREELIST, adapter, id, data);
2147}
2148
2149/**
2150 * t3_sge_read_rspq - read an SGE response queue context
2151 * @adapter: the adapter
2152 * @id: the context id
2153 * @data: holds the retrieved context
2154 *
2155 * Read an SGE response queue context. The caller is responsible for
2156 * ensuring only one context operation occurs at a time.
2157 */
2158int t3_sge_read_rspq(struct adapter *adapter, unsigned int id, u32 data[4])
2159{
2160 if (id >= SGE_QSETS)
2161 return -EINVAL;
2162 return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
2163}
2164
2165/**
2166 * t3_config_rss - configure Rx packet steering
2167 * @adapter: the adapter
2168 * @rss_config: RSS settings (written to TP_RSS_CONFIG)
2169 * @cpus: values for the CPU lookup table (0xff terminated)
2170 * @rspq: values for the response queue lookup table (0xffff terminated)
2171 *
2172 * Programs the receive packet steering logic. @cpus and @rspq provide
2173 * the values for the CPU and response queue lookup tables. If they
2174 * provide fewer values than the size of the tables the supplied values
2175 * are used repeatedly until the tables are fully populated.
2176 */
2177void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
2178 const u8 * cpus, const u16 *rspq)
2179{
2180 int i, j, cpu_idx = 0, q_idx = 0;
2181
2182 if (cpus)
2183 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2184 u32 val = i << 16;
2185
2186 for (j = 0; j < 2; ++j) {
2187 val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
2188 if (cpus[cpu_idx] == 0xff)
2189 cpu_idx = 0;
2190 }
2191 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
2192 }
2193
2194 if (rspq)
2195 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2196 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2197 (i << 16) | rspq[q_idx++]);
2198 if (rspq[q_idx] == 0xffff)
2199 q_idx = 0;
2200 }
2201
2202 t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
2203}
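/*
 * Illustrative usage sketch (hypothetical values, guarded out): spread Rx
 * traffic across two CPUs and two response queues.  Both lookup tables are
 * terminated as described above and repeat until fully populated; the
 * rss_config flags are left at 0 here because the TP_RSS_CONFIG bit
 * definitions are not shown in this file.
 */
#if 0	/* example */
static void example_config_rss(struct adapter *adapter)
{
	static const u8 cpus[] = { 0, 1, 0xff };	/* 0xff terminator */
	static const u16 rspq[] = { 0, 1, 0xffff };	/* 0xffff terminator */

	t3_config_rss(adapter, 0, cpus, rspq);
}
#endif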
2204
2205/**
2206 * t3_read_rss - read the contents of the RSS tables
2207 * @adapter: the adapter
2208 * @lkup: holds the contents of the RSS lookup table
2209 * @map: holds the contents of the RSS map table
2210 *
2211 * Reads the contents of the receive packet steering tables.
2212 */
2213int t3_read_rss(struct adapter *adapter, u8 * lkup, u16 *map)
2214{
2215 int i;
2216 u32 val;
2217
2218 if (lkup)
2219 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2220 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
2221 0xffff0000 | i);
2222 val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
2223 if (!(val & 0x80000000))
2224 return -EAGAIN;
2225 *lkup++ = val;
2226 *lkup++ = (val >> 8);
2227 }
2228
2229 if (map)
2230 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2231 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2232 0xffff0000 | i);
2233 val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
2234 if (!(val & 0x80000000))
2235 return -EAGAIN;
2236 *map++ = val;
2237 }
2238 return 0;
2239}
2240
2241/**
2242 * t3_tp_set_offload_mode - put TP in NIC/offload mode
2243 * @adap: the adapter
2244 * @enable: 1 to select offload mode, 0 for regular NIC
2245 *
2246 * Switches TP to NIC/offload mode.
2247 */
2248void t3_tp_set_offload_mode(struct adapter *adap, int enable)
2249{
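	/* F_NICMODE is active for plain NIC operation, hence V_NICMODE(!enable);
	 * the is_offload() check keeps non-offload adapters from ever leaving
	 * NIC mode. */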
2250 if (is_offload(adap) || !enable)
2251 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
2252 V_NICMODE(!enable));
2253}
2254
2255/**
2256 * pm_num_pages - calculate the number of pages of the payload memory
2257 * @mem_size: the size of the payload memory
2258 * @pg_size: the size of each payload memory page
2259 *
2260 * Calculate the number of pages, each of the given size, that fit in a
2261 * memory of the specified size, respecting the HW requirement that the
2262 * number of pages must be a multiple of 24.
2263 */
2264static inline unsigned int pm_num_pages(unsigned int mem_size,
2265 unsigned int pg_size)
2266{
2267 unsigned int n = mem_size / pg_size;
2268
2269 return n - n % 24;
2270}
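/*
 * Worked example (illustrative numbers): with mem_size = 64 MB and
 * pg_size = 64 KB, n = 1024; 1024 % 24 = 16, so pm_num_pages() returns
 * 1008, the largest multiple of 24 that fits.
 */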
2271
2272#define mem_region(adap, start, size, reg) \
2273 t3_write_reg((adap), A_ ## reg, (start)); \
2274 start += size
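/*
 * Note that mem_region() expands to two statements and advances the caller's
 * 'start' variable in place, so it must only be used as a full statement,
 * never as the unbraced body of an if/else.
 */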
2275
2276/*
2277 * partition_mem - partition memory and configure TP memory settings
2278 * @adap: the adapter
2279 * @p: the TP parameters
2280 *
2281 * Partitions context and payload memory and configures TP's memory
2282 * registers.
2283 */
2284static void partition_mem(struct adapter *adap, const struct tp_params *p)
2285{
2286 unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
2287 unsigned int timers = 0, timers_shift = 22;
2288
2289 if (adap->params.rev > 0) {
2290 if (tids <= 16 * 1024) {
2291 timers = 1;
2292 timers_shift = 16;
2293 } else if (tids <= 64 * 1024) {
2294 timers = 2;
2295 timers_shift = 18;
2296 } else if (tids <= 256 * 1024) {
2297 timers = 3;
2298 timers_shift = 20;
2299 }
2300 }
2301
2302 t3_write_reg(adap, A_TP_PMM_SIZE,
2303 p->chan_rx_size | (p->chan_tx_size >> 16));
2304
2305 t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
2306 t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
2307 t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
2308 t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
2309 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
2310
2311 t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
2312 t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
2313 t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
2314
2315 pstructs = p->rx_num_pgs + p->tx_num_pgs;
2316 /* Add a bit of headroom and round the total down to a multiple of 24 */
2317 pstructs += 48;
2318 pstructs -= pstructs % 24;
2319 t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
2320
2321 m = tids * TCB_SIZE;
2322 mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
2323 mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
2324 t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
2325 m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
2326 mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
2327 mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
2328 mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
2329 mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
2330
2331 m = (m + 4095) & ~0xfff;
2332 t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
2333 t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
2334
2335 tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
2336 m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
2337 adap->params.mc5.nfilters - adap->params.mc5.nroutes;
2338 if (tids < m)
2339 adap->params.mc5.nservers += m - tids;
2340}
2341
2342static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr,
2343 u32 val)
2344{
2345 t3_write_reg(adap, A_TP_PIO_ADDR, addr);
2346 t3_write_reg(adap, A_TP_PIO_DATA, val);
2347}
2348
2349static void tp_config(struct adapter *adap, const struct tp_params *p)
2350{
2351 t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
2352 F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
2353 F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
2354 t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
2355 F_MTUENABLE | V_WINDOWSCALEMODE(1) |
2356 V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
2357 t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
2358 V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
2359 V_BYTETHRESHOLD(16384) | V_MSSTHRESHOLD(2) |
2360 F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
2361 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_IPV6ENABLE | F_NICMODE,
2362 F_IPV6ENABLE | F_NICMODE);
2363 t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
2364 t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
2365 t3_set_reg_field(adap, A_TP_PARA_REG6,
2366 adap->params.rev > 0 ? F_ENABLEESND : F_T3A_ENABLEESND,
2367 0);
2368
2369 t3_set_reg_field(adap, A_TP_PC_CONFIG,
2370 F_ENABLEEPCMDAFULL | F_ENABLEOCSPIFULL,
2371 F_TXDEFERENABLE | F_HEARBEATDACK | F_TXCONGESTIONMODE |
2372 F_RXCONGESTIONMODE);
2373 t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL, 0);
2374
2375 if (adap->params.rev > 0) {
2376 tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
2377 t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
2378 F_TXPACEAUTO);
2379 t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
2380 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
2381 } else
2382 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
2383
2384 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0x12121212);
2385 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0x12121212);
2386 t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0x1212);
2387}
2388
2389/* Desired TP timer resolution in usec */
2390#define TP_TMR_RES 50
2391
2392/* TCP timer values in ms */
2393#define TP_DACK_TIMER 50
2394#define TP_RTO_MIN 250
2395
2396/**
2397 * tp_set_timers - set TP timing parameters
2398 * @adap: the adapter to set
2399 * @core_clk: the core clock frequency in Hz
2400 *
2401 * Set TP's timing parameters, such as the various timer resolutions and
2402 * the TCP timer values.
2403 */
2404static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
2405{
2406 unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
2407 unsigned int dack_re = fls(core_clk / 5000) - 1; /* 200us */
2408 unsigned int tstamp_re = fls(core_clk / 1000); /* 1ms, at least */
2409 unsigned int tps = core_clk >> tre;
2410
2411 t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
2412 V_DELAYEDACKRESOLUTION(dack_re) |
2413 V_TIMESTAMPRESOLUTION(tstamp_re));
2414 t3_write_reg(adap, A_TP_DACK_TIMER,
2415 (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
2416 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
2417 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
2418 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
2419 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
2420 t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
2421 V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
2422 V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
2423 V_KEEPALIVEMAX(9));
2424
2425#define SECONDS * tps
2426
2427 t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS);
2428 t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
2429 t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
2430 t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
2431 t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
2432 t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
2433 t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
2434 t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
2435 t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
2436
2437#undef SECONDS
2438}
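/*
 * Worked example (illustrative): with core_clk = 200 MHz and TP_TMR_RES = 50,
 * core_clk / (1000000 / TP_TMR_RES) = 10000, so tre = fls(10000) - 1 = 13 and
 * tps = 200000000 >> 13 = 24414 ticks/s, i.e. roughly a 41 us timer tick,
 * the coarsest resolution that does not exceed the desired 50 us.
 */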
2439
2440/**
2441 * t3_tp_set_coalescing_size - set receive coalescing size
2442 * @adap: the adapter
2443 * @size: the receive coalescing size
2444 * @psh: whether a set PSH bit should deliver coalesced data
2445 *
2446 * Set the receive coalescing size and PSH bit handling.
2447 */
2448int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh)
2449{
2450 u32 val;
2451
2452 if (size > MAX_RX_COALESCING_LEN)
2453 return -EINVAL;
2454
2455 val = t3_read_reg(adap, A_TP_PARA_REG3);
2456 val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
2457
2458 if (size) {
2459 val |= F_RXCOALESCEENABLE;
2460 if (psh)
2461 val |= F_RXCOALESCEPSHEN;
2462 t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
2463 V_MAXRXDATA(MAX_RX_COALESCING_LEN));
2464 }
2465 t3_write_reg(adap, A_TP_PARA_REG3, val);
2466 return 0;
2467}
2468
2469/**
2470 * t3_tp_set_max_rxsize - set the max receive size
2471 * @adap: the adapter
2472 * @size: the max receive size
2473 *
2474 * Set TP's max receive size. This is the limit that applies when
2475 * receive coalescing is disabled.
2476 */
2477void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
2478{
2479 t3_write_reg(adap, A_TP_PARA_REG7,
2480 V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
2481}
2482
2483static void __devinit init_mtus(unsigned short mtus[])
2484{
2485 /*
2486 * See draft-mathis-plpmtud-00.txt for the values. The min is 88 so
2487 * it can accommodate max size TCP/IP headers when SACK and timestamps
2488 * are enabled and still have at least 8 bytes of payload.
2489 */
2490 mtus[0] = 88;
2491 mtus[1] = 256;
2492 mtus[2] = 512;
2493 mtus[3] = 576;
2494 mtus[4] = 808;
2495 mtus[5] = 1024;
2496 mtus[6] = 1280;
2497 mtus[7] = 1492;
2498 mtus[8] = 1500;
2499 mtus[9] = 2002;
2500 mtus[10] = 2048;
2501 mtus[11] = 4096;
2502 mtus[12] = 4352;
2503 mtus[13] = 8192;
2504 mtus[14] = 9000;
2505 mtus[15] = 9600;
2506}
2507
2508/*
2509 * Initial congestion control parameters.
2510 */
2511static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
2512{
2513 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2514 a[9] = 2;
2515 a[10] = 3;
2516 a[11] = 4;
2517 a[12] = 5;
2518 a[13] = 6;
2519 a[14] = 7;
2520 a[15] = 8;
2521 a[16] = 9;
2522 a[17] = 10;
2523 a[18] = 14;
2524 a[19] = 17;
2525 a[20] = 21;
2526 a[21] = 25;
2527 a[22] = 30;
2528 a[23] = 35;
2529 a[24] = 45;
2530 a[25] = 60;
2531 a[26] = 80;
2532 a[27] = 100;
2533 a[28] = 200;
2534 a[29] = 300;
2535 a[30] = 400;
2536 a[31] = 500;
2537
2538 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2539 b[9] = b[10] = 1;
2540 b[11] = b[12] = 2;
2541 b[13] = b[14] = b[15] = b[16] = 3;
2542 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2543 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2544 b[28] = b[29] = 6;
2545 b[30] = b[31] = 7;
2546}
2547
2548/* The minimum additive increment value for the congestion control table */
2549#define CC_MIN_INCR 2U
2550
2551/**
2552 * t3_load_mtus - write the MTU and congestion control HW tables
2553 * @adap: the adapter
2554 * @mtus: the unrestricted values for the MTU table
2555 * @alpha: the values for the congestion control alpha parameter
2556 * @beta: the values for the congestion control beta parameter
2557 * @mtu_cap: the maximum permitted effective MTU
2558 *
2559 * Write the MTU table with the supplied MTUs, capping each at @mtu_cap.
2560 * Update the high-speed congestion control table with the supplied alpha,
2561 * beta, and MTUs.
2562 */
2563void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
2564 unsigned short alpha[NCCTRL_WIN],
2565 unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
2566{
2567 static const unsigned int avg_pkts[NCCTRL_WIN] = {
2568 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2569 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2570 28672, 40960, 57344, 81920, 114688, 163840, 229376
2571 };
2572
2573 unsigned int i, w;
2574
2575 for (i = 0; i < NMTUS; ++i) {
2576 unsigned int mtu = min(mtus[i], mtu_cap);
2577 unsigned int log2 = fls(mtu);
2578
2579 if (!(mtu & ((1 << log2) >> 2))) /* round */
2580 log2--;
2581 t3_write_reg(adap, A_TP_MTU_TABLE,
2582 (i << 24) | (log2 << 16) | mtu);
2583
2584 for (w = 0; w < NCCTRL_WIN; ++w) {
2585 unsigned int inc;
2586
2587 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2588 CC_MIN_INCR);
2589
2590 t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
2591 (w << 16) | (beta[w] << 13) | inc);
2592 }
2593 }
2594}
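/*
 * Worked example (illustrative): for mtu = 1500 in a window with alpha = 2
 * and avg_pkts = 20, the additive increment is
 * max((1500 - 40) * 2 / 20, CC_MIN_INCR) = 146 bytes.
 */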
2595
2596/**
2597 * t3_read_hw_mtus - returns the values in the HW MTU table
2598 * @adap: the adapter
2599 * @mtus: where to store the HW MTU values
2600 *
2601 * Reads the HW MTU table.
2602 */
2603void t3_read_hw_mtus(struct adapter *adap, unsigned short mtus[NMTUS])
2604{
2605 int i;
2606
2607 for (i = 0; i < NMTUS; ++i) {
2608 unsigned int val;
2609
2610 t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
2611 val = t3_read_reg(adap, A_TP_MTU_TABLE);
2612 mtus[i] = val & 0x3fff;
2613 }
2614}
2615
2616/**
2617 * t3_get_cong_cntl_tab - reads the congestion control table
2618 * @adap: the adapter
2619 * @incr: where to store the additive increment values
2620 *
2621 * Reads the additive increments programmed into the HW congestion
2622 * control table.
2623 */
2624void t3_get_cong_cntl_tab(struct adapter *adap,
2625 unsigned short incr[NMTUS][NCCTRL_WIN])
2626{
2627 unsigned int mtu, w;
2628
2629 for (mtu = 0; mtu < NMTUS; ++mtu)
2630 for (w = 0; w < NCCTRL_WIN; ++w) {
2631 t3_write_reg(adap, A_TP_CCTRL_TABLE,
2632 0xffff0000 | (mtu << 5) | w);
2633 incr[mtu][w] = t3_read_reg(adap, A_TP_CCTRL_TABLE) &
2634 0x1fff;
2635 }
2636}
2637
2638/**
2639 * t3_tp_get_mib_stats - read TP's MIB counters
2640 * @adap: the adapter
2641 * @tps: holds the returned counter values
2642 *
2643 * Returns the values of TP's MIB counters.
2644 */
2645void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps)
2646{
2647 t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps,
2648 sizeof(*tps) / sizeof(u32), 0);
2649}
2650
2651#define ulp_region(adap, name, start, len) \
2652 t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
2653 t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
2654 (start) + (len) - 1); \
2655 start += len
2656
2657#define ulptx_region(adap, name, start, len) \
2658 t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
2659 t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
2660 (start) + (len) - 1)
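/*
 * Unlike ulp_region(), ulptx_region() does not advance 'start', so each
 * ULPTX window covers the same addresses as the ULPRX window programmed
 * right after it in ulp_config() below -- apparently by design, since the
 * TPT/STAG and PBL tables are shared between the TX and RX views.
 */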
2661
2662static void ulp_config(struct adapter *adap, const struct tp_params *p)
2663{
2664 unsigned int m = p->chan_rx_size;
2665
2666 ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
2667 ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
2668 ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
2669 ulp_region(adap, STAG, m, p->chan_rx_size / 4);
2670 ulp_region(adap, RQ, m, p->chan_rx_size / 4);
2671 ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
2672 ulp_region(adap, PBL, m, p->chan_rx_size / 4);
2673 t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
2674}
2675
2676void t3_config_trace_filter(struct adapter *adapter,
2677 const struct trace_params *tp, int filter_index,
2678 int invert, int enable)
2679{
2680 u32 addr, key[4], mask[4];
2681
2682 key[0] = tp->sport | (tp->sip << 16);
2683 key[1] = (tp->sip >> 16) | (tp->dport << 16);
2684 key[2] = tp->dip;
2685 key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
2686
2687 mask[0] = tp->sport_mask | (tp->sip_mask << 16);
2688 mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
2689 mask[2] = tp->dip_mask;
2690 mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
2691
2692 if (invert)
2693 key[3] |= (1 << 29);
2694 if (enable)
2695 key[3] |= (1 << 28);
2696
2697 addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
2698 tp_wr_indirect(adapter, addr++, key[0]);
2699 tp_wr_indirect(adapter, addr++, mask[0]);
2700 tp_wr_indirect(adapter, addr++, key[1]);
2701 tp_wr_indirect(adapter, addr++, mask[1]);
2702 tp_wr_indirect(adapter, addr++, key[2]);
2703 tp_wr_indirect(adapter, addr++, mask[2]);
2704 tp_wr_indirect(adapter, addr++, key[3]);
2705 tp_wr_indirect(adapter, addr, mask[3]);
2706 t3_read_reg(adapter, A_TP_PIO_DATA);
2707}
2708
2709/**
2710 * t3_config_sched - configure a HW traffic scheduler
2711 * @adap: the adapter
2712 * @kbps: target rate in Kbps
2713 * @sched: the scheduler index
2714 *
2715 * Configure a HW scheduler for the target rate.
2716 */
2717int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
2718{
2719 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
2720 unsigned int clk = adap->params.vpd.cclk * 1000;
2721 unsigned int selected_cpt = 0, selected_bpt = 0;
2722
2723 if (kbps > 0) {
2724 kbps *= 125; /* -> bytes */
2725 for (cpt = 1; cpt <= 255; cpt++) {
2726 tps = clk / cpt;
2727 bpt = (kbps + tps / 2) / tps;
2728 if (bpt > 0 && bpt <= 255) {
2729 v = bpt * tps;
2730 delta = v >= kbps ? v - kbps : kbps - v;
2731 if (delta <= mindelta) {
2732 mindelta = delta;
2733 selected_cpt = cpt;
2734 selected_bpt = bpt;
2735 }
2736 } else if (selected_cpt)
2737 break;
2738 }
2739 if (!selected_cpt)
2740 return -EINVAL;
2741 }
2742 t3_write_reg(adap, A_TP_TM_PIO_ADDR,
2743 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
2744 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
2745 if (sched & 1)
2746 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
2747 else
2748 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
2749 t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
2750 return 0;
2751}
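/*
 * Worked example (illustrative): with a 200 MHz core clock and kbps = 10000,
 * the target is 10000 * 125 = 1250000 bytes/s and the search settles on
 * cpt = 160, bpt = 1, i.e. one byte every 160 clocks = exactly 1.25 MB/s
 * (delta = 0).
 */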
2752
2753static int tp_init(struct adapter *adap, const struct tp_params *p)
2754{
2755 int busy = 0;
2756
2757 tp_config(adap, p);
2758 t3_set_vlan_accel(adap, 3, 0);
2759
2760 if (is_offload(adap)) {
2761 tp_set_timers(adap, adap->params.vpd.cclk * 1000);
2762 t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
2763 busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
2764 0, 1000, 5);
2765 if (busy)
2766 CH_ERR(adap, "TP initialization timed out\n");
2767 }
2768
2769 if (!busy)
2770 t3_write_reg(adap, A_TP_RESET, F_TPRESET);
2771 return busy;
2772}
2773
2774int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask)
2775{
2776 if (port_mask & ~((1 << adap->params.nports) - 1))
2777 return -EINVAL;
2778 t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
2779 port_mask << S_PORT0ACTIVE);
2780 return 0;
2781}
2782
2783/*
2784 * Perform the bits of HW initialization that are dependent on the number
2785 * of available ports.
2786 */
2787static void init_hw_for_avail_ports(struct adapter *adap, int nports)
2788{
2789 int i;
2790
2791 if (nports == 1) {
2792 t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
2793 t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
2794 t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_TPTXPORT0EN |
2795 F_PORT0ACTIVE | F_ENFORCEPKT);
2796 t3_write_reg(adap, A_PM1_TX_CFG, 0xc000c000);
2797 } else {
2798 t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
2799 t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
2800 t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
2801 V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
2802 t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
2803 F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
2804 F_ENFORCEPKT);
2805 t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
2806 t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
2807 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
2808 V_TX_MOD_QUEUE_REQ_MAP(0xaa));
2809 for (i = 0; i < 16; i++)
2810 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
2811 (i << 16) | 0x1010);
2812 }
2813}
2814
2815static int calibrate_xgm(struct adapter *adapter)
2816{
2817 if (uses_xaui(adapter)) {
2818 unsigned int v, i;
2819
2820 for (i = 0; i < 5; ++i) {
2821 t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
2822 t3_read_reg(adapter, A_XGM_XAUI_IMP);
2823 msleep(1);
2824 v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
2825 if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
2826 t3_write_reg(adapter, A_XGM_XAUI_IMP,
2827 V_XAUIIMP(G_CALIMP(v) >> 2));
2828 return 0;
2829 }
2830 }
2831 CH_ERR(adapter, "MAC calibration failed\n");
2832 return -1;
2833 } else {
2834 t3_write_reg(adapter, A_XGM_RGMII_IMP,
2835 V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
2836 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
2837 F_XGM_IMPSETUPDATE);
2838 }
2839 return 0;
2840}
2841
2842static void calibrate_xgm_t3b(struct adapter *adapter)
2843{
2844 if (!uses_xaui(adapter)) {
2845 t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
2846 F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
2847 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
2848 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
2849 F_XGM_IMPSETUPDATE);
2850 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
2851 0);
2852 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
2853 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
2854 }
2855}
2856
2857struct mc7_timing_params {
2858 unsigned char ActToPreDly;
2859 unsigned char ActToRdWrDly;
2860 unsigned char PreCyc;
2861 unsigned char RefCyc[5];
2862 unsigned char BkCyc;
2863 unsigned char WrToRdDly;
2864 unsigned char RdToWrDly;
2865};
2866
2867/*
2868 * Write a value to a register and check that the write completed. These
2869 * writes normally complete in a cycle or two, so one read should suffice.
2870 * The very first read exists to flush the posted write to the device.
2871 */
2872static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val)
2873{
2874 t3_write_reg(adapter, addr, val);
2875 t3_read_reg(adapter, addr); /* flush */
2876 if (!(t3_read_reg(adapter, addr) & F_BUSY))
2877 return 0;
2878 CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
2879 return -EIO;
2880}
2881
2882static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
2883{
2884 static const unsigned int mc7_mode[] = {
2885 0x632, 0x642, 0x652, 0x432, 0x442
2886 };
2887 static const struct mc7_timing_params mc7_timings[] = {
2888 {12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4},
2889 {12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4},
2890 {12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4},
2891 {9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4},
2892 {9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4}
2893 };
2894
2895 u32 val;
2896 unsigned int width, density, slow, attempts;
2897 struct adapter *adapter = mc7->adapter;
2898 const struct mc7_timing_params *p = &mc7_timings[mem_type];
2899
2900 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
2901 slow = val & F_SLOW;
2902 width = G_WIDTH(val);
2903 density = G_DEN(val);
2904
2905 t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
2906 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
2907 msleep(1);
2908
2909 if (!slow) {
2910 t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
2911 t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
2912 msleep(1);
2913 if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
2914 (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
2915 CH_ERR(adapter, "%s MC7 calibration timed out\n",
2916 mc7->name);
2917 goto out_fail;
2918 }
2919 }
2920
2921 t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
2922 V_ACTTOPREDLY(p->ActToPreDly) |
2923 V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
2924 V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
2925 V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));
2926
2927 t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
2928 val | F_CLKEN | F_TERM150);
2929 t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
2930
2931 if (!slow)
2932 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
2933 F_DLLENB);
2934 udelay(1);
2935
2936 val = slow ? 3 : 6;
2937 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
2938 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
2939 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
2940 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
2941 goto out_fail;
2942
2943 if (!slow) {
2944 t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
2945 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0);
2946 udelay(5);
2947 }
2948
2949 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
2950 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
2951 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
2952 wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
2953 mc7_mode[mem_type]) ||
2954 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
2955 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
2956 goto out_fail;
2957
 2958 /* mc7_clock is in KHz; convert to clock cycles per refresh interval */
 2959 mc7_clock = mc7_clock * 7812 + mc7_clock / 2; /* KHz * 7812.5 */
 2960 mc7_clock /= 1000000; /* cycles per 7.8125 us refresh */
2961
2962 t3_write_reg(adapter, mc7->offset + A_MC7_REF,
2963 F_PERREFEN | V_PREREFDIV(mc7_clock));
2964 t3_read_reg(adapter, mc7->offset + A_MC7_REF); /* flush */
2965
2966 t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN);
2967 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
2968 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
2969 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
2970 (mc7->size << width) - 1);
2971 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
2972 t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); /* flush */
2973
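	/* BIST runs over the whole array; poll for up to 50 * 250 ms = 12.5 s. */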
2974 attempts = 50;
2975 do {
2976 msleep(250);
2977 val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
2978 } while ((val & F_BUSY) && --attempts);
2979 if (val & F_BUSY) {
2980 CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
2981 goto out_fail;
2982 }
2983
2984 /* Enable normal memory accesses. */
2985 t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
2986 return 0;
2987
2988out_fail:
2989 return -1;
2990}
2991
2992static void config_pcie(struct adapter *adap)
2993{
2994 static const u16 ack_lat[4][6] = {
2995 {237, 416, 559, 1071, 2095, 4143},
2996 {128, 217, 289, 545, 1057, 2081},
2997 {73, 118, 154, 282, 538, 1050},
2998 {67, 107, 86, 150, 278, 534}
2999 };
3000 static const u16 rpl_tmr[4][6] = {
3001 {711, 1248, 1677, 3213, 6285, 12429},
3002 {384, 651, 867, 1635, 3171, 6243},
3003 {219, 354, 462, 846, 1614, 3150},
3004 {201, 321, 258, 450, 834, 1602}
3005 };
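	/*
	 * Both tables are indexed by [log2 of the negotiated link width]
	 * [max payload size code from the PCIe Device Control register].
	 */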
3006
3007 u16 val;
3008 unsigned int log2_width, pldsize;
3009 unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;
3010
3011 pci_read_config_word(adap->pdev,
3012 adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
3013 &val);
3014 pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
3015 pci_read_config_word(adap->pdev,
3016 adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
3017 &val);
3018
3019 fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
3020 fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
3021 G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
3022 log2_width = fls(adap->params.pci.width) - 1;
3023 acklat = ack_lat[log2_width][pldsize];
3024 if (val & 1) /* check LOsEnable */
3025 acklat += fst_trn_tx * 4;
3026 rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
3027
3028 if (adap->params.rev == 0)
3029 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
3030 V_T3A_ACKLAT(M_T3A_ACKLAT),
3031 V_T3A_ACKLAT(acklat));
3032 else
3033 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
3034 V_ACKLAT(acklat));
3035
3036 t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
3037 V_REPLAYLMT(rpllmt));
3038
3039 t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
3040 t3_set_reg_field(adap, A_PCIE_CFG, F_PCIE_CLIDECEN, F_PCIE_CLIDECEN);
3041}
3042
3043/*
3044 * Initialize and configure T3 HW modules. This performs the
3045 * initialization steps that need to be done once after a card is reset.
 3046 * MAC and PHY initialization is handled separately whenever a port is enabled.
3047 *
3048 * fw_params are passed to FW and their value is platform dependent. Only the
3049 * top 8 bits are available for use, the rest must be 0.
3050 */
3051int t3_init_hw(struct adapter *adapter, u32 fw_params)
3052{
3053 int err = -EIO, attempts = 100;
3054 const struct vpd_params *vpd = &adapter->params.vpd;
3055
3056 if (adapter->params.rev > 0)
3057 calibrate_xgm_t3b(adapter);
3058 else if (calibrate_xgm(adapter))
3059 goto out_err;
3060
3061 if (vpd->mclk) {
3062 partition_mem(adapter, &adapter->params.tp);
3063
3064 if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
3065 mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
3066 mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
3067 t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
3068 adapter->params.mc5.nfilters,
3069 adapter->params.mc5.nroutes))
3070 goto out_err;
3071 }
3072
3073 if (tp_init(adapter, &adapter->params.tp))
3074 goto out_err;
3075
3076 t3_tp_set_coalescing_size(adapter,
3077 min(adapter->params.sge.max_pkt_size,
3078 MAX_RX_COALESCING_LEN), 1);
3079 t3_tp_set_max_rxsize(adapter,
3080 min(adapter->params.sge.max_pkt_size, 16384U));
3081 ulp_config(adapter, &adapter->params.tp);
3082
3083 if (is_pcie(adapter))
3084 config_pcie(adapter);
3085 else
3086 t3_set_reg_field(adapter, A_PCIX_CFG, 0, F_CLIDECEN);
3087
3088 t3_write_reg(adapter, A_PM1_RX_CFG, 0xf000f000);
3089 init_hw_for_avail_ports(adapter, adapter->params.nports);
3090 t3_sge_init(adapter, &adapter->params.sge);
3091
3092 t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
3093 t3_write_reg(adapter, A_CIM_BOOT_CFG,
3094 V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
3095 t3_read_reg(adapter, A_CIM_BOOT_CFG); /* flush */
3096
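	/* Allow up to 100 * 20 ms = 2 s for the uP to clear the register. */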
3097 do { /* wait for uP to initialize */
3098 msleep(20);
3099 } while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
3100 if (!attempts)
3101 goto out_err;
3102
3103 err = 0;
3104out_err:
3105 return err;
3106}
3107
3108/**
3109 * get_pci_mode - determine a card's PCI mode
3110 * @adapter: the adapter
3111 * @p: where to store the PCI settings
3112 *
3113 * Determines a card's PCI mode and associated parameters, such as speed
3114 * and width.
3115 */
3116static void __devinit get_pci_mode(struct adapter *adapter,
3117 struct pci_params *p)
3118{
3119 static unsigned short speed_map[] = { 33, 66, 100, 133 };
3120 u32 pci_mode, pcie_cap;
3121
3122 pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
3123 if (pcie_cap) {
3124 u16 val;
3125
3126 p->variant = PCI_VARIANT_PCIE;
3127 p->pcie_cap_addr = pcie_cap;
3128 pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
3129 &val);
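		/* Negotiated link width is in bits 9:4 of the Link Status register. */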
3130 p->width = (val >> 4) & 0x3f;
3131 return;
3132 }
3133
3134 pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
3135 p->speed = speed_map[G_PCLKRANGE(pci_mode)];
3136 p->width = (pci_mode & F_64BIT) ? 64 : 32;
3137 pci_mode = G_PCIXINITPAT(pci_mode);
3138 if (pci_mode == 0)
3139 p->variant = PCI_VARIANT_PCI;
3140 else if (pci_mode < 4)
3141 p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
3142 else if (pci_mode < 8)
3143 p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
3144 else
3145 p->variant = PCI_VARIANT_PCIX_266_MODE2;
3146}
3147
3148/**
3149 * init_link_config - initialize a link's SW state
3150 * @lc: structure holding the link state
 3151 * @caps: bit mask of the link's supported capabilities
3152 *
3153 * Initializes the SW state maintained for each link, including the link's
3154 * capabilities and default speed/duplex/flow-control/autonegotiation
3155 * settings.
3156 */
3157static void __devinit init_link_config(struct link_config *lc,
3158 unsigned int caps)
3159{
3160 lc->supported = caps;
3161 lc->requested_speed = lc->speed = SPEED_INVALID;
3162 lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
3163 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3164 if (lc->supported & SUPPORTED_Autoneg) {
3165 lc->advertising = lc->supported;
3166 lc->autoneg = AUTONEG_ENABLE;
3167 lc->requested_fc |= PAUSE_AUTONEG;
3168 } else {
3169 lc->advertising = 0;
3170 lc->autoneg = AUTONEG_DISABLE;
3171 }
3172}
3173
3174/**
3175 * mc7_calc_size - calculate MC7 memory size
3176 * @cfg: the MC7 configuration
3177 *
3178 * Calculates the size of an MC7 memory in bytes from the value of its
3179 * configuration register.
3180 */
3181static unsigned int __devinit mc7_calc_size(u32 cfg)
3182{
3183 unsigned int width = G_WIDTH(cfg);
3184 unsigned int banks = !!(cfg & F_BKS) + 1;
3185 unsigned int org = !!(cfg & F_ORG) + 1;
3186 unsigned int density = G_DEN(cfg);
3187 unsigned int MBs = ((256 << density) * banks) / (org << width);
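	/*
	 * Worked example: density = 2, banks = 2, org = 1, width = 0
	 * yields ((256 << 2) * 2) / (1 << 0) = 2048 MB.
	 */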
3188
3189 return MBs << 20;
3190}
3191
3192static void __devinit mc7_prep(struct adapter *adapter, struct mc7 *mc7,
3193 unsigned int base_addr, const char *name)
3194{
3195 u32 cfg;
3196
3197 mc7->adapter = adapter;
3198 mc7->name = name;
3199 mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
3200 cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3201 mc7->size = mc7_calc_size(cfg);
3202 mc7->width = G_WIDTH(cfg);
3203}
3204
3205void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
3206{
3207 mac->adapter = adapter;
3208 mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
3209 mac->nucast = 1;
3210
3211 if (adapter->params.rev == 0 && uses_xaui(adapter)) {
3212 t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
3213 is_10G(adapter) ? 0x2901c04 : 0x2301c04);
3214 t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
3215 F_ENRGMII, 0);
3216 }
3217}
3218
3219void early_hw_init(struct adapter *adapter, const struct adapter_info *ai)
3220{
3221 u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);
3222
3223 mi1_init(adapter, ai);
3224 t3_write_reg(adapter, A_I2C_CFG, /* set for 80KHz */
3225 V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
3226 t3_write_reg(adapter, A_T3DBG_GPIO_EN,
3227 ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
3228
3229 if (adapter->params.rev == 0 || !uses_xaui(adapter))
3230 val |= F_ENRGMII;
3231
3232 /* Enable MAC clocks so we can access the registers */
3233 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3234 t3_read_reg(adapter, A_XGM_PORT_CFG);
3235
3236 val |= F_CLKDIVRESET_;
3237 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3238 t3_read_reg(adapter, A_XGM_PORT_CFG);
3239 t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
3240 t3_read_reg(adapter, A_XGM_PORT_CFG);
3241}
3242
3243/*
3244 * Reset the adapter. PCIe cards lose their config space during reset, PCI-X
3245 * ones don't.
3246 */
3247int t3_reset_adapter(struct adapter *adapter)
3248{
3249 int i;
3250 uint16_t devid = 0;
3251
3252 if (is_pcie(adapter))
3253 pci_save_state(adapter->pdev);
3254 t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
3255
 3256 /*
 3257 * Give the device some time to reset fully, then poll the PCI vendor
 3258 * ID (0x1425 is Chelsio's). XXX The delay time should be tuned.
 3259 */
3260 for (i = 0; i < 10; i++) {
3261 msleep(50);
3262 pci_read_config_word(adapter->pdev, 0x00, &devid);
3263 if (devid == 0x1425)
3264 break;
3265 }
3266
3267 if (devid != 0x1425)
3268 return -1;
3269
3270 if (is_pcie(adapter))
3271 pci_restore_state(adapter->pdev);
3272 return 0;
3273}
3274
3275/*
3276 * Initialize adapter SW state for the various HW modules, set initial values
3277 * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
3278 * interface.
3279 */
3280int __devinit t3_prep_adapter(struct adapter *adapter,
3281 const struct adapter_info *ai, int reset)
3282{
3283 int ret;
3284 unsigned int i, j = 0;
3285
3286 get_pci_mode(adapter, &adapter->params.pci);
3287
3288 adapter->params.info = ai;
3289 adapter->params.nports = ai->nports;
3290 adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
3291 adapter->params.linkpoll_period = 0;
3292 adapter->params.stats_update_period = is_10G(adapter) ?
3293 MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
3294 adapter->params.pci.vpd_cap_addr =
3295 pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
3296 ret = get_vpd_params(adapter, &adapter->params.vpd);
3297 if (ret < 0)
3298 return ret;
3299
3300 if (reset && t3_reset_adapter(adapter))
3301 return -1;
3302
3303 t3_sge_prep(adapter, &adapter->params.sge);
3304
3305 if (adapter->params.vpd.mclk) {
3306 struct tp_params *p = &adapter->params.tp;
3307
3308 mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
3309 mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
3310 mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
3311
3312 p->nchan = ai->nports;
3313 p->pmrx_size = t3_mc7_size(&adapter->pmrx);
3314 p->pmtx_size = t3_mc7_size(&adapter->pmtx);
3315 p->cm_size = t3_mc7_size(&adapter->cm);
3316 p->chan_rx_size = p->pmrx_size / 2; /* only 1 Rx channel */
3317 p->chan_tx_size = p->pmtx_size / p->nchan;
3318 p->rx_pg_size = 64 * 1024;
3319 p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
3320 p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
3321 p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
3322 p->ntimer_qs = p->cm_size >= (128 << 20) ||
3323 adapter->params.rev > 0 ? 12 : 6;
3324
3325 adapter->params.mc5.nservers = DEFAULT_NSERVERS;
3326 adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
3327 DEFAULT_NFILTERS : 0;
3328 adapter->params.mc5.nroutes = 0;
3329 t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);
3330
3331 init_mtus(adapter->params.mtus);
3332 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3333 }
3334
3335 early_hw_init(adapter, ai);
3336
3337 for_each_port(adapter, i) {
3338 u8 hw_addr[6];
3339 struct port_info *p = adap2pinfo(adapter, i);
3340
3341 while (!adapter->params.vpd.port_type[j])
3342 ++j;
3343
3344 p->port_type = &port_types[adapter->params.vpd.port_type[j]];
3345 p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
3346 ai->mdio_ops);
3347 mac_prep(&p->mac, adapter, j);
3348 ++j;
3349
3350 /*
3351 * The VPD EEPROM stores the base Ethernet address for the
3352 * card. A port's address is derived from the base by adding
3353 * the port's index to the base's low octet.
3354 */
3355 memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
3356 hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
3357
3358 memcpy(adapter->port[i]->dev_addr, hw_addr,
3359 ETH_ALEN);
3360 memcpy(adapter->port[i]->perm_addr, hw_addr,
3361 ETH_ALEN);
3362 init_link_config(&p->link_config, p->port_type->caps);
3363 p->phy.ops->power_down(&p->phy, 1);
3364 if (!(p->port_type->caps & SUPPORTED_IRQ))
3365 adapter->params.linkpoll_period = 10;
3366 }
3367
3368 return 0;
3369}
3370
3371void t3_led_ready(struct adapter *adapter)
3372{
3373 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
3374 F_GPIO0_OUT_VAL);
3375}
diff --git a/drivers/net/cxgb3/t3cdev.h b/drivers/net/cxgb3/t3cdev.h
new file mode 100644
index 000000000000..9af3bcd64b3b
--- /dev/null
+++ b/drivers/net/cxgb3/t3cdev.h
@@ -0,0 +1,73 @@
1/*
2 * Copyright (C) 2006-2007 Chelsio Communications. All rights reserved.
3 * Copyright (C) 2006-2007 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33#ifndef _T3CDEV_H_
34#define _T3CDEV_H_
35
36#include <linux/list.h>
37#include <asm/atomic.h>
38#include <asm/semaphore.h>
39#include <linux/netdevice.h>
40#include <linux/proc_fs.h>
41#include <linux/skbuff.h>
42#include <net/neighbour.h>
43
44#define T3CNAMSIZ 16
45
46/* Get the t3cdev associated with a net_device */
 47#define T3CDEV(netdev) ((struct t3cdev *)(netdev)->priv)
48
49struct cxgb3_client;
50
51enum t3ctype {
52 T3A = 0,
53 T3B
54};
55
56struct t3cdev {
57 char name[T3CNAMSIZ]; /* T3C device name */
58 enum t3ctype type;
59 struct list_head ofld_dev_list; /* for list linking */
60 struct net_device *lldev; /* LL dev associated with T3C messages */
61 struct proc_dir_entry *proc_dir; /* root of proc dir for this T3C */
62 int (*send)(struct t3cdev *dev, struct sk_buff *skb);
63 int (*recv)(struct t3cdev *dev, struct sk_buff **skb, int n);
64 int (*ctl)(struct t3cdev *dev, unsigned int req, void *data);
65 void (*neigh_update)(struct t3cdev *dev, struct neighbour *neigh);
66 void *priv; /* driver private data */
67 void *l2opt; /* optional layer 2 data */
68 void *l3opt; /* optional layer 3 data */
69 void *l4opt; /* optional layer 4 data */
70 void *ulp; /* ulp stuff */
71};
72
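/*
 * A minimal usage sketch, assuming the hooks below are installed by the
 * cxgb3 driver when it registers the t3cdev; t3c_send_skb and its NULL
 * check are illustrative, not part of the original interface.
 */
static inline int t3c_send_skb(struct t3cdev *tdev, struct sk_buff *skb)
{
	if (!tdev->send)	/* hook not installed yet */
		return -EOPNOTSUPP;
	return tdev->send(tdev, skb);
}
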
73#endif /* _T3CDEV_H_ */
diff --git a/drivers/net/cxgb3/version.h b/drivers/net/cxgb3/version.h
new file mode 100644
index 000000000000..2b67dd523cc1
--- /dev/null
+++ b/drivers/net/cxgb3/version.h
@@ -0,0 +1,39 @@
1/*
2 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32/* $Date: 2006/10/31 18:57:51 $ $RCSfile: version.h,v $ $Revision: 1.3 $ */
33#ifndef __CHELSIO_VERSION_H
34#define __CHELSIO_VERSION_H
35#define DRV_DESC "Chelsio T3 Network Driver"
36#define DRV_NAME "cxgb3"
37/* Driver version */
38#define DRV_VERSION "1.0"
39#endif /* __CHELSIO_VERSION_H */
diff --git a/drivers/net/cxgb3/vsc8211.c b/drivers/net/cxgb3/vsc8211.c
new file mode 100644
index 000000000000..eee4285b31be
--- /dev/null
+++ b/drivers/net/cxgb3/vsc8211.c
@@ -0,0 +1,228 @@
1/*
2 * Copyright (c) 2005-2007 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include "common.h"
33
34/* VSC8211 PHY specific registers. */
35enum {
36 VSC8211_INTR_ENABLE = 25,
37 VSC8211_INTR_STATUS = 26,
38 VSC8211_AUX_CTRL_STAT = 28,
39};
40
41enum {
42 VSC_INTR_RX_ERR = 1 << 0,
43 VSC_INTR_MS_ERR = 1 << 1, /* master/slave resolution error */
44 VSC_INTR_CABLE = 1 << 2, /* cable impairment */
45 VSC_INTR_FALSE_CARR = 1 << 3, /* false carrier */
46 VSC_INTR_MEDIA_CHG = 1 << 4, /* AMS media change */
47 VSC_INTR_RX_FIFO = 1 << 5, /* Rx FIFO over/underflow */
48 VSC_INTR_TX_FIFO = 1 << 6, /* Tx FIFO over/underflow */
49 VSC_INTR_DESCRAMBL = 1 << 7, /* descrambler lock-lost */
50 VSC_INTR_SYMBOL_ERR = 1 << 8, /* symbol error */
51 VSC_INTR_NEG_DONE = 1 << 10, /* autoneg done */
52 VSC_INTR_NEG_ERR = 1 << 11, /* autoneg error */
53 VSC_INTR_LINK_CHG = 1 << 13, /* link change */
54 VSC_INTR_ENABLE = 1 << 15, /* interrupt enable */
55};
56
57#define CFG_CHG_INTR_MASK (VSC_INTR_LINK_CHG | VSC_INTR_NEG_ERR | \
58 VSC_INTR_NEG_DONE)
59#define INTR_MASK (CFG_CHG_INTR_MASK | VSC_INTR_TX_FIFO | VSC_INTR_RX_FIFO | \
60 VSC_INTR_ENABLE)
61
62/* PHY specific auxiliary control & status register fields */
63#define S_ACSR_ACTIPHY_TMR 0
64#define M_ACSR_ACTIPHY_TMR 0x3
65#define V_ACSR_ACTIPHY_TMR(x) ((x) << S_ACSR_ACTIPHY_TMR)
66
67#define S_ACSR_SPEED 3
68#define M_ACSR_SPEED 0x3
69#define G_ACSR_SPEED(x) (((x) >> S_ACSR_SPEED) & M_ACSR_SPEED)
70
71#define S_ACSR_DUPLEX 5
72#define F_ACSR_DUPLEX (1 << S_ACSR_DUPLEX)
73
74#define S_ACSR_ACTIPHY 6
75#define F_ACSR_ACTIPHY (1 << S_ACSR_ACTIPHY)
76
77/*
78 * Reset the PHY. This PHY completes reset immediately so we never wait.
79 */
80static int vsc8211_reset(struct cphy *cphy, int wait)
81{
82 return t3_phy_reset(cphy, 0, 0);
83}
84
85static int vsc8211_intr_enable(struct cphy *cphy)
86{
87 return mdio_write(cphy, 0, VSC8211_INTR_ENABLE, INTR_MASK);
88}
89
90static int vsc8211_intr_disable(struct cphy *cphy)
91{
92 return mdio_write(cphy, 0, VSC8211_INTR_ENABLE, 0);
93}
94
95static int vsc8211_intr_clear(struct cphy *cphy)
96{
97 u32 val;
98
99 /* Clear PHY interrupts by reading the register. */
100 return mdio_read(cphy, 0, VSC8211_INTR_STATUS, &val);
101}
102
103static int vsc8211_autoneg_enable(struct cphy *cphy)
104{
105 return t3_mdio_change_bits(cphy, 0, MII_BMCR, BMCR_PDOWN | BMCR_ISOLATE,
106 BMCR_ANENABLE | BMCR_ANRESTART);
107}
108
109static int vsc8211_autoneg_restart(struct cphy *cphy)
110{
111 return t3_mdio_change_bits(cphy, 0, MII_BMCR, BMCR_PDOWN | BMCR_ISOLATE,
112 BMCR_ANRESTART);
113}
114
115static int vsc8211_get_link_status(struct cphy *cphy, int *link_ok,
116 int *speed, int *duplex, int *fc)
117{
118 unsigned int bmcr, status, lpa, adv;
119 int err, sp = -1, dplx = -1, pause = 0;
120
121 err = mdio_read(cphy, 0, MII_BMCR, &bmcr);
122 if (!err)
123 err = mdio_read(cphy, 0, MII_BMSR, &status);
124 if (err)
125 return err;
126
127 if (link_ok) {
128 /*
129 * BMSR_LSTATUS is latch-low, so if it is 0 we need to read it
130 * once more to get the current link state.
131 */
132 if (!(status & BMSR_LSTATUS))
133 err = mdio_read(cphy, 0, MII_BMSR, &status);
134 if (err)
135 return err;
136 *link_ok = (status & BMSR_LSTATUS) != 0;
137 }
138 if (!(bmcr & BMCR_ANENABLE)) {
139 dplx = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
140 if (bmcr & BMCR_SPEED1000)
141 sp = SPEED_1000;
142 else if (bmcr & BMCR_SPEED100)
143 sp = SPEED_100;
144 else
145 sp = SPEED_10;
146 } else if (status & BMSR_ANEGCOMPLETE) {
147 err = mdio_read(cphy, 0, VSC8211_AUX_CTRL_STAT, &status);
148 if (err)
149 return err;
150
151 dplx = (status & F_ACSR_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF;
152 sp = G_ACSR_SPEED(status);
153 if (sp == 0)
154 sp = SPEED_10;
155 else if (sp == 1)
156 sp = SPEED_100;
157 else
158 sp = SPEED_1000;
159
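		/*
		 * Resolve flow control per the 802.3 autoneg pause rules:
		 * symmetric pause when both ends advertise Pause, otherwise
		 * asymmetric Tx/Rx pause based on the AsymDir bits.
		 */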
160 if (fc && dplx == DUPLEX_FULL) {
161 err = mdio_read(cphy, 0, MII_LPA, &lpa);
162 if (!err)
163 err = mdio_read(cphy, 0, MII_ADVERTISE, &adv);
164 if (err)
165 return err;
166
167 if (lpa & adv & ADVERTISE_PAUSE_CAP)
168 pause = PAUSE_RX | PAUSE_TX;
169 else if ((lpa & ADVERTISE_PAUSE_CAP) &&
170 (lpa & ADVERTISE_PAUSE_ASYM) &&
171 (adv & ADVERTISE_PAUSE_ASYM))
172 pause = PAUSE_TX;
173 else if ((lpa & ADVERTISE_PAUSE_ASYM) &&
174 (adv & ADVERTISE_PAUSE_CAP))
175 pause = PAUSE_RX;
176 }
177 }
178 if (speed)
179 *speed = sp;
180 if (duplex)
181 *duplex = dplx;
182 if (fc)
183 *fc = pause;
184 return 0;
185}
186
187static int vsc8211_power_down(struct cphy *cphy, int enable)
188{
189 return t3_mdio_change_bits(cphy, 0, MII_BMCR, BMCR_PDOWN,
190 enable ? BMCR_PDOWN : 0);
191}
192
193static int vsc8211_intr_handler(struct cphy *cphy)
194{
195 unsigned int cause;
196 int err, cphy_cause = 0;
197
198 err = mdio_read(cphy, 0, VSC8211_INTR_STATUS, &cause);
199 if (err)
200 return err;
201
202 cause &= INTR_MASK;
203 if (cause & CFG_CHG_INTR_MASK)
204 cphy_cause |= cphy_cause_link_change;
205 if (cause & (VSC_INTR_RX_FIFO | VSC_INTR_TX_FIFO))
206 cphy_cause |= cphy_cause_fifo_error;
207 return cphy_cause;
208}
209
210static struct cphy_ops vsc8211_ops = {
211 .reset = vsc8211_reset,
212 .intr_enable = vsc8211_intr_enable,
213 .intr_disable = vsc8211_intr_disable,
214 .intr_clear = vsc8211_intr_clear,
215 .intr_handler = vsc8211_intr_handler,
216 .autoneg_enable = vsc8211_autoneg_enable,
217 .autoneg_restart = vsc8211_autoneg_restart,
218 .advertise = t3_phy_advertise,
219 .set_speed_duplex = t3_set_phy_speed_duplex,
220 .get_link_status = vsc8211_get_link_status,
221 .power_down = vsc8211_power_down,
222};
223
224void t3_vsc8211_phy_prep(struct cphy *phy, struct adapter *adapter,
225 int phy_addr, const struct mdio_ops *mdio_ops)
226{
227 cphy_init(phy, adapter, phy_addr, &vsc8211_ops, mdio_ops);
228}
diff --git a/drivers/net/cxgb3/xgmac.c b/drivers/net/cxgb3/xgmac.c
new file mode 100644
index 000000000000..907a272ae32d
--- /dev/null
+++ b/drivers/net/cxgb3/xgmac.c
@@ -0,0 +1,409 @@
1/*
2 * Copyright (c) 2005-2007 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include "common.h"
33#include "regs.h"
34
35/*
36 * # of exact address filters. The first one is used for the station address,
37 * the rest are available for multicast addresses.
38 */
39#define EXACT_ADDR_FILTERS 8
40
41static inline int macidx(const struct cmac *mac)
42{
43 return mac->offset / (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR);
44}
45
46static void xaui_serdes_reset(struct cmac *mac)
47{
48 static const unsigned int clear[] = {
49 F_PWRDN0 | F_PWRDN1, F_RESETPLL01, F_RESET0 | F_RESET1,
50 F_PWRDN2 | F_PWRDN3, F_RESETPLL23, F_RESET2 | F_RESET3
51 };
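	/*
	 * Release the resets in stages for each lane pair: power-down
	 * bits first, then the PLL reset, then the per-lane resets,
	 * with a settling delay after each step.
	 */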
52
53 int i;
54 struct adapter *adap = mac->adapter;
55 u32 ctrl = A_XGM_SERDES_CTRL0 + mac->offset;
56
57 t3_write_reg(adap, ctrl, adap->params.vpd.xauicfg[macidx(mac)] |
58 F_RESET3 | F_RESET2 | F_RESET1 | F_RESET0 |
59 F_PWRDN3 | F_PWRDN2 | F_PWRDN1 | F_PWRDN0 |
60 F_RESETPLL23 | F_RESETPLL01);
61 t3_read_reg(adap, ctrl);
62 udelay(15);
63
64 for (i = 0; i < ARRAY_SIZE(clear); i++) {
65 t3_set_reg_field(adap, ctrl, clear[i], 0);
66 udelay(15);
67 }
68}
69
70void t3b_pcs_reset(struct cmac *mac)
71{
72 t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset,
73 F_PCS_RESET_, 0);
74 udelay(20);
75 t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset, 0,
76 F_PCS_RESET_);
77}
78
79int t3_mac_reset(struct cmac *mac)
80{
81 static const struct addr_val_pair mac_reset_avp[] = {
82 {A_XGM_TX_CTRL, 0},
83 {A_XGM_RX_CTRL, 0},
84 {A_XGM_RX_CFG, F_DISPAUSEFRAMES | F_EN1536BFRAMES |
85 F_RMFCS | F_ENJUMBO | F_ENHASHMCAST},
86 {A_XGM_RX_HASH_LOW, 0},
87 {A_XGM_RX_HASH_HIGH, 0},
88 {A_XGM_RX_EXACT_MATCH_LOW_1, 0},
89 {A_XGM_RX_EXACT_MATCH_LOW_2, 0},
90 {A_XGM_RX_EXACT_MATCH_LOW_3, 0},
91 {A_XGM_RX_EXACT_MATCH_LOW_4, 0},
92 {A_XGM_RX_EXACT_MATCH_LOW_5, 0},
93 {A_XGM_RX_EXACT_MATCH_LOW_6, 0},
94 {A_XGM_RX_EXACT_MATCH_LOW_7, 0},
95 {A_XGM_RX_EXACT_MATCH_LOW_8, 0},
96 {A_XGM_STAT_CTRL, F_CLRSTATS}
97 };
98 u32 val;
99 struct adapter *adap = mac->adapter;
100 unsigned int oft = mac->offset;
101
102 t3_write_reg(adap, A_XGM_RESET_CTRL + oft, F_MAC_RESET_);
103 t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
104
105 t3_write_regs(adap, mac_reset_avp, ARRAY_SIZE(mac_reset_avp), oft);
106 t3_set_reg_field(adap, A_XGM_RXFIFO_CFG + oft,
107 F_RXSTRFRWRD | F_DISERRFRAMES,
108 uses_xaui(adap) ? 0 : F_RXSTRFRWRD);
109
110 if (uses_xaui(adap)) {
111 if (adap->params.rev == 0) {
112 t3_set_reg_field(adap, A_XGM_SERDES_CTRL + oft, 0,
113 F_RXENABLE | F_TXENABLE);
114 if (t3_wait_op_done(adap, A_XGM_SERDES_STATUS1 + oft,
115 F_CMULOCK, 1, 5, 2)) {
116 CH_ERR(adap,
117 "MAC %d XAUI SERDES CMU lock failed\n",
118 macidx(mac));
119 return -1;
120 }
121 t3_set_reg_field(adap, A_XGM_SERDES_CTRL + oft, 0,
122 F_SERDESRESET_);
123 } else
124 xaui_serdes_reset(mac);
125 }
126
127 if (adap->params.rev > 0)
128 t3_write_reg(adap, A_XGM_PAUSE_TIMER + oft, 0xf000);
129
130 val = F_MAC_RESET_;
131 if (is_10G(adap))
132 val |= F_PCS_RESET_;
133 else if (uses_xaui(adap))
134 val |= F_PCS_RESET_ | F_XG2G_RESET_;
135 else
136 val |= F_RGMII_RESET_ | F_XG2G_RESET_;
137 t3_write_reg(adap, A_XGM_RESET_CTRL + oft, val);
138 t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
139 if ((val & F_PCS_RESET_) && adap->params.rev) {
140 msleep(1);
141 t3b_pcs_reset(mac);
142 }
143
144 memset(&mac->stats, 0, sizeof(mac->stats));
145 return 0;
146}
147
148/*
149 * Set the exact match register 'idx' to recognize the given Ethernet address.
150 */
151static void set_addr_filter(struct cmac *mac, int idx, const u8 * addr)
152{
153 u32 addr_lo, addr_hi;
154 unsigned int oft = mac->offset + idx * 8;
155
156 addr_lo = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
157 addr_hi = (addr[5] << 8) | addr[4];
158
159 t3_write_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1 + oft, addr_lo);
160 t3_write_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_HIGH_1 + oft, addr_hi);
161}
162
163/* Set one of the station's unicast MAC addresses. */
164int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6])
165{
166 if (idx >= mac->nucast)
167 return -EINVAL;
168 set_addr_filter(mac, idx, addr);
169 return 0;
170}
171
172/*
173 * Specify the number of exact address filters that should be reserved for
174 * unicast addresses. Caller should reload the unicast and multicast addresses
175 * after calling this.
176 */
177int t3_mac_set_num_ucast(struct cmac *mac, int n)
178{
179 if (n > EXACT_ADDR_FILTERS)
180 return -EINVAL;
181 mac->nucast = n;
182 return 0;
183}
184
185/* Calculate the RX hash filter index of an Ethernet address */
186static int hash_hw_addr(const u8 * addr)
187{
188 int hash = 0, octet, bit, i = 0, c;
189
190 for (octet = 0; octet < 6; ++octet)
191 for (c = addr[octet], bit = 0; bit < 8; c >>= 1, ++bit) {
192 hash ^= (c & 1) << i;
193 if (++i == 6)
194 i = 0;
195 }
196 return hash;
197}
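/*
 * The fold maps all 48 address bits onto a 6-bit index. For example,
 * the broadcast address ff:ff:ff:ff:ff:ff XORs eight 1-bits into each
 * of the six positions, so it hashes to index 0.
 */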
198
199int t3_mac_set_rx_mode(struct cmac *mac, struct t3_rx_mode *rm)
200{
201 u32 val, hash_lo, hash_hi;
202 struct adapter *adap = mac->adapter;
203 unsigned int oft = mac->offset;
204
205 val = t3_read_reg(adap, A_XGM_RX_CFG + oft) & ~F_COPYALLFRAMES;
206 if (rm->dev->flags & IFF_PROMISC)
207 val |= F_COPYALLFRAMES;
208 t3_write_reg(adap, A_XGM_RX_CFG + oft, val);
209
210 if (rm->dev->flags & IFF_ALLMULTI)
211 hash_lo = hash_hi = 0xffffffff;
212 else {
213 u8 *addr;
214 int exact_addr_idx = mac->nucast;
215
216 hash_lo = hash_hi = 0;
217 while ((addr = t3_get_next_mcaddr(rm)))
218 if (exact_addr_idx < EXACT_ADDR_FILTERS)
219 set_addr_filter(mac, exact_addr_idx++, addr);
220 else {
221 int hash = hash_hw_addr(addr);
222
223 if (hash < 32)
224 hash_lo |= (1 << hash);
225 else
226 hash_hi |= (1 << (hash - 32));
227 }
228 }
229
230 t3_write_reg(adap, A_XGM_RX_HASH_LOW + oft, hash_lo);
231 t3_write_reg(adap, A_XGM_RX_HASH_HIGH + oft, hash_hi);
232 return 0;
233}
234
235int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu)
236{
237 int hwm, lwm;
238 unsigned int thres, v;
239 struct adapter *adap = mac->adapter;
240
 241 /*
 242 * MAX_FRAME_SIZE includes the 14-byte Ethernet header + FCS, mtu
 243 * doesn't; the HW max packet size register includes the header but
 244 * not the 4-byte FCS, hence mtu + 14 and MAX_FRAME_SIZE - 4 below. */
245 mtu += 14;
246 if (mtu > MAX_FRAME_SIZE - 4)
247 return -EINVAL;
248 t3_write_reg(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset, mtu);
249
250 /*
251 * Adjust the PAUSE frame watermarks. We always set the LWM, and the
252 * HWM only if flow-control is enabled.
253 */
254 hwm = max(MAC_RXFIFO_SIZE - 3 * mtu, MAC_RXFIFO_SIZE / 2U);
255 hwm = min(hwm, 3 * MAC_RXFIFO_SIZE / 4 + 1024);
256 lwm = hwm - 1024;
257 v = t3_read_reg(adap, A_XGM_RXFIFO_CFG + mac->offset);
258 v &= ~V_RXFIFOPAUSELWM(M_RXFIFOPAUSELWM);
259 v |= V_RXFIFOPAUSELWM(lwm / 8);
260 if (G_RXFIFOPAUSEHWM(v))
261 v = (v & ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM)) |
262 V_RXFIFOPAUSEHWM(hwm / 8);
263 t3_write_reg(adap, A_XGM_RXFIFO_CFG + mac->offset, v);
264
265 /* Adjust the TX FIFO threshold based on the MTU */
266 thres = (adap->params.vpd.cclk * 1000) / 15625;
267 thres = (thres * mtu) / 1000;
268 if (is_10G(adap))
269 thres /= 10;
270 thres = mtu > thres ? (mtu - thres + 7) / 8 : 0;
271 thres = max(thres, 8U); /* need at least 8 */
272 t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + mac->offset,
273 V_TXFIFOTHRESH(M_TXFIFOTHRESH), V_TXFIFOTHRESH(thres));
274 return 0;
275}
276
277int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc)
278{
279 u32 val;
280 struct adapter *adap = mac->adapter;
281 unsigned int oft = mac->offset;
282
283 if (duplex >= 0 && duplex != DUPLEX_FULL)
284 return -EINVAL;
285 if (speed >= 0) {
286 if (speed == SPEED_10)
287 val = V_PORTSPEED(0);
288 else if (speed == SPEED_100)
289 val = V_PORTSPEED(1);
290 else if (speed == SPEED_1000)
291 val = V_PORTSPEED(2);
292 else if (speed == SPEED_10000)
293 val = V_PORTSPEED(3);
294 else
295 return -EINVAL;
296
297 t3_set_reg_field(adap, A_XGM_PORT_CFG + oft,
298 V_PORTSPEED(M_PORTSPEED), val);
299 }
300
301 val = t3_read_reg(adap, A_XGM_RXFIFO_CFG + oft);
302 val &= ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM);
303 if (fc & PAUSE_TX)
304 val |= V_RXFIFOPAUSEHWM(G_RXFIFOPAUSELWM(val) + 128); /* +1KB */
305 t3_write_reg(adap, A_XGM_RXFIFO_CFG + oft, val);
306
307 t3_set_reg_field(adap, A_XGM_TX_CFG + oft, F_TXPAUSEEN,
308 (fc & PAUSE_RX) ? F_TXPAUSEEN : 0);
309 return 0;
310}
311
312int t3_mac_enable(struct cmac *mac, int which)
313{
314 int idx = macidx(mac);
315 struct adapter *adap = mac->adapter;
316 unsigned int oft = mac->offset;
317
318 if (which & MAC_DIRECTION_TX) {
319 t3_write_reg(adap, A_XGM_TX_CTRL + oft, F_TXEN);
320 t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
321 t3_write_reg(adap, A_TP_PIO_DATA, 0xbf000001);
322 t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_MODE);
323 t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx, 1 << idx);
324 }
325 if (which & MAC_DIRECTION_RX)
326 t3_write_reg(adap, A_XGM_RX_CTRL + oft, F_RXEN);
327 return 0;
328}
329
330int t3_mac_disable(struct cmac *mac, int which)
331{
332 int idx = macidx(mac);
333 struct adapter *adap = mac->adapter;
334
335 if (which & MAC_DIRECTION_TX) {
336 t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, 0);
337 t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
338 t3_write_reg(adap, A_TP_PIO_DATA, 0xc000001f);
339 t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_MODE);
340 t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx, 0);
341 }
342 if (which & MAC_DIRECTION_RX)
343 t3_write_reg(adap, A_XGM_RX_CTRL + mac->offset, 0);
344 return 0;
345}
346
347/*
348 * This function is called periodically to accumulate the current values of the
349 * RMON counters into the port statistics. Since the packet counters are only
350 * 32 bits they can overflow in ~286 secs at 10G, so the function should be
 351 * called more frequently than that. The byte counters are 45 bits wide
 352 * and would overflow in ~7.8 hours.
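 * (At 10G, minimum-size frames arrive at roughly 14.9 Mpps and the byte
 * rate is 1.25 GB/s; 2^32 / 14.9e6 and 2^45 / 1.25e9 give those figures.)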
353 */
354const struct mac_stats *t3_mac_update_stats(struct cmac *mac)
355{
356#define RMON_READ(mac, addr) t3_read_reg(mac->adapter, addr + mac->offset)
357#define RMON_UPDATE(mac, name, reg) \
358 (mac)->stats.name += (u64)RMON_READ(mac, A_XGM_STAT_##reg)
359#define RMON_UPDATE64(mac, name, reg_lo, reg_hi) \
360 (mac)->stats.name += RMON_READ(mac, A_XGM_STAT_##reg_lo) + \
361 ((u64)RMON_READ(mac, A_XGM_STAT_##reg_hi) << 32)
362
363 u32 v, lo;
364
365 RMON_UPDATE64(mac, rx_octets, RX_BYTES_LOW, RX_BYTES_HIGH);
366 RMON_UPDATE64(mac, rx_frames, RX_FRAMES_LOW, RX_FRAMES_HIGH);
367 RMON_UPDATE(mac, rx_mcast_frames, RX_MCAST_FRAMES);
368 RMON_UPDATE(mac, rx_bcast_frames, RX_BCAST_FRAMES);
369 RMON_UPDATE(mac, rx_fcs_errs, RX_CRC_ERR_FRAMES);
370 RMON_UPDATE(mac, rx_pause, RX_PAUSE_FRAMES);
371 RMON_UPDATE(mac, rx_jabber, RX_JABBER_FRAMES);
372 RMON_UPDATE(mac, rx_short, RX_SHORT_FRAMES);
373 RMON_UPDATE(mac, rx_symbol_errs, RX_SYM_CODE_ERR_FRAMES);
374
375 RMON_UPDATE(mac, rx_too_long, RX_OVERSIZE_FRAMES);
376 mac->stats.rx_too_long += RMON_READ(mac, A_XGM_RX_MAX_PKT_SIZE_ERR_CNT);
377
378 RMON_UPDATE(mac, rx_frames_64, RX_64B_FRAMES);
379 RMON_UPDATE(mac, rx_frames_65_127, RX_65_127B_FRAMES);
380 RMON_UPDATE(mac, rx_frames_128_255, RX_128_255B_FRAMES);
381 RMON_UPDATE(mac, rx_frames_256_511, RX_256_511B_FRAMES);
382 RMON_UPDATE(mac, rx_frames_512_1023, RX_512_1023B_FRAMES);
383 RMON_UPDATE(mac, rx_frames_1024_1518, RX_1024_1518B_FRAMES);
384 RMON_UPDATE(mac, rx_frames_1519_max, RX_1519_MAXB_FRAMES);
385
386 RMON_UPDATE64(mac, tx_octets, TX_BYTE_LOW, TX_BYTE_HIGH);
387 RMON_UPDATE64(mac, tx_frames, TX_FRAME_LOW, TX_FRAME_HIGH);
388 RMON_UPDATE(mac, tx_mcast_frames, TX_MCAST);
389 RMON_UPDATE(mac, tx_bcast_frames, TX_BCAST);
390 RMON_UPDATE(mac, tx_pause, TX_PAUSE);
 391 /* This counts error frames in general (bad FCS, underrun, etc.). */
392 RMON_UPDATE(mac, tx_underrun, TX_ERR_FRAMES);
393
394 RMON_UPDATE(mac, tx_frames_64, TX_64B_FRAMES);
395 RMON_UPDATE(mac, tx_frames_65_127, TX_65_127B_FRAMES);
396 RMON_UPDATE(mac, tx_frames_128_255, TX_128_255B_FRAMES);
397 RMON_UPDATE(mac, tx_frames_256_511, TX_256_511B_FRAMES);
398 RMON_UPDATE(mac, tx_frames_512_1023, TX_512_1023B_FRAMES);
399 RMON_UPDATE(mac, tx_frames_1024_1518, TX_1024_1518B_FRAMES);
400 RMON_UPDATE(mac, tx_frames_1519_max, TX_1519_MAXB_FRAMES);
401
402 /* The next stat isn't clear-on-read. */
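	/*
	 * Accumulate it as a 32-bit delta: unsigned wraparound keeps the
	 * difference correct even when the MIB counter wraps.
	 */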
403 t3_write_reg(mac->adapter, A_TP_MIB_INDEX, mac->offset ? 51 : 50);
404 v = t3_read_reg(mac->adapter, A_TP_MIB_RDATA);
405 lo = (u32) mac->stats.rx_cong_drops;
406 mac->stats.rx_cong_drops += (u64) (v - lo);
407
408 return &mac->stats;
409}
diff --git a/drivers/net/declance.c b/drivers/net/declance.c
index 4ae0fed7122e..9f7e1db8ce62 100644
--- a/drivers/net/declance.c
+++ b/drivers/net/declance.c
@@ -5,7 +5,7 @@
5 * 5 *
6 * adopted from sunlance.c by Richard van den Berg 6 * adopted from sunlance.c by Richard van den Berg
7 * 7 *
8 * Copyright (C) 2002, 2003, 2005 Maciej W. Rozycki 8 * Copyright (C) 2002, 2003, 2005, 2006 Maciej W. Rozycki
9 * 9 *
10 * additional sources: 10 * additional sources:
11 * - PMAD-AA TURBOchannel Ethernet Module Functional Specification, 11 * - PMAD-AA TURBOchannel Ethernet Module Functional Specification,
@@ -44,6 +44,8 @@
44 * v0.010: Fixes for the PMAD mapping of the LANCE buffer and for the 44 * v0.010: Fixes for the PMAD mapping of the LANCE buffer and for the
45 * PMAX requirement to only use halfword accesses to the 45 * PMAX requirement to only use halfword accesses to the
46 * buffer. macro 46 * buffer. macro
47 *
48 * v0.011: Converted the PMAD to the driver model. macro
47 */ 49 */
48 50
49#include <linux/crc32.h> 51#include <linux/crc32.h>
@@ -58,6 +60,7 @@
58#include <linux/spinlock.h> 60#include <linux/spinlock.h>
59#include <linux/stddef.h> 61#include <linux/stddef.h>
60#include <linux/string.h> 62#include <linux/string.h>
63#include <linux/tc.h>
61#include <linux/types.h> 64#include <linux/types.h>
62 65
63#include <asm/addrspace.h> 66#include <asm/addrspace.h>
@@ -69,15 +72,16 @@
69#include <asm/dec/kn01.h> 72#include <asm/dec/kn01.h>
70#include <asm/dec/machtype.h> 73#include <asm/dec/machtype.h>
71#include <asm/dec/system.h> 74#include <asm/dec/system.h>
72#include <asm/dec/tc.h>
73 75
74static char version[] __devinitdata = 76static char version[] __devinitdata =
75"declance.c: v0.010 by Linux MIPS DECstation task force\n"; 77"declance.c: v0.011 by Linux MIPS DECstation task force\n";
76 78
77MODULE_AUTHOR("Linux MIPS DECstation task force"); 79MODULE_AUTHOR("Linux MIPS DECstation task force");
78MODULE_DESCRIPTION("DEC LANCE (DECstation onboard, PMAD-xx) driver"); 80MODULE_DESCRIPTION("DEC LANCE (DECstation onboard, PMAD-xx) driver");
79MODULE_LICENSE("GPL"); 81MODULE_LICENSE("GPL");
80 82
83#define __unused __attribute__ ((unused))
84
81/* 85/*
82 * card types 86 * card types
83 */ 87 */
@@ -246,7 +250,6 @@ struct lance_init_block {
246struct lance_private { 250struct lance_private {
247 struct net_device *next; 251 struct net_device *next;
248 int type; 252 int type;
249 int slot;
250 int dma_irq; 253 int dma_irq;
251 volatile struct lance_regs *ll; 254 volatile struct lance_regs *ll;
252 255
@@ -288,6 +291,7 @@ struct lance_regs {
288 291
289int dec_lance_debug = 2; 292int dec_lance_debug = 2;
290 293
294static struct tc_driver dec_lance_tc_driver;
291static struct net_device *root_lance_dev; 295static struct net_device *root_lance_dev;
292 296
293static inline void writereg(volatile unsigned short *regptr, short value) 297static inline void writereg(volatile unsigned short *regptr, short value)
@@ -1023,7 +1027,7 @@ static void lance_set_multicast_retry(unsigned long _opaque)
1023 lance_set_multicast(dev); 1027 lance_set_multicast(dev);
1024} 1028}
1025 1029
1026static int __init dec_lance_init(const int type, const int slot) 1030static int __init dec_lance_probe(struct device *bdev, const int type)
1027{ 1031{
1028 static unsigned version_printed; 1032 static unsigned version_printed;
1029 static const char fmt[] = "declance%d"; 1033 static const char fmt[] = "declance%d";
@@ -1031,6 +1035,7 @@ static int __init dec_lance_init(const int type, const int slot)
1031 struct net_device *dev; 1035 struct net_device *dev;
1032 struct lance_private *lp; 1036 struct lance_private *lp;
1033 volatile struct lance_regs *ll; 1037 volatile struct lance_regs *ll;
1038 resource_size_t start = 0, len = 0;
1034 int i, ret; 1039 int i, ret;
1035 unsigned long esar_base; 1040 unsigned long esar_base;
1036 unsigned char *esar; 1041 unsigned char *esar;
@@ -1038,14 +1043,18 @@ static int __init dec_lance_init(const int type, const int slot)
1038 if (dec_lance_debug && version_printed++ == 0) 1043 if (dec_lance_debug && version_printed++ == 0)
1039 printk(version); 1044 printk(version);
1040 1045
1041 i = 0; 1046 if (bdev)
1042 dev = root_lance_dev; 1047 snprintf(name, sizeof(name), "%s", bdev->bus_id);
1043 while (dev) { 1048 else {
1044 i++; 1049 i = 0;
1045 lp = (struct lance_private *)dev->priv; 1050 dev = root_lance_dev;
1046 dev = lp->next; 1051 while (dev) {
1052 i++;
1053 lp = (struct lance_private *)dev->priv;
1054 dev = lp->next;
1055 }
1056 snprintf(name, sizeof(name), fmt, i);
1047 } 1057 }
1048 snprintf(name, sizeof(name), fmt, i);
1049 1058
1050 dev = alloc_etherdev(sizeof(struct lance_private)); 1059 dev = alloc_etherdev(sizeof(struct lance_private));
1051 if (!dev) { 1060 if (!dev) {
@@ -1063,7 +1072,6 @@ static int __init dec_lance_init(const int type, const int slot)
1063 spin_lock_init(&lp->lock); 1072 spin_lock_init(&lp->lock);
1064 1073
1065 lp->type = type; 1074 lp->type = type;
1066 lp->slot = slot;
1067 switch (type) { 1075 switch (type) {
1068 case ASIC_LANCE: 1076 case ASIC_LANCE:
1069 dev->base_addr = CKSEG1ADDR(dec_kn_slot_base + IOASIC_LANCE); 1077 dev->base_addr = CKSEG1ADDR(dec_kn_slot_base + IOASIC_LANCE);
@@ -1110,12 +1118,22 @@ static int __init dec_lance_init(const int type, const int slot)
1110 break; 1118 break;
1111#ifdef CONFIG_TC 1119#ifdef CONFIG_TC
1112 case PMAD_LANCE: 1120 case PMAD_LANCE:
1113 claim_tc_card(slot); 1121 dev_set_drvdata(bdev, dev);
1122
1123 start = to_tc_dev(bdev)->resource.start;
1124 len = to_tc_dev(bdev)->resource.end - start + 1;
1125 if (!request_mem_region(start, len, bdev->bus_id)) {
1126 printk(KERN_ERR
1127 "%s: Unable to reserve MMIO resource\n",
1128 bdev->bus_id);
1129 ret = -EBUSY;
1130 goto err_out_dev;
1131 }
1114 1132
1115 dev->mem_start = CKSEG1ADDR(get_tc_base_addr(slot)); 1133 dev->mem_start = CKSEG1ADDR(start);
1116 dev->mem_end = dev->mem_start + 0x100000; 1134 dev->mem_end = dev->mem_start + 0x100000;
1117 dev->base_addr = dev->mem_start + 0x100000; 1135 dev->base_addr = dev->mem_start + 0x100000;
1118 dev->irq = get_tc_irq_nr(slot); 1136 dev->irq = to_tc_dev(bdev)->interrupt;
1119 esar_base = dev->mem_start + 0x1c0002; 1137 esar_base = dev->mem_start + 0x1c0002;
1120 lp->dma_irq = -1; 1138 lp->dma_irq = -1;
1121 1139
@@ -1174,7 +1192,7 @@ static int __init dec_lance_init(const int type, const int slot)
1174 printk(KERN_ERR "%s: declance_init called with unknown type\n", 1192 printk(KERN_ERR "%s: declance_init called with unknown type\n",
1175 name); 1193 name);
1176 ret = -ENODEV; 1194 ret = -ENODEV;
1177 goto err_out_free_dev; 1195 goto err_out_dev;
1178 } 1196 }
1179 1197
1180 ll = (struct lance_regs *) dev->base_addr; 1198 ll = (struct lance_regs *) dev->base_addr;
@@ -1188,7 +1206,7 @@ static int __init dec_lance_init(const int type, const int slot)
1188 "%s: Ethernet station address prom not found!\n", 1206 "%s: Ethernet station address prom not found!\n",
1189 name); 1207 name);
1190 ret = -ENODEV; 1208 ret = -ENODEV;
1191 goto err_out_free_dev; 1209 goto err_out_resource;
1192 } 1210 }
1193 /* Check the prom contents */ 1211 /* Check the prom contents */
1194 for (i = 0; i < 8; i++) { 1212 for (i = 0; i < 8; i++) {
@@ -1198,7 +1216,7 @@ static int __init dec_lance_init(const int type, const int slot)
1198 printk(KERN_ERR "%s: Something is wrong with the " 1216 printk(KERN_ERR "%s: Something is wrong with the "
1199 "ethernet station address prom!\n", name); 1217 "ethernet station address prom!\n", name);
1200 ret = -ENODEV; 1218 ret = -ENODEV;
1201 goto err_out_free_dev; 1219 goto err_out_resource;
1202 } 1220 }
1203 } 1221 }
1204 1222
@@ -1255,48 +1273,51 @@ static int __init dec_lance_init(const int type, const int slot)
1255 if (ret) { 1273 if (ret) {
1256 printk(KERN_ERR 1274 printk(KERN_ERR
1257 "%s: Unable to register netdev, aborting.\n", name); 1275 "%s: Unable to register netdev, aborting.\n", name);
1258 goto err_out_free_dev; 1276 goto err_out_resource;
1259 } 1277 }
1260 1278
1261 lp->next = root_lance_dev; 1279 if (!bdev) {
1262 root_lance_dev = dev; 1280 lp->next = root_lance_dev;
1281 root_lance_dev = dev;
1282 }
1263 1283
1264 printk("%s: registered as %s.\n", name, dev->name); 1284 printk("%s: registered as %s.\n", name, dev->name);
1265 return 0; 1285 return 0;
1266 1286
1267err_out_free_dev: 1287err_out_resource:
1288 if (bdev)
1289 release_mem_region(start, len);
1290
1291err_out_dev:
1268 free_netdev(dev); 1292 free_netdev(dev);
1269 1293
1270err_out: 1294err_out:
1271 return ret; 1295 return ret;
1272} 1296}
1273 1297
1298static void __exit dec_lance_remove(struct device *bdev)
1299{
1300 struct net_device *dev = dev_get_drvdata(bdev);
1301 resource_size_t start, len;
1302
1303 unregister_netdev(dev);
1304 start = to_tc_dev(bdev)->resource.start;
1305 len = to_tc_dev(bdev)->resource.end - start + 1;
1306 release_mem_region(start, len);
1307 free_netdev(dev);
1308}
1274 1309
1275/* Find all the lance cards on the system and initialize them */ 1310/* Find all the lance cards on the system and initialize them */
1276static int __init dec_lance_probe(void) 1311static int __init dec_lance_platform_probe(void)
1277{ 1312{
1278 int count = 0; 1313 int count = 0;
1279 1314
1280 /* Scan slots for PMAD-AA cards first. */
1281#ifdef CONFIG_TC
1282 if (TURBOCHANNEL) {
1283 int slot;
1284
1285 while ((slot = search_tc_card("PMAD-AA")) >= 0) {
1286 if (dec_lance_init(PMAD_LANCE, slot) < 0)
1287 break;
1288 count++;
1289 }
1290 }
1291#endif
1292
1293 /* Then handle onboard devices. */
1294 if (dec_interrupt[DEC_IRQ_LANCE] >= 0) { 1315 if (dec_interrupt[DEC_IRQ_LANCE] >= 0) {
1295 if (dec_interrupt[DEC_IRQ_LANCE_MERR] >= 0) { 1316 if (dec_interrupt[DEC_IRQ_LANCE_MERR] >= 0) {
1296 if (dec_lance_init(ASIC_LANCE, -1) >= 0) 1317 if (dec_lance_probe(NULL, ASIC_LANCE) >= 0)
1297 count++; 1318 count++;
1298 } else if (!TURBOCHANNEL) { 1319 } else if (!TURBOCHANNEL) {
1299 if (dec_lance_init(PMAX_LANCE, -1) >= 0) 1320 if (dec_lance_probe(NULL, PMAX_LANCE) >= 0)
1300 count++; 1321 count++;
1301 } 1322 }
1302 } 1323 }
@@ -1304,21 +1325,70 @@ static int __init dec_lance_probe(void)
1304 return (count > 0) ? 0 : -ENODEV; 1325 return (count > 0) ? 0 : -ENODEV;
1305} 1326}
1306 1327
1307static void __exit dec_lance_cleanup(void) 1328static void __exit dec_lance_platform_remove(void)
1308{ 1329{
1309 while (root_lance_dev) { 1330 while (root_lance_dev) {
1310 struct net_device *dev = root_lance_dev; 1331 struct net_device *dev = root_lance_dev;
1311 struct lance_private *lp = netdev_priv(dev); 1332 struct lance_private *lp = netdev_priv(dev);
1312 1333
1313 unregister_netdev(dev); 1334 unregister_netdev(dev);
1314#ifdef CONFIG_TC
1315 if (lp->slot >= 0)
1316 release_tc_card(lp->slot);
1317#endif
1318 root_lance_dev = lp->next; 1335 root_lance_dev = lp->next;
1319 free_netdev(dev); 1336 free_netdev(dev);
1320 } 1337 }
1321} 1338}
1322 1339
1323module_init(dec_lance_probe); 1340#ifdef CONFIG_TC
1324module_exit(dec_lance_cleanup); 1341static int __init dec_lance_tc_probe(struct device *dev);
1342static int __exit dec_lance_tc_remove(struct device *dev);
1343
1344static const struct tc_device_id dec_lance_tc_table[] = {
1345 { "DEC ", "PMAD-AA " },
1346 { }
1347};
1348MODULE_DEVICE_TABLE(tc, dec_lance_tc_table);
1349
1350static struct tc_driver dec_lance_tc_driver = {
1351 .id_table = dec_lance_tc_table,
1352 .driver = {
1353 .name = "declance",
1354 .bus = &tc_bus_type,
1355 .probe = dec_lance_tc_probe,
1356 .remove = __exit_p(dec_lance_tc_remove),
1357 },
1358};
1359
1360static int __init dec_lance_tc_probe(struct device *dev)
1361{
1362 int status = dec_lance_probe(dev, PMAD_LANCE);
1363 if (!status)
1364 get_device(dev);
1365 return status;
1366}
1367
1368static int __exit dec_lance_tc_remove(struct device *dev)
1369{
1370 put_device(dev);
1371 dec_lance_remove(dev);
1372 return 0;
1373}
1374#endif
1375
1376static int __init dec_lance_init(void)
1377{
1378 int status;
1379
1380 status = tc_register_driver(&dec_lance_tc_driver);
1381 if (!status)
1382 dec_lance_platform_probe();
1383 return status;
1384}
1385
1386static void __exit dec_lance_exit(void)
1387{
1388 dec_lance_platform_remove();
1389 tc_unregister_driver(&dec_lance_tc_driver);
1390}
1391
1392
1393module_init(dec_lance_init);
1394module_exit(dec_lance_exit);
diff --git a/drivers/net/defxx.c b/drivers/net/defxx.c
index dc3ab3b5c8cb..07d2731c1aa8 100644
--- a/drivers/net/defxx.c
+++ b/drivers/net/defxx.c
@@ -10,10 +10,12 @@
10 * 10 *
11 * Abstract: 11 * Abstract:
12 * A Linux device driver supporting the Digital Equipment Corporation 12 * A Linux device driver supporting the Digital Equipment Corporation
13 * FDDI EISA and PCI controller families. Supported adapters include: 13 * FDDI TURBOchannel, EISA and PCI controller families. Supported
14 * adapters include:
14 * 15 *
15 * DEC FDDIcontroller/EISA (DEFEA) 16 * DEC FDDIcontroller/TURBOchannel (DEFTA)
16 * DEC FDDIcontroller/PCI (DEFPA) 17 * DEC FDDIcontroller/EISA (DEFEA)
18 * DEC FDDIcontroller/PCI (DEFPA)
17 * 19 *
18 * The original author: 20 * The original author:
19 * LVS Lawrence V. Stefani <lstefani@yahoo.com> 21 * LVS Lawrence V. Stefani <lstefani@yahoo.com>
@@ -193,24 +195,27 @@
193 * 14 Aug 2004 macro Fix device names reported. 195 * 14 Aug 2004 macro Fix device names reported.
194 * 14 Jun 2005 macro Use irqreturn_t. 196 * 14 Jun 2005 macro Use irqreturn_t.
195 * 23 Oct 2006 macro Big-endian host support. 197 * 23 Oct 2006 macro Big-endian host support.
198 * 14 Dec 2006 macro TURBOchannel support.
196 */ 199 */
197 200
198/* Include files */ 201/* Include files */
199 202#include <linux/bitops.h>
200#include <linux/module.h>
201#include <linux/kernel.h>
202#include <linux/string.h>
203#include <linux/errno.h>
204#include <linux/ioport.h>
205#include <linux/slab.h>
206#include <linux/interrupt.h>
207#include <linux/pci.h>
208#include <linux/delay.h> 203#include <linux/delay.h>
204#include <linux/dma-mapping.h>
205#include <linux/eisa.h>
206#include <linux/errno.h>
207#include <linux/fddidevice.h>
209#include <linux/init.h> 208#include <linux/init.h>
209#include <linux/interrupt.h>
210#include <linux/ioport.h>
211#include <linux/kernel.h>
212#include <linux/module.h>
210#include <linux/netdevice.h> 213#include <linux/netdevice.h>
211#include <linux/fddidevice.h> 214#include <linux/pci.h>
212#include <linux/skbuff.h> 215#include <linux/skbuff.h>
213#include <linux/bitops.h> 216#include <linux/slab.h>
217#include <linux/string.h>
218#include <linux/tc.h>
214 219
215#include <asm/byteorder.h> 220#include <asm/byteorder.h>
216#include <asm/io.h> 221#include <asm/io.h>
@@ -219,8 +224,8 @@
219 224
220/* Version information string should be updated prior to each new release! */ 225/* Version information string should be updated prior to each new release! */
221#define DRV_NAME "defxx" 226#define DRV_NAME "defxx"
222#define DRV_VERSION "v1.09" 227#define DRV_VERSION "v1.10"
223#define DRV_RELDATE "2006/10/23" 228#define DRV_RELDATE "2006/12/14"
224 229
225static char version[] __devinitdata = 230static char version[] __devinitdata =
226 DRV_NAME ": " DRV_VERSION " " DRV_RELDATE 231 DRV_NAME ": " DRV_VERSION " " DRV_RELDATE
@@ -235,12 +240,41 @@ static char version[] __devinitdata =
235 */ 240 */
236#define NEW_SKB_SIZE (PI_RCV_DATA_K_SIZE_MAX+128) 241#define NEW_SKB_SIZE (PI_RCV_DATA_K_SIZE_MAX+128)
237 242
243#define __unused __attribute__ ((unused))
244
245#ifdef CONFIG_PCI
246#define DFX_BUS_PCI(dev) (dev->bus == &pci_bus_type)
247#else
248#define DFX_BUS_PCI(dev) 0
249#endif
250
251#ifdef CONFIG_EISA
252#define DFX_BUS_EISA(dev) (dev->bus == &eisa_bus_type)
253#else
254#define DFX_BUS_EISA(dev) 0
255#endif
256
257#ifdef CONFIG_TC
258#define DFX_BUS_TC(dev) (dev->bus == &tc_bus_type)
259#else
260#define DFX_BUS_TC(dev) 0
261#endif
262
263#ifdef CONFIG_DEFXX_MMIO
264#define DFX_MMIO 1
265#else
266#define DFX_MMIO 0
267#endif
268
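The DFX_BUS_*() and DFX_MMIO macros above fold to the constant 0 whenever the corresponding bus support is compiled out, so every bus-specific branch in the rewritten driver becomes dead code the compiler can discard. A minimal stand-alone sketch of that pattern (all names here are invented for illustration):

	#include <stdio.h>

	#define HAVE_BUS_A 1			/* stands in for CONFIG_PCI et al. */

	#ifdef HAVE_BUS_A
	#define BUS_A(d) ((d)->bus == 'A')
	#else
	#define BUS_A(d) 0
	#endif

	#ifdef HAVE_BUS_B
	#define BUS_B(d) ((d)->bus == 'B')
	#else
	#define BUS_B(d) 0			/* test folds to constant false */
	#endif

	struct fake_dev { char bus; };

	int main(void)
	{
		struct fake_dev d = { .bus = 'A' };

		if (BUS_A(&d))
			printf("bus A path\n");
		if (BUS_B(&d))			/* eliminated when HAVE_BUS_B is unset */
			printf("bus B path\n");
		return 0;
	}
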
238/* Define module-wide (static) routines */ 269/* Define module-wide (static) routines */
239 270
240static void dfx_bus_init(struct net_device *dev); 271static void dfx_bus_init(struct net_device *dev);
272static void dfx_bus_uninit(struct net_device *dev);
241static void dfx_bus_config_check(DFX_board_t *bp); 273static void dfx_bus_config_check(DFX_board_t *bp);
242 274
243static int dfx_driver_init(struct net_device *dev, const char *print_name); 275static int dfx_driver_init(struct net_device *dev,
276 const char *print_name,
277 resource_size_t bar_start);
244static int dfx_adap_init(DFX_board_t *bp, int get_buffers); 278static int dfx_adap_init(DFX_board_t *bp, int get_buffers);
245 279
246static int dfx_open(struct net_device *dev); 280static int dfx_open(struct net_device *dev);
@@ -273,13 +307,13 @@ static void dfx_xmt_flush(DFX_board_t *bp);
273 307
274/* Define module-wide (static) variables */ 308/* Define module-wide (static) variables */
275 309
276static struct net_device *root_dfx_eisa_dev; 310static struct pci_driver dfx_pci_driver;
311static struct eisa_driver dfx_eisa_driver;
312static struct tc_driver dfx_tc_driver;
277 313
278 314
279/* 315/*
280 * ======================= 316 * =======================
281 * = dfx_port_write_byte =
282 * = dfx_port_read_byte =
283 * = dfx_port_write_long = 317 * = dfx_port_write_long =
284 * = dfx_port_read_long = 318 * = dfx_port_read_long =
285 * ======================= 319 * =======================
@@ -291,12 +325,11 @@ static struct net_device *root_dfx_eisa_dev;
291 * None 325 * None
292 * 326 *
293 * Arguments: 327 * Arguments:
294 * bp - pointer to board information 328 * bp - pointer to board information
295 * offset - register offset from base I/O address 329 * offset - register offset from base I/O address
296 * data - for dfx_port_write_byte and dfx_port_write_long, this 330 * data - for dfx_port_write_long, this is a value to write;
297 * is a value to write. 331 * for dfx_port_read_long, this is a pointer to store
298 * for dfx_port_read_byte and dfx_port_read_byte, this 332 * the read value
299 * is a pointer to store the read value.
300 * 333 *
301 * Functional Description: 334 * Functional Description:
302 * These routines perform the correct operation to read or write 335 * These routines perform the correct operation to read or write
@@ -310,7 +343,7 @@ static struct net_device *root_dfx_eisa_dev;
310 * registers using the register offsets defined in DEFXX.H. 343 * registers using the register offsets defined in DEFXX.H.
311 * 344 *
312 * PCI port block base addresses are assigned by the PCI BIOS or system 345 * PCI port block base addresses are assigned by the PCI BIOS or system
313 * firmware. There is one 128 byte port block which can be accessed. It 346 * firmware. There is one 128 byte port block which can be accessed. It
314 * allows for I/O mapping of both PDQ and PFI registers using the register 347 * allows for I/O mapping of both PDQ and PFI registers using the register
315 * offsets defined in DEFXX.H. 348 * offsets defined in DEFXX.H.
316 * 349 *
@@ -318,7 +351,7 @@ static struct net_device *root_dfx_eisa_dev;
318 * None 351 * None
319 * 352 *
320 * Assumptions: 353 * Assumptions:
321 * bp->base_addr is a valid base I/O address for this adapter. 354 * bp->base is a valid base I/O address for this adapter.
322 * offset is a valid register offset for this adapter. 355 * offset is a valid register offset for this adapter.
323 * 356 *
324 * Side Effects: 357 * Side Effects:
@@ -329,69 +362,135 @@ static struct net_device *root_dfx_eisa_dev;
329 * advantage of strict data type checking. 362 * advantage of strict data type checking.
330 */ 363 */
331 364
332static inline void dfx_port_write_byte( 365static inline void dfx_writel(DFX_board_t *bp, int offset, u32 data)
333 DFX_board_t *bp, 366{
334 int offset, 367 writel(data, bp->base.mem + offset);
335 u8 data 368 mb();
336 ) 369}
337 370
338 { 371static inline void dfx_outl(DFX_board_t *bp, int offset, u32 data)
339 u16 port = bp->base_addr + offset; 372{
373 outl(data, bp->base.port + offset);
374}
340 375
341 outb(data, port); 376static void dfx_port_write_long(DFX_board_t *bp, int offset, u32 data)
342 } 377{
378 struct device __unused *bdev = bp->bus_dev;
379 int dfx_bus_tc = DFX_BUS_TC(bdev);
380 int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
343 381
344static inline void dfx_port_read_byte( 382 if (dfx_use_mmio)
345 DFX_board_t *bp, 383 dfx_writel(bp, offset, data);
346 int offset, 384 else
347 u8 *data 385 dfx_outl(bp, offset, data);
348 ) 386}
349 387
350 {
351 u16 port = bp->base_addr + offset;
352 388
353 *data = inb(port); 389static inline void dfx_readl(DFX_board_t *bp, int offset, u32 *data)
354 } 390{
391 mb();
392 *data = readl(bp->base.mem + offset);
393}
355 394
356static inline void dfx_port_write_long( 395static inline void dfx_inl(DFX_board_t *bp, int offset, u32 *data)
357 DFX_board_t *bp, 396{
358 int offset, 397 *data = inl(bp->base.port + offset);
359 u32 data 398}
360 )
361 399
362 { 400static void dfx_port_read_long(DFX_board_t *bp, int offset, u32 *data)
363 u16 port = bp->base_addr + offset; 401{
402 struct device __unused *bdev = bp->bus_dev;
403 int dfx_bus_tc = DFX_BUS_TC(bdev);
404 int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
364 405
365 outl(data, port); 406 if (dfx_use_mmio)
366 } 407 dfx_readl(bp, offset, data);
408 else
409 dfx_inl(bp, offset, data);
410}
367 411
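For reference, the shape of the new accessor layer in isolation: one inline per access method, plus a dispatcher that chooses MMIO or port I/O per call. This is a sketch rather than the driver's exact code; use_mmio stands in for the DFX_MMIO || DFX_BUS_TC computation, which is a compile-time constant on most configurations, so the branch vanishes:

	#include <linux/io.h>
	#include <linux/types.h>

	struct sketch_board {
		union {
			void __iomem *mem;	/* MMIO mapping */
			int port;		/* port I/O base */
		} base;
		int use_mmio;
	};

	static inline void sketch_write(struct sketch_board *bp, int offset, u32 data)
	{
		if (bp->use_mmio) {
			writel(data, bp->base.mem + offset);
			mb();		/* order the CSR write, as dfx_writel() does */
		} else {
			outl(data, bp->base.port + offset);
		}
	}
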
368static inline void dfx_port_read_long(
369 DFX_board_t *bp,
370 int offset,
371 u32 *data
372 )
373 412
374 { 413/*
375 u16 port = bp->base_addr + offset; 414 * ================
415 * = dfx_get_bars =
416 * ================
417 *
418 * Overview:
419 * Retrieves the address range used to access control and status
420 * registers.
421 *
422 * Returns:
423 * None
424 *
425 * Arguments:
426 * bdev - pointer to device information
427 * bar_start - pointer to store the start address
428 * bar_len - pointer to store the length of the area
429 *
430 * Assumptions:
 431 *	bdev points to a valid, registered device on one of the supported buses.
432 *
433 * Side Effects:
434 * None
435 */
436static void dfx_get_bars(struct device *bdev,
437 resource_size_t *bar_start, resource_size_t *bar_len)
438{
439 int dfx_bus_pci = DFX_BUS_PCI(bdev);
440 int dfx_bus_eisa = DFX_BUS_EISA(bdev);
441 int dfx_bus_tc = DFX_BUS_TC(bdev);
442 int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
376 443
377 *data = inl(port); 444 if (dfx_bus_pci) {
378 } 445 int num = dfx_use_mmio ? 0 : 1;
379 446
447 *bar_start = pci_resource_start(to_pci_dev(bdev), num);
448 *bar_len = pci_resource_len(to_pci_dev(bdev), num);
449 }
450 if (dfx_bus_eisa) {
451 unsigned long base_addr = to_eisa_device(bdev)->base_addr;
452 resource_size_t bar;
453
454 if (dfx_use_mmio) {
455 bar = inb(base_addr + PI_ESIC_K_MEM_ADD_CMP_2);
456 bar <<= 8;
457 bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_CMP_1);
458 bar <<= 8;
459 bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_CMP_0);
460 bar <<= 16;
461 *bar_start = bar;
462 bar = inb(base_addr + PI_ESIC_K_MEM_ADD_MASK_2);
463 bar <<= 8;
464 bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_MASK_1);
465 bar <<= 8;
466 bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_MASK_0);
467 bar <<= 16;
468 *bar_len = (bar | PI_MEM_ADD_MASK_M) + 1;
469 } else {
470 *bar_start = base_addr;
471 *bar_len = PI_ESIC_K_CSR_IO_LEN;
472 }
473 }
474 if (dfx_bus_tc) {
475 *bar_start = to_tc_dev(bdev)->resource.start +
476 PI_TC_K_CSR_OFFSET;
477 *bar_len = PI_TC_K_CSR_LEN;
478 }
479}
380 480
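The EISA branch of dfx_get_bars() assembles a 32-bit decode window from byte-wide ESIC registers: three address-compare bytes form bits 16-39 of the base, and the address-mask bytes give the window length as (mask | low bits) + 1. A stand-alone illustration of the arithmetic with made-up register values (real values come from inb() on the CMP/MASK registers):

	#include <stdio.h>
	#include <stdint.h>

	#define MEM_ADD_MASK_M 0x3ff	/* implicit low mask, as in defxx.h */

	int main(void)
	{
		uint8_t cmp2 = 0x00, cmp1 = 0x0d, cmp0 = 0x80;	/* sample CMP regs */
		uint8_t msk2 = 0x00, msk1 = 0x00, msk0 = 0x00;	/* sample MASK regs */
		uint64_t bar, len;

		bar = ((uint64_t)cmp2 << 16 | (uint64_t)cmp1 << 8 | cmp0) << 16;
		len = ((uint64_t)msk2 << 16 | (uint64_t)msk1 << 8 | msk0) << 16;
		len = (len | MEM_ADD_MASK_M) + 1;	/* window size in bytes */

		printf("base 0x%llx, len 0x%llx\n",
		       (unsigned long long)bar, (unsigned long long)len);
		return 0;
	}
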
381/* 481/*
382 * ============= 482 * ================
383 * = dfx_init_one_pci_or_eisa = 483 * = dfx_register =
384 * ============= 484 * ================
385 * 485 *
386 * Overview: 486 * Overview:
387 * Initializes a supported FDDI EISA or PCI controller 487 * Initializes a supported FDDI controller
388 * 488 *
389 * Returns: 489 * Returns:
390 * Condition code 490 * Condition code
391 * 491 *
392 * Arguments: 492 * Arguments:
393 * pdev - pointer to pci device information (NULL for EISA) 493 * bdev - pointer to device information
394 * ioaddr - pointer to port (NULL for PCI)
395 * 494 *
396 * Functional Description: 495 * Functional Description:
397 * 496 *
@@ -407,56 +506,74 @@ static inline void dfx_port_read_long(
407 * initialized and the board resources are read and stored in 506 * initialized and the board resources are read and stored in
408 * the device structure. 507 * the device structure.
409 */ 508 */
410static int __devinit dfx_init_one_pci_or_eisa(struct pci_dev *pdev, long ioaddr) 509static int __devinit dfx_register(struct device *bdev)
411{ 510{
412 static int version_disp; 511 static int version_disp;
413 char *print_name = DRV_NAME; 512 int dfx_bus_pci = DFX_BUS_PCI(bdev);
513 int dfx_bus_tc = DFX_BUS_TC(bdev);
514 int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
515 char *print_name = bdev->bus_id;
414 struct net_device *dev; 516 struct net_device *dev;
415 DFX_board_t *bp; /* board pointer */ 517 DFX_board_t *bp; /* board pointer */
 518	resource_size_t bar_start = 0;		/* start of the CSR region */
519 resource_size_t bar_len = 0; /* resource length */
416 int alloc_size; /* total buffer size used */ 520 int alloc_size; /* total buffer size used */
417 int err; 521 struct resource *region;
522 int err = 0;
418 523
419 if (!version_disp) { /* display version info if adapter is found */ 524 if (!version_disp) { /* display version info if adapter is found */
420 version_disp = 1; /* set display flag to TRUE so that */ 525 version_disp = 1; /* set display flag to TRUE so that */
421 printk(version); /* we only display this string ONCE */ 526 printk(version); /* we only display this string ONCE */
422 } 527 }
423 528
424 if (pdev != NULL)
425 print_name = pci_name(pdev);
426
427 dev = alloc_fddidev(sizeof(*bp)); 529 dev = alloc_fddidev(sizeof(*bp));
428 if (!dev) { 530 if (!dev) {
429 printk(KERN_ERR "%s: unable to allocate fddidev, aborting\n", 531 printk(KERN_ERR "%s: Unable to allocate fddidev, aborting\n",
430 print_name); 532 print_name);
431 return -ENOMEM; 533 return -ENOMEM;
432 } 534 }
433 535
434 /* Enable PCI device. */ 536 /* Enable PCI device. */
435 if (pdev != NULL) { 537 if (dfx_bus_pci && pci_enable_device(to_pci_dev(bdev))) {
436 err = pci_enable_device (pdev); 538 printk(KERN_ERR "%s: Cannot enable PCI device, aborting\n",
 437 		if (err) goto err_out;	 539 		err = -ENODEV;
 540 		goto err_out;
438 ioaddr = pci_resource_start (pdev, 1); 540 goto err_out;
439 } 541 }
440 542
441 SET_MODULE_OWNER(dev); 543 SET_MODULE_OWNER(dev);
442 if (pdev != NULL) 544 SET_NETDEV_DEV(dev, bdev);
443 SET_NETDEV_DEV(dev, &pdev->dev); 545
546 bp = netdev_priv(dev);
547 bp->bus_dev = bdev;
548 dev_set_drvdata(bdev, dev);
444 549
445 bp = dev->priv; 550 dfx_get_bars(bdev, &bar_start, &bar_len);
446 551
447 if (!request_region(ioaddr, 552 if (dfx_use_mmio)
448 pdev ? PFI_K_CSR_IO_LEN : PI_ESIC_K_CSR_IO_LEN, 553 region = request_mem_region(bar_start, bar_len, print_name);
449 print_name)) { 554 else
555 region = request_region(bar_start, bar_len, print_name);
556 if (!region) {
450 printk(KERN_ERR "%s: Cannot reserve I/O resource " 557 printk(KERN_ERR "%s: Cannot reserve I/O resource "
451 "0x%x @ 0x%lx, aborting\n", print_name, 558 "0x%lx @ 0x%lx, aborting\n",
452 pdev ? PFI_K_CSR_IO_LEN : PI_ESIC_K_CSR_IO_LEN, ioaddr); 559 print_name, (long)bar_len, (long)bar_start);
453 err = -EBUSY; 560 err = -EBUSY;
454 goto err_out; 561 goto err_out_disable;
455 } 562 }
456 563
457 /* Initialize new device structure */ 564 /* Set up I/O base address. */
565 if (dfx_use_mmio) {
566 bp->base.mem = ioremap_nocache(bar_start, bar_len);
567 if (!bp->base.mem) {
568 printk(KERN_ERR "%s: Cannot map MMIO\n", print_name);
 569 			err = -ENOMEM;
 570 			goto err_out_region;
570 }
571 } else {
572 bp->base.port = bar_start;
573 dev->base_addr = bar_start;
574 }
458 575
459 dev->base_addr = ioaddr; /* save port (I/O) base address */ 576 /* Initialize new device structure */
460 577
461 dev->get_stats = dfx_ctl_get_stats; 578 dev->get_stats = dfx_ctl_get_stats;
462 dev->open = dfx_open; 579 dev->open = dfx_open;
@@ -465,22 +582,12 @@ static int __devinit dfx_init_one_pci_or_eisa(struct pci_dev *pdev, long ioaddr)
465 dev->set_multicast_list = dfx_ctl_set_multicast_list; 582 dev->set_multicast_list = dfx_ctl_set_multicast_list;
466 dev->set_mac_address = dfx_ctl_set_mac_address; 583 dev->set_mac_address = dfx_ctl_set_mac_address;
467 584
468 if (pdev == NULL) { 585 if (dfx_bus_pci)
469 /* EISA board */ 586 pci_set_master(to_pci_dev(bdev));
470 bp->bus_type = DFX_BUS_TYPE_EISA;
471 bp->next = root_dfx_eisa_dev;
472 root_dfx_eisa_dev = dev;
473 } else {
474 /* PCI board */
475 bp->bus_type = DFX_BUS_TYPE_PCI;
476 bp->pci_dev = pdev;
477 pci_set_drvdata (pdev, dev);
478 pci_set_master (pdev);
479 }
480 587
481 if (dfx_driver_init(dev, print_name) != DFX_K_SUCCESS) { 588 if (dfx_driver_init(dev, print_name, bar_start) != DFX_K_SUCCESS) {
482 err = -ENODEV; 589 err = -ENODEV;
483 goto err_out_region; 590 goto err_out_unmap;
484 } 591 }
485 592
486 err = register_netdev(dev); 593 err = register_netdev(dev);
@@ -499,44 +606,28 @@ err_out_kfree:
499 sizeof(PI_CONSUMER_BLOCK) + 606 sizeof(PI_CONSUMER_BLOCK) +
500 (PI_ALIGN_K_DESC_BLK - 1); 607 (PI_ALIGN_K_DESC_BLK - 1);
501 if (bp->kmalloced) 608 if (bp->kmalloced)
502 pci_free_consistent(pdev, alloc_size, 609 dma_free_coherent(bdev, alloc_size,
503 bp->kmalloced, bp->kmalloced_dma); 610 bp->kmalloced, bp->kmalloced_dma);
611
612err_out_unmap:
613 if (dfx_use_mmio)
614 iounmap(bp->base.mem);
615
504err_out_region: 616err_out_region:
505 release_region(ioaddr, pdev ? PFI_K_CSR_IO_LEN : PI_ESIC_K_CSR_IO_LEN); 617 if (dfx_use_mmio)
618 release_mem_region(bar_start, bar_len);
619 else
620 release_region(bar_start, bar_len);
621
622err_out_disable:
623 if (dfx_bus_pci)
624 pci_disable_device(to_pci_dev(bdev));
625
506err_out: 626err_out:
507 free_netdev(dev); 627 free_netdev(dev);
508 return err; 628 return err;
509} 629}
510 630
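dfx_register() claims its CSR window with request_mem_region() or request_region() depending on the access method and unwinds in reverse order on failure. A condensed sketch of just that claim/map/unwind skeleton (function and label names are illustrative, not the driver's):

	#include <linux/io.h>
	#include <linux/ioport.h>

	static int claim_csrs(resource_size_t start, resource_size_t len,
			      int use_mmio, void __iomem **map)
	{
		int err = -EBUSY;

		if (!(use_mmio ? request_mem_region(start, len, "sketch")
			       : request_region(start, len, "sketch")))
			goto err_out;

		if (use_mmio) {
			*map = ioremap(start, len);
			if (!*map) {
				err = -ENOMEM;
				goto err_out_region;
			}
		}
		return 0;

	err_out_region:
		if (use_mmio)
			release_mem_region(start, len);
		else
			release_region(start, len);
	err_out:
		return err;
	}
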
511static int __devinit dfx_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
512{
513 return dfx_init_one_pci_or_eisa(pdev, 0);
514}
515
516static int __init dfx_eisa_init(void)
517{
518 int rc = -ENODEV;
519 int i; /* used in for loops */
520 u16 port; /* temporary I/O (port) address */
521 u32 slot_id; /* EISA hardware (slot) ID read from adapter */
522
523 DBG_printk("In dfx_eisa_init...\n");
524
525 /* Scan for FDDI EISA controllers */
526
527 for (i=0; i < DFX_MAX_EISA_SLOTS; i++) /* only scan for up to 16 EISA slots */
528 {
529 port = (i << 12) + PI_ESIC_K_SLOT_ID; /* port = I/O address for reading slot ID */
530 slot_id = inl(port); /* read EISA HW (slot) ID */
531 if ((slot_id & 0xF0FFFFFF) == DEFEA_PRODUCT_ID)
532 {
533 port = (i << 12); /* recalc base addr */
534
535 if (dfx_init_one_pci_or_eisa(NULL, port) == 0) rc = 0;
536 }
537 }
538 return rc;
539}
540 631
541/* 632/*
542 * ================ 633 * ================
@@ -544,7 +635,7 @@ static int __init dfx_eisa_init(void)
544 * ================ 635 * ================
545 * 636 *
546 * Overview: 637 * Overview:
547 * Initializes EISA and PCI controller bus-specific logic. 638 * Initializes the bus-specific controller logic.
548 * 639 *
549 * Returns: 640 * Returns:
550 * None 641 * None
@@ -560,7 +651,7 @@ static int __init dfx_eisa_init(void)
560 * None 651 * None
561 * 652 *
562 * Assumptions: 653 * Assumptions:
563 * dev->base_addr has already been set with the proper 654 * bp->base has already been set with the proper
564 * base I/O address for this device. 655 * base I/O address for this device.
565 * 656 *
566 * Side Effects: 657 * Side Effects:
@@ -571,87 +662,103 @@ static int __init dfx_eisa_init(void)
571 662
572static void __devinit dfx_bus_init(struct net_device *dev) 663static void __devinit dfx_bus_init(struct net_device *dev)
573{ 664{
574 DFX_board_t *bp = dev->priv; 665 DFX_board_t *bp = netdev_priv(dev);
575 u8 val; /* used for I/O read/writes */ 666 struct device *bdev = bp->bus_dev;
667 int dfx_bus_pci = DFX_BUS_PCI(bdev);
668 int dfx_bus_eisa = DFX_BUS_EISA(bdev);
669 int dfx_bus_tc = DFX_BUS_TC(bdev);
670 int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
671 u8 val;
576 672
577 DBG_printk("In dfx_bus_init...\n"); 673 DBG_printk("In dfx_bus_init...\n");
578 674
579 /* 675 /* Initialize a pointer back to the net_device struct */
580 * Initialize base I/O address field in bp structure
581 *
582 * Note: bp->base_addr is the same as dev->base_addr.
583 * It's useful because often we'll need to read
584 * or write registers where we already have the
585 * bp pointer instead of the dev pointer. Having
586 * the base address in the bp structure will
587 * save a pointer dereference.
588 *
589 * IMPORTANT!! This field must be defined before
590 * any of the dfx_port_* inline functions are
591 * called.
592 */
593
594 bp->base_addr = dev->base_addr;
595
596 /* And a pointer back to the net_device struct */
597 bp->dev = dev; 676 bp->dev = dev;
598 677
599 /* Initialize adapter based on bus type */ 678 /* Initialize adapter based on bus type */
600 679
601 if (bp->bus_type == DFX_BUS_TYPE_EISA) 680 if (dfx_bus_tc)
602 { 681 dev->irq = to_tc_dev(bdev)->interrupt;
603 /* Get the interrupt level from the ESIC chip */ 682 if (dfx_bus_eisa) {
604 683 unsigned long base_addr = to_eisa_device(bdev)->base_addr;
605 dfx_port_read_byte(bp, PI_ESIC_K_IO_CONFIG_STAT_0, &val);
606 switch ((val & PI_CONFIG_STAT_0_M_IRQ) >> PI_CONFIG_STAT_0_V_IRQ)
607 {
608 case PI_CONFIG_STAT_0_IRQ_K_9:
609 dev->irq = 9;
610 break;
611
612 case PI_CONFIG_STAT_0_IRQ_K_10:
613 dev->irq = 10;
614 break;
615 684
616 case PI_CONFIG_STAT_0_IRQ_K_11: 685 /* Get the interrupt level from the ESIC chip. */
617 dev->irq = 11; 686 val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
618 break; 687 val &= PI_CONFIG_STAT_0_M_IRQ;
688 val >>= PI_CONFIG_STAT_0_V_IRQ;
619 689
620 case PI_CONFIG_STAT_0_IRQ_K_15: 690 switch (val) {
621 dev->irq = 15; 691 case PI_CONFIG_STAT_0_IRQ_K_9:
622 break; 692 dev->irq = 9;
623 } 693 break;
624
625 /* Enable access to I/O on the board by writing 0x03 to Function Control Register */
626 694
627 dfx_port_write_byte(bp, PI_ESIC_K_FUNCTION_CNTRL, PI_ESIC_K_FUNCTION_CNTRL_IO_ENB); 695 case PI_CONFIG_STAT_0_IRQ_K_10:
696 dev->irq = 10;
697 break;
628 698
629 /* Set the I/O decode range of the board */ 699 case PI_CONFIG_STAT_0_IRQ_K_11:
700 dev->irq = 11;
701 break;
630 702
631 val = ((dev->base_addr >> 12) << PI_IO_CMP_V_SLOT); 703 case PI_CONFIG_STAT_0_IRQ_K_15:
632 dfx_port_write_byte(bp, PI_ESIC_K_IO_CMP_0_1, val); 704 dev->irq = 15;
633 dfx_port_write_byte(bp, PI_ESIC_K_IO_CMP_1_1, val); 705 break;
706 }
634 707
635 /* Enable access to rest of module (including PDQ and packet memory) */ 708 /*
709 * Enable memory decoding (MEMCS0) and/or port decoding
710 * (IOCS1/IOCS0) as appropriate in Function Control
711 * Register. One of the port chip selects seems to be
712 * used for the Burst Holdoff register, but this bit of
713 * documentation is missing and as yet it has not been
714 * determined which of the two. This is also the reason
715 * the size of the decoded port range is twice as large
 716 	 * as the one required by the PDQ.
717 */
636 718
637 dfx_port_write_byte(bp, PI_ESIC_K_SLOT_CNTRL, PI_SLOT_CNTRL_M_ENB); 719 /* Set the decode range of the board. */
720 val = ((bp->base.port >> 12) << PI_IO_CMP_V_SLOT);
 721 		outb(val, base_addr + PI_ESIC_K_IO_ADD_CMP_0_1);
 722 		outb(0, base_addr + PI_ESIC_K_IO_ADD_CMP_0_0);
 723 		outb(val, base_addr + PI_ESIC_K_IO_ADD_CMP_1_1);
 724 		outb(0, base_addr + PI_ESIC_K_IO_ADD_CMP_1_0);
 725 		val = PI_ESIC_K_CSR_IO_LEN - 1;
 726 		outb((val >> 8) & 0xff, base_addr + PI_ESIC_K_IO_ADD_MASK_0_1);
 727 		outb(val & 0xff, base_addr + PI_ESIC_K_IO_ADD_MASK_0_0);
 728 		outb((val >> 8) & 0xff, base_addr + PI_ESIC_K_IO_ADD_MASK_1_1);
 729 		outb(val & 0xff, base_addr + PI_ESIC_K_IO_ADD_MASK_1_0);
730
731 /* Enable the decoders. */
732 val = PI_FUNCTION_CNTRL_M_IOCS1 | PI_FUNCTION_CNTRL_M_IOCS0;
733 if (dfx_use_mmio)
734 val |= PI_FUNCTION_CNTRL_M_MEMCS0;
 735 		outb(val, base_addr + PI_ESIC_K_FUNCTION_CNTRL);
638 736
639 /* 737 /*
640 * Map PDQ registers into I/O space. This is done by clearing a bit 738 * Enable access to the rest of the module
641 * in Burst Holdoff register. 739 * (including PDQ and packet memory).
642 */ 740 */
741 val = PI_SLOT_CNTRL_M_ENB;
 742 		outb(val, base_addr + PI_ESIC_K_SLOT_CNTRL);
643 743
644 dfx_port_read_byte(bp, PI_ESIC_K_BURST_HOLDOFF, &val); 744 /*
645 dfx_port_write_byte(bp, PI_ESIC_K_BURST_HOLDOFF, (val & ~PI_BURST_HOLDOFF_M_MEM_MAP)); 745 * Map PDQ registers into memory or port space. This is
746 * done with a bit in the Burst Holdoff register.
747 */
748 val = inb(base_addr + PI_DEFEA_K_BURST_HOLDOFF);
749 if (dfx_use_mmio)
 750 			val |= PI_BURST_HOLDOFF_M_MEM_MAP;
 751 		else
 752 			val &= ~PI_BURST_HOLDOFF_M_MEM_MAP;
 753 		outb(val, base_addr + PI_DEFEA_K_BURST_HOLDOFF);
646 754
647 /* Enable interrupts at EISA bus interface chip (ESIC) */ 755 /* Enable interrupts at EISA bus interface chip (ESIC) */
648 756 val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
649 dfx_port_read_byte(bp, PI_ESIC_K_IO_CONFIG_STAT_0, &val); 757 val |= PI_CONFIG_STAT_0_M_INT_ENB;
 650 		dfx_port_write_byte(bp, PI_ESIC_K_IO_CONFIG_STAT_0, (val | PI_CONFIG_STAT_0_M_INT_ENB));	 758 		outb(val, base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
651 } 759 }
652 else 760 if (dfx_bus_pci) {
653 { 761 struct pci_dev *pdev = to_pci_dev(bdev);
654 struct pci_dev *pdev = bp->pci_dev;
655 762
656 /* Get the interrupt level from the PCI Configuration Table */ 763 /* Get the interrupt level from the PCI Configuration Table */
657 764
@@ -660,17 +767,70 @@ static void __devinit dfx_bus_init(struct net_device *dev)
660 /* Check Latency Timer and set if less than minimal */ 767 /* Check Latency Timer and set if less than minimal */
661 768
662 pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &val); 769 pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &val);
663 if (val < PFI_K_LAT_TIMER_MIN) /* if less than min, override with default */ 770 if (val < PFI_K_LAT_TIMER_MIN) {
664 {
665 val = PFI_K_LAT_TIMER_DEF; 771 val = PFI_K_LAT_TIMER_DEF;
666 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, val); 772 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, val);
667 } 773 }
668 774
669 /* Enable interrupts at PCI bus interface chip (PFI) */ 775 /* Enable interrupts at PCI bus interface chip (PFI) */
776 val = PFI_MODE_M_PDQ_INT_ENB | PFI_MODE_M_DMA_ENB;
777 dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL, val);
778 }
779}
670 780
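The switch above maps the 2-bit IRQ field of IO CONFIG STAT 0 onto the four interrupt lines a DEFEA can use. Assuming, as the K_9...K_15 names suggest, that the encodings are the consecutive values 0-3, the same mapping can be read from a table (illustrative only; the driver keeps the explicit switch):

	#include <stdio.h>

	/* Assumes PI_CONFIG_STAT_0_IRQ_K_9..K_15 encode as 0..3. */
	static const int esic_irq[4] = { 9, 10, 11, 15 };

	int main(void)
	{
		unsigned int field = 2;		/* sample 2-bit register field */

		printf("IRQ %d\n", esic_irq[field & 3]);
		return 0;
	}
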
671 dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL, (PFI_MODE_M_PDQ_INT_ENB | PFI_MODE_M_DMA_ENB)); 781/*
672 } 782 * ==================
783 * = dfx_bus_uninit =
784 * ==================
785 *
786 * Overview:
787 * Uninitializes the bus-specific controller logic.
788 *
789 * Returns:
790 * None
791 *
792 * Arguments:
793 * dev - pointer to device information
794 *
795 * Functional Description:
796 * Perform bus-specific logic uninitialization.
797 *
798 * Return Codes:
799 * None
800 *
801 * Assumptions:
802 * bp->base has already been set with the proper
803 * base I/O address for this device.
804 *
805 * Side Effects:
806 * Interrupts are disabled at the adapter bus-specific logic.
807 */
808
 809static void __devexit dfx_bus_uninit(struct net_device *dev)
810{
811 DFX_board_t *bp = netdev_priv(dev);
812 struct device *bdev = bp->bus_dev;
813 int dfx_bus_pci = DFX_BUS_PCI(bdev);
814 int dfx_bus_eisa = DFX_BUS_EISA(bdev);
815 u8 val;
816
817 DBG_printk("In dfx_bus_uninit...\n");
818
819 /* Uninitialize adapter based on bus type */
820
821 if (dfx_bus_eisa) {
822 unsigned long base_addr = to_eisa_device(bdev)->base_addr;
823
824 /* Disable interrupts at EISA bus interface chip (ESIC) */
825 val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
826 val &= ~PI_CONFIG_STAT_0_M_INT_ENB;
 827 		outb(val, base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
828 }
829 if (dfx_bus_pci) {
830 /* Disable interrupts at PCI bus interface chip (PFI) */
831 dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL, 0);
673 } 832 }
833}
674 834
675 835
676/* 836/*
@@ -705,18 +865,16 @@ static void __devinit dfx_bus_init(struct net_device *dev)
705 865
706static void __devinit dfx_bus_config_check(DFX_board_t *bp) 866static void __devinit dfx_bus_config_check(DFX_board_t *bp)
707{ 867{
868 struct device __unused *bdev = bp->bus_dev;
869 int dfx_bus_eisa = DFX_BUS_EISA(bdev);
708 int status; /* return code from adapter port control call */ 870 int status; /* return code from adapter port control call */
709 u32 slot_id; /* EISA-bus hardware id (DEC3001, DEC3002,...) */
710 u32 host_data; /* LW data returned from port control call */ 871 u32 host_data; /* LW data returned from port control call */
711 872
712 DBG_printk("In dfx_bus_config_check...\n"); 873 DBG_printk("In dfx_bus_config_check...\n");
713 874
714 /* Configuration check only valid for EISA adapter */ 875 /* Configuration check only valid for EISA adapter */
715 876
716 if (bp->bus_type == DFX_BUS_TYPE_EISA) 877 if (dfx_bus_eisa) {
717 {
718 dfx_port_read_long(bp, PI_ESIC_K_SLOT_ID, &slot_id);
719
720 /* 878 /*
721 * First check if revision 2 EISA controller. Rev. 1 cards used 879 * First check if revision 2 EISA controller. Rev. 1 cards used
722 * PDQ revision B, so no workaround needed in this case. Rev. 3 880 * PDQ revision B, so no workaround needed in this case. Rev. 3
@@ -724,14 +882,11 @@ static void __devinit dfx_bus_config_check(DFX_board_t *bp)
724 * case, either. Only Rev. 2 cards used either Rev. D or E 882 * case, either. Only Rev. 2 cards used either Rev. D or E
725 * chips, so we must verify the chip revision on Rev. 2 cards. 883 * chips, so we must verify the chip revision on Rev. 2 cards.
726 */ 884 */
727 885 if (to_eisa_device(bdev)->id.driver_data == DEFEA_PROD_ID_2) {
728 if (slot_id == DEFEA_PROD_ID_2)
729 {
730 /* 886 /*
731 * Revision 2 FDDI EISA controller found, so let's check PDQ 887 * Revision 2 FDDI EISA controller found,
732 * revision of adapter. 888 * so let's check PDQ revision of adapter.
733 */ 889 */
734
735 status = dfx_hw_port_ctrl_req(bp, 890 status = dfx_hw_port_ctrl_req(bp,
736 PI_PCTRL_M_SUB_CMD, 891 PI_PCTRL_M_SUB_CMD,
737 PI_SUB_CMD_K_PDQ_REV_GET, 892 PI_SUB_CMD_K_PDQ_REV_GET,
@@ -805,13 +960,20 @@ static void __devinit dfx_bus_config_check(DFX_board_t *bp)
805 */ 960 */
806 961
807static int __devinit dfx_driver_init(struct net_device *dev, 962static int __devinit dfx_driver_init(struct net_device *dev,
808 const char *print_name) 963 const char *print_name,
964 resource_size_t bar_start)
809{ 965{
810 DFX_board_t *bp = dev->priv; 966 DFX_board_t *bp = netdev_priv(dev);
811 int alloc_size; /* total buffer size needed */ 967 struct device *bdev = bp->bus_dev;
812 char *top_v, *curr_v; /* virtual addrs into memory block */ 968 int dfx_bus_pci = DFX_BUS_PCI(bdev);
813 dma_addr_t top_p, curr_p; /* physical addrs into memory block */ 969 int dfx_bus_eisa = DFX_BUS_EISA(bdev);
814 u32 data; /* host data register value */ 970 int dfx_bus_tc = DFX_BUS_TC(bdev);
971 int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
972 int alloc_size; /* total buffer size needed */
973 char *top_v, *curr_v; /* virtual addrs into memory block */
974 dma_addr_t top_p, curr_p; /* physical addrs into memory block */
975 u32 data, le32; /* host data register value */
976 char *board_name = NULL;
815 977
816 DBG_printk("In dfx_driver_init...\n"); 978 DBG_printk("In dfx_driver_init...\n");
817 979
@@ -860,8 +1022,8 @@ static int __devinit dfx_driver_init(struct net_device *dev,
860 print_name); 1022 print_name);
861 return(DFX_K_FAILURE); 1023 return(DFX_K_FAILURE);
862 } 1024 }
863 data = cpu_to_le32(data); 1025 le32 = cpu_to_le32(data);
864 memcpy(&bp->factory_mac_addr[0], &data, sizeof(u32)); 1026 memcpy(&bp->factory_mac_addr[0], &le32, sizeof(u32));
865 1027
866 if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_MLA, PI_PDATA_A_MLA_K_HI, 0, 1028 if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_MLA, PI_PDATA_A_MLA_K_HI, 0,
867 &data) != DFX_K_SUCCESS) { 1029 &data) != DFX_K_SUCCESS) {
@@ -869,8 +1031,8 @@ static int __devinit dfx_driver_init(struct net_device *dev,
869 print_name); 1031 print_name);
870 return(DFX_K_FAILURE); 1032 return(DFX_K_FAILURE);
871 } 1033 }
872 data = cpu_to_le32(data); 1034 le32 = cpu_to_le32(data);
873 memcpy(&bp->factory_mac_addr[4], &data, sizeof(u16)); 1035 memcpy(&bp->factory_mac_addr[4], &le32, sizeof(u16));
874 1036
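The new le32 temporary matters because the PDQ returns the MAC address packed little-endian in a 32-bit word: byte 0 of the address lives in bits 7:0 regardless of host byte order, so storing the raw CPU-order value would reverse the bytes on big-endian machines. A stand-alone analogue that extracts the bytes explicitly:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t data = 0x33221100;	/* register image: MAC byte 0 = 0x00 */
		uint8_t mac[4];
		int i;

		for (i = 0; i < 4; i++)		/* what the cpu_to_le32 + memcpy achieve */
			mac[i] = (data >> (8 * i)) & 0xff;

		printf("%02X-%02X-%02X-%02X\n", mac[0], mac[1], mac[2], mac[3]);
		return 0;
	}
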
875 /* 1037 /*
876 * Set current address to factory address 1038 * Set current address to factory address
@@ -880,20 +1042,18 @@ static int __devinit dfx_driver_init(struct net_device *dev,
880 */ 1042 */
881 1043
882 memcpy(dev->dev_addr, bp->factory_mac_addr, FDDI_K_ALEN); 1044 memcpy(dev->dev_addr, bp->factory_mac_addr, FDDI_K_ALEN);
883 if (bp->bus_type == DFX_BUS_TYPE_EISA) 1045 if (dfx_bus_tc)
884 printk("%s: DEFEA at I/O addr = 0x%lX, IRQ = %d, " 1046 board_name = "DEFTA";
885 "Hardware addr = %02X-%02X-%02X-%02X-%02X-%02X\n", 1047 if (dfx_bus_eisa)
886 print_name, dev->base_addr, dev->irq, 1048 board_name = "DEFEA";
887 dev->dev_addr[0], dev->dev_addr[1], 1049 if (dfx_bus_pci)
888 dev->dev_addr[2], dev->dev_addr[3], 1050 board_name = "DEFPA";
889 dev->dev_addr[4], dev->dev_addr[5]); 1051 pr_info("%s: %s at %saddr = 0x%llx, IRQ = %d, "
890 else 1052 "Hardware addr = %02X-%02X-%02X-%02X-%02X-%02X\n",
891 printk("%s: DEFPA at I/O addr = 0x%lX, IRQ = %d, " 1053 print_name, board_name, dfx_use_mmio ? "" : "I/O ",
892 "Hardware addr = %02X-%02X-%02X-%02X-%02X-%02X\n", 1054 (long long)bar_start, dev->irq,
893 print_name, dev->base_addr, dev->irq, 1055 dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
894 dev->dev_addr[0], dev->dev_addr[1], 1056 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
895 dev->dev_addr[2], dev->dev_addr[3],
896 dev->dev_addr[4], dev->dev_addr[5]);
897 1057
898 /* 1058 /*
899 * Get memory for descriptor block, consumer block, and other buffers 1059 * Get memory for descriptor block, consumer block, and other buffers
@@ -908,8 +1068,9 @@ static int __devinit dfx_driver_init(struct net_device *dev,
908#endif 1068#endif
909 sizeof(PI_CONSUMER_BLOCK) + 1069 sizeof(PI_CONSUMER_BLOCK) +
910 (PI_ALIGN_K_DESC_BLK - 1); 1070 (PI_ALIGN_K_DESC_BLK - 1);
911 bp->kmalloced = top_v = pci_alloc_consistent(bp->pci_dev, alloc_size, 1071 bp->kmalloced = top_v = dma_alloc_coherent(bp->bus_dev, alloc_size,
912 &bp->kmalloced_dma); 1072 &bp->kmalloced_dma,
1073 GFP_ATOMIC);
913 if (top_v == NULL) { 1074 if (top_v == NULL) {
914 printk("%s: Could not allocate memory for host buffers " 1075 printk("%s: Could not allocate memory for host buffers "
915 "and structures!\n", print_name); 1076 "and structures!\n", print_name);
@@ -1219,14 +1380,15 @@ static int dfx_adap_init(DFX_board_t *bp, int get_buffers)
1219 1380
1220static int dfx_open(struct net_device *dev) 1381static int dfx_open(struct net_device *dev)
1221{ 1382{
1383 DFX_board_t *bp = netdev_priv(dev);
1222 int ret; 1384 int ret;
1223 DFX_board_t *bp = dev->priv;
1224 1385
1225 DBG_printk("In dfx_open...\n"); 1386 DBG_printk("In dfx_open...\n");
1226 1387
1227 /* Register IRQ - support shared interrupts by passing device ptr */ 1388 /* Register IRQ - support shared interrupts by passing device ptr */
1228 1389
1229 ret = request_irq(dev->irq, dfx_interrupt, IRQF_SHARED, dev->name, dev); 1390 ret = request_irq(dev->irq, dfx_interrupt, IRQF_SHARED, dev->name,
1391 dev);
1230 if (ret) { 1392 if (ret) {
1231 printk(KERN_ERR "%s: Requested IRQ %d is busy\n", dev->name, dev->irq); 1393 printk(KERN_ERR "%s: Requested IRQ %d is busy\n", dev->name, dev->irq);
1232 return ret; 1394 return ret;
@@ -1309,7 +1471,7 @@ static int dfx_open(struct net_device *dev)
1309 1471
1310static int dfx_close(struct net_device *dev) 1472static int dfx_close(struct net_device *dev)
1311{ 1473{
1312 DFX_board_t *bp = dev->priv; 1474 DFX_board_t *bp = netdev_priv(dev);
1313 1475
1314 DBG_printk("In dfx_close...\n"); 1476 DBG_printk("In dfx_close...\n");
1315 1477
@@ -1645,7 +1807,7 @@ static void dfx_int_type_0_process(DFX_board_t *bp)
1645 1807
1646static void dfx_int_common(struct net_device *dev) 1808static void dfx_int_common(struct net_device *dev)
1647{ 1809{
1648 DFX_board_t *bp = dev->priv; 1810 DFX_board_t *bp = netdev_priv(dev);
1649 PI_UINT32 port_status; /* Port Status register */ 1811 PI_UINT32 port_status; /* Port Status register */
1650 1812
1651 /* Process xmt interrupts - frequent case, so always call this routine */ 1813 /* Process xmt interrupts - frequent case, so always call this routine */
@@ -1715,18 +1877,16 @@ static void dfx_int_common(struct net_device *dev)
1715 1877
1716static irqreturn_t dfx_interrupt(int irq, void *dev_id) 1878static irqreturn_t dfx_interrupt(int irq, void *dev_id)
1717{ 1879{
1718 struct net_device *dev = dev_id; 1880 struct net_device *dev = dev_id;
1719 DFX_board_t *bp; /* private board structure pointer */ 1881 DFX_board_t *bp = netdev_priv(dev);
1720 1882 struct device *bdev = bp->bus_dev;
1721 /* Get board pointer only if device structure is valid */ 1883 int dfx_bus_pci = DFX_BUS_PCI(bdev);
1722 1884 int dfx_bus_eisa = DFX_BUS_EISA(bdev);
1723 bp = dev->priv; 1885 int dfx_bus_tc = DFX_BUS_TC(bdev);
1724
1725 /* See if we're already servicing an interrupt */
1726 1886
1727 /* Service adapter interrupts */ 1887 /* Service adapter interrupts */
1728 1888
1729 if (bp->bus_type == DFX_BUS_TYPE_PCI) { 1889 if (dfx_bus_pci) {
1730 u32 status; 1890 u32 status;
1731 1891
1732 dfx_port_read_long(bp, PFI_K_REG_STATUS, &status); 1892 dfx_port_read_long(bp, PFI_K_REG_STATUS, &status);
@@ -1750,10 +1910,12 @@ static irqreturn_t dfx_interrupt(int irq, void *dev_id)
1750 PFI_MODE_M_DMA_ENB)); 1910 PFI_MODE_M_DMA_ENB));
1751 1911
1752 spin_unlock(&bp->lock); 1912 spin_unlock(&bp->lock);
1753 } else { 1913 }
1914 if (dfx_bus_eisa) {
1915 unsigned long base_addr = to_eisa_device(bdev)->base_addr;
1754 u8 status; 1916 u8 status;
1755 1917
1756 dfx_port_read_byte(bp, PI_ESIC_K_IO_CONFIG_STAT_0, &status); 1918 status = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
1757 if (!(status & PI_CONFIG_STAT_0_M_PEND)) 1919 if (!(status & PI_CONFIG_STAT_0_M_PEND))
1758 return IRQ_NONE; 1920 return IRQ_NONE;
1759 1921
@@ -1761,15 +1923,35 @@ static irqreturn_t dfx_interrupt(int irq, void *dev_id)
1761 1923
1762 /* Disable interrupts at the ESIC */ 1924 /* Disable interrupts at the ESIC */
1763 status &= ~PI_CONFIG_STAT_0_M_INT_ENB; 1925 status &= ~PI_CONFIG_STAT_0_M_INT_ENB;
 1764 		dfx_port_write_byte(bp, PI_ESIC_K_IO_CONFIG_STAT_0, status);	 1926 		outb(status, base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
1765 1927
1766 /* Call interrupt service routine for this adapter */ 1928 /* Call interrupt service routine for this adapter */
1767 dfx_int_common(dev); 1929 dfx_int_common(dev);
1768 1930
1769 /* Reenable interrupts at the ESIC */ 1931 /* Reenable interrupts at the ESIC */
1770 dfx_port_read_byte(bp, PI_ESIC_K_IO_CONFIG_STAT_0, &status); 1932 status = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
1771 status |= PI_CONFIG_STAT_0_M_INT_ENB; 1933 status |= PI_CONFIG_STAT_0_M_INT_ENB;
 1772 		dfx_port_write_byte(bp, PI_ESIC_K_IO_CONFIG_STAT_0, status);	 1934 		outb(status, base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
1935
1936 spin_unlock(&bp->lock);
1937 }
1938 if (dfx_bus_tc) {
1939 u32 status;
1940
1941 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &status);
1942 if (!(status & (PI_PSTATUS_M_RCV_DATA_PENDING |
1943 PI_PSTATUS_M_XMT_DATA_PENDING |
1944 PI_PSTATUS_M_SMT_HOST_PENDING |
1945 PI_PSTATUS_M_UNSOL_PENDING |
1946 PI_PSTATUS_M_CMD_RSP_PENDING |
1947 PI_PSTATUS_M_CMD_REQ_PENDING |
1948 PI_PSTATUS_M_TYPE_0_PENDING)))
1949 return IRQ_NONE;
1950
1951 spin_lock(&bp->lock);
1952
1953 /* Call interrupt service routine for this adapter */
1954 dfx_int_common(dev);
1773 1955
1774 spin_unlock(&bp->lock); 1956 spin_unlock(&bp->lock);
1775 } 1957 }
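All three bus branches of the handler follow the same shared-IRQ discipline: test a pending indication first and return IRQ_NONE if the board is idle, otherwise mask, service and unmask under the board lock. A skeleton of that shape (the sketch_* names are placeholders, not driver functions):

	#include <linux/interrupt.h>
	#include <linux/spinlock.h>
	#include <linux/types.h>

	struct sketch_board {
		spinlock_t lock;
	};

	static bool sketch_pending(struct sketch_board *bp)	{ return true; }
	static void sketch_mask(struct sketch_board *bp)	{ }
	static void sketch_service(struct sketch_board *bp)	{ }
	static void sketch_unmask(struct sketch_board *bp)	{ }

	static irqreturn_t sketch_interrupt(int irq, void *dev_id)
	{
		struct sketch_board *bp = dev_id;

		if (!sketch_pending(bp))	/* shared line: not our board */
			return IRQ_NONE;

		spin_lock(&bp->lock);
		sketch_mask(bp);
		sketch_service(bp);
		sketch_unmask(bp);
		spin_unlock(&bp->lock);

		return IRQ_HANDLED;
	}
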
@@ -1823,7 +2005,7 @@ static irqreturn_t dfx_interrupt(int irq, void *dev_id)
1823 2005
1824static struct net_device_stats *dfx_ctl_get_stats(struct net_device *dev) 2006static struct net_device_stats *dfx_ctl_get_stats(struct net_device *dev)
1825 { 2007 {
1826 DFX_board_t *bp = dev->priv; 2008 DFX_board_t *bp = netdev_priv(dev);
1827 2009
1828 /* Fill the bp->stats structure with driver-maintained counters */ 2010 /* Fill the bp->stats structure with driver-maintained counters */
1829 2011
@@ -2009,8 +2191,8 @@ static struct net_device_stats *dfx_ctl_get_stats(struct net_device *dev)
2009 */ 2191 */
2010 2192
2011static void dfx_ctl_set_multicast_list(struct net_device *dev) 2193static void dfx_ctl_set_multicast_list(struct net_device *dev)
2012 { 2194{
2013 DFX_board_t *bp = dev->priv; 2195 DFX_board_t *bp = netdev_priv(dev);
2014 int i; /* used as index in for loop */ 2196 int i; /* used as index in for loop */
2015 struct dev_mc_list *dmi; /* ptr to multicast addr entry */ 2197 struct dev_mc_list *dmi; /* ptr to multicast addr entry */
2016 2198
@@ -2124,8 +2306,8 @@ static void dfx_ctl_set_multicast_list(struct net_device *dev)
2124 2306
2125static int dfx_ctl_set_mac_address(struct net_device *dev, void *addr) 2307static int dfx_ctl_set_mac_address(struct net_device *dev, void *addr)
2126 { 2308 {
2127 DFX_board_t *bp = dev->priv;
2128 struct sockaddr *p_sockaddr = (struct sockaddr *)addr; 2309 struct sockaddr *p_sockaddr = (struct sockaddr *)addr;
2310 DFX_board_t *bp = netdev_priv(dev);
2129 2311
2130 /* Copy unicast address to driver-maintained structs and update count */ 2312 /* Copy unicast address to driver-maintained structs and update count */
2131 2313
@@ -2764,9 +2946,9 @@ static int dfx_rcv_init(DFX_board_t *bp, int get_buffers)
2764 2946
2765 my_skb_align(newskb, 128); 2947 my_skb_align(newskb, 128);
2766 bp->descr_block_virt->rcv_data[i + j].long_1 = 2948 bp->descr_block_virt->rcv_data[i + j].long_1 =
2767 (u32)pci_map_single(bp->pci_dev, newskb->data, 2949 (u32)dma_map_single(bp->bus_dev, newskb->data,
2768 NEW_SKB_SIZE, 2950 NEW_SKB_SIZE,
2769 PCI_DMA_FROMDEVICE); 2951 DMA_FROM_DEVICE);
2770 /* 2952 /*
2771 * p_rcv_buff_va is only used inside the 2953 * p_rcv_buff_va is only used inside the
2772 * kernel so we put the skb pointer here. 2954 * kernel so we put the skb pointer here.
@@ -2880,17 +3062,17 @@ static void dfx_rcv_queue_process(
2880 3062
2881 my_skb_align(newskb, 128); 3063 my_skb_align(newskb, 128);
2882 skb = (struct sk_buff *)bp->p_rcv_buff_va[entry]; 3064 skb = (struct sk_buff *)bp->p_rcv_buff_va[entry];
2883 pci_unmap_single(bp->pci_dev, 3065 dma_unmap_single(bp->bus_dev,
2884 bp->descr_block_virt->rcv_data[entry].long_1, 3066 bp->descr_block_virt->rcv_data[entry].long_1,
2885 NEW_SKB_SIZE, 3067 NEW_SKB_SIZE,
2886 PCI_DMA_FROMDEVICE); 3068 DMA_FROM_DEVICE);
2887 skb_reserve(skb, RCV_BUFF_K_PADDING); 3069 skb_reserve(skb, RCV_BUFF_K_PADDING);
2888 bp->p_rcv_buff_va[entry] = (char *)newskb; 3070 bp->p_rcv_buff_va[entry] = (char *)newskb;
2889 bp->descr_block_virt->rcv_data[entry].long_1 = 3071 bp->descr_block_virt->rcv_data[entry].long_1 =
2890 (u32)pci_map_single(bp->pci_dev, 3072 (u32)dma_map_single(bp->bus_dev,
2891 newskb->data, 3073 newskb->data,
2892 NEW_SKB_SIZE, 3074 NEW_SKB_SIZE,
2893 PCI_DMA_FROMDEVICE); 3075 DMA_FROM_DEVICE);
2894 } else 3076 } else
2895 skb = NULL; 3077 skb = NULL;
2896 } else 3078 } else
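The pci_map_single() to dma_map_single() conversion in these receive paths is what lets a single driver body serve PCI, EISA and TURBOchannel: the generic DMA calls take any struct device pointer. A minimal sketch of the map/unmap cycle for one receive buffer (mapping-error checks are elided, as in the driver itself):

	#include <linux/dma-mapping.h>
	#include <linux/skbuff.h>

	static dma_addr_t rx_map(struct device *bus_dev, struct sk_buff *skb,
				 size_t size)
	{
		return dma_map_single(bus_dev, skb->data, size, DMA_FROM_DEVICE);
	}

	static void rx_unmap(struct device *bus_dev, dma_addr_t addr, size_t size)
	{
		dma_unmap_single(bus_dev, addr, size, DMA_FROM_DEVICE);
	}
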
@@ -3010,7 +3192,7 @@ static int dfx_xmt_queue_pkt(
3010 ) 3192 )
3011 3193
3012 { 3194 {
3013 DFX_board_t *bp = dev->priv; 3195 DFX_board_t *bp = netdev_priv(dev);
3014 u8 prod; /* local transmit producer index */ 3196 u8 prod; /* local transmit producer index */
3015 PI_XMT_DESCR *p_xmt_descr; /* ptr to transmit descriptor block entry */ 3197 PI_XMT_DESCR *p_xmt_descr; /* ptr to transmit descriptor block entry */
3016 XMT_DRIVER_DESCR *p_xmt_drv_descr; /* ptr to transmit driver descriptor */ 3198 XMT_DRIVER_DESCR *p_xmt_drv_descr; /* ptr to transmit driver descriptor */
@@ -3116,8 +3298,8 @@ static int dfx_xmt_queue_pkt(
3116 */ 3298 */
3117 3299
3118 p_xmt_descr->long_0 = (u32) (PI_XMT_DESCR_M_SOP | PI_XMT_DESCR_M_EOP | ((skb->len) << PI_XMT_DESCR_V_SEG_LEN)); 3300 p_xmt_descr->long_0 = (u32) (PI_XMT_DESCR_M_SOP | PI_XMT_DESCR_M_EOP | ((skb->len) << PI_XMT_DESCR_V_SEG_LEN));
3119 p_xmt_descr->long_1 = (u32)pci_map_single(bp->pci_dev, skb->data, 3301 p_xmt_descr->long_1 = (u32)dma_map_single(bp->bus_dev, skb->data,
3120 skb->len, PCI_DMA_TODEVICE); 3302 skb->len, DMA_TO_DEVICE);
3121 3303
3122 /* 3304 /*
3123 * Verify that descriptor is actually available 3305 * Verify that descriptor is actually available
@@ -3220,10 +3402,10 @@ static int dfx_xmt_done(DFX_board_t *bp)
3220 3402
3221 /* Return skb to operating system */ 3403 /* Return skb to operating system */
3222 comp = bp->rcv_xmt_reg.index.xmt_comp; 3404 comp = bp->rcv_xmt_reg.index.xmt_comp;
3223 pci_unmap_single(bp->pci_dev, 3405 dma_unmap_single(bp->bus_dev,
3224 bp->descr_block_virt->xmt_data[comp].long_1, 3406 bp->descr_block_virt->xmt_data[comp].long_1,
3225 p_xmt_drv_descr->p_skb->len, 3407 p_xmt_drv_descr->p_skb->len,
3226 PCI_DMA_TODEVICE); 3408 DMA_TO_DEVICE);
3227 dev_kfree_skb_irq(p_xmt_drv_descr->p_skb); 3409 dev_kfree_skb_irq(p_xmt_drv_descr->p_skb);
3228 3410
3229 /* 3411 /*
@@ -3344,10 +3526,10 @@ static void dfx_xmt_flush( DFX_board_t *bp )
3344 3526
3345 /* Return skb to operating system */ 3527 /* Return skb to operating system */
3346 comp = bp->rcv_xmt_reg.index.xmt_comp; 3528 comp = bp->rcv_xmt_reg.index.xmt_comp;
3347 pci_unmap_single(bp->pci_dev, 3529 dma_unmap_single(bp->bus_dev,
3348 bp->descr_block_virt->xmt_data[comp].long_1, 3530 bp->descr_block_virt->xmt_data[comp].long_1,
3349 p_xmt_drv_descr->p_skb->len, 3531 p_xmt_drv_descr->p_skb->len,
3350 PCI_DMA_TODEVICE); 3532 DMA_TO_DEVICE);
3351 dev_kfree_skb(p_xmt_drv_descr->p_skb); 3533 dev_kfree_skb(p_xmt_drv_descr->p_skb);
3352 3534
3353 /* Increment transmit error counter */ 3535 /* Increment transmit error counter */
@@ -3375,13 +3557,44 @@ static void dfx_xmt_flush( DFX_board_t *bp )
3375 bp->cons_block_virt->xmt_rcv_data = prod_cons; 3557 bp->cons_block_virt->xmt_rcv_data = prod_cons;
3376 } 3558 }
3377 3559
3378static void __devexit dfx_remove_one_pci_or_eisa(struct pci_dev *pdev, struct net_device *dev) 3560/*
3561 * ==================
3562 * = dfx_unregister =
3563 * ==================
3564 *
3565 * Overview:
3566 * Shuts down an FDDI controller
3567 *
3568 * Returns:
3569 * Condition code
3570 *
3571 * Arguments:
3572 * bdev - pointer to device information
3573 *
3574 * Functional Description:
3575 *
3576 * Return Codes:
3577 * None
3578 *
3579 * Assumptions:
 3580 *	The device was set up by dfx_register(), so dev_get_drvdata()
 3581 *	yields its net_device and all claimed resources are still held.
3581 *
3582 * Side Effects:
3583 * Device structures for FDDI adapters (fddi0, fddi1, etc) are
3584 * freed.
3585 */
3586static void __devexit dfx_unregister(struct device *bdev)
3379{ 3587{
3380 DFX_board_t *bp = dev->priv; 3588 struct net_device *dev = dev_get_drvdata(bdev);
3589 DFX_board_t *bp = netdev_priv(dev);
3590 int dfx_bus_pci = DFX_BUS_PCI(bdev);
3591 int dfx_bus_tc = DFX_BUS_TC(bdev);
3592 int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
 3593	resource_size_t bar_start = 0;		/* start of the CSR region */
3594 resource_size_t bar_len = 0; /* resource length */
3381 int alloc_size; /* total buffer size used */ 3595 int alloc_size; /* total buffer size used */
3382 3596
3383 unregister_netdev(dev); 3597 unregister_netdev(dev);
3384 release_region(dev->base_addr, pdev ? PFI_K_CSR_IO_LEN : PI_ESIC_K_CSR_IO_LEN );
3385 3598
3386 alloc_size = sizeof(PI_DESCR_BLOCK) + 3599 alloc_size = sizeof(PI_DESCR_BLOCK) +
3387 PI_CMD_REQ_K_SIZE_MAX + PI_CMD_RSP_K_SIZE_MAX + 3600 PI_CMD_REQ_K_SIZE_MAX + PI_CMD_RSP_K_SIZE_MAX +
@@ -3391,78 +3604,141 @@ static void __devexit dfx_remove_one_pci_or_eisa(struct pci_dev *pdev, struct ne
3391 sizeof(PI_CONSUMER_BLOCK) + 3604 sizeof(PI_CONSUMER_BLOCK) +
3392 (PI_ALIGN_K_DESC_BLK - 1); 3605 (PI_ALIGN_K_DESC_BLK - 1);
3393 if (bp->kmalloced) 3606 if (bp->kmalloced)
3394 pci_free_consistent(pdev, alloc_size, bp->kmalloced, 3607 dma_free_coherent(bdev, alloc_size,
3395 bp->kmalloced_dma); 3608 bp->kmalloced, bp->kmalloced_dma);
3609
3610 dfx_bus_uninit(dev);
3611
3612 dfx_get_bars(bdev, &bar_start, &bar_len);
3613 if (dfx_use_mmio) {
3614 iounmap(bp->base.mem);
3615 release_mem_region(bar_start, bar_len);
3616 } else
3617 release_region(bar_start, bar_len);
3618
3619 if (dfx_bus_pci)
3620 pci_disable_device(to_pci_dev(bdev));
3621
3396 free_netdev(dev); 3622 free_netdev(dev);
3397} 3623}
3398 3624
3399static void __devexit dfx_remove_one (struct pci_dev *pdev)
3400{
3401 struct net_device *dev = pci_get_drvdata(pdev);
3402 3625
3403 dfx_remove_one_pci_or_eisa(pdev, dev); 3626static int __devinit __unused dfx_dev_register(struct device *);
3404 pci_set_drvdata(pdev, NULL); 3627static int __devexit __unused dfx_dev_unregister(struct device *);
3405}
3406 3628
3407static struct pci_device_id dfx_pci_tbl[] = { 3629#ifdef CONFIG_PCI
3408 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_FDDI, PCI_ANY_ID, PCI_ANY_ID, }, 3630static int __devinit dfx_pci_register(struct pci_dev *,
3409 { 0, } 3631 const struct pci_device_id *);
3632static void __devexit dfx_pci_unregister(struct pci_dev *);
3633
3634static struct pci_device_id dfx_pci_table[] = {
3635 { PCI_DEVICE(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_FDDI) },
3636 { }
3410}; 3637};
3411MODULE_DEVICE_TABLE(pci, dfx_pci_tbl); 3638MODULE_DEVICE_TABLE(pci, dfx_pci_table);
3412 3639
3413static struct pci_driver dfx_driver = { 3640static struct pci_driver dfx_pci_driver = {
3414 .name = "defxx", 3641 .name = "defxx",
3415 .probe = dfx_init_one, 3642 .id_table = dfx_pci_table,
3416 .remove = __devexit_p(dfx_remove_one), 3643 .probe = dfx_pci_register,
3417 .id_table = dfx_pci_tbl, 3644 .remove = __devexit_p(dfx_pci_unregister),
3418}; 3645};
3419 3646
 3420static int dfx_have_pci;	 3647static int __devinit dfx_pci_register(struct pci_dev *,
3421static int dfx_have_eisa; 3648 const struct pci_device_id *ent)
3422 3649{
3650 return dfx_register(&pdev->dev);
3651}
3423 3652
3424static void __exit dfx_eisa_cleanup(void) 3653static void __devexit dfx_pci_unregister(struct pci_dev *pdev)
3425{ 3654{
3426 struct net_device *dev = root_dfx_eisa_dev; 3655 dfx_unregister(&pdev->dev);
3656}
3657#endif /* CONFIG_PCI */
3658
3659#ifdef CONFIG_EISA
3660static struct eisa_device_id dfx_eisa_table[] = {
3661 { "DEC3001", DEFEA_PROD_ID_1 },
3662 { "DEC3002", DEFEA_PROD_ID_2 },
3663 { "DEC3003", DEFEA_PROD_ID_3 },
3664 { "DEC3004", DEFEA_PROD_ID_4 },
3665 { }
3666};
3667MODULE_DEVICE_TABLE(eisa, dfx_eisa_table);
3668
3669static struct eisa_driver dfx_eisa_driver = {
3670 .id_table = dfx_eisa_table,
3671 .driver = {
3672 .name = "defxx",
3673 .bus = &eisa_bus_type,
3674 .probe = dfx_dev_register,
3675 .remove = __devexit_p(dfx_dev_unregister),
3676 },
3677};
3678#endif /* CONFIG_EISA */
3679
3680#ifdef CONFIG_TC
3681static struct tc_device_id const dfx_tc_table[] = {
3682 { "DEC ", "PMAF-FA " },
3683 { "DEC ", "PMAF-FD " },
3684 { "DEC ", "PMAF-FS " },
3685 { "DEC ", "PMAF-FU " },
3686 { }
3687};
3688MODULE_DEVICE_TABLE(tc, dfx_tc_table);
3689
3690static struct tc_driver dfx_tc_driver = {
3691 .id_table = dfx_tc_table,
3692 .driver = {
3693 .name = "defxx",
3694 .bus = &tc_bus_type,
3695 .probe = dfx_dev_register,
3696 .remove = __devexit_p(dfx_dev_unregister),
3697 },
3698};
3699#endif /* CONFIG_TC */
3427 3700
3428 while (dev) 3701static int __devinit __unused dfx_dev_register(struct device *dev)
3429 { 3702{
3430 struct net_device *tmp; 3703 int status;
3431 DFX_board_t *bp;
3432 3704
3433 bp = (DFX_board_t*)dev->priv; 3705 status = dfx_register(dev);
3434 tmp = bp->next; 3706 if (!status)
3435 dfx_remove_one_pci_or_eisa(NULL, dev); 3707 get_device(dev);
3436 dev = tmp; 3708 return status;
3437 }
3438} 3709}
3439 3710
3440static int __init dfx_init(void) 3711static int __devexit __unused dfx_dev_unregister(struct device *dev)
3441{ 3712{
3442 int rc_pci, rc_eisa; 3713 put_device(dev);
3443 3714 dfx_unregister(dev);
3444 rc_pci = pci_register_driver(&dfx_driver); 3715 return 0;
3445 if (rc_pci >= 0) dfx_have_pci = 1; 3716}
3446 3717
3447 rc_eisa = dfx_eisa_init();
3448 if (rc_eisa >= 0) dfx_have_eisa = 1;
3449 3718
 3450	return ((rc_eisa < 0) ? 0 : rc_eisa) + ((rc_pci < 0) ? 0 : rc_pci);	 3719static int __init dfx_init(void)
3720{
3721 int status;
3722
3723 status = pci_register_driver(&dfx_pci_driver);
3724 if (!status)
3725 status = eisa_driver_register(&dfx_eisa_driver);
3726 if (!status)
3727 status = tc_register_driver(&dfx_tc_driver);
3728 return status;
3451} 3729}
3452 3730
 3453static void __exit dfx_cleanup(void)	 3731static void __exit dfx_cleanup(void)
3454{ 3732{
3455 if (dfx_have_pci) 3733 tc_unregister_driver(&dfx_tc_driver);
3456 pci_unregister_driver(&dfx_driver); 3734 eisa_driver_unregister(&dfx_eisa_driver);
3457 if (dfx_have_eisa) 3735 pci_unregister_driver(&dfx_pci_driver);
3458 dfx_eisa_cleanup();
3459
3460} 3736}
3461 3737
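dfx_init() returns the first registration failure but leaves any earlier registrations in place, even though a failed module_init unloads the module. A stricter variant (illustrative only, not part of the patch) would unwind explicitly:

	static int __init dfx_init_strict(void)
	{
		int status;

		status = pci_register_driver(&dfx_pci_driver);
		if (status)
			return status;
		status = eisa_driver_register(&dfx_eisa_driver);
		if (status)
			goto err_pci;
		status = tc_register_driver(&dfx_tc_driver);
		if (status)
			goto err_eisa;
		return 0;

	err_eisa:
		eisa_driver_unregister(&dfx_eisa_driver);
	err_pci:
		pci_unregister_driver(&dfx_pci_driver);
		return status;
	}
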
3462module_init(dfx_init); 3738module_init(dfx_init);
3463module_exit(dfx_cleanup); 3739module_exit(dfx_cleanup);
3464MODULE_AUTHOR("Lawrence V. Stefani"); 3740MODULE_AUTHOR("Lawrence V. Stefani");
3465MODULE_DESCRIPTION("DEC FDDIcontroller EISA/PCI (DEFEA/DEFPA) driver " 3741MODULE_DESCRIPTION("DEC FDDIcontroller TC/EISA/PCI (DEFTA/DEFEA/DEFPA) driver "
3466 DRV_VERSION " " DRV_RELDATE); 3742 DRV_VERSION " " DRV_RELDATE);
3467MODULE_LICENSE("GPL"); 3743MODULE_LICENSE("GPL");
3468 3744
diff --git a/drivers/net/defxx.h b/drivers/net/defxx.h
index 2ce8f97253eb..19a6f64df198 100644
--- a/drivers/net/defxx.h
+++ b/drivers/net/defxx.h
@@ -26,6 +26,7 @@
26 * 12-Sep-96 LVS Removed packet request header pointers. 26 * 12-Sep-96 LVS Removed packet request header pointers.
27 * 04 Aug 2003 macro Converted to the DMA API. 27 * 04 Aug 2003 macro Converted to the DMA API.
28 * 23 Oct 2006 macro Big-endian host support. 28 * 23 Oct 2006 macro Big-endian host support.
29 * 14 Dec 2006 macro TURBOchannel support.
29 */ 30 */
30 31
31#ifndef _DEFXX_H_ 32#ifndef _DEFXX_H_
@@ -1471,9 +1472,17 @@ typedef union
1471 1472
1472#endif /* __BIG_ENDIAN */ 1473#endif /* __BIG_ENDIAN */
1473 1474
1475/* Define TC PDQ CSR offset and length */
1476
1477#define PI_TC_K_CSR_OFFSET 0x100000
1478#define PI_TC_K_CSR_LEN 0x40 /* 64 bytes */
1479
1474/* Define EISA controller register offsets */ 1480/* Define EISA controller register offsets */
1475 1481
1476#define PI_ESIC_K_BURST_HOLDOFF 0x040 1482#define PI_ESIC_K_CSR_IO_LEN 0x80 /* 128 bytes */
1483
1484#define PI_DEFEA_K_BURST_HOLDOFF 0x040
1485
1477#define PI_ESIC_K_SLOT_ID 0xC80 1486#define PI_ESIC_K_SLOT_ID 0xC80
1478#define PI_ESIC_K_SLOT_CNTRL 0xC84 1487#define PI_ESIC_K_SLOT_CNTRL 0xC84
1479#define PI_ESIC_K_MEM_ADD_CMP_0 0xC85 1488#define PI_ESIC_K_MEM_ADD_CMP_0 0xC85
@@ -1488,14 +1497,14 @@ typedef union
1488#define PI_ESIC_K_MEM_ADD_LO_CMP_0 0xC8E 1497#define PI_ESIC_K_MEM_ADD_LO_CMP_0 0xC8E
1489#define PI_ESIC_K_MEM_ADD_LO_CMP_1 0xC8F 1498#define PI_ESIC_K_MEM_ADD_LO_CMP_1 0xC8F
1490#define PI_ESIC_K_MEM_ADD_LO_CMP_2 0xC90 1499#define PI_ESIC_K_MEM_ADD_LO_CMP_2 0xC90
1491#define PI_ESIC_K_IO_CMP_0_0 0xC91 1500#define PI_ESIC_K_IO_ADD_CMP_0_0 0xC91
1492#define PI_ESIC_K_IO_CMP_0_1 0xC92 1501#define PI_ESIC_K_IO_ADD_CMP_0_1 0xC92
1493#define PI_ESIC_K_IO_CMP_1_0 0xC93 1502#define PI_ESIC_K_IO_ADD_CMP_1_0 0xC93
1494#define PI_ESIC_K_IO_CMP_1_1 0xC94 1503#define PI_ESIC_K_IO_ADD_CMP_1_1 0xC94
1495#define PI_ESIC_K_IO_CMP_2_0 0xC95 1504#define PI_ESIC_K_IO_ADD_CMP_2_0 0xC95
1496#define PI_ESIC_K_IO_CMP_2_1 0xC96 1505#define PI_ESIC_K_IO_ADD_CMP_2_1 0xC96
1497#define PI_ESIC_K_IO_CMP_3_0 0xC97 1506#define PI_ESIC_K_IO_ADD_CMP_3_0 0xC97
1498#define PI_ESIC_K_IO_CMP_3_1 0xC98 1507#define PI_ESIC_K_IO_ADD_CMP_3_1 0xC98
1499#define PI_ESIC_K_IO_ADD_MASK_0_0 0xC99 1508#define PI_ESIC_K_IO_ADD_MASK_0_0 0xC99
1500#define PI_ESIC_K_IO_ADD_MASK_0_1 0xC9A 1509#define PI_ESIC_K_IO_ADD_MASK_0_1 0xC9A
1501#define PI_ESIC_K_IO_ADD_MASK_1_0 0xC9B 1510#define PI_ESIC_K_IO_ADD_MASK_1_0 0xC9B
@@ -1518,11 +1527,16 @@ typedef union
1518#define PI_ESIC_K_INPUT_PORT 0xCAC 1527#define PI_ESIC_K_INPUT_PORT 0xCAC
1519#define PI_ESIC_K_OUTPUT_PORT 0xCAD 1528#define PI_ESIC_K_OUTPUT_PORT 0xCAD
1520#define PI_ESIC_K_FUNCTION_CNTRL 0xCAE 1529#define PI_ESIC_K_FUNCTION_CNTRL 0xCAE
1521#define PI_ESIC_K_CSR_IO_LEN PI_ESIC_K_FUNCTION_CNTRL+1 /* always last reg + 1 */
1522 1530
1523/* Define the value all drivers must write to the function control register. */ 1531/* Define the bits in the function control register. */
1524 1532
1525#define PI_ESIC_K_FUNCTION_CNTRL_IO_ENB 0x03 1533#define PI_FUNCTION_CNTRL_M_IOCS0 0x01
1534#define PI_FUNCTION_CNTRL_M_IOCS1 0x02
1535#define PI_FUNCTION_CNTRL_M_IOCS2 0x04
1536#define PI_FUNCTION_CNTRL_M_IOCS3 0x08
1537#define PI_FUNCTION_CNTRL_M_MEMCS0 0x10
1538#define PI_FUNCTION_CNTRL_M_MEMCS1 0x20
1539#define PI_FUNCTION_CNTRL_M_DMA 0x80
1526 1540
1527/* Define the bits in the slot control register. */ 1541/* Define the bits in the slot control register. */
1528 1542
@@ -1540,6 +1554,10 @@ typedef union
1540#define PI_BURST_HOLDOFF_V_RESERVED 1 1554#define PI_BURST_HOLDOFF_V_RESERVED 1
1541#define PI_BURST_HOLDOFF_V_MEM_MAP 0 1555#define PI_BURST_HOLDOFF_V_MEM_MAP 0
1542 1556
1557/* Define the implicit mask of the Memory Address Mask Register. */
1558
1559#define PI_MEM_ADD_MASK_M 0x3ff
1560
1543/* 1561/*
1544 * Define the fields in the IO Compare registers. 1562 * Define the fields in the IO Compare registers.
1545 * The driver must initialize the slot field with the slot ID shifted by the 1563 * The driver must initialize the slot field with the slot ID shifted by the
@@ -1577,6 +1595,7 @@ typedef union
1577#define DEFEA_PROD_ID_1 0x0130A310 /* DEC product 300, rev 1 */ 1595#define DEFEA_PROD_ID_1 0x0130A310 /* DEC product 300, rev 1 */
1578#define DEFEA_PROD_ID_2 0x0230A310 /* DEC product 300, rev 2 */ 1596#define DEFEA_PROD_ID_2 0x0230A310 /* DEC product 300, rev 2 */
1579#define DEFEA_PROD_ID_3 0x0330A310 /* DEC product 300, rev 3 */ 1597#define DEFEA_PROD_ID_3 0x0330A310 /* DEC product 300, rev 3 */
1598#define DEFEA_PROD_ID_4 0x0430A310 /* DEC product 300, rev 4 */
1580 1599
1581/**********************************************/ 1600/**********************************************/
1582/* Digital PFI Specification v1.0 Definitions */ 1601/* Digital PFI Specification v1.0 Definitions */
@@ -1633,12 +1652,6 @@ typedef union
1633#define PFI_STATUS_V_FIFO_EMPTY 1 1652#define PFI_STATUS_V_FIFO_EMPTY 1
1634#define PFI_STATUS_V_DMA_IN_PROGRESS 0 1653#define PFI_STATUS_V_DMA_IN_PROGRESS 0
1635 1654
1636#define DFX_MAX_EISA_SLOTS 16 /* maximum number of EISA slots to scan */
1637#define DFX_MAX_NUM_BOARDS 8 /* maximum number of adapters supported */
1638
1639#define DFX_BUS_TYPE_PCI 0 /* type code for DEC FDDIcontroller/PCI */
1640#define DFX_BUS_TYPE_EISA 1 /* type code for DEC FDDIcontroller/EISA */
1641
1642#define DFX_FC_PRH2_PRH1_PRH0 0x54003820 /* Packet Request Header bytes + FC */ 1655#define DFX_FC_PRH2_PRH1_PRH0 0x54003820 /* Packet Request Header bytes + FC */
1643#define DFX_PRH0_BYTE 0x20 /* Packet Request Header byte 0 */ 1656#define DFX_PRH0_BYTE 0x20 /* Packet Request Header byte 0 */
1644#define DFX_PRH1_BYTE 0x38 /* Packet Request Header byte 1 */ 1657#define DFX_PRH1_BYTE 0x38 /* Packet Request Header byte 1 */
@@ -1756,10 +1769,11 @@ typedef struct DFX_board_tag
1756 /* Store device, bus-specific, and parameter information for this adapter */ 1769 /* Store device, bus-specific, and parameter information for this adapter */
1757 1770
1758 struct net_device *dev; /* pointer to device structure */ 1771 struct net_device *dev; /* pointer to device structure */
1759 struct net_device *next; 1772 union {
1760 u32 bus_type; /* bus type (0 == PCI, 1 == EISA) */ 1773 void __iomem *mem;
1761 u16 base_addr; /* base I/O address (same as dev->base_addr) */ 1774 int port;
1762 struct pci_dev * pci_dev; 1775 } base; /* base address */
1776 struct device *bus_dev;
1763 u32 full_duplex_enb; /* FDDI Full Duplex enable (1 == on, 2 == off) */ 1777 u32 full_duplex_enb; /* FDDI Full Duplex enable (1 == on, 2 == off) */
1764 u32 req_ttrt; /* requested TTRT value (in 80ns units) */ 1778 u32 req_ttrt; /* requested TTRT value (in 80ns units) */
1765 u32 burst_size; /* adapter burst size (enumerated) */ 1779 u32 burst_size; /* adapter burst size (enumerated) */
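The header change collapses the old bus_type/base_addr/pci_dev trio into a one-word union plus a generic struct device pointer: a given board only ever uses one access method, so a single slot can hold either the ioremap() cookie or the port number. A compilable stand-alone analogue:

	#include <stdio.h>

	struct board {
		union {
			void *mem;	/* MMIO: cookie from ioremap() */
			int port;	/* port I/O: first port number */
		} base;
		int use_mmio;		/* selects the live member */
	};

	int main(void)
	{
		struct board b = { .base.port = 0x300, .use_mmio = 0 };

		if (b.use_mmio)
			printf("MMIO at %p\n", b.base.mem);
		else
			printf("ports at 0x%x\n", b.base.port);
		return 0;
	}
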
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 3208dac29168..0cefef5e3f06 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -2718,14 +2718,12 @@ static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
2718 struct net_device *netdev = pci_get_drvdata(pdev); 2718 struct net_device *netdev = pci_get_drvdata(pdev);
2719 struct nic *nic = netdev_priv(netdev); 2719 struct nic *nic = netdev_priv(netdev);
2720 2720
2721#ifdef CONFIG_E100_NAPI
2722 if (netif_running(netdev)) 2721 if (netif_running(netdev))
2723 netif_poll_disable(nic->netdev); 2722 netif_poll_disable(nic->netdev);
2724#endif
2725 del_timer_sync(&nic->watchdog); 2723 del_timer_sync(&nic->watchdog);
2726 netif_carrier_off(nic->netdev); 2724 netif_carrier_off(nic->netdev);
2727
2728 netif_device_detach(netdev); 2725 netif_device_detach(netdev);
2726
2729 pci_save_state(pdev); 2727 pci_save_state(pdev);
2730 2728
2731 if ((nic->flags & wol_magic) | e100_asf(nic)) { 2729 if ((nic->flags & wol_magic) | e100_asf(nic)) {
@@ -2761,16 +2759,13 @@ static int e100_resume(struct pci_dev *pdev)
2761} 2759}
2762#endif /* CONFIG_PM */ 2760#endif /* CONFIG_PM */
2763 2761
2764
2765static void e100_shutdown(struct pci_dev *pdev) 2762static void e100_shutdown(struct pci_dev *pdev)
2766{ 2763{
2767 struct net_device *netdev = pci_get_drvdata(pdev); 2764 struct net_device *netdev = pci_get_drvdata(pdev);
2768 struct nic *nic = netdev_priv(netdev); 2765 struct nic *nic = netdev_priv(netdev);
2769 2766
2770#ifdef CONFIG_E100_NAPI
2771 if (netif_running(netdev)) 2767 if (netif_running(netdev))
2772 netif_poll_disable(nic->netdev); 2768 netif_poll_disable(nic->netdev);
2773#endif
2774 del_timer_sync(&nic->watchdog); 2769 del_timer_sync(&nic->watchdog);
2775 netif_carrier_off(nic->netdev); 2770 netif_carrier_off(nic->netdev);
2776 2771
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index f091042b146e..689f158a469e 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -59,17 +59,13 @@
59#include <linux/capability.h> 59#include <linux/capability.h>
60#include <linux/in.h> 60#include <linux/in.h>
61#include <linux/ip.h> 61#include <linux/ip.h>
62#ifdef NETIF_F_TSO6
63#include <linux/ipv6.h> 62#include <linux/ipv6.h>
64#endif
65#include <linux/tcp.h> 63#include <linux/tcp.h>
66#include <linux/udp.h> 64#include <linux/udp.h>
67#include <net/pkt_sched.h> 65#include <net/pkt_sched.h>
68#include <linux/list.h> 66#include <linux/list.h>
69#include <linux/reboot.h> 67#include <linux/reboot.h>
70#ifdef NETIF_F_TSO
71#include <net/checksum.h> 68#include <net/checksum.h>
72#endif
73#include <linux/mii.h> 69#include <linux/mii.h>
74#include <linux/ethtool.h> 70#include <linux/ethtool.h>
75#include <linux/if_vlan.h> 71#include <linux/if_vlan.h>
@@ -257,7 +253,6 @@ struct e1000_adapter {
257 spinlock_t tx_queue_lock; 253 spinlock_t tx_queue_lock;
258#endif 254#endif
259 atomic_t irq_sem; 255 atomic_t irq_sem;
260 unsigned int detect_link;
261 unsigned int total_tx_bytes; 256 unsigned int total_tx_bytes;
262 unsigned int total_tx_packets; 257 unsigned int total_tx_packets;
263 unsigned int total_rx_bytes; 258 unsigned int total_rx_bytes;
@@ -348,9 +343,7 @@ struct e1000_adapter {
348 boolean_t have_msi; 343 boolean_t have_msi;
349#endif 344#endif
350 /* to not mess up cache alignment, always add to the bottom */ 345 /* to not mess up cache alignment, always add to the bottom */
351#ifdef NETIF_F_TSO
352 boolean_t tso_force; 346 boolean_t tso_force;
353#endif
354 boolean_t smart_power_down; /* phy smart power down */ 347 boolean_t smart_power_down; /* phy smart power down */
355 boolean_t quad_port_a; 348 boolean_t quad_port_a;
356 unsigned long flags; 349 unsigned long flags;
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index fb96c87f9e56..44ebc72962dc 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -338,7 +338,6 @@ e1000_set_tx_csum(struct net_device *netdev, uint32_t data)
338 return 0; 338 return 0;
339} 339}
340 340
341#ifdef NETIF_F_TSO
342static int 341static int
343e1000_set_tso(struct net_device *netdev, uint32_t data) 342e1000_set_tso(struct net_device *netdev, uint32_t data)
344{ 343{
@@ -352,18 +351,15 @@ e1000_set_tso(struct net_device *netdev, uint32_t data)
352 else 351 else
353 netdev->features &= ~NETIF_F_TSO; 352 netdev->features &= ~NETIF_F_TSO;
354 353
355#ifdef NETIF_F_TSO6
356 if (data) 354 if (data)
357 netdev->features |= NETIF_F_TSO6; 355 netdev->features |= NETIF_F_TSO6;
358 else 356 else
359 netdev->features &= ~NETIF_F_TSO6; 357 netdev->features &= ~NETIF_F_TSO6;
360#endif
361 358
362 DPRINTK(PROBE, INFO, "TSO is %s\n", data ? "Enabled" : "Disabled"); 359 DPRINTK(PROBE, INFO, "TSO is %s\n", data ? "Enabled" : "Disabled");
363 adapter->tso_force = TRUE; 360 adapter->tso_force = TRUE;
364 return 0; 361 return 0;
365} 362}
366#endif /* NETIF_F_TSO */
367 363
368static uint32_t 364static uint32_t
369e1000_get_msglevel(struct net_device *netdev) 365e1000_get_msglevel(struct net_device *netdev)
@@ -1971,10 +1967,8 @@ static const struct ethtool_ops e1000_ethtool_ops = {
1971 .set_tx_csum = e1000_set_tx_csum, 1967 .set_tx_csum = e1000_set_tx_csum,
1972 .get_sg = ethtool_op_get_sg, 1968 .get_sg = ethtool_op_get_sg,
1973 .set_sg = ethtool_op_set_sg, 1969 .set_sg = ethtool_op_set_sg,
1974#ifdef NETIF_F_TSO
1975 .get_tso = ethtool_op_get_tso, 1970 .get_tso = ethtool_op_get_tso,
1976 .set_tso = e1000_set_tso, 1971 .set_tso = e1000_set_tso,
1977#endif
1978 .self_test_count = e1000_diag_test_count, 1972 .self_test_count = e1000_diag_test_count,
1979 .self_test = e1000_diag_test, 1973 .self_test = e1000_diag_test,
1980 .get_strings = e1000_get_strings, 1974 .get_strings = e1000_get_strings,
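
With the NETIF_F_TSO guards gone, e1000_set_tso always toggles both the IPv4 and IPv6 segmentation-offload bits from a single ethtool request (and records the user's choice in tso_force so the watchdog respects it). A standalone sketch of the flag-toggling half, using illustrative flag values rather than the kernel's:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins for NETIF_F_TSO / NETIF_F_TSO6. */
    #define F_TSO  (1u << 0)
    #define F_TSO6 (1u << 1)

    /* Mirrors the ethtool handler's shape: one on/off request
     * drives both segmentation-offload feature bits. */
    static uint32_t set_tso(uint32_t features, int enable)
    {
        if (enable)
            features |= F_TSO | F_TSO6;
        else
            features &= ~(F_TSO | F_TSO6);
        return features;
    }

    int main(void)
    {
        uint32_t f = 0;
        f = set_tso(f, 1);
        printf("enabled:  0x%x\n", f);   /* 0x3 */
        f = set_tso(f, 0);
        printf("disabled: 0x%x\n", f);   /* 0x0 */
        return 0;
    }
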
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index c6259c7127f6..619c89218b4b 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -36,7 +36,7 @@ static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
36#else 36#else
37#define DRIVERNAPI "-NAPI" 37#define DRIVERNAPI "-NAPI"
38#endif 38#endif
39#define DRV_VERSION "7.3.15-k2"DRIVERNAPI 39#define DRV_VERSION "7.3.20-k2"DRIVERNAPI
40char e1000_driver_version[] = DRV_VERSION; 40char e1000_driver_version[] = DRV_VERSION;
41static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; 41static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
42 42
@@ -990,16 +990,12 @@ e1000_probe(struct pci_dev *pdev,
990 netdev->features &= ~NETIF_F_HW_VLAN_FILTER; 990 netdev->features &= ~NETIF_F_HW_VLAN_FILTER;
991 } 991 }
992 992
993#ifdef NETIF_F_TSO
994 if ((adapter->hw.mac_type >= e1000_82544) && 993 if ((adapter->hw.mac_type >= e1000_82544) &&
995 (adapter->hw.mac_type != e1000_82547)) 994 (adapter->hw.mac_type != e1000_82547))
996 netdev->features |= NETIF_F_TSO; 995 netdev->features |= NETIF_F_TSO;
997 996
998#ifdef NETIF_F_TSO6
999 if (adapter->hw.mac_type > e1000_82547_rev_2) 997 if (adapter->hw.mac_type > e1000_82547_rev_2)
1000 netdev->features |= NETIF_F_TSO6; 998 netdev->features |= NETIF_F_TSO6;
1001#endif
1002#endif
1003 if (pci_using_dac) 999 if (pci_using_dac)
1004 netdev->features |= NETIF_F_HIGHDMA; 1000 netdev->features |= NETIF_F_HIGHDMA;
1005 1001
@@ -2583,15 +2579,22 @@ e1000_watchdog(unsigned long data)
2583 2579
2584 if (link) { 2580 if (link) {
2585 if (!netif_carrier_ok(netdev)) { 2581 if (!netif_carrier_ok(netdev)) {
2582 uint32_t ctrl;
2586 boolean_t txb2b = 1; 2583 boolean_t txb2b = 1;
2587 e1000_get_speed_and_duplex(&adapter->hw, 2584 e1000_get_speed_and_duplex(&adapter->hw,
2588 &adapter->link_speed, 2585 &adapter->link_speed,
2589 &adapter->link_duplex); 2586 &adapter->link_duplex);
2590 2587
2591 DPRINTK(LINK, INFO, "NIC Link is Up %d Mbps %s\n", 2588 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
2592 adapter->link_speed, 2589 DPRINTK(LINK, INFO, "NIC Link is Up %d Mbps %s, "
2593 adapter->link_duplex == FULL_DUPLEX ? 2590 "Flow Control: %s\n",
2594 "Full Duplex" : "Half Duplex"); 2591 adapter->link_speed,
2592 adapter->link_duplex == FULL_DUPLEX ?
2593 "Full Duplex" : "Half Duplex",
2594 ((ctrl & E1000_CTRL_TFCE) && (ctrl &
2595 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2596 E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2597 E1000_CTRL_TFCE) ? "TX" : "None" )));
2595 2598
2596 /* tweak tx_queue_len according to speed/duplex 2599 /* tweak tx_queue_len according to speed/duplex
2597 * and adjust the timeout factor */ 2600 * and adjust the timeout factor */
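
The expanded link-up message decodes the negotiated flow-control mode from the TFCE (transmit) and RFCE (receive) enable bits of the device control register. The nested ternary reads more easily as a four-way table; a standalone rendering, with illustrative bit positions rather than the hardware's:

    #include <stdint.h>
    #include <stdio.h>

    #define CTRL_RFCE (1u << 27)   /* illustrative bit positions */
    #define CTRL_TFCE (1u << 28)

    /* Same decision table as the watchdog's nested ternary. */
    static const char *fc_mode(uint32_t ctrl)
    {
        if ((ctrl & CTRL_TFCE) && (ctrl & CTRL_RFCE))
            return "RX/TX";
        if (ctrl & CTRL_RFCE)
            return "RX";
        if (ctrl & CTRL_TFCE)
            return "TX";
        return "None";
    }

    int main(void)
    {
        printf("%s\n", fc_mode(CTRL_TFCE | CTRL_RFCE)); /* RX/TX */
        printf("%s\n", fc_mode(CTRL_RFCE));             /* RX */
        printf("%s\n", fc_mode(0));                     /* None */
        return 0;
    }
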
@@ -2619,7 +2622,6 @@ e1000_watchdog(unsigned long data)
2619 E1000_WRITE_REG(&adapter->hw, TARC0, tarc0); 2622 E1000_WRITE_REG(&adapter->hw, TARC0, tarc0);
2620 } 2623 }
2621 2624
2622#ifdef NETIF_F_TSO
2623 /* disable TSO for pcie and 10/100 speeds, to avoid 2625 /* disable TSO for pcie and 10/100 speeds, to avoid
2624 * some hardware issues */ 2626 * some hardware issues */
2625 if (!adapter->tso_force && 2627 if (!adapter->tso_force &&
@@ -2630,22 +2632,17 @@ e1000_watchdog(unsigned long data)
2630 DPRINTK(PROBE,INFO, 2632 DPRINTK(PROBE,INFO,
2631 "10/100 speed: disabling TSO\n"); 2633 "10/100 speed: disabling TSO\n");
2632 netdev->features &= ~NETIF_F_TSO; 2634 netdev->features &= ~NETIF_F_TSO;
2633#ifdef NETIF_F_TSO6
2634 netdev->features &= ~NETIF_F_TSO6; 2635 netdev->features &= ~NETIF_F_TSO6;
2635#endif
2636 break; 2636 break;
2637 case SPEED_1000: 2637 case SPEED_1000:
2638 netdev->features |= NETIF_F_TSO; 2638 netdev->features |= NETIF_F_TSO;
2639#ifdef NETIF_F_TSO6
2640 netdev->features |= NETIF_F_TSO6; 2639 netdev->features |= NETIF_F_TSO6;
2641#endif
2642 break; 2640 break;
2643 default: 2641 default:
2644 /* oops */ 2642 /* oops */
2645 break; 2643 break;
2646 } 2644 }
2647 } 2645 }
2648#endif
2649 2646
2650 /* enable transmits in the hardware, need to do this 2647 /* enable transmits in the hardware, need to do this
2651 * after setting TARC0 */ 2648 * after setting TARC0 */
@@ -2875,7 +2872,6 @@ static int
2875e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, 2872e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2876 struct sk_buff *skb) 2873 struct sk_buff *skb)
2877{ 2874{
2878#ifdef NETIF_F_TSO
2879 struct e1000_context_desc *context_desc; 2875 struct e1000_context_desc *context_desc;
2880 struct e1000_buffer *buffer_info; 2876 struct e1000_buffer *buffer_info;
2881 unsigned int i; 2877 unsigned int i;
@@ -2904,7 +2900,6 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2904 0); 2900 0);
2905 cmd_length = E1000_TXD_CMD_IP; 2901 cmd_length = E1000_TXD_CMD_IP;
2906 ipcse = skb->h.raw - skb->data - 1; 2902 ipcse = skb->h.raw - skb->data - 1;
2907#ifdef NETIF_F_TSO6
2908 } else if (skb->protocol == htons(ETH_P_IPV6)) { 2903 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2909 skb->nh.ipv6h->payload_len = 0; 2904 skb->nh.ipv6h->payload_len = 0;
2910 skb->h.th->check = 2905 skb->h.th->check =
@@ -2914,7 +2909,6 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2914 IPPROTO_TCP, 2909 IPPROTO_TCP,
2915 0); 2910 0);
2916 ipcse = 0; 2911 ipcse = 0;
2917#endif
2918 } 2912 }
2919 ipcss = skb->nh.raw - skb->data; 2913 ipcss = skb->nh.raw - skb->data;
2920 ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data; 2914 ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data;
@@ -2947,8 +2941,6 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2947 2941
2948 return TRUE; 2942 return TRUE;
2949 } 2943 }
2950#endif
2951
2952 return FALSE; 2944 return FALSE;
2953} 2945}
2954 2946
@@ -2968,8 +2960,9 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2968 buffer_info = &tx_ring->buffer_info[i]; 2960 buffer_info = &tx_ring->buffer_info[i];
2969 context_desc = E1000_CONTEXT_DESC(*tx_ring, i); 2961 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2970 2962
2963 context_desc->lower_setup.ip_config = 0;
2971 context_desc->upper_setup.tcp_fields.tucss = css; 2964 context_desc->upper_setup.tcp_fields.tucss = css;
2972 context_desc->upper_setup.tcp_fields.tucso = css + skb->csum_offset; 2965 context_desc->upper_setup.tcp_fields.tucso = css + skb->csum;
2973 context_desc->upper_setup.tcp_fields.tucse = 0; 2966 context_desc->upper_setup.tcp_fields.tucse = 0;
2974 context_desc->tcp_seg_setup.data = 0; 2967 context_desc->tcp_seg_setup.data = 0;
2975 context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT); 2968 context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);
@@ -3005,7 +2998,6 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
3005 while (len) { 2998 while (len) {
3006 buffer_info = &tx_ring->buffer_info[i]; 2999 buffer_info = &tx_ring->buffer_info[i];
3007 size = min(len, max_per_txd); 3000 size = min(len, max_per_txd);
3008#ifdef NETIF_F_TSO
3009 /* Workaround for Controller erratum -- 3001 /* Workaround for Controller erratum --
3010 * descriptor for non-tso packet in a linear SKB that follows a 3002 * descriptor for non-tso packet in a linear SKB that follows a
3011 * tso gets written back prematurely before the data is fully 3003 * tso gets written back prematurely before the data is fully
@@ -3020,7 +3012,6 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
3020 * in TSO mode. Append 4-byte sentinel desc */ 3012 * in TSO mode. Append 4-byte sentinel desc */
3021 if (unlikely(mss && !nr_frags && size == len && size > 8)) 3013 if (unlikely(mss && !nr_frags && size == len && size > 8))
3022 size -= 4; 3014 size -= 4;
3023#endif
3024 /* work-around for errata 10 and it applies 3015 /* work-around for errata 10 and it applies
3025 * to all controllers in PCI-X mode 3016 * to all controllers in PCI-X mode
3026 * The fix is to make sure that the first descriptor of a 3017 * The fix is to make sure that the first descriptor of a
@@ -3062,12 +3053,10 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
3062 while (len) { 3053 while (len) {
3063 buffer_info = &tx_ring->buffer_info[i]; 3054 buffer_info = &tx_ring->buffer_info[i];
3064 size = min(len, max_per_txd); 3055 size = min(len, max_per_txd);
3065#ifdef NETIF_F_TSO
3066 /* Workaround for premature desc write-backs 3056 /* Workaround for premature desc write-backs
3067 * in TSO mode. Append 4-byte sentinel desc */ 3057 * in TSO mode. Append 4-byte sentinel desc */
3068 if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8)) 3058 if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
3069 size -= 4; 3059 size -= 4;
3070#endif
3071 /* Workaround for potential 82544 hang in PCI-X. 3060 /* Workaround for potential 82544 hang in PCI-X.
3072 * Avoid terminating buffers within evenly-aligned 3061 * Avoid terminating buffers within evenly-aligned
3073 * dwords. */ 3062 * dwords. */
@@ -3292,7 +3281,6 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3292 if (adapter->hw.mac_type >= e1000_82571) 3281 if (adapter->hw.mac_type >= e1000_82571)
3293 max_per_txd = 8192; 3282 max_per_txd = 8192;
3294 3283
3295#ifdef NETIF_F_TSO
3296 mss = skb_shinfo(skb)->gso_size; 3284 mss = skb_shinfo(skb)->gso_size;
3297 /* The controller does a simple calculation to 3285 /* The controller does a simple calculation to
3298 * make sure there is enough room in the FIFO before 3286 * make sure there is enough room in the FIFO before
@@ -3346,16 +3334,10 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3346 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL)) 3334 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
3347 count++; 3335 count++;
3348 count++; 3336 count++;
3349#else
3350 if (skb->ip_summed == CHECKSUM_PARTIAL)
3351 count++;
3352#endif
3353 3337
3354#ifdef NETIF_F_TSO
3355 /* Controller Erratum workaround */ 3338 /* Controller Erratum workaround */
3356 if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb)) 3339 if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
3357 count++; 3340 count++;
3358#endif
3359 3341
3360 count += TXD_USE_COUNT(len, max_txd_pwr); 3342 count += TXD_USE_COUNT(len, max_txd_pwr);
3361 3343
@@ -3602,7 +3584,7 @@ e1000_update_stats(struct e1000_adapter *adapter)
3602 */ 3584 */
3603 if (adapter->link_speed == 0) 3585 if (adapter->link_speed == 0)
3604 return; 3586 return;
3605 if (pdev->error_state && pdev->error_state != pci_channel_io_normal) 3587 if (pci_channel_offline(pdev))
3606 return; 3588 return;
3607 3589
3608 spin_lock_irqsave(&adapter->stats_lock, flags); 3590 spin_lock_irqsave(&adapter->stats_lock, flags);
@@ -3765,8 +3747,8 @@ e1000_update_stats(struct e1000_adapter *adapter)
3765 * @data: pointer to a network interface device structure 3747 * @data: pointer to a network interface device structure
3766 **/ 3748 **/
3767 3749
3768static 3750static irqreturn_t
3769irqreturn_t e1000_intr_msi(int irq, void *data) 3751e1000_intr_msi(int irq, void *data)
3770{ 3752{
3771 struct net_device *netdev = data; 3753 struct net_device *netdev = data;
3772 struct e1000_adapter *adapter = netdev_priv(netdev); 3754 struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -3774,49 +3756,27 @@ irqreturn_t e1000_intr_msi(int irq, void *data)
3774#ifndef CONFIG_E1000_NAPI 3756#ifndef CONFIG_E1000_NAPI
3775 int i; 3757 int i;
3776#endif 3758#endif
3759 uint32_t icr = E1000_READ_REG(hw, ICR);
3777 3760
3778 /* this code avoids the read of ICR but has to get 1000 interrupts
3779 * at every link change event before it will notice the change */
3780 if (++adapter->detect_link >= 1000) {
3781 uint32_t icr = E1000_READ_REG(hw, ICR);
3782#ifdef CONFIG_E1000_NAPI 3761#ifdef CONFIG_E1000_NAPI
3783 /* read ICR disables interrupts using IAM, so keep up with our 3762 /* read ICR disables interrupts using IAM, so keep up with our
3784 * enable/disable accounting */ 3763 * enable/disable accounting */
3785 atomic_inc(&adapter->irq_sem); 3764 atomic_inc(&adapter->irq_sem);
3786#endif 3765#endif
3787 adapter->detect_link = 0; 3766 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
3788 if ((icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) && 3767 hw->get_link_status = 1;
3789 (icr & E1000_ICR_INT_ASSERTED)) { 3768 /* 80003ES2LAN workaround-- For packet buffer work-around on
3790 hw->get_link_status = 1; 3769 * link down event; disable receives here in the ISR and reset
3791 /* 80003ES2LAN workaround-- 3770 * adapter in watchdog */
3792 * For packet buffer work-around on link down event; 3771 if (netif_carrier_ok(netdev) &&
3793 * disable receives here in the ISR and 3772 (adapter->hw.mac_type == e1000_80003es2lan)) {
3794 * reset adapter in watchdog 3773 /* disable receives */
3795 */ 3774 uint32_t rctl = E1000_READ_REG(hw, RCTL);
3796 if (netif_carrier_ok(netdev) && 3775 E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN);
3797 (adapter->hw.mac_type == e1000_80003es2lan)) {
3798 /* disable receives */
3799 uint32_t rctl = E1000_READ_REG(hw, RCTL);
3800 E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN);
3801 }
3802 /* guard against interrupt when we're going down */
3803 if (!test_bit(__E1000_DOWN, &adapter->flags))
3804 mod_timer(&adapter->watchdog_timer,
3805 jiffies + 1);
3806 } 3776 }
3807 } else { 3777 /* guard against interrupt when we're going down */
3808 E1000_WRITE_REG(hw, ICR, (0xffffffff & ~(E1000_ICR_RXSEQ | 3778 if (!test_bit(__E1000_DOWN, &adapter->flags))
3809 E1000_ICR_LSC))); 3779 mod_timer(&adapter->watchdog_timer, jiffies + 1);
3810 /* bummer we have to flush here, but things break otherwise as
3811 * some event appears to be lost or delayed and throughput
3812 * drops. In almost all tests this flush is un-necessary */
3813 E1000_WRITE_FLUSH(hw);
3814#ifdef CONFIG_E1000_NAPI
3815 /* Interrupt Auto-Mask (IAM)...upon writing ICR, interrupts are
3816 * masked. No need for the IMC write, but it does mean we
3817 * should account for it ASAP. */
3818 atomic_inc(&adapter->irq_sem);
3819#endif
3820 } 3780 }
3821 3781
3822#ifdef CONFIG_E1000_NAPI 3782#ifdef CONFIG_E1000_NAPI
@@ -3836,7 +3796,7 @@ irqreturn_t e1000_intr_msi(int irq, void *data)
3836 3796
3837 for (i = 0; i < E1000_MAX_INTR; i++) 3797 for (i = 0; i < E1000_MAX_INTR; i++)
3838 if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) & 3798 if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
3839 !e1000_clean_tx_irq(adapter, adapter->tx_ring))) 3799 e1000_clean_tx_irq(adapter, adapter->tx_ring)))
3840 break; 3800 break;
3841 3801
3842 if (likely(adapter->itr_setting & 3)) 3802 if (likely(adapter->itr_setting & 3))
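
The rewritten e1000_intr_msi drops the detect_link heuristic: an MSI vector is never shared, so the handler can afford an ICR read on every interrupt, and a set RXSEQ or LSC bit arms the watchdog immediately instead of after up to 1000 interrupts. A standalone model of that decision flow, with stand-in bit values and fields:

    #include <stdint.h>
    #include <stdio.h>

    #define ICR_LSC   (1u << 2)   /* stand-ins for the real ICR bits */
    #define ICR_RXSEQ (1u << 3)

    struct nic {
        int get_link_status;   /* tells the watchdog to re-read PHY state */
        int watchdog_armed;
    };

    /* Mirrors the reworked handler: one ICR read per interrupt,
     * link events rearm the watchdog right away. */
    static void intr_msi(struct nic *nic, uint32_t icr)
    {
        if (icr & (ICR_RXSEQ | ICR_LSC)) {
            nic->get_link_status = 1;
            nic->watchdog_armed = 1;  /* mod_timer(..., jiffies + 1) in the driver */
        }
        /* rx/tx cleanup would run here (NAPI schedule or clean loop) */
    }

    int main(void)
    {
        struct nic n = { 0, 0 };
        intr_msi(&n, ICR_LSC);
        printf("link check=%d watchdog=%d\n", n.get_link_status, n.watchdog_armed);
        return 0;
    }
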
@@ -3939,7 +3899,7 @@ e1000_intr(int irq, void *data)
3939 3899
3940 for (i = 0; i < E1000_MAX_INTR; i++) 3900 for (i = 0; i < E1000_MAX_INTR; i++)
3941 if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) & 3901 if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
3942 !e1000_clean_tx_irq(adapter, adapter->tx_ring))) 3902 e1000_clean_tx_irq(adapter, adapter->tx_ring)))
3943 break; 3903 break;
3944 3904
3945 if (likely(adapter->itr_setting & 3)) 3905 if (likely(adapter->itr_setting & 3))
@@ -3989,7 +3949,7 @@ e1000_clean(struct net_device *poll_dev, int *budget)
3989 poll_dev->quota -= work_done; 3949 poll_dev->quota -= work_done;
3990 3950
3991 /* If no Tx and not enough Rx work done, exit the polling mode */ 3951 /* If no Tx and not enough Rx work done, exit the polling mode */
3992 if ((!tx_cleaned && (work_done == 0)) || 3952 if ((tx_cleaned && (work_done < work_to_do)) ||
3993 !netif_running(poll_dev)) { 3953 !netif_running(poll_dev)) {
3994quit_polling: 3954quit_polling:
3995 if (likely(adapter->itr_setting & 3)) 3955 if (likely(adapter->itr_setting & 3))
@@ -4019,7 +3979,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
4019#ifdef CONFIG_E1000_NAPI 3979#ifdef CONFIG_E1000_NAPI
4020 unsigned int count = 0; 3980 unsigned int count = 0;
4021#endif 3981#endif
4022 boolean_t cleaned = FALSE; 3982 boolean_t cleaned = TRUE;
4023 unsigned int total_tx_bytes=0, total_tx_packets=0; 3983 unsigned int total_tx_bytes=0, total_tx_packets=0;
4024 3984
4025 i = tx_ring->next_to_clean; 3985 i = tx_ring->next_to_clean;
@@ -4034,10 +3994,13 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
4034 3994
4035 if (cleaned) { 3995 if (cleaned) {
4036 struct sk_buff *skb = buffer_info->skb; 3996 struct sk_buff *skb = buffer_info->skb;
4037 unsigned int segs = skb_shinfo(skb)->gso_segs; 3997 unsigned int segs, bytecount;
3998 segs = skb_shinfo(skb)->gso_segs ?: 1;
3999 /* multiply data chunks by size of headers */
4000 bytecount = ((segs - 1) * skb_headlen(skb)) +
4001 skb->len;
4038 total_tx_packets += segs; 4002 total_tx_packets += segs;
4039 total_tx_packets++; 4003 total_tx_bytes += bytecount;
4040 total_tx_bytes += skb->len;
4041 } 4004 }
4042 e1000_unmap_and_free_tx_resource(adapter, buffer_info); 4005 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
4043 tx_desc->upper.data = 0; 4006 tx_desc->upper.data = 0;
@@ -4050,7 +4013,10 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
4050#ifdef CONFIG_E1000_NAPI 4013#ifdef CONFIG_E1000_NAPI
4051#define E1000_TX_WEIGHT 64 4014#define E1000_TX_WEIGHT 64
4052 /* weight of a sort for tx, to avoid endless transmit cleanup */ 4015 /* weight of a sort for tx, to avoid endless transmit cleanup */
4053 if (count++ == E1000_TX_WEIGHT) break; 4016 if (count++ == E1000_TX_WEIGHT) {
4017 cleaned = FALSE;
4018 break;
4019 }
4054#endif 4020#endif
4055 } 4021 }
4056 4022
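
The new TX-clean accounting reconstructs the on-wire byte count of a TSO skb: each of the segs segments resends the headers, and skb->len already includes them once, giving (segs - 1) * headlen + len; gso_segs ?: 1 makes the same formula degrade to skb->len for ordinary frames. The weight break now also clears cleaned, so the NAPI caller knows cleanup is unfinished and keeps polling. The arithmetic, checked numerically with made-up sizes:

    #include <stdio.h>

    /* (segs - 1) * headlen + len: headers are re-sent with every
     * segment, but skb->len already counts them once. */
    static unsigned int wire_bytes(unsigned int gso_segs,
                                   unsigned int headlen,
                                   unsigned int len)
    {
        unsigned int segs = gso_segs ? gso_segs : 1;  /* the ?: idiom */
        return (segs - 1) * headlen + len;
    }

    int main(void)
    {
        /* 64 KiB TSO skb, 54-byte headers, 45 segments: */
        printf("tso:   %u\n", wire_bytes(45, 54, 65536 + 54));
        /* plain 1514-byte frame, gso_segs == 0: */
        printf("plain: %u\n", wire_bytes(0, 54, 1514));
        return 0;
    }
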
diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h
index 18afc0c25dac..10af742d8a20 100644
--- a/drivers/net/e1000/e1000_osdep.h
+++ b/drivers/net/e1000/e1000_osdep.h
@@ -48,8 +48,6 @@ typedef enum {
48 TRUE = 1 48 TRUE = 1
49} boolean_t; 49} boolean_t;
50 50
51#define MSGOUT(S, A, B) printk(KERN_DEBUG S "\n", A, B)
52
53#ifdef DBG 51#ifdef DBG
54#define DEBUGOUT(S) printk(KERN_DEBUG S "\n") 52#define DEBUGOUT(S) printk(KERN_DEBUG S "\n")
55#define DEBUGOUT1(S, A...) printk(KERN_DEBUG S "\n", A) 53#define DEBUGOUT1(S, A...) printk(KERN_DEBUG S "\n", A)
@@ -58,7 +56,7 @@ typedef enum {
58#define DEBUGOUT1(S, A...) 56#define DEBUGOUT1(S, A...)
59#endif 57#endif
60 58
61#define DEBUGFUNC(F) DEBUGOUT(F) 59#define DEBUGFUNC(F) DEBUGOUT(F "\n")
62#define DEBUGOUT2 DEBUGOUT1 60#define DEBUGOUT2 DEBUGOUT1
63#define DEBUGOUT3 DEBUGOUT2 61#define DEBUGOUT3 DEBUGOUT2
64#define DEBUGOUT7 DEBUGOUT3 62#define DEBUGOUT7 DEBUGOUT3
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c
index cf2a279307e1..f8862e203ac9 100644
--- a/drivers/net/e1000/e1000_param.c
+++ b/drivers/net/e1000/e1000_param.c
@@ -760,22 +760,13 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
760 case SPEED_1000: 760 case SPEED_1000:
761 DPRINTK(PROBE, INFO, "1000 Mbps Speed specified without " 761 DPRINTK(PROBE, INFO, "1000 Mbps Speed specified without "
762 "Duplex\n"); 762 "Duplex\n");
763 DPRINTK(PROBE, INFO, 763 goto full_duplex_only;
764 "Using Autonegotiation at 1000 Mbps "
765 "Full Duplex only\n");
766 adapter->hw.autoneg = adapter->fc_autoneg = 1;
767 adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
768 break;
769 case SPEED_1000 + HALF_DUPLEX: 764 case SPEED_1000 + HALF_DUPLEX:
770 DPRINTK(PROBE, INFO, 765 DPRINTK(PROBE, INFO,
771 "Half Duplex is not supported at 1000 Mbps\n"); 766 "Half Duplex is not supported at 1000 Mbps\n");
772 DPRINTK(PROBE, INFO, 767 /* fall through */
773 "Using Autonegotiation at 1000 Mbps "
774 "Full Duplex only\n");
775 adapter->hw.autoneg = adapter->fc_autoneg = 1;
776 adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
777 break;
778 case SPEED_1000 + FULL_DUPLEX: 768 case SPEED_1000 + FULL_DUPLEX:
769full_duplex_only:
779 DPRINTK(PROBE, INFO, 770 DPRINTK(PROBE, INFO,
780 "Using Autonegotiation at 1000 Mbps Full Duplex only\n"); 771 "Using Autonegotiation at 1000 Mbps Full Duplex only\n");
781 adapter->hw.autoneg = adapter->fc_autoneg = 1; 772 adapter->hw.autoneg = adapter->fc_autoneg = 1;
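
The three 1000-Mbps parameter cases now funnel into a single full_duplex_only label instead of repeating the "autoneg at 1000 Full only" block verbatim. The same switch/goto shape, standalone and with illustrative speed codes:

    #include <stdio.h>

    enum { HALF = 1, FULL = 2, SPEED_1000 = 1000 };

    /* Collapse the duplicated 1000-Mbps handling behind one label,
     * as the parameter-checking code now does. */
    static void check_speed(int spddplx)
    {
        switch (spddplx) {
        case SPEED_1000:
            printf("1000 Mbps specified without duplex\n");
            goto full_duplex_only;
        case SPEED_1000 + HALF:
            printf("half duplex not supported at 1000 Mbps\n");
            /* fall through */
        case SPEED_1000 + FULL:
    full_duplex_only:
            printf("using autoneg at 1000 Mbps full duplex only\n");
            break;
        default:
            printf("unhandled\n");
        }
    }

    int main(void)
    {
        check_speed(SPEED_1000);
        check_speed(SPEED_1000 + HALF);
        return 0;
    }
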
diff --git a/drivers/net/e2100.c b/drivers/net/e2100.c
index c62d9c6363c6..b2b0a96218ca 100644
--- a/drivers/net/e2100.c
+++ b/drivers/net/e2100.c
@@ -355,8 +355,7 @@ e21_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring
355 355
356 mem_on(ioaddr, shared_mem, (ring_offset>>8)); 356 mem_on(ioaddr, shared_mem, (ring_offset>>8));
357 357
358 /* Packet is always in one chunk -- we can copy + cksum. */ 358 memcpy_fromio(skb->data, ei_status.mem + (ring_offset & 0xff), count);
359 eth_io_copy_and_sum(skb, ei_status.mem + (ring_offset & 0xff), count, 0);
360 359
361 mem_off(ioaddr); 360 mem_off(ioaddr);
362} 361}
diff --git a/drivers/net/es3210.c b/drivers/net/es3210.c
index 2d2ea94a00bb..822e5bfd1a71 100644
--- a/drivers/net/es3210.c
+++ b/drivers/net/es3210.c
@@ -375,7 +375,7 @@ static void es_block_input(struct net_device *dev, int count, struct sk_buff *sk
375 memcpy_fromio(skb->data + semi_count, ei_status.mem, count); 375 memcpy_fromio(skb->data + semi_count, ei_status.mem, count);
376 } else { 376 } else {
377 /* Packet is in one chunk. */ 377 /* Packet is in one chunk. */
378 eth_io_copy_and_sum(skb, xfer_start, count, 0); 378 memcpy_fromio(skb->data, xfer_start, count);
379 } 379 }
380} 380}
381 381
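
Both 8390-based drivers replace eth_io_copy_and_sum() with a plain memcpy_fromio() out of the adapter's shared-memory window; es3210 keeps its two-piece copy for packets that wrap past the end of that window. A standalone model of the wrap-aware copy, with plain memcpy standing in for memcpy_fromio():

    #include <stdio.h>
    #include <string.h>

    /* Copy count bytes out of a circular shared-memory window,
     * splitting the copy when the packet wraps past the end --
     * the same shape as the 8390 block-input helpers. */
    static void ring_copy(char *dst, const char *win, size_t win_size,
                          size_t start, size_t count)
    {
        if (start + count > win_size) {
            size_t first = win_size - start;          /* bytes up to the wrap */
            memcpy(dst, win + start, first);
            memcpy(dst + first, win, count - first);  /* remainder from the base */
        } else {
            memcpy(dst, win + start, count);          /* packet in one chunk */
        }
    }

    int main(void)
    {
        char win[8] = "ABCDEFGH", out[6] = {0};
        ring_copy(out, win, sizeof(win), 6, 4);  /* wraps: GHAB */
        printf("%.4s\n", out);
        return 0;
    }
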
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 93f2b7a22160..a363148d0198 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -111,6 +111,7 @@
111 * 0.57: 14 May 2006: Mac address set in probe/remove and order corrections. 111 * 0.57: 14 May 2006: Mac address set in probe/remove and order corrections.
112 * 0.58: 30 Oct 2006: Added support for sideband management unit. 112 * 0.58: 30 Oct 2006: Added support for sideband management unit.
113 * 0.59: 30 Oct 2006: Added support for recoverable error. 113 * 0.59: 30 Oct 2006: Added support for recoverable error.
114 * 0.60: 20 Jan 2007: Code optimizations for rings, rx & tx data paths, and stats.
114 * 115 *
115 * Known bugs: 116 * Known bugs:
116 * We suspect that on some hardware no TX done interrupts are generated. 117 * We suspect that on some hardware no TX done interrupts are generated.
@@ -127,7 +128,7 @@
127#else 128#else
128#define DRIVERNAPI 129#define DRIVERNAPI
129#endif 130#endif
130#define FORCEDETH_VERSION "0.59" 131#define FORCEDETH_VERSION "0.60"
131#define DRV_NAME "forcedeth" 132#define DRV_NAME "forcedeth"
132 133
133#include <linux/module.h> 134#include <linux/module.h>
@@ -173,9 +174,10 @@
173#define DEV_HAS_MSI_X 0x0080 /* device supports MSI-X */ 174#define DEV_HAS_MSI_X 0x0080 /* device supports MSI-X */
174#define DEV_HAS_POWER_CNTRL 0x0100 /* device supports power savings */ 175#define DEV_HAS_POWER_CNTRL 0x0100 /* device supports power savings */
175#define DEV_HAS_PAUSEFRAME_TX 0x0200 /* device supports tx pause frames */ 176#define DEV_HAS_PAUSEFRAME_TX 0x0200 /* device supports tx pause frames */
176#define DEV_HAS_STATISTICS 0x0400 /* device supports hw statistics */ 177#define DEV_HAS_STATISTICS_V1 0x0400 /* device supports hw statistics version 1 */
177#define DEV_HAS_TEST_EXTENDED 0x0800 /* device supports extended diagnostic test */ 178#define DEV_HAS_STATISTICS_V2 0x0800 /* device supports hw statistics version 2 */
178#define DEV_HAS_MGMT_UNIT 0x1000 /* device supports management unit */ 179#define DEV_HAS_TEST_EXTENDED 0x1000 /* device supports extended diagnostic test */
180#define DEV_HAS_MGMT_UNIT 0x2000 /* device supports management unit */
179 181
180enum { 182enum {
181 NvRegIrqStatus = 0x000, 183 NvRegIrqStatus = 0x000,
@@ -210,7 +212,7 @@ enum {
210 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms 212 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
211 */ 213 */
212 NvRegPollingInterval = 0x00c, 214 NvRegPollingInterval = 0x00c,
213#define NVREG_POLL_DEFAULT_THROUGHPUT 970 215#define NVREG_POLL_DEFAULT_THROUGHPUT 970 /* backup tx cleanup if loop max reached */
214#define NVREG_POLL_DEFAULT_CPU 13 216#define NVREG_POLL_DEFAULT_CPU 13
215 NvRegMSIMap0 = 0x020, 217 NvRegMSIMap0 = 0x020,
216 NvRegMSIMap1 = 0x024, 218 NvRegMSIMap1 = 0x024,
@@ -304,8 +306,8 @@ enum {
304#define NVREG_TXRXCTL_RESET 0x0010 306#define NVREG_TXRXCTL_RESET 0x0010
305#define NVREG_TXRXCTL_RXCHECK 0x0400 307#define NVREG_TXRXCTL_RXCHECK 0x0400
306#define NVREG_TXRXCTL_DESC_1 0 308#define NVREG_TXRXCTL_DESC_1 0
307#define NVREG_TXRXCTL_DESC_2 0x02100 309#define NVREG_TXRXCTL_DESC_2 0x002100
308#define NVREG_TXRXCTL_DESC_3 0x02200 310#define NVREG_TXRXCTL_DESC_3 0xc02200
309#define NVREG_TXRXCTL_VLANSTRIP 0x00040 311#define NVREG_TXRXCTL_VLANSTRIP 0x00040
310#define NVREG_TXRXCTL_VLANINS 0x00080 312#define NVREG_TXRXCTL_VLANINS 0x00080
311 NvRegTxRingPhysAddrHigh = 0x148, 313 NvRegTxRingPhysAddrHigh = 0x148,
@@ -487,7 +489,8 @@ union ring_type {
487 489
488/* Miscellaneous hardware related defines: */ 490
489#define NV_PCI_REGSZ_VER1 0x270 491#define NV_PCI_REGSZ_VER1 0x270
490#define NV_PCI_REGSZ_VER2 0x604 492#define NV_PCI_REGSZ_VER2 0x2d4
493#define NV_PCI_REGSZ_VER3 0x604
491 494
492/* various timeout delays: all in usec */ 495/* various timeout delays: all in usec */
493#define NV_TXRX_RESET_DELAY 4 496#define NV_TXRX_RESET_DELAY 4
@@ -518,12 +521,6 @@ union ring_type {
518#define TX_RING_MIN 64 521#define TX_RING_MIN 64
519#define RING_MAX_DESC_VER_1 1024 522#define RING_MAX_DESC_VER_1 1024
520#define RING_MAX_DESC_VER_2_3 16384 523#define RING_MAX_DESC_VER_2_3 16384
521/*
522 * Difference between the get and put pointers for the tx ring.
523 * This is used to throttle the amount of data outstanding in the
524 * tx ring.
525 */
526#define TX_LIMIT_DIFFERENCE 1
527 524
528/* rx/tx mac addr + type + vlan + align + slack*/ 525/* rx/tx mac addr + type + vlan + align + slack*/
529#define NV_RX_HEADERS (64) 526#define NV_RX_HEADERS (64)
@@ -611,9 +608,6 @@ static const struct nv_ethtool_str nv_estats_str[] = {
611 { "tx_carrier_errors" }, 608 { "tx_carrier_errors" },
612 { "tx_excess_deferral" }, 609 { "tx_excess_deferral" },
613 { "tx_retry_error" }, 610 { "tx_retry_error" },
614 { "tx_deferral" },
615 { "tx_packets" },
616 { "tx_pause" },
617 { "rx_frame_error" }, 611 { "rx_frame_error" },
618 { "rx_extra_byte" }, 612 { "rx_extra_byte" },
619 { "rx_late_collision" }, 613 { "rx_late_collision" },
@@ -626,11 +620,17 @@ static const struct nv_ethtool_str nv_estats_str[] = {
626 { "rx_unicast" }, 620 { "rx_unicast" },
627 { "rx_multicast" }, 621 { "rx_multicast" },
628 { "rx_broadcast" }, 622 { "rx_broadcast" },
623 { "rx_packets" },
624 { "rx_errors_total" },
625 { "tx_errors_total" },
626
627 /* version 2 stats */
628 { "tx_deferral" },
629 { "tx_packets" },
629 { "rx_bytes" }, 630 { "rx_bytes" },
631 { "tx_pause" },
630 { "rx_pause" }, 632 { "rx_pause" },
631 { "rx_drop_frame" }, 633 { "rx_drop_frame" }
632 { "rx_packets" },
633 { "rx_errors_total" }
634}; 634};
635 635
636struct nv_ethtool_stats { 636struct nv_ethtool_stats {
@@ -643,9 +643,6 @@ struct nv_ethtool_stats {
643 u64 tx_carrier_errors; 643 u64 tx_carrier_errors;
644 u64 tx_excess_deferral; 644 u64 tx_excess_deferral;
645 u64 tx_retry_error; 645 u64 tx_retry_error;
646 u64 tx_deferral;
647 u64 tx_packets;
648 u64 tx_pause;
649 u64 rx_frame_error; 646 u64 rx_frame_error;
650 u64 rx_extra_byte; 647 u64 rx_extra_byte;
651 u64 rx_late_collision; 648 u64 rx_late_collision;
@@ -658,13 +655,22 @@ struct nv_ethtool_stats {
658 u64 rx_unicast; 655 u64 rx_unicast;
659 u64 rx_multicast; 656 u64 rx_multicast;
660 u64 rx_broadcast; 657 u64 rx_broadcast;
658 u64 rx_packets;
659 u64 rx_errors_total;
660 u64 tx_errors_total;
661
662 /* version 2 stats */
663 u64 tx_deferral;
664 u64 tx_packets;
661 u64 rx_bytes; 665 u64 rx_bytes;
666 u64 tx_pause;
662 u64 rx_pause; 667 u64 rx_pause;
663 u64 rx_drop_frame; 668 u64 rx_drop_frame;
664 u64 rx_packets;
665 u64 rx_errors_total;
666}; 669};
667 670
671#define NV_DEV_STATISTICS_V2_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
672#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6)
673
668/* diagnostics */ 674/* diagnostics */
669#define NV_TEST_COUNT_BASE 3 675#define NV_TEST_COUNT_BASE 3
670#define NV_TEST_COUNT_EXTENDED 4 676#define NV_TEST_COUNT_EXTENDED 4
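
The ethtool string table and the stats struct are reordered so the six version-2-only counters sit at the end of both; NV_DEV_STATISTICS_V1_COUNT then falls out of the sizeof arithmetic by subtracting those six. A toy layout that checks the same derivation (field names for the v1 part are placeholders):

    #include <stdint.h>
    #include <stdio.h>

    /* Toy layout mirroring the reordering: version 1 counters first,
     * the six version-2-only counters appended at the end. */
    struct toy_stats {
        uint64_t v1_a, v1_b, v1_c;   /* stand-ins for the v1 counters */
        /* version 2 stats */
        uint64_t tx_deferral, tx_packets, rx_bytes,
                 tx_pause, rx_pause, rx_drop_frame;
    };

    #define V2_COUNT (sizeof(struct toy_stats) / sizeof(uint64_t))
    #define V1_COUNT (V2_COUNT - 6)

    int main(void)
    {
        printf("v2=%zu v1=%zu\n", V2_COUNT, V1_COUNT);  /* v2=9 v1=3 */
        return 0;
    }
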
@@ -691,6 +697,12 @@ static const struct register_test nv_registers_test[] = {
691 { 0,0 } 697 { 0,0 }
692}; 698};
693 699
700struct nv_skb_map {
701 struct sk_buff *skb;
702 dma_addr_t dma;
703 unsigned int dma_len;
704};
705
694/* 706/*
695 * SMP locking: 707 * SMP locking:
696 * All hardware access under dev->priv->lock, except the performance 708 * All hardware access under dev->priv->lock, except the performance
@@ -741,10 +753,12 @@ struct fe_priv {
741 /* rx specific fields. 753 /* rx specific fields.
742 * Locking: Within irq hander or disable_irq+spin_lock(&np->lock); 754 * Locking: Within irq hander or disable_irq+spin_lock(&np->lock);
743 */ 755 */
756 union ring_type get_rx, put_rx, first_rx, last_rx;
757 struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
758 struct nv_skb_map *first_rx_ctx, *last_rx_ctx;
759 struct nv_skb_map *rx_skb;
760
744 union ring_type rx_ring; 761 union ring_type rx_ring;
745 unsigned int cur_rx, refill_rx;
746 struct sk_buff **rx_skbuff;
747 dma_addr_t *rx_dma;
748 unsigned int rx_buf_sz; 762 unsigned int rx_buf_sz;
749 unsigned int pkt_limit; 763 unsigned int pkt_limit;
750 struct timer_list oom_kick; 764 struct timer_list oom_kick;
@@ -761,15 +775,15 @@ struct fe_priv {
761 /* 775 /*
762 * tx specific fields. 776 * tx specific fields.
763 */ 777 */
778 union ring_type get_tx, put_tx, first_tx, last_tx;
779 struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
780 struct nv_skb_map *first_tx_ctx, *last_tx_ctx;
781 struct nv_skb_map *tx_skb;
782
764 union ring_type tx_ring; 783 union ring_type tx_ring;
765 unsigned int next_tx, nic_tx;
766 struct sk_buff **tx_skbuff;
767 dma_addr_t *tx_dma;
768 unsigned int *tx_dma_len;
769 u32 tx_flags; 784 u32 tx_flags;
770 int tx_ring_size; 785 int tx_ring_size;
771 int tx_limit_start; 786 int tx_stop;
772 int tx_limit_stop;
773 787
774 /* vlan fields */ 788 /* vlan fields */
775 struct vlan_group *vlangrp; 789 struct vlan_group *vlangrp;
@@ -921,16 +935,10 @@ static void free_rings(struct net_device *dev)
921 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size), 935 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
922 np->rx_ring.ex, np->ring_addr); 936 np->rx_ring.ex, np->ring_addr);
923 } 937 }
924 if (np->rx_skbuff) 938 if (np->rx_skb)
925 kfree(np->rx_skbuff); 939 kfree(np->rx_skb);
926 if (np->rx_dma) 940 if (np->tx_skb)
927 kfree(np->rx_dma); 941 kfree(np->tx_skb);
928 if (np->tx_skbuff)
929 kfree(np->tx_skbuff);
930 if (np->tx_dma)
931 kfree(np->tx_dma);
932 if (np->tx_dma_len)
933 kfree(np->tx_dma_len);
934} 942}
935 943
936static int using_multi_irqs(struct net_device *dev) 944static int using_multi_irqs(struct net_device *dev)
@@ -1279,6 +1287,61 @@ static void nv_mac_reset(struct net_device *dev)
1279 pci_push(base); 1287 pci_push(base);
1280} 1288}
1281 1289
1290static void nv_get_hw_stats(struct net_device *dev)
1291{
1292 struct fe_priv *np = netdev_priv(dev);
1293 u8 __iomem *base = get_hwbase(dev);
1294
1295 np->estats.tx_bytes += readl(base + NvRegTxCnt);
1296 np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
1297 np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
1298 np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
1299 np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
1300 np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
1301 np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
1302 np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
1303 np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
1304 np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
1305 np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
1306 np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
1307 np->estats.rx_runt += readl(base + NvRegRxRunt);
1308 np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
1309 np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
1310 np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
1311 np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
1312 np->estats.rx_length_error += readl(base + NvRegRxLenErr);
1313 np->estats.rx_unicast += readl(base + NvRegRxUnicast);
1314 np->estats.rx_multicast += readl(base + NvRegRxMulticast);
1315 np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
1316 np->estats.rx_packets =
1317 np->estats.rx_unicast +
1318 np->estats.rx_multicast +
1319 np->estats.rx_broadcast;
1320 np->estats.rx_errors_total =
1321 np->estats.rx_crc_errors +
1322 np->estats.rx_over_errors +
1323 np->estats.rx_frame_error +
1324 (np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
1325 np->estats.rx_late_collision +
1326 np->estats.rx_runt +
1327 np->estats.rx_frame_too_long;
1328 np->estats.tx_errors_total =
1329 np->estats.tx_late_collision +
1330 np->estats.tx_fifo_errors +
1331 np->estats.tx_carrier_errors +
1332 np->estats.tx_excess_deferral +
1333 np->estats.tx_retry_error;
1334
1335 if (np->driver_data & DEV_HAS_STATISTICS_V2) {
1336 np->estats.tx_deferral += readl(base + NvRegTxDef);
1337 np->estats.tx_packets += readl(base + NvRegTxFrame);
1338 np->estats.rx_bytes += readl(base + NvRegRxCnt);
1339 np->estats.tx_pause += readl(base + NvRegTxPause);
1340 np->estats.rx_pause += readl(base + NvRegRxPause);
1341 np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
1342 }
1343}
1344
1282/* 1345/*
1283 * nv_get_stats: dev->get_stats function 1346 * nv_get_stats: dev->get_stats function
1284 * Get latest stats value from the nic. 1347 * Get latest stats value from the nic.
@@ -1289,10 +1352,19 @@ static struct net_device_stats *nv_get_stats(struct net_device *dev)
1289{ 1352{
1290 struct fe_priv *np = netdev_priv(dev); 1353 struct fe_priv *np = netdev_priv(dev);
1291 1354
1292 /* It seems that the nic always generates interrupts and doesn't 1355 /* If the nic supports hw counters then retrieve latest values */
1293 * accumulate errors internally. Thus the current values in np->stats 1356 if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2)) {
1294 * are already up to date. 1357 nv_get_hw_stats(dev);
1295 */ 1358
1359 /* copy to net_device stats */
1360 np->stats.tx_bytes = np->estats.tx_bytes;
1361 np->stats.tx_fifo_errors = np->estats.tx_fifo_errors;
1362 np->stats.tx_carrier_errors = np->estats.tx_carrier_errors;
1363 np->stats.rx_crc_errors = np->estats.rx_crc_errors;
1364 np->stats.rx_over_errors = np->estats.rx_over_errors;
1365 np->stats.rx_errors = np->estats.rx_errors_total;
1366 np->stats.tx_errors = np->estats.tx_errors_total;
1367 }
1296 return &np->stats; 1368 return &np->stats;
1297} 1369}
1298 1370
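
nv_get_hw_stats accumulates the NIC's clear-on-read counters into the software totals with +=, then recomputes the derived error sums; rx_extra_byte is subtracted from rx_frame_align_error, presumably so a frame flagged for both is not counted twice in rx_errors_total. nv_get_stats then mirrors a subset of those totals into net_device_stats whenever the hardware supports either statistics version. A reduced sketch of the accumulate-then-derive pattern, covering only a few of the counters:

    #include <stdint.h>
    #include <stdio.h>

    struct estats {
        uint64_t rx_crc_errors, rx_over_errors, rx_frame_error;
        uint64_t rx_frame_align_error, rx_extra_byte;
        uint64_t rx_late_collision, rx_runt, rx_frame_too_long;
        uint64_t rx_errors_total;
    };

    /* Hardware counters clear on read, so each poll's value is a
     * delta added into the running software totals; the derived
     * total is then recomputed from those accumulators. */
    static void accumulate(struct estats *e, uint64_t crc, uint64_t align,
                           uint64_t extra)
    {
        e->rx_crc_errors += crc;
        e->rx_frame_align_error += align;
        e->rx_extra_byte += extra;
        e->rx_errors_total =
            e->rx_crc_errors +
            e->rx_over_errors +
            e->rx_frame_error +
            (e->rx_frame_align_error - e->rx_extra_byte) + /* no double count */
            e->rx_late_collision +
            e->rx_runt +
            e->rx_frame_too_long;
    }

    int main(void)
    {
        struct estats e = {0};
        accumulate(&e, 2, 3, 1);   /* crc=2, align=3 (1 also extra-byte) */
        printf("rx_errors_total=%llu\n",
               (unsigned long long)e.rx_errors_total);  /* 4 */
        return 0;
    }
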
@@ -1304,43 +1376,63 @@ static struct net_device_stats *nv_get_stats(struct net_device *dev)
1304static int nv_alloc_rx(struct net_device *dev) 1376static int nv_alloc_rx(struct net_device *dev)
1305{ 1377{
1306 struct fe_priv *np = netdev_priv(dev); 1378 struct fe_priv *np = netdev_priv(dev);
1307 unsigned int refill_rx = np->refill_rx; 1379 struct ring_desc* less_rx;
1308 int nr;
1309 1380
1310 while (np->cur_rx != refill_rx) { 1381 less_rx = np->get_rx.orig;
1311 struct sk_buff *skb; 1382 if (less_rx-- == np->first_rx.orig)
1312 1383 less_rx = np->last_rx.orig;
1313 nr = refill_rx % np->rx_ring_size;
1314 if (np->rx_skbuff[nr] == NULL) {
1315
1316 skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
1317 if (!skb)
1318 break;
1319 1384
1385 while (np->put_rx.orig != less_rx) {
1386 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
1387 if (skb) {
1320 skb->dev = dev; 1388 skb->dev = dev;
1321 np->rx_skbuff[nr] = skb; 1389 np->put_rx_ctx->skb = skb;
1390 np->put_rx_ctx->dma = pci_map_single(np->pci_dev, skb->data,
1391 skb->end-skb->data, PCI_DMA_FROMDEVICE);
1392 np->put_rx_ctx->dma_len = skb->end-skb->data;
1393 np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
1394 wmb();
1395 np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
1396 if (unlikely(np->put_rx.orig++ == np->last_rx.orig))
1397 np->put_rx.orig = np->first_rx.orig;
1398 if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
1399 np->put_rx_ctx = np->first_rx_ctx;
1322 } else { 1400 } else {
1323 skb = np->rx_skbuff[nr]; 1401 return 1;
1324 } 1402 }
1325 np->rx_dma[nr] = pci_map_single(np->pci_dev, skb->data, 1403 }
1326 skb->end-skb->data, PCI_DMA_FROMDEVICE); 1404 return 0;
1327 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 1405}
1328 np->rx_ring.orig[nr].buf = cpu_to_le32(np->rx_dma[nr]); 1406
1407static int nv_alloc_rx_optimized(struct net_device *dev)
1408{
1409 struct fe_priv *np = netdev_priv(dev);
1410 struct ring_desc_ex* less_rx;
1411
1412 less_rx = np->get_rx.ex;
1413 if (less_rx-- == np->first_rx.ex)
1414 less_rx = np->last_rx.ex;
1415
1416 while (np->put_rx.ex != less_rx) {
1417 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
1418 if (skb) {
1419 skb->dev = dev;
1420 np->put_rx_ctx->skb = skb;
1421 np->put_rx_ctx->dma = pci_map_single(np->pci_dev, skb->data,
1422 skb->end-skb->data, PCI_DMA_FROMDEVICE);
1423 np->put_rx_ctx->dma_len = skb->end-skb->data;
1424 np->put_rx.ex->bufhigh = cpu_to_le64(np->put_rx_ctx->dma) >> 32;
1425 np->put_rx.ex->buflow = cpu_to_le64(np->put_rx_ctx->dma) & 0x0FFFFFFFF;
1329 wmb(); 1426 wmb();
1330 np->rx_ring.orig[nr].flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL); 1427 np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
1428 if (unlikely(np->put_rx.ex++ == np->last_rx.ex))
1429 np->put_rx.ex = np->first_rx.ex;
1430 if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
1431 np->put_rx_ctx = np->first_rx_ctx;
1331 } else { 1432 } else {
1332 np->rx_ring.ex[nr].bufhigh = cpu_to_le64(np->rx_dma[nr]) >> 32; 1433 return 1;
1333 np->rx_ring.ex[nr].buflow = cpu_to_le64(np->rx_dma[nr]) & 0x0FFFFFFFF;
1334 wmb();
1335 np->rx_ring.ex[nr].flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
1336 } 1434 }
1337 dprintk(KERN_DEBUG "%s: nv_alloc_rx: Packet %d marked as Available\n",
1338 dev->name, refill_rx);
1339 refill_rx++;
1340 } 1435 }
1341 np->refill_rx = refill_rx;
1342 if (np->cur_rx - refill_rx == np->rx_ring_size)
1343 return 1;
1344 return 0; 1436 return 0;
1345} 1437}
1346 1438
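
The rewritten refill advances put_rx toward less_rx, a point one descriptor behind get_rx: stopping one slot short keeps a full ring from looking identical to an empty one (put == get in both cases). Each filled slot gets a fresh skb, its DMA mapping recorded in the nv_skb_map context, and the AVAIL flag written only after wmb(), so the NIC never sees a valid descriptor with a half-written buffer pointer. The stop-one-short walk, modelled standalone:

    #include <stdio.h>

    #define RING 8

    /* Fill slots from put up to (but not including) one slot behind
     * get; keeping a one-slot gap disambiguates full from empty. */
    static int refill(int *ring, int put, int get)
    {
        int stop = (get + RING - 1) % RING;  /* "less_rx": one behind get */
        int filled = 0;
        while (put != stop) {
            ring[put] = 1;                   /* stands in for skb alloc + map */
            put = (put + 1) % RING;
            filled++;
        }
        return filled;
    }

    int main(void)
    {
        int ring[RING] = {0};
        printf("filled %d of %d slots\n", refill(ring, 3, 3), RING);  /* 7 */
        return 0;
    }
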
@@ -1358,6 +1450,7 @@ static void nv_do_rx_refill(unsigned long data)
1358{ 1450{
1359 struct net_device *dev = (struct net_device *) data; 1451 struct net_device *dev = (struct net_device *) data;
1360 struct fe_priv *np = netdev_priv(dev); 1452 struct fe_priv *np = netdev_priv(dev);
1453 int retcode;
1361 1454
1362 if (!using_multi_irqs(dev)) { 1455 if (!using_multi_irqs(dev)) {
1363 if (np->msi_flags & NV_MSI_X_ENABLED) 1456 if (np->msi_flags & NV_MSI_X_ENABLED)
@@ -1367,7 +1460,11 @@ static void nv_do_rx_refill(unsigned long data)
1367 } else { 1460 } else {
1368 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); 1461 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
1369 } 1462 }
1370 if (nv_alloc_rx(dev)) { 1463 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
1464 retcode = nv_alloc_rx(dev);
1465 else
1466 retcode = nv_alloc_rx_optimized(dev);
1467 if (retcode) {
1371 spin_lock_irq(&np->lock); 1468 spin_lock_irq(&np->lock);
1372 if (!np->in_shutdown) 1469 if (!np->in_shutdown)
1373 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 1470 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
@@ -1388,56 +1485,81 @@ static void nv_init_rx(struct net_device *dev)
1388{ 1485{
1389 struct fe_priv *np = netdev_priv(dev); 1486 struct fe_priv *np = netdev_priv(dev);
1390 int i; 1487 int i;
1488 np->get_rx = np->put_rx = np->first_rx = np->rx_ring;
1489 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
1490 np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
1491 else
1492 np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
1493 np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb;
1494 np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1];
1391 1495
1392 np->cur_rx = np->rx_ring_size; 1496 for (i = 0; i < np->rx_ring_size; i++) {
1393 np->refill_rx = 0; 1497 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1394 for (i = 0; i < np->rx_ring_size; i++)
1395 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
1396 np->rx_ring.orig[i].flaglen = 0; 1498 np->rx_ring.orig[i].flaglen = 0;
1397 else 1499 np->rx_ring.orig[i].buf = 0;
1500 } else {
1398 np->rx_ring.ex[i].flaglen = 0; 1501 np->rx_ring.ex[i].flaglen = 0;
1502 np->rx_ring.ex[i].txvlan = 0;
1503 np->rx_ring.ex[i].bufhigh = 0;
1504 np->rx_ring.ex[i].buflow = 0;
1505 }
1506 np->rx_skb[i].skb = NULL;
1507 np->rx_skb[i].dma = 0;
1508 }
1399} 1509}
1400 1510
1401static void nv_init_tx(struct net_device *dev) 1511static void nv_init_tx(struct net_device *dev)
1402{ 1512{
1403 struct fe_priv *np = netdev_priv(dev); 1513 struct fe_priv *np = netdev_priv(dev);
1404 int i; 1514 int i;
1515 np->get_tx = np->put_tx = np->first_tx = np->tx_ring;
1516 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
1517 np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
1518 else
1519 np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
1520 np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
1521 np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
1405 1522
1406 np->next_tx = np->nic_tx = 0;
1407 for (i = 0; i < np->tx_ring_size; i++) { 1523 for (i = 0; i < np->tx_ring_size; i++) {
1408 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 1524 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1409 np->tx_ring.orig[i].flaglen = 0; 1525 np->tx_ring.orig[i].flaglen = 0;
1410 else 1526 np->tx_ring.orig[i].buf = 0;
1527 } else {
1411 np->tx_ring.ex[i].flaglen = 0; 1528 np->tx_ring.ex[i].flaglen = 0;
1412 np->tx_skbuff[i] = NULL; 1529 np->tx_ring.ex[i].txvlan = 0;
1413 np->tx_dma[i] = 0; 1530 np->tx_ring.ex[i].bufhigh = 0;
1531 np->tx_ring.ex[i].buflow = 0;
1532 }
1533 np->tx_skb[i].skb = NULL;
1534 np->tx_skb[i].dma = 0;
1414 } 1535 }
1415} 1536}
1416 1537
1417static int nv_init_ring(struct net_device *dev) 1538static int nv_init_ring(struct net_device *dev)
1418{ 1539{
1540 struct fe_priv *np = netdev_priv(dev);
1541
1419 nv_init_tx(dev); 1542 nv_init_tx(dev);
1420 nv_init_rx(dev); 1543 nv_init_rx(dev);
1421 return nv_alloc_rx(dev); 1544 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
1545 return nv_alloc_rx(dev);
1546 else
1547 return nv_alloc_rx_optimized(dev);
1422} 1548}
1423 1549
1424static int nv_release_txskb(struct net_device *dev, unsigned int skbnr) 1550static int nv_release_txskb(struct net_device *dev, struct nv_skb_map* tx_skb)
1425{ 1551{
1426 struct fe_priv *np = netdev_priv(dev); 1552 struct fe_priv *np = netdev_priv(dev);
1427 1553
1428 dprintk(KERN_INFO "%s: nv_release_txskb for skbnr %d\n", 1554 if (tx_skb->dma) {
1429 dev->name, skbnr); 1555 pci_unmap_page(np->pci_dev, tx_skb->dma,
1430 1556 tx_skb->dma_len,
1431 if (np->tx_dma[skbnr]) {
1432 pci_unmap_page(np->pci_dev, np->tx_dma[skbnr],
1433 np->tx_dma_len[skbnr],
1434 PCI_DMA_TODEVICE); 1557 PCI_DMA_TODEVICE);
1435 np->tx_dma[skbnr] = 0; 1558 tx_skb->dma = 0;
1436 } 1559 }
1437 1560 if (tx_skb->skb) {
1438 if (np->tx_skbuff[skbnr]) { 1561 dev_kfree_skb_any(tx_skb->skb);
1439 dev_kfree_skb_any(np->tx_skbuff[skbnr]); 1562 tx_skb->skb = NULL;
1440 np->tx_skbuff[skbnr] = NULL;
1441 return 1; 1563 return 1;
1442 } else { 1564 } else {
1443 return 0; 1565 return 0;
@@ -1450,11 +1572,16 @@ static void nv_drain_tx(struct net_device *dev)
1450 unsigned int i; 1572 unsigned int i;
1451 1573
1452 for (i = 0; i < np->tx_ring_size; i++) { 1574 for (i = 0; i < np->tx_ring_size; i++) {
1453 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 1575 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1454 np->tx_ring.orig[i].flaglen = 0; 1576 np->tx_ring.orig[i].flaglen = 0;
1455 else 1577 np->tx_ring.orig[i].buf = 0;
1578 } else {
1456 np->tx_ring.ex[i].flaglen = 0; 1579 np->tx_ring.ex[i].flaglen = 0;
1457 if (nv_release_txskb(dev, i)) 1580 np->tx_ring.ex[i].txvlan = 0;
1581 np->tx_ring.ex[i].bufhigh = 0;
1582 np->tx_ring.ex[i].buflow = 0;
1583 }
1584 if (nv_release_txskb(dev, &np->tx_skb[i]))
1458 np->stats.tx_dropped++; 1585 np->stats.tx_dropped++;
1459 } 1586 }
1460} 1587}
@@ -1463,18 +1590,24 @@ static void nv_drain_rx(struct net_device *dev)
1463{ 1590{
1464 struct fe_priv *np = netdev_priv(dev); 1591 struct fe_priv *np = netdev_priv(dev);
1465 int i; 1592 int i;
1593
1466 for (i = 0; i < np->rx_ring_size; i++) { 1594 for (i = 0; i < np->rx_ring_size; i++) {
1467 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 1595 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1468 np->rx_ring.orig[i].flaglen = 0; 1596 np->rx_ring.orig[i].flaglen = 0;
1469 else 1597 np->rx_ring.orig[i].buf = 0;
1598 } else {
1470 np->rx_ring.ex[i].flaglen = 0; 1599 np->rx_ring.ex[i].flaglen = 0;
1600 np->rx_ring.ex[i].txvlan = 0;
1601 np->rx_ring.ex[i].bufhigh = 0;
1602 np->rx_ring.ex[i].buflow = 0;
1603 }
1471 wmb(); 1604 wmb();
1472 if (np->rx_skbuff[i]) { 1605 if (np->rx_skb[i].skb) {
1473 pci_unmap_single(np->pci_dev, np->rx_dma[i], 1606 pci_unmap_single(np->pci_dev, np->rx_skb[i].dma,
1474 np->rx_skbuff[i]->end-np->rx_skbuff[i]->data, 1607 np->rx_skb[i].skb->end-np->rx_skb[i].skb->data,
1475 PCI_DMA_FROMDEVICE); 1608 PCI_DMA_FROMDEVICE);
1476 dev_kfree_skb(np->rx_skbuff[i]); 1609 dev_kfree_skb(np->rx_skb[i].skb);
1477 np->rx_skbuff[i] = NULL; 1610 np->rx_skb[i].skb = NULL;
1478 } 1611 }
1479 } 1612 }
1480} 1613}
@@ -1485,6 +1618,11 @@ static void drain_ring(struct net_device *dev)
1485 nv_drain_rx(dev); 1618 nv_drain_rx(dev);
1486} 1619}
1487 1620
1621static inline u32 nv_get_empty_tx_slots(struct fe_priv *np)
1622{
1623 return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size));
1624}
1625
1488/* 1626/*
1489 * nv_start_xmit: dev->hard_start_xmit function 1627 * nv_start_xmit: dev->hard_start_xmit function
1490 * Called with netif_tx_lock held. 1628 * Called with netif_tx_lock held.
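
nv_get_empty_tx_slots derives the free-descriptor count from the distance between the put and get context pointers; adding tx_ring_size before the modulo keeps the difference non-negative once put has wrapped behind get. nv_start_xmit then refuses the frame and stops the queue unless strictly more than entries slots remain, preserving the same one-slot gap. The formula, checked with a 16-entry ring:

    #include <stdio.h>

    #define RING_SIZE 16

    /* free = size - ((size + (put - get)) % size); the +size term keeps
     * the modulo argument positive when put has wrapped behind get. */
    static unsigned int empty_slots(int put_idx, int get_idx)
    {
        return RING_SIZE -
               (unsigned int)((RING_SIZE + (put_idx - get_idx)) % RING_SIZE);
    }

    int main(void)
    {
        printf("%u\n", empty_slots(5, 2));  /* 3 in flight -> 13 free */
        printf("%u\n", empty_slots(2, 5));  /* wrapped: 13 in flight -> 3 free */
        printf("%u\n", empty_slots(4, 4));  /* idle ring reports all 16 free */
        return 0;
    }
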
@@ -1495,14 +1633,16 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
1495 u32 tx_flags = 0; 1633 u32 tx_flags = 0;
1496 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET); 1634 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
1497 unsigned int fragments = skb_shinfo(skb)->nr_frags; 1635 unsigned int fragments = skb_shinfo(skb)->nr_frags;
1498 unsigned int nr = (np->next_tx - 1) % np->tx_ring_size;
1499 unsigned int start_nr = np->next_tx % np->tx_ring_size;
1500 unsigned int i; 1636 unsigned int i;
1501 u32 offset = 0; 1637 u32 offset = 0;
1502 u32 bcnt; 1638 u32 bcnt;
1503 u32 size = skb->len-skb->data_len; 1639 u32 size = skb->len-skb->data_len;
1504 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 1640 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
1505 u32 tx_flags_vlan = 0; 1641 u32 empty_slots;
1642 struct ring_desc* put_tx;
1643 struct ring_desc* start_tx;
1644 struct ring_desc* prev_tx;
1645 struct nv_skb_map* prev_tx_ctx;
1506 1646
1507 /* add fragments to entries count */ 1647 /* add fragments to entries count */
1508 for (i = 0; i < fragments; i++) { 1648 for (i = 0; i < fragments; i++) {
@@ -1510,34 +1650,35 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
1510 ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 1650 ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
1511 } 1651 }
1512 1652
1513 spin_lock_irq(&np->lock); 1653 empty_slots = nv_get_empty_tx_slots(np);
1514 1654 if (unlikely(empty_slots <= entries)) {
1515 if ((np->next_tx - np->nic_tx + entries - 1) > np->tx_limit_stop) { 1655 spin_lock_irq(&np->lock);
1516 spin_unlock_irq(&np->lock);
1517 netif_stop_queue(dev); 1656 netif_stop_queue(dev);
1657 np->tx_stop = 1;
1658 spin_unlock_irq(&np->lock);
1518 return NETDEV_TX_BUSY; 1659 return NETDEV_TX_BUSY;
1519 } 1660 }
1520 1661
1662 start_tx = put_tx = np->put_tx.orig;
1663
1521 /* setup the header buffer */ 1664 /* setup the header buffer */
1522 do { 1665 do {
1666 prev_tx = put_tx;
1667 prev_tx_ctx = np->put_tx_ctx;
1523 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; 1668 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
1524 nr = (nr + 1) % np->tx_ring_size; 1669 np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
1525
1526 np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
1527 PCI_DMA_TODEVICE); 1670 PCI_DMA_TODEVICE);
1528 np->tx_dma_len[nr] = bcnt; 1671 np->put_tx_ctx->dma_len = bcnt;
1672 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
1673 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
1529 1674
1530 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1531 np->tx_ring.orig[nr].buf = cpu_to_le32(np->tx_dma[nr]);
1532 np->tx_ring.orig[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags);
1533 } else {
1534 np->tx_ring.ex[nr].bufhigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
1535 np->tx_ring.ex[nr].buflow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
1536 np->tx_ring.ex[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags);
1537 }
1538 tx_flags = np->tx_flags; 1675 tx_flags = np->tx_flags;
1539 offset += bcnt; 1676 offset += bcnt;
1540 size -= bcnt; 1677 size -= bcnt;
1678 if (unlikely(put_tx++ == np->last_tx.orig))
1679 put_tx = np->first_tx.orig;
1680 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
1681 np->put_tx_ctx = np->first_tx_ctx;
1541 } while (size); 1682 } while (size);
1542 1683
1543 /* setup the fragments */ 1684 /* setup the fragments */
@@ -1547,58 +1688,174 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
1547 offset = 0; 1688 offset = 0;
1548 1689
1549 do { 1690 do {
1691 prev_tx = put_tx;
1692 prev_tx_ctx = np->put_tx_ctx;
1550 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; 1693 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
1551 nr = (nr + 1) % np->tx_ring_size; 1694 np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
1552 1695 PCI_DMA_TODEVICE);
1553 np->tx_dma[nr] = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt, 1696 np->put_tx_ctx->dma_len = bcnt;
1554 PCI_DMA_TODEVICE); 1697 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
1555 np->tx_dma_len[nr] = bcnt; 1698 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
1556 1699
1557 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1558 np->tx_ring.orig[nr].buf = cpu_to_le32(np->tx_dma[nr]);
1559 np->tx_ring.orig[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags);
1560 } else {
1561 np->tx_ring.ex[nr].bufhigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
1562 np->tx_ring.ex[nr].buflow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
1563 np->tx_ring.ex[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags);
1564 }
1565 offset += bcnt; 1700 offset += bcnt;
1566 size -= bcnt; 1701 size -= bcnt;
1702 if (unlikely(put_tx++ == np->last_tx.orig))
1703 put_tx = np->first_tx.orig;
1704 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
1705 np->put_tx_ctx = np->first_tx_ctx;
1567 } while (size); 1706 } while (size);
1568 } 1707 }
1569 1708
1570 /* set last fragment flag */ 1709 /* set last fragment flag */
1571 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 1710 prev_tx->flaglen |= cpu_to_le32(tx_flags_extra);
1572 np->tx_ring.orig[nr].flaglen |= cpu_to_le32(tx_flags_extra); 1711
1573 } else { 1712 /* save skb in this slot's context area */
1574 np->tx_ring.ex[nr].flaglen |= cpu_to_le32(tx_flags_extra); 1713 prev_tx_ctx->skb = skb;
1714
1715 if (skb_is_gso(skb))
1716 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
1717 else
1718 tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
1719 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
1720
1721 spin_lock_irq(&np->lock);
1722
1723 /* set tx flags */
1724 start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
1725 np->put_tx.orig = put_tx;
1726
1727 spin_unlock_irq(&np->lock);
1728
1729 dprintk(KERN_DEBUG "%s: nv_start_xmit: entries %d queued for transmission. tx_flags_extra: %x\n",
1730 dev->name, entries, tx_flags_extra);
1731 {
1732 int j;
1733 for (j=0; j<64; j++) {
1734 if ((j%16) == 0)
1735 dprintk("\n%03x:", j);
1736 dprintk(" %02x", ((unsigned char*)skb->data)[j]);
1737 }
1738 dprintk("\n");
1739 }
1740
1741 dev->trans_start = jiffies;
1742 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
1743 return NETDEV_TX_OK;
1744}
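
The new transmit paths drop the modulo arithmetic of the old code (nr = (nr + 1) % np->tx_ring_size) in favor of wrapping pointer advances over the descriptor array. A minimal sketch of the idiom, with illustrative names (ring_advance, cur, first, last are not from the driver):

	/* Compare against the last element, then advance (post-increment);
	 * walking past the end wraps to the start, with no division per slot. */
	struct desc { unsigned int flaglen; };

	static struct desc *ring_advance(struct desc *cur,
					 struct desc *first, struct desc *last)
	{
		if (cur++ == last)
			cur = first;
		return cur;
	}

This is exactly the shape of the put_tx/put_tx_ctx and get_tx/get_tx_ctx updates above and below.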
1745
1746static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
1747{
1748 struct fe_priv *np = netdev_priv(dev);
1749 u32 tx_flags = 0;
1750 u32 tx_flags_extra;
1751 unsigned int fragments = skb_shinfo(skb)->nr_frags;
1752 unsigned int i;
1753 u32 offset = 0;
1754 u32 bcnt;
1755 u32 size = skb->len-skb->data_len;
1756 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
1757 u32 empty_slots;
1758 struct ring_desc_ex* put_tx;
1759 struct ring_desc_ex* start_tx;
1760 struct ring_desc_ex* prev_tx;
1761 struct nv_skb_map* prev_tx_ctx;
1762
1763 /* add fragments to entries count */
1764 for (i = 0; i < fragments; i++) {
1765 entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
1766 ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
1767 }
1768
1769 empty_slots = nv_get_empty_tx_slots(np);
1770 if (unlikely(empty_slots <= entries)) {
1771 spin_lock_irq(&np->lock);
1772 netif_stop_queue(dev);
1773 np->tx_stop = 1;
1774 spin_unlock_irq(&np->lock);
1775 return NETDEV_TX_BUSY;
1776 }
1777
1778 start_tx = put_tx = np->put_tx.ex;
1779
1780 /* setup the header buffer */
1781 do {
1782 prev_tx = put_tx;
1783 prev_tx_ctx = np->put_tx_ctx;
1784 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
1785 np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
1786 PCI_DMA_TODEVICE);
1787 np->put_tx_ctx->dma_len = bcnt;
1788 put_tx->bufhigh = cpu_to_le64(np->put_tx_ctx->dma) >> 32;
1789 put_tx->buflow = cpu_to_le64(np->put_tx_ctx->dma) & 0x0FFFFFFFF;
1790 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
1791
1792 tx_flags = NV_TX2_VALID;
1793 offset += bcnt;
1794 size -= bcnt;
1795 if (unlikely(put_tx++ == np->last_tx.ex))
1796 put_tx = np->first_tx.ex;
1797 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
1798 np->put_tx_ctx = np->first_tx_ctx;
1799 } while (size);
1800
1801 /* setup the fragments */
1802 for (i = 0; i < fragments; i++) {
1803 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1804 u32 size = frag->size;
1805 offset = 0;
1806
1807 do {
1808 prev_tx = put_tx;
1809 prev_tx_ctx = np->put_tx_ctx;
1810 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
1811 np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
1812 PCI_DMA_TODEVICE);
1813 np->put_tx_ctx->dma_len = bcnt;
1814 put_tx->bufhigh = cpu_to_le64(np->put_tx_ctx->dma) >> 32;
1815 put_tx->buflow = cpu_to_le64(np->put_tx_ctx->dma) & 0x0FFFFFFFF;
1816 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
1817
1818 offset += bcnt;
1819 size -= bcnt;
1820 if (unlikely(put_tx++ == np->last_tx.ex))
1821 put_tx = np->first_tx.ex;
1822 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
1823 np->put_tx_ctx = np->first_tx_ctx;
1824 } while (size);
1575 } 1825 }
1576 1826
1577 np->tx_skbuff[nr] = skb; 1827 /* set last fragment flag */
1828 prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET);
1829
1830 /* save skb in this slot's context area */
1831 prev_tx_ctx->skb = skb;
1578 1832
1579#ifdef NETIF_F_TSO
1580 if (skb_is_gso(skb)) 1833 if (skb_is_gso(skb))
1581 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT); 1834 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
1582 else 1835 else
1583#endif 1836 tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
1584 tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
1585 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0; 1837 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
1586 1838
1587 /* vlan tag */ 1839 /* vlan tag */
1588 if (np->vlangrp && vlan_tx_tag_present(skb)) { 1840 if (likely(!np->vlangrp)) {
1589 tx_flags_vlan = NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb); 1841 start_tx->txvlan = 0;
1842 } else {
1843 if (vlan_tx_tag_present(skb))
1844 start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb));
1845 else
1846 start_tx->txvlan = 0;
1590 } 1847 }
1591 1848
1849 spin_lock_irq(&np->lock);
1850
1592 /* set tx flags */ 1851 /* set tx flags */
1593 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 1852 start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
1594 np->tx_ring.orig[start_nr].flaglen |= cpu_to_le32(tx_flags | tx_flags_extra); 1853 np->put_tx.ex = put_tx;
1595 } else { 1854
1596 np->tx_ring.ex[start_nr].txvlan = cpu_to_le32(tx_flags_vlan); 1855 spin_unlock_irq(&np->lock);
1597 np->tx_ring.ex[start_nr].flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
1598 }
1599 1856
1600 dprintk(KERN_DEBUG "%s: nv_start_xmit: packet %d (entries %d) queued for transmission. tx_flags_extra: %x\n", 1857 dprintk(KERN_DEBUG "%s: nv_start_xmit_optimized: entries %d queued for transmission. tx_flags_extra: %x\n",
1601 dev->name, np->next_tx, entries, tx_flags_extra); 1858 dev->name, entries, tx_flags_extra);
1602 { 1859 {
1603 int j; 1860 int j;
1604 for (j=0; j<64; j++) { 1861 for (j=0; j<64; j++) {
@@ -1609,12 +1866,8 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
1609 dprintk("\n"); 1866 dprintk("\n");
1610 } 1867 }
1611 1868
1612 np->next_tx += entries;
1613
1614 dev->trans_start = jiffies; 1869 dev->trans_start = jiffies;
1615 spin_unlock_irq(&np->lock);
1616 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 1870 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
1617 pci_push(get_hwbase(dev));
1618 return NETDEV_TX_OK; 1871 return NETDEV_TX_OK;
1619} 1872}
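
Both transmit paths now keep the skb pointer and its DMA mapping in a single per-descriptor context slot rather than the old parallel tx_skbuff/tx_dma/tx_dma_len arrays. The struct itself is defined outside these hunks; a hypothetical reconstruction, inferred only from the fields accessed here (->skb, ->dma, ->dma_len):

	/* Hypothetical sketch of struct nv_skb_map; the real definition is
	 * added elsewhere in this patch and may differ. */
	struct nv_skb_map {
		struct sk_buff *skb;
		dma_addr_t dma;
		unsigned int dma_len;
	};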
1620 1873
@@ -1627,26 +1880,22 @@ static void nv_tx_done(struct net_device *dev)
1627{ 1880{
1628 struct fe_priv *np = netdev_priv(dev); 1881 struct fe_priv *np = netdev_priv(dev);
1629 u32 flags; 1882 u32 flags;
1630 unsigned int i; 1883 struct ring_desc* orig_get_tx = np->get_tx.orig;
1631 struct sk_buff *skb;
1632 1884
1633 while (np->nic_tx != np->next_tx) { 1885 while ((np->get_tx.orig != np->put_tx.orig) &&
1634 i = np->nic_tx % np->tx_ring_size; 1886 !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID)) {
1635 1887
1636 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 1888 dprintk(KERN_DEBUG "%s: nv_tx_done: flags 0x%x.\n",
1637 flags = le32_to_cpu(np->tx_ring.orig[i].flaglen); 1889 dev->name, flags);
1638 else 1890
1639 flags = le32_to_cpu(np->tx_ring.ex[i].flaglen); 1891 pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma,
1892 np->get_tx_ctx->dma_len,
1893 PCI_DMA_TODEVICE);
1894 np->get_tx_ctx->dma = 0;
1640 1895
1641 dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, flags 0x%x.\n",
1642 dev->name, np->nic_tx, flags);
1643 if (flags & NV_TX_VALID)
1644 break;
1645 if (np->desc_ver == DESC_VER_1) { 1896 if (np->desc_ver == DESC_VER_1) {
1646 if (flags & NV_TX_LASTPACKET) { 1897 if (flags & NV_TX_LASTPACKET) {
1647 skb = np->tx_skbuff[i]; 1898 if (flags & NV_TX_ERROR) {
1648 if (flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION|
1649 NV_TX_UNDERFLOW|NV_TX_ERROR)) {
1650 if (flags & NV_TX_UNDERFLOW) 1899 if (flags & NV_TX_UNDERFLOW)
1651 np->stats.tx_fifo_errors++; 1900 np->stats.tx_fifo_errors++;
1652 if (flags & NV_TX_CARRIERLOST) 1901 if (flags & NV_TX_CARRIERLOST)
@@ -1654,14 +1903,14 @@ static void nv_tx_done(struct net_device *dev)
1654 np->stats.tx_errors++; 1903 np->stats.tx_errors++;
1655 } else { 1904 } else {
1656 np->stats.tx_packets++; 1905 np->stats.tx_packets++;
1657 np->stats.tx_bytes += skb->len; 1906 np->stats.tx_bytes += np->get_tx_ctx->skb->len;
1658 } 1907 }
1908 dev_kfree_skb_any(np->get_tx_ctx->skb);
1909 np->get_tx_ctx->skb = NULL;
1659 } 1910 }
1660 } else { 1911 } else {
1661 if (flags & NV_TX2_LASTPACKET) { 1912 if (flags & NV_TX2_LASTPACKET) {
1662 skb = np->tx_skbuff[i]; 1913 if (flags & NV_TX2_ERROR) {
1663 if (flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION|
1664 NV_TX2_UNDERFLOW|NV_TX2_ERROR)) {
1665 if (flags & NV_TX2_UNDERFLOW) 1914 if (flags & NV_TX2_UNDERFLOW)
1666 np->stats.tx_fifo_errors++; 1915 np->stats.tx_fifo_errors++;
1667 if (flags & NV_TX2_CARRIERLOST) 1916 if (flags & NV_TX2_CARRIERLOST)
@@ -1669,15 +1918,56 @@ static void nv_tx_done(struct net_device *dev)
1669 np->stats.tx_errors++; 1918 np->stats.tx_errors++;
1670 } else { 1919 } else {
1671 np->stats.tx_packets++; 1920 np->stats.tx_packets++;
1672 np->stats.tx_bytes += skb->len; 1921 np->stats.tx_bytes += np->get_tx_ctx->skb->len;
1673 } 1922 }
1923 dev_kfree_skb_any(np->get_tx_ctx->skb);
1924 np->get_tx_ctx->skb = NULL;
1674 } 1925 }
1675 } 1926 }
1676 nv_release_txskb(dev, i); 1927 if (unlikely(np->get_tx.orig++ == np->last_tx.orig))
1677 np->nic_tx++; 1928 np->get_tx.orig = np->first_tx.orig;
1929 if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
1930 np->get_tx_ctx = np->first_tx_ctx;
1678 } 1931 }
1679 if (np->next_tx - np->nic_tx < np->tx_limit_start) 1932 if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) {
1933 np->tx_stop = 0;
1680 netif_wake_queue(dev); 1934 netif_wake_queue(dev);
1935 }
1936}
1937
1938static void nv_tx_done_optimized(struct net_device *dev, int limit)
1939{
1940 struct fe_priv *np = netdev_priv(dev);
1941 u32 flags;
1942 struct ring_desc_ex* orig_get_tx = np->get_tx.ex;
1943
1944 while ((np->get_tx.ex != np->put_tx.ex) &&
1945 !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX_VALID) &&
1946 (limit-- > 0)) {
1947
1948 dprintk(KERN_DEBUG "%s: nv_tx_done_optimized: flags 0x%x.\n",
1949 dev->name, flags);
1950
1951 pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma,
1952 np->get_tx_ctx->dma_len,
1953 PCI_DMA_TODEVICE);
1954 np->get_tx_ctx->dma = 0;
1955
1956 if (flags & NV_TX2_LASTPACKET) {
1957 if (!(flags & NV_TX2_ERROR))
1958 np->stats.tx_packets++;
1959 dev_kfree_skb_any(np->get_tx_ctx->skb);
1960 np->get_tx_ctx->skb = NULL;
1961 }
1962 if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
1963 np->get_tx.ex = np->first_tx.ex;
1964 if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
1965 np->get_tx_ctx = np->first_tx_ctx;
1966 }
1967 if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) {
1968 np->tx_stop = 0;
1969 netif_wake_queue(dev);
1970 }
1681} 1971}
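
nv_tx_done_optimized() takes an explicit work budget so a single pass cannot spin over the entire ring. The rest of the diff uses two budgets, both visible in later hunks:

	nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);  /* ISR: at most 64 slots */
	nv_tx_done_optimized(dev, np->tx_ring_size);  /* tx_timeout: full drain */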
1682 1972
1683/* 1973/*
@@ -1700,9 +1990,8 @@ static void nv_tx_timeout(struct net_device *dev)
1700 { 1990 {
1701 int i; 1991 int i;
1702 1992
1703 printk(KERN_INFO "%s: Ring at %lx: next %d nic %d\n", 1993 printk(KERN_INFO "%s: Ring at %lx\n",
1704 dev->name, (unsigned long)np->ring_addr, 1994 dev->name, (unsigned long)np->ring_addr);
1705 np->next_tx, np->nic_tx);
1706 printk(KERN_INFO "%s: Dumping tx registers\n", dev->name); 1995 printk(KERN_INFO "%s: Dumping tx registers\n", dev->name);
1707 for (i=0;i<=np->register_size;i+= 32) { 1996 for (i=0;i<=np->register_size;i+= 32) {
1708 printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n", 1997 printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
@@ -1750,13 +2039,16 @@ static void nv_tx_timeout(struct net_device *dev)
1750 nv_stop_tx(dev); 2039 nv_stop_tx(dev);
1751 2040
1752 /* 2) check that the packets were not sent already: */ 2041 /* 2) check that the packets were not sent already: */
1753 nv_tx_done(dev); 2042 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
2043 nv_tx_done(dev);
2044 else
2045 nv_tx_done_optimized(dev, np->tx_ring_size);
1754 2046
1755 /* 3) if there are dead entries: clear everything */ 2047 /* 3) if there are dead entries: clear everything */
1756 if (np->next_tx != np->nic_tx) { 2048 if (np->get_tx_ctx != np->put_tx_ctx) {
1757 printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name); 2049 printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name);
1758 nv_drain_tx(dev); 2050 nv_drain_tx(dev);
1759 np->next_tx = np->nic_tx = 0; 2051 nv_init_tx(dev);
1760 setup_hw_rings(dev, NV_SETUP_TX_RING); 2052 setup_hw_rings(dev, NV_SETUP_TX_RING);
1761 netif_wake_queue(dev); 2053 netif_wake_queue(dev);
1762 } 2054 }
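
Both the stop test in the xmit paths (empty_slots <= entries) and the dead-entry test here derive ring occupancy from the context cursors. nv_get_empty_tx_slots() itself is added outside these hunks; a sketch consistent with its use, not a quote of the patch:

	/* Free slots are the ring size minus the in-flight span between the
	 * put and get context cursors; pointer subtraction yields a slot
	 * count.  Hypothetical reconstruction. */
	static inline u32 nv_get_empty_tx_slots(struct fe_priv *np)
	{
		return (u32)(np->tx_ring_size -
			     ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) %
			      np->tx_ring_size));
	}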
@@ -1823,40 +2115,27 @@ static int nv_rx_process(struct net_device *dev, int limit)
1823{ 2115{
1824 struct fe_priv *np = netdev_priv(dev); 2116 struct fe_priv *np = netdev_priv(dev);
1825 u32 flags; 2117 u32 flags;
1826 u32 vlanflags = 0; 2118 u32 rx_processed_cnt = 0;
1827 int count; 2119 struct sk_buff *skb;
1828 2120 int len;
1829 for (count = 0; count < limit; ++count) {
1830 struct sk_buff *skb;
1831 int len;
1832 int i;
1833 if (np->cur_rx - np->refill_rx >= np->rx_ring_size)
1834 break; /* we scanned the whole ring - do not continue */
1835
1836 i = np->cur_rx % np->rx_ring_size;
1837 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1838 flags = le32_to_cpu(np->rx_ring.orig[i].flaglen);
1839 len = nv_descr_getlength(&np->rx_ring.orig[i], np->desc_ver);
1840 } else {
1841 flags = le32_to_cpu(np->rx_ring.ex[i].flaglen);
1842 len = nv_descr_getlength_ex(&np->rx_ring.ex[i], np->desc_ver);
1843 vlanflags = le32_to_cpu(np->rx_ring.ex[i].buflow);
1844 }
1845 2121
1846 dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, flags 0x%x.\n", 2122 while((np->get_rx.orig != np->put_rx.orig) &&
1847 dev->name, np->cur_rx, flags); 2123 !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) &&
2124 (rx_processed_cnt++ < limit)) {
1848 2125
1849 if (flags & NV_RX_AVAIL) 2126 dprintk(KERN_DEBUG "%s: nv_rx_process: flags 0x%x.\n",
1850 break; /* still owned by hardware, */ 2127 dev->name, flags);
1851 2128
1852 /* 2129 /*
1853 * the packet is for us - immediately tear down the pci mapping. 2130 * the packet is for us - immediately tear down the pci mapping.
1854 * TODO: check if a prefetch of the first cacheline improves 2131 * TODO: check if a prefetch of the first cacheline improves
1855 * the performance. 2132 * the performance.
1856 */ 2133 */
1857 pci_unmap_single(np->pci_dev, np->rx_dma[i], 2134 pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
1858 np->rx_skbuff[i]->end-np->rx_skbuff[i]->data, 2135 np->get_rx_ctx->dma_len,
1859 PCI_DMA_FROMDEVICE); 2136 PCI_DMA_FROMDEVICE);
2137 skb = np->get_rx_ctx->skb;
2138 np->get_rx_ctx->skb = NULL;
1860 2139
1861 { 2140 {
1862 int j; 2141 int j;
@@ -1864,123 +2143,228 @@ static int nv_rx_process(struct net_device *dev, int limit)
1864 for (j=0; j<64; j++) { 2143 for (j=0; j<64; j++) {
1865 if ((j%16) == 0) 2144 if ((j%16) == 0)
1866 dprintk("\n%03x:", j); 2145 dprintk("\n%03x:", j);
1867 dprintk(" %02x", ((unsigned char*)np->rx_skbuff[i]->data)[j]); 2146 dprintk(" %02x", ((unsigned char*)skb->data)[j]);
1868 } 2147 }
1869 dprintk("\n"); 2148 dprintk("\n");
1870 } 2149 }
1871 /* look at what we actually got: */ 2150 /* look at what we actually got: */
1872 if (np->desc_ver == DESC_VER_1) { 2151 if (np->desc_ver == DESC_VER_1) {
1873 if (!(flags & NV_RX_DESCRIPTORVALID)) 2152 if (likely(flags & NV_RX_DESCRIPTORVALID)) {
1874 goto next_pkt; 2153 len = flags & LEN_MASK_V1;
1875 2154 if (unlikely(flags & NV_RX_ERROR)) {
1876 if (flags & NV_RX_ERROR) { 2155 if (flags & NV_RX_ERROR4) {
1877 if (flags & NV_RX_MISSEDFRAME) { 2156 len = nv_getlen(dev, skb->data, len);
1878 np->stats.rx_missed_errors++; 2157 if (len < 0) {
1879 np->stats.rx_errors++; 2158 np->stats.rx_errors++;
1880 goto next_pkt; 2159 dev_kfree_skb(skb);
1881 } 2160 goto next_pkt;
1882 if (flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3)) { 2161 }
1883 np->stats.rx_errors++; 2162 }
1884 goto next_pkt; 2163 /* framing errors are soft errors */
1885 } 2164 else if (flags & NV_RX_FRAMINGERR) {
1886 if (flags & NV_RX_CRCERR) { 2165 if (flags & NV_RX_SUBSTRACT1) {
1887 np->stats.rx_crc_errors++; 2166 len--;
1888 np->stats.rx_errors++; 2167 }
1889 goto next_pkt; 2168 }
1890 } 2169 /* the rest are hard errors */
1891 if (flags & NV_RX_OVERFLOW) { 2170 else {
1892 np->stats.rx_over_errors++; 2171 if (flags & NV_RX_MISSEDFRAME)
1893 np->stats.rx_errors++; 2172 np->stats.rx_missed_errors++;
1894 goto next_pkt; 2173 if (flags & NV_RX_CRCERR)
2174 np->stats.rx_crc_errors++;
2175 if (flags & NV_RX_OVERFLOW)
2176 np->stats.rx_over_errors++;
2177 np->stats.rx_errors++;
2178 dev_kfree_skb(skb);
2179 goto next_pkt;
2180 }
1895 } 2181 }
1896 if (flags & NV_RX_ERROR4) { 2182 } else {
1897 len = nv_getlen(dev, np->rx_skbuff[i]->data, len); 2183 dev_kfree_skb(skb);
1898 if (len < 0) { 2184 goto next_pkt;
2185 }
2186 } else {
2187 if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
2188 len = flags & LEN_MASK_V2;
2189 if (unlikely(flags & NV_RX2_ERROR)) {
2190 if (flags & NV_RX2_ERROR4) {
2191 len = nv_getlen(dev, skb->data, len);
2192 if (len < 0) {
2193 np->stats.rx_errors++;
2194 dev_kfree_skb(skb);
2195 goto next_pkt;
2196 }
2197 }
2198 /* framing errors are soft errors */
2199 else if (flags & NV_RX2_FRAMINGERR) {
2200 if (flags & NV_RX2_SUBSTRACT1) {
2201 len--;
2202 }
2203 }
2204 /* the rest are hard errors */
2205 else {
2206 if (flags & NV_RX2_CRCERR)
2207 np->stats.rx_crc_errors++;
2208 if (flags & NV_RX2_OVERFLOW)
2209 np->stats.rx_over_errors++;
1899 np->stats.rx_errors++; 2210 np->stats.rx_errors++;
2211 dev_kfree_skb(skb);
1900 goto next_pkt; 2212 goto next_pkt;
1901 } 2213 }
1902 } 2214 }
 1903 /* framing errors are soft errors. */ 2215 if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK2) /* IP and TCP */ {
1904 if (flags & NV_RX_FRAMINGERR) { 2216 skb->ip_summed = CHECKSUM_UNNECESSARY;
1905 if (flags & NV_RX_SUBSTRACT1) { 2217 } else {
1906 len--; 2218 if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK1 ||
2219 (flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK3) {
2220 skb->ip_summed = CHECKSUM_UNNECESSARY;
1907 } 2221 }
1908 } 2222 }
1909 } 2223 } else {
1910 } else { 2224 dev_kfree_skb(skb);
1911 if (!(flags & NV_RX2_DESCRIPTORVALID))
1912 goto next_pkt; 2225 goto next_pkt;
2226 }
2227 }
2228 /* got a valid packet - forward it to the network core */
2229 skb_put(skb, len);
2230 skb->protocol = eth_type_trans(skb, dev);
2231 dprintk(KERN_DEBUG "%s: nv_rx_process: %d bytes, proto %d accepted.\n",
2232 dev->name, len, skb->protocol);
2233#ifdef CONFIG_FORCEDETH_NAPI
2234 netif_receive_skb(skb);
2235#else
2236 netif_rx(skb);
2237#endif
2238 dev->last_rx = jiffies;
2239 np->stats.rx_packets++;
2240 np->stats.rx_bytes += len;
2241next_pkt:
2242 if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
2243 np->get_rx.orig = np->first_rx.orig;
2244 if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
2245 np->get_rx_ctx = np->first_rx_ctx;
2246 }
1913 2247
1914 if (flags & NV_RX2_ERROR) { 2248 return rx_processed_cnt;
1915 if (flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3)) { 2249}
1916 np->stats.rx_errors++; 2250
1917 goto next_pkt; 2251static int nv_rx_process_optimized(struct net_device *dev, int limit)
1918 } 2252{
1919 if (flags & NV_RX2_CRCERR) { 2253 struct fe_priv *np = netdev_priv(dev);
1920 np->stats.rx_crc_errors++; 2254 u32 flags;
1921 np->stats.rx_errors++; 2255 u32 vlanflags = 0;
1922 goto next_pkt; 2256 u32 rx_processed_cnt = 0;
1923 } 2257 struct sk_buff *skb;
1924 if (flags & NV_RX2_OVERFLOW) { 2258 int len;
1925 np->stats.rx_over_errors++; 2259
1926 np->stats.rx_errors++; 2260 while((np->get_rx.ex != np->put_rx.ex) &&
1927 goto next_pkt; 2261 !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) &&
1928 } 2262 (rx_processed_cnt++ < limit)) {
2263
2264 dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: flags 0x%x.\n",
2265 dev->name, flags);
2266
2267 /*
2268 * the packet is for us - immediately tear down the pci mapping.
2269 * TODO: check if a prefetch of the first cacheline improves
2270 * the performance.
2271 */
2272 pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
2273 np->get_rx_ctx->dma_len,
2274 PCI_DMA_FROMDEVICE);
2275 skb = np->get_rx_ctx->skb;
2276 np->get_rx_ctx->skb = NULL;
2277
2278 {
2279 int j;
2280 dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags);
2281 for (j=0; j<64; j++) {
2282 if ((j%16) == 0)
2283 dprintk("\n%03x:", j);
2284 dprintk(" %02x", ((unsigned char*)skb->data)[j]);
2285 }
2286 dprintk("\n");
2287 }
2288 /* look at what we actually got: */
2289 if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
2290 len = flags & LEN_MASK_V2;
2291 if (unlikely(flags & NV_RX2_ERROR)) {
1929 if (flags & NV_RX2_ERROR4) { 2292 if (flags & NV_RX2_ERROR4) {
1930 len = nv_getlen(dev, np->rx_skbuff[i]->data, len); 2293 len = nv_getlen(dev, skb->data, len);
1931 if (len < 0) { 2294 if (len < 0) {
1932 np->stats.rx_errors++; 2295 dev_kfree_skb(skb);
1933 goto next_pkt; 2296 goto next_pkt;
1934 } 2297 }
1935 } 2298 }
1936 /* framing errors are soft errors */ 2299 /* framing errors are soft errors */
1937 if (flags & NV_RX2_FRAMINGERR) { 2300 else if (flags & NV_RX2_FRAMINGERR) {
1938 if (flags & NV_RX2_SUBSTRACT1) { 2301 if (flags & NV_RX2_SUBSTRACT1) {
1939 len--; 2302 len--;
1940 } 2303 }
1941 } 2304 }
2305 /* the rest are hard errors */
2306 else {
2307 dev_kfree_skb(skb);
2308 goto next_pkt;
2309 }
1942 } 2310 }
1943 if (np->rx_csum) { 2311
 1944 flags &= NV_RX2_CHECKSUMMASK; 2312 if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK2) /* IP and TCP */ {
1945 if (flags == NV_RX2_CHECKSUMOK1 || 2313 skb->ip_summed = CHECKSUM_UNNECESSARY;
1946 flags == NV_RX2_CHECKSUMOK2 || 2314 } else {
1947 flags == NV_RX2_CHECKSUMOK3) { 2315 if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK1 ||
1948 dprintk(KERN_DEBUG "%s: hw checksum hit!.\n", dev->name); 2316 (flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK3) {
1949 np->rx_skbuff[i]->ip_summed = CHECKSUM_UNNECESSARY; 2317 skb->ip_summed = CHECKSUM_UNNECESSARY;
1950 } else {
1951 dprintk(KERN_DEBUG "%s: hwchecksum miss!.\n", dev->name);
1952 } 2318 }
1953 } 2319 }
1954 }
1955 /* got a valid packet - forward it to the network core */
1956 skb = np->rx_skbuff[i];
1957 np->rx_skbuff[i] = NULL;
1958 2320
1959 skb_put(skb, len); 2321 /* got a valid packet - forward it to the network core */
1960 skb->protocol = eth_type_trans(skb, dev); 2322 skb_put(skb, len);
1961 dprintk(KERN_DEBUG "%s: nv_rx_process: packet %d with %d bytes, proto %d accepted.\n", 2323 skb->protocol = eth_type_trans(skb, dev);
1962 dev->name, np->cur_rx, len, skb->protocol); 2324 prefetch(skb->data);
2325
2326 dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: %d bytes, proto %d accepted.\n",
2327 dev->name, len, skb->protocol);
2328
2329 if (likely(!np->vlangrp)) {
1963#ifdef CONFIG_FORCEDETH_NAPI 2330#ifdef CONFIG_FORCEDETH_NAPI
1964 if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT)) 2331 netif_receive_skb(skb);
1965 vlan_hwaccel_receive_skb(skb, np->vlangrp,
1966 vlanflags & NV_RX3_VLAN_TAG_MASK);
1967 else
1968 netif_receive_skb(skb);
1969#else 2332#else
1970 if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT)) 2333 netif_rx(skb);
1971 vlan_hwaccel_rx(skb, np->vlangrp,
1972 vlanflags & NV_RX3_VLAN_TAG_MASK);
1973 else
1974 netif_rx(skb);
1975#endif 2334#endif
1976 dev->last_rx = jiffies; 2335 } else {
1977 np->stats.rx_packets++; 2336 vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
1978 np->stats.rx_bytes += len; 2337 if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
2338#ifdef CONFIG_FORCEDETH_NAPI
2339 vlan_hwaccel_receive_skb(skb, np->vlangrp,
2340 vlanflags & NV_RX3_VLAN_TAG_MASK);
2341#else
2342 vlan_hwaccel_rx(skb, np->vlangrp,
2343 vlanflags & NV_RX3_VLAN_TAG_MASK);
2344#endif
2345 } else {
2346#ifdef CONFIG_FORCEDETH_NAPI
2347 netif_receive_skb(skb);
2348#else
2349 netif_rx(skb);
2350#endif
2351 }
2352 }
2353
2354 dev->last_rx = jiffies;
2355 np->stats.rx_packets++;
2356 np->stats.rx_bytes += len;
2357 } else {
2358 dev_kfree_skb(skb);
2359 }
1979next_pkt: 2360next_pkt:
1980 np->cur_rx++; 2361 if (unlikely(np->get_rx.ex++ == np->last_rx.ex))
2362 np->get_rx.ex = np->first_rx.ex;
2363 if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
2364 np->get_rx_ctx = np->first_rx_ctx;
1981 } 2365 }
1982 2366
1983 return count; 2367 return rx_processed_cnt;
1984} 2368}
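
The receive paths now test the checksum field against each OK pattern under the full mask instead of clobbering flags in place (the old code did flags &= NV_RX2_CHECKSUMMASK before comparing). The repeated comparisons are equivalent to this hypothetical helper, which is not part of the patch:

	/* Nonzero when the NIC validated the IP header and/or TCP checksum. */
	static inline int nv_rx_csum_ok(u32 flags)
	{
		u32 csum = flags & NV_RX2_CHECKSUMMASK;

		return csum == NV_RX2_CHECKSUMOK1 ||
		       csum == NV_RX2_CHECKSUMOK2 ||
		       csum == NV_RX2_CHECKSUMOK3;
	}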
1985 2369
1986static void set_bufsize(struct net_device *dev) 2370static void set_bufsize(struct net_device *dev)
@@ -2456,7 +2840,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
2456 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; 2840 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
2457 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); 2841 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
2458 } 2842 }
2459 pci_push(base);
2460 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); 2843 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
2461 if (!(events & np->irqmask)) 2844 if (!(events & np->irqmask))
2462 break; 2845 break;
@@ -2465,22 +2848,46 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
2465 nv_tx_done(dev); 2848 nv_tx_done(dev);
2466 spin_unlock(&np->lock); 2849 spin_unlock(&np->lock);
2467 2850
2468 if (events & NVREG_IRQ_LINK) { 2851#ifdef CONFIG_FORCEDETH_NAPI
2852 if (events & NVREG_IRQ_RX_ALL) {
2853 netif_rx_schedule(dev);
2854
 2855 /* Disable further receive irqs */
2856 spin_lock(&np->lock);
2857 np->irqmask &= ~NVREG_IRQ_RX_ALL;
2858
2859 if (np->msi_flags & NV_MSI_X_ENABLED)
2860 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
2861 else
2862 writel(np->irqmask, base + NvRegIrqMask);
2863 spin_unlock(&np->lock);
2864 }
2865#else
2866 if (nv_rx_process(dev, dev->weight)) {
2867 if (unlikely(nv_alloc_rx(dev))) {
2868 spin_lock(&np->lock);
2869 if (!np->in_shutdown)
2870 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
2871 spin_unlock(&np->lock);
2872 }
2873 }
2874#endif
2875 if (unlikely(events & NVREG_IRQ_LINK)) {
2469 spin_lock(&np->lock); 2876 spin_lock(&np->lock);
2470 nv_link_irq(dev); 2877 nv_link_irq(dev);
2471 spin_unlock(&np->lock); 2878 spin_unlock(&np->lock);
2472 } 2879 }
2473 if (np->need_linktimer && time_after(jiffies, np->link_timeout)) { 2880 if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
2474 spin_lock(&np->lock); 2881 spin_lock(&np->lock);
2475 nv_linkchange(dev); 2882 nv_linkchange(dev);
2476 spin_unlock(&np->lock); 2883 spin_unlock(&np->lock);
2477 np->link_timeout = jiffies + LINK_TIMEOUT; 2884 np->link_timeout = jiffies + LINK_TIMEOUT;
2478 } 2885 }
2479 if (events & (NVREG_IRQ_TX_ERR)) { 2886 if (unlikely(events & (NVREG_IRQ_TX_ERR))) {
2480 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n", 2887 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
2481 dev->name, events); 2888 dev->name, events);
2482 } 2889 }
2483 if (events & (NVREG_IRQ_UNKNOWN)) { 2890 if (unlikely(events & (NVREG_IRQ_UNKNOWN))) {
2484 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n", 2891 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
2485 dev->name, events); 2892 dev->name, events);
2486 } 2893 }
@@ -2501,6 +2908,63 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
2501 spin_unlock(&np->lock); 2908 spin_unlock(&np->lock);
2502 break; 2909 break;
2503 } 2910 }
2911 if (unlikely(i > max_interrupt_work)) {
2912 spin_lock(&np->lock);
2913 /* disable interrupts on the nic */
2914 if (!(np->msi_flags & NV_MSI_X_ENABLED))
2915 writel(0, base + NvRegIrqMask);
2916 else
2917 writel(np->irqmask, base + NvRegIrqMask);
2918 pci_push(base);
2919
2920 if (!np->in_shutdown) {
2921 np->nic_poll_irq = np->irqmask;
2922 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
2923 }
2924 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
2925 spin_unlock(&np->lock);
2926 break;
2927 }
2928
2929 }
2930 dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name);
2931
2932 return IRQ_RETVAL(i);
2933}
2934
2935#define TX_WORK_PER_LOOP 64
2936#define RX_WORK_PER_LOOP 64
2937/**
 2938 * All _optimized functions are used to help increase performance
 2939 * (reduce CPU usage and increase throughput). They use descriptor
 2940 * version 3, compiler branch hints, and fewer memory accesses.
2941 */
2942static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
2943{
2944 struct net_device *dev = (struct net_device *) data;
2945 struct fe_priv *np = netdev_priv(dev);
2946 u8 __iomem *base = get_hwbase(dev);
2947 u32 events;
2948 int i;
2949
2950 dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized\n", dev->name);
2951
2952 for (i=0; ; i++) {
2953 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
2954 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
2955 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
2956 } else {
2957 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
2958 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
2959 }
2960 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
2961 if (!(events & np->irqmask))
2962 break;
2963
2964 spin_lock(&np->lock);
2965 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
2966 spin_unlock(&np->lock);
2967
2504#ifdef CONFIG_FORCEDETH_NAPI 2968#ifdef CONFIG_FORCEDETH_NAPI
2505 if (events & NVREG_IRQ_RX_ALL) { 2969 if (events & NVREG_IRQ_RX_ALL) {
2506 netif_rx_schedule(dev); 2970 netif_rx_schedule(dev);
@@ -2516,15 +2980,53 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
2516 spin_unlock(&np->lock); 2980 spin_unlock(&np->lock);
2517 } 2981 }
2518#else 2982#else
2519 nv_rx_process(dev, dev->weight); 2983 if (nv_rx_process_optimized(dev, dev->weight)) {
2520 if (nv_alloc_rx(dev)) { 2984 if (unlikely(nv_alloc_rx_optimized(dev))) {
2985 spin_lock(&np->lock);
2986 if (!np->in_shutdown)
2987 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
2988 spin_unlock(&np->lock);
2989 }
2990 }
2991#endif
2992 if (unlikely(events & NVREG_IRQ_LINK)) {
2521 spin_lock(&np->lock); 2993 spin_lock(&np->lock);
2522 if (!np->in_shutdown) 2994 nv_link_irq(dev);
2523 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
2524 spin_unlock(&np->lock); 2995 spin_unlock(&np->lock);
2525 } 2996 }
2526#endif 2997 if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
2527 if (i > max_interrupt_work) { 2998 spin_lock(&np->lock);
2999 nv_linkchange(dev);
3000 spin_unlock(&np->lock);
3001 np->link_timeout = jiffies + LINK_TIMEOUT;
3002 }
3003 if (unlikely(events & (NVREG_IRQ_TX_ERR))) {
3004 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
3005 dev->name, events);
3006 }
3007 if (unlikely(events & (NVREG_IRQ_UNKNOWN))) {
3008 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
3009 dev->name, events);
3010 }
3011 if (unlikely(events & NVREG_IRQ_RECOVER_ERROR)) {
3012 spin_lock(&np->lock);
3013 /* disable interrupts on the nic */
3014 if (!(np->msi_flags & NV_MSI_X_ENABLED))
3015 writel(0, base + NvRegIrqMask);
3016 else
3017 writel(np->irqmask, base + NvRegIrqMask);
3018 pci_push(base);
3019
3020 if (!np->in_shutdown) {
3021 np->nic_poll_irq = np->irqmask;
3022 np->recover_error = 1;
3023 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3024 }
3025 spin_unlock(&np->lock);
3026 break;
3027 }
3028
3029 if (unlikely(i > max_interrupt_work)) {
2528 spin_lock(&np->lock); 3030 spin_lock(&np->lock);
2529 /* disable interrupts on the nic */ 3031 /* disable interrupts on the nic */
2530 if (!(np->msi_flags & NV_MSI_X_ENABLED)) 3032 if (!(np->msi_flags & NV_MSI_X_ENABLED))
@@ -2543,7 +3045,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
2543 } 3045 }
2544 3046
2545 } 3047 }
2546 dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name); 3048 dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized completed\n", dev->name);
2547 3049
2548 return IRQ_RETVAL(i); 3050 return IRQ_RETVAL(i);
2549} 3051}
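
The rare branches here (link change, TX error, unknown events, loop overrun) are wrapped in unlikely() so the compiler lays out the hot RX/TX path as straight-line code. These hints are the standard kernel macros from include/linux/compiler.h:

	#define likely(x)   __builtin_expect(!!(x), 1)
	#define unlikely(x) __builtin_expect(!!(x), 0)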
@@ -2562,20 +3064,19 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data)
2562 for (i=0; ; i++) { 3064 for (i=0; ; i++) {
2563 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL; 3065 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
2564 writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus); 3066 writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus);
2565 pci_push(base);
2566 dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events); 3067 dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events);
2567 if (!(events & np->irqmask)) 3068 if (!(events & np->irqmask))
2568 break; 3069 break;
2569 3070
2570 spin_lock_irqsave(&np->lock, flags); 3071 spin_lock_irqsave(&np->lock, flags);
2571 nv_tx_done(dev); 3072 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
2572 spin_unlock_irqrestore(&np->lock, flags); 3073 spin_unlock_irqrestore(&np->lock, flags);
2573 3074
2574 if (events & (NVREG_IRQ_TX_ERR)) { 3075 if (unlikely(events & (NVREG_IRQ_TX_ERR))) {
2575 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n", 3076 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
2576 dev->name, events); 3077 dev->name, events);
2577 } 3078 }
2578 if (i > max_interrupt_work) { 3079 if (unlikely(i > max_interrupt_work)) {
2579 spin_lock_irqsave(&np->lock, flags); 3080 spin_lock_irqsave(&np->lock, flags);
2580 /* disable interrupts on the nic */ 3081 /* disable interrupts on the nic */
2581 writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask); 3082 writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
@@ -2604,7 +3105,10 @@ static int nv_napi_poll(struct net_device *dev, int *budget)
2604 u8 __iomem *base = get_hwbase(dev); 3105 u8 __iomem *base = get_hwbase(dev);
2605 unsigned long flags; 3106 unsigned long flags;
2606 3107
2607 pkts = nv_rx_process(dev, limit); 3108 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
3109 pkts = nv_rx_process(dev, limit);
3110 else
3111 pkts = nv_rx_process_optimized(dev, limit);
2608 3112
2609 if (nv_alloc_rx(dev)) { 3113 if (nv_alloc_rx(dev)) {
2610 spin_lock_irqsave(&np->lock, flags); 3114 spin_lock_irqsave(&np->lock, flags);
@@ -2670,20 +3174,20 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data)
2670 for (i=0; ; i++) { 3174 for (i=0; ; i++) {
2671 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL; 3175 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
2672 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus); 3176 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
2673 pci_push(base);
2674 dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events); 3177 dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events);
2675 if (!(events & np->irqmask)) 3178 if (!(events & np->irqmask))
2676 break; 3179 break;
2677 3180
2678 nv_rx_process(dev, dev->weight); 3181 if (nv_rx_process_optimized(dev, dev->weight)) {
2679 if (nv_alloc_rx(dev)) { 3182 if (unlikely(nv_alloc_rx_optimized(dev))) {
2680 spin_lock_irqsave(&np->lock, flags); 3183 spin_lock_irqsave(&np->lock, flags);
2681 if (!np->in_shutdown) 3184 if (!np->in_shutdown)
2682 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 3185 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
2683 spin_unlock_irqrestore(&np->lock, flags); 3186 spin_unlock_irqrestore(&np->lock, flags);
3187 }
2684 } 3188 }
2685 3189
2686 if (i > max_interrupt_work) { 3190 if (unlikely(i > max_interrupt_work)) {
2687 spin_lock_irqsave(&np->lock, flags); 3191 spin_lock_irqsave(&np->lock, flags);
2688 /* disable interrupts on the nic */ 3192 /* disable interrupts on the nic */
2689 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); 3193 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
@@ -2718,11 +3222,15 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data)
2718 for (i=0; ; i++) { 3222 for (i=0; ; i++) {
2719 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER; 3223 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
2720 writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus); 3224 writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus);
2721 pci_push(base);
2722 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); 3225 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
2723 if (!(events & np->irqmask)) 3226 if (!(events & np->irqmask))
2724 break; 3227 break;
2725 3228
3229 /* check tx in case we reached max loop limit in tx isr */
3230 spin_lock_irqsave(&np->lock, flags);
3231 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
3232 spin_unlock_irqrestore(&np->lock, flags);
3233
2726 if (events & NVREG_IRQ_LINK) { 3234 if (events & NVREG_IRQ_LINK) {
2727 spin_lock_irqsave(&np->lock, flags); 3235 spin_lock_irqsave(&np->lock, flags);
2728 nv_link_irq(dev); 3236 nv_link_irq(dev);
@@ -2752,7 +3260,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data)
2752 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n", 3260 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
2753 dev->name, events); 3261 dev->name, events);
2754 } 3262 }
2755 if (i > max_interrupt_work) { 3263 if (unlikely(i > max_interrupt_work)) {
2756 spin_lock_irqsave(&np->lock, flags); 3264 spin_lock_irqsave(&np->lock, flags);
2757 /* disable interrupts on the nic */ 3265 /* disable interrupts on the nic */
2758 writel(NVREG_IRQ_OTHER, base + NvRegIrqMask); 3266 writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
@@ -2835,6 +3343,16 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
2835 u8 __iomem *base = get_hwbase(dev); 3343 u8 __iomem *base = get_hwbase(dev);
2836 int ret = 1; 3344 int ret = 1;
2837 int i; 3345 int i;
3346 irqreturn_t (*handler)(int foo, void *data);
3347
3348 if (intr_test) {
3349 handler = nv_nic_irq_test;
3350 } else {
3351 if (np->desc_ver == DESC_VER_3)
3352 handler = nv_nic_irq_optimized;
3353 else
3354 handler = nv_nic_irq;
3355 }
2838 3356
2839 if (np->msi_flags & NV_MSI_X_CAPABLE) { 3357 if (np->msi_flags & NV_MSI_X_CAPABLE) {
2840 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { 3358 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
@@ -2872,10 +3390,7 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
2872 set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER); 3390 set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
2873 } else { 3391 } else {
2874 /* Request irq for all interrupts */ 3392 /* Request irq for all interrupts */
2875 if ((!intr_test && 3393 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) {
2876 request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, IRQF_SHARED, dev->name, dev) != 0) ||
2877 (intr_test &&
2878 request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq_test, IRQF_SHARED, dev->name, dev) != 0)) {
2879 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); 3394 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
2880 pci_disable_msix(np->pci_dev); 3395 pci_disable_msix(np->pci_dev);
2881 np->msi_flags &= ~NV_MSI_X_ENABLED; 3396 np->msi_flags &= ~NV_MSI_X_ENABLED;
@@ -2891,8 +3406,7 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
2891 if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) { 3406 if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
2892 if ((ret = pci_enable_msi(np->pci_dev)) == 0) { 3407 if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
2893 np->msi_flags |= NV_MSI_ENABLED; 3408 np->msi_flags |= NV_MSI_ENABLED;
2894 if ((!intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq, IRQF_SHARED, dev->name, dev) != 0) || 3409 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) {
2895 (intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq_test, IRQF_SHARED, dev->name, dev) != 0)) {
2896 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); 3410 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
2897 pci_disable_msi(np->pci_dev); 3411 pci_disable_msi(np->pci_dev);
2898 np->msi_flags &= ~NV_MSI_ENABLED; 3412 np->msi_flags &= ~NV_MSI_ENABLED;
@@ -2907,8 +3421,7 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
2907 } 3421 }
2908 } 3422 }
2909 if (ret != 0) { 3423 if (ret != 0) {
2910 if ((!intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq, IRQF_SHARED, dev->name, dev) != 0) || 3424 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0)
2911 (intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq_test, IRQF_SHARED, dev->name, dev) != 0))
2912 goto out_err; 3425 goto out_err;
2913 3426
2914 } 3427 }
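
Choosing the handler once removes the duplicated intr_test branches from every registration site; since request_irq() accepts any function with the irqreturn_t (*)(int, void *) signature, the MSI-X, MSI and legacy-INTx paths above all collapse to the same shape (irq stands for whichever vector the path uses):

	/* One registration call regardless of which handler was selected. */
	if (request_irq(irq, handler, IRQF_SHARED, dev->name, dev) != 0)
		goto out_err;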
@@ -3051,47 +3564,8 @@ static void nv_do_stats_poll(unsigned long data)
3051{ 3564{
3052 struct net_device *dev = (struct net_device *) data; 3565 struct net_device *dev = (struct net_device *) data;
3053 struct fe_priv *np = netdev_priv(dev); 3566 struct fe_priv *np = netdev_priv(dev);
3054 u8 __iomem *base = get_hwbase(dev);
3055 3567
3056 np->estats.tx_bytes += readl(base + NvRegTxCnt); 3568 nv_get_hw_stats(dev);
3057 np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
3058 np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
3059 np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
3060 np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
3061 np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
3062 np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
3063 np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
3064 np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
3065 np->estats.tx_deferral += readl(base + NvRegTxDef);
3066 np->estats.tx_packets += readl(base + NvRegTxFrame);
3067 np->estats.tx_pause += readl(base + NvRegTxPause);
3068 np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
3069 np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
3070 np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
3071 np->estats.rx_runt += readl(base + NvRegRxRunt);
3072 np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
3073 np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
3074 np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
3075 np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
3076 np->estats.rx_length_error += readl(base + NvRegRxLenErr);
3077 np->estats.rx_unicast += readl(base + NvRegRxUnicast);
3078 np->estats.rx_multicast += readl(base + NvRegRxMulticast);
3079 np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
3080 np->estats.rx_bytes += readl(base + NvRegRxCnt);
3081 np->estats.rx_pause += readl(base + NvRegRxPause);
3082 np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
3083 np->estats.rx_packets =
3084 np->estats.rx_unicast +
3085 np->estats.rx_multicast +
3086 np->estats.rx_broadcast;
3087 np->estats.rx_errors_total =
3088 np->estats.rx_crc_errors +
3089 np->estats.rx_over_errors +
3090 np->estats.rx_frame_error +
3091 (np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
3092 np->estats.rx_late_collision +
3093 np->estats.rx_runt +
3094 np->estats.rx_frame_too_long;
3095 3569
3096 if (!np->in_shutdown) 3570 if (!np->in_shutdown)
3097 mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL); 3571 mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL);
@@ -3465,7 +3939,7 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
3465{ 3939{
3466 struct fe_priv *np = netdev_priv(dev); 3940 struct fe_priv *np = netdev_priv(dev);
3467 u8 __iomem *base = get_hwbase(dev); 3941 u8 __iomem *base = get_hwbase(dev);
3468 u8 *rxtx_ring, *rx_skbuff, *tx_skbuff, *rx_dma, *tx_dma, *tx_dma_len; 3942 u8 *rxtx_ring, *rx_skbuff, *tx_skbuff;
3469 dma_addr_t ring_addr; 3943 dma_addr_t ring_addr;
3470 3944
3471 if (ring->rx_pending < RX_RING_MIN || 3945 if (ring->rx_pending < RX_RING_MIN ||
@@ -3491,12 +3965,9 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
3491 sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending), 3965 sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
3492 &ring_addr); 3966 &ring_addr);
3493 } 3967 }
3494 rx_skbuff = kmalloc(sizeof(struct sk_buff*) * ring->rx_pending, GFP_KERNEL); 3968 rx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->rx_pending, GFP_KERNEL);
3495 rx_dma = kmalloc(sizeof(dma_addr_t) * ring->rx_pending, GFP_KERNEL); 3969 tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL);
3496 tx_skbuff = kmalloc(sizeof(struct sk_buff*) * ring->tx_pending, GFP_KERNEL); 3970 if (!rxtx_ring || !rx_skbuff || !tx_skbuff) {
3497 tx_dma = kmalloc(sizeof(dma_addr_t) * ring->tx_pending, GFP_KERNEL);
3498 tx_dma_len = kmalloc(sizeof(unsigned int) * ring->tx_pending, GFP_KERNEL);
3499 if (!rxtx_ring || !rx_skbuff || !rx_dma || !tx_skbuff || !tx_dma || !tx_dma_len) {
3500 /* fall back to old rings */ 3971 /* fall back to old rings */
3501 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 3972 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
3502 if (rxtx_ring) 3973 if (rxtx_ring)
@@ -3509,14 +3980,8 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
3509 } 3980 }
3510 if (rx_skbuff) 3981 if (rx_skbuff)
3511 kfree(rx_skbuff); 3982 kfree(rx_skbuff);
3512 if (rx_dma)
3513 kfree(rx_dma);
3514 if (tx_skbuff) 3983 if (tx_skbuff)
3515 kfree(tx_skbuff); 3984 kfree(tx_skbuff);
3516 if (tx_dma)
3517 kfree(tx_dma);
3518 if (tx_dma_len)
3519 kfree(tx_dma_len);
3520 goto exit; 3985 goto exit;
3521 } 3986 }
3522 3987
@@ -3538,8 +4003,6 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
3538 /* set new values */ 4003 /* set new values */
3539 np->rx_ring_size = ring->rx_pending; 4004 np->rx_ring_size = ring->rx_pending;
3540 np->tx_ring_size = ring->tx_pending; 4005 np->tx_ring_size = ring->tx_pending;
3541 np->tx_limit_stop = ring->tx_pending - TX_LIMIT_DIFFERENCE;
3542 np->tx_limit_start = ring->tx_pending - TX_LIMIT_DIFFERENCE - 1;
3543 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 4006 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
3544 np->rx_ring.orig = (struct ring_desc*)rxtx_ring; 4007 np->rx_ring.orig = (struct ring_desc*)rxtx_ring;
3545 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size]; 4008 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
@@ -3547,18 +4010,12 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
3547 np->rx_ring.ex = (struct ring_desc_ex*)rxtx_ring; 4010 np->rx_ring.ex = (struct ring_desc_ex*)rxtx_ring;
3548 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; 4011 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
3549 } 4012 }
3550 np->rx_skbuff = (struct sk_buff**)rx_skbuff; 4013 np->rx_skb = (struct nv_skb_map*)rx_skbuff;
3551 np->rx_dma = (dma_addr_t*)rx_dma; 4014 np->tx_skb = (struct nv_skb_map*)tx_skbuff;
3552 np->tx_skbuff = (struct sk_buff**)tx_skbuff;
3553 np->tx_dma = (dma_addr_t*)tx_dma;
3554 np->tx_dma_len = (unsigned int*)tx_dma_len;
3555 np->ring_addr = ring_addr; 4015 np->ring_addr = ring_addr;
3556 4016
3557 memset(np->rx_skbuff, 0, sizeof(struct sk_buff*) * np->rx_ring_size); 4017 memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
3558 memset(np->rx_dma, 0, sizeof(dma_addr_t) * np->rx_ring_size); 4018 memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size);
3559 memset(np->tx_skbuff, 0, sizeof(struct sk_buff*) * np->tx_ring_size);
3560 memset(np->tx_dma, 0, sizeof(dma_addr_t) * np->tx_ring_size);
3561 memset(np->tx_dma_len, 0, sizeof(unsigned int) * np->tx_ring_size);
3562 4019
3563 if (netif_running(dev)) { 4020 if (netif_running(dev)) {
3564 /* reinit driver view of the queues */ 4021 /* reinit driver view of the queues */
@@ -3727,8 +4184,10 @@ static int nv_get_stats_count(struct net_device *dev)
3727{ 4184{
3728 struct fe_priv *np = netdev_priv(dev); 4185 struct fe_priv *np = netdev_priv(dev);
3729 4186
3730 if (np->driver_data & DEV_HAS_STATISTICS) 4187 if (np->driver_data & DEV_HAS_STATISTICS_V1)
3731 return sizeof(struct nv_ethtool_stats)/sizeof(u64); 4188 return NV_DEV_STATISTICS_V1_COUNT;
4189 else if (np->driver_data & DEV_HAS_STATISTICS_V2)
4190 return NV_DEV_STATISTICS_V2_COUNT;
3732 else 4191 else
3733 return 0; 4192 return 0;
3734} 4193}
@@ -3955,7 +4414,7 @@ static int nv_loopback_test(struct net_device *dev)
3955 dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n", 4414 dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n",
3956 dev->name, len, pkt_len); 4415 dev->name, len, pkt_len);
3957 } else { 4416 } else {
3958 rx_skb = np->rx_skbuff[0]; 4417 rx_skb = np->rx_skb[0].skb;
3959 for (i = 0; i < pkt_len; i++) { 4418 for (i = 0; i < pkt_len; i++) {
3960 if (rx_skb->data[i] != (u8)(i & 0xff)) { 4419 if (rx_skb->data[i] != (u8)(i & 0xff)) {
3961 ret = 0; 4420 ret = 0;
@@ -4315,7 +4774,7 @@ static int nv_open(struct net_device *dev)
4315 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 4774 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
4316 4775
4317 /* start statistics timer */ 4776 /* start statistics timer */
4318 if (np->driver_data & DEV_HAS_STATISTICS) 4777 if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2))
4319 mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL); 4778 mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL);
4320 4779
4321 spin_unlock_irq(&np->lock); 4780 spin_unlock_irq(&np->lock);
@@ -4412,7 +4871,9 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
4412 if (err < 0) 4871 if (err < 0)
4413 goto out_disable; 4872 goto out_disable;
4414 4873
4415 if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS)) 4874 if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2))
4875 np->register_size = NV_PCI_REGSZ_VER3;
4876 else if (id->driver_data & DEV_HAS_STATISTICS_V1)
4416 np->register_size = NV_PCI_REGSZ_VER2; 4877 np->register_size = NV_PCI_REGSZ_VER2;
4417 else 4878 else
4418 np->register_size = NV_PCI_REGSZ_VER1; 4879 np->register_size = NV_PCI_REGSZ_VER1;
@@ -4475,10 +4936,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
4475 np->rx_csum = 1; 4936 np->rx_csum = 1;
4476 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; 4937 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
4477 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; 4938 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
4478#ifdef NETIF_F_TSO
4479 dev->features |= NETIF_F_TSO; 4939 dev->features |= NETIF_F_TSO;
4480#endif 4940 }
4481 }
4482 4941
4483 np->vlanctl_bits = 0; 4942 np->vlanctl_bits = 0;
4484 if (id->driver_data & DEV_HAS_VLAN) { 4943 if (id->driver_data & DEV_HAS_VLAN) {
@@ -4512,8 +4971,6 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
4512 4971
4513 np->rx_ring_size = RX_RING_DEFAULT; 4972 np->rx_ring_size = RX_RING_DEFAULT;
4514 np->tx_ring_size = TX_RING_DEFAULT; 4973 np->tx_ring_size = TX_RING_DEFAULT;
4515 np->tx_limit_stop = np->tx_ring_size - TX_LIMIT_DIFFERENCE;
4516 np->tx_limit_start = np->tx_ring_size - TX_LIMIT_DIFFERENCE - 1;
4517 4974
4518 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 4975 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
4519 np->rx_ring.orig = pci_alloc_consistent(pci_dev, 4976 np->rx_ring.orig = pci_alloc_consistent(pci_dev,
@@ -4530,22 +4987,19 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
4530 goto out_unmap; 4987 goto out_unmap;
4531 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; 4988 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
4532 } 4989 }
4533 np->rx_skbuff = kmalloc(sizeof(struct sk_buff*) * np->rx_ring_size, GFP_KERNEL); 4990 np->rx_skb = kmalloc(sizeof(struct nv_skb_map) * np->rx_ring_size, GFP_KERNEL);
4534 np->rx_dma = kmalloc(sizeof(dma_addr_t) * np->rx_ring_size, GFP_KERNEL); 4991 np->tx_skb = kmalloc(sizeof(struct nv_skb_map) * np->tx_ring_size, GFP_KERNEL);
4535 np->tx_skbuff = kmalloc(sizeof(struct sk_buff*) * np->tx_ring_size, GFP_KERNEL); 4992 if (!np->rx_skb || !np->tx_skb)
4536 np->tx_dma = kmalloc(sizeof(dma_addr_t) * np->tx_ring_size, GFP_KERNEL);
4537 np->tx_dma_len = kmalloc(sizeof(unsigned int) * np->tx_ring_size, GFP_KERNEL);
4538 if (!np->rx_skbuff || !np->rx_dma || !np->tx_skbuff || !np->tx_dma || !np->tx_dma_len)
4539 goto out_freering; 4993 goto out_freering;
4540 memset(np->rx_skbuff, 0, sizeof(struct sk_buff*) * np->rx_ring_size); 4994 memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
4541 memset(np->rx_dma, 0, sizeof(dma_addr_t) * np->rx_ring_size); 4995 memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size);
4542 memset(np->tx_skbuff, 0, sizeof(struct sk_buff*) * np->tx_ring_size);
4543 memset(np->tx_dma, 0, sizeof(dma_addr_t) * np->tx_ring_size);
4544 memset(np->tx_dma_len, 0, sizeof(unsigned int) * np->tx_ring_size);
4545 4996
4546 dev->open = nv_open; 4997 dev->open = nv_open;
4547 dev->stop = nv_close; 4998 dev->stop = nv_close;
4548 dev->hard_start_xmit = nv_start_xmit; 4999 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
5000 dev->hard_start_xmit = nv_start_xmit;
5001 else
5002 dev->hard_start_xmit = nv_start_xmit_optimized;
4549 dev->get_stats = nv_get_stats; 5003 dev->get_stats = nv_get_stats;
4550 dev->change_mtu = nv_change_mtu; 5004 dev->change_mtu = nv_change_mtu;
4551 dev->set_mac_address = nv_set_mac_address; 5005 dev->set_mac_address = nv_set_mac_address;
@@ -4553,7 +5007,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
4553#ifdef CONFIG_NET_POLL_CONTROLLER 5007#ifdef CONFIG_NET_POLL_CONTROLLER
4554 dev->poll_controller = nv_poll_controller; 5008 dev->poll_controller = nv_poll_controller;
4555#endif 5009#endif
4556 dev->weight = 64; 5010 dev->weight = RX_WORK_PER_LOOP;
4557#ifdef CONFIG_FORCEDETH_NAPI 5011#ifdef CONFIG_FORCEDETH_NAPI
4558 dev->poll = nv_napi_poll; 5012 dev->poll = nv_napi_poll;
4559#endif 5013#endif
@@ -4868,83 +5322,83 @@ static struct pci_device_id pci_tbl[] = {
4868 }, 5322 },
4869 { /* CK804 Ethernet Controller */ 5323 { /* CK804 Ethernet Controller */
4870 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8), 5324 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8),
4871 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA, 5325 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
4872 }, 5326 },
4873 { /* CK804 Ethernet Controller */ 5327 { /* CK804 Ethernet Controller */
4874 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9), 5328 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9),
4875 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA, 5329 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
4876 }, 5330 },
4877 { /* MCP04 Ethernet Controller */ 5331 { /* MCP04 Ethernet Controller */
4878 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10), 5332 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10),
4879 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA, 5333 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
4880 }, 5334 },
4881 { /* MCP04 Ethernet Controller */ 5335 { /* MCP04 Ethernet Controller */
4882 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11), 5336 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11),
4883 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA, 5337 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
4884 }, 5338 },
4885 { /* MCP51 Ethernet Controller */ 5339 { /* MCP51 Ethernet Controller */
4886 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12), 5340 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12),
4887 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL, 5341 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
4888 }, 5342 },
4889 { /* MCP51 Ethernet Controller */ 5343 { /* MCP51 Ethernet Controller */
4890 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13), 5344 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13),
4891 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL, 5345 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
4892 }, 5346 },
4893 { /* MCP55 Ethernet Controller */ 5347 { /* MCP55 Ethernet Controller */
4894 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14), 5348 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
4895 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5349 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
4896 }, 5350 },
4897 { /* MCP55 Ethernet Controller */ 5351 { /* MCP55 Ethernet Controller */
4898 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15), 5352 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
4899 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5353 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
4900 }, 5354 },
4901 { /* MCP61 Ethernet Controller */ 5355 { /* MCP61 Ethernet Controller */
4902 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16), 5356 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16),
4903 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5357 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
4904 }, 5358 },
4905 { /* MCP61 Ethernet Controller */ 5359 { /* MCP61 Ethernet Controller */
4906 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17), 5360 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17),
4907 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5361 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
4908 }, 5362 },
4909 { /* MCP61 Ethernet Controller */ 5363 { /* MCP61 Ethernet Controller */
4910 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18), 5364 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18),
4911 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5365 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
4912 }, 5366 },
4913 { /* MCP61 Ethernet Controller */ 5367 { /* MCP61 Ethernet Controller */
4914 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19), 5368 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19),
4915 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5369 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
4916 }, 5370 },
4917 { /* MCP65 Ethernet Controller */ 5371 { /* MCP65 Ethernet Controller */
4918 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20), 5372 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20),
4919 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5373 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
4920 }, 5374 },
4921 { /* MCP65 Ethernet Controller */ 5375 { /* MCP65 Ethernet Controller */
4922 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21), 5376 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21),
4923 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5377 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
4924 }, 5378 },
4925 { /* MCP65 Ethernet Controller */ 5379 { /* MCP65 Ethernet Controller */
4926 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22), 5380 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22),
4927 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5381 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
4928 }, 5382 },
4929 { /* MCP65 Ethernet Controller */ 5383 { /* MCP65 Ethernet Controller */
4930 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23), 5384 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23),
4931 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5385 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
4932 }, 5386 },
4933 { /* MCP67 Ethernet Controller */ 5387 { /* MCP67 Ethernet Controller */
4934 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24), 5388 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24),
4935 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5389 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
4936 }, 5390 },
4937 { /* MCP67 Ethernet Controller */ 5391 { /* MCP67 Ethernet Controller */
4938 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_25), 5392 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_25),
4939 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5393 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
4940 }, 5394 },
4941 { /* MCP67 Ethernet Controller */ 5395 { /* MCP67 Ethernet Controller */
4942 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_26), 5396 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_26),
4943 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5397 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
4944 }, 5398 },
4945 { /* MCP67 Ethernet Controller */ 5399 { /* MCP67 Ethernet Controller */
4946 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27), 5400 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27),
4947 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5401 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
4948 }, 5402 },
4949 {0,}, 5403 {0,},
4950}; 5404};
diff --git a/drivers/net/fs_enet/fs_enet.h b/drivers/net/fs_enet/fs_enet.h
index 92590d8fc24b..569be225cd05 100644
--- a/drivers/net/fs_enet/fs_enet.h
+++ b/drivers/net/fs_enet/fs_enet.h
@@ -9,6 +9,7 @@
9#include <linux/dma-mapping.h> 9#include <linux/dma-mapping.h>
10 10
11#include <linux/fs_enet_pd.h> 11#include <linux/fs_enet_pd.h>
12#include <asm/fs_pd.h>
12 13
13#ifdef CONFIG_CPM1 14#ifdef CONFIG_CPM1
14#include <asm/commproc.h> 15#include <asm/commproc.h>
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c
index 6d71bea5e900..0d6943d67096 100644
--- a/drivers/net/gianfar_ethtool.c
+++ b/drivers/net/gianfar_ethtool.c
@@ -42,8 +42,6 @@
42 42
43#include "gianfar.h" 43#include "gianfar.h"
44 44
45#define is_power_of_2(x) ((x) != 0 && (((x) & ((x) - 1)) == 0))
46
47extern void gfar_start(struct net_device *dev); 45extern void gfar_start(struct net_device *dev);
48extern int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit); 46extern int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
49 47
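
The gianfar hunk drops a driver-local is_power_of_2() now that a generic kernel helper of the same name exists. The trick behind it: for x > 0, x & (x - 1) clears the lowest set bit, so the expression is zero exactly when x has a single bit set. A runnable userspace check of the deleted macro's logic:

    #include <assert.h>
    #include <stdio.h>

    /* same expression as the removed #define, as a function */
    static int is_power_of_2(unsigned long x)
    {
            return x != 0 && (x & (x - 1)) == 0;
    }

    int main(void)
    {
            assert(is_power_of_2(1) && is_power_of_2(256));
            assert(!is_power_of_2(0) && !is_power_of_2(384));
            printf("ok\n");
            return 0;
    }
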
diff --git a/drivers/net/hamradio/Kconfig b/drivers/net/hamradio/Kconfig
index 896aa02000d7..feb0ada7a025 100644
--- a/drivers/net/hamradio/Kconfig
+++ b/drivers/net/hamradio/Kconfig
@@ -113,7 +113,7 @@ config SCC_TRXECHO
113 113
114config BAYCOM_SER_FDX 114config BAYCOM_SER_FDX
115 tristate "BAYCOM ser12 fullduplex driver for AX.25" 115 tristate "BAYCOM ser12 fullduplex driver for AX.25"
116 depends on AX25 116 depends on AX25 && !S390
117 select CRC_CCITT 117 select CRC_CCITT
118 ---help--- 118 ---help---
119 This is one of two drivers for Baycom style simple amateur radio 119 This is one of two drivers for Baycom style simple amateur radio
@@ -133,7 +133,7 @@ config BAYCOM_SER_FDX
133 133
134config BAYCOM_SER_HDX 134config BAYCOM_SER_HDX
135 tristate "BAYCOM ser12 halfduplex driver for AX.25" 135 tristate "BAYCOM ser12 halfduplex driver for AX.25"
136 depends on AX25 136 depends on AX25 && !S390
137 select CRC_CCITT 137 select CRC_CCITT
138 ---help--- 138 ---help---
139 This is one of two drivers for Baycom style simple amateur radio 139 This is one of two drivers for Baycom style simple amateur radio
@@ -181,7 +181,7 @@ config BAYCOM_EPP
181 181
182config YAM 182config YAM
183 tristate "YAM driver for AX.25" 183 tristate "YAM driver for AX.25"
184 depends on AX25 184 depends on AX25 && !S390
185 help 185 help
186 The YAM is a modem for packet radio which connects to the serial 186 The YAM is a modem for packet radio which connects to the serial
187 port and includes some of the functions of a Terminal Node 187 port and includes some of the functions of a Terminal Node
diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c
index 844c136e9920..7dc5185aa2c0 100644
--- a/drivers/net/hp100.c
+++ b/drivers/net/hp100.c
@@ -3034,7 +3034,7 @@ static int __init hp100_module_init(void)
3034 goto out2; 3034 goto out2;
3035#endif 3035#endif
3036#ifdef CONFIG_PCI 3036#ifdef CONFIG_PCI
3037 err = pci_module_init(&hp100_pci_driver); 3037 err = pci_register_driver(&hp100_pci_driver);
3038 if (err && err != -ENODEV) 3038 if (err && err != -ENODEV)
3039 goto out3; 3039 goto out3;
3040#endif 3040#endif
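
hp100 switches from the deprecated pci_module_init() wrapper to calling pci_register_driver() directly, which returns 0 on success or a negative errno. A minimal module skeleton showing the modern pattern; the struct and names below are placeholders, not hp100's own:

    #include <linux/module.h>
    #include <linux/pci.h>

    static struct pci_driver sketch_pci_driver = {
            .name     = "sketch",
            .id_table = NULL,   /* a real driver points this at its pci_device_id table */
    };

    static int __init sketch_init(void)
    {
            return pci_register_driver(&sketch_pci_driver);
    }

    static void __exit sketch_exit(void)
    {
            pci_unregister_driver(&sketch_pci_driver);
    }

    module_init(sketch_init);
    module_exit(sketch_exit);
    MODULE_LICENSE("GPL");
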
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index 2194b567239f..0e9ba3c3faf7 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -1102,7 +1102,7 @@ static struct net_device * __init veth_probe_one(int vlan,
1102 } 1102 }
1103 1103
1104 kobject_init(&port->kobject); 1104 kobject_init(&port->kobject);
1105 port->kobject.parent = &dev->class_dev.kobj; 1105 port->kobject.parent = &dev->dev.kobj;
1106 port->kobject.ktype = &veth_port_ktype; 1106 port->kobject.ktype = &veth_port_ktype;
1107 kobject_set_name(&port->kobject, "veth_port"); 1107 kobject_set_name(&port->kobject, "veth_port");
1108 if (0 != kobject_add(&port->kobject)) 1108 if (0 != kobject_add(&port->kobject))
diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h
index f4aba4355b19..cf30a1059ce0 100644
--- a/drivers/net/ixgb/ixgb.h
+++ b/drivers/net/ixgb/ixgb.h
@@ -61,9 +61,7 @@
61#include <net/pkt_sched.h> 61#include <net/pkt_sched.h>
62#include <linux/list.h> 62#include <linux/list.h>
63#include <linux/reboot.h> 63#include <linux/reboot.h>
64#ifdef NETIF_F_TSO
65#include <net/checksum.h> 64#include <net/checksum.h>
66#endif
67 65
68#include <linux/ethtool.h> 66#include <linux/ethtool.h>
69#include <linux/if_vlan.h> 67#include <linux/if_vlan.h>
diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ixgb/ixgb_ethtool.c
index 82c044d6e08a..d6628bd9590a 100644
--- a/drivers/net/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ixgb/ixgb_ethtool.c
@@ -82,10 +82,8 @@ static struct ixgb_stats ixgb_gstrings_stats[] = {
82 {"tx_restart_queue", IXGB_STAT(restart_queue) }, 82 {"tx_restart_queue", IXGB_STAT(restart_queue) },
83 {"rx_long_length_errors", IXGB_STAT(stats.roc)}, 83 {"rx_long_length_errors", IXGB_STAT(stats.roc)},
84 {"rx_short_length_errors", IXGB_STAT(stats.ruc)}, 84 {"rx_short_length_errors", IXGB_STAT(stats.ruc)},
85#ifdef NETIF_F_TSO
86 {"tx_tcp_seg_good", IXGB_STAT(stats.tsctc)}, 85 {"tx_tcp_seg_good", IXGB_STAT(stats.tsctc)},
87 {"tx_tcp_seg_failed", IXGB_STAT(stats.tsctfc)}, 86 {"tx_tcp_seg_failed", IXGB_STAT(stats.tsctfc)},
88#endif
89 {"rx_flow_control_xon", IXGB_STAT(stats.xonrxc)}, 87 {"rx_flow_control_xon", IXGB_STAT(stats.xonrxc)},
90 {"rx_flow_control_xoff", IXGB_STAT(stats.xoffrxc)}, 88 {"rx_flow_control_xoff", IXGB_STAT(stats.xoffrxc)},
91 {"tx_flow_control_xon", IXGB_STAT(stats.xontxc)}, 89 {"tx_flow_control_xon", IXGB_STAT(stats.xontxc)},
@@ -240,7 +238,6 @@ ixgb_set_tx_csum(struct net_device *netdev, uint32_t data)
240 return 0; 238 return 0;
241} 239}
242 240
243#ifdef NETIF_F_TSO
244static int 241static int
245ixgb_set_tso(struct net_device *netdev, uint32_t data) 242ixgb_set_tso(struct net_device *netdev, uint32_t data)
246{ 243{
@@ -250,7 +247,6 @@ ixgb_set_tso(struct net_device *netdev, uint32_t data)
250 netdev->features &= ~NETIF_F_TSO; 247 netdev->features &= ~NETIF_F_TSO;
251 return 0; 248 return 0;
252} 249}
253#endif /* NETIF_F_TSO */
254 250
255static uint32_t 251static uint32_t
256ixgb_get_msglevel(struct net_device *netdev) 252ixgb_get_msglevel(struct net_device *netdev)
@@ -722,10 +718,8 @@ static const struct ethtool_ops ixgb_ethtool_ops = {
722 .set_sg = ethtool_op_set_sg, 718 .set_sg = ethtool_op_set_sg,
723 .get_msglevel = ixgb_get_msglevel, 719 .get_msglevel = ixgb_get_msglevel,
724 .set_msglevel = ixgb_set_msglevel, 720 .set_msglevel = ixgb_set_msglevel,
725#ifdef NETIF_F_TSO
726 .get_tso = ethtool_op_get_tso, 721 .get_tso = ethtool_op_get_tso,
727 .set_tso = ixgb_set_tso, 722 .set_tso = ixgb_set_tso,
728#endif
729 .get_strings = ixgb_get_strings, 723 .get_strings = ixgb_get_strings,
730 .phys_id = ixgb_phys_id, 724 .phys_id = ixgb_phys_id,
731 .get_stats_count = ixgb_get_stats_count, 725 .get_stats_count = ixgb_get_stats_count,
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index a083a9189230..0c3682889344 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -456,9 +456,7 @@ ixgb_probe(struct pci_dev *pdev,
456 NETIF_F_HW_VLAN_TX | 456 NETIF_F_HW_VLAN_TX |
457 NETIF_F_HW_VLAN_RX | 457 NETIF_F_HW_VLAN_RX |
458 NETIF_F_HW_VLAN_FILTER; 458 NETIF_F_HW_VLAN_FILTER;
459#ifdef NETIF_F_TSO
460 netdev->features |= NETIF_F_TSO; 459 netdev->features |= NETIF_F_TSO;
461#endif
462#ifdef NETIF_F_LLTX 460#ifdef NETIF_F_LLTX
463 netdev->features |= NETIF_F_LLTX; 461 netdev->features |= NETIF_F_LLTX;
464#endif 462#endif
@@ -1176,7 +1174,6 @@ ixgb_watchdog(unsigned long data)
1176static int 1174static int
1177ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb) 1175ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
1178{ 1176{
1179#ifdef NETIF_F_TSO
1180 struct ixgb_context_desc *context_desc; 1177 struct ixgb_context_desc *context_desc;
1181 unsigned int i; 1178 unsigned int i;
1182 uint8_t ipcss, ipcso, tucss, tucso, hdr_len; 1179 uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
@@ -1233,7 +1230,6 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
1233 1230
1234 return 1; 1231 return 1;
1235 } 1232 }
1236#endif
1237 1233
1238 return 0; 1234 return 0;
1239} 1235}
@@ -1609,7 +1605,7 @@ ixgb_update_stats(struct ixgb_adapter *adapter)
1609 struct pci_dev *pdev = adapter->pdev; 1605 struct pci_dev *pdev = adapter->pdev;
1610 1606
1611 /* Prevent stats update while adapter is being reset */ 1607 /* Prevent stats update while adapter is being reset */
1612 if (pdev->error_state && pdev->error_state != pci_channel_io_normal) 1608 if (pci_channel_offline(pdev))
1613 return; 1609 return;
1614 1610
1615 if((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) || 1611 if((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) ||
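
The last ixgb hunk replaces an open-coded error_state test with pci_channel_offline(). A hedged userspace reconstruction of what that helper does; in the kernel it is an inline in <linux/pci.h>, and the enum and struct here are stand-ins for the real pci_channel_state and pci_dev:

    #include <stdio.h>

    enum pci_channel_state {
            pci_channel_io_normal = 1,
            pci_channel_io_frozen = 2,
            pci_channel_io_perm_failure = 3,
    };

    struct pci_dev_sketch {
            enum pci_channel_state error_state;
    };

    /* true whenever the channel is not in the normal I/O state */
    static int pci_channel_offline(const struct pci_dev_sketch *pdev)
    {
            return pdev->error_state != pci_channel_io_normal;
    }

    int main(void)
    {
            struct pci_dev_sketch dev = { .error_state = pci_channel_io_frozen };

            printf("offline: %d\n", pci_channel_offline(&dev));  /* prints 1 */
            return 0;
    }
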
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index 25b559b5d5ed..e67361e2bf5d 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -27,8 +27,6 @@
27 27
28#include "macb.h" 28#include "macb.h"
29 29
30#define to_net_dev(class) container_of(class, struct net_device, class_dev)
31
32#define RX_BUFFER_SIZE 128 30#define RX_BUFFER_SIZE 128
33#define RX_RING_SIZE 512 31#define RX_RING_SIZE 512
34#define RX_RING_BYTES (sizeof(struct dma_desc) * RX_RING_SIZE) 32#define RX_RING_BYTES (sizeof(struct dma_desc) * RX_RING_SIZE)
@@ -945,10 +943,10 @@ static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
945 return ret; 943 return ret;
946} 944}
947 945
948static ssize_t macb_mii_show(const struct class_device *cd, char *buf, 946static ssize_t macb_mii_show(const struct device *_dev, char *buf,
949 unsigned long addr) 947 unsigned long addr)
950{ 948{
951 struct net_device *dev = to_net_dev(cd); 949 struct net_device *dev = to_net_dev(_dev);
952 struct macb *bp = netdev_priv(dev); 950 struct macb *bp = netdev_priv(dev);
953 ssize_t ret = -EINVAL; 951 ssize_t ret = -EINVAL;
954 952
@@ -962,11 +960,13 @@ static ssize_t macb_mii_show(const struct class_device *cd, char *buf,
962} 960}
963 961
964#define MII_ENTRY(name, addr) \ 962#define MII_ENTRY(name, addr) \
965static ssize_t show_##name(struct class_device *cd, char *buf) \ 963static ssize_t show_##name(struct device *_dev, \
964 struct device_attribute *attr, \
965 char *buf) \
966{ \ 966{ \
967 return macb_mii_show(cd, buf, addr); \ 967 return macb_mii_show(_dev, buf, addr); \
968} \ 968} \
969static CLASS_DEVICE_ATTR(name, S_IRUGO, show_##name, NULL) 969static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
970 970
971MII_ENTRY(bmcr, MII_BMCR); 971MII_ENTRY(bmcr, MII_BMCR);
972MII_ENTRY(bmsr, MII_BMSR); 972MII_ENTRY(bmsr, MII_BMSR);
@@ -977,13 +977,13 @@ MII_ENTRY(lpa, MII_LPA);
977MII_ENTRY(expansion, MII_EXPANSION); 977MII_ENTRY(expansion, MII_EXPANSION);
978 978
979static struct attribute *macb_mii_attrs[] = { 979static struct attribute *macb_mii_attrs[] = {
980 &class_device_attr_bmcr.attr, 980 &dev_attr_bmcr.attr,
981 &class_device_attr_bmsr.attr, 981 &dev_attr_bmsr.attr,
982 &class_device_attr_physid1.attr, 982 &dev_attr_physid1.attr,
983 &class_device_attr_physid2.attr, 983 &dev_attr_physid2.attr,
984 &class_device_attr_advertise.attr, 984 &dev_attr_advertise.attr,
985 &class_device_attr_lpa.attr, 985 &dev_attr_lpa.attr,
986 &class_device_attr_expansion.attr, 986 &dev_attr_expansion.attr,
987 NULL, 987 NULL,
988}; 988};
989 989
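
macb's sysfs attributes move from the old class_device model to plain struct device: DEVICE_ATTR(foo, ...) declares a struct device_attribute named dev_attr_foo, which is why the array entries change from class_device_attr_* to dev_attr_* and why each show() callback gains a struct device_attribute * argument. A minimal kernel-C sketch with an invented attribute name:

    #include <linux/device.h>
    #include <linux/kernel.h>
    #include <linux/stat.h>

    static ssize_t show_foo(struct device *dev, struct device_attribute *attr,
                            char *buf)
    {
            return sprintf(buf, "%d\n", 42);
    }
    static DEVICE_ATTR(foo, S_IRUGO, show_foo, NULL);

    static struct attribute *foo_attrs[] = {
            &dev_attr_foo.attr,     /* the name DEVICE_ATTR() generated */
            NULL,
    };
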
@@ -994,17 +994,17 @@ static struct attribute_group macb_mii_group = {
994 994
995static void macb_unregister_sysfs(struct net_device *net) 995static void macb_unregister_sysfs(struct net_device *net)
996{ 996{
997 struct class_device *class_dev = &net->class_dev; 997 struct device *_dev = &net->dev;
998 998
999 sysfs_remove_group(&class_dev->kobj, &macb_mii_group); 999 sysfs_remove_group(&_dev->kobj, &macb_mii_group);
1000} 1000}
1001 1001
1002static int macb_register_sysfs(struct net_device *net) 1002static int macb_register_sysfs(struct net_device *net)
1003{ 1003{
1004 struct class_device *class_dev = &net->class_dev; 1004 struct device *_dev = &net->dev;
1005 int ret; 1005 int ret;
1006 1006
1007 ret = sysfs_create_group(&class_dev->kobj, &macb_mii_group); 1007 ret = sysfs_create_group(&_dev->kobj, &macb_mii_group);
1008 if (ret) 1008 if (ret)
1009 printk(KERN_WARNING 1009 printk(KERN_WARNING
1010 "%s: sysfs mii attribute registration failed: %d\n", 1010 "%s: sysfs mii attribute registration failed: %d\n",
@@ -1046,6 +1046,14 @@ static int __devinit macb_probe(struct platform_device *pdev)
1046 1046
1047 spin_lock_init(&bp->lock); 1047 spin_lock_init(&bp->lock);
1048 1048
1049#if defined(CONFIG_ARCH_AT91)
1050 bp->pclk = clk_get(&pdev->dev, "macb_clk");
1051 if (IS_ERR(bp->pclk)) {
1052 dev_err(&pdev->dev, "failed to get macb_clk\n");
1053 goto err_out_free_dev;
1054 }
1055 clk_enable(bp->pclk);
1056#else
1049 bp->pclk = clk_get(&pdev->dev, "pclk"); 1057 bp->pclk = clk_get(&pdev->dev, "pclk");
1050 if (IS_ERR(bp->pclk)) { 1058 if (IS_ERR(bp->pclk)) {
1051 dev_err(&pdev->dev, "failed to get pclk\n"); 1059 dev_err(&pdev->dev, "failed to get pclk\n");
@@ -1059,6 +1067,7 @@ static int __devinit macb_probe(struct platform_device *pdev)
1059 1067
1060 clk_enable(bp->pclk); 1068 clk_enable(bp->pclk);
1061 clk_enable(bp->hclk); 1069 clk_enable(bp->hclk);
1070#endif
1062 1071
1063 bp->regs = ioremap(regs->start, regs->end - regs->start + 1); 1072 bp->regs = ioremap(regs->start, regs->end - regs->start + 1);
1064 if (!bp->regs) { 1073 if (!bp->regs) {
@@ -1119,9 +1128,17 @@ static int __devinit macb_probe(struct platform_device *pdev)
1119 1128
1120 pdata = pdev->dev.platform_data; 1129 pdata = pdev->dev.platform_data;
1121 if (pdata && pdata->is_rmii) 1130 if (pdata && pdata->is_rmii)
1131#if defined(CONFIG_ARCH_AT91)
1132 macb_writel(bp, USRIO, (MACB_BIT(RMII) | MACB_BIT(CLKEN)) );
1133#else
1122 macb_writel(bp, USRIO, 0); 1134 macb_writel(bp, USRIO, 0);
1135#endif
1123 else 1136 else
1137#if defined(CONFIG_ARCH_AT91)
1138 macb_writel(bp, USRIO, MACB_BIT(CLKEN));
1139#else
1124 macb_writel(bp, USRIO, MACB_BIT(MII)); 1140 macb_writel(bp, USRIO, MACB_BIT(MII));
1141#endif
1125 1142
1126 bp->tx_pending = DEF_TX_RING_PENDING; 1143 bp->tx_pending = DEF_TX_RING_PENDING;
1127 1144
@@ -1148,9 +1165,11 @@ err_out_free_irq:
1148err_out_iounmap: 1165err_out_iounmap:
1149 iounmap(bp->regs); 1166 iounmap(bp->regs);
1150err_out_disable_clocks: 1167err_out_disable_clocks:
1168#ifndef CONFIG_ARCH_AT91
1151 clk_disable(bp->hclk); 1169 clk_disable(bp->hclk);
1152 clk_disable(bp->pclk);
1153 clk_put(bp->hclk); 1170 clk_put(bp->hclk);
1171#endif
1172 clk_disable(bp->pclk);
1154err_out_put_pclk: 1173err_out_put_pclk:
1155 clk_put(bp->pclk); 1174 clk_put(bp->pclk);
1156err_out_free_dev: 1175err_out_free_dev:
@@ -1173,9 +1192,11 @@ static int __devexit macb_remove(struct platform_device *pdev)
1173 unregister_netdev(dev); 1192 unregister_netdev(dev);
1174 free_irq(dev->irq, dev); 1193 free_irq(dev->irq, dev);
1175 iounmap(bp->regs); 1194 iounmap(bp->regs);
1195#ifndef CONFIG_ARCH_AT91
1176 clk_disable(bp->hclk); 1196 clk_disable(bp->hclk);
1177 clk_disable(bp->pclk);
1178 clk_put(bp->hclk); 1197 clk_put(bp->hclk);
1198#endif
1199 clk_disable(bp->pclk);
1179 clk_put(bp->pclk); 1200 clk_put(bp->pclk);
1180 free_netdev(dev); 1201 free_netdev(dev);
1181 platform_set_drvdata(pdev, NULL); 1202 platform_set_drvdata(pdev, NULL);
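
The AT91 support threaded through probe, the error paths, and remove reflects a clocking difference: AT91 exposes a single "macb_clk", while AVR32 has separate "pclk" and "hclk" gates, hence the #ifdef pairs. The underlying clk API pattern, as a sketch (probe_clk() is an illustrative name and error handling is trimmed to the essentials):

    #include <linux/clk.h>
    #include <linux/device.h>
    #include <linux/err.h>

    static int probe_clk(struct device *dev)
    {
            struct clk *pclk = clk_get(dev, "macb_clk");   /* the AT91 name */

            if (IS_ERR(pclk))
                    return PTR_ERR(pclk);
            clk_enable(pclk);

            /* ... touch the hardware ... */

            clk_disable(pclk);
            clk_put(pclk);
            return 0;
    }
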
diff --git a/drivers/net/macb.h b/drivers/net/macb.h
index 27bf0ae0f0bb..b3bb2182edd1 100644
--- a/drivers/net/macb.h
+++ b/drivers/net/macb.h
@@ -200,7 +200,7 @@
200#define MACB_SOF_OFFSET 30 200#define MACB_SOF_OFFSET 30
201#define MACB_SOF_SIZE 2 201#define MACB_SOF_SIZE 2
202 202
203/* Bitfields in USRIO */ 203/* Bitfields in USRIO (AVR32) */
204#define MACB_MII_OFFSET 0 204#define MACB_MII_OFFSET 0
205#define MACB_MII_SIZE 1 205#define MACB_MII_SIZE 1
206#define MACB_EAM_OFFSET 1 206#define MACB_EAM_OFFSET 1
@@ -210,6 +210,12 @@
210#define MACB_TX_PAUSE_ZERO_OFFSET 3 210#define MACB_TX_PAUSE_ZERO_OFFSET 3
211#define MACB_TX_PAUSE_ZERO_SIZE 1 211#define MACB_TX_PAUSE_ZERO_SIZE 1
212 212
213/* Bitfields in USRIO (AT91) */
214#define MACB_RMII_OFFSET 0
215#define MACB_RMII_SIZE 1
216#define MACB_CLKEN_OFFSET 1
217#define MACB_CLKEN_SIZE 1
218
213/* Bitfields in WOL */ 219/* Bitfields in WOL */
214#define MACB_IP_OFFSET 0 220#define MACB_IP_OFFSET 0
215#define MACB_IP_SIZE 16 221#define MACB_IP_SIZE 16
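
The new AT91 USRIO bits are consumed through macb's OFFSET-based macros, e.g. the MACB_BIT(RMII) | MACB_BIT(CLKEN) write in the probe hunk above. A userspace sketch of the arithmetic; MACB_BIT() is reproduced from memory of macb.h (an assumption), only the OFFSET values come from this diff:

    #include <stdio.h>

    #define MACB_RMII_OFFSET   0
    #define MACB_CLKEN_OFFSET  1
    #define MACB_BIT(name)     (1 << MACB_##name##_OFFSET)

    int main(void)
    {
            /* RMII boards get RMII|CLKEN, MII boards get CLKEN alone */
            printf("RMII|CLKEN = %#x\n", MACB_BIT(RMII) | MACB_BIT(CLKEN)); /* 0x3 */
            printf("CLKEN      = %#x\n", MACB_BIT(CLKEN));                  /* 0x2 */
            return 0;
    }
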
diff --git a/drivers/net/mace.c b/drivers/net/mace.c
index 2907cfb12ada..9ec24f0d5d68 100644
--- a/drivers/net/mace.c
+++ b/drivers/net/mace.c
@@ -15,6 +15,7 @@
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/crc32.h> 16#include <linux/crc32.h>
17#include <linux/spinlock.h> 17#include <linux/spinlock.h>
18#include <linux/bitrev.h>
18#include <asm/prom.h> 19#include <asm/prom.h>
19#include <asm/dbdma.h> 20#include <asm/dbdma.h>
20#include <asm/io.h> 21#include <asm/io.h>
@@ -74,7 +75,6 @@ struct mace_data {
74#define PRIV_BYTES (sizeof(struct mace_data) \ 75#define PRIV_BYTES (sizeof(struct mace_data) \
75 + (N_RX_RING + NCMDS_TX * N_TX_RING + 3) * sizeof(struct dbdma_cmd)) 76 + (N_RX_RING + NCMDS_TX * N_TX_RING + 3) * sizeof(struct dbdma_cmd))
76 77
77static int bitrev(int);
78static int mace_open(struct net_device *dev); 78static int mace_open(struct net_device *dev);
79static int mace_close(struct net_device *dev); 79static int mace_close(struct net_device *dev);
80static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev); 80static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
@@ -96,18 +96,6 @@ static void __mace_set_address(struct net_device *dev, void *addr);
96 */ 96 */
97static unsigned char *dummy_buf; 97static unsigned char *dummy_buf;
98 98
99/* Bit-reverse one byte of an ethernet hardware address. */
100static inline int
101bitrev(int b)
102{
103 int d = 0, i;
104
105 for (i = 0; i < 8; ++i, b >>= 1)
106 d = (d << 1) | (b & 1);
107 return d;
108}
109
110
111static int __devinit mace_probe(struct macio_dev *mdev, const struct of_device_id *match) 99static int __devinit mace_probe(struct macio_dev *mdev, const struct of_device_id *match)
112{ 100{
113 struct device_node *mace = macio_get_of_node(mdev); 101 struct device_node *mace = macio_get_of_node(mdev);
@@ -173,7 +161,7 @@ static int __devinit mace_probe(struct macio_dev *mdev, const struct of_device_i
173 161
174 rev = addr[0] == 0 && addr[1] == 0xA0; 162 rev = addr[0] == 0 && addr[1] == 0xA0;
175 for (j = 0; j < 6; ++j) { 163 for (j = 0; j < 6; ++j) {
176 dev->dev_addr[j] = rev? bitrev(addr[j]): addr[j]; 164 dev->dev_addr[j] = rev ? bitrev8(addr[j]): addr[j];
177 } 165 }
178 mp->chipid = (in_8(&mp->mace->chipid_hi) << 8) | 166 mp->chipid = (in_8(&mp->mace->chipid_hi) << 8) |
179 in_8(&mp->mace->chipid_lo); 167 in_8(&mp->mace->chipid_lo);
diff --git a/drivers/net/macmace.c b/drivers/net/macmace.c
index 464e4a6f3d5f..5d541e873041 100644
--- a/drivers/net/macmace.c
+++ b/drivers/net/macmace.c
@@ -22,6 +22,7 @@
22#include <linux/delay.h> 22#include <linux/delay.h>
23#include <linux/string.h> 23#include <linux/string.h>
24#include <linux/crc32.h> 24#include <linux/crc32.h>
25#include <linux/bitrev.h>
25#include <asm/io.h> 26#include <asm/io.h>
26#include <asm/pgtable.h> 27#include <asm/pgtable.h>
27#include <asm/irq.h> 28#include <asm/irq.h>
@@ -81,19 +82,6 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id);
81static irqreturn_t mace_dma_intr(int irq, void *dev_id); 82static irqreturn_t mace_dma_intr(int irq, void *dev_id);
82static void mace_tx_timeout(struct net_device *dev); 83static void mace_tx_timeout(struct net_device *dev);
83 84
84/* Bit-reverse one byte of an ethernet hardware address. */
85
86static int bitrev(int b)
87{
88 int d = 0, i;
89
90 for (i = 0; i < 8; ++i, b >>= 1) {
91 d = (d << 1) | (b & 1);
92 }
93
94 return d;
95}
96
97/* 85/*
98 * Load a receive DMA channel with a base address and ring length 86 * Load a receive DMA channel with a base address and ring length
99 */ 87 */
@@ -219,12 +207,12 @@ struct net_device *mace_probe(int unit)
219 addr = (void *)MACE_PROM; 207 addr = (void *)MACE_PROM;
220 208
221 for (j = 0; j < 6; ++j) { 209 for (j = 0; j < 6; ++j) {
222 u8 v=bitrev(addr[j<<4]); 210 u8 v = bitrev8(addr[j<<4]);
223 checksum ^= v; 211 checksum ^= v;
224 dev->dev_addr[j] = v; 212 dev->dev_addr[j] = v;
225 } 213 }
226 for (; j < 8; ++j) { 214 for (; j < 8; ++j) {
227 checksum ^= bitrev(addr[j<<4]); 215 checksum ^= bitrev8(addr[j<<4]);
228 } 216 }
229 217
230 if (checksum != 0xFF) { 218 if (checksum != 0xFF) {
diff --git a/drivers/net/macsonic.c b/drivers/net/macsonic.c
index 393d995f1919..8ca57a0a4c11 100644
--- a/drivers/net/macsonic.c
+++ b/drivers/net/macsonic.c
@@ -49,6 +49,7 @@
49#include <linux/skbuff.h> 49#include <linux/skbuff.h>
50#include <linux/platform_device.h> 50#include <linux/platform_device.h>
51#include <linux/dma-mapping.h> 51#include <linux/dma-mapping.h>
52#include <linux/bitrev.h>
52 53
53#include <asm/bootinfo.h> 54#include <asm/bootinfo.h>
54#include <asm/system.h> 55#include <asm/system.h>
@@ -121,16 +122,12 @@ enum macsonic_type {
121 * For reversing the PROM address 122 * For reversing the PROM address
122 */ 123 */
123 124
124static unsigned char nibbletab[] = {0, 8, 4, 12, 2, 10, 6, 14,
125 1, 9, 5, 13, 3, 11, 7, 15};
126
127static inline void bit_reverse_addr(unsigned char addr[6]) 125static inline void bit_reverse_addr(unsigned char addr[6])
128{ 126{
129 int i; 127 int i;
130 128
131 for(i = 0; i < 6; i++) 129 for(i = 0; i < 6; i++)
132 addr[i] = ((nibbletab[addr[i] & 0xf] << 4) | 130 addr[i] = bitrev8(addr[i]);
133 nibbletab[(addr[i] >> 4) &0xf]);
134} 131}
135 132
136int __init macsonic_init(struct net_device* dev) 133int __init macsonic_init(struct net_device* dev)
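
mace, macmace and macsonic each carried a private byte-reverser for the bit-swapped MAC addresses in their PROMs; the hunks above replace all three with the shared bitrev8() from <linux/bitrev.h>. A userspace reconstruction of both deleted variants, checked against each other:

    #include <assert.h>
    #include <stdio.h>

    /* mace.c's loop variant */
    static unsigned char bitrev_loop(unsigned char b)
    {
            int d = 0, i;

            for (i = 0; i < 8; ++i, b >>= 1)
                    d = (d << 1) | (b & 1);
            return d;
    }

    /* macsonic.c's nibble-table variant */
    static const unsigned char nibbletab[] = {0, 8, 4, 12, 2, 10, 6, 14,
                                              1, 9, 5, 13, 3, 11, 7, 15};

    static unsigned char bitrev_nibble(unsigned char b)
    {
            return (nibbletab[b & 0xf] << 4) | nibbletab[(b >> 4) & 0xf];
    }

    int main(void)
    {
            int b;

            for (b = 0; b < 256; b++)
                    assert(bitrev_loop(b) == bitrev_nibble(b));
            printf("0xA0 reversed = %#x\n", bitrev_loop(0xA0));  /* 0x5 */
            return 0;
    }
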
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index b3bf86422734..d98e53efa2ef 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -2780,7 +2780,6 @@ static const struct ethtool_ops mv643xx_ethtool_ops = {
2780 .get_link = mv643xx_eth_get_link, 2780 .get_link = mv643xx_eth_get_link,
2781 .get_sg = ethtool_op_get_sg, 2781 .get_sg = ethtool_op_get_sg,
2782 .set_sg = ethtool_op_set_sg, 2782 .set_sg = ethtool_op_set_sg,
2783 .get_strings = mv643xx_get_strings,
2784 .get_stats_count = mv643xx_get_stats_count, 2783 .get_stats_count = mv643xx_get_stats_count,
2785 .get_ethtool_stats = mv643xx_get_ethtool_stats, 2784 .get_ethtool_stats = mv643xx_get_ethtool_stats,
2786 .get_strings = mv643xx_get_strings, 2785 .get_strings = mv643xx_get_strings,
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 61cbd4a60446..030924fb1ab3 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -1412,10 +1412,8 @@ static const struct ethtool_ops myri10ge_ethtool_ops = {
1412 .set_tx_csum = ethtool_op_set_tx_hw_csum, 1412 .set_tx_csum = ethtool_op_set_tx_hw_csum,
1413 .get_sg = ethtool_op_get_sg, 1413 .get_sg = ethtool_op_get_sg,
1414 .set_sg = ethtool_op_set_sg, 1414 .set_sg = ethtool_op_set_sg,
1415#ifdef NETIF_F_TSO
1416 .get_tso = ethtool_op_get_tso, 1415 .get_tso = ethtool_op_get_tso,
1417 .set_tso = ethtool_op_set_tso, 1416 .set_tso = ethtool_op_set_tso,
1418#endif
1419 .get_strings = myri10ge_get_strings, 1417 .get_strings = myri10ge_get_strings,
1420 .get_stats_count = myri10ge_get_stats_count, 1418 .get_stats_count = myri10ge_get_stats_count,
1421 .get_ethtool_stats = myri10ge_get_ethtool_stats, 1419 .get_ethtool_stats = myri10ge_get_ethtool_stats,
@@ -1975,13 +1973,11 @@ again:
1975 mss = 0; 1973 mss = 0;
1976 max_segments = MXGEFW_MAX_SEND_DESC; 1974 max_segments = MXGEFW_MAX_SEND_DESC;
1977 1975
1978#ifdef NETIF_F_TSO
1979 if (skb->len > (dev->mtu + ETH_HLEN)) { 1976 if (skb->len > (dev->mtu + ETH_HLEN)) {
1980 mss = skb_shinfo(skb)->gso_size; 1977 mss = skb_shinfo(skb)->gso_size;
1981 if (mss != 0) 1978 if (mss != 0)
1982 max_segments = MYRI10GE_MAX_SEND_DESC_TSO; 1979 max_segments = MYRI10GE_MAX_SEND_DESC_TSO;
1983 } 1980 }
1984#endif /*NETIF_F_TSO */
1985 1981
1986 if ((unlikely(avail < max_segments))) { 1982 if ((unlikely(avail < max_segments))) {
1987 /* we are out of transmit resources */ 1983 /* we are out of transmit resources */
@@ -2013,7 +2009,6 @@ again:
2013 2009
2014 cum_len = 0; 2010 cum_len = 0;
2015 2011
2016#ifdef NETIF_F_TSO
2017 if (mss) { /* TSO */ 2012 if (mss) { /* TSO */
2018 /* this removes any CKSUM flag from before */ 2013 /* this removes any CKSUM flag from before */
2019 flags = (MXGEFW_FLAGS_TSO_HDR | MXGEFW_FLAGS_FIRST); 2014 flags = (MXGEFW_FLAGS_TSO_HDR | MXGEFW_FLAGS_FIRST);
@@ -2029,7 +2024,6 @@ again:
2029 * the checksum by parsing the header. */ 2024 * the checksum by parsing the header. */
2030 pseudo_hdr_offset = mss; 2025 pseudo_hdr_offset = mss;
2031 } else 2026 } else
2032#endif /*NETIF_F_TSO */
2033 /* Mark small packets, and pad out tiny packets */ 2027 /* Mark small packets, and pad out tiny packets */
2034 if (skb->len <= MXGEFW_SEND_SMALL_SIZE) { 2028 if (skb->len <= MXGEFW_SEND_SMALL_SIZE) {
2035 flags |= MXGEFW_FLAGS_SMALL; 2029 flags |= MXGEFW_FLAGS_SMALL;
@@ -2097,7 +2091,6 @@ again:
2097 seglen = len; 2091 seglen = len;
2098 flags_next = flags & ~MXGEFW_FLAGS_FIRST; 2092 flags_next = flags & ~MXGEFW_FLAGS_FIRST;
2099 cum_len_next = cum_len + seglen; 2093 cum_len_next = cum_len + seglen;
2100#ifdef NETIF_F_TSO
2101 if (mss) { /* TSO */ 2094 if (mss) { /* TSO */
2102 (req - rdma_count)->rdma_count = rdma_count + 1; 2095 (req - rdma_count)->rdma_count = rdma_count + 1;
2103 2096
@@ -2124,7 +2117,6 @@ again:
2124 (small * MXGEFW_FLAGS_SMALL); 2117 (small * MXGEFW_FLAGS_SMALL);
2125 } 2118 }
2126 } 2119 }
2127#endif /* NETIF_F_TSO */
2128 req->addr_high = high_swapped; 2120 req->addr_high = high_swapped;
2129 req->addr_low = htonl(low); 2121 req->addr_low = htonl(low);
2130 req->pseudo_hdr_offset = htons(pseudo_hdr_offset); 2122 req->pseudo_hdr_offset = htons(pseudo_hdr_offset);
@@ -2161,14 +2153,12 @@ again:
2161 } 2153 }
2162 2154
2163 (req - rdma_count)->rdma_count = rdma_count; 2155 (req - rdma_count)->rdma_count = rdma_count;
2164#ifdef NETIF_F_TSO
2165 if (mss) 2156 if (mss)
2166 do { 2157 do {
2167 req--; 2158 req--;
2168 req->flags |= MXGEFW_FLAGS_TSO_LAST; 2159 req->flags |= MXGEFW_FLAGS_TSO_LAST;
2169 } while (!(req->flags & (MXGEFW_FLAGS_TSO_CHOP | 2160 } while (!(req->flags & (MXGEFW_FLAGS_TSO_CHOP |
2170 MXGEFW_FLAGS_FIRST))); 2161 MXGEFW_FLAGS_FIRST)));
2171#endif
2172 idx = ((count - 1) + tx->req) & tx->mask; 2162 idx = ((count - 1) + tx->req) & tx->mask;
2173 tx->info[idx].last = 1; 2163 tx->info[idx].last = 1;
2174 if (tx->wc_fifo == NULL) 2164 if (tx->wc_fifo == NULL)
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index 59324b1693d6..3f3896e98879 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -63,11 +63,14 @@
63 63
64#include "netxen_nic_hw.h" 64#include "netxen_nic_hw.h"
65 65
66#define NETXEN_NIC_BUILD_NO "2"
67#define _NETXEN_NIC_LINUX_MAJOR 3 66#define _NETXEN_NIC_LINUX_MAJOR 3
68#define _NETXEN_NIC_LINUX_MINOR 3 67#define _NETXEN_NIC_LINUX_MINOR 3
69#define _NETXEN_NIC_LINUX_SUBVERSION 3 68#define _NETXEN_NIC_LINUX_SUBVERSION 3
70#define NETXEN_NIC_LINUX_VERSIONID "3.3.3" "-" NETXEN_NIC_BUILD_NO 69#define NETXEN_NIC_LINUX_VERSIONID "3.3.3"
70
71#define NUM_FLASH_SECTORS (64)
72#define FLASH_SECTOR_SIZE (64 * 1024)
73#define FLASH_TOTAL_SIZE (NUM_FLASH_SECTORS * FLASH_SECTOR_SIZE)
71 74
72#define RCV_DESC_RINGSIZE \ 75#define RCV_DESC_RINGSIZE \
73 (sizeof(struct rcv_desc) * adapter->max_rx_desc_count) 76 (sizeof(struct rcv_desc) * adapter->max_rx_desc_count)
@@ -85,6 +88,7 @@
85#define NETXEN_RCV_PRODUCER_OFFSET 0 88#define NETXEN_RCV_PRODUCER_OFFSET 0
86#define NETXEN_RCV_PEG_DB_ID 2 89#define NETXEN_RCV_PEG_DB_ID 2
87#define NETXEN_HOST_DUMMY_DMA_SIZE 1024 90#define NETXEN_HOST_DUMMY_DMA_SIZE 1024
91#define FLASH_SUCCESS 0
88 92
89#define ADDR_IN_WINDOW1(off) \ 93#define ADDR_IN_WINDOW1(off) \
90 ((off > NETXEN_CRB_PCIX_HOST2) && (off < NETXEN_CRB_MAX)) ? 1 : 0 94 ((off > NETXEN_CRB_PCIX_HOST2) && (off < NETXEN_CRB_MAX)) ? 1 : 0
@@ -239,49 +243,39 @@ extern unsigned long long netxen_dma_mask;
239 243
240typedef u32 netxen_ctx_msg; 244typedef u32 netxen_ctx_msg;
241 245
242#define _netxen_set_bits(config_word, start, bits, val) {\
243 unsigned long long mask = (((1ULL << (bits)) - 1) << (start)); \
244 unsigned long long value = (val); \
245 (config_word) &= ~mask; \
246 (config_word) |= (((value) << (start)) & mask); \
247}
248
249#define netxen_set_msg_peg_id(config_word, val) \ 246#define netxen_set_msg_peg_id(config_word, val) \
250 _netxen_set_bits(config_word, 0, 2, val) 247 ((config_word) &= ~3, (config_word) |= val & 3)
251#define netxen_set_msg_privid(config_word) \ 248#define netxen_set_msg_privid(config_word) \
252 set_bit(2, (unsigned long*)&config_word) 249 ((config_word) |= 1 << 2)
253#define netxen_set_msg_count(config_word, val) \ 250#define netxen_set_msg_count(config_word, val) \
254 _netxen_set_bits(config_word, 3, 15, val) 251 ((config_word) &= ~(0x7fff<<3), (config_word) |= (val & 0x7fff) << 3)
255#define netxen_set_msg_ctxid(config_word, val) \ 252#define netxen_set_msg_ctxid(config_word, val) \
256 _netxen_set_bits(config_word, 18, 10, val) 253 ((config_word) &= ~(0x3ff<<18), (config_word) |= (val & 0x3ff) << 18)
257#define netxen_set_msg_opcode(config_word, val) \ 254#define netxen_set_msg_opcode(config_word, val) \
258 _netxen_set_bits(config_word, 28, 4, val) 255 ((config_word) &= ~(0xf<<24), (config_word) |= (val & 0xf) << 24)
259 256
260struct netxen_rcv_context { 257struct netxen_rcv_context {
261 u32 rcv_ring_addr_lo; 258 __le64 rcv_ring_addr;
262 u32 rcv_ring_addr_hi; 259 __le32 rcv_ring_size;
263 u32 rcv_ring_size; 260 __le32 rsrvd;
264 u32 rsrvd;
265}; 261};
266 262
267struct netxen_ring_ctx { 263struct netxen_ring_ctx {
268 264
269 /* one command ring */ 265 /* one command ring */
270 u64 cmd_consumer_offset; 266 __le64 cmd_consumer_offset;
271 u32 cmd_ring_addr_lo; 267 __le64 cmd_ring_addr;
272 u32 cmd_ring_addr_hi; 268 __le32 cmd_ring_size;
273 u32 cmd_ring_size; 269 __le32 rsrvd;
274 u32 rsrvd;
275 270
276 /* three receive rings */ 271 /* three receive rings */
277 struct netxen_rcv_context rcv_ctx[3]; 272 struct netxen_rcv_context rcv_ctx[3];
278 273
279 /* one status ring */ 274 /* one status ring */
280 u32 sts_ring_addr_lo; 275 __le64 sts_ring_addr;
281 u32 sts_ring_addr_hi; 276 __le32 sts_ring_size;
282 u32 sts_ring_size;
283 277
284 u32 ctx_id; 278 __le32 ctx_id;
285} __attribute__ ((aligned(64))); 279} __attribute__ ((aligned(64)));
286 280
287/* 281/*
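
The deleted _netxen_set_bits() was a generic read-modify-write on a bitfield; the replacement macros hand-expand it per field so that an explicit cpu_to_le*() can sit exactly where the word is device-visible. The old helper, reconstructed as a plain C function:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* clear `bits` bits starting at `start`, then OR in the new value */
    static void set_bits(uint64_t *word, int start, int bits, uint64_t val)
    {
            uint64_t mask = ((1ULL << bits) - 1) << start;

            *word &= ~mask;
            *word |= (val << start) & mask;
    }

    int main(void)
    {
            uint64_t w = ~0ULL;

            set_bits(&w, 3, 15, 0x1234);   /* the msg-count field: start 3, 15 bits */
            assert(((w >> 3) & 0x7fff) == 0x1234);
            printf("w = %#llx\n", (unsigned long long)w);
            return 0;
    }
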
@@ -305,81 +299,85 @@ struct netxen_ring_ctx {
305 ((cmd_desc)->port_ctxid |= ((var) & 0x0F)) 299 ((cmd_desc)->port_ctxid |= ((var) & 0x0F))
306 300
307#define netxen_set_cmd_desc_flags(cmd_desc, val) \ 301#define netxen_set_cmd_desc_flags(cmd_desc, val) \
308 _netxen_set_bits((cmd_desc)->flags_opcode, 0, 7, val) 302 ((cmd_desc)->flags_opcode &= ~cpu_to_le16(0x7f), \
303 (cmd_desc)->flags_opcode |= cpu_to_le16((val) & 0x7f))
309#define netxen_set_cmd_desc_opcode(cmd_desc, val) \ 304#define netxen_set_cmd_desc_opcode(cmd_desc, val) \
310 _netxen_set_bits((cmd_desc)->flags_opcode, 7, 6, val) 305 ((cmd_desc)->flags_opcode &= ~cpu_to_le16(0x3f<<7), \
306 (cmd_desc)->flags_opcode |= cpu_to_le16((val) & (0x3f<<7)))
311 307
312#define netxen_set_cmd_desc_num_of_buff(cmd_desc, val) \ 308#define netxen_set_cmd_desc_num_of_buff(cmd_desc, val) \
313 _netxen_set_bits((cmd_desc)->num_of_buffers_total_length, 0, 8, val); 309 ((cmd_desc)->num_of_buffers_total_length &= ~cpu_to_le32(0xff), \
310 (cmd_desc)->num_of_buffers_total_length |= cpu_to_le32((val) & 0xff))
314#define netxen_set_cmd_desc_totallength(cmd_desc, val) \ 311#define netxen_set_cmd_desc_totallength(cmd_desc, val) \
315 _netxen_set_bits((cmd_desc)->num_of_buffers_total_length, 8, 24, val); 312 ((cmd_desc)->num_of_buffers_total_length &= cpu_to_le32(0xff), \
313 (cmd_desc)->num_of_buffers_total_length |= cpu_to_le32(val << 24))
316 314
317#define netxen_get_cmd_desc_opcode(cmd_desc) \ 315#define netxen_get_cmd_desc_opcode(cmd_desc) \
318 (((cmd_desc)->flags_opcode >> 7) & 0x003F) 316 ((le16_to_cpu((cmd_desc)->flags_opcode) >> 7) & 0x003F)
319#define netxen_get_cmd_desc_totallength(cmd_desc) \ 317#define netxen_get_cmd_desc_totallength(cmd_desc) \
320 (((cmd_desc)->num_of_buffers_total_length >> 8) & 0x0FFFFFF) 318 (le32_to_cpu((cmd_desc)->num_of_buffers_total_length) >> 8)
321 319
322struct cmd_desc_type0 { 320struct cmd_desc_type0 {
323 u8 tcp_hdr_offset; /* For LSO only */ 321 u8 tcp_hdr_offset; /* For LSO only */
324 u8 ip_hdr_offset; /* For LSO only */ 322 u8 ip_hdr_offset; /* For LSO only */
325 /* Bit pattern: 0-6 flags, 7-12 opcode, 13-15 unused */ 323 /* Bit pattern: 0-6 flags, 7-12 opcode, 13-15 unused */
326 u16 flags_opcode; 324 __le16 flags_opcode;
327 /* Bit pattern: 0-7 total number of segments, 325 /* Bit pattern: 0-7 total number of segments,
328 8-31 Total size of the packet */ 326 8-31 Total size of the packet */
329 u32 num_of_buffers_total_length; 327 __le32 num_of_buffers_total_length;
330 union { 328 union {
331 struct { 329 struct {
332 u32 addr_low_part2; 330 __le32 addr_low_part2;
333 u32 addr_high_part2; 331 __le32 addr_high_part2;
334 }; 332 };
335 u64 addr_buffer2; 333 __le64 addr_buffer2;
336 }; 334 };
337 335
338 u16 reference_handle; /* changed to u16 to add mss */ 336 __le16 reference_handle; /* changed to u16 to add mss */
339 u16 mss; /* passed by NDIS_PACKET for LSO */ 337 __le16 mss; /* passed by NDIS_PACKET for LSO */
340 /* Bit pattern 0-3 port, 0-3 ctx id */ 338 /* Bit pattern 0-3 port, 0-3 ctx id */
341 u8 port_ctxid; 339 u8 port_ctxid;
342 u8 total_hdr_length; /* LSO only : MAC+IP+TCP Hdr size */ 340 u8 total_hdr_length; /* LSO only : MAC+IP+TCP Hdr size */
 343 	u16 conn_id;	/* IPSec offload only */	 341 	__le16 conn_id;	/* IPSec offload only */
344 342
345 union { 343 union {
346 struct { 344 struct {
347 u32 addr_low_part3; 345 __le32 addr_low_part3;
348 u32 addr_high_part3; 346 __le32 addr_high_part3;
349 }; 347 };
350 u64 addr_buffer3; 348 __le64 addr_buffer3;
351 }; 349 };
352 union { 350 union {
353 struct { 351 struct {
354 u32 addr_low_part1; 352 __le32 addr_low_part1;
355 u32 addr_high_part1; 353 __le32 addr_high_part1;
356 }; 354 };
357 u64 addr_buffer1; 355 __le64 addr_buffer1;
358 }; 356 };
359 357
360 u16 buffer1_length; 358 __le16 buffer1_length;
361 u16 buffer2_length; 359 __le16 buffer2_length;
362 u16 buffer3_length; 360 __le16 buffer3_length;
363 u16 buffer4_length; 361 __le16 buffer4_length;
364 362
365 union { 363 union {
366 struct { 364 struct {
367 u32 addr_low_part4; 365 __le32 addr_low_part4;
368 u32 addr_high_part4; 366 __le32 addr_high_part4;
369 }; 367 };
370 u64 addr_buffer4; 368 __le64 addr_buffer4;
371 }; 369 };
372 370
373 u64 unused; 371 __le64 unused;
374 372
375} __attribute__ ((aligned(64))); 373} __attribute__ ((aligned(64)));
376 374
 377/* Note: sizeof(rcv_desc) should always be a multiple of 2 */	 375/* Note: sizeof(rcv_desc) should always be a multiple of 2 */
378struct rcv_desc { 376struct rcv_desc {
379 u16 reference_handle; 377 __le16 reference_handle;
380 u16 reserved; 378 __le16 reserved;
381 u32 buffer_length; /* allocated buffer length (usually 2K) */ 379 __le32 buffer_length; /* allocated buffer length (usually 2K) */
382 u64 addr_buffer; 380 __le64 addr_buffer;
383}; 381};
384 382
385/* opcode field in status_desc */ 383/* opcode field in status_desc */
@@ -405,36 +403,36 @@ struct rcv_desc {
405 (((status_desc)->lro & 0x80) >> 7) 403 (((status_desc)->lro & 0x80) >> 7)
406 404
407#define netxen_get_sts_port(status_desc) \ 405#define netxen_get_sts_port(status_desc) \
408 ((status_desc)->status_desc_data & 0x0F) 406 (le64_to_cpu((status_desc)->status_desc_data) & 0x0F)
409#define netxen_get_sts_status(status_desc) \ 407#define netxen_get_sts_status(status_desc) \
410 (((status_desc)->status_desc_data >> 4) & 0x0F) 408 ((le64_to_cpu((status_desc)->status_desc_data) >> 4) & 0x0F)
411#define netxen_get_sts_type(status_desc) \ 409#define netxen_get_sts_type(status_desc) \
412 (((status_desc)->status_desc_data >> 8) & 0x0F) 410 ((le64_to_cpu((status_desc)->status_desc_data) >> 8) & 0x0F)
413#define netxen_get_sts_totallength(status_desc) \ 411#define netxen_get_sts_totallength(status_desc) \
414 (((status_desc)->status_desc_data >> 12) & 0xFFFF) 412 ((le64_to_cpu((status_desc)->status_desc_data) >> 12) & 0xFFFF)
415#define netxen_get_sts_refhandle(status_desc) \ 413#define netxen_get_sts_refhandle(status_desc) \
416 (((status_desc)->status_desc_data >> 28) & 0xFFFF) 414 ((le64_to_cpu((status_desc)->status_desc_data) >> 28) & 0xFFFF)
417#define netxen_get_sts_prot(status_desc) \ 415#define netxen_get_sts_prot(status_desc) \
418 (((status_desc)->status_desc_data >> 44) & 0x0F) 416 ((le64_to_cpu((status_desc)->status_desc_data) >> 44) & 0x0F)
419#define netxen_get_sts_owner(status_desc) \ 417#define netxen_get_sts_owner(status_desc) \
420 (((status_desc)->status_desc_data >> 56) & 0x03) 418 ((le64_to_cpu((status_desc)->status_desc_data) >> 56) & 0x03)
421#define netxen_get_sts_opcode(status_desc) \ 419#define netxen_get_sts_opcode(status_desc) \
422 (((status_desc)->status_desc_data >> 58) & 0x03F) 420 ((le64_to_cpu((status_desc)->status_desc_data) >> 58) & 0x03F)
423 421
424#define netxen_clear_sts_owner(status_desc) \ 422#define netxen_clear_sts_owner(status_desc) \
425 ((status_desc)->status_desc_data &= \ 423 ((status_desc)->status_desc_data &= \
426 ~(((unsigned long long)3) << 56 )) 424 ~cpu_to_le64(((unsigned long long)3) << 56 ))
427#define netxen_set_sts_owner(status_desc, val) \ 425#define netxen_set_sts_owner(status_desc, val) \
428 ((status_desc)->status_desc_data |= \ 426 ((status_desc)->status_desc_data |= \
429 (((unsigned long long)((val) & 0x3)) << 56 )) 427 cpu_to_le64(((unsigned long long)((val) & 0x3)) << 56 ))
430 428
431struct status_desc { 429struct status_desc {
432 /* Bit pattern: 0-3 port, 4-7 status, 8-11 type, 12-27 total_length 430 /* Bit pattern: 0-3 port, 4-7 status, 8-11 type, 12-27 total_length
433 28-43 reference_handle, 44-47 protocol, 48-52 unused 431 28-43 reference_handle, 44-47 protocol, 48-52 unused
434 53-55 desc_cnt, 56-57 owner, 58-63 opcode 432 53-55 desc_cnt, 56-57 owner, 58-63 opcode
435 */ 433 */
436 u64 status_desc_data; 434 __le64 status_desc_data;
437 u32 hash_value; 435 __le32 hash_value;
438 u8 hash_type; 436 u8 hash_type;
439 u8 msg_type; 437 u8 msg_type;
440 u8 unused; 438 u8 unused;
@@ -1005,9 +1003,9 @@ void netxen_niu_gbe_set_mii_mode(struct netxen_adapter *adapter, int port,
1005void netxen_niu_gbe_set_gmii_mode(struct netxen_adapter *adapter, int port, 1003void netxen_niu_gbe_set_gmii_mode(struct netxen_adapter *adapter, int port,
1006 long enable); 1004 long enable);
1007int netxen_niu_gbe_phy_read(struct netxen_adapter *adapter, long phy, long reg, 1005int netxen_niu_gbe_phy_read(struct netxen_adapter *adapter, long phy, long reg,
1008 __le32 * readval); 1006 __u32 * readval);
1009int netxen_niu_gbe_phy_write(struct netxen_adapter *adapter, long phy, 1007int netxen_niu_gbe_phy_write(struct netxen_adapter *adapter, long phy,
1010 long reg, __le32 val); 1008 long reg, __u32 val);
1011 1009
1012/* Functions available from netxen_nic_hw.c */ 1010/* Functions available from netxen_nic_hw.c */
1013int netxen_nic_set_mtu_xgb(struct netxen_port *port, int new_mtu); 1011int netxen_nic_set_mtu_xgb(struct netxen_port *port, int new_mtu);
@@ -1034,6 +1032,15 @@ void netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val);
1034void netxen_load_firmware(struct netxen_adapter *adapter); 1032void netxen_load_firmware(struct netxen_adapter *adapter);
1035int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose); 1033int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose);
1036int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp); 1034int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp);
1035int netxen_rom_fast_read_words(struct netxen_adapter *adapter, int addr,
1036 u8 *bytes, size_t size);
1037int netxen_rom_fast_write_words(struct netxen_adapter *adapter, int addr,
1038 u8 *bytes, size_t size);
1039int netxen_flash_unlock(struct netxen_adapter *adapter);
1040int netxen_backup_crbinit(struct netxen_adapter *adapter);
1041int netxen_flash_erase_secondary(struct netxen_adapter *adapter);
1042int netxen_flash_erase_primary(struct netxen_adapter *adapter);
1043
1037int netxen_rom_fast_write(struct netxen_adapter *adapter, int addr, int data); 1044int netxen_rom_fast_write(struct netxen_adapter *adapter, int addr, int data);
1038int netxen_rom_se(struct netxen_adapter *adapter, int addr); 1045int netxen_rom_se(struct netxen_adapter *adapter, int addr);
1039int netxen_do_rom_se(struct netxen_adapter *adapter, int addr); 1046int netxen_do_rom_se(struct netxen_adapter *adapter, int addr);
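
The __le16/__le32/__le64 annotations throughout this header record that the NIC reads descriptor memory as little-endian regardless of host byte order, so every access must pass through cpu_to_le*()/le*_to_cpu(); sparse can then flag any missing conversion. A userspace sketch of one accessor, mirroring netxen_get_sts_totallength() above (the swap is the identity on a little-endian host, which is an assumption of this sketch):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* stand-in for le64_to_cpu(): identity here; a big-endian build
     * would byte-swap */
    static uint64_t le64_to_cpu_sketch(uint64_t v)
    {
            return v;
    }

    /* bits 12-27 of the status word, per the layout comment above */
    static unsigned int sts_totallength(uint64_t status_desc_data)
    {
            return (le64_to_cpu_sketch(status_desc_data) >> 12) & 0xFFFF;
    }

    int main(void)
    {
            uint64_t desc = (uint64_t)1500 << 12;   /* a fake total_length */

            assert(sts_totallength(desc) == 1500);
            printf("total length = %u\n", sts_totallength(desc));
            return 0;
    }
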
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c
index 34044616b3c8..cc0efe213e01 100644
--- a/drivers/net/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/netxen/netxen_nic_ethtool.c
@@ -32,6 +32,7 @@
32 */ 32 */
33 33
34#include <linux/types.h> 34#include <linux/types.h>
35#include <linux/delay.h>
35#include <asm/uaccess.h> 36#include <asm/uaccess.h>
36#include <linux/pci.h> 37#include <linux/pci.h>
37#include <asm/io.h> 38#include <asm/io.h>
@@ -94,17 +95,7 @@ static const char netxen_nic_gstrings_test[][ETH_GSTRING_LEN] = {
94 95
95static int netxen_nic_get_eeprom_len(struct net_device *dev) 96static int netxen_nic_get_eeprom_len(struct net_device *dev)
96{ 97{
97 struct netxen_port *port = netdev_priv(dev); 98 return FLASH_TOTAL_SIZE;
98 struct netxen_adapter *adapter = port->adapter;
99 int n;
100
101 if ((netxen_rom_fast_read(adapter, 0, &n) == 0)
102 && (n & NETXEN_ROM_ROUNDUP)) {
103 n &= ~NETXEN_ROM_ROUNDUP;
104 if (n < NETXEN_MAX_EEPROM_LEN)
105 return n;
106 }
107 return 0;
108} 99}
109 100
110static void 101static void
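
With the geometry constants added to netxen_nic.h, get_eeprom_len() now reports the fixed flash size instead of probing the ROM: 64 sectors of 64 KiB, i.e. 4 MiB. This is the length the ethtool eeprom interface will see. The arithmetic, as a quick check:

    #include <stdio.h>

    #define NUM_FLASH_SECTORS (64)
    #define FLASH_SECTOR_SIZE (64 * 1024)
    #define FLASH_TOTAL_SIZE  (NUM_FLASH_SECTORS * FLASH_SECTOR_SIZE)

    int main(void)
    {
            printf("%d bytes = %d KiB\n", FLASH_TOTAL_SIZE,
                   FLASH_TOTAL_SIZE / 1024);   /* 4194304 bytes = 4096 KiB */
            return 0;
    }
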
@@ -218,7 +209,7 @@ netxen_nic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
218{ 209{
219 struct netxen_port *port = netdev_priv(dev); 210 struct netxen_port *port = netdev_priv(dev);
220 struct netxen_adapter *adapter = port->adapter; 211 struct netxen_adapter *adapter = port->adapter;
221 __le32 status; 212 __u32 status;
222 213
223 /* read which mode */ 214 /* read which mode */
224 if (adapter->ahw.board_type == NETXEN_NIC_GBE) { 215 if (adapter->ahw.board_type == NETXEN_NIC_GBE) {
@@ -226,7 +217,7 @@ netxen_nic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
226 if (adapter->phy_write 217 if (adapter->phy_write
227 && adapter->phy_write(adapter, port->portnum, 218 && adapter->phy_write(adapter, port->portnum,
228 NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG, 219 NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG,
229 (__le32) ecmd->autoneg) != 0) 220 ecmd->autoneg) != 0)
230 return -EIO; 221 return -EIO;
231 else 222 else
232 port->link_autoneg = ecmd->autoneg; 223 port->link_autoneg = ecmd->autoneg;
@@ -279,7 +270,7 @@ static int netxen_nic_get_regs_len(struct net_device *dev)
279} 270}
280 271
281struct netxen_niu_regs { 272struct netxen_niu_regs {
282 __le32 reg[NETXEN_NIC_REGS_COUNT]; 273 __u32 reg[NETXEN_NIC_REGS_COUNT];
283}; 274};
284 275
285static struct netxen_niu_regs niu_registers[] = { 276static struct netxen_niu_regs niu_registers[] = {
@@ -372,7 +363,7 @@ netxen_nic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
372{ 363{
373 struct netxen_port *port = netdev_priv(dev); 364 struct netxen_port *port = netdev_priv(dev);
374 struct netxen_adapter *adapter = port->adapter; 365 struct netxen_adapter *adapter = port->adapter;
375 __le32 mode, *regs_buff = p; 366 __u32 mode, *regs_buff = p;
376 void __iomem *addr; 367 void __iomem *addr;
377 int i, window; 368 int i, window;
378 369
@@ -415,7 +406,7 @@ static u32 netxen_nic_get_link(struct net_device *dev)
415{ 406{
416 struct netxen_port *port = netdev_priv(dev); 407 struct netxen_port *port = netdev_priv(dev);
417 struct netxen_adapter *adapter = port->adapter; 408 struct netxen_adapter *adapter = port->adapter;
418 __le32 status; 409 __u32 status;
419 410
420 /* read which mode */ 411 /* read which mode */
421 if (adapter->ahw.board_type == NETXEN_NIC_GBE) { 412 if (adapter->ahw.board_type == NETXEN_NIC_GBE) {
@@ -440,18 +431,92 @@ netxen_nic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
440 struct netxen_port *port = netdev_priv(dev); 431 struct netxen_port *port = netdev_priv(dev);
441 struct netxen_adapter *adapter = port->adapter; 432 struct netxen_adapter *adapter = port->adapter;
442 int offset; 433 int offset;
434 int ret;
443 435
444 if (eeprom->len == 0) 436 if (eeprom->len == 0)
445 return -EINVAL; 437 return -EINVAL;
446 438
447 eeprom->magic = (port->pdev)->vendor | ((port->pdev)->device << 16); 439 eeprom->magic = (port->pdev)->vendor | ((port->pdev)->device << 16);
448 for (offset = 0; offset < eeprom->len; offset++) 440 offset = eeprom->offset;
449 if (netxen_rom_fast_read 441
450 (adapter, (8 * offset) + 8, (int *)eeprom->data) == -1) 442 ret = netxen_rom_fast_read_words(adapter, offset, bytes,
451 return -EIO; 443 eeprom->len);
444 if (ret < 0)
445 return ret;
446
452 return 0; 447 return 0;
453} 448}
454 449
450static int
451netxen_nic_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
452 u8 * bytes)
453{
454 struct netxen_port *port = netdev_priv(dev);
455 struct netxen_adapter *adapter = port->adapter;
456 int offset = eeprom->offset;
457 static int flash_start;
458 static int ready_to_flash;
459 int ret;
460
461 if (flash_start == 0) {
462 ret = netxen_flash_unlock(adapter);
463 if (ret < 0) {
464 printk(KERN_ERR "%s: Flash unlock failed.\n",
465 netxen_nic_driver_name);
466 return ret;
467 }
 468 	printk(KERN_INFO "%s: flash unlocked.\n",
469 netxen_nic_driver_name);
470 ret = netxen_flash_erase_secondary(adapter);
471 if (ret != FLASH_SUCCESS) {
472 printk(KERN_ERR "%s: Flash erase failed.\n",
473 netxen_nic_driver_name);
474 return ret;
475 }
476 printk(KERN_INFO "%s: secondary flash erased successfully.\n",
477 netxen_nic_driver_name);
478 flash_start = 1;
479 return 0;
480 }
481
482 if (offset == BOOTLD_START) {
483 ret = netxen_flash_erase_primary(adapter);
484 if (ret != FLASH_SUCCESS) {
485 printk(KERN_ERR "%s: Flash erase failed.\n",
486 netxen_nic_driver_name);
487 return ret;
488 }
489
490 ret = netxen_rom_se(adapter, USER_START);
491 if (ret != FLASH_SUCCESS)
492 return ret;
493 ret = netxen_rom_se(adapter, FIXED_START);
494 if (ret != FLASH_SUCCESS)
495 return ret;
496
497 printk(KERN_INFO "%s: primary flash erased successfully\n",
498 netxen_nic_driver_name);
499
500 ret = netxen_backup_crbinit(adapter);
501 if (ret != FLASH_SUCCESS) {
502 printk(KERN_ERR "%s: CRBinit backup failed.\n",
503 netxen_nic_driver_name);
504 return ret;
505 }
506 printk(KERN_INFO "%s: CRBinit backup done.\n",
507 netxen_nic_driver_name);
508 ready_to_flash = 1;
509 }
510
511 if (!ready_to_flash) {
512 printk(KERN_ERR "%s: Invalid write sequence, returning...\n",
513 netxen_nic_driver_name);
514 return -EINVAL;
515 }
516
517 return netxen_rom_fast_write_words(adapter, offset, bytes, eeprom->len);
518}
519
455static void 520static void
456netxen_nic_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring) 521netxen_nic_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
457{ 522{
@@ -482,13 +547,13 @@ netxen_nic_get_pauseparam(struct net_device *dev,
482{ 547{
483 struct netxen_port *port = netdev_priv(dev); 548 struct netxen_port *port = netdev_priv(dev);
484 struct netxen_adapter *adapter = port->adapter; 549 struct netxen_adapter *adapter = port->adapter;
485 __le32 val; 550 __u32 val;
486 551
487 if (adapter->ahw.board_type == NETXEN_NIC_GBE) { 552 if (adapter->ahw.board_type == NETXEN_NIC_GBE) {
488 /* get flow control settings */ 553 /* get flow control settings */
489 netxen_nic_read_w0(adapter, 554 netxen_nic_read_w0(adapter,
490 NETXEN_NIU_GB_MAC_CONFIG_0(port->portnum), 555 NETXEN_NIU_GB_MAC_CONFIG_0(port->portnum),
491 (u32 *) & val); 556 &val);
492 pause->rx_pause = netxen_gb_get_rx_flowctl(val); 557 pause->rx_pause = netxen_gb_get_rx_flowctl(val);
493 pause->tx_pause = netxen_gb_get_tx_flowctl(val); 558 pause->tx_pause = netxen_gb_get_tx_flowctl(val);
494 /* get autoneg settings */ 559 /* get autoneg settings */
@@ -502,7 +567,7 @@ netxen_nic_set_pauseparam(struct net_device *dev,
502{ 567{
503 struct netxen_port *port = netdev_priv(dev); 568 struct netxen_port *port = netdev_priv(dev);
504 struct netxen_adapter *adapter = port->adapter; 569 struct netxen_adapter *adapter = port->adapter;
505 __le32 val; 570 __u32 val;
506 unsigned int autoneg; 571 unsigned int autoneg;
507 572
508 /* read mode */ 573 /* read mode */
@@ -522,13 +587,13 @@ netxen_nic_set_pauseparam(struct net_device *dev,
522 587
523 netxen_nic_write_w0(adapter, 588 netxen_nic_write_w0(adapter,
524 NETXEN_NIU_GB_MAC_CONFIG_0(port->portnum), 589 NETXEN_NIU_GB_MAC_CONFIG_0(port->portnum),
525 *(u32 *) (&val)); 590 *&val);
526 /* set autoneg */ 591 /* set autoneg */
527 autoneg = pause->autoneg; 592 autoneg = pause->autoneg;
528 if (adapter->phy_write 593 if (adapter->phy_write
529 && adapter->phy_write(adapter, port->portnum, 594 && adapter->phy_write(adapter, port->portnum,
530 NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG, 595 NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG,
531 (__le32) autoneg) != 0) 596 autoneg) != 0)
532 return -EIO; 597 return -EIO;
533 else { 598 else {
534 port->link_autoneg = pause->autoneg; 599 port->link_autoneg = pause->autoneg;
@@ -543,7 +608,7 @@ static int netxen_nic_reg_test(struct net_device *dev)
543 struct netxen_port *port = netdev_priv(dev); 608 struct netxen_port *port = netdev_priv(dev);
544 struct netxen_adapter *adapter = port->adapter; 609 struct netxen_adapter *adapter = port->adapter;
545 u32 data_read, data_written, save; 610 u32 data_read, data_written, save;
546 __le32 mode; 611 __u32 mode;
547 612
548 /* 613 /*
549 * first test the "Read Only" registers by writing which mode 614 * first test the "Read Only" registers by writing which mode
@@ -721,6 +786,7 @@ struct ethtool_ops netxen_nic_ethtool_ops = {
721 .get_link = netxen_nic_get_link, 786 .get_link = netxen_nic_get_link,
722 .get_eeprom_len = netxen_nic_get_eeprom_len, 787 .get_eeprom_len = netxen_nic_get_eeprom_len,
723 .get_eeprom = netxen_nic_get_eeprom, 788 .get_eeprom = netxen_nic_get_eeprom,
789 .set_eeprom = netxen_nic_set_eeprom,
724 .get_ringparam = netxen_nic_get_ringparam, 790 .get_ringparam = netxen_nic_get_ringparam,
725 .get_pauseparam = netxen_nic_get_pauseparam, 791 .get_pauseparam = netxen_nic_get_pauseparam,
726 .set_pauseparam = netxen_nic_set_pauseparam, 792 .set_pauseparam = netxen_nic_set_pauseparam,
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index 191e2336e323..f263232f499f 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -95,7 +95,7 @@ void netxen_nic_set_multi(struct net_device *netdev)
95 struct netxen_port *port = netdev_priv(netdev); 95 struct netxen_port *port = netdev_priv(netdev);
96 struct netxen_adapter *adapter = port->adapter; 96 struct netxen_adapter *adapter = port->adapter;
97 struct dev_mc_list *mc_ptr; 97 struct dev_mc_list *mc_ptr;
98 __le32 netxen_mac_addr_cntl_data = 0; 98 __u32 netxen_mac_addr_cntl_data = 0;
99 99
100 mc_ptr = netdev->mc_list; 100 mc_ptr = netdev->mc_list;
101 if (netdev->flags & IFF_PROMISC) { 101 if (netdev->flags & IFF_PROMISC) {
@@ -236,8 +236,9 @@ int netxen_nic_hw_resources(struct netxen_adapter *adapter)
236 } 236 }
237 memset(addr, 0, sizeof(struct netxen_ring_ctx)); 237 memset(addr, 0, sizeof(struct netxen_ring_ctx));
238 adapter->ctx_desc = (struct netxen_ring_ctx *)addr; 238 adapter->ctx_desc = (struct netxen_ring_ctx *)addr;
239 adapter->ctx_desc->cmd_consumer_offset = adapter->ctx_desc_phys_addr 239 adapter->ctx_desc->cmd_consumer_offset =
240 + sizeof(struct netxen_ring_ctx); 240 cpu_to_le64(adapter->ctx_desc_phys_addr +
241 sizeof(struct netxen_ring_ctx));
241 adapter->cmd_consumer = (uint32_t *) (((char *)addr) + 242 adapter->cmd_consumer = (uint32_t *) (((char *)addr) +
242 sizeof(struct netxen_ring_ctx)); 243 sizeof(struct netxen_ring_ctx));
243 244
@@ -253,11 +254,10 @@ int netxen_nic_hw_resources(struct netxen_adapter *adapter)
253 return -ENOMEM; 254 return -ENOMEM;
254 } 255 }
255 256
256 adapter->ctx_desc->cmd_ring_addr_lo = 257 adapter->ctx_desc->cmd_ring_addr =
257 hw->cmd_desc_phys_addr & 0xffffffffUL; 258 cpu_to_le64(hw->cmd_desc_phys_addr);
258 adapter->ctx_desc->cmd_ring_addr_hi = 259 adapter->ctx_desc->cmd_ring_size =
259 ((u64) hw->cmd_desc_phys_addr >> 32); 260 cpu_to_le32(adapter->max_tx_desc_count);
260 adapter->ctx_desc->cmd_ring_size = adapter->max_tx_desc_count;
261 261
262 hw->cmd_desc_head = (struct cmd_desc_type0 *)addr; 262 hw->cmd_desc_head = (struct cmd_desc_type0 *)addr;
263 263
@@ -278,12 +278,10 @@ int netxen_nic_hw_resources(struct netxen_adapter *adapter)
278 return err; 278 return err;
279 } 279 }
280 rcv_desc->desc_head = (struct rcv_desc *)addr; 280 rcv_desc->desc_head = (struct rcv_desc *)addr;
281 adapter->ctx_desc->rcv_ctx[ring].rcv_ring_addr_lo = 281 adapter->ctx_desc->rcv_ctx[ring].rcv_ring_addr =
282 rcv_desc->phys_addr & 0xffffffffUL; 282 cpu_to_le64(rcv_desc->phys_addr);
283 adapter->ctx_desc->rcv_ctx[ring].rcv_ring_addr_hi =
284 ((u64) rcv_desc->phys_addr >> 32);
285 adapter->ctx_desc->rcv_ctx[ring].rcv_ring_size = 283 adapter->ctx_desc->rcv_ctx[ring].rcv_ring_size =
286 rcv_desc->max_rx_desc_count; 284 cpu_to_le32(rcv_desc->max_rx_desc_count);
287 } 285 }
288 286
289 addr = netxen_alloc(adapter->ahw.pdev, STATUS_DESC_RINGSIZE, 287 addr = netxen_alloc(adapter->ahw.pdev, STATUS_DESC_RINGSIZE,
@@ -297,11 +295,10 @@ int netxen_nic_hw_resources(struct netxen_adapter *adapter)
297 return err; 295 return err;
298 } 296 }
299 recv_ctx->rcv_status_desc_head = (struct status_desc *)addr; 297 recv_ctx->rcv_status_desc_head = (struct status_desc *)addr;
300 adapter->ctx_desc->sts_ring_addr_lo = 298 adapter->ctx_desc->sts_ring_addr =
301 recv_ctx->rcv_status_desc_phys_addr & 0xffffffffUL; 299 cpu_to_le64(recv_ctx->rcv_status_desc_phys_addr);
302 adapter->ctx_desc->sts_ring_addr_hi = 300 adapter->ctx_desc->sts_ring_size =
303 ((u64) recv_ctx->rcv_status_desc_phys_addr >> 32); 301 cpu_to_le32(adapter->max_rx_desc_count);
304 adapter->ctx_desc->sts_ring_size = adapter->max_rx_desc_count;
305 302
306 } 303 }
307 /* Window = 1 */ 304 /* Window = 1 */
@@ -387,10 +384,6 @@ void netxen_tso_check(struct netxen_adapter *adapter,
387 } 384 }
388 adapter->stats.xmitcsummed++; 385 adapter->stats.xmitcsummed++;
389 desc->tcp_hdr_offset = skb->h.raw - skb->data; 386 desc->tcp_hdr_offset = skb->h.raw - skb->data;
390 netxen_set_cmd_desc_totallength(desc,
391 cpu_to_le32
392 (netxen_get_cmd_desc_totallength
393 (desc)));
394 desc->ip_hdr_offset = skb->nh.raw - skb->data; 387 desc->ip_hdr_offset = skb->nh.raw - skb->data;
395} 388}
396 389
@@ -867,9 +860,9 @@ netxen_crb_writelit_adapter(struct netxen_adapter *adapter, unsigned long off,
867void netxen_nic_set_link_parameters(struct netxen_port *port) 860void netxen_nic_set_link_parameters(struct netxen_port *port)
868{ 861{
869 struct netxen_adapter *adapter = port->adapter; 862 struct netxen_adapter *adapter = port->adapter;
870 __le32 status; 863 __u32 status;
871 __le32 autoneg; 864 __u32 autoneg;
872 __le32 mode; 865 __u32 mode;
873 866
874 netxen_nic_read_w0(adapter, NETXEN_NIU_MODE, &mode); 867 netxen_nic_read_w0(adapter, NETXEN_NIU_MODE, &mode);
875 if (netxen_get_niu_enable_ge(mode)) { /* Gb 10/100/1000 Mbps mode */ 868 if (netxen_get_niu_enable_ge(mode)) { /* Gb 10/100/1000 Mbps mode */
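The netxen_nic_hw.c hunks replace the split lo/hi stores of DMA addresses into the ring context with single annotated little-endian stores, so the descriptor has the same layout on big- and little-endian hosts and sparse can check the conversions. A minimal sketch of the pattern, using an illustrative structure rather than the driver's real netxen_ring_ctx:

        #include <linux/types.h>
        #include <asm/byteorder.h>

        /* Illustrative only; field names do not match netxen_ring_ctx. */
        struct demo_ring_ctx {
                __le64 ring_addr;       /* device reads this as little-endian */
                __le32 ring_size;
        };

        static void demo_fill_ctx(struct demo_ring_ctx *ctx, u64 phys, u32 count)
        {
                /* One 64-bit store replaces the old lo/hi split. */
                ctx->ring_addr = cpu_to_le64(phys);
                ctx->ring_size = cpu_to_le32(count);
        }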
diff --git a/drivers/net/netxen/netxen_nic_hw.h b/drivers/net/netxen/netxen_nic_hw.h
index 0685633a9c1e..ab1112eb1b0d 100644
--- a/drivers/net/netxen/netxen_nic_hw.h
+++ b/drivers/net/netxen/netxen_nic_hw.h
@@ -124,28 +124,28 @@ typedef enum {
124 */ 124 */
125 125
126#define netxen_gb_enable_tx(config_word) \ 126#define netxen_gb_enable_tx(config_word) \
127 set_bit(0, (unsigned long*)(&config_word)) 127 ((config_word) |= 1 << 0)
128#define netxen_gb_enable_rx(config_word) \ 128#define netxen_gb_enable_rx(config_word) \
129 set_bit(2, (unsigned long*)(&config_word)) 129 ((config_word) |= 1 << 2)
130#define netxen_gb_tx_flowctl(config_word) \ 130#define netxen_gb_tx_flowctl(config_word) \
131 set_bit(4, (unsigned long*)(&config_word)) 131 ((config_word) |= 1 << 4)
132#define netxen_gb_rx_flowctl(config_word) \ 132#define netxen_gb_rx_flowctl(config_word) \
133 set_bit(5, (unsigned long*)(&config_word)) 133 ((config_word) |= 1 << 5)
134#define netxen_gb_tx_reset_pb(config_word) \ 134#define netxen_gb_tx_reset_pb(config_word) \
135 set_bit(16, (unsigned long*)(&config_word)) 135 ((config_word) |= 1 << 16)
136#define netxen_gb_rx_reset_pb(config_word) \ 136#define netxen_gb_rx_reset_pb(config_word) \
137 set_bit(17, (unsigned long*)(&config_word)) 137 ((config_word) |= 1 << 17)
138#define netxen_gb_tx_reset_mac(config_word) \ 138#define netxen_gb_tx_reset_mac(config_word) \
139 set_bit(18, (unsigned long*)(&config_word)) 139 ((config_word) |= 1 << 18)
140#define netxen_gb_rx_reset_mac(config_word) \ 140#define netxen_gb_rx_reset_mac(config_word) \
141 set_bit(19, (unsigned long*)(&config_word)) 141 ((config_word) |= 1 << 19)
142#define netxen_gb_soft_reset(config_word) \ 142#define netxen_gb_soft_reset(config_word) \
143 set_bit(31, (unsigned long*)(&config_word)) 143 ((config_word) |= 1 << 31)
144 144
145#define netxen_gb_unset_tx_flowctl(config_word) \ 145#define netxen_gb_unset_tx_flowctl(config_word) \
146 clear_bit(4, (unsigned long *)(&config_word)) 146 ((config_word) &= ~(1 << 4))
147#define netxen_gb_unset_rx_flowctl(config_word) \ 147#define netxen_gb_unset_rx_flowctl(config_word) \
148 clear_bit(5, (unsigned long*)(&config_word)) 148 ((config_word) &= ~(1 << 5))
149 149
150#define netxen_gb_get_tx_synced(config_word) \ 150#define netxen_gb_get_tx_synced(config_word) \
151 _netxen_crb_get_bit((config_word), 1) 151 _netxen_crb_get_bit((config_word), 1)
@@ -171,15 +171,15 @@ typedef enum {
171 */ 171 */
172 172
173#define netxen_gb_set_duplex(config_word) \ 173#define netxen_gb_set_duplex(config_word) \
174 set_bit(0, (unsigned long*)&config_word) 174 ((config_word) |= 1 << 0)
175#define netxen_gb_set_crc_enable(config_word) \ 175#define netxen_gb_set_crc_enable(config_word) \
176 set_bit(1, (unsigned long*)&config_word) 176 ((config_word) |= 1 << 1)
177#define netxen_gb_set_padshort(config_word) \ 177#define netxen_gb_set_padshort(config_word) \
178 set_bit(2, (unsigned long*)&config_word) 178 ((config_word) |= 1 << 2)
179#define netxen_gb_set_checklength(config_word) \ 179#define netxen_gb_set_checklength(config_word) \
180 set_bit(4, (unsigned long*)&config_word) 180 ((config_word) |= 1 << 4)
181#define netxen_gb_set_hugeframes(config_word) \ 181#define netxen_gb_set_hugeframes(config_word) \
182 set_bit(5, (unsigned long*)&config_word) 182 ((config_word) |= 1 << 5)
183#define netxen_gb_set_preamblelen(config_word, val) \ 183#define netxen_gb_set_preamblelen(config_word, val) \
184 ((config_word) |= ((val) << 12) & 0xF000) 184 ((config_word) |= ((val) << 12) & 0xF000)
185#define netxen_gb_set_intfmode(config_word, val) \ 185#define netxen_gb_set_intfmode(config_word, val) \
@@ -190,9 +190,9 @@ typedef enum {
190#define netxen_gb_set_mii_mgmt_clockselect(config_word, val) \ 190#define netxen_gb_set_mii_mgmt_clockselect(config_word, val) \
191 ((config_word) |= ((val) & 0x07)) 191 ((config_word) |= ((val) & 0x07))
192#define netxen_gb_mii_mgmt_reset(config_word) \ 192#define netxen_gb_mii_mgmt_reset(config_word) \
193 set_bit(31, (unsigned long*)&config_word) 193 ((config_word) |= 1 << 31)
194#define netxen_gb_mii_mgmt_unset(config_word) \ 194#define netxen_gb_mii_mgmt_unset(config_word) \
195 clear_bit(31, (unsigned long*)&config_word) 195 ((config_word) &= ~(1 << 31))
196 196
197/* 197/*
198 * NIU GB MII Mgmt Command Register (applies to GB0, GB1, GB2, GB3) 198 * NIU GB MII Mgmt Command Register (applies to GB0, GB1, GB2, GB3)
@@ -201,7 +201,7 @@ typedef enum {
201 */ 201 */
202 202
203#define netxen_gb_mii_mgmt_set_read_cycle(config_word) \ 203#define netxen_gb_mii_mgmt_set_read_cycle(config_word) \
204 set_bit(0, (unsigned long*)&config_word) 204 ((config_word) |= 1 << 0)
205#define netxen_gb_mii_mgmt_reg_addr(config_word, val) \ 205#define netxen_gb_mii_mgmt_reg_addr(config_word, val) \
206 ((config_word) |= ((val) & 0x1F)) 206 ((config_word) |= ((val) & 0x1F))
207#define netxen_gb_mii_mgmt_phy_addr(config_word, val) \ 207#define netxen_gb_mii_mgmt_phy_addr(config_word, val) \
@@ -274,9 +274,9 @@ typedef enum {
274#define netxen_set_phy_speed(config_word, val) \ 274#define netxen_set_phy_speed(config_word, val) \
275 ((config_word) |= ((val & 0x03) << 14)) 275 ((config_word) |= ((val & 0x03) << 14))
276#define netxen_set_phy_duplex(config_word) \ 276#define netxen_set_phy_duplex(config_word) \
277 set_bit(13, (unsigned long*)&config_word) 277 ((config_word) |= 1 << 13)
278#define netxen_clear_phy_duplex(config_word) \ 278#define netxen_clear_phy_duplex(config_word) \
279 clear_bit(13, (unsigned long*)&config_word) 279 ((config_word) &= ~(1 << 13))
280 280
281#define netxen_get_phy_jabber(config_word) \ 281#define netxen_get_phy_jabber(config_word) \
282 _netxen_crb_get_bit(config_word, 0) 282 _netxen_crb_get_bit(config_word, 0)
@@ -350,11 +350,11 @@ typedef enum {
350 _netxen_crb_get_bit(config_word, 15) 350 _netxen_crb_get_bit(config_word, 15)
351 351
352#define netxen_set_phy_int_link_status_changed(config_word) \ 352#define netxen_set_phy_int_link_status_changed(config_word) \
353 set_bit(10, (unsigned long*)&config_word) 353 ((config_word) |= 1 << 10)
354#define netxen_set_phy_int_autoneg_completed(config_word) \ 354#define netxen_set_phy_int_autoneg_completed(config_word) \
355 set_bit(11, (unsigned long*)&config_word) 355 ((config_word) |= 1 << 11)
356#define netxen_set_phy_int_speed_changed(config_word) \ 356#define netxen_set_phy_int_speed_changed(config_word) \
357 set_bit(14, (unsigned long*)&config_word) 357 ((config_word) |= 1 << 14)
358 358
359/* 359/*
360 * NIU Mode Register. 360 * NIU Mode Register.
@@ -382,22 +382,22 @@ typedef enum {
382 */ 382 */
383 383
384#define netxen_set_gb_drop_gb0(config_word) \ 384#define netxen_set_gb_drop_gb0(config_word) \
385 set_bit(0, (unsigned long*)&config_word) 385 ((config_word) |= 1 << 0)
386#define netxen_set_gb_drop_gb1(config_word) \ 386#define netxen_set_gb_drop_gb1(config_word) \
387 set_bit(1, (unsigned long*)&config_word) 387 ((config_word) |= 1 << 1)
388#define netxen_set_gb_drop_gb2(config_word) \ 388#define netxen_set_gb_drop_gb2(config_word) \
389 set_bit(2, (unsigned long*)&config_word) 389 ((config_word) |= 1 << 2)
390#define netxen_set_gb_drop_gb3(config_word) \ 390#define netxen_set_gb_drop_gb3(config_word) \
391 set_bit(3, (unsigned long*)&config_word) 391 ((config_word) |= 1 << 3)
392 392
393#define netxen_clear_gb_drop_gb0(config_word) \ 393#define netxen_clear_gb_drop_gb0(config_word) \
394 clear_bit(0, (unsigned long*)&config_word) 394 ((config_word) &= ~(1 << 0))
395#define netxen_clear_gb_drop_gb1(config_word) \ 395#define netxen_clear_gb_drop_gb1(config_word) \
396 clear_bit(1, (unsigned long*)&config_word) 396 ((config_word) &= ~(1 << 1))
397#define netxen_clear_gb_drop_gb2(config_word) \ 397#define netxen_clear_gb_drop_gb2(config_word) \
398 clear_bit(2, (unsigned long*)&config_word) 398 ((config_word) &= ~(1 << 2))
399#define netxen_clear_gb_drop_gb3(config_word) \ 399#define netxen_clear_gb_drop_gb3(config_word) \
400 clear_bit(3, (unsigned long*)&config_word) 400 ((config_word) &= ~(1 << 3))
401 401
402/* 402/*
403 * NIU XG MAC Config Register 403 * NIU XG MAC Config Register
@@ -413,7 +413,7 @@ typedef enum {
413 */ 413 */
414 414
415#define netxen_xg_soft_reset(config_word) \ 415#define netxen_xg_soft_reset(config_word) \
416 set_bit(4, (unsigned long*)&config_word) 416 ((config_word) |= 1 << 4)
417 417
418/* 418/*
419 * MAC Control Register 419 * MAC Control Register
@@ -433,19 +433,19 @@ typedef enum {
433#define netxen_nic_mcr_set_id_pool0(config, val) \ 433#define netxen_nic_mcr_set_id_pool0(config, val) \
434 ((config) |= ((val) &0x03)) 434 ((config) |= ((val) &0x03))
435#define netxen_nic_mcr_set_enable_xtnd0(config) \ 435#define netxen_nic_mcr_set_enable_xtnd0(config) \
436 (set_bit(3, (unsigned long *)&(config))) 436 ((config) |= 1 << 3)
437#define netxen_nic_mcr_set_id_pool1(config, val) \ 437#define netxen_nic_mcr_set_id_pool1(config, val) \
438 ((config) |= (((val) & 0x03) << 4)) 438 ((config) |= (((val) & 0x03) << 4))
439#define netxen_nic_mcr_set_enable_xtnd1(config) \ 439#define netxen_nic_mcr_set_enable_xtnd1(config) \
440 (set_bit(6, (unsigned long *)&(config))) 440 ((config) |= 1 << 6)
441#define netxen_nic_mcr_set_id_pool2(config, val) \ 441#define netxen_nic_mcr_set_id_pool2(config, val) \
442 ((config) |= (((val) & 0x03) << 8)) 442 ((config) |= (((val) & 0x03) << 8))
443#define netxen_nic_mcr_set_enable_xtnd2(config) \ 443#define netxen_nic_mcr_set_enable_xtnd2(config) \
444 (set_bit(10, (unsigned long *)&(config))) 444 ((config) |= 1 << 10)
445#define netxen_nic_mcr_set_id_pool3(config, val) \ 445#define netxen_nic_mcr_set_id_pool3(config, val) \
446 ((config) |= (((val) & 0x03) << 12)) 446 ((config) |= (((val) & 0x03) << 12))
447#define netxen_nic_mcr_set_enable_xtnd3(config) \ 447#define netxen_nic_mcr_set_enable_xtnd3(config) \
448 (set_bit(14, (unsigned long *)&(config))) 448 ((config) |= 1 << 14)
449#define netxen_nic_mcr_set_mode_select(config, val) \ 449#define netxen_nic_mcr_set_mode_select(config, val) \
450 ((config) |= (((val) & 0x03) << 24)) 450 ((config) |= (((val) & 0x03) << 24))
451#define netxen_nic_mcr_set_enable_pool(config, val) \ 451#define netxen_nic_mcr_set_enable_pool(config, val) \
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index c3e41f368554..f7bb8c90537c 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -110,6 +110,7 @@ static void crb_addr_transform_setup(void)
110 crb_addr_transform(CAM); 110 crb_addr_transform(CAM);
111 crb_addr_transform(C2C1); 111 crb_addr_transform(C2C1);
112 crb_addr_transform(C2C0); 112 crb_addr_transform(C2C0);
113 crb_addr_transform(SMB);
113} 114}
114 115
115int netxen_init_firmware(struct netxen_adapter *adapter) 116int netxen_init_firmware(struct netxen_adapter *adapter)
@@ -276,6 +277,7 @@ unsigned long netxen_decode_crb_addr(unsigned long addr)
276 277
277static long rom_max_timeout = 10000; 278static long rom_max_timeout = 10000;
278static long rom_lock_timeout = 1000000; 279static long rom_lock_timeout = 1000000;
280static long rom_write_timeout = 700;
279 281
280static inline int rom_lock(struct netxen_adapter *adapter) 282static inline int rom_lock(struct netxen_adapter *adapter)
281{ 283{
@@ -404,7 +406,7 @@ do_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp)
404{ 406{
405 netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ADDRESS, addr); 407 netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ADDRESS, addr);
406 netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 3); 408 netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 3);
407 udelay(100); /* prevent bursting on CRB */ 409 udelay(70); /* prevent bursting on CRB */
408 netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0); 410 netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
409 netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_INSTR_OPCODE, 0xb); 411 netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_INSTR_OPCODE, 0xb);
410 if (netxen_wait_rom_done(adapter)) { 412 if (netxen_wait_rom_done(adapter)) {
@@ -413,13 +415,46 @@ do_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp)
413 } 415 }
414 /* reset abyte_cnt and dummy_byte_cnt */ 416 /* reset abyte_cnt and dummy_byte_cnt */
415 netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 0); 417 netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 0);
416 udelay(100); /* prevent bursting on CRB */ 418 udelay(70); /* prevent bursting on CRB */
417 netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0); 419 netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
418 420
419 *valp = netxen_nic_reg_read(adapter, NETXEN_ROMUSB_ROM_RDATA); 421 *valp = netxen_nic_reg_read(adapter, NETXEN_ROMUSB_ROM_RDATA);
420 return 0; 422 return 0;
421} 423}
422 424
425static inline int
426do_rom_fast_read_words(struct netxen_adapter *adapter, int addr,
427 u8 *bytes, size_t size)
428{
429 int addridx;
430 int ret = 0;
431
432 for (addridx = addr; addridx < (addr + size); addridx += 4) {
433 ret = do_rom_fast_read(adapter, addridx, (int *)bytes);
434 if (ret != 0)
435 break;
436 bytes += 4;
437 }
438
439 return ret;
440}
441
442int
443netxen_rom_fast_read_words(struct netxen_adapter *adapter, int addr,
444 u8 *bytes, size_t size)
445{
446 int ret;
447
448 ret = rom_lock(adapter);
449 if (ret < 0)
450 return ret;
451
452 ret = do_rom_fast_read_words(adapter, addr, bytes, size);
453
454 netxen_rom_unlock(adapter);
455 return ret;
456}
457
423int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp) 458int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp)
424{ 459{
425 int ret; 460 int ret;
@@ -443,6 +478,152 @@ int netxen_rom_fast_write(struct netxen_adapter *adapter, int addr, int data)
443 netxen_rom_unlock(adapter); 478 netxen_rom_unlock(adapter);
444 return ret; 479 return ret;
445} 480}
481
482static inline int do_rom_fast_write_words(struct netxen_adapter *adapter,
483 int addr, u8 *bytes, size_t size)
484{
485 int addridx = addr;
486 int ret = 0;
487
488 while (addridx < (addr + size)) {
489 int last_attempt = 0;
490 int timeout = 0;
491 int data;
492
493 data = *(u32*)bytes;
494
495 ret = do_rom_fast_write(adapter, addridx, data);
496 if (ret < 0)
497 return ret;
498
499 while(1) {
500 int data1;
501
502 do_rom_fast_read(adapter, addridx, &data1);
503 if (data1 == data)
504 break;
505
506 if (timeout++ >= rom_write_timeout) {
507 if (last_attempt++ < 4) {
508 ret = do_rom_fast_write(adapter,
509 addridx, data);
510 if (ret < 0)
511 return ret;
512 }
513 else {
514 printk(KERN_INFO "Data write did not "
515 "succeed at address 0x%x\n", addridx);
516 break;
517 }
518 }
519 }
520
521 bytes += 4;
522 addridx += 4;
523 }
524
525 return ret;
526}
527
528int netxen_rom_fast_write_words(struct netxen_adapter *adapter, int addr,
529 u8 *bytes, size_t size)
530{
531 int ret = 0;
532
533 ret = rom_lock(adapter);
534 if (ret < 0)
535 return ret;
536
537 ret = do_rom_fast_write_words(adapter, addr, bytes, size);
538 netxen_rom_unlock(adapter);
539
540 return ret;
541}
542
543int netxen_rom_wrsr(struct netxen_adapter *adapter, int data)
544{
545 int ret;
546
547 ret = netxen_rom_wren(adapter);
548 if (ret < 0)
549 return ret;
550
551 netxen_crb_writelit_adapter(adapter, NETXEN_ROMUSB_ROM_WDATA, data);
552 netxen_crb_writelit_adapter(adapter,
553 NETXEN_ROMUSB_ROM_INSTR_OPCODE, 0x1);
554
555 ret = netxen_wait_rom_done(adapter);
556 if (ret < 0)
557 return ret;
558
559 return netxen_rom_wip_poll(adapter);
560}
561
562int netxen_rom_rdsr(struct netxen_adapter *adapter)
563{
564 int ret;
565
566 ret = rom_lock(adapter);
567 if (ret < 0)
568 return ret;
569
570 ret = netxen_do_rom_rdsr(adapter);
571 netxen_rom_unlock(adapter);
572 return ret;
573}
574
575int netxen_backup_crbinit(struct netxen_adapter *adapter)
576{
577 int ret = FLASH_SUCCESS;
578 int val;
579 char *buffer = kmalloc(FLASH_SECTOR_SIZE, GFP_KERNEL);
580
581 if (!buffer)
582 return -ENOMEM;
583 /* unlock sector 63 */
584 val = netxen_rom_rdsr(adapter);
585 val = val & 0xe3;
586 ret = netxen_rom_wrsr(adapter, val);
587 if (ret != FLASH_SUCCESS)
588 goto out_kfree;
589
590 ret = netxen_rom_wip_poll(adapter);
591 if (ret != FLASH_SUCCESS)
592 goto out_kfree;
593
594 /* copy sector 0 to sector 63 */
595 ret = netxen_rom_fast_read_words(adapter, CRBINIT_START,
596 buffer, FLASH_SECTOR_SIZE);
597 if (ret != FLASH_SUCCESS)
598 goto out_kfree;
599
600 ret = netxen_rom_fast_write_words(adapter, FIXED_START,
601 buffer, FLASH_SECTOR_SIZE);
602 if (ret != FLASH_SUCCESS)
603 goto out_kfree;
604
605 /* lock sector 63 */
606 val = netxen_rom_rdsr(adapter);
607 if (!(val & 0x8)) {
608 val |= (0x1 << 2);
609 /* lock sector 63 */
610 if (netxen_rom_wrsr(adapter, val) == 0) {
611 ret = netxen_rom_wip_poll(adapter);
612 if (ret != FLASH_SUCCESS)
613 goto out_kfree;
614
615 /* lock SR writes */
616 ret = netxen_rom_wip_poll(adapter);
617 if (ret != FLASH_SUCCESS)
618 goto out_kfree;
619 }
620 }
621
622out_kfree:
623 kfree(buffer);
624 return ret;
625}
626
446int netxen_do_rom_se(struct netxen_adapter *adapter, int addr) 627int netxen_do_rom_se(struct netxen_adapter *adapter, int addr)
447{ 628{
448 netxen_rom_wren(adapter); 629 netxen_rom_wren(adapter);
@@ -457,6 +638,27 @@ int netxen_do_rom_se(struct netxen_adapter *adapter, int addr)
457 return netxen_rom_wip_poll(adapter); 638 return netxen_rom_wip_poll(adapter);
458} 639}
459 640
641void check_erased_flash(struct netxen_adapter *adapter, int addr)
642{
643 int i;
644 int val;
645 int count = 0, erased_errors = 0;
646 int range;
647
648 range = (addr == USER_START) ? FIXED_START : addr + FLASH_SECTOR_SIZE;
649
650 for (i = addr; i < range; i += 4) {
651 netxen_rom_fast_read(adapter, i, &val);
652 if (val != 0xffffffff)
653 erased_errors++;
654 count++;
655 }
656
657 if (erased_errors)
 658 printk(KERN_INFO "0x%x out of 0x%x words failed to erase "
 659 "for sector address 0x%x\n", erased_errors, count, addr);
660}
661
460int netxen_rom_se(struct netxen_adapter *adapter, int addr) 662int netxen_rom_se(struct netxen_adapter *adapter, int addr)
461{ 663{
462 int ret = 0; 664 int ret = 0;
@@ -465,6 +667,68 @@ int netxen_rom_se(struct netxen_adapter *adapter, int addr)
465 } 667 }
466 ret = netxen_do_rom_se(adapter, addr); 668 ret = netxen_do_rom_se(adapter, addr);
467 netxen_rom_unlock(adapter); 669 netxen_rom_unlock(adapter);
670 msleep(30);
671 check_erased_flash(adapter, addr);
672
673 return ret;
674}
675
676int
677netxen_flash_erase_sections(struct netxen_adapter *adapter, int start, int end)
678{
679 int ret = FLASH_SUCCESS;
680 int i;
681
682 for (i = start; i < end; i++) {
683 ret = netxen_rom_se(adapter, i * FLASH_SECTOR_SIZE);
684 if (ret)
685 break;
686 ret = netxen_rom_wip_poll(adapter);
687 if (ret < 0)
688 return ret;
689 }
690
691 return ret;
692}
693
694int
695netxen_flash_erase_secondary(struct netxen_adapter *adapter)
696{
697 int ret = FLASH_SUCCESS;
698 int start, end;
699
700 start = SECONDARY_START / FLASH_SECTOR_SIZE;
701 end = USER_START / FLASH_SECTOR_SIZE;
702 ret = netxen_flash_erase_sections(adapter, start, end);
703
704 return ret;
705}
706
707int
708netxen_flash_erase_primary(struct netxen_adapter *adapter)
709{
710 int ret = FLASH_SUCCESS;
711 int start, end;
712
713 start = PRIMARY_START / FLASH_SECTOR_SIZE;
714 end = SECONDARY_START / FLASH_SECTOR_SIZE;
715 ret = netxen_flash_erase_sections(adapter, start, end);
716
717 return ret;
718}
719
720int netxen_flash_unlock(struct netxen_adapter *adapter)
721{
722 int ret = 0;
723
724 ret = netxen_rom_wrsr(adapter, 0);
725 if (ret < 0)
726 return ret;
727
728 ret = netxen_rom_wren(adapter);
729 if (ret < 0)
730 return ret;
731
468 return ret; 732 return ret;
469} 733}
470 734
@@ -543,9 +807,13 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
543 } 807 }
544 for (i = 0; i < n; i++) { 808 for (i = 0; i < n; i++) {
545 809
546 off = 810 off = netxen_decode_crb_addr((unsigned long)buf[i].addr);
547 netxen_decode_crb_addr((unsigned long)buf[i].addr) + 811 if (off == NETXEN_ADDR_ERROR) {
548 NETXEN_PCI_CRBSPACE; 812 printk(KERN_ERR"CRB init value out of range %lx\n",
813 buf[i].addr);
814 continue;
815 }
816 off += NETXEN_PCI_CRBSPACE;
549 /* skipping cold reboot MAGIC */ 817 /* skipping cold reboot MAGIC */
550 if (off == NETXEN_CAM_RAM(0x1fc)) 818 if (off == NETXEN_CAM_RAM(0x1fc))
551 continue; 819 continue;
@@ -662,6 +930,7 @@ void netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val)
662 int loops = 0; 930 int loops = 0;
663 931
664 if (!pegtune_val) { 932 if (!pegtune_val) {
933 val = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_STATE));
665 while (val != PHAN_INITIALIZE_COMPLETE && loops < 200000) { 934 while (val != PHAN_INITIALIZE_COMPLETE && loops < 200000) {
666 udelay(100); 935 udelay(100);
667 schedule(); 936 schedule();
@@ -690,8 +959,7 @@ int netxen_nic_rx_has_work(struct netxen_adapter *adapter)
690 desc_head = recv_ctx->rcv_status_desc_head; 959 desc_head = recv_ctx->rcv_status_desc_head;
691 desc = &desc_head[consumer]; 960 desc = &desc_head[consumer];
692 961
693 if (((le16_to_cpu(netxen_get_sts_owner(desc))) 962 if (netxen_get_sts_owner(desc) & STATUS_OWNER_HOST)
694 & STATUS_OWNER_HOST))
695 return 1; 963 return 1;
696 } 964 }
697 965
@@ -787,11 +1055,11 @@ netxen_process_rcv(struct netxen_adapter *adapter, int ctxid,
787 struct netxen_port *port = adapter->port[netxen_get_sts_port(desc)]; 1055 struct netxen_port *port = adapter->port[netxen_get_sts_port(desc)];
788 struct pci_dev *pdev = port->pdev; 1056 struct pci_dev *pdev = port->pdev;
789 struct net_device *netdev = port->netdev; 1057 struct net_device *netdev = port->netdev;
790 int index = le16_to_cpu(netxen_get_sts_refhandle(desc)); 1058 int index = netxen_get_sts_refhandle(desc);
791 struct netxen_recv_context *recv_ctx = &(adapter->recv_ctx[ctxid]); 1059 struct netxen_recv_context *recv_ctx = &(adapter->recv_ctx[ctxid]);
792 struct netxen_rx_buffer *buffer; 1060 struct netxen_rx_buffer *buffer;
793 struct sk_buff *skb; 1061 struct sk_buff *skb;
794 u32 length = le16_to_cpu(netxen_get_sts_totallength(desc)); 1062 u32 length = netxen_get_sts_totallength(desc);
795 u32 desc_ctx; 1063 u32 desc_ctx;
796 struct netxen_rcv_desc_ctx *rcv_desc; 1064 struct netxen_rcv_desc_ctx *rcv_desc;
797 int ret; 1065 int ret;
@@ -918,16 +1186,14 @@ u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctxid, int max)
918 */ 1186 */
919 while (count < max) { 1187 while (count < max) {
920 desc = &desc_head[consumer]; 1188 desc = &desc_head[consumer];
921 if (! 1189 if (!(netxen_get_sts_owner(desc) & STATUS_OWNER_HOST)) {
922 (le16_to_cpu(netxen_get_sts_owner(desc)) &
923 STATUS_OWNER_HOST)) {
924 DPRINTK(ERR, "desc %p ownedby %x\n", desc, 1190 DPRINTK(ERR, "desc %p ownedby %x\n", desc,
925 netxen_get_sts_owner(desc)); 1191 netxen_get_sts_owner(desc));
926 break; 1192 break;
927 } 1193 }
928 netxen_process_rcv(adapter, ctxid, desc); 1194 netxen_process_rcv(adapter, ctxid, desc);
929 netxen_clear_sts_owner(desc); 1195 netxen_clear_sts_owner(desc);
930 netxen_set_sts_owner(desc, cpu_to_le16(STATUS_OWNER_PHANTOM)); 1196 netxen_set_sts_owner(desc, STATUS_OWNER_PHANTOM);
931 consumer = (consumer + 1) & (adapter->max_rx_desc_count - 1); 1197 consumer = (consumer + 1) & (adapter->max_rx_desc_count - 1);
932 count++; 1198 count++;
933 } 1199 }
@@ -1232,7 +1498,7 @@ void netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter, uint32_t ctx,
1232 1498
1233 /* make a rcv descriptor */ 1499 /* make a rcv descriptor */
1234 pdesc->reference_handle = cpu_to_le16(buffer->ref_handle); 1500 pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
1235 pdesc->buffer_length = cpu_to_le16(rcv_desc->dma_size); 1501 pdesc->buffer_length = cpu_to_le32(rcv_desc->dma_size);
1236 pdesc->addr_buffer = cpu_to_le64(buffer->dma); 1502 pdesc->addr_buffer = cpu_to_le64(buffer->dma);
 1237 DPRINTK(INFO, "done writing descriptor\n"); 1503 DPRINTK(INFO, "done writing descriptor\n");
1238 producer = 1504 producer =
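The netxen_nic_init.c changes add multi-word ROM read/write helpers, a CRBinit backup path, sector-erase wrappers, and a verify-with-retry loop for flash writes. That loop in do_rom_fast_write_words() condenses to the sketch below; the bounds (five total attempts, rom_write_timeout polls each) come from the patch, while demo_write_verified is an illustrative name and the snippet assumes the declarations earlier in the same file.

        /* Condensed sketch: do_rom_fast_write(), do_rom_fast_read() and
         * rom_write_timeout are the primitives this patch defines. */
        static int demo_write_verified(struct netxen_adapter *adapter,
                                       int addr, int data)
        {
                int attempt, readback, ret;
                long polls;

                for (attempt = 0; attempt < 5; attempt++) {
                        ret = do_rom_fast_write(adapter, addr, data);
                        if (ret < 0)
                                return ret;
                        /* Poll the word back until the flash cell matches. */
                        for (polls = 0; polls < rom_write_timeout; polls++) {
                                do_rom_fast_read(adapter, addr, &readback);
                                if (readback == data)
                                        return 0;
                        }
                }
                return -EIO;    /* never verified after five attempts */
        }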
diff --git a/drivers/net/netxen/netxen_nic_isr.c b/drivers/net/netxen/netxen_nic_isr.c
index 06847d4252c3..be366e48007c 100644
--- a/drivers/net/netxen/netxen_nic_isr.c
+++ b/drivers/net/netxen/netxen_nic_isr.c
@@ -79,7 +79,7 @@ void netxen_indicate_link_status(struct netxen_adapter *adapter, u32 portno,
79void netxen_handle_port_int(struct netxen_adapter *adapter, u32 portno, 79void netxen_handle_port_int(struct netxen_adapter *adapter, u32 portno,
80 u32 enable) 80 u32 enable)
81{ 81{
82 __le32 int_src; 82 __u32 int_src;
83 struct netxen_port *port; 83 struct netxen_port *port;
84 84
85 /* This should clear the interrupt source */ 85 /* This should clear the interrupt source */
@@ -110,7 +110,7 @@ void netxen_handle_port_int(struct netxen_adapter *adapter, u32 portno,
110 /* write it down later.. */ 110 /* write it down later.. */
111 if ((netxen_get_phy_int_speed_changed(int_src)) 111 if ((netxen_get_phy_int_speed_changed(int_src))
112 || (netxen_get_phy_int_link_status_changed(int_src))) { 112 || (netxen_get_phy_int_link_status_changed(int_src))) {
113 __le32 status; 113 __u32 status;
114 114
115 DPRINTK(INFO, "SPEED CHANGED OR LINK STATUS CHANGED \n"); 115 DPRINTK(INFO, "SPEED CHANGED OR LINK STATUS CHANGED \n");
116 116
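The netxen_nic_isr.c change is purely a type correction: the CRB read helpers already return host-order values, so holding int_src and status in __le32 was wrong annotation rather than wrong behaviour. In sparse terms, a __le32 may only be produced by cpu_to_le32() and consumed by le32_to_cpu(); a host-order register snapshot belongs in a plain u32, as this standalone illustration shows:

        #include <linux/types.h>

        static int demo_bit_test(u32 int_src)
        {
                /* Direct bit tests are fine on host-order values; the same
                 * test on a __le32 would draw a sparse restricted-type
                 * warning, and would be wrong on big-endian hardware if
                 * the value really were wire-order. */
                return (int_src & (1 << 10)) != 0;
        }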
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 96e1bee19ba0..69c1b9d23a1a 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -117,7 +117,7 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
117 void __iomem *mem_ptr1 = NULL; 117 void __iomem *mem_ptr1 = NULL;
118 void __iomem *mem_ptr2 = NULL; 118 void __iomem *mem_ptr2 = NULL;
119 119
120 u8 *db_ptr = NULL; 120 u8 __iomem *db_ptr = NULL;
121 unsigned long mem_base, mem_len, db_base, db_len; 121 unsigned long mem_base, mem_len, db_base, db_len;
122 int pci_using_dac, i, err; 122 int pci_using_dac, i, err;
123 int ring; 123 int ring;
@@ -191,7 +191,7 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
191 db_len); 191 db_len);
192 192
193 db_ptr = ioremap(db_base, NETXEN_DB_MAPSIZE_BYTES); 193 db_ptr = ioremap(db_base, NETXEN_DB_MAPSIZE_BYTES);
194 if (db_ptr == 0UL) { 194 if (!db_ptr) {
195 printk(KERN_ERR "%s: Failed to allocate doorbell map.", 195 printk(KERN_ERR "%s: Failed to allocate doorbell map.",
196 netxen_nic_driver_name); 196 netxen_nic_driver_name);
197 err = -EIO; 197 err = -EIO;
@@ -818,7 +818,7 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
818 /* Take skb->data itself */ 818 /* Take skb->data itself */
819 pbuf = &adapter->cmd_buf_arr[producer]; 819 pbuf = &adapter->cmd_buf_arr[producer];
820 if ((netdev->features & NETIF_F_TSO) && skb_shinfo(skb)->gso_size > 0) { 820 if ((netdev->features & NETIF_F_TSO) && skb_shinfo(skb)->gso_size > 0) {
821 pbuf->mss = cpu_to_le16(skb_shinfo(skb)->gso_size); 821 pbuf->mss = skb_shinfo(skb)->gso_size;
822 hwdesc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size); 822 hwdesc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
823 } else { 823 } else {
824 pbuf->mss = 0; 824 pbuf->mss = 0;
@@ -882,7 +882,7 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
882 hwdesc->addr_buffer3 = cpu_to_le64(temp_dma); 882 hwdesc->addr_buffer3 = cpu_to_le64(temp_dma);
883 break; 883 break;
884 case 3: 884 case 3:
885 hwdesc->buffer4_length = temp_len; 885 hwdesc->buffer4_length = cpu_to_le16(temp_len);
886 hwdesc->addr_buffer4 = cpu_to_le64(temp_dma); 886 hwdesc->addr_buffer4 = cpu_to_le64(temp_dma);
887 break; 887 break;
888 } 888 }
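The netxen_nic_main.c hunks annotate the doorbell mapping as __iomem, test the ioremap() result against NULL rather than 0UL, add the missing cpu_to_le16() on buffer4_length, and drop a byteswap from pbuf->mss, which lives in a host-only bookkeeping structure. A minimal sketch of the mapping idiom, with the base and length left as caller-supplied placeholders:

        #include <linux/io.h>

        static void __iomem *demo_map_doorbell(unsigned long db_base,
                                               unsigned long db_len)
        {
                void __iomem *db_ptr;

                db_ptr = ioremap(db_base, db_len);
                if (!db_ptr)            /* failure is NULL, not 0UL */
                        return NULL;

                writel(0, db_ptr);      /* access only through I/O accessors */
                return db_ptr;          /* caller must iounmap() */
        }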
diff --git a/drivers/net/netxen/netxen_nic_niu.c b/drivers/net/netxen/netxen_nic_niu.c
index 4987dc765d99..40d7003a371c 100644
--- a/drivers/net/netxen/netxen_nic_niu.c
+++ b/drivers/net/netxen/netxen_nic_niu.c
@@ -89,15 +89,15 @@ static inline int phy_unlock(struct netxen_adapter *adapter)
89 * 89 *
90 */ 90 */
91int netxen_niu_gbe_phy_read(struct netxen_adapter *adapter, long phy, 91int netxen_niu_gbe_phy_read(struct netxen_adapter *adapter, long phy,
92 long reg, __le32 * readval) 92 long reg, __u32 * readval)
93{ 93{
94 long timeout = 0; 94 long timeout = 0;
95 long result = 0; 95 long result = 0;
96 long restore = 0; 96 long restore = 0;
97 __le32 address; 97 __u32 address;
98 __le32 command; 98 __u32 command;
99 __le32 status; 99 __u32 status;
100 __le32 mac_cfg0; 100 __u32 mac_cfg0;
101 101
102 if (phy_lock(adapter) != 0) { 102 if (phy_lock(adapter) != 0) {
103 return -1; 103 return -1;
@@ -112,7 +112,7 @@ int netxen_niu_gbe_phy_read(struct netxen_adapter *adapter, long phy,
112 &mac_cfg0, 4)) 112 &mac_cfg0, 4))
113 return -EIO; 113 return -EIO;
114 if (netxen_gb_get_soft_reset(mac_cfg0)) { 114 if (netxen_gb_get_soft_reset(mac_cfg0)) {
115 __le32 temp; 115 __u32 temp;
116 temp = 0; 116 temp = 0;
117 netxen_gb_tx_reset_pb(temp); 117 netxen_gb_tx_reset_pb(temp);
118 netxen_gb_rx_reset_pb(temp); 118 netxen_gb_rx_reset_pb(temp);
@@ -184,15 +184,15 @@ int netxen_niu_gbe_phy_read(struct netxen_adapter *adapter, long phy,
184 * 184 *
185 */ 185 */
186int netxen_niu_gbe_phy_write(struct netxen_adapter *adapter, 186int netxen_niu_gbe_phy_write(struct netxen_adapter *adapter,
187 long phy, long reg, __le32 val) 187 long phy, long reg, __u32 val)
188{ 188{
189 long timeout = 0; 189 long timeout = 0;
190 long result = 0; 190 long result = 0;
191 long restore = 0; 191 long restore = 0;
192 __le32 address; 192 __u32 address;
193 __le32 command; 193 __u32 command;
194 __le32 status; 194 __u32 status;
195 __le32 mac_cfg0; 195 __u32 mac_cfg0;
196 196
197 /* 197 /*
198 * MII mgmt all goes through port 0 MAC interface, so it 198 * MII mgmt all goes through port 0 MAC interface, so it
@@ -203,7 +203,7 @@ int netxen_niu_gbe_phy_write(struct netxen_adapter *adapter,
203 &mac_cfg0, 4)) 203 &mac_cfg0, 4))
204 return -EIO; 204 return -EIO;
205 if (netxen_gb_get_soft_reset(mac_cfg0)) { 205 if (netxen_gb_get_soft_reset(mac_cfg0)) {
206 __le32 temp; 206 __u32 temp;
207 temp = 0; 207 temp = 0;
208 netxen_gb_tx_reset_pb(temp); 208 netxen_gb_tx_reset_pb(temp);
209 netxen_gb_rx_reset_pb(temp); 209 netxen_gb_rx_reset_pb(temp);
@@ -269,7 +269,7 @@ int netxen_niu_gbe_enable_phy_interrupts(struct netxen_adapter *adapter,
269 int port) 269 int port)
270{ 270{
271 int result = 0; 271 int result = 0;
272 __le32 enable = 0; 272 __u32 enable = 0;
273 netxen_set_phy_int_link_status_changed(enable); 273 netxen_set_phy_int_link_status_changed(enable);
274 netxen_set_phy_int_autoneg_completed(enable); 274 netxen_set_phy_int_autoneg_completed(enable);
275 netxen_set_phy_int_speed_changed(enable); 275 netxen_set_phy_int_speed_changed(enable);
@@ -402,7 +402,7 @@ void netxen_niu_gbe_set_gmii_mode(struct netxen_adapter *adapter,
402int netxen_niu_gbe_init_port(struct netxen_adapter *adapter, int port) 402int netxen_niu_gbe_init_port(struct netxen_adapter *adapter, int port)
403{ 403{
404 int result = 0; 404 int result = 0;
405 __le32 status; 405 __u32 status;
406 if (adapter->disable_phy_interrupts) 406 if (adapter->disable_phy_interrupts)
407 adapter->disable_phy_interrupts(adapter, port); 407 adapter->disable_phy_interrupts(adapter, port);
408 mdelay(2); 408 mdelay(2);
@@ -410,7 +410,7 @@ int netxen_niu_gbe_init_port(struct netxen_adapter *adapter, int port)
410 if (0 == 410 if (0 ==
411 netxen_niu_gbe_phy_read(adapter, port, 411 netxen_niu_gbe_phy_read(adapter, port,
412 NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS, 412 NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
413 (__le32 *) & status)) { 413 &status)) {
414 if (netxen_get_phy_link(status)) { 414 if (netxen_get_phy_link(status)) {
415 if (netxen_get_phy_speed(status) == 2) { 415 if (netxen_get_phy_speed(status) == 2) {
416 netxen_niu_gbe_set_gmii_mode(adapter, port, 1); 416 netxen_niu_gbe_set_gmii_mode(adapter, port, 1);
@@ -489,7 +489,7 @@ int netxen_niu_gbe_handle_phy_interrupt(struct netxen_adapter *adapter,
489 int port, long enable) 489 int port, long enable)
490{ 490{
491 int result = 0; 491 int result = 0;
492 __le32 int_src; 492 __u32 int_src;
493 493
494 printk(KERN_INFO PFX "NETXEN: Handling PHY interrupt on port %d" 494 printk(KERN_INFO PFX "NETXEN: Handling PHY interrupt on port %d"
495 " (device enable = %d)\n", (int)port, (int)enable); 495 " (device enable = %d)\n", (int)port, (int)enable);
@@ -530,7 +530,7 @@ int netxen_niu_gbe_handle_phy_interrupt(struct netxen_adapter *adapter,
530 printk(KERN_INFO PFX "autoneg_error "); 530 printk(KERN_INFO PFX "autoneg_error ");
531 if ((netxen_get_phy_int_speed_changed(int_src)) 531 if ((netxen_get_phy_int_speed_changed(int_src))
532 || (netxen_get_phy_int_link_status_changed(int_src))) { 532 || (netxen_get_phy_int_link_status_changed(int_src))) {
533 __le32 status; 533 __u32 status;
534 534
535 printk(KERN_INFO PFX 535 printk(KERN_INFO PFX
536 "speed_changed or link status changed"); 536 "speed_changed or link status changed");
@@ -583,9 +583,9 @@ int netxen_niu_gbe_handle_phy_interrupt(struct netxen_adapter *adapter,
583int netxen_niu_macaddr_get(struct netxen_adapter *adapter, 583int netxen_niu_macaddr_get(struct netxen_adapter *adapter,
584 int phy, netxen_ethernet_macaddr_t * addr) 584 int phy, netxen_ethernet_macaddr_t * addr)
585{ 585{
586 u64 result = 0; 586 u32 stationhigh;
587 __le32 stationhigh; 587 u32 stationlow;
588 __le32 stationlow; 588 u8 val[8];
589 589
590 if (addr == NULL) 590 if (addr == NULL)
591 return -EINVAL; 591 return -EINVAL;
@@ -598,10 +598,10 @@ int netxen_niu_macaddr_get(struct netxen_adapter *adapter,
598 if (netxen_nic_hw_read_wx(adapter, NETXEN_NIU_GB_STATION_ADDR_1(phy), 598 if (netxen_nic_hw_read_wx(adapter, NETXEN_NIU_GB_STATION_ADDR_1(phy),
599 &stationlow, 4)) 599 &stationlow, 4))
600 return -EIO; 600 return -EIO;
601 ((__le32 *)val)[1] = cpu_to_le32(stationhigh);
602 ((__le32 *)val)[0] = cpu_to_le32(stationlow);
601 603
602 result = (u64) netxen_gb_get_stationaddress_low(stationlow); 604 memcpy(addr, val + 2, 6);
603 result |= (u64) stationhigh << 16;
604 memcpy(*addr, &result, sizeof(netxen_ethernet_macaddr_t));
605 605
606 return 0; 606 return 0;
607} 607}
@@ -613,24 +613,25 @@ int netxen_niu_macaddr_get(struct netxen_adapter *adapter,
613int netxen_niu_macaddr_set(struct netxen_port *port, 613int netxen_niu_macaddr_set(struct netxen_port *port,
614 netxen_ethernet_macaddr_t addr) 614 netxen_ethernet_macaddr_t addr)
615{ 615{
616 __le32 temp = 0; 616 u8 temp[4];
617 u32 val;
617 struct netxen_adapter *adapter = port->adapter; 618 struct netxen_adapter *adapter = port->adapter;
618 int phy = port->portnum; 619 int phy = port->portnum;
619 unsigned char mac_addr[6]; 620 unsigned char mac_addr[6];
620 int i; 621 int i;
621 622
622 for (i = 0; i < 10; i++) { 623 for (i = 0; i < 10; i++) {
623 memcpy(&temp, addr, 2); 624 temp[0] = temp[1] = 0;
624 temp <<= 16; 625 memcpy(temp + 2, addr, 2);
626 val = le32_to_cpu(*(__le32 *)temp);
625 if (netxen_nic_hw_write_wx 627 if (netxen_nic_hw_write_wx
626 (adapter, NETXEN_NIU_GB_STATION_ADDR_1(phy), &temp, 4)) 628 (adapter, NETXEN_NIU_GB_STATION_ADDR_1(phy), &val, 4))
627 return -EIO; 629 return -EIO;
628 630
629 temp = 0; 631 memcpy(temp, ((u8 *) addr) + 2, sizeof(__le32));
630 632 val = le32_to_cpu(*(__le32 *)temp);
631 memcpy(&temp, ((u8 *) addr) + 2, sizeof(__le32));
632 if (netxen_nic_hw_write_wx 633 if (netxen_nic_hw_write_wx
633 (adapter, NETXEN_NIU_GB_STATION_ADDR_0(phy), &temp, 4)) 634 (adapter, NETXEN_NIU_GB_STATION_ADDR_0(phy), &val, 4))
634 return -2; 635 return -2;
635 636
636 netxen_niu_macaddr_get(adapter, phy, 637 netxen_niu_macaddr_get(adapter, phy,
@@ -659,9 +660,9 @@ int netxen_niu_macaddr_set(struct netxen_port *port,
659int netxen_niu_enable_gbe_port(struct netxen_adapter *adapter, 660int netxen_niu_enable_gbe_port(struct netxen_adapter *adapter,
660 int port, netxen_niu_gbe_ifmode_t mode) 661 int port, netxen_niu_gbe_ifmode_t mode)
661{ 662{
662 __le32 mac_cfg0; 663 __u32 mac_cfg0;
663 __le32 mac_cfg1; 664 __u32 mac_cfg1;
664 __le32 mii_cfg; 665 __u32 mii_cfg;
665 666
666 if ((port < 0) || (port > NETXEN_NIU_MAX_GBE_PORTS)) 667 if ((port < 0) || (port > NETXEN_NIU_MAX_GBE_PORTS))
667 return -EINVAL; 668 return -EINVAL;
@@ -736,7 +737,7 @@ int netxen_niu_enable_gbe_port(struct netxen_adapter *adapter,
736/* Disable a GbE interface */ 737/* Disable a GbE interface */
737int netxen_niu_disable_gbe_port(struct netxen_adapter *adapter, int port) 738int netxen_niu_disable_gbe_port(struct netxen_adapter *adapter, int port)
738{ 739{
739 __le32 mac_cfg0; 740 __u32 mac_cfg0;
740 741
741 if ((port < 0) || (port > NETXEN_NIU_MAX_GBE_PORTS)) 742 if ((port < 0) || (port > NETXEN_NIU_MAX_GBE_PORTS))
742 return -EINVAL; 743 return -EINVAL;
@@ -752,7 +753,7 @@ int netxen_niu_disable_gbe_port(struct netxen_adapter *adapter, int port)
752/* Disable an XG interface */ 753/* Disable an XG interface */
753int netxen_niu_disable_xg_port(struct netxen_adapter *adapter, int port) 754int netxen_niu_disable_xg_port(struct netxen_adapter *adapter, int port)
754{ 755{
755 __le32 mac_cfg; 756 __u32 mac_cfg;
756 757
757 if (port != 0) 758 if (port != 0)
758 return -EINVAL; 759 return -EINVAL;
@@ -769,7 +770,7 @@ int netxen_niu_disable_xg_port(struct netxen_adapter *adapter, int port)
769int netxen_niu_set_promiscuous_mode(struct netxen_adapter *adapter, int port, 770int netxen_niu_set_promiscuous_mode(struct netxen_adapter *adapter, int port,
770 netxen_niu_prom_mode_t mode) 771 netxen_niu_prom_mode_t mode)
771{ 772{
772 __le32 reg; 773 __u32 reg;
773 774
774 if ((port < 0) || (port > NETXEN_NIU_MAX_GBE_PORTS)) 775 if ((port < 0) || (port > NETXEN_NIU_MAX_GBE_PORTS))
775 return -EINVAL; 776 return -EINVAL;
@@ -826,22 +827,21 @@ int netxen_niu_set_promiscuous_mode(struct netxen_adapter *adapter, int port,
826int netxen_niu_xg_macaddr_set(struct netxen_port *port, 827int netxen_niu_xg_macaddr_set(struct netxen_port *port,
827 netxen_ethernet_macaddr_t addr) 828 netxen_ethernet_macaddr_t addr)
828{ 829{
829 __le32 temp = 0; 830 u8 temp[4];
831 u32 val;
830 struct netxen_adapter *adapter = port->adapter; 832 struct netxen_adapter *adapter = port->adapter;
831 833
832 memcpy(&temp, addr, 2); 834 temp[0] = temp[1] = 0;
833 temp = cpu_to_le32(temp); 835 memcpy(temp + 2, addr, 2);
834 temp <<= 16; 836 val = le32_to_cpu(*(__le32 *)temp);
835 if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_XGE_STATION_ADDR_0_1, 837 if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_XGE_STATION_ADDR_0_1,
836 &temp, 4)) 838 &val, 4))
837 return -EIO; 839 return -EIO;
838 840
839 temp = 0;
840
841 memcpy(&temp, ((u8 *) addr) + 2, sizeof(__le32)); 841 memcpy(&temp, ((u8 *) addr) + 2, sizeof(__le32));
842 temp = cpu_to_le32(temp); 842 val = le32_to_cpu(*(__le32 *)temp);
843 if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_XGE_STATION_ADDR_0_HI, 843 if (netxen_nic_hw_write_wx(adapter, NETXEN_NIU_XGE_STATION_ADDR_0_HI,
844 &temp, 4)) 844 &val, 4))
845 return -EIO; 845 return -EIO;
846 846
847 return 0; 847 return 0;
@@ -854,9 +854,9 @@ int netxen_niu_xg_macaddr_set(struct netxen_port *port,
854int netxen_niu_xg_macaddr_get(struct netxen_adapter *adapter, int phy, 854int netxen_niu_xg_macaddr_get(struct netxen_adapter *adapter, int phy,
855 netxen_ethernet_macaddr_t * addr) 855 netxen_ethernet_macaddr_t * addr)
856{ 856{
857 __le32 stationhigh; 857 u32 stationhigh;
858 __le32 stationlow; 858 u32 stationlow;
859 u64 result; 859 u8 val[8];
860 860
861 if (addr == NULL) 861 if (addr == NULL)
862 return -EINVAL; 862 return -EINVAL;
@@ -869,10 +869,10 @@ int netxen_niu_xg_macaddr_get(struct netxen_adapter *adapter, int phy,
869 if (netxen_nic_hw_read_wx(adapter, NETXEN_NIU_XGE_STATION_ADDR_0_1, 869 if (netxen_nic_hw_read_wx(adapter, NETXEN_NIU_XGE_STATION_ADDR_0_1,
870 &stationlow, 4)) 870 &stationlow, 4))
871 return -EIO; 871 return -EIO;
872 ((__le32 *)val)[1] = cpu_to_le32(stationhigh);
873 ((__le32 *)val)[0] = cpu_to_le32(stationlow);
872 874
873 result = ((u64) stationlow) >> 16; 875 memcpy(addr, val + 2, 6);
874 result |= (u64) stationhigh << 16;
875 memcpy(*addr, &result, sizeof(netxen_ethernet_macaddr_t));
876 876
877 return 0; 877 return 0;
878} 878}
@@ -880,7 +880,7 @@ int netxen_niu_xg_macaddr_get(struct netxen_adapter *adapter, int phy,
880int netxen_niu_xg_set_promiscuous_mode(struct netxen_adapter *adapter, 880int netxen_niu_xg_set_promiscuous_mode(struct netxen_adapter *adapter,
881 int port, netxen_niu_prom_mode_t mode) 881 int port, netxen_niu_prom_mode_t mode)
882{ 882{
883 __le32 reg; 883 __u32 reg;
884 884
885 if ((port < 0) || (port > NETXEN_NIU_MAX_GBE_PORTS)) 885 if ((port < 0) || (port > NETXEN_NIU_MAX_GBE_PORTS))
886 return -EINVAL; 886 return -EINVAL;
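The netxen_nic_niu.c rework stops funneling the MAC address through a u64 and instead stages the two station-address registers in a byte buffer, which keeps the byte order explicit on both the read and write paths. The read side reduces to the following sketch, taken directly from the patch's logic; demo_unpack_mac is an illustrative wrapper, not a driver function.

        #include <linux/types.h>
        #include <linux/string.h>
        #include <asm/byteorder.h>

        static void demo_unpack_mac(u32 stationhigh, u32 stationlow, u8 *mac)
        {
                u8 val[8];

                /* Lay the two registers out least-significant byte first... */
                ((__le32 *)val)[1] = cpu_to_le32(stationhigh);
                ((__le32 *)val)[0] = cpu_to_le32(stationlow);
                /* ...so the 6-byte station address is the middle of the
                 * buffer regardless of host endianness. */
                memcpy(mac, val + 2, 6);
        }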
diff --git a/drivers/net/oaknet.c b/drivers/net/oaknet.c
deleted file mode 100644
index 702e3e95612a..000000000000
--- a/drivers/net/oaknet.c
+++ /dev/null
@@ -1,666 +0,0 @@
1/*
2 *
3 * Copyright (c) 1999-2000 Grant Erickson <grant@lcse.umn.edu>
4 *
5 * Module name: oaknet.c
6 *
7 * Description:
8 * Driver for the National Semiconductor DP83902AV Ethernet controller
9 * on-board the IBM PowerPC "Oak" evaluation board. Adapted from the
10 * various other 8390 drivers written by Donald Becker and Paul Gortmaker.
11 *
12 * Additional inspiration from the "tcd8390.c" driver from TiVo, Inc.
13 * and "enetLib.c" from IBM.
14 *
15 */
16
17#include <linux/module.h>
18#include <linux/errno.h>
19#include <linux/delay.h>
20#include <linux/netdevice.h>
21#include <linux/etherdevice.h>
22#include <linux/init.h>
23#include <linux/jiffies.h>
24
25#include <asm/board.h>
26#include <asm/io.h>
27
28#include "8390.h"
29
30
31/* Preprocessor Defines */
32
33#if !defined(TRUE) || TRUE != 1
34#define TRUE 1
35#endif
36
37#if !defined(FALSE) || FALSE != 0
38#define FALSE 0
39#endif
40
41#define OAKNET_START_PG 0x20 /* First page of TX buffer */
42#define OAKNET_STOP_PG 0x40 /* Last page +1 of RX ring */
43
44#define OAKNET_WAIT (2 * HZ / 100) /* 20 ms */
45
46/* Experimenting with some fixes for a broken driver... */
47
48#define OAKNET_DISINT
49#define OAKNET_HEADCHECK
50#define OAKNET_RWFIX
51
52
53/* Global Variables */
54
55static const char *name = "National DP83902AV";
56
57static struct net_device *oaknet_devs;
58
59
60/* Function Prototypes */
61
62static int oaknet_open(struct net_device *dev);
63static int oaknet_close(struct net_device *dev);
64
65static void oaknet_reset_8390(struct net_device *dev);
66static void oaknet_get_8390_hdr(struct net_device *dev,
67 struct e8390_pkt_hdr *hdr, int ring_page);
68static void oaknet_block_input(struct net_device *dev, int count,
69 struct sk_buff *skb, int ring_offset);
70static void oaknet_block_output(struct net_device *dev, int count,
71 const unsigned char *buf, int start_page);
72
73static void oaknet_dma_error(struct net_device *dev, const char *name);
74
75
76/*
77 * int oaknet_init()
78 *
79 * Description:
80 * This routine performs all the necessary platform-specific initiali-
81 * zation and set-up for the IBM "Oak" evaluation board's National
82 * Semiconductor DP83902AV "ST-NIC" Ethernet controller.
83 *
84 * Input(s):
85 * N/A
86 *
87 * Output(s):
88 * N/A
89 *
90 * Returns:
91 * 0 if OK, otherwise system error number on error.
92 *
93 */
94static int __init oaknet_init(void)
95{
96 register int i;
97 int reg0, regd;
98 int ret = -ENOMEM;
99 struct net_device *dev;
100#if 0
101 unsigned long ioaddr = OAKNET_IO_BASE;
102#else
103 unsigned long ioaddr = ioremap(OAKNET_IO_BASE, OAKNET_IO_SIZE);
104#endif
105 bd_t *bip = (bd_t *)__res;
106
107 if (!ioaddr)
108 return -ENOMEM;
109
110 dev = alloc_ei_netdev();
111 if (!dev)
112 goto out_unmap;
113
114 ret = -EBUSY;
115 if (!request_region(OAKNET_IO_BASE, OAKNET_IO_SIZE, name))
116 goto out_dev;
117
118 /* Quick register check to see if the device is really there. */
119
120 ret = -ENODEV;
121 if ((reg0 = ei_ibp(ioaddr)) == 0xFF)
122 goto out_region;
123
124 /*
125 * That worked. Now a more thorough check, using the multicast
126 * address registers, that the device is definitely out there
127 * and semi-functional.
128 */
129
130 ei_obp(E8390_NODMA + E8390_PAGE1 + E8390_STOP, ioaddr + E8390_CMD);
131 regd = ei_ibp(ioaddr + 0x0D);
132 ei_obp(0xFF, ioaddr + 0x0D);
133 ei_obp(E8390_NODMA + E8390_PAGE0, ioaddr + E8390_CMD);
134 ei_ibp(ioaddr + EN0_COUNTER0);
135
136 /* It's no good. Fix things back up and leave. */
137
138 ret = -ENODEV;
139 if (ei_ibp(ioaddr + EN0_COUNTER0) != 0) {
140 ei_obp(reg0, ioaddr);
141 ei_obp(regd, ioaddr + 0x0D);
142 goto out_region;
143 }
144
145 SET_MODULE_OWNER(dev);
146
147 /*
148 * This controller is on an embedded board, so the base address
 149 * and interrupt assignments are pre-assigned and unchangeable.
150 */
151
152 dev->base_addr = ioaddr;
153 dev->irq = OAKNET_INT;
154
155 /*
156 * Disable all chip interrupts for now and ACK all pending
157 * interrupts.
158 */
159
160 ei_obp(0x0, ioaddr + EN0_IMR);
161 ei_obp(0xFF, ioaddr + EN0_ISR);
162
163 /* Attempt to get the interrupt line */
164
165 ret = -EAGAIN;
166 if (request_irq(dev->irq, ei_interrupt, 0, name, dev)) {
167 printk("%s: unable to request interrupt %d.\n",
168 name, dev->irq);
169 goto out_region;
170 }
171
172 /* Tell the world about what and where we've found. */
173
174 printk("%s: %s at", dev->name, name);
175 for (i = 0; i < ETHER_ADDR_LEN; ++i) {
176 dev->dev_addr[i] = bip->bi_enetaddr[i];
177 printk("%c%.2x", (i ? ':' : ' '), dev->dev_addr[i]);
178 }
179 printk(", found at %#lx, using IRQ %d.\n", dev->base_addr, dev->irq);
180
181 /* Set up some required driver fields and then we're done. */
182
183 ei_status.name = name;
184 ei_status.word16 = FALSE;
185 ei_status.tx_start_page = OAKNET_START_PG;
186 ei_status.rx_start_page = OAKNET_START_PG + TX_PAGES;
187 ei_status.stop_page = OAKNET_STOP_PG;
188
189 ei_status.reset_8390 = &oaknet_reset_8390;
190 ei_status.block_input = &oaknet_block_input;
191 ei_status.block_output = &oaknet_block_output;
192 ei_status.get_8390_hdr = &oaknet_get_8390_hdr;
193
194 dev->open = oaknet_open;
195 dev->stop = oaknet_close;
196#ifdef CONFIG_NET_POLL_CONTROLLER
197 dev->poll_controller = ei_poll;
198#endif
199
200 NS8390_init(dev, FALSE);
201 ret = register_netdev(dev);
202 if (ret)
203 goto out_irq;
204
205 oaknet_devs = dev;
206 return 0;
207
208out_irq;
209 free_irq(dev->irq, dev);
210out_region:
211 release_region(OAKNET_IO_BASE, OAKNET_IO_SIZE);
212out_dev:
213 free_netdev(dev);
214out_unmap:
215 iounmap(ioaddr);
216 return ret;
217}
218
219/*
220 * static int oaknet_open()
221 *
222 * Description:
223 * This routine is a modest wrapper around ei_open, the 8390-generic,
224 * driver open routine. This just increments the module usage count
225 * and passes along the status from ei_open.
226 *
227 * Input(s):
228 * *dev - Pointer to the device structure for this driver.
229 *
230 * Output(s):
231 * *dev - Pointer to the device structure for this driver, potentially
232 * modified by ei_open.
233 *
234 * Returns:
235 * 0 if OK, otherwise < 0 on error.
236 *
237 */
238static int
239oaknet_open(struct net_device *dev)
240{
241 int status = ei_open(dev);
242 return (status);
243}
244
245/*
246 * static int oaknet_close()
247 *
248 * Description:
249 * This routine is a modest wrapper around ei_close, the 8390-generic,
250 * driver close routine. This just decrements the module usage count
251 * and passes along the status from ei_close.
252 *
253 * Input(s):
254 * *dev - Pointer to the device structure for this driver.
255 *
256 * Output(s):
257 * *dev - Pointer to the device structure for this driver, potentially
258 * modified by ei_close.
259 *
260 * Returns:
261 * 0 if OK, otherwise < 0 on error.
262 *
263 */
264static int
265oaknet_close(struct net_device *dev)
266{
267 int status = ei_close(dev);
268 return (status);
269}
270
271/*
272 * static void oaknet_reset_8390()
273 *
274 * Description:
275 * This routine resets the DP83902 chip.
276 *
277 * Input(s):
278 * *dev - Pointer to the device structure for this driver.
279 *
280 * Output(s):
281 * N/A
282 *
283 * Returns:
284 * N/A
285 *
286 */
287static void
288oaknet_reset_8390(struct net_device *dev)
289{
290 int base = E8390_BASE;
291
292 /*
 293 * We have no provision for resetting the controller as is done
294 * in other drivers, such as "ne.c". However, the following
295 * seems to work well enough in the TiVo driver.
296 */
297
298 printk("Resetting %s...\n", dev->name);
299 ei_obp(E8390_STOP | E8390_NODMA | E8390_PAGE0, base + E8390_CMD);
300 ei_status.txing = 0;
301 ei_status.dmaing = 0;
302}
303
304/*
305 * static void oaknet_get_8390_hdr()
306 *
307 * Description:
308 * This routine grabs the 8390-specific header. It's similar to the
309 * block input routine, but we don't need to be concerned with ring wrap
310 * as the header will be at the start of a page, so we optimize accordingly.
311 *
312 * Input(s):
313 * *dev - Pointer to the device structure for this driver.
314 * *hdr - Pointer to storage for the 8390-specific packet header.
315 * ring_page - Start page of the ring buffer holding the just-received frame's header.
316 *
317 * Output(s):
318 * *hdr - Pointer to the 8390-specific packet header for the just-
319 * received frame.
320 *
321 * Returns:
322 * N/A
323 *
324 */
325static void
326oaknet_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
327 int ring_page)
328{
329 int base = dev->base_addr;
330
331 /*
332 * This should NOT happen. If it does, it is the LAST thing you'll
333 * see.
334 */
335
336 if (ei_status.dmaing) {
337 oaknet_dma_error(dev, "oaknet_get_8390_hdr");
338 return;
339 }
340
341 ei_status.dmaing |= 0x01;
342 outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START, base + OAKNET_CMD);
343 outb_p(sizeof(struct e8390_pkt_hdr), base + EN0_RCNTLO);
344 outb_p(0, base + EN0_RCNTHI);
345 outb_p(0, base + EN0_RSARLO); /* On page boundary */
346 outb_p(ring_page, base + EN0_RSARHI);
347 outb_p(E8390_RREAD + E8390_START, base + OAKNET_CMD);
348
349 if (ei_status.word16)
350 insw(base + OAKNET_DATA, hdr,
351 sizeof(struct e8390_pkt_hdr) >> 1);
352 else
353 insb(base + OAKNET_DATA, hdr,
354 sizeof(struct e8390_pkt_hdr));
355
356 /* Byte-swap the packet byte count */
357
358 hdr->count = le16_to_cpu(hdr->count);
359
360 outb_p(ENISR_RDC, base + EN0_ISR); /* ACK Remote DMA interrupt */
361 ei_status.dmaing &= ~0x01;
362}
363
364/*
365 * Block-input routine: copy the received frame at ring_offset in the controller's ring buffer into the socket buffer.
366 */
367static void
368oaknet_block_input(struct net_device *dev, int count, struct sk_buff *skb,
369 int ring_offset)
370{
371 int base = OAKNET_BASE;
372	char *buf = skb->data;
#ifdef OAKNET_DISINT
	unsigned long flags;
#endif
#ifdef OAKNET_HEADCHECK
	int bytes = count;	/* bytes actually moved by the NIC's remote DMA */
#endif
373
374 /*
375 * This should NOT happen. If it does, it is the LAST thing you'll
376 * see.
377 */
378
379 if (ei_status.dmaing) {
380 oaknet_dma_error(dev, "oaknet_block_input");
381 return;
382 }
383
384#ifdef OAKNET_DISINT
385 save_flags(flags);
386 cli();
387#endif
388
389 ei_status.dmaing |= 0x01;
390 ei_obp(E8390_NODMA + E8390_PAGE0 + E8390_START, base + E8390_CMD);
391 ei_obp(count & 0xff, base + EN0_RCNTLO);
392 ei_obp(count >> 8, base + EN0_RCNTHI);
393 ei_obp(ring_offset & 0xff, base + EN0_RSARLO);
394 ei_obp(ring_offset >> 8, base + EN0_RSARHI);
395 ei_obp(E8390_RREAD + E8390_START, base + E8390_CMD);
396 if (ei_status.word16) {
397 ei_isw(base + E8390_DATA, buf, count >> 1);
398 if (count & 0x01) {
399 buf[count - 1] = ei_ib(base + E8390_DATA);
400#ifdef OAKNET_HEADCHECK
401 bytes++;
402#endif
403 }
404 } else {
405 ei_isb(base + E8390_DATA, buf, count);
406 }
407#ifdef OAKNET_HEADCHECK
408 /*
409 * This was for the ALPHA version only, but enough people have
410 * been encountering problems so it is still here. If you see
411 * this message you either 1) have a slightly incompatible clone
412 * or 2) have noise/speed problems with your bus.
413 */
414
415 /* DMA termination address check... */
416 {
417 int addr, tries = 20;
418 do {
419 /* DON'T check for 'ei_ibp(EN0_ISR) & ENISR_RDC' here
420 -- it's broken for Rx on some cards! */
421 int high = ei_ibp(base + EN0_RSARHI);
422 int low = ei_ibp(base + EN0_RSARLO);
423 addr = (high << 8) + low;
424 if (((ring_offset + bytes) & 0xff) == low)
425 break;
426 } while (--tries > 0);
427 if (tries <= 0)
428			printk("%s: RX transfer address mismatch, "
429 "%#4.4x (expected) vs. %#4.4x (actual).\n",
430 dev->name, ring_offset + bytes, addr);
431 }
432#endif
433 ei_obp(ENISR_RDC, base + EN0_ISR); /* ACK Remote DMA interrupt */
434 ei_status.dmaing &= ~0x01;
435
436#ifdef OAKNET_DISINT
437 restore_flags(flags);
438#endif
439}
440
441/*
442 * static void oaknet_block_output()
443 *
444 * Description:
445 * This routine copies a packet from host memory into the controller's
446 * transmit buffer, starting at the given ring buffer page.
447 *
448 * Input(s):
449 * *dev - Pointer to the device structure for this driver.
450 * count - Number of bytes to be transferred.
451 * *buf - Pointer to the packet data to be transferred.
452 * start_page - Controller buffer page at which the transfer starts.
452 *
453 * Output(s):
454 * N/A
455 *
456 * Returns:
457 * N/A
458 *
459 */
460static void
461oaknet_block_output(struct net_device *dev, int count,
462 const unsigned char *buf, int start_page)
463{
464 int base = E8390_BASE;
465#if 0
466 int bug;
467#endif
468 unsigned long start;
469#ifdef OAKNET_DISINT
470 unsigned long flags;
471#endif
472#ifdef OAKNET_HEADCHECK
473 int retries = 0;
474#endif
475
476 /* Round the count up for word writes. */
477
478 if (ei_status.word16 && (count & 0x1))
479 count++;
480
481 /*
482 * This should NOT happen. If it does, it is the LAST thing you'll
483 * see.
484 */
485
486 if (ei_status.dmaing) {
487 oaknet_dma_error(dev, "oaknet_block_output");
488 return;
489 }
490
491#ifdef OAKNET_DISINT
492 save_flags(flags);
493 cli();
494#endif
495
496 ei_status.dmaing |= 0x01;
497
498 /* Make sure we are in page 0. */
499
500 ei_obp(E8390_PAGE0 + E8390_START + E8390_NODMA, base + E8390_CMD);
501
502#ifdef OAKNET_HEADCHECK
503retry:
504#endif
505
506#if 0
507 /*
508 * The 83902 documentation states that the processor needs to
509 * do a "dummy read" before doing the remote write to work
510 * around a chip bug they don't feel like fixing.
511 */
512
513 bug = 0;
514 while (1) {
515 unsigned int rdhi;
516 unsigned int rdlo;
517
518 /* Now the normal output. */
519 ei_obp(ENISR_RDC, base + EN0_ISR);
520 ei_obp(count & 0xff, base + EN0_RCNTLO);
521 ei_obp(count >> 8, base + EN0_RCNTHI);
522 ei_obp(0x00, base + EN0_RSARLO);
523 ei_obp(start_page, base + EN0_RSARHI);
524
525 if (bug++)
526 break;
527
528 /* Perform the dummy read */
529 rdhi = ei_ibp(base + EN0_CRDAHI);
530 rdlo = ei_ibp(base + EN0_CRDALO);
531 ei_obp(E8390_RREAD + E8390_START, base + E8390_CMD);
532
533 while (1) {
534 unsigned int nrdhi;
535 unsigned int nrdlo;
536 nrdhi = ei_ibp(base + EN0_CRDAHI);
537 nrdlo = ei_ibp(base + EN0_CRDALO);
538 if ((rdhi != nrdhi) || (rdlo != nrdlo))
539 break;
540 }
541 }
542#else
543#ifdef OAKNET_RWFIX
544 /*
545 * Handle the read-before-write bug the same way as the
546 * Crynwr packet driver -- the Nat'l Semi. method doesn't work.
547 * Actually this doesn't always work either, but if you have
548 * problems with your 83902 this is better than nothing!
549 */
550
551 ei_obp(0x42, base + EN0_RCNTLO);
552 ei_obp(0x00, base + EN0_RCNTHI);
553 ei_obp(0x42, base + EN0_RSARLO);
554 ei_obp(0x00, base + EN0_RSARHI);
555 ei_obp(E8390_RREAD + E8390_START, base + E8390_CMD);
556 /* Make certain that the dummy read has occurred. */
557 udelay(6);
558#endif
559
560 ei_obp(ENISR_RDC, base + EN0_ISR);
561
562 /* Now the normal output. */
563 ei_obp(count & 0xff, base + EN0_RCNTLO);
564 ei_obp(count >> 8, base + EN0_RCNTHI);
565 ei_obp(0x00, base + EN0_RSARLO);
566 ei_obp(start_page, base + EN0_RSARHI);
567#endif /* 0/1 */
568
569 ei_obp(E8390_RWRITE + E8390_START, base + E8390_CMD);
570 if (ei_status.word16) {
571 ei_osw(E8390_BASE + E8390_DATA, buf, count >> 1);
572 } else {
573 ei_osb(E8390_BASE + E8390_DATA, buf, count);
574 }
575
576#ifdef OAKNET_DISINT
577 restore_flags(flags);
578#endif
579
580 start = jiffies;
581
582#ifdef OAKNET_HEADCHECK
583 /*
584 * This was for the ALPHA version only, but enough people have
585 * been encountering problems so it is still here.
586 */
587
588 {
589 /* DMA termination address check... */
590 int addr, tries = 20;
591 do {
592 int high = ei_ibp(base + EN0_RSARHI);
593 int low = ei_ibp(base + EN0_RSARLO);
594 addr = (high << 8) + low;
595 if ((start_page << 8) + count == addr)
596 break;
597 } while (--tries > 0);
598
599 if (tries <= 0) {
600			printk("%s: Tx packet transfer address mismatch, "
601 "%#4.4x (expected) vs. %#4.4x (actual).\n",
602 dev->name, (start_page << 8) + count, addr);
603 if (retries++ == 0)
604 goto retry;
605 }
606 }
607#endif
608
609 while ((ei_ibp(base + EN0_ISR) & ENISR_RDC) == 0) {
610 if (time_after(jiffies, start + OAKNET_WAIT)) {
611 printk("%s: timeout waiting for Tx RDC.\n", dev->name);
612 oaknet_reset_8390(dev);
613 NS8390_init(dev, TRUE);
614 break;
615 }
616 }
617
618 ei_obp(ENISR_RDC, base + EN0_ISR); /* Ack intr. */
619 ei_status.dmaing &= ~0x01;
620}
621
622/*
623 * static void oaknet_dma_error()
624 *
625 * Description:
626 * This routine prints out a last-ditch informative message to the console
627 * indicating that a DMA error occurred. If you see this, it's the last
628 * thing you'll see.
629 *
630 * Input(s):
631 * *dev - Pointer to the device structure for this driver.
632 * *name - Informative text (e.g. function name) indicating where the
633 * DMA error occurred.
634 *
635 * Output(s):
636 * N/A
637 *
638 * Returns:
639 * N/A
640 *
641 */
642static void
643oaknet_dma_error(struct net_device *dev, const char *name)
644{
645	printk(KERN_EMERG "%s: DMAing conflict in %s. "
646 "[DMAstat:%d][irqlock:%d][intr:%ld]\n",
647 dev->name, name, ei_status.dmaing, ei_status.irqlock,
648 dev->interrupt);
649}
650
651/*
652 * Oak Ethernet module unload interface.
653 */
654static void __exit oaknet_cleanup_module (void)
655{
656 /* Convert to loop once driver supports multiple devices. */
657	unregister_netdev(oaknet_devs);
658 free_irq(oaknet_devs->irq, oaknet_devs);
659 release_region(oaknet_devs->base_addr, OAKNET_IO_SIZE);
660 iounmap(ioaddr);
661 free_netdev(oaknet_devs);
662}
663
664module_init(oaknet_init);
665module_exit(oaknet_cleanup_module);
666MODULE_LICENSE("GPL");
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c
new file mode 100644
index 000000000000..d670ac74824f
--- /dev/null
+++ b/drivers/net/pasemi_mac.c
@@ -0,0 +1,1019 @@
1/*
2 * Copyright (C) 2006-2007 PA Semi, Inc
3 *
4 * Driver for the PA Semi PWRficient onchip 1G/10G Ethernet MACs
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20#include <linux/init.h>
21#include <linux/module.h>
22#include <linux/pci.h>
23#include <linux/interrupt.h>
24#include <linux/dmaengine.h>
25#include <linux/delay.h>
26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28#include <asm/dma-mapping.h>
29#include <linux/in.h>
30#include <linux/skbuff.h>
31
32#include <linux/ip.h>
33#include <linux/tcp.h>
34#include <net/checksum.h>
35
36#include "pasemi_mac.h"
37
38
39/* TODO list
40 *
41 * - Get rid of pci_{read,write}_config(), map registers with ioremap
42 * for performance
43 * - PHY support
44 * - Multicast support
45 * - Large MTU support
46 * - Other performance improvements
47 */
48
49
50/* Must be a power of two */
51#define RX_RING_SIZE 512
52#define TX_RING_SIZE 512
53
54#define TX_DESC(mac, num) ((mac)->tx->desc[(num) & (TX_RING_SIZE-1)])
55#define TX_DESC_INFO(mac, num) ((mac)->tx->desc_info[(num) & (TX_RING_SIZE-1)])
56#define RX_DESC(mac, num) ((mac)->rx->desc[(num) & (RX_RING_SIZE-1)])
57#define RX_DESC_INFO(mac, num) ((mac)->rx->desc_info[(num) & (RX_RING_SIZE-1)])
58#define RX_BUFF(mac, num) ((mac)->rx->buffers[(num) & (RX_RING_SIZE-1)])
59
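
/*
 * A minimal sketch of the indexing scheme the macros above rely on:
 * because the ring sizes are powers of two, the free-running
 * next_to_use/next_to_clean/next_to_fill counters can be reduced to a
 * ring slot with a mask instead of a modulo.  Hypothetical helper for
 * illustration only; the driver open-codes the mask in the macros.
 */
static inline unsigned int ring_slot(unsigned int counter, unsigned int size)
{
	/* Equivalent to counter % size whenever size is a power of two */
	return counter & (size - 1);
}
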
60#define BUF_SIZE 1646 /* 1500 MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */
61
62/* XXXOJN these should come out of the device tree some day */
63#define PAS_DMA_CAP_BASE 0xe00d0040
64#define PAS_DMA_CAP_SIZE 0x100
65#define PAS_DMA_COM_BASE 0xe00d0100
66#define PAS_DMA_COM_SIZE 0x100
67
68static struct pasdma_status *dma_status;
69
70static int pasemi_get_mac_addr(struct pasemi_mac *mac)
71{
72 struct pci_dev *pdev = mac->pdev;
73 struct device_node *dn = pci_device_to_OF_node(pdev);
74 const u8 *maddr;
75 u8 addr[6];
76
77 if (!dn) {
78 dev_dbg(&pdev->dev,
79 "No device node for mac, not configuring\n");
80 return -ENOENT;
81 }
82
83 maddr = get_property(dn, "mac-address", NULL);
84 if (maddr == NULL) {
85 dev_warn(&pdev->dev,
86 "no mac address in device tree, not configuring\n");
87 return -ENOENT;
88 }
89
90 if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &addr[0],
91 &addr[1], &addr[2], &addr[3], &addr[4], &addr[5]) != 6) {
92 dev_warn(&pdev->dev,
93 "can't parse mac address, not configuring\n");
94 return -EINVAL;
95 }
96
97 memcpy(mac->mac_addr, addr, sizeof(addr));
98 return 0;
99}
100
101static int pasemi_mac_setup_rx_resources(struct net_device *dev)
102{
103 struct pasemi_mac_rxring *ring;
104 struct pasemi_mac *mac = netdev_priv(dev);
105 int chan_id = mac->dma_rxch;
106
107 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
108
109 if (!ring)
110 goto out_ring;
111
112 spin_lock_init(&ring->lock);
113
114 ring->desc_info = kzalloc(sizeof(struct pasemi_mac_buffer) *
115 RX_RING_SIZE, GFP_KERNEL);
116
117 if (!ring->desc_info)
118 goto out_desc_info;
119
120 /* Allocate descriptors */
121 ring->desc = dma_alloc_coherent(&mac->dma_pdev->dev,
122 RX_RING_SIZE *
123 sizeof(struct pas_dma_xct_descr),
124 &ring->dma, GFP_KERNEL);
125
126 if (!ring->desc)
127 goto out_desc;
128
129 memset(ring->desc, 0, RX_RING_SIZE * sizeof(struct pas_dma_xct_descr));
130
131 ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev,
132 RX_RING_SIZE * sizeof(u64),
133 &ring->buf_dma, GFP_KERNEL);
134 if (!ring->buffers)
135 goto out_buffers;
136
137 memset(ring->buffers, 0, RX_RING_SIZE * sizeof(u64));
138
139 pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXCHAN_BASEL(chan_id),
140 PAS_DMA_RXCHAN_BASEL_BRBL(ring->dma));
141
142 pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXCHAN_BASEU(chan_id),
143 PAS_DMA_RXCHAN_BASEU_BRBH(ring->dma >> 32) |
144 PAS_DMA_RXCHAN_BASEU_SIZ(RX_RING_SIZE >> 2));
145
146 pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXCHAN_CFG(chan_id),
147 PAS_DMA_RXCHAN_CFG_HBU(1));
148
149 pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXINT_BASEL(mac->dma_if),
150 PAS_DMA_RXINT_BASEL_BRBL(__pa(ring->buffers)));
151
152 pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXINT_BASEU(mac->dma_if),
153 PAS_DMA_RXINT_BASEU_BRBH(__pa(ring->buffers) >> 32) |
154 PAS_DMA_RXINT_BASEU_SIZ(RX_RING_SIZE >> 3));
155
156 ring->next_to_fill = 0;
157 ring->next_to_clean = 0;
158
159 snprintf(ring->irq_name, sizeof(ring->irq_name),
160 "%s rx", dev->name);
161 mac->rx = ring;
162
163 return 0;
164
165out_buffers:
166 dma_free_coherent(&mac->dma_pdev->dev,
167 RX_RING_SIZE * sizeof(struct pas_dma_xct_descr),
168			  ring->desc, ring->dma);
169out_desc:
170 kfree(ring->desc_info);
171out_desc_info:
172 kfree(ring);
173out_ring:
174 return -ENOMEM;
175}
176
177
178static int pasemi_mac_setup_tx_resources(struct net_device *dev)
179{
180 struct pasemi_mac *mac = netdev_priv(dev);
181 u32 val;
182 int chan_id = mac->dma_txch;
183 struct pasemi_mac_txring *ring;
184
185 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
186 if (!ring)
187 goto out_ring;
188
189 spin_lock_init(&ring->lock);
190
191 ring->desc_info = kzalloc(sizeof(struct pasemi_mac_buffer) *
192 TX_RING_SIZE, GFP_KERNEL);
193 if (!ring->desc_info)
194 goto out_desc_info;
195
196 /* Allocate descriptors */
197 ring->desc = dma_alloc_coherent(&mac->dma_pdev->dev,
198 TX_RING_SIZE *
199 sizeof(struct pas_dma_xct_descr),
200 &ring->dma, GFP_KERNEL);
201 if (!ring->desc)
202 goto out_desc;
203
204 memset(ring->desc, 0, TX_RING_SIZE * sizeof(struct pas_dma_xct_descr));
205
206 pci_write_config_dword(mac->dma_pdev, PAS_DMA_TXCHAN_BASEL(chan_id),
207 PAS_DMA_TXCHAN_BASEL_BRBL(ring->dma));
208 val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->dma >> 32);
209 val |= PAS_DMA_TXCHAN_BASEU_SIZ(TX_RING_SIZE >> 2);
210
211 pci_write_config_dword(mac->dma_pdev, PAS_DMA_TXCHAN_BASEU(chan_id), val);
212
213 pci_write_config_dword(mac->dma_pdev, PAS_DMA_TXCHAN_CFG(chan_id),
214 PAS_DMA_TXCHAN_CFG_TY_IFACE |
215 PAS_DMA_TXCHAN_CFG_TATTR(mac->dma_if) |
216 PAS_DMA_TXCHAN_CFG_UP |
217 PAS_DMA_TXCHAN_CFG_WT(2));
218
219 ring->next_to_use = 0;
220 ring->next_to_clean = 0;
221
222 snprintf(ring->irq_name, sizeof(ring->irq_name),
223 "%s tx", dev->name);
224 mac->tx = ring;
225
226 return 0;
227
228out_desc:
229 kfree(ring->desc_info);
230out_desc_info:
231 kfree(ring);
232out_ring:
233 return -ENOMEM;
234}
235
236static void pasemi_mac_free_tx_resources(struct net_device *dev)
237{
238 struct pasemi_mac *mac = netdev_priv(dev);
239 unsigned int i;
240 struct pasemi_mac_buffer *info;
241 struct pas_dma_xct_descr *dp;
242
243 for (i = 0; i < TX_RING_SIZE; i++) {
244 info = &TX_DESC_INFO(mac, i);
245 dp = &TX_DESC(mac, i);
246 if (info->dma) {
247 if (info->skb) {
248 pci_unmap_single(mac->dma_pdev,
249 info->dma,
250 info->skb->len,
251 PCI_DMA_TODEVICE);
252 dev_kfree_skb_any(info->skb);
253 }
254 info->dma = 0;
255 info->skb = NULL;
256 dp->mactx = 0;
257 dp->ptr = 0;
258 }
259 }
260
261 dma_free_coherent(&mac->dma_pdev->dev,
262 TX_RING_SIZE * sizeof(struct pas_dma_xct_descr),
263 mac->tx->desc, mac->tx->dma);
264
265 kfree(mac->tx->desc_info);
266 kfree(mac->tx);
267 mac->tx = NULL;
268}
269
270static void pasemi_mac_free_rx_resources(struct net_device *dev)
271{
272 struct pasemi_mac *mac = netdev_priv(dev);
273 unsigned int i;
274 struct pasemi_mac_buffer *info;
275 struct pas_dma_xct_descr *dp;
276
277 for (i = 0; i < RX_RING_SIZE; i++) {
278 info = &RX_DESC_INFO(mac, i);
279 dp = &RX_DESC(mac, i);
280 if (info->dma) {
281 if (info->skb) {
282 pci_unmap_single(mac->dma_pdev,
283 info->dma,
284					 BUF_SIZE,
285 PCI_DMA_FROMDEVICE);
286 dev_kfree_skb_any(info->skb);
287 }
288 info->dma = 0;
289 info->skb = NULL;
290 dp->macrx = 0;
291 dp->ptr = 0;
292 }
293 }
294
295 dma_free_coherent(&mac->dma_pdev->dev,
296 RX_RING_SIZE * sizeof(struct pas_dma_xct_descr),
297 mac->rx->desc, mac->rx->dma);
298
299 dma_free_coherent(&mac->dma_pdev->dev, RX_RING_SIZE * sizeof(u64),
300 mac->rx->buffers, mac->rx->buf_dma);
301
302 kfree(mac->rx->desc_info);
303 kfree(mac->rx);
304 mac->rx = NULL;
305}
306
307static void pasemi_mac_replenish_rx_ring(struct net_device *dev)
308{
309 struct pasemi_mac *mac = netdev_priv(dev);
310 unsigned int i;
311 int start = mac->rx->next_to_fill;
312 unsigned int count;
313
314 count = (mac->rx->next_to_clean + RX_RING_SIZE -
315 mac->rx->next_to_fill) & (RX_RING_SIZE - 1);
316
317 /* Check to see if we're doing first-time setup */
318 if (unlikely(mac->rx->next_to_clean == 0 && mac->rx->next_to_fill == 0))
319 count = RX_RING_SIZE;
320
321 if (count <= 0)
322 return;
323
324 for (i = start; i < start + count; i++) {
325 struct pasemi_mac_buffer *info = &RX_DESC_INFO(mac, i);
326 u64 *buff = &RX_BUFF(mac, i);
327 struct sk_buff *skb;
328 dma_addr_t dma;
329
330 skb = dev_alloc_skb(BUF_SIZE);
331
332 if (!skb) {
333 count = i - start;
334 break;
335 }
336
337 skb->dev = dev;
338
339		dma = pci_map_single(mac->dma_pdev, skb->data, BUF_SIZE,
340 PCI_DMA_FROMDEVICE);
341
342 if (dma_mapping_error(dma)) {
343			dev_kfree_skb_irq(skb);
344 count = i - start;
345 break;
346 }
347
348 info->skb = skb;
349 info->dma = dma;
350 *buff = XCT_RXB_LEN(BUF_SIZE) | XCT_RXB_ADDR(dma);
351 }
352
353 wmb();
354
355 pci_write_config_dword(mac->dma_pdev,
356 PAS_DMA_RXCHAN_INCR(mac->dma_rxch),
357 count);
358 pci_write_config_dword(mac->dma_pdev,
359 PAS_DMA_RXINT_INCR(mac->dma_if),
360 count);
361
362 mac->rx->next_to_fill += count;
363}
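
/*
 * Worked example for the free-slot computation at the top of
 * pasemi_mac_replenish_rx_ring(), assuming RX_RING_SIZE == 512:
 * with next_to_clean == 10 and next_to_fill == 500,
 * (10 + 512 - 500) & 511 == 22, so 22 fresh buffers can be posted
 * before the fill index catches up with the clean index.
 */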
364
365static int pasemi_mac_clean_rx(struct pasemi_mac *mac, int limit)
366{
367 unsigned int i;
368 int start, count;
369
370 spin_lock(&mac->rx->lock);
371
372 start = mac->rx->next_to_clean;
373 count = 0;
374
375 for (i = start; i < (start + RX_RING_SIZE) && count < limit; i++) {
376 struct pas_dma_xct_descr *dp;
377 struct pasemi_mac_buffer *info;
378 struct sk_buff *skb;
379 unsigned int j, len;
380 dma_addr_t dma;
381
382 rmb();
383
384 dp = &RX_DESC(mac, i);
385
386 if (!(dp->macrx & XCT_MACRX_O))
387 break;
388
389 count++;
390
391 info = NULL;
392
393 /* We have to scan for our skb since there's no way
394 * to back-map them from the descriptor, and if we
395 * have several receive channels then they might not
396 * show up in the same order as they were put on the
397 * interface ring.
398 */
399
400 dma = (dp->ptr & XCT_PTR_ADDR_M);
401 for (j = start; j < (start + RX_RING_SIZE); j++) {
402 info = &RX_DESC_INFO(mac, j);
403 if (info->dma == dma)
404 break;
405 }
406
407 BUG_ON(!info);
408 BUG_ON(info->dma != dma);
409
410		pci_unmap_single(mac->dma_pdev, info->dma, BUF_SIZE,
411 PCI_DMA_FROMDEVICE);
412
413 skb = info->skb;
414
415 len = (dp->macrx & XCT_MACRX_LLEN_M) >> XCT_MACRX_LLEN_S;
416
417 skb_put(skb, len);
418
419 skb->protocol = eth_type_trans(skb, mac->netdev);
420
421 if ((dp->macrx & XCT_MACRX_HTY_M) == XCT_MACRX_HTY_IPV4_OK) {
422 skb->ip_summed = CHECKSUM_COMPLETE;
423 skb->csum = (dp->macrx & XCT_MACRX_CSUM_M) >>
424 XCT_MACRX_CSUM_S;
425 } else
426 skb->ip_summed = CHECKSUM_NONE;
427
428 mac->stats.rx_bytes += len;
429 mac->stats.rx_packets++;
430
431 netif_receive_skb(skb);
432
433 info->dma = 0;
434 info->skb = NULL;
435 dp->ptr = 0;
436 dp->macrx = 0;
437 }
438
439 mac->rx->next_to_clean += count;
440 pasemi_mac_replenish_rx_ring(mac->netdev);
441
442 spin_unlock(&mac->rx->lock);
443
444 return count;
445}
446
447static int pasemi_mac_clean_tx(struct pasemi_mac *mac)
448{
449 int i;
450 struct pasemi_mac_buffer *info;
451 struct pas_dma_xct_descr *dp;
452 int start, count;
453	unsigned long flags;
454
455 spin_lock_irqsave(&mac->tx->lock, flags);
456
457 start = mac->tx->next_to_clean;
458 count = 0;
459
460 for (i = start; i < mac->tx->next_to_use; i++) {
461 dp = &TX_DESC(mac, i);
462 if (!dp || (dp->mactx & XCT_MACTX_O))
463 break;
464
465 count++;
466
467 info = &TX_DESC_INFO(mac, i);
468
469 pci_unmap_single(mac->dma_pdev, info->dma,
470 info->skb->len, PCI_DMA_TODEVICE);
471 dev_kfree_skb_irq(info->skb);
472
473 info->skb = NULL;
474 info->dma = 0;
475 dp->mactx = 0;
476 dp->ptr = 0;
477 }
478 mac->tx->next_to_clean += count;
479 spin_unlock_irqrestore(&mac->tx->lock, flags);
480
481 return count;
482}
483
484
485static irqreturn_t pasemi_mac_rx_intr(int irq, void *data)
486{
487 struct net_device *dev = data;
488 struct pasemi_mac *mac = netdev_priv(dev);
489 unsigned int reg;
490
491 if (!(*mac->rx_status & PAS_STATUS_INT))
492 return IRQ_NONE;
493
494 netif_rx_schedule(dev);
495 pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_COM_TIMEOUTCFG,
496 PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(0));
497
498 reg = PAS_IOB_DMA_RXCH_RESET_PINTC | PAS_IOB_DMA_RXCH_RESET_SINTC |
499 PAS_IOB_DMA_RXCH_RESET_DINTC;
500 if (*mac->rx_status & PAS_STATUS_TIMER)
501 reg |= PAS_IOB_DMA_RXCH_RESET_TINTC;
502
503 pci_write_config_dword(mac->iob_pdev,
504 PAS_IOB_DMA_RXCH_RESET(mac->dma_rxch), reg);
505
506
507 return IRQ_HANDLED;
508}
509
510static irqreturn_t pasemi_mac_tx_intr(int irq, void *data)
511{
512 struct net_device *dev = data;
513 struct pasemi_mac *mac = netdev_priv(dev);
514 unsigned int reg;
515 int was_full;
516
517	was_full = mac->tx->next_to_use - mac->tx->next_to_clean == TX_RING_SIZE;
518
519 if (!(*mac->tx_status & PAS_STATUS_INT))
520 return IRQ_NONE;
521
522 pasemi_mac_clean_tx(mac);
523
524 reg = PAS_IOB_DMA_TXCH_RESET_PINTC | PAS_IOB_DMA_TXCH_RESET_SINTC;
525 if (*mac->tx_status & PAS_STATUS_TIMER)
526 reg |= PAS_IOB_DMA_TXCH_RESET_TINTC;
527
528 pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_TXCH_RESET(mac->dma_txch),
529 reg);
530
531 if (was_full)
532 netif_wake_queue(dev);
533
534 return IRQ_HANDLED;
535}
536
537static int pasemi_mac_open(struct net_device *dev)
538{
539 struct pasemi_mac *mac = netdev_priv(dev);
540 unsigned int flags;
541 int ret;
542
543 /* enable rx section */
544 pci_write_config_dword(mac->dma_pdev, PAS_DMA_COM_RXCMD,
545 PAS_DMA_COM_RXCMD_EN);
546
547 /* enable tx section */
548 pci_write_config_dword(mac->dma_pdev, PAS_DMA_COM_TXCMD,
549 PAS_DMA_COM_TXCMD_EN);
550
551 flags = PAS_MAC_CFG_TXP_FCE | PAS_MAC_CFG_TXP_FPC(3) |
552 PAS_MAC_CFG_TXP_SL(3) | PAS_MAC_CFG_TXP_COB(0xf) |
553 PAS_MAC_CFG_TXP_TIFT(8) | PAS_MAC_CFG_TXP_TIFG(12);
554
555 pci_write_config_dword(mac->pdev, PAS_MAC_CFG_TXP, flags);
556
557 flags = PAS_MAC_CFG_PCFG_S1 | PAS_MAC_CFG_PCFG_PE |
558 PAS_MAC_CFG_PCFG_PR | PAS_MAC_CFG_PCFG_CE;
559
560 flags |= PAS_MAC_CFG_PCFG_TSR_1G | PAS_MAC_CFG_PCFG_SPD_1G;
561
562 pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_RXCH_CFG(mac->dma_rxch),
563 PAS_IOB_DMA_RXCH_CFG_CNTTH(30));
564
565 pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_COM_TIMEOUTCFG,
566 PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(1000000));
567
568 pci_write_config_dword(mac->pdev, PAS_MAC_CFG_PCFG, flags);
569
570 ret = pasemi_mac_setup_rx_resources(dev);
571 if (ret)
572 goto out_rx_resources;
573
574 ret = pasemi_mac_setup_tx_resources(dev);
575 if (ret)
576 goto out_tx_resources;
577
578 pci_write_config_dword(mac->pdev, PAS_MAC_IPC_CHNL,
579 PAS_MAC_IPC_CHNL_DCHNO(mac->dma_rxch) |
580 PAS_MAC_IPC_CHNL_BCH(mac->dma_rxch));
581
582 /* enable rx if */
583 pci_write_config_dword(mac->dma_pdev,
584 PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
585 PAS_DMA_RXINT_RCMDSTA_EN);
586
587 /* enable rx channel */
588 pci_write_config_dword(mac->dma_pdev,
589 PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch),
590 PAS_DMA_RXCHAN_CCMDSTA_EN |
591 PAS_DMA_RXCHAN_CCMDSTA_DU);
592
593 /* enable tx channel */
594 pci_write_config_dword(mac->dma_pdev,
595 PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch),
596 PAS_DMA_TXCHAN_TCMDSTA_EN);
597
598 pasemi_mac_replenish_rx_ring(dev);
599
600 netif_start_queue(dev);
601 netif_poll_enable(dev);
602
603 ret = request_irq(mac->dma_pdev->irq + mac->dma_txch,
604 &pasemi_mac_tx_intr, IRQF_DISABLED,
605 mac->tx->irq_name, dev);
606 if (ret) {
607 dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
608 mac->dma_pdev->irq + mac->dma_txch, ret);
609 goto out_tx_int;
610 }
611
612 ret = request_irq(mac->dma_pdev->irq + 20 + mac->dma_rxch,
613 &pasemi_mac_rx_intr, IRQF_DISABLED,
614 mac->rx->irq_name, dev);
615 if (ret) {
616 dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
617 mac->dma_pdev->irq + 20 + mac->dma_rxch, ret);
618 goto out_rx_int;
619 }
620
621 return 0;
622
623out_rx_int:
624 free_irq(mac->dma_pdev->irq + mac->dma_txch, dev);
625out_tx_int:
626 netif_poll_disable(dev);
627 netif_stop_queue(dev);
628 pasemi_mac_free_tx_resources(dev);
629out_tx_resources:
630 pasemi_mac_free_rx_resources(dev);
631out_rx_resources:
632
633 return ret;
634}
635
636#define MAX_RETRIES 5000
637
638static int pasemi_mac_close(struct net_device *dev)
639{
640 struct pasemi_mac *mac = netdev_priv(dev);
641 unsigned int stat;
642 int retries;
643
644 netif_stop_queue(dev);
645
646 /* Clean out any pending buffers */
647 pasemi_mac_clean_tx(mac);
648 pasemi_mac_clean_rx(mac, RX_RING_SIZE);
649
650 /* Disable interface */
651 pci_write_config_dword(mac->dma_pdev,
652 PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch),
653 PAS_DMA_TXCHAN_TCMDSTA_ST);
654 pci_write_config_dword(mac->dma_pdev,
655 PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
656 PAS_DMA_RXINT_RCMDSTA_ST);
657 pci_write_config_dword(mac->dma_pdev,
658 PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch),
659 PAS_DMA_RXCHAN_CCMDSTA_ST);
660
661 for (retries = 0; retries < MAX_RETRIES; retries++) {
662 pci_read_config_dword(mac->dma_pdev,
663 PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch),
664 &stat);
665 if (stat & PAS_DMA_TXCHAN_TCMDSTA_ACT)
666 break;
667 cond_resched();
668 }
669
670 if (!(stat & PAS_DMA_TXCHAN_TCMDSTA_ACT)) {
671 dev_err(&mac->dma_pdev->dev, "Failed to stop tx channel\n");
672 }
673
674 for (retries = 0; retries < MAX_RETRIES; retries++) {
675 pci_read_config_dword(mac->dma_pdev,
676 PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch),
677 &stat);
678 if (stat & PAS_DMA_RXCHAN_CCMDSTA_ACT)
679 break;
680 cond_resched();
681 }
682
683 if (!(stat & PAS_DMA_RXCHAN_CCMDSTA_ACT)) {
684 dev_err(&mac->dma_pdev->dev, "Failed to stop rx channel\n");
685 }
686
687 for (retries = 0; retries < MAX_RETRIES; retries++) {
688 pci_read_config_dword(mac->dma_pdev,
689 PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
690 &stat);
691 if (stat & PAS_DMA_RXINT_RCMDSTA_ACT)
692 break;
693 cond_resched();
694 }
695
696 if (!(stat & PAS_DMA_RXINT_RCMDSTA_ACT)) {
697 dev_err(&mac->dma_pdev->dev, "Failed to stop rx interface\n");
698 }
699
700 /* Then, disable the channel. This must be done separately from
701 * stopping, since you can't disable when active.
702 */
703
704 pci_write_config_dword(mac->dma_pdev,
705 PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch), 0);
706 pci_write_config_dword(mac->dma_pdev,
707 PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch), 0);
708 pci_write_config_dword(mac->dma_pdev,
709 PAS_DMA_RXINT_RCMDSTA(mac->dma_if), 0);
710
711 free_irq(mac->dma_pdev->irq + mac->dma_txch, dev);
712 free_irq(mac->dma_pdev->irq + 20 + mac->dma_rxch, dev);
713
714 /* Free resources */
715 pasemi_mac_free_rx_resources(dev);
716 pasemi_mac_free_tx_resources(dev);
717
718 return 0;
719}
720
721static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
722{
723 struct pasemi_mac *mac = netdev_priv(dev);
724 struct pasemi_mac_txring *txring;
725 struct pasemi_mac_buffer *info;
726 struct pas_dma_xct_descr *dp;
727 u64 dflags;
728 dma_addr_t map;
729	unsigned long flags;
730
731 dflags = XCT_MACTX_O | XCT_MACTX_ST | XCT_MACTX_SS | XCT_MACTX_CRC_PAD;
732
733 if (skb->ip_summed == CHECKSUM_PARTIAL) {
734 switch (skb->nh.iph->protocol) {
735 case IPPROTO_TCP:
736 dflags |= XCT_MACTX_CSUM_TCP;
737 dflags |= XCT_MACTX_IPH((skb->h.raw - skb->nh.raw) >> 2);
738 dflags |= XCT_MACTX_IPO(skb->nh.raw - skb->data);
739 break;
740 case IPPROTO_UDP:
741 dflags |= XCT_MACTX_CSUM_UDP;
742 dflags |= XCT_MACTX_IPH((skb->h.raw - skb->nh.raw) >> 2);
743 dflags |= XCT_MACTX_IPO(skb->nh.raw - skb->data);
744 break;
745 }
746 }
747
748 map = pci_map_single(mac->dma_pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
749
750 if (dma_mapping_error(map))
751 return NETDEV_TX_BUSY;
752
753 txring = mac->tx;
754
755 spin_lock_irqsave(&txring->lock, flags);
756
757	if (txring->next_to_use - txring->next_to_clean == TX_RING_SIZE) {
758 spin_unlock_irqrestore(&txring->lock, flags);
759 pasemi_mac_clean_tx(mac);
760 spin_lock_irqsave(&txring->lock, flags);
761
762		if (txring->next_to_use - txring->next_to_clean ==
763						TX_RING_SIZE) {
764 /* Still no room -- stop the queue and wait for tx
765 * intr when there's room.
766 */
767 netif_stop_queue(dev);
768 goto out_err;
769 }
770 }
771
772
773 dp = &TX_DESC(mac, txring->next_to_use);
774 info = &TX_DESC_INFO(mac, txring->next_to_use);
775
776 dp->mactx = dflags | XCT_MACTX_LLEN(skb->len);
777 dp->ptr = XCT_PTR_LEN(skb->len) | XCT_PTR_ADDR(map);
778 info->dma = map;
779 info->skb = skb;
780
781 txring->next_to_use++;
782 mac->stats.tx_packets++;
783 mac->stats.tx_bytes += skb->len;
784
785 spin_unlock_irqrestore(&txring->lock, flags);
786
787 pci_write_config_dword(mac->dma_pdev,
788 PAS_DMA_TXCHAN_INCR(mac->dma_txch), 1);
789
790 return NETDEV_TX_OK;
791
792out_err:
793 spin_unlock_irqrestore(&txring->lock, flags);
794 pci_unmap_single(mac->dma_pdev, map, skb->len, PCI_DMA_TODEVICE);
795 return NETDEV_TX_BUSY;
796}
797
798static struct net_device_stats *pasemi_mac_get_stats(struct net_device *dev)
799{
800 struct pasemi_mac *mac = netdev_priv(dev);
801
802 return &mac->stats;
803}
804
805static void pasemi_mac_set_rx_mode(struct net_device *dev)
806{
807 struct pasemi_mac *mac = netdev_priv(dev);
808 unsigned int flags;
809
810 pci_read_config_dword(mac->pdev, PAS_MAC_CFG_PCFG, &flags);
811
812 /* Set promiscuous */
813 if (dev->flags & IFF_PROMISC)
814 flags |= PAS_MAC_CFG_PCFG_PR;
815 else
816 flags &= ~PAS_MAC_CFG_PCFG_PR;
817
818 pci_write_config_dword(mac->pdev, PAS_MAC_CFG_PCFG, flags);
819}
820
821
822static int pasemi_mac_poll(struct net_device *dev, int *budget)
823{
824 int pkts, limit = min(*budget, dev->quota);
825 struct pasemi_mac *mac = netdev_priv(dev);
826
827 pkts = pasemi_mac_clean_rx(mac, limit);
828
829 if (pkts < limit) {
830 /* all done, no more packets present */
831 netif_rx_complete(dev);
832
833 /* re-enable receive interrupts */
834 pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_COM_TIMEOUTCFG,
835 PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(1000000));
836 return 0;
837 } else {
838 /* used up our quantum, so reschedule */
839 dev->quota -= pkts;
840 *budget -= pkts;
841 return 1;
842 }
843}
844
845static int __devinit
846pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
847{
848 static int index = 0;
849 struct net_device *dev;
850 struct pasemi_mac *mac;
851 int err;
852
853 err = pci_enable_device(pdev);
854 if (err)
855 return err;
856
857 dev = alloc_etherdev(sizeof(struct pasemi_mac));
858 if (dev == NULL) {
859 dev_err(&pdev->dev,
860 "pasemi_mac: Could not allocate ethernet device.\n");
861 err = -ENOMEM;
862 goto out_disable_device;
863 }
864
865 SET_MODULE_OWNER(dev);
866 pci_set_drvdata(pdev, dev);
867 SET_NETDEV_DEV(dev, &pdev->dev);
868
869 mac = netdev_priv(dev);
870
871 mac->pdev = pdev;
872 mac->netdev = dev;
873 mac->dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL);
874
875 if (!mac->dma_pdev) {
876 dev_err(&pdev->dev, "Can't find DMA Controller\n");
877 err = -ENODEV;
878 goto out_free_netdev;
879 }
880
881 mac->iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL);
882
883 if (!mac->iob_pdev) {
884 dev_err(&pdev->dev, "Can't find I/O Bridge\n");
885 err = -ENODEV;
886 goto out_put_dma_pdev;
887 }
888
889 /* These should come out of the device tree eventually */
890 mac->dma_txch = index;
891 mac->dma_rxch = index;
892
893 /* We probe GMAC before XAUI, but the DMA interfaces are
894 * in XAUI, GMAC order.
895 */
896 if (index < 4)
897 mac->dma_if = index + 2;
898 else
899 mac->dma_if = index - 4;
900 index++;
901
902 switch (pdev->device) {
903 case 0xa005:
904 mac->type = MAC_TYPE_GMAC;
905 break;
906 case 0xa006:
907 mac->type = MAC_TYPE_XAUI;
908 break;
909 default:
910 err = -ENODEV;
911 goto out;
912 }
913
914 /* get mac addr from device tree */
915 if (pasemi_get_mac_addr(mac) || !is_valid_ether_addr(mac->mac_addr)) {
916 err = -ENODEV;
917 goto out;
918 }
919 memcpy(dev->dev_addr, mac->mac_addr, sizeof(mac->mac_addr));
920
921 dev->open = pasemi_mac_open;
922 dev->stop = pasemi_mac_close;
923 dev->hard_start_xmit = pasemi_mac_start_tx;
924 dev->get_stats = pasemi_mac_get_stats;
925 dev->set_multicast_list = pasemi_mac_set_rx_mode;
926 dev->weight = 64;
927 dev->poll = pasemi_mac_poll;
928 dev->features = NETIF_F_HW_CSUM;
929
930 /* The dma status structure is located in the I/O bridge, and
931 * is cache coherent.
932 */
933 if (!dma_status)
934 /* XXXOJN This should come from the device tree */
935 dma_status = __ioremap(0xfd800000, 0x1000, 0);
936
937 mac->rx_status = &dma_status->rx_sta[mac->dma_rxch];
938 mac->tx_status = &dma_status->tx_sta[mac->dma_txch];
939
940 err = register_netdev(dev);
941
942 if (err) {
943 dev_err(&mac->pdev->dev, "register_netdev failed with error %d\n",
944 err);
945 goto out;
946 } else
947 printk(KERN_INFO "%s: PA Semi %s: intf %d, txch %d, rxch %d, "
948 "hw addr %02x:%02x:%02x:%02x:%02x:%02x\n",
949 dev->name, mac->type == MAC_TYPE_GMAC ? "GMAC" : "XAUI",
950 mac->dma_if, mac->dma_txch, mac->dma_rxch,
951 dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
952 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
953
954 return err;
955
956out:
957 pci_dev_put(mac->iob_pdev);
958out_put_dma_pdev:
959 pci_dev_put(mac->dma_pdev);
960out_free_netdev:
961 free_netdev(dev);
962out_disable_device:
963 pci_disable_device(pdev);
964 return err;
965
966}
967
968static void __devexit pasemi_mac_remove(struct pci_dev *pdev)
969{
970 struct net_device *netdev = pci_get_drvdata(pdev);
971 struct pasemi_mac *mac;
972
973 if (!netdev)
974 return;
975
976 mac = netdev_priv(netdev);
977
978 unregister_netdev(netdev);
979
980 pci_disable_device(pdev);
981 pci_dev_put(mac->dma_pdev);
982 pci_dev_put(mac->iob_pdev);
983
984 pci_set_drvdata(pdev, NULL);
985 free_netdev(netdev);
986}
987
988static struct pci_device_id pasemi_mac_pci_tbl[] = {
989 { PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa005) },
990 { PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa006) },
991	{ },	/* required last entry */
992};
992
993MODULE_DEVICE_TABLE(pci, pasemi_mac_pci_tbl);
994
995static struct pci_driver pasemi_mac_driver = {
996 .name = "pasemi_mac",
997 .id_table = pasemi_mac_pci_tbl,
998 .probe = pasemi_mac_probe,
999 .remove = __devexit_p(pasemi_mac_remove),
1000};
1001
1002static void __exit pasemi_mac_cleanup_module(void)
1003{
1004 pci_unregister_driver(&pasemi_mac_driver);
1005	if (dma_status)
		__iounmap(dma_status);
1006 dma_status = NULL;
1007}
1008
1009static int __init pasemi_mac_init_module(void)
1010{
1011 return pci_register_driver(&pasemi_mac_driver);
1012}
1013
1014MODULE_LICENSE("GPL");
1015MODULE_AUTHOR ("Olof Johansson <olof@lixom.net>");
1016MODULE_DESCRIPTION("PA Semi PWRficient Ethernet driver");
1017
1018module_init(pasemi_mac_init_module);
1019module_exit(pasemi_mac_cleanup_module);
diff --git a/drivers/net/pasemi_mac.h b/drivers/net/pasemi_mac.h
new file mode 100644
index 000000000000..c3e37e46a18a
--- /dev/null
+++ b/drivers/net/pasemi_mac.h
@@ -0,0 +1,460 @@
1/*
2 * Copyright (C) 2006 PA Semi, Inc
3 *
4 * Driver for the PA6T-1682M onchip 1G/10G Ethernet MACs, soft state and
5 * hardware register layouts.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#ifndef PASEMI_MAC_H
22#define PASEMI_MAC_H
23
24#include <linux/ethtool.h>
25#include <linux/netdevice.h>
26#include <linux/spinlock.h>
27
28struct pasemi_mac_txring {
29 spinlock_t lock;
30 struct pas_dma_xct_descr *desc;
31 dma_addr_t dma;
32 unsigned int size;
33 unsigned int next_to_use;
34 unsigned int next_to_clean;
35 struct pasemi_mac_buffer *desc_info;
36 char irq_name[10]; /* "eth%d tx" */
37};
38
39struct pasemi_mac_rxring {
40 spinlock_t lock;
41 struct pas_dma_xct_descr *desc; /* RX channel descriptor ring */
42 dma_addr_t dma;
43 u64 *buffers; /* RX interface buffer ring */
44 dma_addr_t buf_dma;
45 unsigned int size;
46 unsigned int next_to_fill;
47 unsigned int next_to_clean;
48 struct pasemi_mac_buffer *desc_info;
49 char irq_name[10]; /* "eth%d rx" */
50};
51
52struct pasemi_mac {
53 struct net_device *netdev;
54 struct pci_dev *pdev;
55 struct pci_dev *dma_pdev;
56 struct pci_dev *iob_pdev;
57 struct net_device_stats stats;
58
59 /* Pointer to the cacheable per-channel status registers */
60 u64 *rx_status;
61 u64 *tx_status;
62
63 u8 type;
64#define MAC_TYPE_GMAC 1
65#define MAC_TYPE_XAUI 2
66 u32 dma_txch;
67 u32 dma_if;
68 u32 dma_rxch;
69
70 u8 mac_addr[6];
71
72 struct timer_list rxtimer;
73
74 struct pasemi_mac_txring *tx;
75 struct pasemi_mac_rxring *rx;
76};
77
78/* Software status descriptor (desc_info) */
79struct pasemi_mac_buffer {
80 struct sk_buff *skb;
81 dma_addr_t dma;
82};
83
84
85/* status register layout in IOB region, at 0xfb800000 */
86struct pasdma_status {
87 u64 rx_sta[64];
88 u64 tx_sta[20];
89};
90
91/* descriptor structure */
92struct pas_dma_xct_descr {
93 union {
94 u64 mactx;
95 u64 macrx;
96 };
97 union {
98 u64 ptr;
99 u64 rxb;
100 };
101};
102
103/* MAC CFG register offsets */
104
105enum {
106 PAS_MAC_CFG_PCFG = 0x80,
107 PAS_MAC_CFG_TXP = 0x98,
108 PAS_MAC_IPC_CHNL = 0x208,
109};
110
111/* MAC CFG register fields */
112#define PAS_MAC_CFG_PCFG_PE 0x80000000
113#define PAS_MAC_CFG_PCFG_CE 0x40000000
114#define PAS_MAC_CFG_PCFG_BU 0x20000000
115#define PAS_MAC_CFG_PCFG_TT 0x10000000
116#define PAS_MAC_CFG_PCFG_TSR_M 0x0c000000
117#define PAS_MAC_CFG_PCFG_TSR_10M 0x00000000
118#define PAS_MAC_CFG_PCFG_TSR_100M 0x04000000
119#define PAS_MAC_CFG_PCFG_TSR_1G 0x08000000
120#define PAS_MAC_CFG_PCFG_TSR_10G 0x0c000000
121#define PAS_MAC_CFG_PCFG_T24 0x02000000
122#define PAS_MAC_CFG_PCFG_PR 0x01000000
123#define PAS_MAC_CFG_PCFG_CRO_M 0x00ff0000
124#define PAS_MAC_CFG_PCFG_CRO_S 16
125#define PAS_MAC_CFG_PCFG_IPO_M 0x0000ff00
126#define PAS_MAC_CFG_PCFG_IPO_S 8
127#define PAS_MAC_CFG_PCFG_S1 0x00000080
128#define PAS_MAC_CFG_PCFG_IO_M 0x00000060
129#define PAS_MAC_CFG_PCFG_IO_MAC 0x00000000
130#define PAS_MAC_CFG_PCFG_IO_OFF 0x00000020
131#define PAS_MAC_CFG_PCFG_IO_IND_ETH 0x00000040
132#define PAS_MAC_CFG_PCFG_IO_IND_IP 0x00000060
133#define PAS_MAC_CFG_PCFG_LP 0x00000010
134#define PAS_MAC_CFG_PCFG_TS 0x00000008
135#define PAS_MAC_CFG_PCFG_HD 0x00000004
136#define PAS_MAC_CFG_PCFG_SPD_M 0x00000003
137#define PAS_MAC_CFG_PCFG_SPD_10M 0x00000000
138#define PAS_MAC_CFG_PCFG_SPD_100M 0x00000001
139#define PAS_MAC_CFG_PCFG_SPD_1G 0x00000002
140#define PAS_MAC_CFG_PCFG_SPD_10G 0x00000003
141#define PAS_MAC_CFG_TXP_FCF 0x01000000
142#define PAS_MAC_CFG_TXP_FCE 0x00800000
143#define PAS_MAC_CFG_TXP_FC 0x00400000
144#define PAS_MAC_CFG_TXP_FPC_M 0x00300000
145#define PAS_MAC_CFG_TXP_FPC_S 20
146#define PAS_MAC_CFG_TXP_FPC(x) (((x) << PAS_MAC_CFG_TXP_FPC_S) & \
147 PAS_MAC_CFG_TXP_FPC_M)
148#define PAS_MAC_CFG_TXP_RT 0x00080000
149#define PAS_MAC_CFG_TXP_BL 0x00040000
150#define PAS_MAC_CFG_TXP_SL_M 0x00030000
151#define PAS_MAC_CFG_TXP_SL_S 16
152#define PAS_MAC_CFG_TXP_SL(x) (((x) << PAS_MAC_CFG_TXP_SL_S) & \
153 PAS_MAC_CFG_TXP_SL_M)
154#define PAS_MAC_CFG_TXP_COB_M 0x0000f000
155#define PAS_MAC_CFG_TXP_COB_S 12
156#define PAS_MAC_CFG_TXP_COB(x) (((x) << PAS_MAC_CFG_TXP_COB_S) & \
157 PAS_MAC_CFG_TXP_COB_M)
158#define PAS_MAC_CFG_TXP_TIFT_M 0x00000f00
159#define PAS_MAC_CFG_TXP_TIFT_S 8
160#define PAS_MAC_CFG_TXP_TIFT(x) (((x) << PAS_MAC_CFG_TXP_TIFT_S) & \
161 PAS_MAC_CFG_TXP_TIFT_M)
162#define PAS_MAC_CFG_TXP_TIFG_M 0x000000ff
163#define PAS_MAC_CFG_TXP_TIFG_S 0
164#define PAS_MAC_CFG_TXP_TIFG(x) (((x) << PAS_MAC_CFG_TXP_TIFG_S) & \
165 PAS_MAC_CFG_TXP_TIFG_M)
166
167#define PAS_MAC_IPC_CHNL_DCHNO_M 0x003f0000
168#define PAS_MAC_IPC_CHNL_DCHNO_S 16
169#define PAS_MAC_IPC_CHNL_DCHNO(x) (((x) << PAS_MAC_IPC_CHNL_DCHNO_S) & \
170 PAS_MAC_IPC_CHNL_DCHNO_M)
171#define PAS_MAC_IPC_CHNL_BCH_M 0x0000003f
172#define PAS_MAC_IPC_CHNL_BCH_S 0
173#define PAS_MAC_IPC_CHNL_BCH(x) (((x) << PAS_MAC_IPC_CHNL_BCH_S) & \
174 PAS_MAC_IPC_CHNL_BCH_M)
175
176/* All these registers live in the PCI configuration space for the DMA PCI
177 * device. Use the normal PCI config access functions for them.
178 */
179enum {
180 PAS_DMA_COM_TXCMD = 0x100, /* Transmit Command Register */
181 PAS_DMA_COM_TXSTA = 0x104, /* Transmit Status Register */
182 PAS_DMA_COM_RXCMD = 0x108, /* Receive Command Register */
183 PAS_DMA_COM_RXSTA = 0x10c, /* Receive Status Register */
184};
185#define PAS_DMA_COM_TXCMD_EN 0x00000001 /* enable */
186#define PAS_DMA_COM_TXSTA_ACT 0x00000001 /* active */
187#define PAS_DMA_COM_RXCMD_EN 0x00000001 /* enable */
188#define PAS_DMA_COM_RXSTA_ACT 0x00000001 /* active */
189
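/*
 * Illustrative sketch of the access pattern implied by the offsets
 * above: the DMA engine is programmed through ordinary PCI config
 * cycles rather than ioremap()ed MMIO.  Hypothetical helper; the
 * driver open-codes these calls in pasemi_mac_open().
 */
static inline void pas_dma_com_enable(struct pci_dev *dma_pdev)
{
	pci_write_config_dword(dma_pdev, PAS_DMA_COM_TXCMD,
			       PAS_DMA_COM_TXCMD_EN);
	pci_write_config_dword(dma_pdev, PAS_DMA_COM_RXCMD,
			       PAS_DMA_COM_RXCMD_EN);
}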
190
191/* Per-interface and per-channel registers */
192#define _PAS_DMA_RXINT_STRIDE 0x20
193#define PAS_DMA_RXINT_RCMDSTA(i) (0x200+(i)*_PAS_DMA_RXINT_STRIDE)
194#define PAS_DMA_RXINT_RCMDSTA_EN 0x00000001
195#define PAS_DMA_RXINT_RCMDSTA_ST 0x00000002
196#define PAS_DMA_RXINT_RCMDSTA_OO 0x00000100
197#define PAS_DMA_RXINT_RCMDSTA_BP 0x00000200
198#define PAS_DMA_RXINT_RCMDSTA_DR 0x00000400
199#define PAS_DMA_RXINT_RCMDSTA_BT 0x00000800
200#define PAS_DMA_RXINT_RCMDSTA_TB 0x00001000
201#define PAS_DMA_RXINT_RCMDSTA_ACT 0x00010000
202#define PAS_DMA_RXINT_RCMDSTA_DROPS_M 0xfffe0000
203#define PAS_DMA_RXINT_RCMDSTA_DROPS_S 17
204#define PAS_DMA_RXINT_INCR(i) (0x210+(i)*_PAS_DMA_RXINT_STRIDE)
205#define PAS_DMA_RXINT_INCR_INCR_M 0x0000ffff
206#define PAS_DMA_RXINT_INCR_INCR_S 0
207#define PAS_DMA_RXINT_INCR_INCR(x) ((x) & 0x0000ffff)
208#define PAS_DMA_RXINT_BASEL(i) (0x218+(i)*_PAS_DMA_RXINT_STRIDE)
209#define PAS_DMA_RXINT_BASEL_BRBL(x) ((x) & ~0x3f)
210#define PAS_DMA_RXINT_BASEU(i) (0x21c+(i)*_PAS_DMA_RXINT_STRIDE)
211#define PAS_DMA_RXINT_BASEU_BRBH(x) ((x) & 0xfff)
212#define PAS_DMA_RXINT_BASEU_SIZ_M 0x3fff0000 /* # of cache lines worth of buffer ring */
213#define PAS_DMA_RXINT_BASEU_SIZ_S 16 /* 0 = 16K */
214#define PAS_DMA_RXINT_BASEU_SIZ(x) (((x) << PAS_DMA_RXINT_BASEU_SIZ_S) & \
215 PAS_DMA_RXINT_BASEU_SIZ_M)
216
217
218#define _PAS_DMA_TXCHAN_STRIDE 0x20 /* Size per channel */
219#define _PAS_DMA_TXCHAN_TCMDSTA 0x300 /* Command / Status */
220#define _PAS_DMA_TXCHAN_CFG 0x304 /* Configuration */
221#define _PAS_DMA_TXCHAN_DSCRBU 0x308 /* Descriptor BU Allocation */
222#define _PAS_DMA_TXCHAN_INCR 0x310 /* Descriptor increment */
223#define _PAS_DMA_TXCHAN_CNT 0x314 /* Descriptor count/offset */
224#define _PAS_DMA_TXCHAN_BASEL 0x318 /* Descriptor ring base (low) */
225#define _PAS_DMA_TXCHAN_BASEU 0x31c /* (high) */
226#define PAS_DMA_TXCHAN_TCMDSTA(c) (0x300+(c)*_PAS_DMA_TXCHAN_STRIDE)
227#define PAS_DMA_TXCHAN_TCMDSTA_EN 0x00000001 /* Enabled */
228#define PAS_DMA_TXCHAN_TCMDSTA_ST 0x00000002 /* Stop interface */
229#define PAS_DMA_TXCHAN_TCMDSTA_ACT 0x00010000 /* Active */
230#define PAS_DMA_TXCHAN_CFG(c) (0x304+(c)*_PAS_DMA_TXCHAN_STRIDE)
231#define PAS_DMA_TXCHAN_CFG_TY_IFACE 0x00000000 /* Type = interface */
232#define PAS_DMA_TXCHAN_CFG_TATTR_M 0x0000003c
233#define PAS_DMA_TXCHAN_CFG_TATTR_S 2
234#define PAS_DMA_TXCHAN_CFG_TATTR(x) (((x) << PAS_DMA_TXCHAN_CFG_TATTR_S) & \
235 PAS_DMA_TXCHAN_CFG_TATTR_M)
236#define PAS_DMA_TXCHAN_CFG_WT_M 0x000001c0
237#define PAS_DMA_TXCHAN_CFG_WT_S 6
238#define PAS_DMA_TXCHAN_CFG_WT(x) (((x) << PAS_DMA_TXCHAN_CFG_WT_S) & \
239 PAS_DMA_TXCHAN_CFG_WT_M)
240#define PAS_DMA_TXCHAN_CFG_CF 0x00001000 /* Clean first line */
241#define PAS_DMA_TXCHAN_CFG_CL 0x00002000 /* Clean last line */
242#define PAS_DMA_TXCHAN_CFG_UP 0x00004000 /* update tx descr when sent */
243#define PAS_DMA_TXCHAN_INCR(c) (0x310+(c)*_PAS_DMA_TXCHAN_STRIDE)
244#define PAS_DMA_TXCHAN_BASEL(c) (0x318+(c)*_PAS_DMA_TXCHAN_STRIDE)
245#define PAS_DMA_TXCHAN_BASEL_BRBL_M 0xffffffc0
246#define PAS_DMA_TXCHAN_BASEL_BRBL_S 0
247#define PAS_DMA_TXCHAN_BASEL_BRBL(x) (((x) << PAS_DMA_TXCHAN_BASEL_BRBL_S) & \
248 PAS_DMA_TXCHAN_BASEL_BRBL_M)
249#define PAS_DMA_TXCHAN_BASEU(c) (0x31c+(c)*_PAS_DMA_TXCHAN_STRIDE)
250#define PAS_DMA_TXCHAN_BASEU_BRBH_M 0x00000fff
251#define PAS_DMA_TXCHAN_BASEU_BRBH_S 0
252#define PAS_DMA_TXCHAN_BASEU_BRBH(x) (((x) << PAS_DMA_TXCHAN_BASEU_BRBH_S) & \
253 PAS_DMA_TXCHAN_BASEU_BRBH_M)
254/* # of cache lines worth of buffer ring */
255#define PAS_DMA_TXCHAN_BASEU_SIZ_M 0x3fff0000
256#define PAS_DMA_TXCHAN_BASEU_SIZ_S 16 /* 0 = 16K */
257#define PAS_DMA_TXCHAN_BASEU_SIZ(x) (((x) << PAS_DMA_TXCHAN_BASEU_SIZ_S) & \
258 PAS_DMA_TXCHAN_BASEU_SIZ_M)
259
260#define _PAS_DMA_RXCHAN_STRIDE 0x20 /* Size per channel */
261#define _PAS_DMA_RXCHAN_CCMDSTA 0x800 /* Command / Status */
262#define _PAS_DMA_RXCHAN_CFG 0x804 /* Configuration */
263#define _PAS_DMA_RXCHAN_INCR 0x810 /* Descriptor increment */
264#define _PAS_DMA_RXCHAN_CNT 0x814 /* Descriptor count/offset */
265#define _PAS_DMA_RXCHAN_BASEL 0x818 /* Descriptor ring base (low) */
266#define _PAS_DMA_RXCHAN_BASEU 0x81c /* (high) */
267#define PAS_DMA_RXCHAN_CCMDSTA(c) (0x800+(c)*_PAS_DMA_RXCHAN_STRIDE)
268#define PAS_DMA_RXCHAN_CCMDSTA_EN 0x00000001 /* Enabled */
269#define PAS_DMA_RXCHAN_CCMDSTA_ST 0x00000002 /* Stop interface */
270#define PAS_DMA_RXCHAN_CCMDSTA_ACT 0x00010000 /* Active */
271#define PAS_DMA_RXCHAN_CCMDSTA_DU 0x00020000
272#define PAS_DMA_RXCHAN_CFG(c) (0x804+(c)*_PAS_DMA_RXCHAN_STRIDE)
273#define PAS_DMA_RXCHAN_CFG_HBU_M 0x00000380
274#define PAS_DMA_RXCHAN_CFG_HBU_S 7
275#define PAS_DMA_RXCHAN_CFG_HBU(x) (((x) << PAS_DMA_RXCHAN_CFG_HBU_S) & \
276 PAS_DMA_RXCHAN_CFG_HBU_M)
277#define PAS_DMA_RXCHAN_INCR(c) (0x810+(c)*_PAS_DMA_RXCHAN_STRIDE)
278#define PAS_DMA_RXCHAN_BASEL(c) (0x818+(c)*_PAS_DMA_RXCHAN_STRIDE)
279#define PAS_DMA_RXCHAN_BASEL_BRBL_M 0xffffffc0
280#define PAS_DMA_RXCHAN_BASEL_BRBL_S 0
281#define PAS_DMA_RXCHAN_BASEL_BRBL(x) (((x) << PAS_DMA_RXCHAN_BASEL_BRBL_S) & \
282 PAS_DMA_RXCHAN_BASEL_BRBL_M)
283#define PAS_DMA_RXCHAN_BASEU(c) (0x81c+(c)*_PAS_DMA_RXCHAN_STRIDE)
284#define PAS_DMA_RXCHAN_BASEU_BRBH_M 0x00000fff
285#define PAS_DMA_RXCHAN_BASEU_BRBH_S 0
286#define PAS_DMA_RXCHAN_BASEU_BRBH(x) (((x) << PAS_DMA_RXCHAN_BASEU_BRBH_S) & \
287 PAS_DMA_RXCHAN_BASEU_BRBH_M)
288/* # of cache lines worth of buffer ring */
289#define PAS_DMA_RXCHAN_BASEU_SIZ_M 0x3fff0000
290#define PAS_DMA_RXCHAN_BASEU_SIZ_S 16 /* 0 = 16K */
291#define PAS_DMA_RXCHAN_BASEU_SIZ(x) (((x) << PAS_DMA_RXCHAN_BASEU_SIZ_S) & \
292 PAS_DMA_RXCHAN_BASEU_SIZ_M)
293
294#define PAS_STATUS_PCNT_M 0x000000000000ffffull
295#define PAS_STATUS_PCNT_S 0
296#define PAS_STATUS_DCNT_M 0x00000000ffff0000ull
297#define PAS_STATUS_DCNT_S 16
298#define PAS_STATUS_BPCNT_M 0x0000ffff00000000ull
299#define PAS_STATUS_BPCNT_S 32
300#define PAS_STATUS_TIMER 0x1000000000000000ull
301#define PAS_STATUS_ERROR 0x2000000000000000ull
302#define PAS_STATUS_SOFT 0x4000000000000000ull
303#define PAS_STATUS_INT 0x8000000000000000ull
304
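/*
 * Sketch of decoding a per-channel status word with the fields above;
 * hypothetical helpers, the interrupt handlers test the bits directly.
 */
static inline unsigned int pas_status_pcnt(u64 sta)
{
	/* Packet count lives in bits 15:0 */
	return (sta & PAS_STATUS_PCNT_M) >> PAS_STATUS_PCNT_S;
}

static inline int pas_status_int_pending(u64 sta)
{
	return (sta & PAS_STATUS_INT) != 0;
}
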
305#define PAS_IOB_DMA_RXCH_CFG(i) (0x1100 + (i)*4)
306#define PAS_IOB_DMA_RXCH_CFG_CNTTH_M 0x00000fff
307#define PAS_IOB_DMA_RXCH_CFG_CNTTH_S 0
308#define PAS_IOB_DMA_RXCH_CFG_CNTTH(x) (((x) << PAS_IOB_DMA_RXCH_CFG_CNTTH_S) & \
309 PAS_IOB_DMA_RXCH_CFG_CNTTH_M)
310#define PAS_IOB_DMA_TXCH_CFG(i) (0x1200 + (i)*4)
311#define PAS_IOB_DMA_TXCH_CFG_CNTTH_M 0x00000fff
312#define PAS_IOB_DMA_TXCH_CFG_CNTTH_S 0
313#define PAS_IOB_DMA_TXCH_CFG_CNTTH(x) (((x) << PAS_IOB_DMA_TXCH_CFG_CNTTH_S) & \
314 PAS_IOB_DMA_TXCH_CFG_CNTTH_M)
315#define PAS_IOB_DMA_RXCH_STAT(i) (0x1300 + (i)*4)
316#define PAS_IOB_DMA_RXCH_STAT_INTGEN 0x00001000
317#define PAS_IOB_DMA_RXCH_STAT_CNTDEL_M 0x00000fff
318#define PAS_IOB_DMA_RXCH_STAT_CNTDEL_S 0
319#define PAS_IOB_DMA_RXCH_STAT_CNTDEL(x) (((x) << PAS_IOB_DMA_RXCH_STAT_CNTDEL_S) &\
320 PAS_IOB_DMA_RXCH_STAT_CNTDEL_M)
321#define PAS_IOB_DMA_TXCH_STAT(i) (0x1400 + (i)*4)
322#define PAS_IOB_DMA_TXCH_STAT_INTGEN 0x00001000
323#define PAS_IOB_DMA_TXCH_STAT_CNTDEL_M 0x00000fff
324#define PAS_IOB_DMA_TXCH_STAT_CNTDEL_S 0
325#define PAS_IOB_DMA_TXCH_STAT_CNTDEL(x) (((x) << PAS_IOB_DMA_TXCH_STAT_CNTDEL_S) &\
326 PAS_IOB_DMA_TXCH_STAT_CNTDEL_M)
327#define PAS_IOB_DMA_RXCH_RESET(i) (0x1500 + (i)*4)
328#define PAS_IOB_DMA_RXCH_RESET_PCNT_M 0xffff0000
329#define PAS_IOB_DMA_RXCH_RESET_PCNT_S 0
330#define PAS_IOB_DMA_RXCH_RESET_PCNT(x) (((x) << PAS_IOB_DMA_RXCH_RESET_PCNT_S) & \
331 PAS_IOB_DMA_RXCH_RESET_PCNT_M)
332#define PAS_IOB_DMA_RXCH_RESET_PCNTRST 0x00000020
333#define PAS_IOB_DMA_RXCH_RESET_DCNTRST 0x00000010
334#define PAS_IOB_DMA_RXCH_RESET_TINTC 0x00000008
335#define PAS_IOB_DMA_RXCH_RESET_DINTC 0x00000004
336#define PAS_IOB_DMA_RXCH_RESET_SINTC 0x00000002
337#define PAS_IOB_DMA_RXCH_RESET_PINTC 0x00000001
338#define PAS_IOB_DMA_TXCH_RESET(i) (0x1600 + (i)*4)
339#define PAS_IOB_DMA_TXCH_RESET_PCNT_M 0xffff0000
340#define PAS_IOB_DMA_TXCH_RESET_PCNT_S 0
341#define PAS_IOB_DMA_TXCH_RESET_PCNT(x) (((x) << PAS_IOB_DMA_TXCH_RESET_PCNT_S) & \
342 PAS_IOB_DMA_TXCH_RESET_PCNT_M)
343#define PAS_IOB_DMA_TXCH_RESET_PCNTRST 0x00000020
344#define PAS_IOB_DMA_TXCH_RESET_DCNTRST 0x00000010
345#define PAS_IOB_DMA_TXCH_RESET_TINTC 0x00000008
346#define PAS_IOB_DMA_TXCH_RESET_DINTC 0x00000004
347#define PAS_IOB_DMA_TXCH_RESET_SINTC 0x00000002
348#define PAS_IOB_DMA_TXCH_RESET_PINTC 0x00000001
349
350#define PAS_IOB_DMA_COM_TIMEOUTCFG 0x1700
351#define PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_M 0x00ffffff
352#define PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_S 0
353#define PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(x) (((x) << PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_S) & \
354 PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_M)
355
356/* Transmit descriptor fields */
357#define XCT_MACTX_T 0x8000000000000000ull
358#define XCT_MACTX_ST 0x4000000000000000ull
359#define XCT_MACTX_NORES 0x0000000000000000ull
360#define XCT_MACTX_8BRES 0x1000000000000000ull
361#define XCT_MACTX_24BRES 0x2000000000000000ull
362#define XCT_MACTX_40BRES 0x3000000000000000ull
363#define XCT_MACTX_I 0x0800000000000000ull
364#define XCT_MACTX_O 0x0400000000000000ull
365#define XCT_MACTX_E 0x0200000000000000ull
366#define XCT_MACTX_VLAN_M 0x0180000000000000ull
367#define XCT_MACTX_VLAN_NOP 0x0000000000000000ull
368#define XCT_MACTX_VLAN_REMOVE 0x0080000000000000ull
369#define XCT_MACTX_VLAN_INSERT 0x0100000000000000ull
370#define XCT_MACTX_VLAN_REPLACE 0x0180000000000000ull
371#define XCT_MACTX_CRC_M 0x0060000000000000ull
372#define XCT_MACTX_CRC_NOP 0x0000000000000000ull
373#define XCT_MACTX_CRC_INSERT 0x0020000000000000ull
374#define XCT_MACTX_CRC_PAD 0x0040000000000000ull
375#define XCT_MACTX_CRC_REPLACE 0x0060000000000000ull
376#define XCT_MACTX_SS 0x0010000000000000ull
377#define XCT_MACTX_LLEN_M 0x00007fff00000000ull
378#define XCT_MACTX_LLEN_S 32ull
379#define XCT_MACTX_LLEN(x) ((((long)(x)) << XCT_MACTX_LLEN_S) & \
380 XCT_MACTX_LLEN_M)
381#define XCT_MACTX_IPH_M 0x00000000f8000000ull
382#define XCT_MACTX_IPH_S 27ull
383#define XCT_MACTX_IPH(x) ((((long)(x)) << XCT_MACTX_IPH_S) & \
384 XCT_MACTX_IPH_M)
385#define XCT_MACTX_IPO_M 0x0000000007c00000ull
386#define XCT_MACTX_IPO_S 22ull
387#define XCT_MACTX_IPO(x) ((((long)(x)) << XCT_MACTX_IPO_S) & \
388 XCT_MACTX_IPO_M)
389#define XCT_MACTX_CSUM_M 0x0000000000000060ull
390#define XCT_MACTX_CSUM_NOP 0x0000000000000000ull
391#define XCT_MACTX_CSUM_TCP 0x0000000000000040ull
392#define XCT_MACTX_CSUM_UDP 0x0000000000000060ull
393#define XCT_MACTX_V6 0x0000000000000010ull
394#define XCT_MACTX_C 0x0000000000000004ull
395#define XCT_MACTX_AL2 0x0000000000000002ull
396
397/* Receive descriptor fields */
398#define XCT_MACRX_T 0x8000000000000000ull
399#define XCT_MACRX_ST 0x4000000000000000ull
400#define XCT_MACRX_NORES 0x0000000000000000ull
401#define XCT_MACRX_8BRES 0x1000000000000000ull
402#define XCT_MACRX_24BRES 0x2000000000000000ull
403#define XCT_MACRX_40BRES 0x3000000000000000ull
404#define XCT_MACRX_O 0x0400000000000000ull
405#define XCT_MACRX_E 0x0200000000000000ull
406#define XCT_MACRX_FF 0x0100000000000000ull
407#define XCT_MACRX_PF 0x0080000000000000ull
408#define XCT_MACRX_OB 0x0040000000000000ull
409#define XCT_MACRX_OD 0x0020000000000000ull
410#define XCT_MACRX_FS 0x0010000000000000ull
411#define XCT_MACRX_NB_M 0x000fc00000000000ull
412#define XCT_MACRX_NB_S 46ULL
413#define XCT_MACRX_NB(x) ((((long)(x)) << XCT_MACRX_NB_S) & \
414 XCT_MACRX_NB_M)
415#define XCT_MACRX_LLEN_M 0x00003fff00000000ull
416#define XCT_MACRX_LLEN_S 32ULL
417#define XCT_MACRX_LLEN(x) ((((long)(x)) << XCT_MACRX_LLEN_S) & \
418 XCT_MACRX_LLEN_M)
419#define XCT_MACRX_CRC 0x0000000080000000ull
420#define XCT_MACRX_LEN_M 0x0000000060000000ull
421#define XCT_MACRX_LEN_TOOSHORT 0x0000000020000000ull
422#define XCT_MACRX_LEN_BELOWMIN 0x0000000040000000ull
423#define XCT_MACRX_LEN_TRUNC 0x0000000060000000ull
424#define XCT_MACRX_CAST_M 0x0000000018000000ull
425#define XCT_MACRX_CAST_UNI 0x0000000000000000ull
426#define XCT_MACRX_CAST_MULTI 0x0000000008000000ull
427#define XCT_MACRX_CAST_BROAD 0x0000000010000000ull
428#define XCT_MACRX_CAST_PAUSE 0x0000000018000000ull
429#define XCT_MACRX_VLC_M 0x0000000006000000ull
430#define XCT_MACRX_FM 0x0000000001000000ull
431#define XCT_MACRX_HTY_M 0x0000000000c00000ull
432#define XCT_MACRX_HTY_IPV4_OK 0x0000000000000000ull
433#define XCT_MACRX_HTY_IPV6 0x0000000000400000ull
434#define XCT_MACRX_HTY_IPV4_BAD 0x0000000000800000ull
435#define XCT_MACRX_HTY_NONIP 0x0000000000c00000ull
436#define XCT_MACRX_IPP_M 0x00000000003f0000ull
437#define XCT_MACRX_IPP_S 16
438#define XCT_MACRX_CSUM_M 0x000000000000ffffull
439#define XCT_MACRX_CSUM_S 0
440
441#define XCT_PTR_T 0x8000000000000000ull
442#define XCT_PTR_LEN_M 0x7ffff00000000000ull
443#define XCT_PTR_LEN_S 44
444#define XCT_PTR_LEN(x) ((((long)(x)) << XCT_PTR_LEN_S) & \
445 XCT_PTR_LEN_M)
446#define XCT_PTR_ADDR_M 0x00000fffffffffffull
447#define XCT_PTR_ADDR_S 0
448#define XCT_PTR_ADDR(x) ((((long)(x)) << XCT_PTR_ADDR_S) & \
449 XCT_PTR_ADDR_M)
450
451/* Receive interface buffer fields */
452#define XCT_RXB_LEN_M 0x0ffff00000000000ull
453#define XCT_RXB_LEN_S 44
454#define XCT_RXB_LEN(x)		((((long)(x)) << XCT_RXB_LEN_S) & XCT_RXB_LEN_M)
455#define XCT_RXB_ADDR_M 0x00000fffffffffffull
456#define XCT_RXB_ADDR_S 0
457#define XCT_RXB_ADDR(x)	((((long)(x)) << XCT_RXB_ADDR_S) & XCT_RXB_ADDR_M)
458
459
460#endif /* PASEMI_MAC_H */
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index 8844c20eac2d..2429b274f0b0 100644..100755
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -22,6 +22,7 @@
22#include <linux/errno.h> 22#include <linux/errno.h>
23#include <linux/ioport.h> 23#include <linux/ioport.h>
24#include <linux/ip.h> 24#include <linux/ip.h>
25#include <linux/in.h>
25#include <linux/if_arp.h> 26#include <linux/if_arp.h>
26#include <linux/if_ether.h> 27#include <linux/if_ether.h>
27#include <linux/netdevice.h> 28#include <linux/netdevice.h>
@@ -63,6 +64,7 @@ MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts.");
63 64
64static struct pci_device_id ql3xxx_pci_tbl[] __devinitdata = { 65static struct pci_device_id ql3xxx_pci_tbl[] __devinitdata = {
65 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)}, 66 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)},
67 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)},
66 /* required last entry */ 68 /* required last entry */
67 {0,} 69 {0,}
68}; 70};
@@ -1475,6 +1477,10 @@ static int ql_mii_setup(struct ql3_adapter *qdev)
1475 2) << 7)) 1477 2) << 7))
1476 return -1; 1478 return -1;
1477 1479
1480 if (qdev->device_id == QL3032_DEVICE_ID)
1481 ql_write_page0_reg(qdev,
1482 &port_regs->macMIIMgmtControlReg, 0x0f00000);
1483
1478 /* Divide 125MHz clock by 28 to meet PHY timing requirements */ 1484 /* Divide 125MHz clock by 28 to meet PHY timing requirements */
1479 reg = MAC_MII_CONTROL_CLK_SEL_DIV28; 1485 reg = MAC_MII_CONTROL_CLK_SEL_DIV28;
1480 1486
@@ -1706,18 +1712,42 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
1706 struct ob_mac_iocb_rsp *mac_rsp) 1712 struct ob_mac_iocb_rsp *mac_rsp)
1707{ 1713{
1708 struct ql_tx_buf_cb *tx_cb; 1714 struct ql_tx_buf_cb *tx_cb;
1715 int i;
1709 1716
1710 tx_cb = &qdev->tx_buf[mac_rsp->transaction_id]; 1717 tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];
1711 pci_unmap_single(qdev->pdev, 1718 pci_unmap_single(qdev->pdev,
1712 pci_unmap_addr(tx_cb, mapaddr), 1719 pci_unmap_addr(&tx_cb->map[0], mapaddr),
1713 pci_unmap_len(tx_cb, maplen), PCI_DMA_TODEVICE); 1720 pci_unmap_len(&tx_cb->map[0], maplen),
1714 dev_kfree_skb_irq(tx_cb->skb); 1721 PCI_DMA_TODEVICE);
1722 tx_cb->seg_count--;
1723 if (tx_cb->seg_count) {
1724 for (i = 1; i < tx_cb->seg_count; i++) {
1725 pci_unmap_page(qdev->pdev,
1726 pci_unmap_addr(&tx_cb->map[i],
1727 mapaddr),
1728 pci_unmap_len(&tx_cb->map[i], maplen),
1729 PCI_DMA_TODEVICE);
1730 }
1731 }
1715 qdev->stats.tx_packets++; 1732 qdev->stats.tx_packets++;
1716 qdev->stats.tx_bytes += tx_cb->skb->len; 1733 qdev->stats.tx_bytes += tx_cb->skb->len;
1734 dev_kfree_skb_irq(tx_cb->skb);
1717 tx_cb->skb = NULL; 1735 tx_cb->skb = NULL;
1718 atomic_inc(&qdev->tx_count); 1736 atomic_inc(&qdev->tx_count);
1719} 1737}
1720 1738
1739/*
1740 * The difference between 3022 and 3032 for inbound completions:
1741 * 3022 uses two buffers per completion. The first buffer contains
1742 * (some) header info, the second the remainder of the headers plus
1743 * the data. For this chip we reserve some space at the top of the
1744 * receive buffer so that the header info in buffer one can be
 1745 * prepended to buffer two. Buffer two is then sent up while
 1746 * buffer one is returned to the hardware to be reused.
 1747 * 3032 receives all of its data and headers in one buffer for a
1748 * simpler process. 3032 also supports checksum verification as
1749 * can be seen in ql_process_macip_rx_intr().
1750 */
1721static void ql_process_mac_rx_intr(struct ql3_adapter *qdev, 1751static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
1722 struct ib_mac_iocb_rsp *ib_mac_rsp_ptr) 1752 struct ib_mac_iocb_rsp *ib_mac_rsp_ptr)
1723{ 1753{
@@ -1740,14 +1770,17 @@ static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
1740 qdev->last_rsp_offset = qdev->small_buf_phy_addr_low + offset; 1770 qdev->last_rsp_offset = qdev->small_buf_phy_addr_low + offset;
1741 qdev->small_buf_release_cnt++; 1771 qdev->small_buf_release_cnt++;
1742 1772
1743 /* start of first buffer */ 1773 if (qdev->device_id == QL3022_DEVICE_ID) {
1744 lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr); 1774 /* start of first buffer (3022 only) */
1745 lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index]; 1775 lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
1746 qdev->lrg_buf_release_cnt++; 1776 lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index];
1747 if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS) 1777 qdev->lrg_buf_release_cnt++;
1748 qdev->lrg_buf_index = 0; 1778 if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS) {
1749 curr_ial_ptr++; /* 64-bit pointers require two incs. */ 1779 qdev->lrg_buf_index = 0;
1750 curr_ial_ptr++; 1780 }
1781 curr_ial_ptr++; /* 64-bit pointers require two incs. */
1782 curr_ial_ptr++;
1783 }
1751 1784
1752 /* start of second buffer */ 1785 /* start of second buffer */
1753 lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr); 1786 lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
@@ -1778,7 +1811,8 @@ static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
1778 qdev->ndev->last_rx = jiffies; 1811 qdev->ndev->last_rx = jiffies;
1779 lrg_buf_cb2->skb = NULL; 1812 lrg_buf_cb2->skb = NULL;
1780 1813
1781 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1); 1814 if (qdev->device_id == QL3022_DEVICE_ID)
1815 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
1782 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2); 1816 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
1783} 1817}
1784 1818
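Editor's note: a minimal sketch of the 3022 two-buffer reassembly described in the comment above, with hypothetical names (hdr_skb is buffer one, data_skb is buffer two, allocated with hdr_len bytes of headroom reserved); it is not driver code, only the shape of the skb_push() trick.

#include <linux/skbuff.h>
#include <linux/string.h>

static void prepend_split_header(struct sk_buff *hdr_skb,
				 struct sk_buff *data_skb,
				 unsigned int hdr_len)
{
	/* skb_push() opens up the reserved headroom; the header bytes
	 * from buffer one are copied in front of the payload, after
	 * which buffer one can be recycled to the hardware. */
	memcpy(skb_push(data_skb, hdr_len), hdr_skb->data, hdr_len);
}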
@@ -1790,7 +1824,7 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
1790 struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL; 1824 struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
1791 struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL; 1825 struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
1792 u32 *curr_ial_ptr; 1826 u32 *curr_ial_ptr;
1793 struct sk_buff *skb1, *skb2; 1827 struct sk_buff *skb1 = NULL, *skb2;
1794 struct net_device *ndev = qdev->ndev; 1828 struct net_device *ndev = qdev->ndev;
1795 u16 length = le16_to_cpu(ib_ip_rsp_ptr->length); 1829 u16 length = le16_to_cpu(ib_ip_rsp_ptr->length);
1796 u16 size = 0; 1830 u16 size = 0;
@@ -1806,16 +1840,20 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
1806 qdev->last_rsp_offset = qdev->small_buf_phy_addr_low + offset; 1840 qdev->last_rsp_offset = qdev->small_buf_phy_addr_low + offset;
1807 qdev->small_buf_release_cnt++; 1841 qdev->small_buf_release_cnt++;
1808 1842
1809 /* start of first buffer */ 1843 if (qdev->device_id == QL3022_DEVICE_ID) {
1810 lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr); 1844 /* start of first buffer on 3022 */
1811 lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index]; 1845 lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
1812 1846 lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index];
1813 qdev->lrg_buf_release_cnt++; 1847 qdev->lrg_buf_release_cnt++;
1814 if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS) 1848 if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
1815 qdev->lrg_buf_index = 0; 1849 qdev->lrg_buf_index = 0;
1816 skb1 = lrg_buf_cb1->skb; 1850 skb1 = lrg_buf_cb1->skb;
1817 curr_ial_ptr++; /* 64-bit pointers require two incs. */ 1851 curr_ial_ptr++; /* 64-bit pointers require two incs. */
1818 curr_ial_ptr++; 1852 curr_ial_ptr++;
1853 size = ETH_HLEN;
1854 if (*((u16 *) skb1->data) != 0xFFFF)
1855 size += VLAN_ETH_HLEN - ETH_HLEN;
1856 }
1819 1857
1820 /* start of second buffer */ 1858 /* start of second buffer */
1821 lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr); 1859 lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
@@ -1825,18 +1863,6 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
1825 if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS) 1863 if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
1826 qdev->lrg_buf_index = 0; 1864 qdev->lrg_buf_index = 0;
1827 1865
1828 qdev->stats.rx_packets++;
1829 qdev->stats.rx_bytes += length;
1830
1831 /*
1832 * Copy the ethhdr from first buffer to second. This
1833 * is necessary for IP completions.
1834 */
1835 if (*((u16 *) skb1->data) != 0xFFFF)
1836 size = VLAN_ETH_HLEN;
1837 else
1838 size = ETH_HLEN;
1839
1840 skb_put(skb2, length); /* Just the second buffer length here. */ 1866 skb_put(skb2, length); /* Just the second buffer length here. */
1841 pci_unmap_single(qdev->pdev, 1867 pci_unmap_single(qdev->pdev,
1842 pci_unmap_addr(lrg_buf_cb2, mapaddr), 1868 pci_unmap_addr(lrg_buf_cb2, mapaddr),
@@ -1844,16 +1870,40 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
1844 PCI_DMA_FROMDEVICE); 1870 PCI_DMA_FROMDEVICE);
1845 prefetch(skb2->data); 1871 prefetch(skb2->data);
1846 1872
1847 memcpy(skb_push(skb2, size), skb1->data + VLAN_ID_LEN, size);
1848 skb2->dev = qdev->ndev;
1849 skb2->ip_summed = CHECKSUM_NONE; 1873 skb2->ip_summed = CHECKSUM_NONE;
1874 if (qdev->device_id == QL3022_DEVICE_ID) {
1875 /*
1876 * Copy the ethhdr from first buffer to second. This
1877 * is necessary for 3022 IP completions.
1878 */
1879 memcpy(skb_push(skb2, size), skb1->data + VLAN_ID_LEN, size);
1880 } else {
1881 u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum);
1882 if (checksum &
1883 (IB_IP_IOCB_RSP_3032_ICE |
1884 IB_IP_IOCB_RSP_3032_CE |
1885 IB_IP_IOCB_RSP_3032_NUC)) {
1886 printk(KERN_ERR
1887 "%s: Bad checksum for this %s packet, checksum = %x.\n",
1888 __func__,
1889 ((checksum &
1890 IB_IP_IOCB_RSP_3032_TCP) ? "TCP" :
1891 "UDP"),checksum);
1892 } else if (checksum & IB_IP_IOCB_RSP_3032_TCP) {
1893 skb2->ip_summed = CHECKSUM_UNNECESSARY;
1894 }
1895 }
1896 skb2->dev = qdev->ndev;
1850 skb2->protocol = eth_type_trans(skb2, qdev->ndev); 1897 skb2->protocol = eth_type_trans(skb2, qdev->ndev);
1851 1898
1852 netif_receive_skb(skb2); 1899 netif_receive_skb(skb2);
1900 qdev->stats.rx_packets++;
1901 qdev->stats.rx_bytes += length;
1853 ndev->last_rx = jiffies; 1902 ndev->last_rx = jiffies;
1854 lrg_buf_cb2->skb = NULL; 1903 lrg_buf_cb2->skb = NULL;
1855 1904
1856 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1); 1905 if (qdev->device_id == QL3022_DEVICE_ID)
1906 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
1857 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2); 1907 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
1858} 1908}
1859 1909
@@ -1880,12 +1930,14 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
1880 break; 1930 break;
1881 1931
1882 case OPCODE_IB_MAC_IOCB: 1932 case OPCODE_IB_MAC_IOCB:
1933 case OPCODE_IB_3032_MAC_IOCB:
1883 ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *) 1934 ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *)
1884 net_rsp); 1935 net_rsp);
1885 (*rx_cleaned)++; 1936 (*rx_cleaned)++;
1886 break; 1937 break;
1887 1938
1888 case OPCODE_IB_IP_IOCB: 1939 case OPCODE_IB_IP_IOCB:
1940 case OPCODE_IB_3032_IP_IOCB:
1889 ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *) 1941 ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *)
1890 net_rsp); 1942 net_rsp);
1891 (*rx_cleaned)++; 1943 (*rx_cleaned)++;
@@ -2032,13 +2084,96 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
2032 return IRQ_RETVAL(handled); 2084 return IRQ_RETVAL(handled);
2033} 2085}
2034 2086
2087/*
2088 * Get the total number of segments needed for the
2089 * given number of fragments. This is necessary because
2090 * outbound address lists (OAL) will be used when more than
2091 * two frags are given. Each address list has 5 addr/len
 2092 * pairs. The 5th pair in each OAL is used to point to
 2093 * the next OAL if more frags are coming.
2094 * That is why the frags:segment count ratio is not linear.
2095 */
2096static int ql_get_seg_count(unsigned short frags)
2097{
2098 switch(frags) {
2099 case 0: return 1; /* just the skb->data seg */
2100 case 1: return 2; /* skb->data + 1 frag */
2101 case 2: return 3; /* skb->data + 2 frags */
 2102 case 3: return 5; /* skb->data + 1 frag + 1 OAL containing 2 frags */
2103 case 4: return 6;
2104 case 5: return 7;
2105 case 6: return 8;
2106 case 7: return 10;
2107 case 8: return 11;
2108 case 9: return 12;
2109 case 10: return 13;
2110 case 11: return 15;
2111 case 12: return 16;
2112 case 13: return 17;
2113 case 14: return 18;
2114 case 15: return 20;
2115 case 16: return 21;
2116 case 17: return 22;
2117 case 18: return 23;
2118 }
2119 return -1;
2120}
2121
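Editor's note: the lookup table above matches a closed form, namely one segment per buffer (skb->data plus each fragment) plus one chain entry per OAL, where the IOCB carries the first two data buffers, each intermediate OAL carries four more, and the final OAL may use all five slots. A sketch for verification only, not a replacement:

static int seg_count_closed_form(int frags)
{
	int buffers = frags + 1;	/* skb->data plus each fragment */
	int remaining, oals;

	if (frags < 0 || frags > 18)
		return -1;
	if (buffers <= 3)		/* fits in the IOCB's 3 ALPs */
		return buffers;
	remaining = buffers - 2;	/* IOCB keeps 2 data ALPs + chain */
	oals = (remaining + 2) / 4;	/* ceil((remaining - 1) / 4) */
	return buffers + oals;		/* plus one chain entry per OAL */
}

/* for (f = 0; f <= 18; f++)
 *	BUG_ON(seg_count_closed_form(f) != ql_get_seg_count(f)); */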
2122static void ql_hw_csum_setup(struct sk_buff *skb,
2123 struct ob_mac_iocb_req *mac_iocb_ptr)
2124{
2125 struct ethhdr *eth;
2126 struct iphdr *ip = NULL;
2127 u8 offset = ETH_HLEN;
2128
2129 eth = (struct ethhdr *)(skb->data);
2130
2131 if (eth->h_proto == __constant_htons(ETH_P_IP)) {
2132 ip = (struct iphdr *)&skb->data[ETH_HLEN];
2133 } else if (eth->h_proto == htons(ETH_P_8021Q) &&
2134 ((struct vlan_ethhdr *)skb->data)->
2135 h_vlan_encapsulated_proto == __constant_htons(ETH_P_IP)) {
2136 ip = (struct iphdr *)&skb->data[VLAN_ETH_HLEN];
2137 offset = VLAN_ETH_HLEN;
2138 }
2139
2140 if (ip) {
2141 if (ip->protocol == IPPROTO_TCP) {
2142 mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC;
2143 mac_iocb_ptr->ip_hdr_off = offset;
2144 mac_iocb_ptr->ip_hdr_len = ip->ihl;
2145 } else if (ip->protocol == IPPROTO_UDP) {
2146 mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC;
2147 mac_iocb_ptr->ip_hdr_off = offset;
2148 mac_iocb_ptr->ip_hdr_len = ip->ihl;
2149 }
2150 }
2151}
2152
2153/*
2154 * The difference between 3022 and 3032 sends:
 2155 * 3022 only supports a simple single-segment transmission.
2156 * 3032 supports checksumming and scatter/gather lists (fragments).
2157 * The 3032 supports sglists by using the 3 addr/len pairs (ALP)
2158 * in the IOCB plus a chain of outbound address lists (OAL) that
2159 * each contain 5 ALPs. The last ALP of the IOCB (3rd) or OAL (5th)
 2160 * will be used to point to an OAL when more ALP entries are required.
2161 * The IOCB is always the top of the chain followed by one or more
2162 * OALs (when necessary).
2163 */
2035static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev) 2164static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
2036{ 2165{
2037 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); 2166 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
2038 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; 2167 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
2039 struct ql_tx_buf_cb *tx_cb; 2168 struct ql_tx_buf_cb *tx_cb;
2169 u32 tot_len = skb->len;
2170 struct oal *oal;
2171 struct oal_entry *oal_entry;
2172 int len;
2040 struct ob_mac_iocb_req *mac_iocb_ptr; 2173 struct ob_mac_iocb_req *mac_iocb_ptr;
2041 u64 map; 2174 u64 map;
2175 int seg_cnt, seg = 0;
2176 int frag_cnt = (int)skb_shinfo(skb)->nr_frags;
2042 2177
2043 if (unlikely(atomic_read(&qdev->tx_count) < 2)) { 2178 if (unlikely(atomic_read(&qdev->tx_count) < 2)) {
2044 if (!netif_queue_stopped(ndev)) 2179 if (!netif_queue_stopped(ndev))
@@ -2046,21 +2181,79 @@ static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
2046 return NETDEV_TX_BUSY; 2181 return NETDEV_TX_BUSY;
2047 } 2182 }
2048 tx_cb = &qdev->tx_buf[qdev->req_producer_index] ; 2183 tx_cb = &qdev->tx_buf[qdev->req_producer_index] ;
2184 seg_cnt = tx_cb->seg_count = ql_get_seg_count((skb_shinfo(skb)->nr_frags));
 2185 if (seg_cnt == -1) {
 2186 printk(KERN_ERR PFX "%s: invalid segment count!\n", __func__);
2187 return NETDEV_TX_OK;
2188
2189 }
2049 mac_iocb_ptr = tx_cb->queue_entry; 2190 mac_iocb_ptr = tx_cb->queue_entry;
2050 memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req)); 2191 memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req));
2051 mac_iocb_ptr->opcode = qdev->mac_ob_opcode; 2192 mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
2052 mac_iocb_ptr->flags |= qdev->mb_bit_mask; 2193 mac_iocb_ptr->flags |= qdev->mb_bit_mask;
2053 mac_iocb_ptr->transaction_id = qdev->req_producer_index; 2194 mac_iocb_ptr->transaction_id = qdev->req_producer_index;
2054 mac_iocb_ptr->data_len = cpu_to_le16((u16) skb->len); 2195 mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len);
2055 tx_cb->skb = skb; 2196 tx_cb->skb = skb;
2056 map = pci_map_single(qdev->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); 2197 if (skb->ip_summed == CHECKSUM_PARTIAL)
2057 mac_iocb_ptr->buf_addr0_low = cpu_to_le32(LS_64BITS(map)); 2198 ql_hw_csum_setup(skb, mac_iocb_ptr);
2058 mac_iocb_ptr->buf_addr0_high = cpu_to_le32(MS_64BITS(map)); 2199 len = skb_headlen(skb);
2059 mac_iocb_ptr->buf_0_len = cpu_to_le32(skb->len | OB_MAC_IOCB_REQ_E); 2200 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
2060 pci_unmap_addr_set(tx_cb, mapaddr, map); 2201 oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
2061 pci_unmap_len_set(tx_cb, maplen, skb->len); 2202 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2062 atomic_dec(&qdev->tx_count); 2203 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
2204 oal_entry->len = cpu_to_le32(len);
2205 pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
2206 pci_unmap_len_set(&tx_cb->map[seg], maplen, len);
2207 seg++;
2208
2209 if (!skb_shinfo(skb)->nr_frags) {
2210 /* Terminate the last segment. */
2211 oal_entry->len =
2212 cpu_to_le32(le32_to_cpu(oal_entry->len) | OAL_LAST_ENTRY);
2213 } else {
2214 int i;
2215 oal = tx_cb->oal;
2216 for (i=0; i<frag_cnt; i++,seg++) {
2217 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2218 oal_entry++;
 2219 if ((seg == 2 && seg_cnt > 3) || /* A chain entry must occupy */
 2220 (seg == 7 && seg_cnt > 8) || /* the IOCB's 3rd ALP and */
 2221 (seg == 12 && seg_cnt > 13) || /* each OAL's 5th ALP. */
2222 (seg == 17 && seg_cnt > 18)) {
2223 /* Continuation entry points to outbound address list. */
2224 map = pci_map_single(qdev->pdev, oal,
2225 sizeof(struct oal),
2226 PCI_DMA_TODEVICE);
2227 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2228 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
2229 oal_entry->len =
2230 cpu_to_le32(sizeof(struct oal) |
2231 OAL_CONT_ENTRY);
2232 pci_unmap_addr_set(&tx_cb->map[seg], mapaddr,
2233 map);
2234 pci_unmap_len_set(&tx_cb->map[seg], maplen,
2235 len);
2236 oal_entry = (struct oal_entry *)oal;
2237 oal++;
2238 seg++;
2239 }
2063 2240
2241 map =
2242 pci_map_page(qdev->pdev, frag->page,
2243 frag->page_offset, frag->size,
2244 PCI_DMA_TODEVICE);
2245 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2246 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
2247 oal_entry->len = cpu_to_le32(frag->size);
2248 pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
2249 pci_unmap_len_set(&tx_cb->map[seg], maplen,
2250 frag->size);
2251 }
2252 /* Terminate the last segment. */
2253 oal_entry->len =
2254 cpu_to_le32(le32_to_cpu(oal_entry->len) | OAL_LAST_ENTRY);
2255 }
2256 wmb();
2064 qdev->req_producer_index++; 2257 qdev->req_producer_index++;
2065 if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES) 2258 if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES)
2066 qdev->req_producer_index = 0; 2259 qdev->req_producer_index = 0;
@@ -2074,8 +2267,10 @@ static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
2074 printk(KERN_DEBUG PFX "%s: tx queued, slot %d, len %d\n", 2267 printk(KERN_DEBUG PFX "%s: tx queued, slot %d, len %d\n",
2075 ndev->name, qdev->req_producer_index, skb->len); 2268 ndev->name, qdev->req_producer_index, skb->len);
2076 2269
2270 atomic_dec(&qdev->tx_count);
2077 return NETDEV_TX_OK; 2271 return NETDEV_TX_OK;
2078} 2272}
2273
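Editor's note: every address/length pair (ALP), whether one of the IOCB's three or one of an OAL's five, is filled with the same dma_lo/dma_hi/len pattern that ql3xxx_send() repeats above. A hypothetical helper (not in the patch) makes the two control bits explicit:

/* flag is 0 for an ordinary data buffer, OAL_LAST_ENTRY for the final
 * data buffer, or OAL_CONT_ENTRY for an entry chaining to the next OAL
 * (in which case len is sizeof(struct oal)). */
static void ql_fill_alp(struct oal_entry *e, dma_addr_t map, u32 len,
			u32 flag)
{
	e->dma_lo = cpu_to_le32(LS_64BITS(map));
	e->dma_hi = cpu_to_le32(MS_64BITS(map));
	e->len = cpu_to_le32(len | flag);
}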
2079static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev) 2274static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
2080{ 2275{
2081 qdev->req_q_size = 2276 qdev->req_q_size =
@@ -2359,7 +2554,22 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
2359 return 0; 2554 return 0;
2360} 2555}
2361 2556
2362static void ql_create_send_free_list(struct ql3_adapter *qdev) 2557static void ql_free_send_free_list(struct ql3_adapter *qdev)
2558{
2559 struct ql_tx_buf_cb *tx_cb;
2560 int i;
2561
2562 tx_cb = &qdev->tx_buf[0];
2563 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
2564 if (tx_cb->oal) {
2565 kfree(tx_cb->oal);
2566 tx_cb->oal = NULL;
2567 }
2568 tx_cb++;
2569 }
2570}
2571
2572static int ql_create_send_free_list(struct ql3_adapter *qdev)
2363{ 2573{
2364 struct ql_tx_buf_cb *tx_cb; 2574 struct ql_tx_buf_cb *tx_cb;
2365 int i; 2575 int i;
@@ -2368,11 +2578,16 @@ static void ql_create_send_free_list(struct ql3_adapter *qdev)
2368 2578
2369 /* Create free list of transmit buffers */ 2579 /* Create free list of transmit buffers */
2370 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { 2580 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
2581
2371 tx_cb = &qdev->tx_buf[i]; 2582 tx_cb = &qdev->tx_buf[i];
2372 tx_cb->skb = NULL; 2583 tx_cb->skb = NULL;
2373 tx_cb->queue_entry = req_q_curr; 2584 tx_cb->queue_entry = req_q_curr;
2374 req_q_curr++; 2585 req_q_curr++;
2586 tx_cb->oal = kmalloc(512, GFP_KERNEL);
2587 if (tx_cb->oal == NULL)
2588 return -1;
2375 } 2589 }
2590 return 0;
2376} 2591}
2377 2592
2378static int ql_alloc_mem_resources(struct ql3_adapter *qdev) 2593static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
@@ -2447,12 +2662,14 @@ static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
2447 2662
2448 /* Initialize the large buffer queue. */ 2663 /* Initialize the large buffer queue. */
2449 ql_init_large_buffers(qdev); 2664 ql_init_large_buffers(qdev);
2450 ql_create_send_free_list(qdev); 2665 if (ql_create_send_free_list(qdev))
2666 goto err_free_list;
2451 2667
2452 qdev->rsp_current = qdev->rsp_q_virt_addr; 2668 qdev->rsp_current = qdev->rsp_q_virt_addr;
2453 2669
2454 return 0; 2670 return 0;
2455 2671err_free_list:
2672 ql_free_send_free_list(qdev);
2456err_small_buffers: 2673err_small_buffers:
2457 ql_free_buffer_queues(qdev); 2674 ql_free_buffer_queues(qdev);
2458err_buffer_queues: 2675err_buffer_queues:
@@ -2468,6 +2685,7 @@ err_req_rsp:
2468 2685
2469static void ql_free_mem_resources(struct ql3_adapter *qdev) 2686static void ql_free_mem_resources(struct ql3_adapter *qdev)
2470{ 2687{
2688 ql_free_send_free_list(qdev);
2471 ql_free_large_buffers(qdev); 2689 ql_free_large_buffers(qdev);
2472 ql_free_small_buffers(qdev); 2690 ql_free_small_buffers(qdev);
2473 ql_free_buffer_queues(qdev); 2691 ql_free_buffer_queues(qdev);
@@ -2766,11 +2984,20 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
2766 } 2984 }
2767 2985
2768 /* Enable Ethernet Function */ 2986 /* Enable Ethernet Function */
2769 value = 2987 if (qdev->device_id == QL3032_DEVICE_ID) {
2770 (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI | 2988 value =
2771 PORT_CONTROL_HH); 2989 (QL3032_PORT_CONTROL_EF | QL3032_PORT_CONTROL_KIE |
2772 ql_write_page0_reg(qdev, &port_regs->portControl, 2990 QL3032_PORT_CONTROL_EIv6 | QL3032_PORT_CONTROL_EIv4);
2773 ((value << 16) | value)); 2991 ql_write_page0_reg(qdev, &port_regs->functionControl,
2992 ((value << 16) | value));
2993 } else {
2994 value =
2995 (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI |
2996 PORT_CONTROL_HH);
2997 ql_write_page0_reg(qdev, &port_regs->portControl,
2998 ((value << 16) | value));
2999 }
3000
2774 3001
2775out: 3002out:
2776 return status; 3003 return status;
@@ -2917,8 +3144,10 @@ static void ql_display_dev_info(struct net_device *ndev)
2917 struct pci_dev *pdev = qdev->pdev; 3144 struct pci_dev *pdev = qdev->pdev;
2918 3145
2919 printk(KERN_INFO PFX 3146 printk(KERN_INFO PFX
2920 "\n%s Adapter %d RevisionID %d found on PCI slot %d.\n", 3147 "\n%s Adapter %d RevisionID %d found %s on PCI slot %d.\n",
2921 DRV_NAME, qdev->index, qdev->chip_rev_id, qdev->pci_slot); 3148 DRV_NAME, qdev->index, qdev->chip_rev_id,
3149 (qdev->device_id == QL3032_DEVICE_ID) ? "QLA3032" : "QLA3022",
3150 qdev->pci_slot);
2922 printk(KERN_INFO PFX 3151 printk(KERN_INFO PFX
2923 "%s Interface.\n", 3152 "%s Interface.\n",
2924 test_bit(QL_LINK_OPTICAL,&qdev->flags) ? "OPTICAL" : "COPPER"); 3153 test_bit(QL_LINK_OPTICAL,&qdev->flags) ? "OPTICAL" : "COPPER");
@@ -3212,15 +3441,22 @@ static void ql_reset_work(struct work_struct *work)
3212 * Loop through the active list and return the skb. 3441 * Loop through the active list and return the skb.
3213 */ 3442 */
3214 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { 3443 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
3444 int j;
3215 tx_cb = &qdev->tx_buf[i]; 3445 tx_cb = &qdev->tx_buf[i];
3216 if (tx_cb->skb) { 3446 if (tx_cb->skb) {
3217
3218 printk(KERN_DEBUG PFX 3447 printk(KERN_DEBUG PFX
3219 "%s: Freeing lost SKB.\n", 3448 "%s: Freeing lost SKB.\n",
3220 qdev->ndev->name); 3449 qdev->ndev->name);
3221 pci_unmap_single(qdev->pdev, 3450 pci_unmap_single(qdev->pdev,
3222 pci_unmap_addr(tx_cb, mapaddr), 3451 pci_unmap_addr(&tx_cb->map[0], mapaddr),
3223 pci_unmap_len(tx_cb, maplen), PCI_DMA_TODEVICE); 3452 pci_unmap_len(&tx_cb->map[0], maplen),
3453 PCI_DMA_TODEVICE);
3454 for(j=1;j<tx_cb->seg_count;j++) {
3455 pci_unmap_page(qdev->pdev,
3456 pci_unmap_addr(&tx_cb->map[j],mapaddr),
3457 pci_unmap_len(&tx_cb->map[j],maplen),
3458 PCI_DMA_TODEVICE);
3459 }
3224 dev_kfree_skb(tx_cb->skb); 3460 dev_kfree_skb(tx_cb->skb);
3225 tx_cb->skb = NULL; 3461 tx_cb->skb = NULL;
3226 } 3462 }
@@ -3379,21 +3615,24 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
3379 SET_MODULE_OWNER(ndev); 3615 SET_MODULE_OWNER(ndev);
3380 SET_NETDEV_DEV(ndev, &pdev->dev); 3616 SET_NETDEV_DEV(ndev, &pdev->dev);
3381 3617
3382 if (pci_using_dac)
3383 ndev->features |= NETIF_F_HIGHDMA;
3384
3385 pci_set_drvdata(pdev, ndev); 3618 pci_set_drvdata(pdev, ndev);
3386 3619
3387 qdev = netdev_priv(ndev); 3620 qdev = netdev_priv(ndev);
3388 qdev->index = cards_found; 3621 qdev->index = cards_found;
3389 qdev->ndev = ndev; 3622 qdev->ndev = ndev;
3390 qdev->pdev = pdev; 3623 qdev->pdev = pdev;
3624 qdev->device_id = pci_entry->device;
3391 qdev->port_link_state = LS_DOWN; 3625 qdev->port_link_state = LS_DOWN;
3392 if (msi) 3626 if (msi)
3393 qdev->msi = 1; 3627 qdev->msi = 1;
3394 3628
3395 qdev->msg_enable = netif_msg_init(debug, default_msg); 3629 qdev->msg_enable = netif_msg_init(debug, default_msg);
3396 3630
3631 if (pci_using_dac)
3632 ndev->features |= NETIF_F_HIGHDMA;
3633 if (qdev->device_id == QL3032_DEVICE_ID)
3634 ndev->features |= (NETIF_F_HW_CSUM | NETIF_F_SG);
3635
3397 qdev->mem_map_registers = 3636 qdev->mem_map_registers =
3398 ioremap_nocache(pci_resource_start(pdev, 1), 3637 ioremap_nocache(pci_resource_start(pdev, 1),
3399 pci_resource_len(qdev->pdev, 1)); 3638 pci_resource_len(qdev->pdev, 1));
diff --git a/drivers/net/qla3xxx.h b/drivers/net/qla3xxx.h
index ea94de7fd071..b2d76ea68827 100644..100755
--- a/drivers/net/qla3xxx.h
+++ b/drivers/net/qla3xxx.h
@@ -21,7 +21,9 @@
21 21
22#define OPCODE_UPDATE_NCB_IOCB 0xF0 22#define OPCODE_UPDATE_NCB_IOCB 0xF0
23#define OPCODE_IB_MAC_IOCB 0xF9 23#define OPCODE_IB_MAC_IOCB 0xF9
24#define OPCODE_IB_3032_MAC_IOCB 0x09
24#define OPCODE_IB_IP_IOCB 0xFA 25#define OPCODE_IB_IP_IOCB 0xFA
26#define OPCODE_IB_3032_IP_IOCB 0x0A
25#define OPCODE_IB_TCP_IOCB 0xFB 27#define OPCODE_IB_TCP_IOCB 0xFB
26#define OPCODE_DUMP_PROTO_IOCB 0xFE 28#define OPCODE_DUMP_PROTO_IOCB 0xFE
27#define OPCODE_BUFFER_ALERT_IOCB 0xFB 29#define OPCODE_BUFFER_ALERT_IOCB 0xFB
@@ -37,18 +39,23 @@
37struct ob_mac_iocb_req { 39struct ob_mac_iocb_req {
38 u8 opcode; 40 u8 opcode;
39 u8 flags; 41 u8 flags;
40#define OB_MAC_IOCB_REQ_MA 0xC0 42#define OB_MAC_IOCB_REQ_MA 0xe0
41#define OB_MAC_IOCB_REQ_F 0x20 43#define OB_MAC_IOCB_REQ_F 0x10
42#define OB_MAC_IOCB_REQ_X 0x10 44#define OB_MAC_IOCB_REQ_X 0x08
43#define OB_MAC_IOCB_REQ_D 0x02 45#define OB_MAC_IOCB_REQ_D 0x02
44#define OB_MAC_IOCB_REQ_I 0x01 46#define OB_MAC_IOCB_REQ_I 0x01
45 __le16 reserved0; 47 u8 flags1;
48#define OB_3032MAC_IOCB_REQ_IC 0x04
49#define OB_3032MAC_IOCB_REQ_TC 0x02
50#define OB_3032MAC_IOCB_REQ_UC 0x01
51 u8 reserved0;
46 52
47 __le32 transaction_id; 53 __le32 transaction_id;
48 __le16 data_len; 54 __le16 data_len;
49 __le16 reserved1; 55 u8 ip_hdr_off;
56 u8 ip_hdr_len;
57 __le32 reserved1;
50 __le32 reserved2; 58 __le32 reserved2;
51 __le32 reserved3;
52 __le32 buf_addr0_low; 59 __le32 buf_addr0_low;
53 __le32 buf_addr0_high; 60 __le32 buf_addr0_high;
54 __le32 buf_0_len; 61 __le32 buf_0_len;
@@ -58,8 +65,8 @@ struct ob_mac_iocb_req {
58 __le32 buf_addr2_low; 65 __le32 buf_addr2_low;
59 __le32 buf_addr2_high; 66 __le32 buf_addr2_high;
60 __le32 buf_2_len; 67 __le32 buf_2_len;
68 __le32 reserved3;
61 __le32 reserved4; 69 __le32 reserved4;
62 __le32 reserved5;
63}; 70};
64/* 71/*
65 * The following constants define control bits for buffer 72 * The following constants define control bits for buffer
@@ -74,6 +81,7 @@ struct ob_mac_iocb_rsp {
74 u8 opcode; 81 u8 opcode;
75 u8 flags; 82 u8 flags;
76#define OB_MAC_IOCB_RSP_P 0x08 83#define OB_MAC_IOCB_RSP_P 0x08
84#define OB_MAC_IOCB_RSP_L 0x04
77#define OB_MAC_IOCB_RSP_S 0x02 85#define OB_MAC_IOCB_RSP_S 0x02
78#define OB_MAC_IOCB_RSP_I 0x01 86#define OB_MAC_IOCB_RSP_I 0x01
79 87
@@ -85,6 +93,7 @@ struct ob_mac_iocb_rsp {
85 93
86struct ib_mac_iocb_rsp { 94struct ib_mac_iocb_rsp {
87 u8 opcode; 95 u8 opcode;
96#define IB_MAC_IOCB_RSP_V 0x80
88 u8 flags; 97 u8 flags;
89#define IB_MAC_IOCB_RSP_S 0x80 98#define IB_MAC_IOCB_RSP_S 0x80
90#define IB_MAC_IOCB_RSP_H1 0x40 99#define IB_MAC_IOCB_RSP_H1 0x40
@@ -138,6 +147,7 @@ struct ob_ip_iocb_req {
138struct ob_ip_iocb_rsp { 147struct ob_ip_iocb_rsp {
139 u8 opcode; 148 u8 opcode;
140 u8 flags; 149 u8 flags;
150#define OB_MAC_IOCB_RSP_H 0x10
141#define OB_MAC_IOCB_RSP_E 0x08 151#define OB_MAC_IOCB_RSP_E 0x08
142#define OB_MAC_IOCB_RSP_L 0x04 152#define OB_MAC_IOCB_RSP_L 0x04
143#define OB_MAC_IOCB_RSP_S 0x02 153#define OB_MAC_IOCB_RSP_S 0x02
@@ -220,6 +230,10 @@ struct ob_tcp_iocb_rsp {
220 230
221struct ib_ip_iocb_rsp { 231struct ib_ip_iocb_rsp {
222 u8 opcode; 232 u8 opcode;
233#define IB_IP_IOCB_RSP_3032_V 0x80
234#define IB_IP_IOCB_RSP_3032_O 0x40
235#define IB_IP_IOCB_RSP_3032_I 0x20
236#define IB_IP_IOCB_RSP_3032_R 0x10
223 u8 flags; 237 u8 flags;
224#define IB_IP_IOCB_RSP_S 0x80 238#define IB_IP_IOCB_RSP_S 0x80
225#define IB_IP_IOCB_RSP_H1 0x40 239#define IB_IP_IOCB_RSP_H1 0x40
@@ -230,6 +244,12 @@ struct ib_ip_iocb_rsp {
230 244
231 __le16 length; 245 __le16 length;
232 __le16 checksum; 246 __le16 checksum;
247#define IB_IP_IOCB_RSP_3032_ICE 0x01
248#define IB_IP_IOCB_RSP_3032_CE 0x02
249#define IB_IP_IOCB_RSP_3032_NUC 0x04
250#define IB_IP_IOCB_RSP_3032_UDP 0x08
251#define IB_IP_IOCB_RSP_3032_TCP 0x10
252#define IB_IP_IOCB_RSP_3032_IPE 0x20
233 __le16 reserved; 253 __le16 reserved;
234#define IB_IP_IOCB_RSP_R 0x01 254#define IB_IP_IOCB_RSP_R 0x01
235 __le32 ial_low; 255 __le32 ial_low;
@@ -524,6 +544,21 @@ enum {
524 IP_ADDR_INDEX_REG_FUNC_2_SEC = 0x0005, 544 IP_ADDR_INDEX_REG_FUNC_2_SEC = 0x0005,
525 IP_ADDR_INDEX_REG_FUNC_3_PRI = 0x0006, 545 IP_ADDR_INDEX_REG_FUNC_3_PRI = 0x0006,
526 IP_ADDR_INDEX_REG_FUNC_3_SEC = 0x0007, 546 IP_ADDR_INDEX_REG_FUNC_3_SEC = 0x0007,
547 IP_ADDR_INDEX_REG_6 = 0x0008,
548 IP_ADDR_INDEX_REG_OFFSET_MASK = 0x0030,
549 IP_ADDR_INDEX_REG_E = 0x0040,
550};
551enum {
552 QL3032_PORT_CONTROL_DS = 0x0001,
553 QL3032_PORT_CONTROL_HH = 0x0002,
554 QL3032_PORT_CONTROL_EIv6 = 0x0004,
555 QL3032_PORT_CONTROL_EIv4 = 0x0008,
556 QL3032_PORT_CONTROL_ET = 0x0010,
557 QL3032_PORT_CONTROL_EF = 0x0020,
558 QL3032_PORT_CONTROL_DRM = 0x0040,
559 QL3032_PORT_CONTROL_RLB = 0x0080,
560 QL3032_PORT_CONTROL_RCB = 0x0100,
561 QL3032_PORT_CONTROL_KIE = 0x0200,
527}; 562};
528 563
529enum { 564enum {
@@ -657,7 +692,8 @@ struct ql3xxx_port_registers {
657 u32 internalRamWDataReg; 692 u32 internalRamWDataReg;
658 u32 reclaimedBufferAddrRegLow; 693 u32 reclaimedBufferAddrRegLow;
659 u32 reclaimedBufferAddrRegHigh; 694 u32 reclaimedBufferAddrRegHigh;
660 u32 reserved[2]; 695 u32 tcpConfiguration;
696 u32 functionControl;
661 u32 fpgaRevID; 697 u32 fpgaRevID;
662 u32 localRamAddr; 698 u32 localRamAddr;
663 u32 localRamDataAutoIncr; 699 u32 localRamDataAutoIncr;
@@ -963,6 +999,7 @@ struct eeprom_data {
963 999
964#define QL3XXX_VENDOR_ID 0x1077 1000#define QL3XXX_VENDOR_ID 0x1077
965#define QL3022_DEVICE_ID 0x3022 1001#define QL3022_DEVICE_ID 0x3022
1002#define QL3032_DEVICE_ID 0x3032
966 1003
967/* MTU & Frame Size stuff */ 1004/* MTU & Frame Size stuff */
968#define NORMAL_MTU_SIZE ETH_DATA_LEN 1005#define NORMAL_MTU_SIZE ETH_DATA_LEN
@@ -1038,11 +1075,41 @@ struct ql_rcv_buf_cb {
1038 int index; 1075 int index;
1039}; 1076};
1040 1077
1078/*
1079 * Original IOCB has 3 sg entries:
1080 * first points to skb-data area
1081 * second points to first frag
1082 * third points to next oal.
1083 * OAL has 5 entries:
1084 * 1 thru 4 point to frags
1085 * fifth points to next oal.
1086 */
1087#define MAX_OAL_CNT ((MAX_SKB_FRAGS-1)/4 + 1)
1088
1089struct oal_entry {
1090 u32 dma_lo;
1091 u32 dma_hi;
1092 u32 len;
1093#define OAL_LAST_ENTRY 0x80000000 /* Last valid buffer in list. */
1094#define OAL_CONT_ENTRY 0x40000000 /* points to an OAL. (continuation) */
1095 u32 reserved;
1096};
1097
1098struct oal {
1099 struct oal_entry oal_entry[5];
1100};
1101
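Editor's note: a sanity check tying MAX_OAL_CNT above to the 512-byte per-tx_cb OAL buffer kmalloc'd in ql_create_send_free_list(); illustrative only, and assuming the usual MAX_SKB_FRAGS of 18 (65536/PAGE_SIZE + 2 with 4 KiB pages).

/* With MAX_SKB_FRAGS == 18: MAX_OAL_CNT = (18 - 1) / 4 + 1 = 5, and
 * 5 * sizeof(struct oal) = 5 * 80 = 400 bytes, within the 512 bytes
 * allocated per tx_cb. */
static inline void ql_oal_size_check(void)
{
	BUILD_BUG_ON(MAX_OAL_CNT * sizeof(struct oal) > 512);
}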
1102struct map_list {
1103 DECLARE_PCI_UNMAP_ADDR(mapaddr);
1104 DECLARE_PCI_UNMAP_LEN(maplen);
1105};
1106
1041struct ql_tx_buf_cb { 1107struct ql_tx_buf_cb {
1042 struct sk_buff *skb; 1108 struct sk_buff *skb;
1043 struct ob_mac_iocb_req *queue_entry ; 1109 struct ob_mac_iocb_req *queue_entry ;
1044 DECLARE_PCI_UNMAP_ADDR(mapaddr); 1110 int seg_count;
1045 DECLARE_PCI_UNMAP_LEN(maplen); 1111 struct oal *oal;
1112 struct map_list map[MAX_SKB_FRAGS+1];
1046}; 1113};
1047 1114
1048/* definitions for type field */ 1115/* definitions for type field */
@@ -1189,6 +1256,7 @@ struct ql3_adapter {
1189 struct delayed_work reset_work; 1256 struct delayed_work reset_work;
1190 struct delayed_work tx_timeout_work; 1257 struct delayed_work tx_timeout_work;
1191 u32 max_frame_size; 1258 u32 max_frame_size;
1259 u32 device_id;
1192}; 1260};
1193 1261
1194#endif /* _QLA3XXX_H_ */ 1262#endif /* _QLA3XXX_H_ */
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 577babd4c938..5598d86380b4 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -2016,7 +2016,7 @@ static int rtl8169_alloc_rx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff,
2016 if (!skb) 2016 if (!skb)
2017 goto err_out; 2017 goto err_out;
2018 2018
2019 skb_reserve(skb, (align - 1) & (u32)skb->data); 2019 skb_reserve(skb, (align - 1) & (unsigned long)skb->data);
2020 *sk_buff = skb; 2020 *sk_buff = skb;
2021 2021
2022 mapping = pci_map_single(pdev, skb->data, rx_buf_sz, 2022 mapping = pci_map_single(pdev, skb->data, rx_buf_sz,
@@ -2487,7 +2487,7 @@ static inline int rtl8169_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
2487 2487
2488 skb = dev_alloc_skb(pkt_size + align); 2488 skb = dev_alloc_skb(pkt_size + align);
2489 if (skb) { 2489 if (skb) {
2490 skb_reserve(skb, (align - 1) & (u32)skb->data); 2490 skb_reserve(skb, (align - 1) & (unsigned long)skb->data);
2491 eth_copy_and_sum(skb, sk_buff[0]->data, pkt_size, 0); 2491 eth_copy_and_sum(skb, sk_buff[0]->data, pkt_size, 0);
2492 *sk_buff = skb; 2492 *sk_buff = skb;
2493 rtl8169_mark_to_asic(desc, rx_buf_sz); 2493 rtl8169_mark_to_asic(desc, rx_buf_sz);
diff --git a/drivers/net/s2io-regs.h b/drivers/net/s2io-regs.h
index a914fef44309..0e345cbc2bf9 100644
--- a/drivers/net/s2io-regs.h
+++ b/drivers/net/s2io-regs.h
@@ -15,7 +15,7 @@
15 15
16#define TBD 0 16#define TBD 0
17 17
18typedef struct _XENA_dev_config { 18struct XENA_dev_config {
19/* Convention: mHAL_XXX is mask, vHAL_XXX is value */ 19/* Convention: mHAL_XXX is mask, vHAL_XXX is value */
20 20
21/* General Control-Status Registers */ 21/* General Control-Status Registers */
@@ -300,6 +300,7 @@ typedef struct _XENA_dev_config {
300 u64 gpio_control; 300 u64 gpio_control;
301#define GPIO_CTRL_GPIO_0 BIT(8) 301#define GPIO_CTRL_GPIO_0 BIT(8)
302 u64 misc_control; 302 u64 misc_control;
303#define FAULT_BEHAVIOUR BIT(0)
303#define EXT_REQ_EN BIT(1) 304#define EXT_REQ_EN BIT(1)
304#define MISC_LINK_STABILITY_PRD(val) vBIT(val,29,3) 305#define MISC_LINK_STABILITY_PRD(val) vBIT(val,29,3)
305 306
@@ -851,9 +852,9 @@ typedef struct _XENA_dev_config {
851#define SPI_CONTROL_DONE BIT(6) 852#define SPI_CONTROL_DONE BIT(6)
852 u64 spi_data; 853 u64 spi_data;
853#define SPI_DATA_WRITE(data,len) vBIT(data,0,len) 854#define SPI_DATA_WRITE(data,len) vBIT(data,0,len)
854} XENA_dev_config_t; 855};
855 856
856#define XENA_REG_SPACE sizeof(XENA_dev_config_t) 857#define XENA_REG_SPACE sizeof(struct XENA_dev_config)
857#define XENA_EEPROM_SPACE (0x01 << 11) 858#define XENA_EEPROM_SPACE (0x01 << 11)
858 859
859#endif /* _REGS_H */ 860#endif /* _REGS_H */
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 1dd66b8ea0fa..8646b64994ab 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -77,7 +77,7 @@
77#include "s2io.h" 77#include "s2io.h"
78#include "s2io-regs.h" 78#include "s2io-regs.h"
79 79
80#define DRV_VERSION "2.0.15.2" 80#define DRV_VERSION "2.0.16.1"
81 81
82/* S2io Driver name & version. */ 82/* S2io Driver name & version. */
83static char s2io_driver_name[] = "Neterion"; 83static char s2io_driver_name[] = "Neterion";
@@ -86,7 +86,7 @@ static char s2io_driver_version[] = DRV_VERSION;
86static int rxd_size[4] = {32,48,48,64}; 86static int rxd_size[4] = {32,48,48,64};
87static int rxd_count[4] = {127,85,85,63}; 87static int rxd_count[4] = {127,85,85,63};
88 88
89static inline int RXD_IS_UP2DT(RxD_t *rxdp) 89static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
90{ 90{
91 int ret; 91 int ret;
92 92
@@ -111,9 +111,9 @@ static inline int RXD_IS_UP2DT(RxD_t *rxdp)
111#define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status)) 111#define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
112#define PANIC 1 112#define PANIC 1
113#define LOW 2 113#define LOW 2
114static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring) 114static inline int rx_buffer_level(struct s2io_nic * sp, int rxb_size, int ring)
115{ 115{
116 mac_info_t *mac_control; 116 struct mac_info *mac_control;
117 117
118 mac_control = &sp->mac_control; 118 mac_control = &sp->mac_control;
119 if (rxb_size <= rxd_count[sp->rxd_mode]) 119 if (rxb_size <= rxd_count[sp->rxd_mode])
@@ -286,7 +286,7 @@ static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
286static void s2io_vlan_rx_register(struct net_device *dev, 286static void s2io_vlan_rx_register(struct net_device *dev,
287 struct vlan_group *grp) 287 struct vlan_group *grp)
288{ 288{
289 nic_t *nic = dev->priv; 289 struct s2io_nic *nic = dev->priv;
290 unsigned long flags; 290 unsigned long flags;
291 291
292 spin_lock_irqsave(&nic->tx_lock, flags); 292 spin_lock_irqsave(&nic->tx_lock, flags);
@@ -297,7 +297,7 @@ static void s2io_vlan_rx_register(struct net_device *dev,
297/* Unregister the vlan */ 297/* Unregister the vlan */
298static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid) 298static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
299{ 299{
300 nic_t *nic = dev->priv; 300 struct s2io_nic *nic = dev->priv;
301 unsigned long flags; 301 unsigned long flags;
302 302
303 spin_lock_irqsave(&nic->tx_lock, flags); 303 spin_lock_irqsave(&nic->tx_lock, flags);
@@ -401,9 +401,10 @@ S2IO_PARM_INT(lro, 0);
401 * aggregation happens until we hit max IP pkt size(64K) 401 * aggregation happens until we hit max IP pkt size(64K)
402 */ 402 */
403S2IO_PARM_INT(lro_max_pkts, 0xFFFF); 403S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
404#ifndef CONFIG_S2IO_NAPI
405S2IO_PARM_INT(indicate_max_pkts, 0); 404S2IO_PARM_INT(indicate_max_pkts, 0);
406#endif 405
406S2IO_PARM_INT(napi, 1);
407S2IO_PARM_INT(ufo, 0);
407 408
408static unsigned int tx_fifo_len[MAX_TX_FIFOS] = 409static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
409 {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN}; 410 {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
@@ -457,14 +458,14 @@ static int init_shared_mem(struct s2io_nic *nic)
457 u32 size; 458 u32 size;
458 void *tmp_v_addr, *tmp_v_addr_next; 459 void *tmp_v_addr, *tmp_v_addr_next;
459 dma_addr_t tmp_p_addr, tmp_p_addr_next; 460 dma_addr_t tmp_p_addr, tmp_p_addr_next;
460 RxD_block_t *pre_rxd_blk = NULL; 461 struct RxD_block *pre_rxd_blk = NULL;
461 int i, j, blk_cnt, rx_sz, tx_sz; 462 int i, j, blk_cnt;
462 int lst_size, lst_per_page; 463 int lst_size, lst_per_page;
463 struct net_device *dev = nic->dev; 464 struct net_device *dev = nic->dev;
464 unsigned long tmp; 465 unsigned long tmp;
465 buffAdd_t *ba; 466 struct buffAdd *ba;
466 467
467 mac_info_t *mac_control; 468 struct mac_info *mac_control;
468 struct config_param *config; 469 struct config_param *config;
469 470
470 mac_control = &nic->mac_control; 471 mac_control = &nic->mac_control;
@@ -482,13 +483,12 @@ static int init_shared_mem(struct s2io_nic *nic)
482 return -EINVAL; 483 return -EINVAL;
483 } 484 }
484 485
485 lst_size = (sizeof(TxD_t) * config->max_txds); 486 lst_size = (sizeof(struct TxD) * config->max_txds);
486 tx_sz = lst_size * size;
487 lst_per_page = PAGE_SIZE / lst_size; 487 lst_per_page = PAGE_SIZE / lst_size;
488 488
489 for (i = 0; i < config->tx_fifo_num; i++) { 489 for (i = 0; i < config->tx_fifo_num; i++) {
490 int fifo_len = config->tx_cfg[i].fifo_len; 490 int fifo_len = config->tx_cfg[i].fifo_len;
491 int list_holder_size = fifo_len * sizeof(list_info_hold_t); 491 int list_holder_size = fifo_len * sizeof(struct list_info_hold);
492 mac_control->fifos[i].list_info = kmalloc(list_holder_size, 492 mac_control->fifos[i].list_info = kmalloc(list_holder_size,
493 GFP_KERNEL); 493 GFP_KERNEL);
494 if (!mac_control->fifos[i].list_info) { 494 if (!mac_control->fifos[i].list_info) {
@@ -579,10 +579,9 @@ static int init_shared_mem(struct s2io_nic *nic)
579 mac_control->rings[i].block_count; 579 mac_control->rings[i].block_count;
580 } 580 }
581 if (nic->rxd_mode == RXD_MODE_1) 581 if (nic->rxd_mode == RXD_MODE_1)
582 size = (size * (sizeof(RxD1_t))); 582 size = (size * (sizeof(struct RxD1)));
583 else 583 else
584 size = (size * (sizeof(RxD3_t))); 584 size = (size * (sizeof(struct RxD3)));
585 rx_sz = size;
586 585
587 for (i = 0; i < config->rx_ring_num; i++) { 586 for (i = 0; i < config->rx_ring_num; i++) {
588 mac_control->rings[i].rx_curr_get_info.block_index = 0; 587 mac_control->rings[i].rx_curr_get_info.block_index = 0;
@@ -600,7 +599,7 @@ static int init_shared_mem(struct s2io_nic *nic)
600 (rxd_count[nic->rxd_mode] + 1); 599 (rxd_count[nic->rxd_mode] + 1);
601 /* Allocating all the Rx blocks */ 600 /* Allocating all the Rx blocks */
602 for (j = 0; j < blk_cnt; j++) { 601 for (j = 0; j < blk_cnt; j++) {
603 rx_block_info_t *rx_blocks; 602 struct rx_block_info *rx_blocks;
604 int l; 603 int l;
605 604
606 rx_blocks = &mac_control->rings[i].rx_blocks[j]; 605 rx_blocks = &mac_control->rings[i].rx_blocks[j];
@@ -620,9 +619,11 @@ static int init_shared_mem(struct s2io_nic *nic)
620 memset(tmp_v_addr, 0, size); 619 memset(tmp_v_addr, 0, size);
621 rx_blocks->block_virt_addr = tmp_v_addr; 620 rx_blocks->block_virt_addr = tmp_v_addr;
622 rx_blocks->block_dma_addr = tmp_p_addr; 621 rx_blocks->block_dma_addr = tmp_p_addr;
623 rx_blocks->rxds = kmalloc(sizeof(rxd_info_t)* 622 rx_blocks->rxds = kmalloc(sizeof(struct rxd_info)*
624 rxd_count[nic->rxd_mode], 623 rxd_count[nic->rxd_mode],
625 GFP_KERNEL); 624 GFP_KERNEL);
625 if (!rx_blocks->rxds)
626 return -ENOMEM;
626 for (l=0; l<rxd_count[nic->rxd_mode];l++) { 627 for (l=0; l<rxd_count[nic->rxd_mode];l++) {
627 rx_blocks->rxds[l].virt_addr = 628 rx_blocks->rxds[l].virt_addr =
628 rx_blocks->block_virt_addr + 629 rx_blocks->block_virt_addr +
@@ -645,7 +646,7 @@ static int init_shared_mem(struct s2io_nic *nic)
645 mac_control->rings[i].rx_blocks[(j + 1) % 646 mac_control->rings[i].rx_blocks[(j + 1) %
646 blk_cnt].block_dma_addr; 647 blk_cnt].block_dma_addr;
647 648
648 pre_rxd_blk = (RxD_block_t *) tmp_v_addr; 649 pre_rxd_blk = (struct RxD_block *) tmp_v_addr;
649 pre_rxd_blk->reserved_2_pNext_RxD_block = 650 pre_rxd_blk->reserved_2_pNext_RxD_block =
650 (unsigned long) tmp_v_addr_next; 651 (unsigned long) tmp_v_addr_next;
651 pre_rxd_blk->pNext_RxD_Blk_physical = 652 pre_rxd_blk->pNext_RxD_Blk_physical =
@@ -661,14 +662,14 @@ static int init_shared_mem(struct s2io_nic *nic)
661 blk_cnt = config->rx_cfg[i].num_rxd / 662 blk_cnt = config->rx_cfg[i].num_rxd /
662 (rxd_count[nic->rxd_mode]+ 1); 663 (rxd_count[nic->rxd_mode]+ 1);
663 mac_control->rings[i].ba = 664 mac_control->rings[i].ba =
664 kmalloc((sizeof(buffAdd_t *) * blk_cnt), 665 kmalloc((sizeof(struct buffAdd *) * blk_cnt),
665 GFP_KERNEL); 666 GFP_KERNEL);
666 if (!mac_control->rings[i].ba) 667 if (!mac_control->rings[i].ba)
667 return -ENOMEM; 668 return -ENOMEM;
668 for (j = 0; j < blk_cnt; j++) { 669 for (j = 0; j < blk_cnt; j++) {
669 int k = 0; 670 int k = 0;
670 mac_control->rings[i].ba[j] = 671 mac_control->rings[i].ba[j] =
671 kmalloc((sizeof(buffAdd_t) * 672 kmalloc((sizeof(struct buffAdd) *
672 (rxd_count[nic->rxd_mode] + 1)), 673 (rxd_count[nic->rxd_mode] + 1)),
673 GFP_KERNEL); 674 GFP_KERNEL);
674 if (!mac_control->rings[i].ba[j]) 675 if (!mac_control->rings[i].ba[j])
@@ -700,7 +701,7 @@ static int init_shared_mem(struct s2io_nic *nic)
700 } 701 }
701 702
702 /* Allocation and initialization of Statistics block */ 703 /* Allocation and initialization of Statistics block */
703 size = sizeof(StatInfo_t); 704 size = sizeof(struct stat_block);
704 mac_control->stats_mem = pci_alloc_consistent 705 mac_control->stats_mem = pci_alloc_consistent
705 (nic->pdev, size, &mac_control->stats_mem_phy); 706 (nic->pdev, size, &mac_control->stats_mem_phy);
706 707
@@ -715,7 +716,7 @@ static int init_shared_mem(struct s2io_nic *nic)
715 mac_control->stats_mem_sz = size; 716 mac_control->stats_mem_sz = size;
716 717
717 tmp_v_addr = mac_control->stats_mem; 718 tmp_v_addr = mac_control->stats_mem;
718 mac_control->stats_info = (StatInfo_t *) tmp_v_addr; 719 mac_control->stats_info = (struct stat_block *) tmp_v_addr;
719 memset(tmp_v_addr, 0, size); 720 memset(tmp_v_addr, 0, size);
720 DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name, 721 DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
721 (unsigned long long) tmp_p_addr); 722 (unsigned long long) tmp_p_addr);
@@ -735,7 +736,7 @@ static void free_shared_mem(struct s2io_nic *nic)
735 int i, j, blk_cnt, size; 736 int i, j, blk_cnt, size;
736 void *tmp_v_addr; 737 void *tmp_v_addr;
737 dma_addr_t tmp_p_addr; 738 dma_addr_t tmp_p_addr;
738 mac_info_t *mac_control; 739 struct mac_info *mac_control;
739 struct config_param *config; 740 struct config_param *config;
740 int lst_size, lst_per_page; 741 int lst_size, lst_per_page;
741 struct net_device *dev = nic->dev; 742 struct net_device *dev = nic->dev;
@@ -746,7 +747,7 @@ static void free_shared_mem(struct s2io_nic *nic)
746 mac_control = &nic->mac_control; 747 mac_control = &nic->mac_control;
747 config = &nic->config; 748 config = &nic->config;
748 749
749 lst_size = (sizeof(TxD_t) * config->max_txds); 750 lst_size = (sizeof(struct TxD) * config->max_txds);
750 lst_per_page = PAGE_SIZE / lst_size; 751 lst_per_page = PAGE_SIZE / lst_size;
751 752
752 for (i = 0; i < config->tx_fifo_num; i++) { 753 for (i = 0; i < config->tx_fifo_num; i++) {
@@ -809,7 +810,7 @@ static void free_shared_mem(struct s2io_nic *nic)
809 if (!mac_control->rings[i].ba[j]) 810 if (!mac_control->rings[i].ba[j])
810 continue; 811 continue;
811 while (k != rxd_count[nic->rxd_mode]) { 812 while (k != rxd_count[nic->rxd_mode]) {
812 buffAdd_t *ba = 813 struct buffAdd *ba =
813 &mac_control->rings[i].ba[j][k]; 814 &mac_control->rings[i].ba[j][k];
814 kfree(ba->ba_0_org); 815 kfree(ba->ba_0_org);
815 kfree(ba->ba_1_org); 816 kfree(ba->ba_1_org);
@@ -835,9 +836,9 @@ static void free_shared_mem(struct s2io_nic *nic)
835 * s2io_verify_pci_mode - 836 * s2io_verify_pci_mode -
836 */ 837 */
837 838
838static int s2io_verify_pci_mode(nic_t *nic) 839static int s2io_verify_pci_mode(struct s2io_nic *nic)
839{ 840{
840 XENA_dev_config_t __iomem *bar0 = nic->bar0; 841 struct XENA_dev_config __iomem *bar0 = nic->bar0;
841 register u64 val64 = 0; 842 register u64 val64 = 0;
842 int mode; 843 int mode;
843 844
@@ -868,9 +869,9 @@ static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
868/** 869/**
869 * s2io_print_pci_mode - 870 * s2io_print_pci_mode -
870 */ 871 */
871static int s2io_print_pci_mode(nic_t *nic) 872static int s2io_print_pci_mode(struct s2io_nic *nic)
872{ 873{
873 XENA_dev_config_t __iomem *bar0 = nic->bar0; 874 struct XENA_dev_config __iomem *bar0 = nic->bar0;
874 register u64 val64 = 0; 875 register u64 val64 = 0;
875 int mode; 876 int mode;
876 struct config_param *config = &nic->config; 877 struct config_param *config = &nic->config;
@@ -938,13 +939,13 @@ static int s2io_print_pci_mode(nic_t *nic)
938 939
939static int init_nic(struct s2io_nic *nic) 940static int init_nic(struct s2io_nic *nic)
940{ 941{
941 XENA_dev_config_t __iomem *bar0 = nic->bar0; 942 struct XENA_dev_config __iomem *bar0 = nic->bar0;
942 struct net_device *dev = nic->dev; 943 struct net_device *dev = nic->dev;
943 register u64 val64 = 0; 944 register u64 val64 = 0;
944 void __iomem *add; 945 void __iomem *add;
945 u32 time; 946 u32 time;
946 int i, j; 947 int i, j;
947 mac_info_t *mac_control; 948 struct mac_info *mac_control;
948 struct config_param *config; 949 struct config_param *config;
949 int dtx_cnt = 0; 950 int dtx_cnt = 0;
950 unsigned long long mem_share; 951 unsigned long long mem_share;
@@ -1414,7 +1415,7 @@ static int init_nic(struct s2io_nic *nic)
1414 1415
1415 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) | 1416 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1416 TTI_DATA2_MEM_TX_UFC_B(0x20) | 1417 TTI_DATA2_MEM_TX_UFC_B(0x20) |
1417 TTI_DATA2_MEM_TX_UFC_C(0x70) | TTI_DATA2_MEM_TX_UFC_D(0x80); 1418 TTI_DATA2_MEM_TX_UFC_C(0x40) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1418 writeq(val64, &bar0->tti_data2_mem); 1419 writeq(val64, &bar0->tti_data2_mem);
1419 1420
1420 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD; 1421 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
@@ -1610,7 +1611,8 @@ static int init_nic(struct s2io_nic *nic)
1610 * that does not start on an ADB to reduce disconnects. 1611 * that does not start on an ADB to reduce disconnects.
1611 */ 1612 */
1612 if (nic->device_type == XFRAME_II_DEVICE) { 1613 if (nic->device_type == XFRAME_II_DEVICE) {
1613 val64 = EXT_REQ_EN | MISC_LINK_STABILITY_PRD(3); 1614 val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1615 MISC_LINK_STABILITY_PRD(3);
1614 writeq(val64, &bar0->misc_control); 1616 writeq(val64, &bar0->misc_control);
1615 val64 = readq(&bar0->pic_control2); 1617 val64 = readq(&bar0->pic_control2);
1616 val64 &= ~(BIT(13)|BIT(14)|BIT(15)); 1618 val64 &= ~(BIT(13)|BIT(14)|BIT(15));
@@ -1626,7 +1628,7 @@ static int init_nic(struct s2io_nic *nic)
1626#define LINK_UP_DOWN_INTERRUPT 1 1628#define LINK_UP_DOWN_INTERRUPT 1
1627#define MAC_RMAC_ERR_TIMER 2 1629#define MAC_RMAC_ERR_TIMER 2
1628 1630
1629static int s2io_link_fault_indication(nic_t *nic) 1631static int s2io_link_fault_indication(struct s2io_nic *nic)
1630{ 1632{
1631 if (nic->intr_type != INTA) 1633 if (nic->intr_type != INTA)
1632 return MAC_RMAC_ERR_TIMER; 1634 return MAC_RMAC_ERR_TIMER;
@@ -1649,14 +1651,14 @@ static int s2io_link_fault_indication(nic_t *nic)
1649 1651
1650static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag) 1652static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1651{ 1653{
1652 XENA_dev_config_t __iomem *bar0 = nic->bar0; 1654 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1653 register u64 val64 = 0, temp64 = 0; 1655 register u64 val64 = 0, temp64 = 0;
1654 1656
1655 /* Top level interrupt classification */ 1657 /* Top level interrupt classification */
1656 /* PIC Interrupts */ 1658 /* PIC Interrupts */
1657 if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) { 1659 if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
1658 /* Enable PIC Intrs in the general intr mask register */ 1660 /* Enable PIC Intrs in the general intr mask register */
1659 val64 = TXPIC_INT_M | PIC_RX_INT_M; 1661 val64 = TXPIC_INT_M;
1660 if (flag == ENABLE_INTRS) { 1662 if (flag == ENABLE_INTRS) {
1661 temp64 = readq(&bar0->general_int_mask); 1663 temp64 = readq(&bar0->general_int_mask);
1662 temp64 &= ~((u64) val64); 1664 temp64 &= ~((u64) val64);
@@ -1694,70 +1696,6 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1694 } 1696 }
1695 } 1697 }
1696 1698
1697 /* DMA Interrupts */
1698 /* Enabling/Disabling Tx DMA interrupts */
1699 if (mask & TX_DMA_INTR) {
1700 /* Enable TxDMA Intrs in the general intr mask register */
1701 val64 = TXDMA_INT_M;
1702 if (flag == ENABLE_INTRS) {
1703 temp64 = readq(&bar0->general_int_mask);
1704 temp64 &= ~((u64) val64);
1705 writeq(temp64, &bar0->general_int_mask);
1706 /*
1707 * Keep all interrupts other than PFC interrupt
1708 * and PCC interrupt disabled in DMA level.
1709 */
1710 val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M |
1711 TXDMA_PCC_INT_M);
1712 writeq(val64, &bar0->txdma_int_mask);
1713 /*
1714 * Enable only the MISC error 1 interrupt in PFC block
1715 */
1716 val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1);
1717 writeq(val64, &bar0->pfc_err_mask);
1718 /*
1719 * Enable only the FB_ECC error interrupt in PCC block
1720 */
1721 val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR);
1722 writeq(val64, &bar0->pcc_err_mask);
1723 } else if (flag == DISABLE_INTRS) {
1724 /*
1725 * Disable TxDMA Intrs in the general intr mask
1726 * register
1727 */
1728 writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask);
1729 writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask);
1730 temp64 = readq(&bar0->general_int_mask);
1731 val64 |= temp64;
1732 writeq(val64, &bar0->general_int_mask);
1733 }
1734 }
1735
1736 /* Enabling/Disabling Rx DMA interrupts */
1737 if (mask & RX_DMA_INTR) {
1738 /* Enable RxDMA Intrs in the general intr mask register */
1739 val64 = RXDMA_INT_M;
1740 if (flag == ENABLE_INTRS) {
1741 temp64 = readq(&bar0->general_int_mask);
1742 temp64 &= ~((u64) val64);
1743 writeq(temp64, &bar0->general_int_mask);
1744 /*
1745 * All RxDMA block interrupts are disabled for now
1746 * TODO
1747 */
1748 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1749 } else if (flag == DISABLE_INTRS) {
1750 /*
1751 * Disable RxDMA Intrs in the general intr mask
1752 * register
1753 */
1754 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1755 temp64 = readq(&bar0->general_int_mask);
1756 val64 |= temp64;
1757 writeq(val64, &bar0->general_int_mask);
1758 }
1759 }
1760
1761 /* MAC Interrupts */ 1699 /* MAC Interrupts */
1762 /* Enabling/Disabling MAC interrupts */ 1700 /* Enabling/Disabling MAC interrupts */
1763 if (mask & (TX_MAC_INTR | RX_MAC_INTR)) { 1701 if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
@@ -1784,53 +1722,6 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1784 } 1722 }
1785 } 1723 }
1786 1724
1787 /* XGXS Interrupts */
1788 if (mask & (TX_XGXS_INTR | RX_XGXS_INTR)) {
1789 val64 = TXXGXS_INT_M | RXXGXS_INT_M;
1790 if (flag == ENABLE_INTRS) {
1791 temp64 = readq(&bar0->general_int_mask);
1792 temp64 &= ~((u64) val64);
1793 writeq(temp64, &bar0->general_int_mask);
1794 /*
1795 * All XGXS block error interrupts are disabled for now
1796 * TODO
1797 */
1798 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1799 } else if (flag == DISABLE_INTRS) {
1800 /*
1801 * Disable MC Intrs in the general intr mask register
1802 */
1803 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1804 temp64 = readq(&bar0->general_int_mask);
1805 val64 |= temp64;
1806 writeq(val64, &bar0->general_int_mask);
1807 }
1808 }
1809
1810 /* Memory Controller(MC) interrupts */
1811 if (mask & MC_INTR) {
1812 val64 = MC_INT_M;
1813 if (flag == ENABLE_INTRS) {
1814 temp64 = readq(&bar0->general_int_mask);
1815 temp64 &= ~((u64) val64);
1816 writeq(temp64, &bar0->general_int_mask);
1817 /*
1818 * Enable all MC Intrs.
1819 */
1820 writeq(0x0, &bar0->mc_int_mask);
1821 writeq(0x0, &bar0->mc_err_mask);
1822 } else if (flag == DISABLE_INTRS) {
1823 /*
1824 * Disable MC Intrs in the general intr mask register
1825 */
1826 writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
1827 temp64 = readq(&bar0->general_int_mask);
1828 val64 |= temp64;
1829 writeq(val64, &bar0->general_int_mask);
1830 }
1831 }
1832
1833
1834 /* Tx traffic interrupts */ 1725 /* Tx traffic interrupts */
1835 if (mask & TX_TRAFFIC_INTR) { 1726 if (mask & TX_TRAFFIC_INTR) {
1836 val64 = TXTRAFFIC_INT_M; 1727 val64 = TXTRAFFIC_INT_M;
@@ -1877,41 +1768,36 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1877 } 1768 }
1878} 1769}
1879 1770
1880static int check_prc_pcc_state(u64 val64, int flag, int rev_id, int herc) 1771/**
 1772 * verify_pcc_quiescent - Checks for PCC quiescent state
 1773 * Return: 1 if PCC is quiescent
 1774 * 0 if PCC is not quiescent
1775 */
1776static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
1881{ 1777{
1882 int ret = 0; 1778 int ret = 0, herc;
1779 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1780 u64 val64 = readq(&bar0->adapter_status);
1781
1782 herc = (sp->device_type == XFRAME_II_DEVICE);
1883 1783
1884 if (flag == FALSE) { 1784 if (flag == FALSE) {
1885 if ((!herc && (rev_id >= 4)) || herc) { 1785 if ((!herc && (get_xena_rev_id(sp->pdev) >= 4)) || herc) {
1886 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) && 1786 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
1887 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1888 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1889 ret = 1; 1787 ret = 1;
1890 } 1788 } else {
1891 }else { 1789 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
1892 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1893 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1894 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1895 ret = 1; 1790 ret = 1;
1896 }
1897 } 1791 }
1898 } else { 1792 } else {
1899 if ((!herc && (rev_id >= 4)) || herc) { 1793 if ((!herc && (get_xena_rev_id(sp->pdev) >= 4)) || herc) {
1900 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) == 1794 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1901 ADAPTER_STATUS_RMAC_PCC_IDLE) && 1795 ADAPTER_STATUS_RMAC_PCC_IDLE))
1902 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1903 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1904 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1905 ret = 1; 1796 ret = 1;
1906 }
1907 } else { 1797 } else {
1908 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) == 1798 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
1909 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) && 1799 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
1910 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1911 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1912 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1913 ret = 1; 1800 ret = 1;
1914 }
1915 } 1801 }
1916 } 1802 }
1917 1803
@@ -1919,9 +1805,6 @@ static int check_prc_pcc_state(u64 val64, int flag, int rev_id, int herc)
1919} 1805}
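A hypothetical caller could poll verify_pcc_quiescent() with a bounded retry loop; a minimal sketch, where the 50-iteration / 10 ms budget is an assumption and not a driver constant:

	/* Illustrative only: wait for the PCC to report quiescent. */
	static int wait_for_pcc_quiescent(struct s2io_nic *sp, int flag)
	{
		int i;

		for (i = 0; i < 50; i++) {
			if (verify_pcc_quiescent(sp, flag))
				return 0;	/* quiescent */
			msleep(10);
		}
		return -ETIMEDOUT;
	}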
1920/** 1806/**
1921 * verify_xena_quiescence - Checks whether the H/W is ready 1807 * verify_xena_quiescence - Checks whether the H/W is ready
1922 * @val64 : Value read from adapter status register.
1923 * @flag : indicates if the adapter enable bit was ever written once
1924 * before.
1925 * Description: Returns whether the H/W is ready to go or not. Depending 1808 * Description: Returns whether the H/W is ready to go or not. Depending
1926 * on whether adapter enable bit was written or not the comparison 1809 * on whether adapter enable bit was written or not the comparison
1927 * differs and the calling function passes the input argument flag to 1810 * differs and the calling function passes the input argument flag to
@@ -1930,24 +1813,63 @@ static int check_prc_pcc_state(u64 val64, int flag, int rev_id, int herc)
1930 * 0 If Xena is not quiescence 1813 * 0 If Xena is not quiescence
1931 */ 1814 */
1932 1815
1933static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag) 1816static int verify_xena_quiescence(struct s2io_nic *sp)
1934{ 1817{
1935 int ret = 0, herc; 1818 int mode;
1936 u64 tmp64 = ~((u64) val64); 1819 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1937 int rev_id = get_xena_rev_id(sp->pdev); 1820 u64 val64 = readq(&bar0->adapter_status);
1821 mode = s2io_verify_pci_mode(sp);
1938 1822
1939 herc = (sp->device_type == XFRAME_II_DEVICE); 1823 if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
1940 if (! 1824 DBG_PRINT(ERR_DBG, "%s", "TDMA is not ready!");
1941 (tmp64 & 1825 return 0;
1942 (ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY | 1826 }
1943 ADAPTER_STATUS_PFC_READY | ADAPTER_STATUS_TMAC_BUF_EMPTY | 1827 if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
1944 ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY | 1828 DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!");
1945 ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK | 1829 return 0;
1946 ADAPTER_STATUS_P_PLL_LOCK))) { 1830 }
1947 ret = check_prc_pcc_state(val64, flag, rev_id, herc); 1831 if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
1832 DBG_PRINT(ERR_DBG, "%s", "PFC is not ready!");
1833 return 0;
1834 }
1835 if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
1836 DBG_PRINT(ERR_DBG, "%s", "TMAC BUF is not empty!");
1837 return 0;
1838 }
1839 if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
1840 DBG_PRINT(ERR_DBG, "%s", "PIC is not QUIESCENT!");
1841 return 0;
1842 }
1843 if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
1844 DBG_PRINT(ERR_DBG, "%s", "MC_DRAM is not ready!");
1845 return 0;
1846 }
1847 if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
1848 DBG_PRINT(ERR_DBG, "%s", "MC_QUEUES is not ready!");
1849 return 0;
1850 }
1851 if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
1852 DBG_PRINT(ERR_DBG, "%s", "M_PLL is not locked!");
1853 return 0;
1948 } 1854 }
1949 1855
1950 return ret; 1856 /*
1857 * In PCI 33 mode, the P_PLL is not used, and therefore,
1858 * the P_PLL_LOCK bit in the adapter_status register will
1859 * not be asserted.
1860 */
1861 if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
1862 sp->device_type == XFRAME_II_DEVICE && mode !=
1863 PCI_MODE_PCI_33) {
1864 DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!");
1865 return 0;
1866 }
1867 if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1868 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1869 DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!");
1870 return 0;
1871 }
1872 return 1;
1951} 1873}
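The ladder of checks in verify_xena_quiescence() could equally be table-driven; a sketch of that design choice, covering only the unconditionally required adapter_status bits (the PCI-33 P_PLL special case and the RC_PRC field test above would still need explicit code):

	/* Design sketch: table-driven readiness test for adapter_status. */
	static const struct {
		u64 bit;
		const char *name;
	} s2io_ready_bits[] = {
		{ ADAPTER_STATUS_TDMA_READY,      "TDMA"      },
		{ ADAPTER_STATUS_RDMA_READY,      "RDMA"      },
		{ ADAPTER_STATUS_PFC_READY,       "PFC"       },
		{ ADAPTER_STATUS_TMAC_BUF_EMPTY,  "TMAC BUF"  },
		{ ADAPTER_STATUS_PIC_QUIESCENT,   "PIC"       },
		{ ADAPTER_STATUS_MC_DRAM_READY,   "MC_DRAM"   },
		{ ADAPTER_STATUS_MC_QUEUES_READY, "MC_QUEUES" },
		{ ADAPTER_STATUS_M_PLL_LOCK,      "M_PLL"     },
	};

	static int s2io_status_bits_set(u64 val64)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(s2io_ready_bits); i++) {
			if (!(val64 & s2io_ready_bits[i].bit)) {
				DBG_PRINT(ERR_DBG, "%s is not ready!",
					  s2io_ready_bits[i].name);
				return 0;
			}
		}
		return 1;
	}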
1952 1874
1953/** 1875/**
@@ -1958,9 +1880,9 @@ static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag)
1958 * 1880 *
1959 */ 1881 */
1960 1882
1961static void fix_mac_address(nic_t * sp) 1883static void fix_mac_address(struct s2io_nic * sp)
1962{ 1884{
1963 XENA_dev_config_t __iomem *bar0 = sp->bar0; 1885 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1964 u64 val64; 1886 u64 val64;
1965 int i = 0; 1887 int i = 0;
1966 1888
@@ -1986,11 +1908,11 @@ static void fix_mac_address(nic_t * sp)
1986 1908
1987static int start_nic(struct s2io_nic *nic) 1909static int start_nic(struct s2io_nic *nic)
1988{ 1910{
1989 XENA_dev_config_t __iomem *bar0 = nic->bar0; 1911 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1990 struct net_device *dev = nic->dev; 1912 struct net_device *dev = nic->dev;
1991 register u64 val64 = 0; 1913 register u64 val64 = 0;
1992 u16 subid, i; 1914 u16 subid, i;
1993 mac_info_t *mac_control; 1915 struct mac_info *mac_control;
1994 struct config_param *config; 1916 struct config_param *config;
1995 1917
1996 mac_control = &nic->mac_control; 1918 mac_control = &nic->mac_control;
@@ -2052,7 +1974,7 @@ static int start_nic(struct s2io_nic *nic)
2052 * it. 1974 * it.
2053 */ 1975 */
2054 val64 = readq(&bar0->adapter_status); 1976 val64 = readq(&bar0->adapter_status);
2055 if (!verify_xena_quiescence(nic, val64, nic->device_enabled_once)) { 1977 if (!verify_xena_quiescence(nic)) {
2056 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name); 1978 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
2057 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n", 1979 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
2058 (unsigned long long) val64); 1980 (unsigned long long) val64);
@@ -2095,11 +2017,12 @@ static int start_nic(struct s2io_nic *nic)
2095/** 2017/**
2096 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb 2018 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2097 */ 2019 */
2098static struct sk_buff *s2io_txdl_getskb(fifo_info_t *fifo_data, TxD_t *txdlp, int get_off) 2020static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data, struct \
2021 TxD *txdlp, int get_off)
2099{ 2022{
2100 nic_t *nic = fifo_data->nic; 2023 struct s2io_nic *nic = fifo_data->nic;
2101 struct sk_buff *skb; 2024 struct sk_buff *skb;
2102 TxD_t *txds; 2025 struct TxD *txds;
2103 u16 j, frg_cnt; 2026 u16 j, frg_cnt;
2104 2027
2105 txds = txdlp; 2028 txds = txdlp;
@@ -2113,7 +2036,7 @@ static struct sk_buff *s2io_txdl_getskb(fifo_info_t *fifo_data, TxD_t *txdlp, in
2113 skb = (struct sk_buff *) ((unsigned long) 2036 skb = (struct sk_buff *) ((unsigned long)
2114 txds->Host_Control); 2037 txds->Host_Control);
2115 if (!skb) { 2038 if (!skb) {
2116 memset(txdlp, 0, (sizeof(TxD_t) * fifo_data->max_txds)); 2039 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2117 return NULL; 2040 return NULL;
2118 } 2041 }
2119 pci_unmap_single(nic->pdev, (dma_addr_t) 2042 pci_unmap_single(nic->pdev, (dma_addr_t)
@@ -2132,7 +2055,7 @@ static struct sk_buff *s2io_txdl_getskb(fifo_info_t *fifo_data, TxD_t *txdlp, in
2132 frag->size, PCI_DMA_TODEVICE); 2055 frag->size, PCI_DMA_TODEVICE);
2133 } 2056 }
2134 } 2057 }
2135 memset(txdlp,0, (sizeof(TxD_t) * fifo_data->max_txds)); 2058 memset(txdlp,0, (sizeof(struct TxD) * fifo_data->max_txds));
2136 return(skb); 2059 return(skb);
2137} 2060}
2138 2061
@@ -2148,9 +2071,9 @@ static void free_tx_buffers(struct s2io_nic *nic)
2148{ 2071{
2149 struct net_device *dev = nic->dev; 2072 struct net_device *dev = nic->dev;
2150 struct sk_buff *skb; 2073 struct sk_buff *skb;
2151 TxD_t *txdp; 2074 struct TxD *txdp;
2152 int i, j; 2075 int i, j;
2153 mac_info_t *mac_control; 2076 struct mac_info *mac_control;
2154 struct config_param *config; 2077 struct config_param *config;
2155 int cnt = 0; 2078 int cnt = 0;
2156 2079
@@ -2159,7 +2082,7 @@ static void free_tx_buffers(struct s2io_nic *nic)
2159 2082
2160 for (i = 0; i < config->tx_fifo_num; i++) { 2083 for (i = 0; i < config->tx_fifo_num; i++) {
2161 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) { 2084 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
2162 txdp = (TxD_t *) mac_control->fifos[i].list_info[j]. 2085 txdp = (struct TxD *) mac_control->fifos[i].list_info[j].
2163 list_virt_addr; 2086 list_virt_addr;
2164 skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j); 2087 skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2165 if (skb) { 2088 if (skb) {
@@ -2187,10 +2110,10 @@ static void free_tx_buffers(struct s2io_nic *nic)
2187 2110
2188static void stop_nic(struct s2io_nic *nic) 2111static void stop_nic(struct s2io_nic *nic)
2189{ 2112{
2190 XENA_dev_config_t __iomem *bar0 = nic->bar0; 2113 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2191 register u64 val64 = 0; 2114 register u64 val64 = 0;
2192 u16 interruptible; 2115 u16 interruptible;
2193 mac_info_t *mac_control; 2116 struct mac_info *mac_control;
2194 struct config_param *config; 2117 struct config_param *config;
2195 2118
2196 mac_control = &nic->mac_control; 2119 mac_control = &nic->mac_control;
@@ -2208,14 +2131,15 @@ static void stop_nic(struct s2io_nic *nic)
2208 writeq(val64, &bar0->adapter_control); 2131 writeq(val64, &bar0->adapter_control);
2209} 2132}
2210 2133
2211static int fill_rxd_3buf(nic_t *nic, RxD_t *rxdp, struct sk_buff *skb) 2134static int fill_rxd_3buf(struct s2io_nic *nic, struct RxD_t *rxdp, struct \
2135 sk_buff *skb)
2212{ 2136{
2213 struct net_device *dev = nic->dev; 2137 struct net_device *dev = nic->dev;
2214 struct sk_buff *frag_list; 2138 struct sk_buff *frag_list;
2215 void *tmp; 2139 void *tmp;
2216 2140
2217 /* Buffer-1 receives L3/L4 headers */ 2141 /* Buffer-1 receives L3/L4 headers */
2218 ((RxD3_t*)rxdp)->Buffer1_ptr = pci_map_single 2142 ((struct RxD3*)rxdp)->Buffer1_ptr = pci_map_single
2219 (nic->pdev, skb->data, l3l4hdr_size + 4, 2143 (nic->pdev, skb->data, l3l4hdr_size + 4,
2220 PCI_DMA_FROMDEVICE); 2144 PCI_DMA_FROMDEVICE);
2221 2145
@@ -2226,13 +2150,14 @@ static int fill_rxd_3buf(nic_t *nic, RxD_t *rxdp, struct sk_buff *skb)
2226 return -ENOMEM ; 2150 return -ENOMEM ;
2227 } 2151 }
2228 frag_list = skb_shinfo(skb)->frag_list; 2152 frag_list = skb_shinfo(skb)->frag_list;
2153 skb->truesize += frag_list->truesize;
2229 frag_list->next = NULL; 2154 frag_list->next = NULL;
2230 tmp = (void *)ALIGN((long)frag_list->data, ALIGN_SIZE + 1); 2155 tmp = (void *)ALIGN((long)frag_list->data, ALIGN_SIZE + 1);
2231 frag_list->data = tmp; 2156 frag_list->data = tmp;
2232 frag_list->tail = tmp; 2157 frag_list->tail = tmp;
2233 2158
2234 /* Buffer-2 receives L4 data payload */ 2159 /* Buffer-2 receives L4 data payload */
2235 ((RxD3_t*)rxdp)->Buffer2_ptr = pci_map_single(nic->pdev, 2160 ((struct RxD3*)rxdp)->Buffer2_ptr = pci_map_single(nic->pdev,
2236 frag_list->data, dev->mtu, 2161 frag_list->data, dev->mtu,
2237 PCI_DMA_FROMDEVICE); 2162 PCI_DMA_FROMDEVICE);
2238 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4); 2163 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
@@ -2266,18 +2191,16 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2266{ 2191{
2267 struct net_device *dev = nic->dev; 2192 struct net_device *dev = nic->dev;
2268 struct sk_buff *skb; 2193 struct sk_buff *skb;
2269 RxD_t *rxdp; 2194 struct RxD_t *rxdp;
2270 int off, off1, size, block_no, block_no1; 2195 int off, off1, size, block_no, block_no1;
2271 u32 alloc_tab = 0; 2196 u32 alloc_tab = 0;
2272 u32 alloc_cnt; 2197 u32 alloc_cnt;
2273 mac_info_t *mac_control; 2198 struct mac_info *mac_control;
2274 struct config_param *config; 2199 struct config_param *config;
2275 u64 tmp; 2200 u64 tmp;
2276 buffAdd_t *ba; 2201 struct buffAdd *ba;
2277#ifndef CONFIG_S2IO_NAPI
2278 unsigned long flags; 2202 unsigned long flags;
2279#endif 2203 struct RxD_t *first_rxdp = NULL;
2280 RxD_t *first_rxdp = NULL;
2281 2204
2282 mac_control = &nic->mac_control; 2205 mac_control = &nic->mac_control;
2283 config = &nic->config; 2206 config = &nic->config;
@@ -2320,12 +2243,15 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2320 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n", 2243 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2321 dev->name, rxdp); 2244 dev->name, rxdp);
2322 } 2245 }
2323#ifndef CONFIG_S2IO_NAPI 2246 if(!napi) {
2324 spin_lock_irqsave(&nic->put_lock, flags); 2247 spin_lock_irqsave(&nic->put_lock, flags);
2325 mac_control->rings[ring_no].put_pos = 2248 mac_control->rings[ring_no].put_pos =
2326 (block_no * (rxd_count[nic->rxd_mode] + 1)) + off; 2249 (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
2327 spin_unlock_irqrestore(&nic->put_lock, flags); 2250 spin_unlock_irqrestore(&nic->put_lock, flags);
2328#endif 2251 } else {
2252 mac_control->rings[ring_no].put_pos =
2253 (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
2254 }
2329 if ((rxdp->Control_1 & RXD_OWN_XENA) && 2255 if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2330 ((nic->rxd_mode >= RXD_MODE_3A) && 2256 ((nic->rxd_mode >= RXD_MODE_3A) &&
2331 (rxdp->Control_2 & BIT(0)))) { 2257 (rxdp->Control_2 & BIT(0)))) {
@@ -2356,9 +2282,9 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2356 } 2282 }
2357 if (nic->rxd_mode == RXD_MODE_1) { 2283 if (nic->rxd_mode == RXD_MODE_1) {
2358 /* 1 buffer mode - normal operation mode */ 2284 /* 1 buffer mode - normal operation mode */
2359 memset(rxdp, 0, sizeof(RxD1_t)); 2285 memset(rxdp, 0, sizeof(struct RxD1));
2360 skb_reserve(skb, NET_IP_ALIGN); 2286 skb_reserve(skb, NET_IP_ALIGN);
2361 ((RxD1_t*)rxdp)->Buffer0_ptr = pci_map_single 2287 ((struct RxD1*)rxdp)->Buffer0_ptr = pci_map_single
2362 (nic->pdev, skb->data, size - NET_IP_ALIGN, 2288 (nic->pdev, skb->data, size - NET_IP_ALIGN,
2363 PCI_DMA_FROMDEVICE); 2289 PCI_DMA_FROMDEVICE);
2364 rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN); 2290 rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
@@ -2375,7 +2301,7 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2375 * payload 2301 * payload
2376 */ 2302 */
2377 2303
2378 memset(rxdp, 0, sizeof(RxD3_t)); 2304 memset(rxdp, 0, sizeof(struct RxD3));
2379 ba = &mac_control->rings[ring_no].ba[block_no][off]; 2305 ba = &mac_control->rings[ring_no].ba[block_no][off];
2380 skb_reserve(skb, BUF0_LEN); 2306 skb_reserve(skb, BUF0_LEN);
2381 tmp = (u64)(unsigned long) skb->data; 2307 tmp = (u64)(unsigned long) skb->data;
@@ -2384,13 +2310,13 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2384 skb->data = (void *) (unsigned long)tmp; 2310 skb->data = (void *) (unsigned long)tmp;
2385 skb->tail = (void *) (unsigned long)tmp; 2311 skb->tail = (void *) (unsigned long)tmp;
2386 2312
2387 if (!(((RxD3_t*)rxdp)->Buffer0_ptr)) 2313 if (!(((struct RxD3*)rxdp)->Buffer0_ptr))
2388 ((RxD3_t*)rxdp)->Buffer0_ptr = 2314 ((struct RxD3*)rxdp)->Buffer0_ptr =
2389 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN, 2315 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
2390 PCI_DMA_FROMDEVICE); 2316 PCI_DMA_FROMDEVICE);
2391 else 2317 else
2392 pci_dma_sync_single_for_device(nic->pdev, 2318 pci_dma_sync_single_for_device(nic->pdev,
2393 (dma_addr_t) ((RxD3_t*)rxdp)->Buffer0_ptr, 2319 (dma_addr_t) ((struct RxD3*)rxdp)->Buffer0_ptr,
2394 BUF0_LEN, PCI_DMA_FROMDEVICE); 2320 BUF0_LEN, PCI_DMA_FROMDEVICE);
2395 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN); 2321 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2396 if (nic->rxd_mode == RXD_MODE_3B) { 2322 if (nic->rxd_mode == RXD_MODE_3B) {
@@ -2400,13 +2326,13 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2400 * Buffer2 will have L3/L4 header plus 2326 * Buffer2 will have L3/L4 header plus
2401 * L4 payload 2327 * L4 payload
2402 */ 2328 */
2403 ((RxD3_t*)rxdp)->Buffer2_ptr = pci_map_single 2329 ((struct RxD3*)rxdp)->Buffer2_ptr = pci_map_single
2404 (nic->pdev, skb->data, dev->mtu + 4, 2330 (nic->pdev, skb->data, dev->mtu + 4,
2405 PCI_DMA_FROMDEVICE); 2331 PCI_DMA_FROMDEVICE);
2406 2332
2407 /* Buffer-1 will be dummy buffer. Not used */ 2333 /* Buffer-1 will be dummy buffer. Not used */
2408 if (!(((RxD3_t*)rxdp)->Buffer1_ptr)) { 2334 if (!(((struct RxD3*)rxdp)->Buffer1_ptr)) {
2409 ((RxD3_t*)rxdp)->Buffer1_ptr = 2335 ((struct RxD3*)rxdp)->Buffer1_ptr =
2410 pci_map_single(nic->pdev, 2336 pci_map_single(nic->pdev,
2411 ba->ba_1, BUF1_LEN, 2337 ba->ba_1, BUF1_LEN,
2412 PCI_DMA_FROMDEVICE); 2338 PCI_DMA_FROMDEVICE);
@@ -2466,9 +2392,9 @@ static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2466 struct net_device *dev = sp->dev; 2392 struct net_device *dev = sp->dev;
2467 int j; 2393 int j;
2468 struct sk_buff *skb; 2394 struct sk_buff *skb;
2469 RxD_t *rxdp; 2395 struct RxD_t *rxdp;
2470 mac_info_t *mac_control; 2396 struct mac_info *mac_control;
2471 buffAdd_t *ba; 2397 struct buffAdd *ba;
2472 2398
2473 mac_control = &sp->mac_control; 2399 mac_control = &sp->mac_control;
2474 for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) { 2400 for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
@@ -2481,41 +2407,41 @@ static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2481 } 2407 }
2482 if (sp->rxd_mode == RXD_MODE_1) { 2408 if (sp->rxd_mode == RXD_MODE_1) {
2483 pci_unmap_single(sp->pdev, (dma_addr_t) 2409 pci_unmap_single(sp->pdev, (dma_addr_t)
2484 ((RxD1_t*)rxdp)->Buffer0_ptr, 2410 ((struct RxD1*)rxdp)->Buffer0_ptr,
2485 dev->mtu + 2411 dev->mtu +
2486 HEADER_ETHERNET_II_802_3_SIZE 2412 HEADER_ETHERNET_II_802_3_SIZE
2487 + HEADER_802_2_SIZE + 2413 + HEADER_802_2_SIZE +
2488 HEADER_SNAP_SIZE, 2414 HEADER_SNAP_SIZE,
2489 PCI_DMA_FROMDEVICE); 2415 PCI_DMA_FROMDEVICE);
2490 memset(rxdp, 0, sizeof(RxD1_t)); 2416 memset(rxdp, 0, sizeof(struct RxD1));
2491 } else if(sp->rxd_mode == RXD_MODE_3B) { 2417 } else if(sp->rxd_mode == RXD_MODE_3B) {
2492 ba = &mac_control->rings[ring_no]. 2418 ba = &mac_control->rings[ring_no].
2493 ba[blk][j]; 2419 ba[blk][j];
2494 pci_unmap_single(sp->pdev, (dma_addr_t) 2420 pci_unmap_single(sp->pdev, (dma_addr_t)
2495 ((RxD3_t*)rxdp)->Buffer0_ptr, 2421 ((struct RxD3*)rxdp)->Buffer0_ptr,
2496 BUF0_LEN, 2422 BUF0_LEN,
2497 PCI_DMA_FROMDEVICE); 2423 PCI_DMA_FROMDEVICE);
2498 pci_unmap_single(sp->pdev, (dma_addr_t) 2424 pci_unmap_single(sp->pdev, (dma_addr_t)
2499 ((RxD3_t*)rxdp)->Buffer1_ptr, 2425 ((struct RxD3*)rxdp)->Buffer1_ptr,
2500 BUF1_LEN, 2426 BUF1_LEN,
2501 PCI_DMA_FROMDEVICE); 2427 PCI_DMA_FROMDEVICE);
2502 pci_unmap_single(sp->pdev, (dma_addr_t) 2428 pci_unmap_single(sp->pdev, (dma_addr_t)
2503 ((RxD3_t*)rxdp)->Buffer2_ptr, 2429 ((struct RxD3*)rxdp)->Buffer2_ptr,
2504 dev->mtu + 4, 2430 dev->mtu + 4,
2505 PCI_DMA_FROMDEVICE); 2431 PCI_DMA_FROMDEVICE);
2506 memset(rxdp, 0, sizeof(RxD3_t)); 2432 memset(rxdp, 0, sizeof(struct RxD3));
2507 } else { 2433 } else {
2508 pci_unmap_single(sp->pdev, (dma_addr_t) 2434 pci_unmap_single(sp->pdev, (dma_addr_t)
2509 ((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN, 2435 ((struct RxD3*)rxdp)->Buffer0_ptr, BUF0_LEN,
2510 PCI_DMA_FROMDEVICE); 2436 PCI_DMA_FROMDEVICE);
2511 pci_unmap_single(sp->pdev, (dma_addr_t) 2437 pci_unmap_single(sp->pdev, (dma_addr_t)
2512 ((RxD3_t*)rxdp)->Buffer1_ptr, 2438 ((struct RxD3*)rxdp)->Buffer1_ptr,
2513 l3l4hdr_size + 4, 2439 l3l4hdr_size + 4,
2514 PCI_DMA_FROMDEVICE); 2440 PCI_DMA_FROMDEVICE);
2515 pci_unmap_single(sp->pdev, (dma_addr_t) 2441 pci_unmap_single(sp->pdev, (dma_addr_t)
2516 ((RxD3_t*)rxdp)->Buffer2_ptr, dev->mtu, 2442 ((struct RxD3*)rxdp)->Buffer2_ptr, dev->mtu,
2517 PCI_DMA_FROMDEVICE); 2443 PCI_DMA_FROMDEVICE);
2518 memset(rxdp, 0, sizeof(RxD3_t)); 2444 memset(rxdp, 0, sizeof(struct RxD3));
2519 } 2445 }
2520 dev_kfree_skb(skb); 2446 dev_kfree_skb(skb);
2521 atomic_dec(&sp->rx_bufs_left[ring_no]); 2447 atomic_dec(&sp->rx_bufs_left[ring_no]);
@@ -2535,7 +2461,7 @@ static void free_rx_buffers(struct s2io_nic *sp)
2535{ 2461{
2536 struct net_device *dev = sp->dev; 2462 struct net_device *dev = sp->dev;
2537 int i, blk = 0, buf_cnt = 0; 2463 int i, blk = 0, buf_cnt = 0;
2538 mac_info_t *mac_control; 2464 struct mac_info *mac_control;
2539 struct config_param *config; 2465 struct config_param *config;
2540 2466
2541 mac_control = &sp->mac_control; 2467 mac_control = &sp->mac_control;
@@ -2568,15 +2494,13 @@ static void free_rx_buffers(struct s2io_nic *sp)
2568 * 0 on success and 1 if there are No Rx packets to be processed. 2494 * 0 on success and 1 if there are No Rx packets to be processed.
2569 */ 2495 */
2570 2496
2571#if defined(CONFIG_S2IO_NAPI)
2572static int s2io_poll(struct net_device *dev, int *budget) 2497static int s2io_poll(struct net_device *dev, int *budget)
2573{ 2498{
2574 nic_t *nic = dev->priv; 2499 struct s2io_nic *nic = dev->priv;
2575 int pkt_cnt = 0, org_pkts_to_process; 2500 int pkt_cnt = 0, org_pkts_to_process;
2576 mac_info_t *mac_control; 2501 struct mac_info *mac_control;
2577 struct config_param *config; 2502 struct config_param *config;
2578 XENA_dev_config_t __iomem *bar0 = nic->bar0; 2503 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2579 u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2580 int i; 2504 int i;
2581 2505
2582 atomic_inc(&nic->isr_cnt); 2506 atomic_inc(&nic->isr_cnt);
@@ -2588,8 +2512,8 @@ static int s2io_poll(struct net_device *dev, int *budget)
2588 nic->pkts_to_process = dev->quota; 2512 nic->pkts_to_process = dev->quota;
2589 org_pkts_to_process = nic->pkts_to_process; 2513 org_pkts_to_process = nic->pkts_to_process;
2590 2514
2591 writeq(val64, &bar0->rx_traffic_int); 2515 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
2592 val64 = readl(&bar0->rx_traffic_int); 2516 readl(&bar0->rx_traffic_int);
2593 2517
2594 for (i = 0; i < config->rx_ring_num; i++) { 2518 for (i = 0; i < config->rx_ring_num; i++) {
2595 rx_intr_handler(&mac_control->rings[i]); 2519 rx_intr_handler(&mac_control->rings[i]);
@@ -2615,7 +2539,7 @@ static int s2io_poll(struct net_device *dev, int *budget)
2615 } 2539 }
2616 /* Re enable the Rx interrupts. */ 2540 /* Re enable the Rx interrupts. */
2617 writeq(0x0, &bar0->rx_traffic_mask); 2541 writeq(0x0, &bar0->rx_traffic_mask);
2618 val64 = readl(&bar0->rx_traffic_mask); 2542 readl(&bar0->rx_traffic_mask);
2619 atomic_dec(&nic->isr_cnt); 2543 atomic_dec(&nic->isr_cnt);
2620 return 0; 2544 return 0;
2621 2545
@@ -2633,7 +2557,6 @@ no_rx:
2633 atomic_dec(&nic->isr_cnt); 2557 atomic_dec(&nic->isr_cnt);
2634 return 1; 2558 return 1;
2635} 2559}
2636#endif
2637 2560
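For context, this is the pre-napi_struct ->poll() contract that s2io_poll() above now always implements (it was previously compiled only under CONFIG_S2IO_NAPI): consume up to *budget packets, decrement both *budget and dev->quota, and return 0 after netif_rx_complete() when the ring is drained, or 1 to be polled again. A skeletal sketch; process_rx() and rx_work_pending() are hypothetical stand-ins:

	/* Skeleton of the 2.6.x ->poll() contract; helpers are hypothetical. */
	static int example_poll(struct net_device *dev, int *budget)
	{
		int done = process_rx(dev, min(*budget, dev->quota));

		*budget -= done;
		dev->quota -= done;

		if (rx_work_pending(dev))
			return 1;		/* keep polling */

		netif_rx_complete(dev);
		/* re-enable RX interrupts here, as s2io_poll() does via
		 * rx_traffic_mask */
		return 0;
	}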
2638#ifdef CONFIG_NET_POLL_CONTROLLER 2561#ifdef CONFIG_NET_POLL_CONTROLLER
2639/** 2562/**
@@ -2647,10 +2570,10 @@ no_rx:
2647 */ 2570 */
2648static void s2io_netpoll(struct net_device *dev) 2571static void s2io_netpoll(struct net_device *dev)
2649{ 2572{
2650 nic_t *nic = dev->priv; 2573 struct s2io_nic *nic = dev->priv;
2651 mac_info_t *mac_control; 2574 struct mac_info *mac_control;
2652 struct config_param *config; 2575 struct config_param *config;
2653 XENA_dev_config_t __iomem *bar0 = nic->bar0; 2576 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2654 u64 val64 = 0xFFFFFFFFFFFFFFFFULL; 2577 u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2655 int i; 2578 int i;
2656 2579
@@ -2699,17 +2622,15 @@ static void s2io_netpoll(struct net_device *dev)
2699 * Return Value: 2622 * Return Value:
2700 * NONE. 2623 * NONE.
2701 */ 2624 */
2702static void rx_intr_handler(ring_info_t *ring_data) 2625static void rx_intr_handler(struct ring_info *ring_data)
2703{ 2626{
2704 nic_t *nic = ring_data->nic; 2627 struct s2io_nic *nic = ring_data->nic;
2705 struct net_device *dev = (struct net_device *) nic->dev; 2628 struct net_device *dev = (struct net_device *) nic->dev;
2706 int get_block, put_block, put_offset; 2629 int get_block, put_block, put_offset;
2707 rx_curr_get_info_t get_info, put_info; 2630 struct rx_curr_get_info get_info, put_info;
2708 RxD_t *rxdp; 2631 struct RxD_t *rxdp;
2709 struct sk_buff *skb; 2632 struct sk_buff *skb;
2710#ifndef CONFIG_S2IO_NAPI
2711 int pkt_cnt = 0; 2633 int pkt_cnt = 0;
2712#endif
2713 int i; 2634 int i;
2714 2635
2715 spin_lock(&nic->rx_lock); 2636 spin_lock(&nic->rx_lock);
@@ -2722,19 +2643,21 @@ static void rx_intr_handler(ring_info_t *ring_data)
2722 2643
2723 get_info = ring_data->rx_curr_get_info; 2644 get_info = ring_data->rx_curr_get_info;
2724 get_block = get_info.block_index; 2645 get_block = get_info.block_index;
2725 put_info = ring_data->rx_curr_put_info; 2646 memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
2726 put_block = put_info.block_index; 2647 put_block = put_info.block_index;
2727 rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr; 2648 rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
2728#ifndef CONFIG_S2IO_NAPI 2649 if (!napi) {
2729 spin_lock(&nic->put_lock); 2650 spin_lock(&nic->put_lock);
2730 put_offset = ring_data->put_pos; 2651 put_offset = ring_data->put_pos;
2731 spin_unlock(&nic->put_lock); 2652 spin_unlock(&nic->put_lock);
2732#else 2653 } else
2733 put_offset = (put_block * (rxd_count[nic->rxd_mode] + 1)) + 2654 put_offset = ring_data->put_pos;
2734 put_info.offset; 2655
2735#endif
2736 while (RXD_IS_UP2DT(rxdp)) { 2656 while (RXD_IS_UP2DT(rxdp)) {
2737 /* If your are next to put index then it's FIFO full condition */ 2657 /*
2658 * If you are next to the put index then it's
2659 * a FIFO full condition
2660 */
2738 if ((get_block == put_block) && 2661 if ((get_block == put_block) &&
2739 (get_info.offset + 1) == put_info.offset) { 2662 (get_info.offset + 1) == put_info.offset) {
2740 DBG_PRINT(INTR_DBG, "%s: Ring Full\n",dev->name); 2663 DBG_PRINT(INTR_DBG, "%s: Ring Full\n",dev->name);
@@ -2750,7 +2673,7 @@ static void rx_intr_handler(ring_info_t *ring_data)
2750 } 2673 }
2751 if (nic->rxd_mode == RXD_MODE_1) { 2674 if (nic->rxd_mode == RXD_MODE_1) {
2752 pci_unmap_single(nic->pdev, (dma_addr_t) 2675 pci_unmap_single(nic->pdev, (dma_addr_t)
2753 ((RxD1_t*)rxdp)->Buffer0_ptr, 2676 ((struct RxD1*)rxdp)->Buffer0_ptr,
2754 dev->mtu + 2677 dev->mtu +
2755 HEADER_ETHERNET_II_802_3_SIZE + 2678 HEADER_ETHERNET_II_802_3_SIZE +
2756 HEADER_802_2_SIZE + 2679 HEADER_802_2_SIZE +
@@ -2758,22 +2681,22 @@ static void rx_intr_handler(ring_info_t *ring_data)
2758 PCI_DMA_FROMDEVICE); 2681 PCI_DMA_FROMDEVICE);
2759 } else if (nic->rxd_mode == RXD_MODE_3B) { 2682 } else if (nic->rxd_mode == RXD_MODE_3B) {
2760 pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t) 2683 pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
2761 ((RxD3_t*)rxdp)->Buffer0_ptr, 2684 ((struct RxD3*)rxdp)->Buffer0_ptr,
2762 BUF0_LEN, PCI_DMA_FROMDEVICE); 2685 BUF0_LEN, PCI_DMA_FROMDEVICE);
2763 pci_unmap_single(nic->pdev, (dma_addr_t) 2686 pci_unmap_single(nic->pdev, (dma_addr_t)
2764 ((RxD3_t*)rxdp)->Buffer2_ptr, 2687 ((struct RxD3*)rxdp)->Buffer2_ptr,
2765 dev->mtu + 4, 2688 dev->mtu + 4,
2766 PCI_DMA_FROMDEVICE); 2689 PCI_DMA_FROMDEVICE);
2767 } else { 2690 } else {
2768 pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t) 2691 pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
2769 ((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN, 2692 ((struct RxD3*)rxdp)->Buffer0_ptr, BUF0_LEN,
2770 PCI_DMA_FROMDEVICE); 2693 PCI_DMA_FROMDEVICE);
2771 pci_unmap_single(nic->pdev, (dma_addr_t) 2694 pci_unmap_single(nic->pdev, (dma_addr_t)
2772 ((RxD3_t*)rxdp)->Buffer1_ptr, 2695 ((struct RxD3*)rxdp)->Buffer1_ptr,
2773 l3l4hdr_size + 4, 2696 l3l4hdr_size + 4,
2774 PCI_DMA_FROMDEVICE); 2697 PCI_DMA_FROMDEVICE);
2775 pci_unmap_single(nic->pdev, (dma_addr_t) 2698 pci_unmap_single(nic->pdev, (dma_addr_t)
2776 ((RxD3_t*)rxdp)->Buffer2_ptr, 2699 ((struct RxD3*)rxdp)->Buffer2_ptr,
2777 dev->mtu, PCI_DMA_FROMDEVICE); 2700 dev->mtu, PCI_DMA_FROMDEVICE);
2778 } 2701 }
2779 prefetch(skb->data); 2702 prefetch(skb->data);
@@ -2792,20 +2715,17 @@ static void rx_intr_handler(ring_info_t *ring_data)
2792 rxdp = ring_data->rx_blocks[get_block].block_virt_addr; 2715 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2793 } 2716 }
2794 2717
2795#ifdef CONFIG_S2IO_NAPI
2796 nic->pkts_to_process -= 1; 2718 nic->pkts_to_process -= 1;
2797 if (!nic->pkts_to_process) 2719 if ((napi) && (!nic->pkts_to_process))
2798 break; 2720 break;
2799#else
2800 pkt_cnt++; 2721 pkt_cnt++;
2801 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts)) 2722 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2802 break; 2723 break;
2803#endif
2804 } 2724 }
2805 if (nic->lro) { 2725 if (nic->lro) {
2806 /* Clear all LRO sessions before exiting */ 2726 /* Clear all LRO sessions before exiting */
2807 for (i=0; i<MAX_LRO_SESSIONS; i++) { 2727 for (i=0; i<MAX_LRO_SESSIONS; i++) {
2808 lro_t *lro = &nic->lro0_n[i]; 2728 struct lro *lro = &nic->lro0_n[i];
2809 if (lro->in_use) { 2729 if (lro->in_use) {
2810 update_L3L4_header(nic, lro); 2730 update_L3L4_header(nic, lro);
2811 queue_rx_frame(lro->parent); 2731 queue_rx_frame(lro->parent);
@@ -2829,17 +2749,17 @@ static void rx_intr_handler(ring_info_t *ring_data)
2829 * NONE 2749 * NONE
2830 */ 2750 */
2831 2751
2832static void tx_intr_handler(fifo_info_t *fifo_data) 2752static void tx_intr_handler(struct fifo_info *fifo_data)
2833{ 2753{
2834 nic_t *nic = fifo_data->nic; 2754 struct s2io_nic *nic = fifo_data->nic;
2835 struct net_device *dev = (struct net_device *) nic->dev; 2755 struct net_device *dev = (struct net_device *) nic->dev;
2836 tx_curr_get_info_t get_info, put_info; 2756 struct tx_curr_get_info get_info, put_info;
2837 struct sk_buff *skb; 2757 struct sk_buff *skb;
2838 TxD_t *txdlp; 2758 struct TxD *txdlp;
2839 2759
2840 get_info = fifo_data->tx_curr_get_info; 2760 get_info = fifo_data->tx_curr_get_info;
2841 put_info = fifo_data->tx_curr_put_info; 2761 memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
2842 txdlp = (TxD_t *) fifo_data->list_info[get_info.offset]. 2762 txdlp = (struct TxD *) fifo_data->list_info[get_info.offset].
2843 list_virt_addr; 2763 list_virt_addr;
2844 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) && 2764 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2845 (get_info.offset != put_info.offset) && 2765 (get_info.offset != put_info.offset) &&
@@ -2854,11 +2774,10 @@ static void tx_intr_handler(fifo_info_t *fifo_data)
2854 } 2774 }
2855 if ((err >> 48) == 0xA) { 2775 if ((err >> 48) == 0xA) {
2856 DBG_PRINT(TX_DBG, "TxD returned due \ 2776 DBG_PRINT(TX_DBG, "TxD returned due \
2857to loss of link\n"); 2777 to loss of link\n");
2858 } 2778 }
2859 else { 2779 else {
2860 DBG_PRINT(ERR_DBG, "***TxD error \ 2780 DBG_PRINT(ERR_DBG, "***TxD error %llx\n", err);
2861%llx\n", err);
2862 } 2781 }
2863 } 2782 }
2864 2783
@@ -2877,7 +2796,7 @@ to loss of link\n");
2877 get_info.offset++; 2796 get_info.offset++;
2878 if (get_info.offset == get_info.fifo_len + 1) 2797 if (get_info.offset == get_info.fifo_len + 1)
2879 get_info.offset = 0; 2798 get_info.offset = 0;
2880 txdlp = (TxD_t *) fifo_data->list_info 2799 txdlp = (struct TxD *) fifo_data->list_info
2881 [get_info.offset].list_virt_addr; 2800 [get_info.offset].list_virt_addr;
2882 fifo_data->tx_curr_get_info.offset = 2801 fifo_data->tx_curr_get_info.offset =
2883 get_info.offset; 2802 get_info.offset;
@@ -2902,8 +2821,8 @@ to loss of link\n");
2902static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev) 2821static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
2903{ 2822{
2904 u64 val64 = 0x0; 2823 u64 val64 = 0x0;
2905 nic_t *sp = dev->priv; 2824 struct s2io_nic *sp = dev->priv;
2906 XENA_dev_config_t __iomem *bar0 = sp->bar0; 2825 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2907 2826
2908 //address transaction 2827 //address transaction
2909 val64 = val64 | MDIO_MMD_INDX_ADDR(addr) 2828 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
@@ -2951,8 +2870,8 @@ static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
2951{ 2870{
2952 u64 val64 = 0x0; 2871 u64 val64 = 0x0;
2953 u64 rval64 = 0x0; 2872 u64 rval64 = 0x0;
2954 nic_t *sp = dev->priv; 2873 struct s2io_nic *sp = dev->priv;
2955 XENA_dev_config_t __iomem *bar0 = sp->bar0; 2874 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2956 2875
2957 /* address transaction */ 2876 /* address transaction */
2958 val64 = val64 | MDIO_MMD_INDX_ADDR(addr) 2877 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
@@ -3055,8 +2974,8 @@ static void s2io_updt_xpak_counter(struct net_device *dev)
3055 u64 val64 = 0x0; 2974 u64 val64 = 0x0;
3056 u64 addr = 0x0; 2975 u64 addr = 0x0;
3057 2976
3058 nic_t *sp = dev->priv; 2977 struct s2io_nic *sp = dev->priv;
3059 StatInfo_t *stat_info = sp->mac_control.stats_info; 2978 struct stat_block *stat_info = sp->mac_control.stats_info;
3060 2979
3061 /* Check the communication with the MDIO slave */ 2980 /* Check the communication with the MDIO slave */
3062 addr = 0x0000; 2981 addr = 0x0000;
@@ -3154,10 +3073,12 @@ static void s2io_updt_xpak_counter(struct net_device *dev)
3154static void alarm_intr_handler(struct s2io_nic *nic) 3073static void alarm_intr_handler(struct s2io_nic *nic)
3155{ 3074{
3156 struct net_device *dev = (struct net_device *) nic->dev; 3075 struct net_device *dev = (struct net_device *) nic->dev;
3157 XENA_dev_config_t __iomem *bar0 = nic->bar0; 3076 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3158 register u64 val64 = 0, err_reg = 0; 3077 register u64 val64 = 0, err_reg = 0;
3159 u64 cnt; 3078 u64 cnt;
3160 int i; 3079 int i;
3080 if (atomic_read(&nic->card_state) == CARD_DOWN)
3081 return;
3161 nic->mac_control.stats_info->sw_stat.ring_full_cnt = 0; 3082 nic->mac_control.stats_info->sw_stat.ring_full_cnt = 0;
3162 /* Handling the XPAK counters update */ 3083 /* Handling the XPAK counters update */
3163 if(nic->mac_control.stats_info->xpak_stat.xpak_timer_count < 72000) { 3084 if(nic->mac_control.stats_info->xpak_stat.xpak_timer_count < 72000) {
@@ -3297,6 +3218,25 @@ static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit)
3297 } 3218 }
3298 return ret; 3219 return ret;
3299} 3220}
3221/*
3222 * check_pci_device_id - Checks if the device id is supported
3223 * @id : device id
3224 * Description: Function to check if the PCI device id is supported by the driver.
3225 * Return value: Actual device id if supported else PCI_ANY_ID
3226 */
3227static u16 check_pci_device_id(u16 id)
3228{
3229 switch (id) {
3230 case PCI_DEVICE_ID_HERC_WIN:
3231 case PCI_DEVICE_ID_HERC_UNI:
3232 return XFRAME_II_DEVICE;
3233 case PCI_DEVICE_ID_S2IO_UNI:
3234 case PCI_DEVICE_ID_S2IO_WIN:
3235 return XFRAME_I_DEVICE;
3236 default:
3237 return PCI_ANY_ID;
3238 }
3239}
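check_pci_device_id() is used by s2io_reset() below to detect when config space has come back after the PME round trip. A hedged usage sketch (probe-time classification; the variable names are illustrative):

	/* Hypothetical probe-time use: classify the adapter by device id. */
	u16 dev_type = check_pci_device_id(pdev->device);

	if (dev_type == (u16)PCI_ANY_ID)
		DBG_PRINT(ERR_DBG, "Unsupported device id 0x%x\n",
			  pdev->device);
	else
		sp->device_type = dev_type; /* XFRAME_I_DEVICE or XFRAME_II_DEVICE */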
3300 3240
3301/** 3241/**
3302 * s2io_reset - Resets the card. 3242 * s2io_reset - Resets the card.
@@ -3308,42 +3248,57 @@ static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit)
3308 * void. 3248 * void.
3309 */ 3249 */
3310 3250
3311static void s2io_reset(nic_t * sp) 3251static void s2io_reset(struct s2io_nic * sp)
3312{ 3252{
3313 XENA_dev_config_t __iomem *bar0 = sp->bar0; 3253 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3314 u64 val64; 3254 u64 val64;
3315 u16 subid, pci_cmd; 3255 u16 subid, pci_cmd;
3256 int i;
3257 u16 val16;
3258 DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n",
3259 __FUNCTION__, sp->dev->name);
3316 3260
3317 /* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */ 3261 /* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
3318 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd)); 3262 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
3319 3263
3264 if (sp->device_type == XFRAME_II_DEVICE) {
3265 int ret;
3266 ret = pci_set_power_state(sp->pdev, 3);
3267 if (!ret)
3268 ret = pci_set_power_state(sp->pdev, 0);
3269 else {
3270 DBG_PRINT(ERR_DBG,"%s PME based SW_Reset failed!\n",
3271 __FUNCTION__);
3272 goto old_way;
3273 }
3274 msleep(20);
3275 goto new_way;
3276 }
3277old_way:
3320 val64 = SW_RESET_ALL; 3278 val64 = SW_RESET_ALL;
3321 writeq(val64, &bar0->sw_reset); 3279 writeq(val64, &bar0->sw_reset);
3322 3280new_way:
3323 /*
3324 * At this stage, if the PCI write is indeed completed, the
3325 * card is reset and so is the PCI Config space of the device.
3326 * So a read cannot be issued at this stage on any of the
3327 * registers to ensure the write into "sw_reset" register
3328 * has gone through.
3329 * Question: Is there any system call that will explicitly force
3330 * all the write commands still pending on the bus to be pushed
3331 * through?
3332 * As of now I'am just giving a 250ms delay and hoping that the
3333 * PCI write to sw_reset register is done by this time.
3334 */
3335 msleep(250);
3336 if (strstr(sp->product_name, "CX4")) { 3281 if (strstr(sp->product_name, "CX4")) {
3337 msleep(750); 3282 msleep(750);
3338 } 3283 }
3284 msleep(250);
3285 for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {
3339 3286
3340 /* Restore the PCI state saved during initialization. */ 3287 /* Restore the PCI state saved during initialization. */
3341 pci_restore_state(sp->pdev); 3288 pci_restore_state(sp->pdev);
3342 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, 3289 pci_read_config_word(sp->pdev, 0x2, &val16);
3343 pci_cmd); 3290 if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
3344 s2io_init_pci(sp); 3291 break;
3292 msleep(200);
3293 }
3345 3294
3346 msleep(250); 3295 if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) {
3296 DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __FUNCTION__);
3297 }
3298
3299 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
3300
3301 s2io_init_pci(sp);
3347 3302
3348 /* Set swapper to enable I/O register access */ 3303 /* Set swapper to enable I/O register access */
3349 s2io_set_swapper(sp); 3304 s2io_set_swapper(sp);
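The "new way" above resets an Xframe II by bouncing it through D3hot and back to D0, letting PME perform the soft reset; only if the power-state transition fails does it fall back to writing SW_RESET_ALL. A condensed sketch of that sequence, assuming the pci_set_power_state() calling convention of this kernel generation (raw state numbers, 3 = D3hot, 0 = D0):

	/* Sketch: PME-based soft reset; on failure the caller falls back
	 * to writing SW_RESET_ALL into bar0->sw_reset.
	 */
	static int pme_soft_reset(struct pci_dev *pdev)
	{
		if (pci_set_power_state(pdev, 3))	/* enter D3hot */
			return -EIO;
		if (pci_set_power_state(pdev, 0))	/* back to D0 */
			return -EIO;
		msleep(20);	/* let the device settle before config reads */
		return 0;
	}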
@@ -3399,10 +3354,10 @@ static void s2io_reset(nic_t * sp)
3399 * SUCCESS on success and FAILURE on failure. 3354 * SUCCESS on success and FAILURE on failure.
3400 */ 3355 */
3401 3356
3402static int s2io_set_swapper(nic_t * sp) 3357static int s2io_set_swapper(struct s2io_nic * sp)
3403{ 3358{
3404 struct net_device *dev = sp->dev; 3359 struct net_device *dev = sp->dev;
3405 XENA_dev_config_t __iomem *bar0 = sp->bar0; 3360 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3406 u64 val64, valt, valr; 3361 u64 val64, valt, valr;
3407 3362
3408 /* 3363 /*
@@ -3527,9 +3482,9 @@ static int s2io_set_swapper(nic_t * sp)
3527 return SUCCESS; 3482 return SUCCESS;
3528} 3483}
3529 3484
3530static int wait_for_msix_trans(nic_t *nic, int i) 3485static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3531{ 3486{
3532 XENA_dev_config_t __iomem *bar0 = nic->bar0; 3487 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3533 u64 val64; 3488 u64 val64;
3534 int ret = 0, cnt = 0; 3489 int ret = 0, cnt = 0;
3535 3490
@@ -3548,9 +3503,9 @@ static int wait_for_msix_trans(nic_t *nic, int i)
3548 return ret; 3503 return ret;
3549} 3504}
3550 3505
3551static void restore_xmsi_data(nic_t *nic) 3506static void restore_xmsi_data(struct s2io_nic *nic)
3552{ 3507{
3553 XENA_dev_config_t __iomem *bar0 = nic->bar0; 3508 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3554 u64 val64; 3509 u64 val64;
3555 int i; 3510 int i;
3556 3511
@@ -3566,9 +3521,9 @@ static void restore_xmsi_data(nic_t *nic)
3566 } 3521 }
3567} 3522}
3568 3523
3569static void store_xmsi_data(nic_t *nic) 3524static void store_xmsi_data(struct s2io_nic *nic)
3570{ 3525{
3571 XENA_dev_config_t __iomem *bar0 = nic->bar0; 3526 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3572 u64 val64, addr, data; 3527 u64 val64, addr, data;
3573 int i; 3528 int i;
3574 3529
@@ -3589,9 +3544,9 @@ static void store_xmsi_data(nic_t *nic)
3589 } 3544 }
3590} 3545}
3591 3546
3592int s2io_enable_msi(nic_t *nic) 3547int s2io_enable_msi(struct s2io_nic *nic)
3593{ 3548{
3594 XENA_dev_config_t __iomem *bar0 = nic->bar0; 3549 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3595 u16 msi_ctrl, msg_val; 3550 u16 msi_ctrl, msg_val;
3596 struct config_param *config = &nic->config; 3551 struct config_param *config = &nic->config;
3597 struct net_device *dev = nic->dev; 3552 struct net_device *dev = nic->dev;
@@ -3639,9 +3594,9 @@ int s2io_enable_msi(nic_t *nic)
3639 return 0; 3594 return 0;
3640} 3595}
3641 3596
3642static int s2io_enable_msi_x(nic_t *nic) 3597static int s2io_enable_msi_x(struct s2io_nic *nic)
3643{ 3598{
3644 XENA_dev_config_t __iomem *bar0 = nic->bar0; 3599 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3645 u64 tx_mat, rx_mat; 3600 u64 tx_mat, rx_mat;
3646 u16 msi_control; /* Temp variable */ 3601 u16 msi_control; /* Temp variable */
3647 int ret, i, j, msix_indx = 1; 3602 int ret, i, j, msix_indx = 1;
@@ -3749,7 +3704,7 @@ static int s2io_enable_msi_x(nic_t *nic)
3749 3704
3750static int s2io_open(struct net_device *dev) 3705static int s2io_open(struct net_device *dev)
3751{ 3706{
3752 nic_t *sp = dev->priv; 3707 struct s2io_nic *sp = dev->priv;
3753 int err = 0; 3708 int err = 0;
3754 3709
3755 /* 3710 /*
@@ -3802,7 +3757,7 @@ hw_init_failed:
3802 3757
3803static int s2io_close(struct net_device *dev) 3758static int s2io_close(struct net_device *dev)
3804{ 3759{
3805 nic_t *sp = dev->priv; 3760 struct s2io_nic *sp = dev->priv;
3806 3761
3807 flush_scheduled_work(); 3762 flush_scheduled_work();
3808 netif_stop_queue(dev); 3763 netif_stop_queue(dev);
@@ -3828,15 +3783,15 @@ static int s2io_close(struct net_device *dev)
3828 3783
3829static int s2io_xmit(struct sk_buff *skb, struct net_device *dev) 3784static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3830{ 3785{
3831 nic_t *sp = dev->priv; 3786 struct s2io_nic *sp = dev->priv;
3832 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off; 3787 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
3833 register u64 val64; 3788 register u64 val64;
3834 TxD_t *txdp; 3789 struct TxD *txdp;
3835 TxFIFO_element_t __iomem *tx_fifo; 3790 struct TxFIFO_element __iomem *tx_fifo;
3836 unsigned long flags; 3791 unsigned long flags;
3837 u16 vlan_tag = 0; 3792 u16 vlan_tag = 0;
3838 int vlan_priority = 0; 3793 int vlan_priority = 0;
3839 mac_info_t *mac_control; 3794 struct mac_info *mac_control;
3840 struct config_param *config; 3795 struct config_param *config;
3841 int offload_type; 3796 int offload_type;
3842 3797
@@ -3864,7 +3819,7 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3864 3819
3865 put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset; 3820 put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
3866 get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset; 3821 get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
3867 txdp = (TxD_t *) mac_control->fifos[queue].list_info[put_off]. 3822 txdp = (struct TxD *) mac_control->fifos[queue].list_info[put_off].
3868 list_virt_addr; 3823 list_virt_addr;
3869 3824
3870 queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1; 3825 queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
@@ -3887,12 +3842,10 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3887 } 3842 }
3888 3843
3889 offload_type = s2io_offload_type(skb); 3844 offload_type = s2io_offload_type(skb);
3890#ifdef NETIF_F_TSO
3891 if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) { 3845 if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
3892 txdp->Control_1 |= TXD_TCP_LSO_EN; 3846 txdp->Control_1 |= TXD_TCP_LSO_EN;
3893 txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb)); 3847 txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
3894 } 3848 }
3895#endif
3896 if (skb->ip_summed == CHECKSUM_PARTIAL) { 3849 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3897 txdp->Control_2 |= 3850 txdp->Control_2 |=
3898 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN | 3851 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
@@ -3993,13 +3946,13 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3993static void 3946static void
3994s2io_alarm_handle(unsigned long data) 3947s2io_alarm_handle(unsigned long data)
3995{ 3948{
3996 nic_t *sp = (nic_t *)data; 3949 struct s2io_nic *sp = (struct s2io_nic *)data;
3997 3950
3998 alarm_intr_handler(sp); 3951 alarm_intr_handler(sp);
3999 mod_timer(&sp->alarm_timer, jiffies + HZ / 2); 3952 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4000} 3953}
4001 3954
4002static int s2io_chk_rx_buffers(nic_t *sp, int rng_n) 3955static int s2io_chk_rx_buffers(struct s2io_nic *sp, int rng_n)
4003{ 3956{
4004 int rxb_size, level; 3957 int rxb_size, level;
4005 3958
@@ -4031,9 +3984,9 @@ static int s2io_chk_rx_buffers(nic_t *sp, int rng_n)
4031static irqreturn_t s2io_msi_handle(int irq, void *dev_id) 3984static irqreturn_t s2io_msi_handle(int irq, void *dev_id)
4032{ 3985{
4033 struct net_device *dev = (struct net_device *) dev_id; 3986 struct net_device *dev = (struct net_device *) dev_id;
4034 nic_t *sp = dev->priv; 3987 struct s2io_nic *sp = dev->priv;
4035 int i; 3988 int i;
4036 mac_info_t *mac_control; 3989 struct mac_info *mac_control;
4037 struct config_param *config; 3990 struct config_param *config;
4038 3991
4039 atomic_inc(&sp->isr_cnt); 3992 atomic_inc(&sp->isr_cnt);
@@ -4063,8 +4016,8 @@ static irqreturn_t s2io_msi_handle(int irq, void *dev_id)
4063 4016
4064static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id) 4017static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4065{ 4018{
4066 ring_info_t *ring = (ring_info_t *)dev_id; 4019 struct ring_info *ring = (struct ring_info *)dev_id;
4067 nic_t *sp = ring->nic; 4020 struct s2io_nic *sp = ring->nic;
4068 4021
4069 atomic_inc(&sp->isr_cnt); 4022 atomic_inc(&sp->isr_cnt);
4070 4023
@@ -4077,17 +4030,17 @@ static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4077 4030
4078static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id) 4031static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
4079{ 4032{
4080 fifo_info_t *fifo = (fifo_info_t *)dev_id; 4033 struct fifo_info *fifo = (struct fifo_info *)dev_id;
4081 nic_t *sp = fifo->nic; 4034 struct s2io_nic *sp = fifo->nic;
4082 4035
4083 atomic_inc(&sp->isr_cnt); 4036 atomic_inc(&sp->isr_cnt);
4084 tx_intr_handler(fifo); 4037 tx_intr_handler(fifo);
4085 atomic_dec(&sp->isr_cnt); 4038 atomic_dec(&sp->isr_cnt);
4086 return IRQ_HANDLED; 4039 return IRQ_HANDLED;
4087} 4040}
4088static void s2io_txpic_intr_handle(nic_t *sp) 4041static void s2io_txpic_intr_handle(struct s2io_nic *sp)
4089{ 4042{
4090 XENA_dev_config_t __iomem *bar0 = sp->bar0; 4043 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4091 u64 val64; 4044 u64 val64;
4092 4045
4093 val64 = readq(&bar0->pic_int_status); 4046 val64 = readq(&bar0->pic_int_status);
@@ -4109,39 +4062,33 @@ static void s2io_txpic_intr_handle(nic_t *sp)
4109 } 4062 }
4110 else if (val64 & GPIO_INT_REG_LINK_UP) { 4063 else if (val64 & GPIO_INT_REG_LINK_UP) {
4111 val64 = readq(&bar0->adapter_status); 4064 val64 = readq(&bar0->adapter_status);
4112 if (verify_xena_quiescence(sp, val64,
4113 sp->device_enabled_once)) {
4114 /* Enable Adapter */ 4065 /* Enable Adapter */
4115 val64 = readq(&bar0->adapter_control); 4066 val64 = readq(&bar0->adapter_control);
4116 val64 |= ADAPTER_CNTL_EN; 4067 val64 |= ADAPTER_CNTL_EN;
4117 writeq(val64, &bar0->adapter_control); 4068 writeq(val64, &bar0->adapter_control);
4118 val64 |= ADAPTER_LED_ON; 4069 val64 |= ADAPTER_LED_ON;
4119 writeq(val64, &bar0->adapter_control); 4070 writeq(val64, &bar0->adapter_control);
4120 if (!sp->device_enabled_once) 4071 if (!sp->device_enabled_once)
4121 sp->device_enabled_once = 1; 4072 sp->device_enabled_once = 1;
4122 4073
4123 s2io_link(sp, LINK_UP); 4074 s2io_link(sp, LINK_UP);
4124 /* 4075 /*
4125 * unmask link down interrupt and mask link-up 4076 * unmask link down interrupt and mask link-up
4126 * intr 4077 * intr
4127 */ 4078 */
4128 val64 = readq(&bar0->gpio_int_mask); 4079 val64 = readq(&bar0->gpio_int_mask);
4129 val64 &= ~GPIO_INT_MASK_LINK_DOWN; 4080 val64 &= ~GPIO_INT_MASK_LINK_DOWN;
4130 val64 |= GPIO_INT_MASK_LINK_UP; 4081 val64 |= GPIO_INT_MASK_LINK_UP;
4131 writeq(val64, &bar0->gpio_int_mask); 4082 writeq(val64, &bar0->gpio_int_mask);
4132 4083
4133 }
4134 }else if (val64 & GPIO_INT_REG_LINK_DOWN) { 4084 }else if (val64 & GPIO_INT_REG_LINK_DOWN) {
4135 val64 = readq(&bar0->adapter_status); 4085 val64 = readq(&bar0->adapter_status);
4136 if (verify_xena_quiescence(sp, val64, 4086 s2io_link(sp, LINK_DOWN);
4137 sp->device_enabled_once)) { 4087 /* Link is down so unmask link up interrupt */
4138 s2io_link(sp, LINK_DOWN); 4088 val64 = readq(&bar0->gpio_int_mask);
4139 /* Link is down so unmaks link up interrupt */ 4089 val64 &= ~GPIO_INT_MASK_LINK_UP;
4140 val64 = readq(&bar0->gpio_int_mask); 4090 val64 |= GPIO_INT_MASK_LINK_DOWN;
4141 val64 &= ~GPIO_INT_MASK_LINK_UP; 4091 writeq(val64, &bar0->gpio_int_mask);
4142 val64 |= GPIO_INT_MASK_LINK_DOWN;
4143 writeq(val64, &bar0->gpio_int_mask);
4144 }
4145 } 4092 }
4146 } 4093 }
4147 val64 = readq(&bar0->gpio_int_mask); 4094 val64 = readq(&bar0->gpio_int_mask);
@@ -4163,11 +4110,11 @@ static void s2io_txpic_intr_handle(nic_t *sp)
4163static irqreturn_t s2io_isr(int irq, void *dev_id) 4110static irqreturn_t s2io_isr(int irq, void *dev_id)
4164{ 4111{
4165 struct net_device *dev = (struct net_device *) dev_id; 4112 struct net_device *dev = (struct net_device *) dev_id;
4166 nic_t *sp = dev->priv; 4113 struct s2io_nic *sp = dev->priv;
4167 XENA_dev_config_t __iomem *bar0 = sp->bar0; 4114 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4168 int i; 4115 int i;
4169 u64 reason = 0, val64, org_mask; 4116 u64 reason = 0;
4170 mac_info_t *mac_control; 4117 struct mac_info *mac_control;
4171 struct config_param *config; 4118 struct config_param *config;
4172 4119
4173 atomic_inc(&sp->isr_cnt); 4120 atomic_inc(&sp->isr_cnt);
@@ -4185,43 +4132,48 @@ static irqreturn_t s2io_isr(int irq, void *dev_id)
4185 reason = readq(&bar0->general_int_status); 4132 reason = readq(&bar0->general_int_status);
4186 4133
4187 if (!reason) { 4134 if (!reason) {
4188 /* The interrupt was not raised by Xena. */ 4135 /* The interrupt was not raised by us. */
4136 atomic_dec(&sp->isr_cnt);
4137 return IRQ_NONE;
4138 }
4139 else if (unlikely(reason == S2IO_MINUS_ONE) ) {
4140 /* Disable device and get out */
4189 atomic_dec(&sp->isr_cnt); 4141 atomic_dec(&sp->isr_cnt);
4190 return IRQ_NONE; 4142 return IRQ_NONE;
4191 } 4143 }
4192 4144
4193 val64 = 0xFFFFFFFFFFFFFFFFULL; 4145 if (napi) {
4194 /* Store current mask before masking all interrupts */ 4146 if (reason & GEN_INTR_RXTRAFFIC) {
4195 org_mask = readq(&bar0->general_int_mask); 4147 if (likely(netif_rx_schedule_prep(dev))) {
4196 writeq(val64, &bar0->general_int_mask); 4148 __netif_rx_schedule(dev);
4149 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
4150 }
4151 else
4152 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4153 }
4154 } else {
4155 /*
4156 * Rx handler is called by default, without checking for the
4157 * cause of interrupt.
4158 * rx_traffic_int reg is an R1 register, writing all 1's
4159 * will ensure that the actual interrupt-causing bit gets
4160 * cleared and hence a read can be avoided.
4161 */
4162 if (reason & GEN_INTR_RXTRAFFIC)
4163 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4197 4164
4198#ifdef CONFIG_S2IO_NAPI 4165 for (i = 0; i < config->rx_ring_num; i++) {
4199 if (reason & GEN_INTR_RXTRAFFIC) { 4166 rx_intr_handler(&mac_control->rings[i]);
4200 if (netif_rx_schedule_prep(dev)) {
4201 writeq(val64, &bar0->rx_traffic_mask);
4202 __netif_rx_schedule(dev);
4203 } 4167 }
4204 } 4168 }
4205#else
4206 /*
4207 * Rx handler is called by default, without checking for the
4208 * cause of interrupt.
4209 * rx_traffic_int reg is an R1 register, writing all 1's
4210 * will ensure that the actual interrupt causing bit get's
4211 * cleared and hence a read can be avoided.
4212 */
4213 writeq(val64, &bar0->rx_traffic_int);
4214 for (i = 0; i < config->rx_ring_num; i++) {
4215 rx_intr_handler(&mac_control->rings[i]);
4216 }
4217#endif
4218 4169
4219 /* 4170 /*
4220 * tx_traffic_int reg is an R1 register, writing all 1's 4171 * tx_traffic_int reg is an R1 register, writing all 1's
4221 * will ensure that the actual interrupt-causing bit gets 4172 * will ensure that the actual interrupt-causing bit gets
4222 * cleared and hence a read can be avoided. 4173 * cleared and hence a read can be avoided.
4223 */ 4174 */
4224 writeq(val64, &bar0->tx_traffic_int); 4175 if (reason & GEN_INTR_TXTRAFFIC)
4176 writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4225 4177
4226 for (i = 0; i < config->tx_fifo_num; i++) 4178 for (i = 0; i < config->tx_fifo_num; i++)
4227 tx_intr_handler(&mac_control->fifos[i]); 4179 tx_intr_handler(&mac_control->fifos[i]);
@@ -4233,11 +4185,14 @@ static irqreturn_t s2io_isr(int irq, void *dev_id)
4233 * reallocate the buffers from the interrupt handler itself, 4185 * reallocate the buffers from the interrupt handler itself,
4234 * else schedule a tasklet to reallocate the buffers. 4186 * else schedule a tasklet to reallocate the buffers.
4235 */ 4187 */
4236#ifndef CONFIG_S2IO_NAPI 4188 if (!napi) {
4237 for (i = 0; i < config->rx_ring_num; i++) 4189 for (i = 0; i < config->rx_ring_num; i++)
4238 s2io_chk_rx_buffers(sp, i); 4190 s2io_chk_rx_buffers(sp, i);
4239#endif 4191 }
4240 writeq(org_mask, &bar0->general_int_mask); 4192
4193 writeq(0, &bar0->general_int_mask);
4194 readl(&bar0->general_int_status);
4195
4241 atomic_dec(&sp->isr_cnt); 4196 atomic_dec(&sp->isr_cnt);
4242 return IRQ_HANDLED; 4197 return IRQ_HANDLED;
4243} 4198}
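The RX branch of s2io_isr() pairs with s2io_poll() earlier in this patch: the ISR schedules NAPI once and masks further RX interrupts via rx_traffic_mask, and the poll routine writes 0 back to unmask once its quota is honoured. A condensed view of that handshake, using names from this driver (sketch only):

	/* ISR side: schedule the softirq once, then mask RX. */
	if (reason & GEN_INTR_RXTRAFFIC) {
		if (netif_rx_schedule_prep(dev)) {
			__netif_rx_schedule(dev);
			writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
		}
	}

	/* Poll side (s2io_poll): when done, unmask RX and flush the write. */
	writeq(0x0, &bar0->rx_traffic_mask);
	readl(&bar0->rx_traffic_mask);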
@@ -4245,9 +4200,9 @@ static irqreturn_t s2io_isr(int irq, void *dev_id)
4245/** 4200/**
4246 * s2io_updt_stats - 4201 * s2io_updt_stats -
4247 */ 4202 */
4248static void s2io_updt_stats(nic_t *sp) 4203static void s2io_updt_stats(struct s2io_nic *sp)
4249{ 4204{
4250 XENA_dev_config_t __iomem *bar0 = sp->bar0; 4205 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4251 u64 val64; 4206 u64 val64;
4252 int cnt = 0; 4207 int cnt = 0;
4253 4208
@@ -4266,7 +4221,7 @@ static void s2io_updt_stats(nic_t *sp)
4266 break; /* Updt failed */ 4221 break; /* Updt failed */
4267 } while(1); 4222 } while(1);
4268 } else { 4223 } else {
4269 memset(sp->mac_control.stats_info, 0, sizeof(StatInfo_t)); 4224 memset(sp->mac_control.stats_info, 0, sizeof(struct stat_block));
4270 } 4225 }
4271} 4226}
4272 4227
@@ -4282,8 +4237,8 @@ static void s2io_updt_stats(nic_t *sp)
4282 4237
4283static struct net_device_stats *s2io_get_stats(struct net_device *dev) 4238static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4284{ 4239{
4285 nic_t *sp = dev->priv; 4240 struct s2io_nic *sp = dev->priv;
4286 mac_info_t *mac_control; 4241 struct mac_info *mac_control;
4287 struct config_param *config; 4242 struct config_param *config;
4288 4243
4289 4244
@@ -4324,8 +4279,8 @@ static void s2io_set_multicast(struct net_device *dev)
4324{ 4279{
4325 int i, j, prev_cnt; 4280 int i, j, prev_cnt;
4326 struct dev_mc_list *mclist; 4281 struct dev_mc_list *mclist;
4327 nic_t *sp = dev->priv; 4282 struct s2io_nic *sp = dev->priv;
4328 XENA_dev_config_t __iomem *bar0 = sp->bar0; 4283 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4329 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask = 4284 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4330 0xfeffffffffffULL; 4285 0xfeffffffffffULL;
4331 u64 dis_addr = 0xffffffffffffULL, mac_addr = 0; 4286 u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
@@ -4478,8 +4433,8 @@ static void s2io_set_multicast(struct net_device *dev)
4478 4433
4479static int s2io_set_mac_addr(struct net_device *dev, u8 * addr) 4434static int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
4480{ 4435{
4481 nic_t *sp = dev->priv; 4436 struct s2io_nic *sp = dev->priv;
4482 XENA_dev_config_t __iomem *bar0 = sp->bar0; 4437 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4483 register u64 val64, mac_addr = 0; 4438 register u64 val64, mac_addr = 0;
4484 int i; 4439 int i;
4485 4440
@@ -4525,7 +4480,7 @@ static int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
4525static int s2io_ethtool_sset(struct net_device *dev, 4480static int s2io_ethtool_sset(struct net_device *dev,
4526 struct ethtool_cmd *info) 4481 struct ethtool_cmd *info)
4527{ 4482{
4528 nic_t *sp = dev->priv; 4483 struct s2io_nic *sp = dev->priv;
4529 if ((info->autoneg == AUTONEG_ENABLE) || 4484 if ((info->autoneg == AUTONEG_ENABLE) ||
4530 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL)) 4485 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
4531 return -EINVAL; 4486 return -EINVAL;
@@ -4551,7 +4506,7 @@ static int s2io_ethtool_sset(struct net_device *dev,
4551 4506
4552static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info) 4507static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
4553{ 4508{
4554 nic_t *sp = dev->priv; 4509 struct s2io_nic *sp = dev->priv;
4555 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); 4510 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
4556 info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); 4511 info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
4557 info->port = PORT_FIBRE; 4512 info->port = PORT_FIBRE;
@@ -4584,7 +4539,7 @@ static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
4584static void s2io_ethtool_gdrvinfo(struct net_device *dev, 4539static void s2io_ethtool_gdrvinfo(struct net_device *dev,
4585 struct ethtool_drvinfo *info) 4540 struct ethtool_drvinfo *info)
4586{ 4541{
4587 nic_t *sp = dev->priv; 4542 struct s2io_nic *sp = dev->priv;
4588 4543
4589 strncpy(info->driver, s2io_driver_name, sizeof(info->driver)); 4544 strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
4590 strncpy(info->version, s2io_driver_version, sizeof(info->version)); 4545 strncpy(info->version, s2io_driver_version, sizeof(info->version));
@@ -4616,7 +4571,7 @@ static void s2io_ethtool_gregs(struct net_device *dev,
4616 int i; 4571 int i;
4617 u64 reg; 4572 u64 reg;
4618 u8 *reg_space = (u8 *) space; 4573 u8 *reg_space = (u8 *) space;
4619 nic_t *sp = dev->priv; 4574 struct s2io_nic *sp = dev->priv;
4620 4575
4621 regs->len = XENA_REG_SPACE; 4576 regs->len = XENA_REG_SPACE;
4622 regs->version = sp->pdev->subsystem_device; 4577 regs->version = sp->pdev->subsystem_device;
@@ -4638,8 +4593,8 @@ static void s2io_ethtool_gregs(struct net_device *dev,
4638*/ 4593*/
4639static void s2io_phy_id(unsigned long data) 4594static void s2io_phy_id(unsigned long data)
4640{ 4595{
4641 nic_t *sp = (nic_t *) data; 4596 struct s2io_nic *sp = (struct s2io_nic *) data;
4642 XENA_dev_config_t __iomem *bar0 = sp->bar0; 4597 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4643 u64 val64 = 0; 4598 u64 val64 = 0;
4644 u16 subid; 4599 u16 subid;
4645 4600
@@ -4676,8 +4631,8 @@ static void s2io_phy_id(unsigned long data)
4676static int s2io_ethtool_idnic(struct net_device *dev, u32 data) 4631static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
4677{ 4632{
4678 u64 val64 = 0, last_gpio_ctrl_val; 4633 u64 val64 = 0, last_gpio_ctrl_val;
4679 nic_t *sp = dev->priv; 4634 struct s2io_nic *sp = dev->priv;
4680 XENA_dev_config_t __iomem *bar0 = sp->bar0; 4635 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4681 u16 subid; 4636 u16 subid;
4682 4637
4683 subid = sp->pdev->subsystem_device; 4638 subid = sp->pdev->subsystem_device;
@@ -4725,8 +4680,8 @@ static void s2io_ethtool_getpause_data(struct net_device *dev,
4725 struct ethtool_pauseparam *ep) 4680 struct ethtool_pauseparam *ep)
4726{ 4681{
4727 u64 val64; 4682 u64 val64;
4728 nic_t *sp = dev->priv; 4683 struct s2io_nic *sp = dev->priv;
4729 XENA_dev_config_t __iomem *bar0 = sp->bar0; 4684 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4730 4685
4731 val64 = readq(&bar0->rmac_pause_cfg); 4686 val64 = readq(&bar0->rmac_pause_cfg);
4732 if (val64 & RMAC_PAUSE_GEN_ENABLE) 4687 if (val64 & RMAC_PAUSE_GEN_ENABLE)
@@ -4752,8 +4707,8 @@ static int s2io_ethtool_setpause_data(struct net_device *dev,
4752 struct ethtool_pauseparam *ep) 4707 struct ethtool_pauseparam *ep)
4753{ 4708{
4754 u64 val64; 4709 u64 val64;
4755 nic_t *sp = dev->priv; 4710 struct s2io_nic *sp = dev->priv;
4756 XENA_dev_config_t __iomem *bar0 = sp->bar0; 4711 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4757 4712
4758 val64 = readq(&bar0->rmac_pause_cfg); 4713 val64 = readq(&bar0->rmac_pause_cfg);
4759 if (ep->tx_pause) 4714 if (ep->tx_pause)
@@ -4785,12 +4740,12 @@ static int s2io_ethtool_setpause_data(struct net_device *dev,
4785 */ 4740 */
4786 4741
4787#define S2IO_DEV_ID 5 4742#define S2IO_DEV_ID 5
4788static int read_eeprom(nic_t * sp, int off, u64 * data) 4743static int read_eeprom(struct s2io_nic * sp, int off, u64 * data)
4789{ 4744{
4790 int ret = -1; 4745 int ret = -1;
4791 u32 exit_cnt = 0; 4746 u32 exit_cnt = 0;
4792 u64 val64; 4747 u64 val64;
4793 XENA_dev_config_t __iomem *bar0 = sp->bar0; 4748 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4794 4749
4795 if (sp->device_type == XFRAME_I_DEVICE) { 4750 if (sp->device_type == XFRAME_I_DEVICE) {
4796 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) | 4751 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
@@ -4850,11 +4805,11 @@ static int read_eeprom(nic_t * sp, int off, u64 * data)
4850 * 0 on success, -1 on failure. 4805 * 0 on success, -1 on failure.
4851 */ 4806 */
4852 4807
4853static int write_eeprom(nic_t * sp, int off, u64 data, int cnt) 4808static int write_eeprom(struct s2io_nic * sp, int off, u64 data, int cnt)
4854{ 4809{
4855 int exit_cnt = 0, ret = -1; 4810 int exit_cnt = 0, ret = -1;
4856 u64 val64; 4811 u64 val64;
4857 XENA_dev_config_t __iomem *bar0 = sp->bar0; 4812 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4858 4813
4859 if (sp->device_type == XFRAME_I_DEVICE) { 4814 if (sp->device_type == XFRAME_I_DEVICE) {
4860 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) | 4815 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
@@ -4899,7 +4854,7 @@ static int write_eeprom(nic_t * sp, int off, u64 data, int cnt)
4899 } 4854 }
4900 return ret; 4855 return ret;
4901} 4856}
4902static void s2io_vpd_read(nic_t *nic) 4857static void s2io_vpd_read(struct s2io_nic *nic)
4903{ 4858{
4904 u8 *vpd_data; 4859 u8 *vpd_data;
4905 u8 data; 4860 u8 data;
@@ -4914,6 +4869,7 @@ static void s2io_vpd_read(nic_t *nic)
 		strcpy(nic->product_name, "Xframe I 10GbE network adapter");
 		vpd_addr = 0x50;
 	}
+	strcpy(nic->serial_num, "NOT AVAILABLE");
 
 	vpd_data = kmalloc(256, GFP_KERNEL);
 	if (!vpd_data)
@@ -4937,7 +4893,22 @@ static void s2io_vpd_read(nic_t *nic)
 		pci_read_config_dword(nic->pdev, (vpd_addr + 4),
 				      (u32 *)&vpd_data[i]);
 	}
-	if ((!fail) && (vpd_data[1] < VPD_PRODUCT_NAME_LEN)) {
+
+	if(!fail) {
+		/* read serial number of adapter */
+		for (cnt = 0; cnt < 256; cnt++) {
+			if ((vpd_data[cnt] == 'S') &&
+			    (vpd_data[cnt+1] == 'N') &&
+			    (vpd_data[cnt+2] < VPD_STRING_LEN)) {
+				memset(nic->serial_num, 0, VPD_STRING_LEN);
+				memcpy(nic->serial_num, &vpd_data[cnt + 3],
+				       vpd_data[cnt+2]);
+				break;
+			}
+		}
+	}
+
+	if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
 		memset(nic->product_name, 0, vpd_data[1]);
 		memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
 	}
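The new serial-number code walks the raw VPD bytes for the two-character "SN" keyword followed by a length byte, rather than using a structured VPD resource parser. A standalone sketch of the same scan; the buffer contents and the VPD_STRING_LEN value here are illustrative.

/* vpd_sn.c - naive keyword scan over a raw PCI VPD buffer.
 * VPD keyword fields are laid out as <2-byte keyword><1-byte length><data>. */
#include <stdio.h>
#include <string.h>

#define VPD_STRING_LEN 80	/* mirrors the driver's buffer-size assumption */

static int vpd_find_serial(const unsigned char *vpd, int len, char *out)
{
	int i;

	for (i = 0; i + 3 < len; i++) {
		if (vpd[i] == 'S' && vpd[i + 1] == 'N' &&
		    vpd[i + 2] < VPD_STRING_LEN) {
			memset(out, 0, VPD_STRING_LEN);
			memcpy(out, &vpd[i + 3], vpd[i + 2]);
			return 0;
		}
	}
	return -1;	/* caller keeps its "NOT AVAILABLE" default */
}

int main(void)
{
	unsigned char vpd[16] = { 'S', 'N', 4, 'A', 'B', '1', '2' };
	char serial[VPD_STRING_LEN] = "NOT AVAILABLE";

	if (vpd_find_serial(vpd, sizeof(vpd), serial) == 0)
		printf("serial: %s\n", serial);
	return 0;
}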
@@ -4962,7 +4933,7 @@ static int s2io_ethtool_geeprom(struct net_device *dev,
4962{ 4933{
4963 u32 i, valid; 4934 u32 i, valid;
4964 u64 data; 4935 u64 data;
4965 nic_t *sp = dev->priv; 4936 struct s2io_nic *sp = dev->priv;
4966 4937
4967 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16); 4938 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
4968 4939
@@ -5000,7 +4971,7 @@ static int s2io_ethtool_seeprom(struct net_device *dev,
5000{ 4971{
5001 int len = eeprom->len, cnt = 0; 4972 int len = eeprom->len, cnt = 0;
5002 u64 valid = 0, data; 4973 u64 valid = 0, data;
5003 nic_t *sp = dev->priv; 4974 struct s2io_nic *sp = dev->priv;
5004 4975
5005 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) { 4976 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5006 DBG_PRINT(ERR_DBG, 4977 DBG_PRINT(ERR_DBG,
@@ -5044,9 +5015,9 @@ static int s2io_ethtool_seeprom(struct net_device *dev,
5044 * 0 on success. 5015 * 0 on success.
5045 */ 5016 */
5046 5017
5047static int s2io_register_test(nic_t * sp, uint64_t * data) 5018static int s2io_register_test(struct s2io_nic * sp, uint64_t * data)
5048{ 5019{
5049 XENA_dev_config_t __iomem *bar0 = sp->bar0; 5020 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5050 u64 val64 = 0, exp_val; 5021 u64 val64 = 0, exp_val;
5051 int fail = 0; 5022 int fail = 0;
5052 5023
@@ -5111,7 +5082,7 @@ static int s2io_register_test(nic_t * sp, uint64_t * data)
5111 * 0 on success. 5082 * 0 on success.
5112 */ 5083 */
5113 5084
5114static int s2io_eeprom_test(nic_t * sp, uint64_t * data) 5085static int s2io_eeprom_test(struct s2io_nic * sp, uint64_t * data)
5115{ 5086{
5116 int fail = 0; 5087 int fail = 0;
5117 u64 ret_data, org_4F0, org_7F0; 5088 u64 ret_data, org_4F0, org_7F0;
@@ -5213,7 +5184,7 @@ static int s2io_eeprom_test(nic_t * sp, uint64_t * data)
5213 * 0 on success and -1 on failure. 5184 * 0 on success and -1 on failure.
5214 */ 5185 */
5215 5186
5216static int s2io_bist_test(nic_t * sp, uint64_t * data) 5187static int s2io_bist_test(struct s2io_nic * sp, uint64_t * data)
5217{ 5188{
5218 u8 bist = 0; 5189 u8 bist = 0;
5219 int cnt = 0, ret = -1; 5190 int cnt = 0, ret = -1;
@@ -5249,9 +5220,9 @@ static int s2io_bist_test(nic_t * sp, uint64_t * data)
5249 * 0 on success. 5220 * 0 on success.
5250 */ 5221 */
5251 5222
5252static int s2io_link_test(nic_t * sp, uint64_t * data) 5223static int s2io_link_test(struct s2io_nic * sp, uint64_t * data)
5253{ 5224{
5254 XENA_dev_config_t __iomem *bar0 = sp->bar0; 5225 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5255 u64 val64; 5226 u64 val64;
5256 5227
5257 val64 = readq(&bar0->adapter_status); 5228 val64 = readq(&bar0->adapter_status);
@@ -5276,9 +5247,9 @@ static int s2io_link_test(nic_t * sp, uint64_t * data)
5276 * 0 on success. 5247 * 0 on success.
5277 */ 5248 */
5278 5249
5279static int s2io_rldram_test(nic_t * sp, uint64_t * data) 5250static int s2io_rldram_test(struct s2io_nic * sp, uint64_t * data)
5280{ 5251{
5281 XENA_dev_config_t __iomem *bar0 = sp->bar0; 5252 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5282 u64 val64; 5253 u64 val64;
5283 int cnt, iteration = 0, test_fail = 0; 5254 int cnt, iteration = 0, test_fail = 0;
5284 5255
@@ -5380,7 +5351,7 @@ static void s2io_ethtool_test(struct net_device *dev,
5380 struct ethtool_test *ethtest, 5351 struct ethtool_test *ethtest,
5381 uint64_t * data) 5352 uint64_t * data)
5382{ 5353{
5383 nic_t *sp = dev->priv; 5354 struct s2io_nic *sp = dev->priv;
5384 int orig_state = netif_running(sp->dev); 5355 int orig_state = netif_running(sp->dev);
5385 5356
5386 if (ethtest->flags == ETH_TEST_FL_OFFLINE) { 5357 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
@@ -5436,8 +5407,8 @@ static void s2io_get_ethtool_stats(struct net_device *dev,
5436 u64 * tmp_stats) 5407 u64 * tmp_stats)
5437{ 5408{
5438 int i = 0; 5409 int i = 0;
5439 nic_t *sp = dev->priv; 5410 struct s2io_nic *sp = dev->priv;
5440 StatInfo_t *stat_info = sp->mac_control.stats_info; 5411 struct stat_block *stat_info = sp->mac_control.stats_info;
5441 5412
5442 s2io_updt_stats(sp); 5413 s2io_updt_stats(sp);
5443 tmp_stats[i++] = 5414 tmp_stats[i++] =
@@ -5664,14 +5635,14 @@ static int s2io_ethtool_get_regs_len(struct net_device *dev)
5664 5635
5665static u32 s2io_ethtool_get_rx_csum(struct net_device * dev) 5636static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
5666{ 5637{
5667 nic_t *sp = dev->priv; 5638 struct s2io_nic *sp = dev->priv;
5668 5639
5669 return (sp->rx_csum); 5640 return (sp->rx_csum);
5670} 5641}
5671 5642
5672static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data) 5643static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
5673{ 5644{
5674 nic_t *sp = dev->priv; 5645 struct s2io_nic *sp = dev->priv;
5675 5646
5676 if (data) 5647 if (data)
5677 sp->rx_csum = 1; 5648 sp->rx_csum = 1;
@@ -5750,10 +5721,8 @@ static const struct ethtool_ops netdev_ethtool_ops = {
 	.set_tx_csum = s2io_ethtool_op_set_tx_csum,
 	.get_sg = ethtool_op_get_sg,
 	.set_sg = ethtool_op_set_sg,
-#ifdef NETIF_F_TSO
 	.get_tso = s2io_ethtool_op_get_tso,
 	.set_tso = s2io_ethtool_op_set_tso,
-#endif
 	.get_ufo = ethtool_op_get_ufo,
 	.set_ufo = ethtool_op_set_ufo,
 	.self_test_count = s2io_ethtool_self_test_count,
@@ -5794,7 +5763,7 @@ static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5794 5763
5795static int s2io_change_mtu(struct net_device *dev, int new_mtu) 5764static int s2io_change_mtu(struct net_device *dev, int new_mtu)
5796{ 5765{
5797 nic_t *sp = dev->priv; 5766 struct s2io_nic *sp = dev->priv;
5798 5767
5799 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) { 5768 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
5800 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n", 5769 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
@@ -5813,7 +5782,7 @@ static int s2io_change_mtu(struct net_device *dev, int new_mtu)
5813 if (netif_queue_stopped(dev)) 5782 if (netif_queue_stopped(dev))
5814 netif_wake_queue(dev); 5783 netif_wake_queue(dev);
5815 } else { /* Device is down */ 5784 } else { /* Device is down */
5816 XENA_dev_config_t __iomem *bar0 = sp->bar0; 5785 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5817 u64 val64 = new_mtu; 5786 u64 val64 = new_mtu;
5818 5787
5819 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len); 5788 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
@@ -5838,9 +5807,9 @@ static int s2io_change_mtu(struct net_device *dev, int new_mtu)
5838static void s2io_tasklet(unsigned long dev_addr) 5807static void s2io_tasklet(unsigned long dev_addr)
5839{ 5808{
5840 struct net_device *dev = (struct net_device *) dev_addr; 5809 struct net_device *dev = (struct net_device *) dev_addr;
5841 nic_t *sp = dev->priv; 5810 struct s2io_nic *sp = dev->priv;
5842 int i, ret; 5811 int i, ret;
5843 mac_info_t *mac_control; 5812 struct mac_info *mac_control;
5844 struct config_param *config; 5813 struct config_param *config;
5845 5814
5846 mac_control = &sp->mac_control; 5815 mac_control = &sp->mac_control;
@@ -5873,9 +5842,9 @@ static void s2io_tasklet(unsigned long dev_addr)
5873 5842
5874static void s2io_set_link(struct work_struct *work) 5843static void s2io_set_link(struct work_struct *work)
5875{ 5844{
5876 nic_t *nic = container_of(work, nic_t, set_link_task); 5845 struct s2io_nic *nic = container_of(work, struct s2io_nic, set_link_task);
5877 struct net_device *dev = nic->dev; 5846 struct net_device *dev = nic->dev;
5878 XENA_dev_config_t __iomem *bar0 = nic->bar0; 5847 struct XENA_dev_config __iomem *bar0 = nic->bar0;
5879 register u64 val64; 5848 register u64 val64;
5880 u16 subid; 5849 u16 subid;
5881 5850
@@ -5894,57 +5863,53 @@ static void s2io_set_link(struct work_struct *work)
5894 } 5863 }
5895 5864
5896 val64 = readq(&bar0->adapter_status); 5865 val64 = readq(&bar0->adapter_status);
5897 if (verify_xena_quiescence(nic, val64, nic->device_enabled_once)) { 5866 if (LINK_IS_UP(val64)) {
5898 if (LINK_IS_UP(val64)) { 5867 if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
5899 val64 = readq(&bar0->adapter_control); 5868 if (verify_xena_quiescence(nic)) {
5900 val64 |= ADAPTER_CNTL_EN; 5869 val64 = readq(&bar0->adapter_control);
5901 writeq(val64, &bar0->adapter_control); 5870 val64 |= ADAPTER_CNTL_EN;
5902 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
5903 subid)) {
5904 val64 = readq(&bar0->gpio_control);
5905 val64 |= GPIO_CTRL_GPIO_0;
5906 writeq(val64, &bar0->gpio_control);
5907 val64 = readq(&bar0->gpio_control);
5908 } else {
5909 val64 |= ADAPTER_LED_ON;
5910 writeq(val64, &bar0->adapter_control); 5871 writeq(val64, &bar0->adapter_control);
5911 } 5872 if (CARDS_WITH_FAULTY_LINK_INDICATORS(
5912 if (s2io_link_fault_indication(nic) == 5873 nic->device_type, subid)) {
5913 MAC_RMAC_ERR_TIMER) { 5874 val64 = readq(&bar0->gpio_control);
5914 val64 = readq(&bar0->adapter_status); 5875 val64 |= GPIO_CTRL_GPIO_0;
5915 if (!LINK_IS_UP(val64)) { 5876 writeq(val64, &bar0->gpio_control);
5916 DBG_PRINT(ERR_DBG, "%s:", dev->name); 5877 val64 = readq(&bar0->gpio_control);
5917 DBG_PRINT(ERR_DBG, " Link down"); 5878 } else {
5918 DBG_PRINT(ERR_DBG, "after "); 5879 val64 |= ADAPTER_LED_ON;
5919 DBG_PRINT(ERR_DBG, "enabling "); 5880 writeq(val64, &bar0->adapter_control);
5920 DBG_PRINT(ERR_DBG, "device \n");
5921 } 5881 }
5922 }
5923 if (nic->device_enabled_once == FALSE) {
5924 nic->device_enabled_once = TRUE; 5882 nic->device_enabled_once = TRUE;
5883 } else {
5884 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
5885 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
5886 netif_stop_queue(dev);
5925 } 5887 }
5888 }
5889 val64 = readq(&bar0->adapter_status);
5890 if (!LINK_IS_UP(val64)) {
5891 DBG_PRINT(ERR_DBG, "%s:", dev->name);
5892 DBG_PRINT(ERR_DBG, " Link down after enabling ");
5893 DBG_PRINT(ERR_DBG, "device \n");
5894 } else
5926 s2io_link(nic, LINK_UP); 5895 s2io_link(nic, LINK_UP);
5927 } else { 5896 } else {
5928 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type, 5897 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
5929 subid)) { 5898 subid)) {
5930 val64 = readq(&bar0->gpio_control); 5899 val64 = readq(&bar0->gpio_control);
5931 val64 &= ~GPIO_CTRL_GPIO_0; 5900 val64 &= ~GPIO_CTRL_GPIO_0;
5932 writeq(val64, &bar0->gpio_control); 5901 writeq(val64, &bar0->gpio_control);
5933 val64 = readq(&bar0->gpio_control); 5902 val64 = readq(&bar0->gpio_control);
5934 }
5935 s2io_link(nic, LINK_DOWN);
5936 } 5903 }
5937 } else { /* NIC is not Quiescent. */ 5904 s2io_link(nic, LINK_DOWN);
5938 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
5939 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
5940 netif_stop_queue(dev);
5941 } 5905 }
5942 clear_bit(0, &(nic->link_state)); 5906 clear_bit(0, &(nic->link_state));
5943} 5907}
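Condensed, the reordered logic above now gates only the one-time ADAPTER_CNTL_EN write on quiescence, and re-samples the link state before reporting it. A toy model of that control flow, with plain flags standing in for the hardware registers:

/* set_link_flow.c - condensed view of the restructured bring-up logic. */
#include <stdio.h>

static int link_is_up = 1, adapter_enabled = 0, quiescent = 1;

static void set_link(void)
{
	if (link_is_up) {
		if (!adapter_enabled) {
			if (quiescent)
				adapter_enabled = 1;	/* one-time ADAPTER_CNTL_EN */
			else
				printf("device is not Quiescent\n");
		}
		/* re-check: the link may have dropped while enabling */
		if (link_is_up)
			printf("LINK_UP\n");
		else
			printf("Link down after enabling device\n");
	} else {
		printf("LINK_DOWN\n");
	}
}

int main(void)
{
	set_link();
	return 0;
}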
5944 5908
-static int set_rxd_buffer_pointer(nic_t *sp, RxD_t *rxdp, buffAdd_t *ba,
-				  struct sk_buff **skb, u64 *temp0, u64 *temp1,
-				  u64 *temp2, int size)
+static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
+				  struct buffAdd *ba,
+				  struct sk_buff **skb, u64 *temp0, u64 *temp1,
+				  u64 *temp2, int size)
5948{ 5913{
5949 struct net_device *dev = sp->dev; 5914 struct net_device *dev = sp->dev;
5950 struct sk_buff *frag_list; 5915 struct sk_buff *frag_list;
@@ -5958,7 +5923,7 @@ static int set_rxd_buffer_pointer(nic_t *sp, RxD_t *rxdp, buffAdd_t *ba,
5958 * using same mapped address for the Rxd 5923 * using same mapped address for the Rxd
5959 * buffer pointer 5924 * buffer pointer
5960 */ 5925 */
5961 ((RxD1_t*)rxdp)->Buffer0_ptr = *temp0; 5926 ((struct RxD1*)rxdp)->Buffer0_ptr = *temp0;
5962 } else { 5927 } else {
5963 *skb = dev_alloc_skb(size); 5928 *skb = dev_alloc_skb(size);
5964 if (!(*skb)) { 5929 if (!(*skb)) {
@@ -5970,7 +5935,7 @@ static int set_rxd_buffer_pointer(nic_t *sp, RxD_t *rxdp, buffAdd_t *ba,
5970 * such it will be used for next rxd whose 5935 * such it will be used for next rxd whose
5971 * Host Control is NULL 5936 * Host Control is NULL
5972 */ 5937 */
5973 ((RxD1_t*)rxdp)->Buffer0_ptr = *temp0 = 5938 ((struct RxD1*)rxdp)->Buffer0_ptr = *temp0 =
5974 pci_map_single( sp->pdev, (*skb)->data, 5939 pci_map_single( sp->pdev, (*skb)->data,
5975 size - NET_IP_ALIGN, 5940 size - NET_IP_ALIGN,
5976 PCI_DMA_FROMDEVICE); 5941 PCI_DMA_FROMDEVICE);
@@ -5979,36 +5944,36 @@ static int set_rxd_buffer_pointer(nic_t *sp, RxD_t *rxdp, buffAdd_t *ba,
5979 } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) { 5944 } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
5980 /* Two buffer Mode */ 5945 /* Two buffer Mode */
5981 if (*skb) { 5946 if (*skb) {
5982 ((RxD3_t*)rxdp)->Buffer2_ptr = *temp2; 5947 ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2;
5983 ((RxD3_t*)rxdp)->Buffer0_ptr = *temp0; 5948 ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0;
5984 ((RxD3_t*)rxdp)->Buffer1_ptr = *temp1; 5949 ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1;
5985 } else { 5950 } else {
5986 *skb = dev_alloc_skb(size); 5951 *skb = dev_alloc_skb(size);
5987 if (!(*skb)) { 5952 if (!(*skb)) {
5988 DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n", 5953 DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n",
5989 dev->name); 5954 dev->name);
5990 return -ENOMEM; 5955 return -ENOMEM;
5991 } 5956 }
5992 ((RxD3_t*)rxdp)->Buffer2_ptr = *temp2 = 5957 ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2 =
5993 pci_map_single(sp->pdev, (*skb)->data, 5958 pci_map_single(sp->pdev, (*skb)->data,
5994 dev->mtu + 4, 5959 dev->mtu + 4,
5995 PCI_DMA_FROMDEVICE); 5960 PCI_DMA_FROMDEVICE);
5996 ((RxD3_t*)rxdp)->Buffer0_ptr = *temp0 = 5961 ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0 =
5997 pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN, 5962 pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
5998 PCI_DMA_FROMDEVICE); 5963 PCI_DMA_FROMDEVICE);
5999 rxdp->Host_Control = (unsigned long) (*skb); 5964 rxdp->Host_Control = (unsigned long) (*skb);
6000 5965
6001 /* Buffer-1 will be dummy buffer not used */ 5966 /* Buffer-1 will be dummy buffer not used */
6002 ((RxD3_t*)rxdp)->Buffer1_ptr = *temp1 = 5967 ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1 =
6003 pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN, 5968 pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
6004 PCI_DMA_FROMDEVICE); 5969 PCI_DMA_FROMDEVICE);
6005 } 5970 }
6006 } else if ((rxdp->Host_Control == 0)) { 5971 } else if ((rxdp->Host_Control == 0)) {
6007 /* Three buffer mode */ 5972 /* Three buffer mode */
6008 if (*skb) { 5973 if (*skb) {
6009 ((RxD3_t*)rxdp)->Buffer0_ptr = *temp0; 5974 ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0;
6010 ((RxD3_t*)rxdp)->Buffer1_ptr = *temp1; 5975 ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1;
6011 ((RxD3_t*)rxdp)->Buffer2_ptr = *temp2; 5976 ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2;
6012 } else { 5977 } else {
6013 *skb = dev_alloc_skb(size); 5978 *skb = dev_alloc_skb(size);
6014 if (!(*skb)) { 5979 if (!(*skb)) {
@@ -6016,11 +5981,11 @@ static int set_rxd_buffer_pointer(nic_t *sp, RxD_t *rxdp, buffAdd_t *ba,
6016 dev->name); 5981 dev->name);
6017 return -ENOMEM; 5982 return -ENOMEM;
6018 } 5983 }
6019 ((RxD3_t*)rxdp)->Buffer0_ptr = *temp0 = 5984 ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0 =
6020 pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN, 5985 pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN,
6021 PCI_DMA_FROMDEVICE); 5986 PCI_DMA_FROMDEVICE);
6022 /* Buffer-1 receives L3/L4 headers */ 5987 /* Buffer-1 receives L3/L4 headers */
6023 ((RxD3_t*)rxdp)->Buffer1_ptr = *temp1 = 5988 ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1 =
6024 pci_map_single( sp->pdev, (*skb)->data, 5989 pci_map_single( sp->pdev, (*skb)->data,
6025 l3l4hdr_size + 4, 5990 l3l4hdr_size + 4,
6026 PCI_DMA_FROMDEVICE); 5991 PCI_DMA_FROMDEVICE);
@@ -6040,14 +6005,15 @@ static int set_rxd_buffer_pointer(nic_t *sp, RxD_t *rxdp, buffAdd_t *ba,
6040 /* 6005 /*
6041 * Buffer-2 receives L4 data payload 6006 * Buffer-2 receives L4 data payload
6042 */ 6007 */
6043 ((RxD3_t*)rxdp)->Buffer2_ptr = *temp2 = 6008 ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2 =
6044 pci_map_single( sp->pdev, frag_list->data, 6009 pci_map_single( sp->pdev, frag_list->data,
6045 dev->mtu, PCI_DMA_FROMDEVICE); 6010 dev->mtu, PCI_DMA_FROMDEVICE);
6046 } 6011 }
6047 } 6012 }
6048 return 0; 6013 return 0;
 }
-static void set_rxd_buffer_size(nic_t *sp, RxD_t *rxdp, int size)
+static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
+				int size)
6051{ 6017{
6052 struct net_device *dev = sp->dev; 6018 struct net_device *dev = sp->dev;
6053 if (sp->rxd_mode == RXD_MODE_1) { 6019 if (sp->rxd_mode == RXD_MODE_1) {
@@ -6063,15 +6029,15 @@ static void set_rxd_buffer_size(nic_t *sp, RxD_t *rxdp, int size)
6063 } 6029 }
6064} 6030}
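As background for the refill code above: the driver supports one-, two-, and three-buffer Rx descriptor layouts, and the roles summarized below are taken from the diff's own comments ("Buffer-1 will be dummy buffer not used", "Buffer-1 receives L3/L4 headers", "Buffer-2 receives L4 data payload"). The struct fields here are illustrative, not the real descriptor format.

/* rxd_modes.c - a map of the three Rx descriptor layouts. */
#include <stdio.h>

struct rxd1 { unsigned long long buf0_ptr; };	/* mode 1: whole frame */
struct rxd3 { unsigned long long buf0_ptr, buf1_ptr, buf2_ptr; };

int main(void)
{
	/* mode 1 : buf0 = entire frame
	 * mode 3B: buf0 = header scratch, buf1 = unused dummy, buf2 = MTU payload
	 * mode 3A: buf0 = scratch, buf1 = L3/L4 headers, buf2 = L4 payload */
	printf("RxD1: %zu bytes of pointers, RxD3: %zu\n",
	       sizeof(struct rxd1), sizeof(struct rxd3));
	return 0;
}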
6065 6031
6066static int rxd_owner_bit_reset(nic_t *sp) 6032static int rxd_owner_bit_reset(struct s2io_nic *sp)
6067{ 6033{
6068 int i, j, k, blk_cnt = 0, size; 6034 int i, j, k, blk_cnt = 0, size;
6069 mac_info_t * mac_control = &sp->mac_control; 6035 struct mac_info * mac_control = &sp->mac_control;
6070 struct config_param *config = &sp->config; 6036 struct config_param *config = &sp->config;
6071 struct net_device *dev = sp->dev; 6037 struct net_device *dev = sp->dev;
6072 RxD_t *rxdp = NULL; 6038 struct RxD_t *rxdp = NULL;
6073 struct sk_buff *skb = NULL; 6039 struct sk_buff *skb = NULL;
6074 buffAdd_t *ba = NULL; 6040 struct buffAdd *ba = NULL;
6075 u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0; 6041 u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6076 6042
6077 /* Calculate the size based on ring mode */ 6043 /* Calculate the size based on ring mode */
@@ -6110,7 +6076,7 @@ static int rxd_owner_bit_reset(nic_t *sp)
6110 6076
6111} 6077}
6112 6078
6113static int s2io_add_isr(nic_t * sp) 6079static int s2io_add_isr(struct s2io_nic * sp)
6114{ 6080{
6115 int ret = 0; 6081 int ret = 0;
6116 struct net_device *dev = sp->dev; 6082 struct net_device *dev = sp->dev;
@@ -6125,7 +6091,7 @@ static int s2io_add_isr(nic_t * sp)
6125 sp->intr_type = INTA; 6091 sp->intr_type = INTA;
6126 } 6092 }
6127 6093
-	/* Store the values of the MSIX table in the nic_t structure */
+	/* Store the values of the MSIX table in the struct s2io_nic structure */
6129 store_xmsi_data(sp); 6095 store_xmsi_data(sp);
6130 6096
6131 /* After proper initialization of H/W, register ISR */ 6097 /* After proper initialization of H/W, register ISR */
@@ -6180,7 +6146,7 @@ static int s2io_add_isr(nic_t * sp)
6180 } 6146 }
6181 return 0; 6147 return 0;
6182} 6148}
6183static void s2io_rem_isr(nic_t * sp) 6149static void s2io_rem_isr(struct s2io_nic * sp)
6184{ 6150{
6185 int cnt = 0; 6151 int cnt = 0;
6186 struct net_device *dev = sp->dev; 6152 struct net_device *dev = sp->dev;
@@ -6222,10 +6188,10 @@ static void s2io_rem_isr(nic_t * sp)
6222 } while(cnt < 5); 6188 } while(cnt < 5);
6223} 6189}
6224 6190
6225static void s2io_card_down(nic_t * sp) 6191static void s2io_card_down(struct s2io_nic * sp)
6226{ 6192{
6227 int cnt = 0; 6193 int cnt = 0;
6228 XENA_dev_config_t __iomem *bar0 = sp->bar0; 6194 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6229 unsigned long flags; 6195 unsigned long flags;
6230 register u64 val64 = 0; 6196 register u64 val64 = 0;
6231 6197
@@ -6256,7 +6222,8 @@ static void s2io_card_down(nic_t * sp)
 		rxd_owner_bit_reset(sp);
 
 		val64 = readq(&bar0->adapter_status);
-		if (verify_xena_quiescence(sp, val64, sp->device_enabled_once)) {
+		if (verify_xena_quiescence(sp)) {
+			if(verify_pcc_quiescent(sp, sp->device_enabled_once))
 			break;
 		}
6262 6229
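The shutdown path above retries the quiescence check in a loop, reclaiming NIC-owned descriptors between attempts rather than waiting forever. A toy version of that bounded wait loop; the fake hardware here turns quiescent on the third check.

/* card_down_wait.c - retry-until-quiescent-or-give-up shutdown pattern. */
#include <stdio.h>

static int checks = 0;
static int verify_quiescence(void)
{
	return ++checks >= 3;	/* fake hardware: quiet on the 3rd try */
}

static void card_down_wait(void)
{
	int cnt = 0;

	do {
		/* rxd_owner_bit_reset(): reclaim descriptors the NIC owns */
		if (verify_quiescence())
			break;
		if (++cnt == 10) {
			printf("Device not quiescent, forcing reset\n");
			break;
		}
	} while (1);
}

int main(void)
{
	card_down_wait();
	printf("checks needed: %d\n", checks);
	return 0;
}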
@@ -6285,10 +6252,10 @@ static void s2io_card_down(nic_t * sp)
6285 clear_bit(0, &(sp->link_state)); 6252 clear_bit(0, &(sp->link_state));
6286} 6253}
6287 6254
6288static int s2io_card_up(nic_t * sp) 6255static int s2io_card_up(struct s2io_nic * sp)
6289{ 6256{
6290 int i, ret = 0; 6257 int i, ret = 0;
6291 mac_info_t *mac_control; 6258 struct mac_info *mac_control;
6292 struct config_param *config; 6259 struct config_param *config;
6293 struct net_device *dev = (struct net_device *) sp->dev; 6260 struct net_device *dev = (struct net_device *) sp->dev;
6294 u16 interruptible; 6261 u16 interruptible;
@@ -6319,6 +6286,13 @@ static int s2io_card_up(nic_t * sp)
6319 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i, 6286 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
6320 atomic_read(&sp->rx_bufs_left[i])); 6287 atomic_read(&sp->rx_bufs_left[i]));
6321 } 6288 }
+	/* Maintain the state prior to the open */
+	if (sp->promisc_flg)
+		sp->promisc_flg = 0;
+	if (sp->m_cast_flg) {
+		sp->m_cast_flg = 0;
+		sp->all_multi_pos = 0;
+	}
6322 6296
6323 /* Setting its receive mode */ 6297 /* Setting its receive mode */
6324 s2io_set_multicast(dev); 6298 s2io_set_multicast(dev);
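The added flag-clearing matters because s2io_set_multicast() caches what it last programmed and skips redundant hardware writes; after a down/up cycle the hardware state is gone but the cache would not be. A small demonstration of the failure mode and the fix, with all names invented:

/* stale_flags.c - why cached "already programmed" flags must be reset. */
#include <stdio.h>

static int promisc_flg, hw_promisc;

static void set_multicast(int want_promisc)
{
	if (want_promisc && !promisc_flg) {
		hw_promisc = 1;		/* program the hardware */
		promisc_flg = 1;	/* remember that we did */
	}
}

int main(void)
{
	set_multicast(1);
	hw_promisc = 0;			/* card_down: hardware reset wipes it */
	promisc_flg = 0;		/* the fix: forget the cached state too */
	set_multicast(1);		/* card_up now reprograms the hardware */
	printf("hw_promisc = %d\n", hw_promisc);
	return 0;
}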
@@ -6380,7 +6354,7 @@ static int s2io_card_up(nic_t * sp)
6380 6354
6381static void s2io_restart_nic(struct work_struct *work) 6355static void s2io_restart_nic(struct work_struct *work)
6382{ 6356{
6383 nic_t *sp = container_of(work, nic_t, rst_timer_task); 6357 struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
6384 struct net_device *dev = sp->dev; 6358 struct net_device *dev = sp->dev;
6385 6359
6386 s2io_card_down(sp); 6360 s2io_card_down(sp);
@@ -6409,7 +6383,7 @@ static void s2io_restart_nic(struct work_struct *work)
6409 6383
6410static void s2io_tx_watchdog(struct net_device *dev) 6384static void s2io_tx_watchdog(struct net_device *dev)
6411{ 6385{
6412 nic_t *sp = dev->priv; 6386 struct s2io_nic *sp = dev->priv;
6413 6387
6414 if (netif_carrier_ok(dev)) { 6388 if (netif_carrier_ok(dev)) {
6415 schedule_work(&sp->rst_timer_task); 6389 schedule_work(&sp->rst_timer_task);
@@ -6434,16 +6408,16 @@ static void s2io_tx_watchdog(struct net_device *dev)
6434 * Return value: 6408 * Return value:
6435 * SUCCESS on success and -1 on failure. 6409 * SUCCESS on success and -1 on failure.
6436 */ 6410 */
6437static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp) 6411static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
6438{ 6412{
6439 nic_t *sp = ring_data->nic; 6413 struct s2io_nic *sp = ring_data->nic;
6440 struct net_device *dev = (struct net_device *) sp->dev; 6414 struct net_device *dev = (struct net_device *) sp->dev;
6441 struct sk_buff *skb = (struct sk_buff *) 6415 struct sk_buff *skb = (struct sk_buff *)
6442 ((unsigned long) rxdp->Host_Control); 6416 ((unsigned long) rxdp->Host_Control);
6443 int ring_no = ring_data->ring_no; 6417 int ring_no = ring_data->ring_no;
6444 u16 l3_csum, l4_csum; 6418 u16 l3_csum, l4_csum;
6445 unsigned long long err = rxdp->Control_1 & RXD_T_CODE; 6419 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
6446 lro_t *lro; 6420 struct lro *lro;
6447 6421
6448 skb->dev = dev; 6422 skb->dev = dev;
6449 6423
@@ -6488,7 +6462,7 @@ static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
6488 int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2); 6462 int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
6489 unsigned char *buff = skb_push(skb, buf0_len); 6463 unsigned char *buff = skb_push(skb, buf0_len);
6490 6464
6491 buffAdd_t *ba = &ring_data->ba[get_block][get_off]; 6465 struct buffAdd *ba = &ring_data->ba[get_block][get_off];
6492 sp->stats.rx_bytes += buf0_len + buf2_len; 6466 sp->stats.rx_bytes += buf0_len + buf2_len;
6493 memcpy(buff, ba->ba_0, buf0_len); 6467 memcpy(buff, ba->ba_0, buf0_len);
6494 6468
@@ -6498,7 +6472,6 @@ static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
 		skb_put(skb, buf1_len);
 		skb->len += buf2_len;
 		skb->data_len += buf2_len;
-		skb->truesize += buf2_len;
 		skb_put(skb_shinfo(skb)->frag_list, buf2_len);
 		sp->stats.rx_bytes += buf1_len;
6504 6477
@@ -6582,23 +6555,20 @@ static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
6582 6555
 	if (!sp->lro) {
 		skb->protocol = eth_type_trans(skb, dev);
-#ifdef CONFIG_S2IO_NAPI
-		if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
-			/* Queueing the vlan frame to the upper layer */
-			vlan_hwaccel_receive_skb(skb, sp->vlgrp,
-				RXD_GET_VLAN_TAG(rxdp->Control_2));
-		} else {
-			netif_receive_skb(skb);
-		}
-#else
 		if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
 			/* Queueing the vlan frame to the upper layer */
-			vlan_hwaccel_rx(skb, sp->vlgrp,
-				RXD_GET_VLAN_TAG(rxdp->Control_2));
+			if (napi)
+				vlan_hwaccel_receive_skb(skb, sp->vlgrp,
+					RXD_GET_VLAN_TAG(rxdp->Control_2));
+			else
+				vlan_hwaccel_rx(skb, sp->vlgrp,
+					RXD_GET_VLAN_TAG(rxdp->Control_2));
 		} else {
-			netif_rx(skb);
+			if (napi)
+				netif_receive_skb(skb);
+			else
+				netif_rx(skb);
 		}
-#endif
 	} else {
 send_up:
 		queue_rx_frame(skb);
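The receive path now chooses its delivery call at runtime: NAPI poll context may use netif_receive_skb() and vlan_hwaccel_receive_skb(), while classic interrupt context must use netif_rx() and vlan_hwaccel_rx(). A sketch of that fan-out, printing which call would be made:

/* rx_fanout.c - runtime selection of the packet delivery call. */
#include <stdio.h>

static int napi = 1;

static void deliver(int has_vlan_tag)
{
	if (has_vlan_tag)
		puts(napi ? "vlan_hwaccel_receive_skb" : "vlan_hwaccel_rx");
	else
		puts(napi ? "netif_receive_skb" : "netif_rx");
}

int main(void)
{
	deliver(0);	/* plain frame */
	deliver(1);	/* VLAN-tagged frame */
	return 0;
}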
@@ -6622,7 +6592,7 @@ aggregate:
6622 * void. 6592 * void.
6623 */ 6593 */
6624 6594
6625static void s2io_link(nic_t * sp, int link) 6595static void s2io_link(struct s2io_nic * sp, int link)
6626{ 6596{
6627 struct net_device *dev = (struct net_device *) sp->dev; 6597 struct net_device *dev = (struct net_device *) sp->dev;
6628 6598
@@ -6666,7 +6636,7 @@ static int get_xena_rev_id(struct pci_dev *pdev)
6666 * void 6636 * void
6667 */ 6637 */
6668 6638
6669static void s2io_init_pci(nic_t * sp) 6639static void s2io_init_pci(struct s2io_nic * sp)
6670{ 6640{
6671 u16 pci_cmd = 0, pcix_cmd = 0; 6641 u16 pci_cmd = 0, pcix_cmd = 0;
6672 6642
@@ -6699,13 +6669,9 @@ static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
 		DBG_PRINT(ERR_DBG, "s2io: Default to 8 Rx rings\n");
 		rx_ring_num = 8;
 	}
-#ifdef CONFIG_S2IO_NAPI
-	if (*dev_intr_type != INTA) {
-		DBG_PRINT(ERR_DBG, "s2io: NAPI cannot be enabled when "
-			  "MSI/MSI-X is enabled. Defaulting to INTA\n");
-		*dev_intr_type = INTA;
-	}
-#endif
+	if (*dev_intr_type != INTA)
+		napi = 0;
+
 #ifndef CONFIG_PCI_MSI
 	if (*dev_intr_type != INTA) {
 		DBG_PRINT(ERR_DBG, "s2io: This kernel does not support"
@@ -6726,6 +6692,8 @@ static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
6726 "Defaulting to INTA\n"); 6692 "Defaulting to INTA\n");
6727 *dev_intr_type = INTA; 6693 *dev_intr_type = INTA;
6728 } 6694 }
+	if ( (rx_ring_num > 1) && (*dev_intr_type != INTA) )
+		napi = 0;
6729 if (rx_ring_mode > 3) { 6697 if (rx_ring_mode > 3) {
6730 DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n"); 6698 DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
6731 DBG_PRINT(ERR_DBG, "s2io: Defaulting to 3-buffer mode\n"); 6699 DBG_PRINT(ERR_DBG, "s2io: Defaulting to 3-buffer mode\n");
@@ -6751,15 +6719,15 @@ static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
6751static int __devinit 6719static int __devinit
6752s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) 6720s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
6753{ 6721{
6754 nic_t *sp; 6722 struct s2io_nic *sp;
6755 struct net_device *dev; 6723 struct net_device *dev;
6756 int i, j, ret; 6724 int i, j, ret;
6757 int dma_flag = FALSE; 6725 int dma_flag = FALSE;
6758 u32 mac_up, mac_down; 6726 u32 mac_up, mac_down;
6759 u64 val64 = 0, tmp64 = 0; 6727 u64 val64 = 0, tmp64 = 0;
6760 XENA_dev_config_t __iomem *bar0 = NULL; 6728 struct XENA_dev_config __iomem *bar0 = NULL;
6761 u16 subid; 6729 u16 subid;
6762 mac_info_t *mac_control; 6730 struct mac_info *mac_control;
6763 struct config_param *config; 6731 struct config_param *config;
6764 int mode; 6732 int mode;
6765 u8 dev_intr_type = intr_type; 6733 u8 dev_intr_type = intr_type;
@@ -6814,7 +6782,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
6814 } 6782 }
6815 } 6783 }
6816 6784
6817 dev = alloc_etherdev(sizeof(nic_t)); 6785 dev = alloc_etherdev(sizeof(struct s2io_nic));
6818 if (dev == NULL) { 6786 if (dev == NULL) {
6819 DBG_PRINT(ERR_DBG, "Device allocation failed\n"); 6787 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
6820 pci_disable_device(pdev); 6788 pci_disable_device(pdev);
@@ -6829,7 +6797,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
6829 6797
6830 /* Private member variable initialized to s2io NIC structure */ 6798 /* Private member variable initialized to s2io NIC structure */
6831 sp = dev->priv; 6799 sp = dev->priv;
6832 memset(sp, 0, sizeof(nic_t)); 6800 memset(sp, 0, sizeof(struct s2io_nic));
6833 sp->dev = dev; 6801 sp->dev = dev;
6834 sp->pdev = pdev; 6802 sp->pdev = pdev;
6835 sp->high_dma_flag = dma_flag; 6803 sp->high_dma_flag = dma_flag;
@@ -6925,7 +6893,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
6925 sp->bar0 = ioremap(pci_resource_start(pdev, 0), 6893 sp->bar0 = ioremap(pci_resource_start(pdev, 0),
6926 pci_resource_len(pdev, 0)); 6894 pci_resource_len(pdev, 0));
6927 if (!sp->bar0) { 6895 if (!sp->bar0) {
-		DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem1\n",
+		DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
6929 dev->name); 6897 dev->name);
6930 ret = -ENOMEM; 6898 ret = -ENOMEM;
6931 goto bar0_remap_failed; 6899 goto bar0_remap_failed;
@@ -6934,7 +6902,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
6934 sp->bar1 = ioremap(pci_resource_start(pdev, 2), 6902 sp->bar1 = ioremap(pci_resource_start(pdev, 2),
6935 pci_resource_len(pdev, 2)); 6903 pci_resource_len(pdev, 2));
6936 if (!sp->bar1) { 6904 if (!sp->bar1) {
-		DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem2\n",
+		DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
6938 dev->name); 6906 dev->name);
6939 ret = -ENOMEM; 6907 ret = -ENOMEM;
6940 goto bar1_remap_failed; 6908 goto bar1_remap_failed;
@@ -6945,7 +6913,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
6945 6913
6946 /* Initializing the BAR1 address as the start of the FIFO pointer. */ 6914 /* Initializing the BAR1 address as the start of the FIFO pointer. */
6947 for (j = 0; j < MAX_TX_FIFOS; j++) { 6915 for (j = 0; j < MAX_TX_FIFOS; j++) {
6948 mac_control->tx_FIFO_start[j] = (TxFIFO_element_t __iomem *) 6916 mac_control->tx_FIFO_start[j] = (struct TxFIFO_element __iomem *)
6949 (sp->bar1 + (j * 0x00020000)); 6917 (sp->bar1 + (j * 0x00020000));
6950 } 6918 }
6951 6919
@@ -6966,10 +6934,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
6966 * will use eth_mac_addr() for dev->set_mac_address 6934 * will use eth_mac_addr() for dev->set_mac_address
6967 * mac address will be set every time dev->open() is called 6935 * mac address will be set every time dev->open() is called
6968 */ 6936 */
6969#if defined(CONFIG_S2IO_NAPI)
6970 dev->poll = s2io_poll; 6937 dev->poll = s2io_poll;
6971 dev->weight = 32; 6938 dev->weight = 32;
6972#endif
6973 6939
6974#ifdef CONFIG_NET_POLL_CONTROLLER 6940#ifdef CONFIG_NET_POLL_CONTROLLER
6975 dev->poll_controller = s2io_netpoll; 6941 dev->poll_controller = s2io_netpoll;
@@ -6978,13 +6944,9 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
6978 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM; 6944 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
6979 if (sp->high_dma_flag == TRUE) 6945 if (sp->high_dma_flag == TRUE)
6980 dev->features |= NETIF_F_HIGHDMA; 6946 dev->features |= NETIF_F_HIGHDMA;
-#ifdef NETIF_F_TSO
 	dev->features |= NETIF_F_TSO;
-#endif
-#ifdef NETIF_F_TSO6
 	dev->features |= NETIF_F_TSO6;
-#endif
-	if (sp->device_type & XFRAME_II_DEVICE) {
+	if ((sp->device_type & XFRAME_II_DEVICE) && (ufo)) {
6988 dev->features |= NETIF_F_UFO; 6950 dev->features |= NETIF_F_UFO;
6989 dev->features |= NETIF_F_HW_CSUM; 6951 dev->features |= NETIF_F_HW_CSUM;
6990 } 6952 }
@@ -7065,9 +7027,9 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7065 7027
7066 /* Initialize spinlocks */ 7028 /* Initialize spinlocks */
7067 spin_lock_init(&sp->tx_lock); 7029 spin_lock_init(&sp->tx_lock);
-#ifndef CONFIG_S2IO_NAPI
-	spin_lock_init(&sp->put_lock);
-#endif
+
+	if (!napi)
+		spin_lock_init(&sp->put_lock);
 	spin_lock_init(&sp->rx_lock);
7072 7034
7073 /* 7035 /*
@@ -7098,13 +7060,14 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7098 DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name, 7060 DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
7099 s2io_driver_version); 7061 s2io_driver_version);
7100 DBG_PRINT(ERR_DBG, "%s: MAC ADDR: " 7062 DBG_PRINT(ERR_DBG, "%s: MAC ADDR: "
7101 "%02x:%02x:%02x:%02x:%02x:%02x\n", dev->name, 7063 "%02x:%02x:%02x:%02x:%02x:%02x", dev->name,
7102 sp->def_mac_addr[0].mac_addr[0], 7064 sp->def_mac_addr[0].mac_addr[0],
7103 sp->def_mac_addr[0].mac_addr[1], 7065 sp->def_mac_addr[0].mac_addr[1],
7104 sp->def_mac_addr[0].mac_addr[2], 7066 sp->def_mac_addr[0].mac_addr[2],
7105 sp->def_mac_addr[0].mac_addr[3], 7067 sp->def_mac_addr[0].mac_addr[3],
7106 sp->def_mac_addr[0].mac_addr[4], 7068 sp->def_mac_addr[0].mac_addr[4],
7107 sp->def_mac_addr[0].mac_addr[5]); 7069 sp->def_mac_addr[0].mac_addr[5]);
7070 DBG_PRINT(ERR_DBG, "SERIAL NUMBER: %s\n", sp->serial_num);
7108 if (sp->device_type & XFRAME_II_DEVICE) { 7071 if (sp->device_type & XFRAME_II_DEVICE) {
7109 mode = s2io_print_pci_mode(sp); 7072 mode = s2io_print_pci_mode(sp);
7110 if (mode < 0) { 7073 if (mode < 0) {
@@ -7128,9 +7091,9 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7128 dev->name); 7091 dev->name);
7129 break; 7092 break;
7130 } 7093 }
-#ifdef CONFIG_S2IO_NAPI
-	DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
-#endif
+
+	if (napi)
+		DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
7134 switch(sp->intr_type) { 7097 switch(sp->intr_type) {
7135 case INTA: 7098 case INTA:
7136 DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name); 7099 DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
@@ -7145,7 +7108,9 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7145 if (sp->lro) 7108 if (sp->lro)
7146 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n", 7109 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
7147 dev->name); 7110 dev->name);
-
+	if (ufo)
+		DBG_PRINT(ERR_DBG, "%s: UDP Fragmentation Offload(UFO)"
+			" enabled\n", dev->name);
7149 /* Initialize device name */ 7114 /* Initialize device name */
7150 sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name); 7115 sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
7151 7116
@@ -7202,7 +7167,7 @@ static void __devexit s2io_rem_nic(struct pci_dev *pdev)
7202{ 7167{
7203 struct net_device *dev = 7168 struct net_device *dev =
7204 (struct net_device *) pci_get_drvdata(pdev); 7169 (struct net_device *) pci_get_drvdata(pdev);
7205 nic_t *sp; 7170 struct s2io_nic *sp;
7206 7171
7207 if (dev == NULL) { 7172 if (dev == NULL) {
7208 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n"); 7173 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
@@ -7215,7 +7180,6 @@ static void __devexit s2io_rem_nic(struct pci_dev *pdev)
7215 free_shared_mem(sp); 7180 free_shared_mem(sp);
7216 iounmap(sp->bar0); 7181 iounmap(sp->bar0);
7217 iounmap(sp->bar1); 7182 iounmap(sp->bar1);
-	pci_disable_device(pdev);
7219 if (sp->intr_type != MSI_X) 7183 if (sp->intr_type != MSI_X)
7220 pci_release_regions(pdev); 7184 pci_release_regions(pdev);
7221 else { 7185 else {
@@ -7226,6 +7190,7 @@ static void __devexit s2io_rem_nic(struct pci_dev *pdev)
7226 } 7190 }
7227 pci_set_drvdata(pdev, NULL); 7191 pci_set_drvdata(pdev, NULL);
7228 free_netdev(dev); 7192 free_netdev(dev);
7193 pci_disable_device(pdev);
7229} 7194}
7230 7195
7231/** 7196/**
@@ -7244,7 +7209,7 @@ int __init s2io_starter(void)
 * Description: This function is the cleanup routine for the driver. It unregisters the driver.
7245 */ 7210 */
7246 7211
-static void s2io_closer(void)
+static __exit void s2io_closer(void)
7248{ 7213{
7249 pci_unregister_driver(&s2io_driver); 7214 pci_unregister_driver(&s2io_driver);
7250 DBG_PRINT(INIT_DBG, "cleanup done\n"); 7215 DBG_PRINT(INIT_DBG, "cleanup done\n");
@@ -7254,7 +7219,7 @@ module_init(s2io_starter);
7254module_exit(s2io_closer); 7219module_exit(s2io_closer);
7255 7220
7256static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip, 7221static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
7257 struct tcphdr **tcp, RxD_t *rxdp) 7222 struct tcphdr **tcp, struct RxD_t *rxdp)
7258{ 7223{
7259 int ip_off; 7224 int ip_off;
7260 u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len; 7225 u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
@@ -7288,7 +7253,7 @@ static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
7288 return 0; 7253 return 0;
7289} 7254}
7290 7255
7291static int check_for_socket_match(lro_t *lro, struct iphdr *ip, 7256static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
7292 struct tcphdr *tcp) 7257 struct tcphdr *tcp)
7293{ 7258{
7294 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__); 7259 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
@@ -7303,7 +7268,7 @@ static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
7303 return(ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2)); 7268 return(ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2));
7304} 7269}
7305 7270
7306static void initiate_new_session(lro_t *lro, u8 *l2h, 7271static void initiate_new_session(struct lro *lro, u8 *l2h,
7307 struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len) 7272 struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len)
7308{ 7273{
7309 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__); 7274 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
@@ -7329,12 +7294,12 @@ static void initiate_new_session(lro_t *lro, u8 *l2h,
7329 lro->in_use = 1; 7294 lro->in_use = 1;
7330} 7295}
7331 7296
7332static void update_L3L4_header(nic_t *sp, lro_t *lro) 7297static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
7333{ 7298{
7334 struct iphdr *ip = lro->iph; 7299 struct iphdr *ip = lro->iph;
7335 struct tcphdr *tcp = lro->tcph; 7300 struct tcphdr *tcp = lro->tcph;
7336 u16 nchk; 7301 __sum16 nchk;
7337 StatInfo_t *statinfo = sp->mac_control.stats_info; 7302 struct stat_block *statinfo = sp->mac_control.stats_info;
7338 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__); 7303 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7339 7304
7340 /* Update L3 header */ 7305 /* Update L3 header */
@@ -7360,7 +7325,7 @@ static void update_L3L4_header(nic_t *sp, lro_t *lro)
7360 statinfo->sw_stat.num_aggregations++; 7325 statinfo->sw_stat.num_aggregations++;
7361} 7326}
7362 7327
7363static void aggregate_new_rx(lro_t *lro, struct iphdr *ip, 7328static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
7364 struct tcphdr *tcp, u32 l4_pyld) 7329 struct tcphdr *tcp, u32 l4_pyld)
7365{ 7330{
7366 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__); 7331 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
@@ -7382,7 +7347,7 @@ static void aggregate_new_rx(lro_t *lro, struct iphdr *ip,
7382 } 7347 }
7383} 7348}
7384 7349
7385static int verify_l3_l4_lro_capable(lro_t *l_lro, struct iphdr *ip, 7350static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
7386 struct tcphdr *tcp, u32 tcp_pyld_len) 7351 struct tcphdr *tcp, u32 tcp_pyld_len)
7387{ 7352{
7388 u8 *ptr; 7353 u8 *ptr;
@@ -7440,8 +7405,8 @@ static int verify_l3_l4_lro_capable(lro_t *l_lro, struct iphdr *ip,
7440} 7405}
7441 7406
7442static int 7407static int
7443s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, lro_t **lro, 7408s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro,
7444 RxD_t *rxdp, nic_t *sp) 7409 struct RxD_t *rxdp, struct s2io_nic *sp)
7445{ 7410{
7446 struct iphdr *ip; 7411 struct iphdr *ip;
7447 struct tcphdr *tcph; 7412 struct tcphdr *tcph;
@@ -7458,7 +7423,7 @@ s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, lro_t **lro,
7458 tcph = (struct tcphdr *)*tcp; 7423 tcph = (struct tcphdr *)*tcp;
7459 *tcp_len = get_l4_pyld_length(ip, tcph); 7424 *tcp_len = get_l4_pyld_length(ip, tcph);
7460 for (i=0; i<MAX_LRO_SESSIONS; i++) { 7425 for (i=0; i<MAX_LRO_SESSIONS; i++) {
7461 lro_t *l_lro = &sp->lro0_n[i]; 7426 struct lro *l_lro = &sp->lro0_n[i];
7462 if (l_lro->in_use) { 7427 if (l_lro->in_use) {
7463 if (check_for_socket_match(l_lro, ip, tcph)) 7428 if (check_for_socket_match(l_lro, ip, tcph))
7464 continue; 7429 continue;
@@ -7496,7 +7461,7 @@ s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, lro_t **lro,
7496 } 7461 }
7497 7462
7498 for (i=0; i<MAX_LRO_SESSIONS; i++) { 7463 for (i=0; i<MAX_LRO_SESSIONS; i++) {
7499 lro_t *l_lro = &sp->lro0_n[i]; 7464 struct lro *l_lro = &sp->lro0_n[i];
7500 if (!(l_lro->in_use)) { 7465 if (!(l_lro->in_use)) {
7501 *lro = l_lro; 7466 *lro = l_lro;
7502 ret = 3; /* Begin anew */ 7467 ret = 3; /* Begin anew */
@@ -7535,9 +7500,9 @@ s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, lro_t **lro,
7535 return ret; 7500 return ret;
7536} 7501}
7537 7502
7538static void clear_lro_session(lro_t *lro) 7503static void clear_lro_session(struct lro *lro)
7539{ 7504{
7540 static u16 lro_struct_size = sizeof(lro_t); 7505 static u16 lro_struct_size = sizeof(struct lro);
7541 7506
7542 memset(lro, 0, lro_struct_size); 7507 memset(lro, 0, lro_struct_size);
7543} 7508}
@@ -7547,14 +7512,14 @@ static void queue_rx_frame(struct sk_buff *skb)
7547 struct net_device *dev = skb->dev; 7512 struct net_device *dev = skb->dev;
7548 7513
7549 skb->protocol = eth_type_trans(skb, dev); 7514 skb->protocol = eth_type_trans(skb, dev);
-#ifdef CONFIG_S2IO_NAPI
-	netif_receive_skb(skb);
-#else
-	netif_rx(skb);
-#endif
+	if (napi)
+		netif_receive_skb(skb);
+	else
+		netif_rx(skb);
 }
7556 7520
-static void lro_append_pkt(nic_t *sp, lro_t *lro, struct sk_buff *skb,
+static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
+			   struct sk_buff *skb,
 			   u32 tcp_len)
7559{ 7524{
7560 struct sk_buff *first = lro->parent; 7525 struct sk_buff *first = lro->parent;
@@ -7566,6 +7531,7 @@ static void lro_append_pkt(nic_t *sp, lro_t *lro, struct sk_buff *skb,
 		lro->last_frag->next = skb;
 	else
 		skb_shinfo(first)->frag_list = skb;
+	first->truesize += skb->truesize;
 	lro->last_frag = skb;
 	sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
 	return;
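The one-line addition above fixes skb memory accounting: when LRO chains packets on the head skb's frag_list, the head's truesize must grow by each fragment's truesize, or socket buffer accounting undercounts the aggregated frame. In miniature, with a stand-in struct:

/* truesize.c - head skb must account for every chained fragment. */
#include <stdio.h>

struct fake_skb {
	int truesize;
	struct fake_skb *frag;
};

static void append(struct fake_skb *first, struct fake_skb *skb)
{
	first->frag = skb;			/* chain the fragment */
	first->truesize += skb->truesize;	/* the fix: account its memory */
}

int main(void)
{
	struct fake_skb head = { 2048, 0 }, frag = { 2048, 0 };

	append(&head, &frag);
	printf("head truesize = %d\n", head.truesize);	/* 4096, not 2048 */
	return 0;
}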
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 3b0bafd273c8..0de0c65f945a 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -30,6 +30,8 @@
30#undef SUCCESS 30#undef SUCCESS
31#define SUCCESS 0 31#define SUCCESS 0
32#define FAILURE -1 32#define FAILURE -1
33#define S2IO_MINUS_ONE 0xFFFFFFFFFFFFFFFFULL
34#define S2IO_MAX_PCI_CONFIG_SPACE_REINIT 100
33 35
34#define CHECKBIT(value, nbit) (value & (1 << nbit)) 36#define CHECKBIT(value, nbit) (value & (1 << nbit))
35 37
@@ -37,7 +39,7 @@
37#define MAX_FLICKER_TIME 60000 /* 60 Secs */ 39#define MAX_FLICKER_TIME 60000 /* 60 Secs */
38 40
39/* Maximum outstanding splits to be configured into xena. */ 41/* Maximum outstanding splits to be configured into xena. */
40typedef enum xena_max_outstanding_splits { 42enum {
41 XENA_ONE_SPLIT_TRANSACTION = 0, 43 XENA_ONE_SPLIT_TRANSACTION = 0,
42 XENA_TWO_SPLIT_TRANSACTION = 1, 44 XENA_TWO_SPLIT_TRANSACTION = 1,
43 XENA_THREE_SPLIT_TRANSACTION = 2, 45 XENA_THREE_SPLIT_TRANSACTION = 2,
@@ -46,7 +48,7 @@ typedef enum xena_max_outstanding_splits {
46 XENA_TWELVE_SPLIT_TRANSACTION = 5, 48 XENA_TWELVE_SPLIT_TRANSACTION = 5,
47 XENA_SIXTEEN_SPLIT_TRANSACTION = 6, 49 XENA_SIXTEEN_SPLIT_TRANSACTION = 6,
48 XENA_THIRTYTWO_SPLIT_TRANSACTION = 7 50 XENA_THIRTYTWO_SPLIT_TRANSACTION = 7
49} xena_max_outstanding_splits; 51};
50#define XENA_MAX_OUTSTANDING_SPLITS(n) (n << 4) 52#define XENA_MAX_OUTSTANDING_SPLITS(n) (n << 4)
51 53
52/* OS concerned variables and constants */ 54/* OS concerned variables and constants */
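The header-side changes below all apply one rule from the kernel's CodingStyle: drop the `_t` typedefs and use plain tagged structs, which keeps it visible at every use site that a type is a struct. The shape of the conversion, in a compilable nutshell:

/* typedef_removal.c - the naming convention this series moves to. */
#include <stdio.h>

/* before: typedef struct { ... } swStat_t;  (hides that it is a struct) */
struct sw_stat {				/* after: plain tagged struct */
	unsigned long long single_ecc_errs;
};

int main(void)
{
	struct sw_stat s = { 1 };

	printf("%llu\n", s.single_ecc_errs);
	return 0;
}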
@@ -77,7 +79,7 @@ static int debug_level = ERR_DBG;
77#define S2IO_JUMBO_SIZE 9600 79#define S2IO_JUMBO_SIZE 9600
78 80
79/* Driver statistics maintained by driver */ 81/* Driver statistics maintained by driver */
80typedef struct { 82struct swStat {
81 unsigned long long single_ecc_errs; 83 unsigned long long single_ecc_errs;
82 unsigned long long double_ecc_errs; 84 unsigned long long double_ecc_errs;
83 unsigned long long parity_err_cnt; 85 unsigned long long parity_err_cnt;
@@ -92,10 +94,10 @@ typedef struct {
92 unsigned long long flush_max_pkts; 94 unsigned long long flush_max_pkts;
93 unsigned long long sum_avg_pkts_aggregated; 95 unsigned long long sum_avg_pkts_aggregated;
94 unsigned long long num_aggregations; 96 unsigned long long num_aggregations;
95} swStat_t; 97};
96 98
-/* Xpak releated alarm and warnings */
-typedef struct {
+/* Xpak related alarm and warnings */
+struct xpakStat {
99 u64 alarm_transceiver_temp_high; 101 u64 alarm_transceiver_temp_high;
100 u64 alarm_transceiver_temp_low; 102 u64 alarm_transceiver_temp_low;
101 u64 alarm_laser_bias_current_high; 103 u64 alarm_laser_bias_current_high;
@@ -110,11 +112,11 @@ typedef struct {
110 u64 warn_laser_output_power_low; 112 u64 warn_laser_output_power_low;
111 u64 xpak_regs_stat; 113 u64 xpak_regs_stat;
112 u32 xpak_timer_count; 114 u32 xpak_timer_count;
113} xpakStat_t; 115};
114 116
115 117
116/* The statistics block of Xena */ 118/* The statistics block of Xena */
117typedef struct stat_block { 119struct stat_block {
118/* Tx MAC statistics counters. */ 120/* Tx MAC statistics counters. */
119 __le32 tmac_data_octets; 121 __le32 tmac_data_octets;
120 __le32 tmac_frms; 122 __le32 tmac_frms;
@@ -290,9 +292,9 @@ typedef struct stat_block {
290 __le32 reserved_14; 292 __le32 reserved_14;
291 __le32 link_fault_cnt; 293 __le32 link_fault_cnt;
292 u8 buffer[20]; 294 u8 buffer[20];
293 swStat_t sw_stat; 295 struct swStat sw_stat;
294 xpakStat_t xpak_stat; 296 struct xpakStat xpak_stat;
295} StatInfo_t; 297};
296 298
297/* 299/*
298 * Structures representing different init time configuration 300 * Structures representing different init time configuration
@@ -315,7 +317,7 @@ static int fifo_map[][MAX_TX_FIFOS] = {
315}; 317};
316 318
317/* Maintains Per FIFO related information. */ 319/* Maintains Per FIFO related information. */
318typedef struct tx_fifo_config { 320struct tx_fifo_config {
319#define MAX_AVAILABLE_TXDS 8192 321#define MAX_AVAILABLE_TXDS 8192
320 u32 fifo_len; /* specifies len of FIFO upto 8192, ie no of TxDLs */ 322 u32 fifo_len; /* specifies len of FIFO upto 8192, ie no of TxDLs */
321/* Priority definition */ 323/* Priority definition */
@@ -332,11 +334,11 @@ typedef struct tx_fifo_config {
332 u8 f_no_snoop; 334 u8 f_no_snoop;
333#define NO_SNOOP_TXD 0x01 335#define NO_SNOOP_TXD 0x01
334#define NO_SNOOP_TXD_BUFFER 0x02 336#define NO_SNOOP_TXD_BUFFER 0x02
335} tx_fifo_config_t; 337};
336 338
337 339
338/* Maintains per Ring related information */ 340/* Maintains per Ring related information */
339typedef struct rx_ring_config { 341struct rx_ring_config {
340 u32 num_rxd; /*No of RxDs per Rx Ring */ 342 u32 num_rxd; /*No of RxDs per Rx Ring */
341#define RX_RING_PRI_0 0 /* highest */ 343#define RX_RING_PRI_0 0 /* highest */
342#define RX_RING_PRI_1 1 344#define RX_RING_PRI_1 1
@@ -357,7 +359,7 @@ typedef struct rx_ring_config {
357 u8 f_no_snoop; 359 u8 f_no_snoop;
358#define NO_SNOOP_RXD 0x01 360#define NO_SNOOP_RXD 0x01
359#define NO_SNOOP_RXD_BUFFER 0x02 361#define NO_SNOOP_RXD_BUFFER 0x02
360} rx_ring_config_t; 362};
361 363
 /* This structure contains values of the tunable parameters
363 * of the H/W 365 * of the H/W
@@ -367,7 +369,7 @@ struct config_param {
367 u32 tx_fifo_num; /*Number of Tx FIFOs */ 369 u32 tx_fifo_num; /*Number of Tx FIFOs */
368 370
369 u8 fifo_mapping[MAX_TX_FIFOS]; 371 u8 fifo_mapping[MAX_TX_FIFOS];
370 tx_fifo_config_t tx_cfg[MAX_TX_FIFOS]; /*Per-Tx FIFO config */ 372 struct tx_fifo_config tx_cfg[MAX_TX_FIFOS]; /*Per-Tx FIFO config */
371 u32 max_txds; /*Max no. of Tx buffer descriptor per TxDL */ 373 u32 max_txds; /*Max no. of Tx buffer descriptor per TxDL */
372 u64 tx_intr_type; 374 u64 tx_intr_type;
373 /* Specifies if Tx Intr is UTILZ or PER_LIST type. */ 375 /* Specifies if Tx Intr is UTILZ or PER_LIST type. */
@@ -376,7 +378,7 @@ struct config_param {
376 u32 rx_ring_num; /*Number of receive rings */ 378 u32 rx_ring_num; /*Number of receive rings */
377#define MAX_RX_BLOCKS_PER_RING 150 379#define MAX_RX_BLOCKS_PER_RING 150
378 380
379 rx_ring_config_t rx_cfg[MAX_RX_RINGS]; /*Per-Rx Ring config */ 381 struct rx_ring_config rx_cfg[MAX_RX_RINGS]; /*Per-Rx Ring config */
380 u8 bimodal; /*Flag for setting bimodal interrupts*/ 382 u8 bimodal; /*Flag for setting bimodal interrupts*/
381 383
382#define HEADER_ETHERNET_II_802_3_SIZE 14 384#define HEADER_ETHERNET_II_802_3_SIZE 14
@@ -395,14 +397,14 @@ struct config_param {
395}; 397};
396 398
397/* Structure representing MAC Addrs */ 399/* Structure representing MAC Addrs */
398typedef struct mac_addr { 400struct mac_addr {
399 u8 mac_addr[ETH_ALEN]; 401 u8 mac_addr[ETH_ALEN];
400} macaddr_t; 402};
401 403
402/* Structure that represents every FIFO element in the BAR1 404/* Structure that represents every FIFO element in the BAR1
403 * Address location. 405 * Address location.
404 */ 406 */
405typedef struct _TxFIFO_element { 407struct TxFIFO_element {
406 u64 TxDL_Pointer; 408 u64 TxDL_Pointer;
407 409
408 u64 List_Control; 410 u64 List_Control;
@@ -413,10 +415,10 @@ typedef struct _TxFIFO_element {
413#define TX_FIFO_SPECIAL_FUNC BIT(23) 415#define TX_FIFO_SPECIAL_FUNC BIT(23)
414#define TX_FIFO_DS_NO_SNOOP BIT(31) 416#define TX_FIFO_DS_NO_SNOOP BIT(31)
415#define TX_FIFO_BUFF_NO_SNOOP BIT(30) 417#define TX_FIFO_BUFF_NO_SNOOP BIT(30)
416} TxFIFO_element_t; 418};
417 419
418/* Tx descriptor structure */ 420/* Tx descriptor structure */
419typedef struct _TxD { 421struct TxD {
420 u64 Control_1; 422 u64 Control_1;
421/* bit mask */ 423/* bit mask */
422#define TXD_LIST_OWN_XENA BIT(7) 424#define TXD_LIST_OWN_XENA BIT(7)
@@ -447,16 +449,16 @@ typedef struct _TxD {
447 449
448 u64 Buffer_Pointer; 450 u64 Buffer_Pointer;
449 u64 Host_Control; /* reserved for host */ 451 u64 Host_Control; /* reserved for host */
450} TxD_t; 452};
451 453
452/* Structure to hold the phy and virt addr of every TxDL. */ 454/* Structure to hold the phy and virt addr of every TxDL. */
453typedef struct list_info_hold { 455struct list_info_hold {
454 dma_addr_t list_phy_addr; 456 dma_addr_t list_phy_addr;
455 void *list_virt_addr; 457 void *list_virt_addr;
456} list_info_hold_t; 458};
457 459
458/* Rx descriptor structure for 1 buffer mode */ 460/* Rx descriptor structure for 1 buffer mode */
459typedef struct _RxD_t { 461struct RxD_t {
460 u64 Host_Control; /* reserved for host */ 462 u64 Host_Control; /* reserved for host */
461 u64 Control_1; 463 u64 Control_1;
462#define RXD_OWN_XENA BIT(7) 464#define RXD_OWN_XENA BIT(7)
@@ -481,21 +483,21 @@ typedef struct _RxD_t {
481#define SET_NUM_TAG(val) vBIT(val,16,32) 483#define SET_NUM_TAG(val) vBIT(val,16,32)
482 484
483 485
484} RxD_t; 486};
485/* Rx descriptor structure for 1 buffer mode */ 487/* Rx descriptor structure for 1 buffer mode */
486typedef struct _RxD1_t { 488struct RxD1 {
487 struct _RxD_t h; 489 struct RxD_t h;
488 490
489#define MASK_BUFFER0_SIZE_1 vBIT(0x3FFF,2,14) 491#define MASK_BUFFER0_SIZE_1 vBIT(0x3FFF,2,14)
490#define SET_BUFFER0_SIZE_1(val) vBIT(val,2,14) 492#define SET_BUFFER0_SIZE_1(val) vBIT(val,2,14)
491#define RXD_GET_BUFFER0_SIZE_1(_Control_2) \ 493#define RXD_GET_BUFFER0_SIZE_1(_Control_2) \
492 (u16)((_Control_2 & MASK_BUFFER0_SIZE_1) >> 48) 494 (u16)((_Control_2 & MASK_BUFFER0_SIZE_1) >> 48)
493 u64 Buffer0_ptr; 495 u64 Buffer0_ptr;
494} RxD1_t; 496};
495/* Rx descriptor structure for 3 or 2 buffer mode */ 497/* Rx descriptor structure for 3 or 2 buffer mode */
496 498
497typedef struct _RxD3_t { 499struct RxD3 {
498 struct _RxD_t h; 500 struct RxD_t h;
499 501
500#define MASK_BUFFER0_SIZE_3 vBIT(0xFF,2,14) 502#define MASK_BUFFER0_SIZE_3 vBIT(0xFF,2,14)
501#define MASK_BUFFER1_SIZE_3 vBIT(0xFFFF,16,16) 503#define MASK_BUFFER1_SIZE_3 vBIT(0xFFFF,16,16)
@@ -515,15 +517,15 @@ typedef struct _RxD3_t {
515 u64 Buffer0_ptr; 517 u64 Buffer0_ptr;
516 u64 Buffer1_ptr; 518 u64 Buffer1_ptr;
517 u64 Buffer2_ptr; 519 u64 Buffer2_ptr;
518} RxD3_t; 520};
519 521
520 522
521/* Structure that represents the Rx descriptor block which contains 523/* Structure that represents the Rx descriptor block which contains
522 * 128 Rx descriptors. 524 * 128 Rx descriptors.
523 */ 525 */
524typedef struct _RxD_block { 526struct RxD_block {
525#define MAX_RXDS_PER_BLOCK_1 127 527#define MAX_RXDS_PER_BLOCK_1 127
526 RxD1_t rxd[MAX_RXDS_PER_BLOCK_1]; 528 struct RxD1 rxd[MAX_RXDS_PER_BLOCK_1];
527 529
528 u64 reserved_0; 530 u64 reserved_0;
529#define END_OF_BLOCK 0xFEFFFFFFFFFFFFFFULL 531#define END_OF_BLOCK 0xFEFFFFFFFFFFFFFFULL
@@ -533,22 +535,22 @@ typedef struct _RxD_block {
533 u64 pNext_RxD_Blk_physical; /* Buff0_ptr.In a 32 bit arch 535 u64 pNext_RxD_Blk_physical; /* Buff0_ptr.In a 32 bit arch
534 * the upper 32 bits should 536 * the upper 32 bits should
535 * be 0 */ 537 * be 0 */
536} RxD_block_t; 538};
537 539
538#define SIZE_OF_BLOCK 4096 540#define SIZE_OF_BLOCK 4096
539 541
540#define RXD_MODE_1 0 542#define RXD_MODE_1 0 /* One Buffer mode */
541#define RXD_MODE_3A 1 543#define RXD_MODE_3A 1 /* Three Buffer mode */
542#define RXD_MODE_3B 2 544#define RXD_MODE_3B 2 /* Two Buffer mode */
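The three RXD_MODE constants above select the descriptor layout a ring uses: struct RxD1 for single-buffer mode, struct RxD3 for the two- and three-buffer modes. A minimal sketch of that mapping (the helper name is hypothetical and not part of this patch):

/* Hypothetical sketch: map the mode constant to its descriptor size.
 * RXD_MODE_1 implies struct RxD1; RXD_MODE_3A and RXD_MODE_3B both
 * use the larger struct RxD3. */
static inline size_t s2io_rxd_size(int rxd_mode)
{
	return (rxd_mode == RXD_MODE_1) ? sizeof(struct RxD1)
					: sizeof(struct RxD3);
}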
543 545
544/* Structure to hold virtual addresses of Buf0 and Buf1 in 546/* Structure to hold virtual addresses of Buf0 and Buf1 in
545 * 2buf mode. */ 547 * 2buf mode. */
546typedef struct bufAdd { 548struct buffAdd {
547 void *ba_0_org; 549 void *ba_0_org;
548 void *ba_1_org; 550 void *ba_1_org;
549 void *ba_0; 551 void *ba_0;
550 void *ba_1; 552 void *ba_1;
551} buffAdd_t; 553};
552 554
553/* Structure which stores all the MAC control parameters */ 555/* Structure which stores all the MAC control parameters */
554 556
@@ -556,43 +558,46 @@ typedef struct bufAdd {
556 * from which the Rx Interrupt processor can start picking 558 * from which the Rx Interrupt processor can start picking
557 * up the RxDs for processing. 559 * up the RxDs for processing.
558 */ 560 */
559typedef struct _rx_curr_get_info_t { 561struct rx_curr_get_info {
560 u32 block_index; 562 u32 block_index;
561 u32 offset; 563 u32 offset;
562 u32 ring_len; 564 u32 ring_len;
563} rx_curr_get_info_t; 565};
564 566
565typedef rx_curr_get_info_t rx_curr_put_info_t; 567struct rx_curr_put_info {
568 u32 block_index;
569 u32 offset;
570 u32 ring_len;
571};
566 572
567/* This structure stores the offset of the TxDl in the FIFO 573/* This structure stores the offset of the TxDl in the FIFO
568 * from which the Tx Interrupt processor can start picking 574 * from which the Tx Interrupt processor can start picking
569 * up the TxDLs for send complete interrupt processing. 575 * up the TxDLs for send complete interrupt processing.
570 */ 576 */
571typedef struct { 577struct tx_curr_get_info {
572 u32 offset; 578 u32 offset;
573 u32 fifo_len; 579 u32 fifo_len;
574} tx_curr_get_info_t; 580};
575
576typedef tx_curr_get_info_t tx_curr_put_info_t;
577 581
582struct tx_curr_put_info {
583 u32 offset;
584 u32 fifo_len;
585};
578 586
579typedef struct rxd_info { 587struct rxd_info {
580 void *virt_addr; 588 void *virt_addr;
581 dma_addr_t dma_addr; 589 dma_addr_t dma_addr;
582}rxd_info_t; 590};
583 591
584/* Structure that holds the Phy and virt addresses of the Blocks */ 592/* Structure that holds the Phy and virt addresses of the Blocks */
585typedef struct rx_block_info { 593struct rx_block_info {
586 void *block_virt_addr; 594 void *block_virt_addr;
587 dma_addr_t block_dma_addr; 595 dma_addr_t block_dma_addr;
588 rxd_info_t *rxds; 596 struct rxd_info *rxds;
589} rx_block_info_t; 597};
590
591/* pre declaration of the nic structure */
592typedef struct s2io_nic nic_t;
593 598
594/* Ring specific structure */ 599/* Ring specific structure */
595typedef struct ring_info { 600struct ring_info {
596 /* The ring number */ 601 /* The ring number */
597 int ring_no; 602 int ring_no;
598 603
@@ -600,7 +605,7 @@ typedef struct ring_info {
600 * Place holders for the virtual and physical addresses of 605 * Place holders for the virtual and physical addresses of
601 * all the Rx Blocks 606 * all the Rx Blocks
602 */ 607 */
603 rx_block_info_t rx_blocks[MAX_RX_BLOCKS_PER_RING]; 608 struct rx_block_info rx_blocks[MAX_RX_BLOCKS_PER_RING];
604 int block_count; 609 int block_count;
605 int pkt_cnt; 610 int pkt_cnt;
606 611
@@ -608,26 +613,24 @@ typedef struct ring_info {
608 * Put pointer info which indicates which RxD has to be replenished 613 * Put pointer info which indicates which RxD has to be replenished
609 * with a new buffer. 614 * with a new buffer.
610 */ 615 */
611 rx_curr_put_info_t rx_curr_put_info; 616 struct rx_curr_put_info rx_curr_put_info;
612 617
613 /* 618 /*
614 * Get pointer info which indicates which is the last RxD that was 619 * Get pointer info which indicates which is the last RxD that was
615 * processed by the driver. 620 * processed by the driver.
616 */ 621 */
617 rx_curr_get_info_t rx_curr_get_info; 622 struct rx_curr_get_info rx_curr_get_info;
618 623
619#ifndef CONFIG_S2IO_NAPI
620 /* Index to the absolute position of the put pointer of Rx ring */ 624 /* Index to the absolute position of the put pointer of Rx ring */
621 int put_pos; 625 int put_pos;
622#endif
623 626
624 /* Buffer Address store. */ 627 /* Buffer Address store. */
625 buffAdd_t **ba; 628 struct buffAdd **ba;
626 nic_t *nic; 629 struct s2io_nic *nic;
627} ring_info_t; 630};
628 631
629/* Fifo specific structure */ 632/* Fifo specific structure */
630typedef struct fifo_info { 633struct fifo_info {
631 /* FIFO number */ 634 /* FIFO number */
632 int fifo_no; 635 int fifo_no;
633 636
@@ -635,40 +638,40 @@ typedef struct fifo_info {
635 int max_txds; 638 int max_txds;
636 639
637 /* Place holder of all the TX List's Phy and Virt addresses. */ 640 /* Place holder of all the TX List's Phy and Virt addresses. */
638 list_info_hold_t *list_info; 641 struct list_info_hold *list_info;
639 642
640 /* 643 /*
641 * Current offset within the tx FIFO where driver would write 644 * Current offset within the tx FIFO where driver would write
642 * new Tx frame 645 * new Tx frame
643 */ 646 */
644 tx_curr_put_info_t tx_curr_put_info; 647 struct tx_curr_put_info tx_curr_put_info;
645 648
646 /* 649 /*
647 * Current offset within tx FIFO from where the driver would start freeing 650 * Current offset within tx FIFO from where the driver would start freeing
648 * the buffers 651 * the buffers
649 */ 652 */
650 tx_curr_get_info_t tx_curr_get_info; 653 struct tx_curr_get_info tx_curr_get_info;
651 654
652 nic_t *nic; 655 struct s2io_nic *nic;
653}fifo_info_t; 656};
654 657
655/* Information related to the Tx and Rx FIFOs and Rings of Xena 658/* Information related to the Tx and Rx FIFOs and Rings of Xena
656 * is maintained in this structure. 659 * is maintained in this structure.
657 */ 660 */
658typedef struct mac_info { 661struct mac_info {
659/* tx side stuff */ 662/* tx side stuff */
660 /* logical pointer of start of each Tx FIFO */ 663 /* logical pointer of start of each Tx FIFO */
661 TxFIFO_element_t __iomem *tx_FIFO_start[MAX_TX_FIFOS]; 664 struct TxFIFO_element __iomem *tx_FIFO_start[MAX_TX_FIFOS];
662 665
663 /* Fifo specific structure */ 666 /* Fifo specific structure */
664 fifo_info_t fifos[MAX_TX_FIFOS]; 667 struct fifo_info fifos[MAX_TX_FIFOS];
665 668
666 /* Save virtual address of TxD page with zero DMA addr(if any) */ 669 /* Save virtual address of TxD page with zero DMA addr(if any) */
667 void *zerodma_virt_addr; 670 void *zerodma_virt_addr;
668 671
669/* rx side stuff */ 672/* rx side stuff */
670 /* Ring specific structure */ 673 /* Ring specific structure */
671 ring_info_t rings[MAX_RX_RINGS]; 674 struct ring_info rings[MAX_RX_RINGS];
672 675
673 u16 rmac_pause_time; 676 u16 rmac_pause_time;
674 u16 mc_pause_threshold_q0q3; 677 u16 mc_pause_threshold_q0q3;
@@ -677,14 +680,14 @@ typedef struct mac_info {
677 void *stats_mem; /* original pointer to allocated mem */ 680 void *stats_mem; /* original pointer to allocated mem */
678 dma_addr_t stats_mem_phy; /* Physical address of the stat block */ 681 dma_addr_t stats_mem_phy; /* Physical address of the stat block */
679 u32 stats_mem_sz; 682 u32 stats_mem_sz;
680 StatInfo_t *stats_info; /* Logical address of the stat block */ 683 struct stat_block *stats_info; /* Logical address of the stat block */
681} mac_info_t; 684};
682 685
683/* structure representing the user defined MAC addresses */ 686/* structure representing the user defined MAC addresses */
684typedef struct { 687struct usr_addr {
685 char addr[ETH_ALEN]; 688 char addr[ETH_ALEN];
686 int usage_cnt; 689 int usage_cnt;
687} usr_addr_t; 690};
688 691
689/* Default Tunable parameters of the NIC. */ 692/* Default Tunable parameters of the NIC. */
690#define DEFAULT_FIFO_0_LEN 4096 693#define DEFAULT_FIFO_0_LEN 4096
@@ -717,36 +720,34 @@ struct msix_info_st {
717}; 720};
718 721
719/* Data structure to represent a LRO session */ 722/* Data structure to represent a LRO session */
720typedef struct lro { 723struct lro {
721 struct sk_buff *parent; 724 struct sk_buff *parent;
722 struct sk_buff *last_frag; 725 struct sk_buff *last_frag;
723 u8 *l2h; 726 u8 *l2h;
724 struct iphdr *iph; 727 struct iphdr *iph;
725 struct tcphdr *tcph; 728 struct tcphdr *tcph;
726 u32 tcp_next_seq; 729 u32 tcp_next_seq;
727 u32 tcp_ack; 730 __be32 tcp_ack;
728 int total_len; 731 int total_len;
729 int frags_len; 732 int frags_len;
730 int sg_num; 733 int sg_num;
731 int in_use; 734 int in_use;
732 u16 window; 735 __be16 window;
733 u32 cur_tsval; 736 u32 cur_tsval;
734 u32 cur_tsecr; 737 u32 cur_tsecr;
735 u8 saw_ts; 738 u8 saw_ts;
736}lro_t; 739};
737 740
738/* Structure representing one instance of the NIC */ 741/* Structure representing one instance of the NIC */
739struct s2io_nic { 742struct s2io_nic {
740 int rxd_mode; 743 int rxd_mode;
741#ifdef CONFIG_S2IO_NAPI
742 /* 744 /*
743 * Count of packets to be processed in a given iteration; it will be indicated 745 * Count of packets to be processed in a given iteration; it will be indicated
744 * by the quota field of the device structure when NAPI is enabled. 746 * by the quota field of the device structure when NAPI is enabled.
745 */ 747 */
746 int pkts_to_process; 748 int pkts_to_process;
747#endif
748 struct net_device *dev; 749 struct net_device *dev;
749 mac_info_t mac_control; 750 struct mac_info mac_control;
750 struct config_param config; 751 struct config_param config;
751 struct pci_dev *pdev; 752 struct pci_dev *pdev;
752 void __iomem *bar0; 753 void __iomem *bar0;
@@ -754,8 +755,8 @@ struct s2io_nic {
754#define MAX_MAC_SUPPORTED 16 755#define MAX_MAC_SUPPORTED 16
755#define MAX_SUPPORTED_MULTICASTS MAX_MAC_SUPPORTED 756#define MAX_SUPPORTED_MULTICASTS MAX_MAC_SUPPORTED
756 757
757 macaddr_t def_mac_addr[MAX_MAC_SUPPORTED]; 758 struct mac_addr def_mac_addr[MAX_MAC_SUPPORTED];
758 macaddr_t pre_mac_addr[MAX_MAC_SUPPORTED]; 759 struct mac_addr pre_mac_addr[MAX_MAC_SUPPORTED];
759 760
760 struct net_device_stats stats; 761 struct net_device_stats stats;
761 int high_dma_flag; 762 int high_dma_flag;
@@ -775,9 +776,7 @@ struct s2io_nic {
775 atomic_t rx_bufs_left[MAX_RX_RINGS]; 776 atomic_t rx_bufs_left[MAX_RX_RINGS];
776 777
777 spinlock_t tx_lock; 778 spinlock_t tx_lock;
778#ifndef CONFIG_S2IO_NAPI
779 spinlock_t put_lock; 779 spinlock_t put_lock;
780#endif
781 780
782#define PROMISC 1 781#define PROMISC 1
783#define ALL_MULTI 2 782#define ALL_MULTI 2
@@ -785,7 +784,7 @@ struct s2io_nic {
785#define MAX_ADDRS_SUPPORTED 64 784#define MAX_ADDRS_SUPPORTED 64
786 u16 usr_addr_count; 785 u16 usr_addr_count;
787 u16 mc_addr_count; 786 u16 mc_addr_count;
788 usr_addr_t usr_addrs[MAX_ADDRS_SUPPORTED]; 787 struct usr_addr usr_addrs[MAX_ADDRS_SUPPORTED];
789 788
790 u16 m_cast_flg; 789 u16 m_cast_flg;
791 u16 all_multi_pos; 790 u16 all_multi_pos;
@@ -841,7 +840,7 @@ struct s2io_nic {
841 u8 device_type; 840 u8 device_type;
842 841
843#define MAX_LRO_SESSIONS 32 842#define MAX_LRO_SESSIONS 32
844 lro_t lro0_n[MAX_LRO_SESSIONS]; 843 struct lro lro0_n[MAX_LRO_SESSIONS];
845 unsigned long clubbed_frms_cnt; 844 unsigned long clubbed_frms_cnt;
846 unsigned long sending_both; 845 unsigned long sending_both;
847 u8 lro; 846 u8 lro;
@@ -855,8 +854,9 @@ struct s2io_nic {
855 spinlock_t rx_lock; 854 spinlock_t rx_lock;
856 atomic_t isr_cnt; 855 atomic_t isr_cnt;
857 u64 *ufo_in_band_v; 856 u64 *ufo_in_band_v;
858#define VPD_PRODUCT_NAME_LEN 50 857#define VPD_STRING_LEN 80
859 u8 product_name[VPD_PRODUCT_NAME_LEN]; 858 u8 product_name[VPD_STRING_LEN];
859 u8 serial_num[VPD_STRING_LEN];
860}; 860};
861 861
862#define RESET_ERROR 1; 862#define RESET_ERROR 1;
@@ -975,43 +975,50 @@ static void __devexit s2io_rem_nic(struct pci_dev *pdev);
975static int init_shared_mem(struct s2io_nic *sp); 975static int init_shared_mem(struct s2io_nic *sp);
976static void free_shared_mem(struct s2io_nic *sp); 976static void free_shared_mem(struct s2io_nic *sp);
977static int init_nic(struct s2io_nic *nic); 977static int init_nic(struct s2io_nic *nic);
978static void rx_intr_handler(ring_info_t *ring_data); 978static void rx_intr_handler(struct ring_info *ring_data);
979static void tx_intr_handler(fifo_info_t *fifo_data); 979static void tx_intr_handler(struct fifo_info *fifo_data);
980static void alarm_intr_handler(struct s2io_nic *sp); 980static void alarm_intr_handler(struct s2io_nic *sp);
981 981
982static int s2io_starter(void); 982static int s2io_starter(void);
983static void s2io_closer(void);
983static void s2io_tx_watchdog(struct net_device *dev); 984static void s2io_tx_watchdog(struct net_device *dev);
984static void s2io_tasklet(unsigned long dev_addr); 985static void s2io_tasklet(unsigned long dev_addr);
985static void s2io_set_multicast(struct net_device *dev); 986static void s2io_set_multicast(struct net_device *dev);
986static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp); 987static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp);
987static void s2io_link(nic_t * sp, int link); 988static void s2io_link(struct s2io_nic * sp, int link);
988#if defined(CONFIG_S2IO_NAPI) 989static void s2io_reset(struct s2io_nic * sp);
989static int s2io_poll(struct net_device *dev, int *budget); 990static int s2io_poll(struct net_device *dev, int *budget);
990#endif 991static void s2io_init_pci(struct s2io_nic * sp);
991static void s2io_init_pci(nic_t * sp);
992static int s2io_set_mac_addr(struct net_device *dev, u8 * addr); 992static int s2io_set_mac_addr(struct net_device *dev, u8 * addr);
993static void s2io_alarm_handle(unsigned long data); 993static void s2io_alarm_handle(unsigned long data);
994static int s2io_enable_msi(nic_t *nic); 994static int s2io_enable_msi(struct s2io_nic *nic);
995static irqreturn_t s2io_msi_handle(int irq, void *dev_id); 995static irqreturn_t s2io_msi_handle(int irq, void *dev_id);
996static irqreturn_t 996static irqreturn_t
997s2io_msix_ring_handle(int irq, void *dev_id); 997s2io_msix_ring_handle(int irq, void *dev_id);
998static irqreturn_t 998static irqreturn_t
999s2io_msix_fifo_handle(int irq, void *dev_id); 999s2io_msix_fifo_handle(int irq, void *dev_id);
1000static irqreturn_t s2io_isr(int irq, void *dev_id); 1000static irqreturn_t s2io_isr(int irq, void *dev_id);
1001static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag); 1001static int verify_xena_quiescence(struct s2io_nic *sp);
1002static const struct ethtool_ops netdev_ethtool_ops; 1002static const struct ethtool_ops netdev_ethtool_ops;
1003static void s2io_set_link(struct work_struct *work); 1003static void s2io_set_link(struct work_struct *work);
1004static int s2io_set_swapper(nic_t * sp); 1004static int s2io_set_swapper(struct s2io_nic * sp);
1005static void s2io_card_down(nic_t *nic); 1005static void s2io_card_down(struct s2io_nic *nic);
1006static int s2io_card_up(nic_t *nic); 1006static int s2io_card_up(struct s2io_nic *nic);
1007static int get_xena_rev_id(struct pci_dev *pdev); 1007static int get_xena_rev_id(struct pci_dev *pdev);
1008static void restore_xmsi_data(nic_t *nic); 1008static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit);
1009static int s2io_add_isr(struct s2io_nic * sp);
1010static void s2io_rem_isr(struct s2io_nic * sp);
1011
1012static void restore_xmsi_data(struct s2io_nic *nic);
1009 1013
1010static int s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, lro_t **lro, RxD_t *rxdp, nic_t *sp); 1014static int
1011static void clear_lro_session(lro_t *lro); 1015s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro,
1016 struct RxD_t *rxdp, struct s2io_nic *sp);
1017static void clear_lro_session(struct lro *lro);
1012static void queue_rx_frame(struct sk_buff *skb); 1018static void queue_rx_frame(struct sk_buff *skb);
1013static void update_L3L4_header(nic_t *sp, lro_t *lro); 1019static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro);
1014static void lro_append_pkt(nic_t *sp, lro_t *lro, struct sk_buff *skb, u32 tcp_len); 1020static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
1021 struct sk_buff *skb, u32 tcp_len);
1015 1022
1016#define s2io_tcp_mss(skb) skb_shinfo(skb)->gso_size 1023#define s2io_tcp_mss(skb) skb_shinfo(skb)->gso_size
1017#define s2io_udp_mss(skb) skb_shinfo(skb)->gso_size 1024#define s2io_udp_mss(skb) skb_shinfo(skb)->gso_size
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c
new file mode 100644
index 000000000000..7f800feaa9a2
--- /dev/null
+++ b/drivers/net/sc92031.c
@@ -0,0 +1,1620 @@
1/* Silan SC92031 PCI Fast Ethernet Adapter driver
2 *
3 * Based on vendor drivers:
4 * Silan Fast Ethernet Netcard Driver:
5 * MODULE_AUTHOR ("gaoyonghong");
6 * MODULE_DESCRIPTION ("SILAN Fast Ethernet driver");
7 * MODULE_LICENSE("GPL");
8 * 8139D Fast Ethernet driver:
9 * (C) 2002 by gaoyonghong
10 * MODULE_AUTHOR ("gaoyonghong");
11 * MODULE_DESCRIPTION ("Rsltek 8139D PCI Fast Ethernet Adapter driver");
12 * MODULE_LICENSE("GPL");
13 * Both are almost identical and seem to be based on pci-skeleton.c
14 *
15 * Rewritten for 2.6 by Cesar Eduardo Barros
16 */
17
18/* Note about set_mac_address: I don't know how to change the hardware
19 * matching, so you need to enable IFF_PROMISC when using it.
20 */
21
22#include <linux/module.h>
23#include <linux/kernel.h>
24#include <linux/delay.h>
25#include <linux/pci.h>
26#include <linux/dma-mapping.h>
27#include <linux/netdevice.h>
28#include <linux/etherdevice.h>
29#include <linux/ethtool.h>
30#include <linux/crc32.h>
31
32#include <asm/irq.h>
33
34#define PCI_VENDOR_ID_SILAN 0x1904
35#define PCI_DEVICE_ID_SILAN_SC92031 0x2031
36#define PCI_DEVICE_ID_SILAN_8139D 0x8139
37
38#define SC92031_NAME "sc92031"
39#define SC92031_DESCRIPTION "Silan SC92031 PCI Fast Ethernet Adapter driver"
40#define SC92031_VERSION "2.0c"
41
42/* BAR 0 is MMIO, BAR 1 is PIO */
43#ifndef SC92031_USE_BAR
44#define SC92031_USE_BAR 0
45#endif
46
47/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). */
48static int multicast_filter_limit = 64;
49module_param(multicast_filter_limit, int, 0);
50MODULE_PARM_DESC(multicast_filter_limit,
51 "Maximum number of filtered multicast addresses");
52
53static int media;
54module_param(media, int, 0);
55MODULE_PARM_DESC(media, "Media type (0x00 = autodetect,"
56 " 0x01 = 10M half, 0x02 = 10M full,"
57 " 0x04 = 100M half, 0x08 = 100M full)");
58
59/* Size of the in-memory receive ring. */
60#define RX_BUF_LEN_IDX 3 /* 0==8K, 1==16K, 2==32K, 3==64K, 4==128K */
61#define RX_BUF_LEN (8192 << RX_BUF_LEN_IDX)
62
63/* Number of Tx descriptor registers. */
64#define NUM_TX_DESC 4
65
66/* max supported ethernet frame size -- must be at least (dev->mtu+14+4).*/
67#define MAX_ETH_FRAME_SIZE 1536
68
69/* Size of the Tx bounce buffers -- must be at least (dev->mtu+14+4). */
70#define TX_BUF_SIZE MAX_ETH_FRAME_SIZE
71#define TX_BUF_TOT_LEN (TX_BUF_SIZE * NUM_TX_DESC)
72
73/* The following settings are log_2(bytes)-4: 0 == 16 bytes .. 6==1024, 7==end of packet. */
74#define RX_FIFO_THRESH 7 /* Rx buffer level before first PCI xfer. */
75
76/* Time in jiffies before concluding the transmitter is hung. */
77#define TX_TIMEOUT (4*HZ)
78
79#define SILAN_STATS_NUM 2 /* number of ETHTOOL_GSTATS */
80
81/* media options */
82#define AUTOSELECT 0x00
83#define M10_HALF 0x01
84#define M10_FULL 0x02
85#define M100_HALF 0x04
86#define M100_FULL 0x08
87
88 /* Symbolic offsets to registers. */
89enum silan_registers {
90 Config0 = 0x00, // Config0
91 Config1 = 0x04, // Config1
92 RxBufWPtr = 0x08, // Rx buffer write pointer
93 IntrStatus = 0x0C, // Interrupt status
94 IntrMask = 0x10, // Interrupt mask
95 RxbufAddr = 0x14, // Rx buffer start address
96 RxBufRPtr = 0x18, // Rx buffer read pointer
97 Txstatusall = 0x1C, // Transmit status of all descriptors
98 TxStatus0 = 0x20, // Transmit status (Four 32bit registers).
99 TxAddr0 = 0x30, // Tx descriptors (also four 32bit).
100 RxConfig = 0x40, // Rx configuration
101 MAC0 = 0x44, // Ethernet hardware address.
102 MAR0 = 0x4C, // Multicast filter.
103 RxStatus0 = 0x54, // Rx status
104 TxConfig = 0x5C, // Tx configuration
105 PhyCtrl = 0x60, // physical control
106 FlowCtrlConfig = 0x64, // flow control
107 Miicmd0 = 0x68, // Mii command0 register
108 Miicmd1 = 0x6C, // Mii command1 register
109 Miistatus = 0x70, // Mii status register
110 Timercnt = 0x74, // Timer counter register
111 TimerIntr = 0x78, // Timer interrupt register
112 PMConfig = 0x7C, // Power Manager configuration
113 CRC0 = 0x80, // Power Manager CRC (Two 32-bit registers)
114 Wakeup0 = 0x88, // Power Manager wakeup (Eight 64-bit registers)
115 LSBCRC0 = 0xC8, // Power Manager LSBCRC (Two 32-bit registers)
116 TestD0 = 0xD0,
117 TestD4 = 0xD4,
118 TestD8 = 0xD8,
119};
120
121#define MII_BMCR 0 // Basic mode control register
122#define MII_BMSR 1 // Basic mode status register
123#define MII_JAB 16
124#define MII_OutputStatus 24
125
126#define BMCR_FULLDPLX 0x0100 // Full duplex
127#define BMCR_ANRESTART 0x0200 // Auto negotiation restart
128#define BMCR_ANENABLE 0x1000 // Enable auto negotiation
129#define BMCR_SPEED100 0x2000 // Select 100Mbps
130#define BMSR_LSTATUS 0x0004 // Link status
131#define PHY_16_JAB_ENB 0x1000
132#define PHY_16_PORT_ENB 0x1
133
134enum IntrStatusBits {
135 LinkFail = 0x80000000,
136 LinkOK = 0x40000000,
137 TimeOut = 0x20000000,
138 RxOverflow = 0x0040,
139 RxOK = 0x0020,
140 TxOK = 0x0001,
141 IntrBits = LinkFail|LinkOK|TimeOut|RxOverflow|RxOK|TxOK,
142};
143
144enum TxStatusBits {
145 TxCarrierLost = 0x20000000,
146 TxAborted = 0x10000000,
147 TxOutOfWindow = 0x08000000,
148 TxNccShift = 22,
149 EarlyTxThresShift = 16,
150 TxStatOK = 0x8000,
151 TxUnderrun = 0x4000,
152 TxOwn = 0x2000,
153};
154
155enum RxStatusBits {
156 RxStatesOK = 0x80000,
157 RxBadAlign = 0x40000,
158 RxHugeFrame = 0x20000,
159 RxSmallFrame = 0x10000,
160 RxCRCOK = 0x8000,
161 RxCrlFrame = 0x4000,
162 Rx_Broadcast = 0x2000,
163 Rx_Multicast = 0x1000,
164 RxAddrMatch = 0x0800,
165 MiiErr = 0x0400,
166};
167
168enum RxConfigBits {
169 RxFullDx = 0x80000000,
170 RxEnb = 0x40000000,
171 RxSmall = 0x20000000,
172 RxHuge = 0x10000000,
173 RxErr = 0x08000000,
174 RxAllphys = 0x04000000,
175 RxMulticast = 0x02000000,
176 RxBroadcast = 0x01000000,
177 RxLoopBack = (1 << 23) | (1 << 22),
178 LowThresholdShift = 12,
179 HighThresholdShift = 2,
180};
181
182enum TxConfigBits {
183 TxFullDx = 0x80000000,
184 TxEnb = 0x40000000,
185 TxEnbPad = 0x20000000,
186 TxEnbHuge = 0x10000000,
187 TxEnbFCS = 0x08000000,
188 TxNoBackOff = 0x04000000,
189 TxEnbPrem = 0x02000000,
190 TxCareLostCrs = 0x1000000,
191 TxExdCollNum = 0xf00000,
192 TxDataRate = 0x80000,
193};
194
195enum PhyCtrlconfigbits {
196 PhyCtrlAne = 0x80000000,
197 PhyCtrlSpd100 = 0x40000000,
198 PhyCtrlSpd10 = 0x20000000,
199 PhyCtrlPhyBaseAddr = 0x1f000000,
200 PhyCtrlDux = 0x800000,
201 PhyCtrlReset = 0x400000,
202};
203
204enum FlowCtrlConfigBits {
205 FlowCtrlFullDX = 0x80000000,
206 FlowCtrlEnb = 0x40000000,
207};
208
209enum Config0Bits {
210 Cfg0_Reset = 0x80000000,
211 Cfg0_Anaoff = 0x40000000,
212 Cfg0_LDPS = 0x20000000,
213};
214
215enum Config1Bits {
216 Cfg1_EarlyRx = 1 << 31,
217 Cfg1_EarlyTx = 1 << 30,
218
219 //rx buffer size
220 Cfg1_Rcv8K = 0x0,
221 Cfg1_Rcv16K = 0x1,
222 Cfg1_Rcv32K = 0x3,
223 Cfg1_Rcv64K = 0x7,
224 Cfg1_Rcv128K = 0xf,
225};
226
227enum MiiCmd0Bits {
228 Mii_Divider = 0x20000000,
229 Mii_WRITE = 0x400000,
230 Mii_READ = 0x200000,
231 Mii_SCAN = 0x100000,
232 Mii_Tamod = 0x80000,
233 Mii_Drvmod = 0x40000,
234 Mii_mdc = 0x20000,
235 Mii_mdoen = 0x10000,
236 Mii_mdo = 0x8000,
237 Mii_mdi = 0x4000,
238};
239
240enum MiiStatusBits {
241 Mii_StatusBusy = 0x80000000,
242};
243
244enum PMConfigBits {
245 PM_Enable = 1 << 31,
246 PM_LongWF = 1 << 30,
247 PM_Magic = 1 << 29,
248 PM_LANWake = 1 << 28,
249 PM_LWPTN = (1 << 27 | 1 << 26),
250 PM_LinkUp = 1 << 25,
251 PM_WakeUp = 1 << 24,
252};
253
254/* Locking rules:
255 * priv->lock protects most of the fields of priv and most of the
256 * hardware registers. It does not have to protect against softirqs
257 * between sc92031_disable_interrupts and sc92031_enable_interrupts;
258 * it also does not need to be used in ->open and ->stop while the
259 * device interrupts are off.
260 * Not having to protect against softirqs is very useful due to the
261 * heavy use of mdelay() in _sc92031_reset.
262 * Functions prefixed with _sc92031_ must be called with the lock held;
263 * functions prefixed with sc92031_ must be called without the lock held.
264 * Use mmiowb() before unlocking if the hardware was written to.
265 */
266
267/* Locking rules for the interrupt:
268 * - the interrupt and the tasklet never run at the same time
269 * - neither run between sc92031_disable_interrupts and
270 * sc92031_enable_interrupts
271 */
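A minimal sketch of how these rules compose (the function names here are hypothetical, chosen only to illustrate the convention): a lock-free sc92031_-prefixed entry point takes priv->lock, delegates the register work to a _sc92031_-prefixed helper, and issues mmiowb() before unlocking.

/* Hypothetical illustration of the locking convention described above. */
static void _sc92031_poke_hw(struct sc92031_priv *priv)
{
	/* caller holds priv->lock */
	iowrite32(0, priv->port_base + IntrMask);
}

static void sc92031_poke_hw(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);

	spin_lock_bh(&priv->lock);
	_sc92031_poke_hw(priv);
	mmiowb();	/* flush MMIO writes before dropping the lock */
	spin_unlock_bh(&priv->lock);
}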
272
273struct sc92031_priv {
274 spinlock_t lock;
275 /* iomap.h cookie */
276 void __iomem *port_base;
277 /* pci device structure */
278 struct pci_dev *pdev;
279 /* tasklet */
280 struct tasklet_struct tasklet;
281
282 /* CPU address of rx ring */
283 void *rx_ring;
284 /* PCI address of rx ring */
285 dma_addr_t rx_ring_dma_addr;
286 /* PCI address of rx ring read pointer */
287 dma_addr_t rx_ring_tail;
288
289 /* tx ring write index */
290 unsigned tx_head;
291 /* tx ring read index */
292 unsigned tx_tail;
293 /* CPU address of tx bounce buffer */
294 void *tx_bufs;
295 /* PCI address of tx bounce buffer */
296 dma_addr_t tx_bufs_dma_addr;
297
298 /* copies of some hardware registers */
299 u32 intr_status;
300 atomic_t intr_mask;
301 u32 rx_config;
302 u32 tx_config;
303 u32 pm_config;
304
305 /* copy of some flags from dev->flags */
306 unsigned int mc_flags;
307
308 /* for ETHTOOL_GSTATS */
309 u64 tx_timeouts;
310 u64 rx_loss;
311
312 /* for dev->get_stats */
313 long rx_value;
314 struct net_device_stats stats;
315};
316
317/* I don't know which registers can be safely read; however, I can guess
318 * MAC0 is one of them. */
319static inline void _sc92031_dummy_read(void __iomem *port_base)
320{
321 ioread32(port_base + MAC0);
322}
323
324static u32 _sc92031_mii_wait(void __iomem *port_base)
325{
326 u32 mii_status;
327
328 do {
329 udelay(10);
330 mii_status = ioread32(port_base + Miistatus);
331 } while (mii_status & Mii_StatusBusy);
332
333 return mii_status;
334}
335
336static u32 _sc92031_mii_cmd(void __iomem *port_base, u32 cmd0, u32 cmd1)
337{
338 iowrite32(Mii_Divider, port_base + Miicmd0);
339
340 _sc92031_mii_wait(port_base);
341
342 iowrite32(cmd1, port_base + Miicmd1);
343 iowrite32(Mii_Divider | cmd0, port_base + Miicmd0);
344
345 return _sc92031_mii_wait(port_base);
346}
347
348static void _sc92031_mii_scan(void __iomem *port_base)
349{
350 _sc92031_mii_cmd(port_base, Mii_SCAN, 0x1 << 6);
351}
352
353static u16 _sc92031_mii_read(void __iomem *port_base, unsigned reg)
354{
355 return _sc92031_mii_cmd(port_base, Mii_READ, reg << 6) >> 13;
356}
357
358static void _sc92031_mii_write(void __iomem *port_base, unsigned reg, u16 val)
359{
360 _sc92031_mii_cmd(port_base, Mii_WRITE, (reg << 6) | ((u32)val << 11));
361}
362
363static void sc92031_disable_interrupts(struct net_device *dev)
364{
365 struct sc92031_priv *priv = netdev_priv(dev);
366 void __iomem *port_base = priv->port_base;
367
368 /* tell the tasklet/interrupt not to enable interrupts */
369 atomic_set(&priv->intr_mask, 0);
370 wmb();
371
372 /* stop interrupts */
373 iowrite32(0, port_base + IntrMask);
374 _sc92031_dummy_read(port_base);
375 mmiowb();
376
377 /* wait for any concurrent interrupt/tasklet to finish */
378 synchronize_irq(dev->irq);
379 tasklet_disable(&priv->tasklet);
380}
381
382static void sc92031_enable_interrupts(struct net_device *dev)
383{
384 struct sc92031_priv *priv = netdev_priv(dev);
385 void __iomem *port_base = priv->port_base;
386
387 tasklet_enable(&priv->tasklet);
388
389 atomic_set(&priv->intr_mask, IntrBits);
390 wmb();
391
392 iowrite32(IntrBits, port_base + IntrMask);
393 mmiowb();
394}
395
396static void _sc92031_disable_tx_rx(struct net_device *dev)
397{
398 struct sc92031_priv *priv = netdev_priv(dev);
399 void __iomem *port_base = priv->port_base;
400
401 priv->rx_config &= ~RxEnb;
402 priv->tx_config &= ~TxEnb;
403 iowrite32(priv->rx_config, port_base + RxConfig);
404 iowrite32(priv->tx_config, port_base + TxConfig);
405}
406
407static void _sc92031_enable_tx_rx(struct net_device *dev)
408{
409 struct sc92031_priv *priv = netdev_priv(dev);
410 void __iomem *port_base = priv->port_base;
411
412 priv->rx_config |= RxEnb;
413 priv->tx_config |= TxEnb;
414 iowrite32(priv->rx_config, port_base + RxConfig);
415 iowrite32(priv->tx_config, port_base + TxConfig);
416}
417
418static void _sc92031_tx_clear(struct net_device *dev)
419{
420 struct sc92031_priv *priv = netdev_priv(dev);
421
422 while (priv->tx_head - priv->tx_tail > 0) {
423 priv->tx_tail++;
424 priv->stats.tx_dropped++;
425 }
426 priv->tx_head = priv->tx_tail = 0;
427}
428
429static void _sc92031_set_mar(struct net_device *dev)
430{
431 struct sc92031_priv *priv = netdev_priv(dev);
432 void __iomem *port_base = priv->port_base;
433 u32 mar0 = 0, mar1 = 0;
434
435 if ((dev->flags & IFF_PROMISC)
436 || dev->mc_count > multicast_filter_limit
437 || (dev->flags & IFF_ALLMULTI))
438 mar0 = mar1 = 0xffffffff;
439 else if (dev->flags & IFF_MULTICAST) {
440 struct dev_mc_list *mc_list;
441
442 for (mc_list = dev->mc_list; mc_list; mc_list = mc_list->next) {
443 u32 crc;
444 unsigned bit = 0;
445
446 crc = ~ether_crc(ETH_ALEN, mc_list->dmi_addr);
447 crc >>= 24;
448
449 if (crc & 0x01) bit |= 0x02;
450 if (crc & 0x02) bit |= 0x01;
451 if (crc & 0x10) bit |= 0x20;
452 if (crc & 0x20) bit |= 0x10;
453 if (crc & 0x40) bit |= 0x08;
454 if (crc & 0x80) bit |= 0x04;
455
456 if (bit > 31)
457 mar0 |= 0x1 << (bit - 32);
458 else
459 mar1 |= 0x1 << bit;
460 }
461 }
462
463 iowrite32(mar0, port_base + MAR0);
464 iowrite32(mar1, port_base + MAR0 + 4);
465}
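The bit shuffle in the loop above derives a 6-bit hash index from the top byte of the inverted frame CRC; indices 32-63 select bits in mar0 (written to MAR0) and indices 0-31 select bits in mar1 (written to MAR0 + 4). An equivalent standalone formulation, with a hypothetical helper name:

/* Hypothetical helper: the same bit permutation as the loop above. */
static unsigned sc92031_mc_hash(u8 *addr)
{
	u32 crc = ~ether_crc(ETH_ALEN, addr) >> 24;
	unsigned bit = 0;

	bit |= (crc & 0x01) ? 0x02 : 0;	/* crc bit 0 -> index bit 1 */
	bit |= (crc & 0x02) ? 0x01 : 0;	/* crc bit 1 -> index bit 0 */
	bit |= (crc & 0x10) ? 0x20 : 0;	/* crc bit 4 -> index bit 5 */
	bit |= (crc & 0x20) ? 0x10 : 0;	/* crc bit 5 -> index bit 4 */
	bit |= (crc & 0x40) ? 0x08 : 0;	/* crc bit 6 -> index bit 3 */
	bit |= (crc & 0x80) ? 0x04 : 0;	/* crc bit 7 -> index bit 2 */

	return bit;	/* 0..63, split across the two MAR registers */
}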
466
467static void _sc92031_set_rx_config(struct net_device *dev)
468{
469 struct sc92031_priv *priv = netdev_priv(dev);
470 void __iomem *port_base = priv->port_base;
471 unsigned int old_mc_flags;
472 u32 rx_config_bits = 0;
473
474 old_mc_flags = priv->mc_flags;
475
476 if (dev->flags & IFF_PROMISC)
477 rx_config_bits |= RxSmall | RxHuge | RxErr | RxBroadcast
478 | RxMulticast | RxAllphys;
479
480 if (dev->flags & (IFF_ALLMULTI | IFF_MULTICAST))
481 rx_config_bits |= RxMulticast;
482
483 if (dev->flags & IFF_BROADCAST)
484 rx_config_bits |= RxBroadcast;
485
486 priv->rx_config &= ~(RxSmall | RxHuge | RxErr | RxBroadcast
487 | RxMulticast | RxAllphys);
488 priv->rx_config |= rx_config_bits;
489
490 priv->mc_flags = dev->flags & (IFF_PROMISC | IFF_ALLMULTI
491 | IFF_MULTICAST | IFF_BROADCAST);
492
493 if (netif_carrier_ok(dev) && priv->mc_flags != old_mc_flags)
494 iowrite32(priv->rx_config, port_base + RxConfig);
495}
496
497static bool _sc92031_check_media(struct net_device *dev)
498{
499 struct sc92031_priv *priv = netdev_priv(dev);
500 void __iomem *port_base = priv->port_base;
501 u16 bmsr;
502
503 bmsr = _sc92031_mii_read(port_base, MII_BMSR);
504 rmb();
505 if (bmsr & BMSR_LSTATUS) {
506 bool speed_100, duplex_full;
507 u32 flow_ctrl_config = 0;
508 u16 output_status = _sc92031_mii_read(port_base,
509 MII_OutputStatus);
510 _sc92031_mii_scan(port_base);
511
512 speed_100 = output_status & 0x2;
513 duplex_full = output_status & 0x4;
514
515 /* Initial Tx/Rx configuration */
516 priv->rx_config = (0x40 << LowThresholdShift) | (0x1c0 << HighThresholdShift);
517 priv->tx_config = 0x48800000;
518
519 /* NOTE: vendor driver had dead code here to enable tx padding */
520
521 if (!speed_100)
522 priv->tx_config |= 0x80000;
523
524 // configure rx mode
525 _sc92031_set_rx_config(dev);
526
527 if (duplex_full) {
528 priv->rx_config |= RxFullDx;
529 priv->tx_config |= TxFullDx;
530 flow_ctrl_config = FlowCtrlFullDX | FlowCtrlEnb;
531 } else {
532 priv->rx_config &= ~RxFullDx;
533 priv->tx_config &= ~TxFullDx;
534 }
535
536 _sc92031_set_mar(dev);
537 _sc92031_set_rx_config(dev);
538 _sc92031_enable_tx_rx(dev);
539 iowrite32(flow_ctrl_config, port_base + FlowCtrlConfig);
540
541 netif_carrier_on(dev);
542
543 if (printk_ratelimit())
544 printk(KERN_INFO "%s: link up, %sMbps, %s-duplex\n",
545 dev->name,
546 speed_100 ? "100" : "10",
547 duplex_full ? "full" : "half");
548 return true;
549 } else {
550 _sc92031_mii_scan(port_base);
551
552 netif_carrier_off(dev);
553
554 _sc92031_disable_tx_rx(dev);
555
556 if (printk_ratelimit())
557 printk(KERN_INFO "%s: link down\n", dev->name);
558 return false;
559 }
560}
561
562static void _sc92031_phy_reset(struct net_device *dev)
563{
564 struct sc92031_priv *priv = netdev_priv(dev);
565 void __iomem *port_base = priv->port_base;
566 u32 phy_ctrl;
567
568 phy_ctrl = ioread32(port_base + PhyCtrl);
569 phy_ctrl &= ~(PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10);
570 phy_ctrl |= PhyCtrlAne | PhyCtrlReset;
571
572 switch (media) {
573 default:
574 case AUTOSELECT:
575 phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10;
576 break;
577 case M10_HALF:
578 phy_ctrl |= PhyCtrlSpd10;
579 break;
580 case M10_FULL:
581 phy_ctrl |= PhyCtrlDux | PhyCtrlSpd10;
582 break;
583 case M100_HALF:
584 phy_ctrl |= PhyCtrlSpd100;
585 break;
586 case M100_FULL:
587 phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100;
588 break;
589 }
590
591 iowrite32(phy_ctrl, port_base + PhyCtrl);
592 mdelay(10);
593
594 phy_ctrl &= ~PhyCtrlReset;
595 iowrite32(phy_ctrl, port_base + PhyCtrl);
596 mdelay(1);
597
598 _sc92031_mii_write(port_base, MII_JAB,
599 PHY_16_JAB_ENB | PHY_16_PORT_ENB);
600 _sc92031_mii_scan(port_base);
601
602 netif_carrier_off(dev);
603 netif_stop_queue(dev);
604}
605
606static void _sc92031_reset(struct net_device *dev)
607{
608 struct sc92031_priv *priv = netdev_priv(dev);
609 void __iomem *port_base = priv->port_base;
610
611 /* disable PM */
612 iowrite32(0, port_base + PMConfig);
613
614 /* soft reset the chip */
615 iowrite32(Cfg0_Reset, port_base + Config0);
616 mdelay(200);
617
618 iowrite32(0, port_base + Config0);
619 mdelay(10);
620
621 /* disable interrupts */
622 iowrite32(0, port_base + IntrMask);
623
624 /* clear multicast address */
625 iowrite32(0, port_base + MAR0);
626 iowrite32(0, port_base + MAR0 + 4);
627
628 /* init rx ring */
629 iowrite32(priv->rx_ring_dma_addr, port_base + RxbufAddr);
630 priv->rx_ring_tail = priv->rx_ring_dma_addr;
631
632 /* init tx ring */
633 _sc92031_tx_clear(dev);
634
635 /* clear old register values */
636 priv->intr_status = 0;
637 atomic_set(&priv->intr_mask, 0);
638 priv->rx_config = 0;
639 priv->tx_config = 0;
640 priv->mc_flags = 0;
641
642 /* configure rx buffer size */
643 /* NOTE: vendor driver had dead code here to enable early tx/rx */
644 iowrite32(Cfg1_Rcv64K, port_base + Config1);
645
646 _sc92031_phy_reset(dev);
647 _sc92031_check_media(dev);
648
649 /* calculate rx fifo overflow */
650 priv->rx_value = 0;
651
652 /* enable PM */
653 iowrite32(priv->pm_config, port_base + PMConfig);
654
655 /* clear intr register */
656 ioread32(port_base + IntrStatus);
657}
658
659static void _sc92031_tx_tasklet(struct net_device *dev)
660{
661 struct sc92031_priv *priv = netdev_priv(dev);
662 void __iomem *port_base = priv->port_base;
663
664 unsigned old_tx_tail;
665 unsigned entry;
666 u32 tx_status;
667
668 old_tx_tail = priv->tx_tail;
669 while (priv->tx_head - priv->tx_tail > 0) {
670 entry = priv->tx_tail % NUM_TX_DESC;
671 tx_status = ioread32(port_base + TxStatus0 + entry * 4);
672
673 if (!(tx_status & (TxStatOK | TxUnderrun | TxAborted)))
674 break;
675
676 priv->tx_tail++;
677
678 if (tx_status & TxStatOK) {
679 priv->stats.tx_bytes += tx_status & 0x1fff;
680 priv->stats.tx_packets++;
681 /* Note: TxCarrierLost is always asserted at 100Mbps. */
682 priv->stats.collisions += (tx_status >> 22) & 0xf;
683 }
684
685 if (tx_status & (TxOutOfWindow | TxAborted)) {
686 priv->stats.tx_errors++;
687
688 if (tx_status & TxAborted)
689 priv->stats.tx_aborted_errors++;
690
691 if (tx_status & TxCarrierLost)
692 priv->stats.tx_carrier_errors++;
693
694 if (tx_status & TxOutOfWindow)
695 priv->stats.tx_window_errors++;
696 }
697
698 if (tx_status & TxUnderrun)
699 priv->stats.tx_fifo_errors++;
700 }
701
702 if (priv->tx_tail != old_tx_tail)
703 if (netif_queue_stopped(dev))
704 netif_wake_queue(dev);
705}
706
707static void _sc92031_rx_tasklet_error(u32 rx_status,
708 struct sc92031_priv *priv, unsigned rx_size)
709{
710 if (rx_size > (MAX_ETH_FRAME_SIZE + 4) || rx_size < 16) {
711 priv->stats.rx_errors++;
712 priv->stats.rx_length_errors++;
713 }
714
715 if (!(rx_status & RxStatesOK)) {
716 priv->stats.rx_errors++;
717
718 if (rx_status & (RxHugeFrame | RxSmallFrame))
719 priv->stats.rx_length_errors++;
720
721 if (rx_status & RxBadAlign)
722 priv->stats.rx_frame_errors++;
723
724 if (!(rx_status & RxCRCOK))
725 priv->stats.rx_crc_errors++;
726 } else
727 priv->rx_loss++;
728}
729
730static void _sc92031_rx_tasklet(struct net_device *dev)
731{
732 struct sc92031_priv *priv = netdev_priv(dev);
733 void __iomem *port_base = priv->port_base;
734
735 dma_addr_t rx_ring_head;
736 unsigned rx_len;
737 unsigned rx_ring_offset;
738 void *rx_ring = priv->rx_ring;
739
740 rx_ring_head = ioread32(port_base + RxBufWPtr);
741 rmb();
742
743 /* rx_ring_head is only 17 bits in the RxBufWPtr register.
744 * we need to extend it to a 32-bit physical address
745 */
746 rx_ring_head &= (dma_addr_t)(RX_BUF_LEN - 1);
747 rx_ring_head |= priv->rx_ring_dma_addr & ~(dma_addr_t)(RX_BUF_LEN - 1);
748 if (rx_ring_head < priv->rx_ring_dma_addr)
749 rx_ring_head += RX_BUF_LEN;
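	/* Worked example of the reconstruction above (hypothetical values,
	 * RX_BUF_LEN = 0x10000): with rx_ring_dma_addr = 0x12340000 and a
	 * hardware pointer reading 0x5678,
	 *   head &= 0xFFFF          -> 0x00005678
	 *   head |= base & ~0xFFFF  -> 0x12345678
	 * head >= base, so no wrap correction is applied. */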
750
751 if (rx_ring_head >= priv->rx_ring_tail)
752 rx_len = rx_ring_head - priv->rx_ring_tail;
753 else
754 rx_len = RX_BUF_LEN - (priv->rx_ring_tail - rx_ring_head);
755
756 if (!rx_len)
757 return;
758
759 if (unlikely(rx_len > RX_BUF_LEN)) {
760 if (printk_ratelimit())
761 printk(KERN_ERR "%s: rx packets length > rx buffer\n",
762 dev->name);
763 return;
764 }
765
766 rx_ring_offset = (priv->rx_ring_tail - priv->rx_ring_dma_addr) % RX_BUF_LEN;
767
768 while (rx_len) {
769 u32 rx_status;
770 unsigned rx_size, rx_size_align, pkt_size;
771 struct sk_buff *skb;
772
773 rx_status = le32_to_cpup((__le32 *)(rx_ring + rx_ring_offset));
774 rmb();
775
776 rx_size = rx_status >> 20;
777 rx_size_align = (rx_size + 3) & ~3; // align to a 4-byte boundary
778 pkt_size = rx_size - 4; // Omit the four-octet CRC from the length.
779
780 rx_ring_offset = (rx_ring_offset + 4) % RX_BUF_LEN;
781
782 if (unlikely(rx_status == 0
783 || rx_size > (MAX_ETH_FRAME_SIZE + 4)
784 || rx_size < 16
785 || !(rx_status & RxStatesOK))) {
786 _sc92031_rx_tasklet_error(rx_status, priv, rx_size);
787 break;
788 }
789
790 if (unlikely(rx_size_align + 4 > rx_len)) {
791 if (printk_ratelimit())
792 printk(KERN_ERR "%s: rx_len is too small\n", dev->name);
793 break;
794 }
795
796 rx_len -= rx_size_align + 4;
797
798 skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN);
799 if (unlikely(!skb)) {
800 if (printk_ratelimit())
801 printk(KERN_ERR "%s: Couldn't allocate a skb_buff for a packet of size %u\n",
802 dev->name, pkt_size);
803 goto next;
804 }
805
806 skb_reserve(skb, NET_IP_ALIGN);
807
808 if ((rx_ring_offset + pkt_size) > RX_BUF_LEN) {
809 memcpy(skb_put(skb, RX_BUF_LEN - rx_ring_offset),
810 rx_ring + rx_ring_offset, RX_BUF_LEN - rx_ring_offset);
811 memcpy(skb_put(skb, pkt_size - (RX_BUF_LEN - rx_ring_offset)),
812 rx_ring, pkt_size - (RX_BUF_LEN - rx_ring_offset));
813 } else {
814 memcpy(skb_put(skb, pkt_size), rx_ring + rx_ring_offset, pkt_size);
815 }
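		/* Worked example of the split copy above (hypothetical values,
		 * RX_BUF_LEN = 0x10000): with rx_ring_offset = 0xFFF0 and
		 * pkt_size = 0x40, the first memcpy takes the 0x10 bytes up to
		 * the end of the ring and the second takes the remaining 0x30
		 * bytes from the start of rx_ring. */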
816
817 skb->dev = dev;
818 skb->protocol = eth_type_trans(skb, dev);
819 dev->last_rx = jiffies;
820 netif_rx(skb);
821
822 priv->stats.rx_bytes += pkt_size;
823 priv->stats.rx_packets++;
824
825 if (rx_status & Rx_Multicast)
826 priv->stats.multicast++;
827
828 next:
829 rx_ring_offset = (rx_ring_offset + rx_size_align) % RX_BUF_LEN;
830 }
831 mb();
832
833 priv->rx_ring_tail = rx_ring_head;
834 iowrite32(priv->rx_ring_tail, port_base + RxBufRPtr);
835}
836
837static void _sc92031_link_tasklet(struct net_device *dev)
838{
839 struct sc92031_priv *priv = netdev_priv(dev);
840
841 if (_sc92031_check_media(dev))
842 netif_wake_queue(dev);
843 else {
844 netif_stop_queue(dev);
845 priv->stats.tx_carrier_errors++;
846 }
847}
848
849static void sc92031_tasklet(unsigned long data)
850{
851 struct net_device *dev = (struct net_device *)data;
852 struct sc92031_priv *priv = netdev_priv(dev);
853 void __iomem *port_base = priv->port_base;
854 u32 intr_status, intr_mask;
855
856 intr_status = priv->intr_status;
857
858 spin_lock(&priv->lock);
859
860 if (unlikely(!netif_running(dev)))
861 goto out;
862
863 if (intr_status & TxOK)
864 _sc92031_tx_tasklet(dev);
865
866 if (intr_status & RxOK)
867 _sc92031_rx_tasklet(dev);
868
869 if (intr_status & RxOverflow)
870 priv->stats.rx_errors++;
871
872 if (intr_status & TimeOut) {
873 priv->stats.rx_errors++;
874 priv->stats.rx_length_errors++;
875 }
876
877 if (intr_status & (LinkFail | LinkOK))
878 _sc92031_link_tasklet(dev);
879
880out:
881 intr_mask = atomic_read(&priv->intr_mask);
882 rmb();
883
884 iowrite32(intr_mask, port_base + IntrMask);
885 mmiowb();
886
887 spin_unlock(&priv->lock);
888}
889
890static irqreturn_t sc92031_interrupt(int irq, void *dev_id)
891{
892 struct net_device *dev = dev_id;
893 struct sc92031_priv *priv = netdev_priv(dev);
894 void __iomem *port_base = priv->port_base;
895 u32 intr_status, intr_mask;
896
897 /* mask interrupts before clearing IntrStatus */
898 iowrite32(0, port_base + IntrMask);
899 _sc92031_dummy_read(port_base);
900
901 intr_status = ioread32(port_base + IntrStatus);
902 if (unlikely(intr_status == 0xffffffff))
903 return IRQ_NONE; // hardware has gone missing
904
905 intr_status &= IntrBits;
906 if (!intr_status)
907 goto out_none;
908
909 priv->intr_status = intr_status;
910 tasklet_schedule(&priv->tasklet);
911
912 return IRQ_HANDLED;
913
914out_none:
915 intr_mask = atomic_read(&priv->intr_mask);
916 rmb();
917
918 iowrite32(intr_mask, port_base + IntrMask);
919 mmiowb();
920
921 return IRQ_NONE;
922}
923
924static struct net_device_stats *sc92031_get_stats(struct net_device *dev)
925{
926 struct sc92031_priv *priv = netdev_priv(dev);
927 void __iomem *port_base = priv->port_base;
928
929 // FIXME I do not understand what this is trying to do.
930 if (netif_running(dev)) {
931 int temp;
932
933 spin_lock_bh(&priv->lock);
934
935 /* Update the error count. */
936 temp = (ioread32(port_base + RxStatus0) >> 16) & 0xffff;
937
938 if (temp == 0xffff) {
939 priv->rx_value += temp;
940 priv->stats.rx_fifo_errors = priv->rx_value;
941 } else {
942 priv->stats.rx_fifo_errors = temp + priv->rx_value;
943 }
944
945 spin_unlock_bh(&priv->lock);
946 }
947
948 return &priv->stats;
949}
950
951static int sc92031_start_xmit(struct sk_buff *skb, struct net_device *dev)
952{
953 int err = 0;
954 struct sc92031_priv *priv = netdev_priv(dev);
955 void __iomem *port_base = priv->port_base;
956
957 unsigned len;
958 unsigned entry;
959 u32 tx_status;
960
961 if (unlikely(skb->len > TX_BUF_SIZE)) {
962 err = -EMSGSIZE;
963 priv->stats.tx_dropped++;
964 goto out;
965 }
966
967 spin_lock_bh(&priv->lock);
968
969 if (unlikely(!netif_carrier_ok(dev))) {
970 err = -ENOLINK;
971 priv->stats.tx_dropped++;
972 goto out_unlock;
973 }
974
975 BUG_ON(priv->tx_head - priv->tx_tail >= NUM_TX_DESC);
976
977 entry = priv->tx_head++ % NUM_TX_DESC;
978
979 skb_copy_and_csum_dev(skb, priv->tx_bufs + entry * TX_BUF_SIZE);
980
981 len = skb->len;
982 if (unlikely(len < ETH_ZLEN)) {
983 memset(priv->tx_bufs + entry * TX_BUF_SIZE + len,
984 0, ETH_ZLEN - len);
985 len = ETH_ZLEN;
986 }
987
988 wmb();
989
990 if (len < 100)
991 tx_status = len;
992 else if (len < 300)
993 tx_status = 0x30000 | len;
994 else
995 tx_status = 0x50000 | len;
996
997 iowrite32(priv->tx_bufs_dma_addr + entry * TX_BUF_SIZE,
998 port_base + TxAddr0 + entry * 4);
999 iowrite32(tx_status, port_base + TxStatus0 + entry * 4);
1000 mmiowb();
1001
1002 dev->trans_start = jiffies;
1003
1004 if (priv->tx_head - priv->tx_tail >= NUM_TX_DESC)
1005 netif_stop_queue(dev);
1006
1007out_unlock:
1008 spin_unlock_bh(&priv->lock);
1009
1010out:
1011 dev_kfree_skb(skb);
1012
1013 return err;
1014}
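The tx_status encoding above packs an early-Tx threshold into the bits named by EarlyTxThresShift, with the frame length in the low bits: frames under 100 bytes start transmitting immediately (threshold 0), while longer frames wait for more FIFO data (0x30000 encodes threshold 3, 0x50000 threshold 5). A hedged restatement, using a hypothetical helper name:

/* Hypothetical helper: make the early-Tx threshold encoding explicit. */
static u32 sc92031_tx_cmd(unsigned len)
{
	unsigned thresh = (len < 100) ? 0 : (len < 300) ? 3 : 5;

	return (thresh << EarlyTxThresShift) | len;
}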
1015
1016static int sc92031_open(struct net_device *dev)
1017{
1018 int err;
1019 struct sc92031_priv *priv = netdev_priv(dev);
1020 struct pci_dev *pdev = priv->pdev;
1021
1022 priv->rx_ring = pci_alloc_consistent(pdev, RX_BUF_LEN,
1023 &priv->rx_ring_dma_addr);
1024 if (unlikely(!priv->rx_ring)) {
1025 err = -ENOMEM;
1026 goto out_alloc_rx_ring;
1027 }
1028
1029 priv->tx_bufs = pci_alloc_consistent(pdev, TX_BUF_TOT_LEN,
1030 &priv->tx_bufs_dma_addr);
1031 if (unlikely(!priv->tx_bufs)) {
1032 err = -ENOMEM;
1033 goto out_alloc_tx_bufs;
1034 }
1035 priv->tx_head = priv->tx_tail = 0;
1036
1037 err = request_irq(pdev->irq, sc92031_interrupt,
1038 IRQF_SHARED, dev->name, dev);
1039 if (unlikely(err < 0))
1040 goto out_request_irq;
1041
1042 priv->pm_config = 0;
1043
1044 /* Interrupts already disabled by sc92031_stop or sc92031_probe */
1045 spin_lock(&priv->lock);
1046
1047 _sc92031_reset(dev);
1048 mmiowb();
1049
1050 spin_unlock(&priv->lock);
1051 sc92031_enable_interrupts(dev);
1052
1053 if (netif_carrier_ok(dev))
1054 netif_start_queue(dev);
1055 else
1056 netif_tx_disable(dev);
1057
1058 return 0;
1059
1060out_request_irq:
1061 pci_free_consistent(pdev, TX_BUF_TOT_LEN, priv->tx_bufs,
1062 priv->tx_bufs_dma_addr);
1063out_alloc_tx_bufs:
1064 pci_free_consistent(pdev, RX_BUF_LEN, priv->rx_ring,
1065 priv->rx_ring_dma_addr);
1066out_alloc_rx_ring:
1067 return err;
1068}
1069
1070static int sc92031_stop(struct net_device *dev)
1071{
1072 struct sc92031_priv *priv = netdev_priv(dev);
1073 struct pci_dev *pdev = priv->pdev;
1074
1075 netif_tx_disable(dev);
1076
1077 /* Disable interrupts, stop Tx and Rx. */
1078 sc92031_disable_interrupts(dev);
1079
1080 spin_lock(&priv->lock);
1081
1082 _sc92031_disable_tx_rx(dev);
1083 _sc92031_tx_clear(dev);
1084 mmiowb();
1085
1086 spin_unlock(&priv->lock);
1087
1088 free_irq(pdev->irq, dev);
1089 pci_free_consistent(pdev, TX_BUF_TOT_LEN, priv->tx_bufs,
1090 priv->tx_bufs_dma_addr);
1091 pci_free_consistent(pdev, RX_BUF_LEN, priv->rx_ring,
1092 priv->rx_ring_dma_addr);
1093
1094 return 0;
1095}
1096
1097static void sc92031_set_multicast_list(struct net_device *dev)
1098{
1099 struct sc92031_priv *priv = netdev_priv(dev);
1100
1101 spin_lock_bh(&priv->lock);
1102
1103 _sc92031_set_mar(dev);
1104 _sc92031_set_rx_config(dev);
1105 mmiowb();
1106
1107 spin_unlock_bh(&priv->lock);
1108}
1109
1110static void sc92031_tx_timeout(struct net_device *dev)
1111{
1112 struct sc92031_priv *priv = netdev_priv(dev);
1113
1114 /* Disable interrupts by clearing the interrupt mask. */
1115 sc92031_disable_interrupts(dev);
1116
1117 spin_lock(&priv->lock);
1118
1119 priv->tx_timeouts++;
1120
1121 _sc92031_reset(dev);
1122 mmiowb();
1123
1124 spin_unlock(&priv->lock);
1125
1126 /* enable interrupts */
1127 sc92031_enable_interrupts(dev);
1128
1129 if (netif_carrier_ok(dev))
1130 netif_wake_queue(dev);
1131}
1132
1133#ifdef CONFIG_NET_POLL_CONTROLLER
1134static void sc92031_poll_controller(struct net_device *dev)
1135{
1136 disable_irq(dev->irq);
1137 if (sc92031_interrupt(dev->irq, dev) != IRQ_NONE)
1138 sc92031_tasklet((unsigned long)dev);
1139 enable_irq(dev->irq);
1140}
1141#endif
1142
1143static int sc92031_ethtool_get_settings(struct net_device *dev,
1144 struct ethtool_cmd *cmd)
1145{
1146 struct sc92031_priv *priv = netdev_priv(dev);
1147 void __iomem *port_base = priv->port_base;
1148 u8 phy_address;
1149 u32 phy_ctrl;
1150 u16 output_status;
1151
1152 spin_lock_bh(&priv->lock);
1153
1154 phy_address = ioread32(port_base + Miicmd1) >> 27;
1155 phy_ctrl = ioread32(port_base + PhyCtrl);
1156
1157 output_status = _sc92031_mii_read(port_base, MII_OutputStatus);
1158 _sc92031_mii_scan(port_base);
1159 mmiowb();
1160
1161 spin_unlock_bh(&priv->lock);
1162
1163 cmd->supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full
1164 | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full
1165 | SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII;
1166
1167 cmd->advertising = ADVERTISED_TP | ADVERTISED_MII;
1168
1169 if ((phy_ctrl & (PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10))
1170 == (PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10))
1171 cmd->advertising |= ADVERTISED_Autoneg;
1172
1173 if ((phy_ctrl & PhyCtrlSpd10) == PhyCtrlSpd10)
1174 cmd->advertising |= ADVERTISED_10baseT_Half;
1175
1176 if ((phy_ctrl & (PhyCtrlSpd10 | PhyCtrlDux))
1177 == (PhyCtrlSpd10 | PhyCtrlDux))
1178 cmd->advertising |= ADVERTISED_10baseT_Full;
1179
1180 if ((phy_ctrl & PhyCtrlSpd100) == PhyCtrlSpd100)
1181 cmd->advertising |= ADVERTISED_100baseT_Half;
1182
1183 if ((phy_ctrl & (PhyCtrlSpd100 | PhyCtrlDux))
1184 == (PhyCtrlSpd100 | PhyCtrlDux))
1185 cmd->advertising |= ADVERTISED_100baseT_Full;
1186
1187 if (phy_ctrl & PhyCtrlAne)
1188 cmd->advertising |= ADVERTISED_Autoneg;
1189
1190 cmd->speed = (output_status & 0x2) ? SPEED_100 : SPEED_10;
1191 cmd->duplex = (output_status & 0x4) ? DUPLEX_FULL : DUPLEX_HALF;
1192 cmd->port = PORT_MII;
1193 cmd->phy_address = phy_address;
1194 cmd->transceiver = XCVR_INTERNAL;
1195 cmd->autoneg = (phy_ctrl & PhyCtrlAne) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
1196
1197 return 0;
1198}
1199
1200static int sc92031_ethtool_set_settings(struct net_device *dev,
1201 struct ethtool_cmd *cmd)
1202{
1203 struct sc92031_priv *priv = netdev_priv(dev);
1204 void __iomem *port_base = priv->port_base;
1205 u32 phy_ctrl;
1206 u32 old_phy_ctrl;
1207
1208 if (!(cmd->speed == SPEED_10 || cmd->speed == SPEED_100))
1209 return -EINVAL;
1210 if (!(cmd->duplex == DUPLEX_HALF || cmd->duplex == DUPLEX_FULL))
1211 return -EINVAL;
1212 if (!(cmd->port == PORT_MII))
1213 return -EINVAL;
1214 if (!(cmd->phy_address == 0x1f))
1215 return -EINVAL;
1216 if (!(cmd->transceiver == XCVR_INTERNAL))
1217 return -EINVAL;
1218 if (!(cmd->autoneg == AUTONEG_DISABLE || cmd->autoneg == AUTONEG_ENABLE))
1219 return -EINVAL;
1220
1221 if (cmd->autoneg == AUTONEG_ENABLE) {
1222 if (!(cmd->advertising & (ADVERTISED_Autoneg
1223 | ADVERTISED_100baseT_Full
1224 | ADVERTISED_100baseT_Half
1225 | ADVERTISED_10baseT_Full
1226 | ADVERTISED_10baseT_Half)))
1227 return -EINVAL;
1228
1229 phy_ctrl = PhyCtrlAne;
1230
1231 // FIXME: I'm not sure what the original code was trying to do
1232 if (cmd->advertising & ADVERTISED_Autoneg)
1233 phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10;
1234 if (cmd->advertising & ADVERTISED_100baseT_Full)
1235 phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100;
1236 if (cmd->advertising & ADVERTISED_100baseT_Half)
1237 phy_ctrl |= PhyCtrlSpd100;
1238 if (cmd->advertising & ADVERTISED_10baseT_Full)
1239 phy_ctrl |= PhyCtrlSpd10 | PhyCtrlDux;
1240 if (cmd->advertising & ADVERTISED_10baseT_Half)
1241 phy_ctrl |= PhyCtrlSpd10;
1242 } else {
1243 // FIXME: Whole branch guessed
1244 phy_ctrl = 0;
1245
1246 if (cmd->speed == SPEED_10)
1247 phy_ctrl |= PhyCtrlSpd10;
1248 else /* cmd->speed == SPEED_100 */
1249 phy_ctrl |= PhyCtrlSpd100;
1250
1251 if (cmd->duplex == DUPLEX_FULL)
1252 phy_ctrl |= PhyCtrlDux;
1253 }
1254
1255 spin_lock_bh(&priv->lock);
1256
1257 old_phy_ctrl = ioread32(port_base + PhyCtrl);
1258 phy_ctrl |= old_phy_ctrl & ~(PhyCtrlAne | PhyCtrlDux
1259 | PhyCtrlSpd100 | PhyCtrlSpd10);
1260 if (phy_ctrl != old_phy_ctrl)
1261 iowrite32(phy_ctrl, port_base + PhyCtrl);
1262
1263 spin_unlock_bh(&priv->lock);
1264
1265 return 0;
1266}
1267
1268static void sc92031_ethtool_get_drvinfo(struct net_device *dev,
1269 struct ethtool_drvinfo *drvinfo)
1270{
1271 struct sc92031_priv *priv = netdev_priv(dev);
1272 struct pci_dev *pdev = priv->pdev;
1273
1274 strcpy(drvinfo->driver, SC92031_NAME);
1275 strcpy(drvinfo->version, SC92031_VERSION);
1276 strcpy(drvinfo->bus_info, pci_name(pdev));
1277}
1278
1279static void sc92031_ethtool_get_wol(struct net_device *dev,
1280 struct ethtool_wolinfo *wolinfo)
1281{
1282 struct sc92031_priv *priv = netdev_priv(dev);
1283 void __iomem *port_base = priv->port_base;
1284 u32 pm_config;
1285
1286 spin_lock_bh(&priv->lock);
1287 pm_config = ioread32(port_base + PMConfig);
1288 spin_unlock_bh(&priv->lock);
1289
1290 // FIXME: Guessed
1291 wolinfo->supported = WAKE_PHY | WAKE_MAGIC
1292 | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;
1293 wolinfo->wolopts = 0;
1294
1295 if (pm_config & PM_LinkUp)
1296 wolinfo->wolopts |= WAKE_PHY;
1297
1298 if (pm_config & PM_Magic)
1299 wolinfo->wolopts |= WAKE_MAGIC;
1300
1301 if (pm_config & PM_WakeUp)
1302 // FIXME: Guessed
1303 wolinfo->wolopts |= WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;
1304}
1305
1306static int sc92031_ethtool_set_wol(struct net_device *dev,
1307 struct ethtool_wolinfo *wolinfo)
1308{
1309 struct sc92031_priv *priv = netdev_priv(dev);
1310 void __iomem *port_base = priv->port_base;
1311 u32 pm_config;
1312
1313 spin_lock_bh(&priv->lock);
1314
1315 pm_config = ioread32(port_base + PMConfig)
1316 & ~(PM_LinkUp | PM_Magic | PM_WakeUp);
1317
1318 if (wolinfo->wolopts & WAKE_PHY)
1319 pm_config |= PM_LinkUp;
1320
1321 if (wolinfo->wolopts & WAKE_MAGIC)
1322 pm_config |= PM_Magic;
1323
1324 // FIXME: Guessed
1325 if (wolinfo->wolopts & (WAKE_UCAST | WAKE_MCAST | WAKE_BCAST))
1326 pm_config |= PM_WakeUp;
1327
1328 priv->pm_config = pm_config;
1329 iowrite32(pm_config, port_base + PMConfig);
1330 mmiowb();
1331
1332 spin_unlock_bh(&priv->lock);
1333
1334 return 0;
1335}
1336
1337static int sc92031_ethtool_nway_reset(struct net_device *dev)
1338{
1339 int err = 0;
1340 struct sc92031_priv *priv = netdev_priv(dev);
1341 void __iomem *port_base = priv->port_base;
1342 u16 bmcr;
1343
1344 spin_lock_bh(&priv->lock);
1345
1346 bmcr = _sc92031_mii_read(port_base, MII_BMCR);
1347 if (!(bmcr & BMCR_ANENABLE)) {
1348 err = -EINVAL;
1349 goto out;
1350 }
1351
1352 _sc92031_mii_write(port_base, MII_BMCR, bmcr | BMCR_ANRESTART);
1353
1354out:
1355 _sc92031_mii_scan(port_base);
1356 mmiowb();
1357
1358 spin_unlock_bh(&priv->lock);
1359
1360 return err;
1361}
1362
1363static const char sc92031_ethtool_stats_strings[SILAN_STATS_NUM][ETH_GSTRING_LEN] = {
1364 "tx_timeout",
1365 "rx_loss",
1366};
1367
1368static void sc92031_ethtool_get_strings(struct net_device *dev,
1369 u32 stringset, u8 *data)
1370{
1371 if (stringset == ETH_SS_STATS)
1372 memcpy(data, sc92031_ethtool_stats_strings,
1373 SILAN_STATS_NUM * ETH_GSTRING_LEN);
1374}
1375
1376static int sc92031_ethtool_get_stats_count(struct net_device *dev)
1377{
1378 return SILAN_STATS_NUM;
1379}
1380
1381static void sc92031_ethtool_get_ethtool_stats(struct net_device *dev,
1382 struct ethtool_stats *stats, u64 *data)
1383{
1384 struct sc92031_priv *priv = netdev_priv(dev);
1385
1386 spin_lock_bh(&priv->lock);
1387 data[0] = priv->tx_timeouts;
1388 data[1] = priv->rx_loss;
1389 spin_unlock_bh(&priv->lock);
1390}
1391
1392static struct ethtool_ops sc92031_ethtool_ops = {
1393 .get_settings = sc92031_ethtool_get_settings,
1394 .set_settings = sc92031_ethtool_set_settings,
1395 .get_drvinfo = sc92031_ethtool_get_drvinfo,
1396 .get_wol = sc92031_ethtool_get_wol,
1397 .set_wol = sc92031_ethtool_set_wol,
1398 .nway_reset = sc92031_ethtool_nway_reset,
1399 .get_link = ethtool_op_get_link,
1400 .get_tx_csum = ethtool_op_get_tx_csum,
1401 .get_sg = ethtool_op_get_sg,
1402 .get_tso = ethtool_op_get_tso,
1403 .get_strings = sc92031_ethtool_get_strings,
1404 .get_stats_count = sc92031_ethtool_get_stats_count,
1405 .get_ethtool_stats = sc92031_ethtool_get_ethtool_stats,
1406 .get_perm_addr = ethtool_op_get_perm_addr,
1407 .get_ufo = ethtool_op_get_ufo,
1408};
1409
1410static int __devinit sc92031_probe(struct pci_dev *pdev,
1411 const struct pci_device_id *id)
1412{
1413 int err;
1414 void __iomem* port_base;
1415 struct net_device *dev;
1416 struct sc92031_priv *priv;
1417 u32 mac0, mac1;
1418
1419 err = pci_enable_device(pdev);
1420 if (unlikely(err < 0))
1421 goto out_enable_device;
1422
1423 pci_set_master(pdev);
1424
1425 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
1426 if (unlikely(err < 0))
1427 goto out_set_dma_mask;
1428
1429 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
1430 if (unlikely(err < 0))
1431 goto out_set_dma_mask;
1432
1433 err = pci_request_regions(pdev, SC92031_NAME);
1434 if (unlikely(err < 0))
1435 goto out_request_regions;
1436
1437 port_base = pci_iomap(pdev, SC92031_USE_BAR, 0);
1438 if (unlikely(!port_base)) {
1439 err = -EIO;
1440 goto out_iomap;
1441 }
1442
1443 dev = alloc_etherdev(sizeof(struct sc92031_priv));
1444 if (unlikely(!dev)) {
1445 err = -ENOMEM;
1446 goto out_alloc_etherdev;
1447 }
1448
1449 pci_set_drvdata(pdev, dev);
1450
1451#if SC92031_USE_BAR == 0
1452 dev->mem_start = pci_resource_start(pdev, SC92031_USE_BAR);
1453 dev->mem_end = pci_resource_end(pdev, SC92031_USE_BAR);
1454#elif SC92031_USE_BAR == 1
1455 dev->base_addr = pci_resource_start(pdev, SC92031_USE_BAR);
1456#endif
1457 dev->irq = pdev->irq;
1458
1459 /* faked with skb_copy_and_csum_dev */
1460 dev->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA;
1461
1462 dev->get_stats = sc92031_get_stats;
1463 dev->ethtool_ops = &sc92031_ethtool_ops;
1464 dev->hard_start_xmit = sc92031_start_xmit;
1465 dev->watchdog_timeo = TX_TIMEOUT;
1466 dev->open = sc92031_open;
1467 dev->stop = sc92031_stop;
1468 dev->set_multicast_list = sc92031_set_multicast_list;
1469 dev->tx_timeout = sc92031_tx_timeout;
1470#ifdef CONFIG_NET_POLL_CONTROLLER
1471 dev->poll_controller = sc92031_poll_controller;
1472#endif
1473
1474 priv = netdev_priv(dev);
1475 spin_lock_init(&priv->lock);
1476 priv->port_base = port_base;
1477 priv->pdev = pdev;
1478 tasklet_init(&priv->tasklet, sc92031_tasklet, (unsigned long)dev);
1479 /* Fudge tasklet count so the call to sc92031_enable_interrupts at
1480 * sc92031_open will work correctly */
1481 tasklet_disable_nosync(&priv->tasklet);
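	/* (Sketch of the pairing this relies on: the tasklet now sits at a
	 * disable count of 1, so the matching tasklet_enable() done by
	 * sc92031_enable_interrupts() at open time brings the count back to
	 * 0 instead of enabling an armed tasklet too early.) */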
1482
1483 /* PCI PM Wakeup */
1484 iowrite32((~PM_LongWF & ~PM_LWPTN) | PM_Enable, port_base + PMConfig);
1485
1486 mac0 = ioread32(port_base + MAC0);
1487 mac1 = ioread32(port_base + MAC0 + 4);
1488 dev->dev_addr[0] = dev->perm_addr[0] = mac0 >> 24;
1489 dev->dev_addr[1] = dev->perm_addr[1] = mac0 >> 16;
1490 dev->dev_addr[2] = dev->perm_addr[2] = mac0 >> 8;
1491 dev->dev_addr[3] = dev->perm_addr[3] = mac0;
1492 dev->dev_addr[4] = dev->perm_addr[4] = mac1 >> 8;
1493 dev->dev_addr[5] = dev->perm_addr[5] = mac1;
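	/* Illustrative decode (hypothetical register values): mac0 =
	 * 0x00a0c900 and mac1 = 0x1234 yield the station address
	 * 00:a0:c9:00:12:34, i.e. the address bytes are stored most
	 * significant byte first across the two registers. */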
1494
1495 err = register_netdev(dev);
1496 if (err < 0)
1497 goto out_register_netdev;
1498
1499 return 0;
1500
1501out_register_netdev:
1502 free_netdev(dev);
1503out_alloc_etherdev:
1504 pci_iounmap(pdev, port_base);
1505out_iomap:
1506 pci_release_regions(pdev);
1507out_request_regions:
1508out_set_dma_mask:
1509 pci_disable_device(pdev);
1510out_enable_device:
1511 return err;
1512}
1513
1514static void __devexit sc92031_remove(struct pci_dev *pdev)
1515{
1516 struct net_device *dev = pci_get_drvdata(pdev);
1517 struct sc92031_priv *priv = netdev_priv(dev);
1518 void __iomem* port_base = priv->port_base;
1519
1520 unregister_netdev(dev);
1521 free_netdev(dev);
1522 pci_iounmap(pdev, port_base);
1523 pci_release_regions(pdev);
1524 pci_disable_device(pdev);
1525}
1526
1527static int sc92031_suspend(struct pci_dev *pdev, pm_message_t state)
1528{
1529 struct net_device *dev = pci_get_drvdata(pdev);
1530 struct sc92031_priv *priv = netdev_priv(dev);
1531
1532 pci_save_state(pdev);
1533
1534 if (!netif_running(dev))
1535 goto out;
1536
1537 netif_device_detach(dev);
1538
1539 /* Disable interrupts, stop Tx and Rx. */
1540 sc92031_disable_interrupts(dev);
1541
1542 spin_lock(&priv->lock);
1543
1544 _sc92031_disable_tx_rx(dev);
1545 _sc92031_tx_clear(dev);
1546 mmiowb();
1547
1548 spin_unlock(&priv->lock);
1549
1550out:
1551 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1552
1553 return 0;
1554}
1555
1556static int sc92031_resume(struct pci_dev *pdev)
1557{
1558 struct net_device *dev = pci_get_drvdata(pdev);
1559 struct sc92031_priv *priv = netdev_priv(dev);
1560
1561 pci_restore_state(pdev);
1562 pci_set_power_state(pdev, PCI_D0);
1563
1564 if (!netif_running(dev))
1565 goto out;
1566
1567 /* Interrupts already disabled by sc92031_suspend */
1568 spin_lock(&priv->lock);
1569
1570 _sc92031_reset(dev);
1571 mmiowb();
1572
1573 spin_unlock(&priv->lock);
1574 sc92031_enable_interrupts(dev);
1575
1576 netif_device_attach(dev);
1577
1578 if (netif_carrier_ok(dev))
1579 netif_wake_queue(dev);
1580 else
1581 netif_tx_disable(dev);
1582
1583out:
1584 return 0;
1585}
1586
1587static struct pci_device_id sc92031_pci_device_id_table[] __devinitdata = {
1588 { PCI_DEVICE(PCI_VENDOR_ID_SILAN, PCI_DEVICE_ID_SILAN_SC92031) },
1589 { PCI_DEVICE(PCI_VENDOR_ID_SILAN, PCI_DEVICE_ID_SILAN_8139D) },
1590 { 0, }
1591};
1592MODULE_DEVICE_TABLE(pci, sc92031_pci_device_id_table);
1593
1594static struct pci_driver sc92031_pci_driver = {
1595 .name = SC92031_NAME,
1596 .id_table = sc92031_pci_device_id_table,
1597 .probe = sc92031_probe,
1598 .remove = __devexit_p(sc92031_remove),
1599 .suspend = sc92031_suspend,
1600 .resume = sc92031_resume,
1601};
1602
1603static int __init sc92031_init(void)
1604{
1605 printk(KERN_INFO SC92031_DESCRIPTION " " SC92031_VERSION "\n");
1606 return pci_register_driver(&sc92031_pci_driver);
1607}
1608
1609static void __exit sc92031_exit(void)
1610{
1611 pci_unregister_driver(&sc92031_pci_driver);
1612}
1613
1614module_init(sc92031_init);
1615module_exit(sc92031_exit);
1616
1617MODULE_LICENSE("GPL");
1618MODULE_AUTHOR("Cesar Eduardo Barros <cesarb@cesarb.net>");
1619MODULE_DESCRIPTION(SC92031_DESCRIPTION);
1620MODULE_VERSION(SC92031_VERSION);
diff --git a/drivers/net/sk_mca.c b/drivers/net/sk_mca.c
deleted file mode 100644
index 96e06c51b75d..000000000000
--- a/drivers/net/sk_mca.c
+++ /dev/null
@@ -1,1216 +0,0 @@
1/*
2net-3-driver for the SKNET MCA-based cards
3
4This is an extension to the Linux operating system, and is covered by the
5same GNU General Public License that covers that work.
6
7Copyright 1999 by Alfred Arnold (alfred@ccac.rwth-aachen.de,
8 alfred.arnold@lancom.de)
9
10This driver is based both on the 3C523 driver and the SK_G16 driver.
11
12paper sources:
13 'PC Hardware: Aufbau, Funktionsweise, Programmierung' by
14 Hans-Peter Messmer for the basic Microchannel stuff
15
16	'Linux Geraetetreiber' by Alessandro Rubini, Kalle Dalheimer
17 for help on Ethernet driver programming
18
19 'Ethernet/IEEE 802.3 Family 1992 World Network Data Book/Handbook' by AMD
20 for documentation on the AM7990 LANCE
21
22 'SKNET Personal Technisches Manual', Version 1.2 by Schneider&Koch
23 for documentation on the Junior board
24
25	'SK-NET MC2+ Technical Manual', Version 1.1 by Schneider&Koch for
26	documentation on the MC2 board
27
28 A big thank you to the S&K support for providing me so quickly with
29 documentation!
30
31 Also see http://www.syskonnect.com/
32
33 Missing things:
34
35 -> set debug level via ioctl instead of compile-time switches
36 -> I didn't follow the development of the 2.1.x kernels, so my
37 assumptions about which things changed with which kernel version
38 are probably nonsense
39
40History:
41 May 16th, 1999
42 startup
43	May 22nd, 1999
44 added private structure, methods
45 begun building data structures in RAM
46	May 23rd, 1999
47 can receive frames, send frames
48 May 24th, 1999
49 modularized initialization of LANCE
50 loadable as module
51 still Tx problem :-(
52 May 26th, 1999
53 MC2 works
54 support for multiple devices
55 display media type for MC2+
56 May 28th, 1999
57 fixed problem in GetLANCE leaving interrupts turned off
58 increase TX queue to 4 packets to improve send performance
59 May 29th, 1999
60 a few corrections in statistics, caught rcvr overruns
61 reinitialization of LANCE/board in critical situations
62 MCA info implemented
63 implemented LANCE multicast filter
64 Jun 6th, 1999
65 additions for Linux 2.2
66 Dec 25th, 1999
67	unfortunately there seem to be newer MC2+ boards that respond
68	on IRQ 3/5/9/10 instead of 3/5/10/11, so we have to autoprobe
69 in questionable cases...
70 Dec 28th, 1999
71 integrated patches from David Weinehall & Bill Wendling for 2.3
72 kernels (isa_...functions). Things are defined in a way that
73 it still works with 2.0.x 8-)
74 Dec 30th, 1999
75 added handling of the remaining interrupt conditions. That
76 should cure the spurious hangs.
77 Jan 30th, 2000
78 newer kernels automatically probe more than one board, so the
79 'startslot' as a variable is also needed here
80 June 1st, 2000
81 added changes for recent 2.3 kernels
82
83 *************************************************************************/
84
85#include <linux/kernel.h>
86#include <linux/string.h>
87#include <linux/errno.h>
88#include <linux/ioport.h>
89#include <linux/slab.h>
90#include <linux/interrupt.h>
91#include <linux/delay.h>
92#include <linux/time.h>
#include <linux/jiffies.h>
93#include <linux/mca-legacy.h>
94#include <linux/init.h>
95#include <linux/module.h>
96#include <linux/netdevice.h>
97#include <linux/etherdevice.h>
98#include <linux/skbuff.h>
99#include <linux/bitops.h>
100
101#include <asm/processor.h>
102#include <asm/io.h>
103
104#define _SK_MCA_DRIVER_
105#include "sk_mca.h"
106
107/* ------------------------------------------------------------------------
108 * global static data - not more since we can handle multiple boards and
109 * have to pack all state info into the device struct!
110 * ------------------------------------------------------------------------ */
111
112static char *MediaNames[Media_Count] =
113 { "10Base2", "10BaseT", "10Base5", "Unknown" };
114
115static unsigned char poly[] =
116 { 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0,
117 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0
118};
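/* Note: read LSB-first, one bit per array element, this is the standard
   Ethernet CRC-32 polynomial - poly[31] down to poly[0] spells
   0000 0100 1100 0001 0001 1101 1011 0111, i.e. 0x04c11db7. */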
119
120/* ------------------------------------------------------------------------
121 * private subfunctions
122 * ------------------------------------------------------------------------ */
123
124/* dump parts of shared memory - only needed during debugging */
125
126#ifdef DEBUG
127static void dumpmem(struct net_device *dev, u32 start, u32 len)
128{
129 skmca_priv *priv = netdev_priv(dev);
130 int z;
131
132 for (z = 0; z < len; z++) {
133 if ((z & 15) == 0)
134 printk("%04x:", z);
135 printk(" %02x", readb(priv->base + start + z));
136 if ((z & 15) == 15)
137 printk("\n");
138 }
139}
140
141/* print exact time - ditto */
142
143static void PrTime(void)
144{
145 struct timeval tv;
146
147 do_gettimeofday(&tv);
148 printk("%9d:%06d: ", tv.tv_sec, tv.tv_usec);
149}
150#endif
151
152/* deduce resources out of POS registers */
153
154static void __init getaddrs(int slot, int junior, int *base, int *irq,
155 skmca_medium * medium)
156{
157 u_char pos0, pos1, pos2;
158
159 if (junior) {
160 pos0 = mca_read_stored_pos(slot, 2);
161 *base = ((pos0 & 0x0e) << 13) + 0xc0000;
162 *irq = ((pos0 & 0x10) >> 4) + 10;
163 *medium = Media_Unknown;
164 } else {
165 /* reset POS 104 Bits 0+1 so the shared memory region goes to the
166 configured area between 640K and 1M. Afterwards, enable the MC2.
167	   I really don't know what possessed SK to do this... */
168
169 mca_write_pos(slot, 4,
170 mca_read_stored_pos(slot, 4) & 0xfc);
171 mca_write_pos(slot, 2,
172 mca_read_stored_pos(slot, 2) | 0x01);
173
174 pos1 = mca_read_stored_pos(slot, 3);
175 pos2 = mca_read_stored_pos(slot, 4);
176 *base = ((pos1 & 0x07) << 14) + 0xc0000;
177 switch (pos2 & 0x0c) {
178 case 0:
179 *irq = 3;
180 break;
181 case 4:
182 *irq = 5;
183 break;
184 case 8:
185 *irq = -10;
186 break;
187 case 12:
188 *irq = -11;
189 break;
190 }
191 *medium = (pos2 >> 6) & 3;
192 }
193}
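/* Illustrative decode for the Junior branch above (hypothetical POS value):
   pos0 = 0x16 gives *base = ((0x16 & 0x0e) << 13) + 0xc0000 = 0xcc000 and
   *irq = ((0x16 & 0x10) >> 4) + 10 = 11. The negative IRQ values returned
   for the MC2+ flag ambiguous POS combinations that must be autoprobed
   later (see skmca_probe). */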
194
195/* check for both cards:
196   When the MC2 is turned off because it was configured for more than
197   15MB RAM, it is disabled and won't get detected using the standard
198   probe. We therefore have to scan the slots manually :-( */
199
200static int __init dofind(int *junior, int firstslot)
201{
202 int slot;
203 unsigned int id;
204
205 for (slot = firstslot; slot < MCA_MAX_SLOT_NR; slot++) {
206 id = mca_read_stored_pos(slot, 0)
207 + (((unsigned int) mca_read_stored_pos(slot, 1)) << 8);
208
209 *junior = 0;
210 if (id == SKNET_MCA_ID)
211 return slot;
212 *junior = 1;
213 if (id == SKNET_JUNIOR_MCA_ID)
214 return slot;
215 }
216 return MCA_NOTFOUND;
217}
218
219/* reset the whole board */
220
221static void ResetBoard(struct net_device *dev)
222{
223 skmca_priv *priv = netdev_priv(dev);
224
225 writeb(CTRL_RESET_ON, priv->ctrladdr);
226 udelay(10);
227 writeb(CTRL_RESET_OFF, priv->ctrladdr);
228}
229
230/* wait for LANCE interface to become not busy */
231
232static int WaitLANCE(struct net_device *dev)
233{
234 skmca_priv *priv = netdev_priv(dev);
235 int t = 0;
236
237 while ((readb(priv->ctrladdr) & STAT_IO_BUSY) ==
238 STAT_IO_BUSY) {
239 udelay(1);
240 if (++t > 1000) {
241			printk("%s: LANCE access timeout\n", dev->name);
242 return 0;
243 }
244 }
245
246 return 1;
247}
248
249/* set LANCE register - must be atomic */
250
251static void SetLANCE(struct net_device *dev, u16 addr, u16 value)
252{
253 skmca_priv *priv = netdev_priv(dev);
254 unsigned long flags;
255
256 /* disable interrupts */
257
258 spin_lock_irqsave(&priv->lock, flags);
259
260 /* wait until no transfer is pending */
261
262 WaitLANCE(dev);
263
264 /* transfer register address to RAP */
265
266 writeb(CTRL_RESET_OFF | CTRL_RW_WRITE | CTRL_ADR_RAP, priv->ctrladdr);
267 writew(addr, priv->ioregaddr);
268 writeb(IOCMD_GO, priv->cmdaddr);
269 udelay(1);
270 WaitLANCE(dev);
271
272 /* transfer data to register */
273
274 writeb(CTRL_RESET_OFF | CTRL_RW_WRITE | CTRL_ADR_DATA, priv->ctrladdr);
275 writew(value, priv->ioregaddr);
276 writeb(IOCMD_GO, priv->cmdaddr);
277 udelay(1);
278 WaitLANCE(dev);
279
280 /* reenable interrupts */
281
282 spin_unlock_irqrestore(&priv->lock, flags);
283}
284
285/* get LANCE register */
286
287static u16 GetLANCE(struct net_device *dev, u16 addr)
288{
289 skmca_priv *priv = netdev_priv(dev);
290 unsigned long flags;
291 unsigned int res;
292
293 /* disable interrupts */
294
295 spin_lock_irqsave(&priv->lock, flags);
296
297 /* wait until no transfer is pending */
298
299 WaitLANCE(dev);
300
301 /* transfer register address to RAP */
302
303 writeb(CTRL_RESET_OFF | CTRL_RW_WRITE | CTRL_ADR_RAP, priv->ctrladdr);
304 writew(addr, priv->ioregaddr);
305 writeb(IOCMD_GO, priv->cmdaddr);
306 udelay(1);
307 WaitLANCE(dev);
308
309 /* transfer data from register */
310
311 writeb(CTRL_RESET_OFF | CTRL_RW_READ | CTRL_ADR_DATA, priv->ctrladdr);
312 writeb(IOCMD_GO, priv->cmdaddr);
313 udelay(1);
314 WaitLANCE(dev);
315 res = readw(priv->ioregaddr);
316
317 /* reenable interrupts */
318
319 spin_unlock_irqrestore(&priv->lock, flags);
320
321 return res;
322}
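/* Both accessors implement the LANCE's indirect register protocol: first
   latch the register number into the RAP, then transfer the data word.
   Typical use, as seen throughout this driver:

	u16 csr0 = GetLANCE(dev, LANCE_CSR0);
	SetLANCE(dev, LANCE_CSR0, csr0 | CSR0_INEA);
*/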
323
324/* build up descriptors in shared RAM */
325
326static void InitDscrs(struct net_device *dev)
327{
328 skmca_priv *priv = netdev_priv(dev);
329 u32 bufaddr;
330
331 /* Set up Tx descriptors. The board has only 16K RAM so bits 16..23
332 are always 0. */
333
334 bufaddr = RAM_DATABASE;
335 {
336 LANCE_TxDescr descr;
337 int z;
338
339 for (z = 0; z < TXCOUNT; z++) {
340 descr.LowAddr = bufaddr;
341 descr.Flags = 0;
342 descr.Len = 0xf000;
343 descr.Status = 0;
344 memcpy_toio(priv->base + RAM_TXBASE +
345 (z * sizeof(LANCE_TxDescr)), &descr,
346 sizeof(LANCE_TxDescr));
347 memset_io(priv->base + bufaddr, 0, RAM_BUFSIZE);
348 bufaddr += RAM_BUFSIZE;
349 }
350 }
351
352 /* do the same for the Rx descriptors */
353
354 {
355 LANCE_RxDescr descr;
356 int z;
357
358 for (z = 0; z < RXCOUNT; z++) {
359 descr.LowAddr = bufaddr;
360 descr.Flags = RXDSCR_FLAGS_OWN;
361 descr.MaxLen = -RAM_BUFSIZE;
362 descr.Len = 0;
363 memcpy_toio(priv->base + RAM_RXBASE +
364 (z * sizeof(LANCE_RxDescr)), &descr,
365 sizeof(LANCE_RxDescr));
366 memset_io(priv->base + bufaddr, 0, RAM_BUFSIZE);
367 bufaddr += RAM_BUFSIZE;
368 }
369 }
370}
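/* Layout check against the constants in sk_mca.h: RAM_TXBASE = 24,
   RAM_RXBASE = 24 + 4*8 = 56, RAM_DATABASE = 56 + 4*8 = 88, and the
   8 buffers of RAM_BUFSIZE (1580) bytes end at 88 + 12640 = 12728,
   comfortably inside the board's 16K shared memory window. */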
371
372/* calculate the hash bit position for a given multicast address
373 taken more or less directly from the AMD datasheet... */
374
375static void UpdateCRC(unsigned char *CRC, int bit)
376{
377 int j;
378
379 /* shift CRC one bit */
380
381 memmove(CRC + 1, CRC, 32 * sizeof(unsigned char));
382 CRC[0] = 0;
383
384 /* if bit XOR controlbit = 1, set CRC = CRC XOR polynomial */
385
386 if (bit ^ CRC[32])
387 for (j = 0; j < 32; j++)
388 CRC[j] ^= poly[j];
389}
390
391static int GetHash(char *address)
392{
393 unsigned char CRC[33];
394 int i, byte, hashcode;
395
396 /* a multicast address has bit 0 in the first byte set */
397
398 if ((address[0] & 1) == 0)
399 return -1;
400
401 /* initialize CRC */
402
403 memset(CRC, 1, sizeof(CRC));
404
405 /* loop through address bits */
406
407 for (byte = 0; byte < 6; byte++)
408 for (i = 0; i < 8; i++)
409 UpdateCRC(CRC, (address[byte] >> i) & 1);
410
411 /* hashcode is the 6 least significant bits of the CRC */
412
413 hashcode = 0;
414 for (i = 0; i < 6; i++)
415 hashcode = (hashcode << 1) + CRC[i];
416 return hashcode;
417}
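/* The 6 bits extracted here index the LANCE's 64-bit logical address
   filter: bits 5..3 of the hashcode select the LAdrF byte and bits 2..0
   the bit within it, as used in skmca_set_multicast_list() below. */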
418
419/* feed ready-built initialization block into LANCE */
420
421static void InitLANCE(struct net_device *dev)
422{
423 skmca_priv *priv = netdev_priv(dev);
424
425 /* build up descriptors. */
426
427 InitDscrs(dev);
428
429 /* next RX descriptor to be read is the first one. Since the LANCE
430 will start from the beginning after initialization, we have to
431	   reset our pointers too. */
432
433 priv->nextrx = 0;
434
435 /* no TX descriptors active */
436
437 priv->nexttxput = priv->nexttxdone = priv->txbusy = 0;
438
439 /* set up the LANCE bus control register - constant for SKnet boards */
440
441 SetLANCE(dev, LANCE_CSR3,
442 CSR3_BSWAP_OFF | CSR3_ALE_LOW | CSR3_BCON_HOLD);
443
444 /* write address of initialization block into LANCE */
445
446 SetLANCE(dev, LANCE_CSR1, RAM_INITBASE & 0xffff);
447 SetLANCE(dev, LANCE_CSR2, (RAM_INITBASE >> 16) & 0xff);
448
449 /* we don't get ready until the LANCE has read the init block */
450
451 netif_stop_queue(dev);
452
453 /* let LANCE read the initialization block. LANCE is ready
454 when we receive the corresponding interrupt. */
455
456 SetLANCE(dev, LANCE_CSR0, CSR0_INEA | CSR0_INIT);
457}
458
459/* stop the LANCE so we can reinitialize it */
460
461static void StopLANCE(struct net_device *dev)
462{
463 /* can't take frames any more */
464
465 netif_stop_queue(dev);
466
467 /* disable interrupts, stop it */
468
469 SetLANCE(dev, LANCE_CSR0, CSR0_STOP);
470}
471
472/* initialize card and LANCE for proper operation */
473
474static void InitBoard(struct net_device *dev)
475{
476 skmca_priv *priv = netdev_priv(dev);
477 LANCE_InitBlock block;
478
479 /* Lay out the shared RAM - first we create the init block for the LANCE.
480 We do not overwrite it later because we need it again when we switch
481	   promiscuous mode on/off. */
482
483 block.Mode = 0;
484 if (dev->flags & IFF_PROMISC)
485 block.Mode |= LANCE_INIT_PROM;
486 memcpy(block.PAdr, dev->dev_addr, 6);
487 memset(block.LAdrF, 0, sizeof(block.LAdrF));
488 block.RdrP = (RAM_RXBASE & 0xffffff) | (LRXCOUNT << 29);
489 block.TdrP = (RAM_TXBASE & 0xffffff) | (LTXCOUNT << 29);
490
491 memcpy_toio(priv->base + RAM_INITBASE, &block, sizeof(block));
492
493 /* initialize LANCE. Implicitly sets up other structures in RAM. */
494
495 InitLANCE(dev);
496}
497
498/* deinitialize card and LANCE */
499
500static void DeinitBoard(struct net_device *dev)
501{
502 /* stop LANCE */
503
504 StopLANCE(dev);
505
506 /* reset board */
507
508 ResetBoard(dev);
509}
510
511/* probe for device's irq */
512
513static int __init ProbeIRQ(struct net_device *dev)
514{
515 unsigned long imaskval, njiffies, irq;
516 u16 csr0val;
517
518 /* enable all interrupts */
519
520 imaskval = probe_irq_on();
521
522 /* initialize the board. Wait for interrupt 'Initialization done'. */
523
524 ResetBoard(dev);
525 InitBoard(dev);
526
527 njiffies = jiffies + HZ;
528 do {
529 csr0val = GetLANCE(dev, LANCE_CSR0);
530 }
531	while (((csr0val & CSR0_IDON) == 0) && time_before(jiffies, njiffies));
532
533 /* turn of interrupts again */
534
535 irq = probe_irq_off(imaskval);
536
537 /* if we found something, ack the interrupt */
538
539 if (irq)
540 SetLANCE(dev, LANCE_CSR0, csr0val | CSR0_IDON);
541
542 /* back to idle state */
543
544 DeinitBoard(dev);
545
546 return irq;
547}
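/* This is the classic probe_irq_on()/probe_irq_off() autoprobe pattern:
   arm probing, provoke exactly one known interrupt (IDON here), and let
   probe_irq_off() report which line actually fired. */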
548
549/* ------------------------------------------------------------------------
550 * interrupt handler(s)
551 * ------------------------------------------------------------------------ */
552
553/* LANCE has read initialization block -> start it */
554
555static u16 irqstart_handler(struct net_device *dev, u16 oldcsr0)
556{
557 /* now we're ready to transmit */
558
559 netif_wake_queue(dev);
560
561 /* reset IDON bit, start LANCE */
562
563 SetLANCE(dev, LANCE_CSR0, oldcsr0 | CSR0_IDON | CSR0_STRT);
564 return GetLANCE(dev, LANCE_CSR0);
565}
566
567/* did we lose blocks due to a FIFO overrun ? */
568
569static u16 irqmiss_handler(struct net_device *dev, u16 oldcsr0)
570{
571 skmca_priv *priv = netdev_priv(dev);
572
573 /* update statistics */
574
575 priv->stat.rx_fifo_errors++;
576
577 /* reset MISS bit */
578
579 SetLANCE(dev, LANCE_CSR0, oldcsr0 | CSR0_MISS);
580 return GetLANCE(dev, LANCE_CSR0);
581}
582
583/* receive interrupt */
584
585static u16 irqrx_handler(struct net_device *dev, u16 oldcsr0)
586{
587 skmca_priv *priv = netdev_priv(dev);
588 LANCE_RxDescr descr;
589 unsigned int descraddr;
590
591 /* run through queue until we reach a descriptor we do not own */
592
593 descraddr = RAM_RXBASE + (priv->nextrx * sizeof(LANCE_RxDescr));
594 while (1) {
595 /* read descriptor */
596 memcpy_fromio(&descr, priv->base + descraddr,
597 sizeof(LANCE_RxDescr));
598
599 /* if we reach a descriptor we do not own, we're done */
600 if ((descr.Flags & RXDSCR_FLAGS_OWN) != 0)
601 break;
602
603#ifdef DEBUG
604 PrTime();
605 printk("Receive packet on descr %d len %d\n", priv->nextrx,
606 descr.Len);
607#endif
608
609 /* erroneous packet ? */
610 if ((descr.Flags & RXDSCR_FLAGS_ERR) != 0) {
611 priv->stat.rx_errors++;
612 if ((descr.Flags & RXDSCR_FLAGS_CRC) != 0)
613 priv->stat.rx_crc_errors++;
614			else if ((descr.Flags & RXDSCR_FLAGS_FRAM) != 0)
615 priv->stat.rx_frame_errors++;
616 else if ((descr.Flags & RXDSCR_FLAGS_OFLO) != 0)
617 priv->stat.rx_fifo_errors++;
618 }
619
620 /* good packet ? */
621 else {
622 struct sk_buff *skb;
623
624 skb = dev_alloc_skb(descr.Len + 2);
625 if (skb == NULL)
626 priv->stat.rx_dropped++;
627			else {
				skb_reserve(skb, 2);	/* align the IP header */
628				memcpy_fromio(skb_put(skb, descr.Len),
629 priv->base +
630 descr.LowAddr, descr.Len);
631 skb->dev = dev;
632 skb->protocol = eth_type_trans(skb, dev);
633 skb->ip_summed = CHECKSUM_NONE;
634 priv->stat.rx_packets++;
635 priv->stat.rx_bytes += descr.Len;
636 netif_rx(skb);
637 dev->last_rx = jiffies;
638 }
639 }
640
641 /* give descriptor back to LANCE */
642 descr.Len = 0;
643 descr.Flags |= RXDSCR_FLAGS_OWN;
644
645 /* update descriptor in shared RAM */
646 memcpy_toio(priv->base + descraddr, &descr,
647 sizeof(LANCE_RxDescr));
648
649 /* go to next descriptor */
650 priv->nextrx++;
651 descraddr += sizeof(LANCE_RxDescr);
652 if (priv->nextrx >= RXCOUNT) {
653 priv->nextrx = 0;
654 descraddr = RAM_RXBASE;
655 }
656 }
657
658 /* reset RINT bit */
659
660 SetLANCE(dev, LANCE_CSR0, oldcsr0 | CSR0_RINT);
661 return GetLANCE(dev, LANCE_CSR0);
662}
663
664/* transmit interrupt */
665
666static u16 irqtx_handler(struct net_device *dev, u16 oldcsr0)
667{
668 skmca_priv *priv = netdev_priv(dev);
669 LANCE_TxDescr descr;
670 unsigned int descraddr;
671
672 /* check descriptors at most until no busy one is left */
673
674 descraddr =
675 RAM_TXBASE + (priv->nexttxdone * sizeof(LANCE_TxDescr));
676 while (priv->txbusy > 0) {
677 /* read descriptor */
678 memcpy_fromio(&descr, priv->base + descraddr,
679 sizeof(LANCE_TxDescr));
680
681 /* if the LANCE still owns this one, we've worked out all sent packets */
682 if ((descr.Flags & TXDSCR_FLAGS_OWN) != 0)
683 break;
684
685#ifdef DEBUG
686 PrTime();
687 printk("Send packet done on descr %d\n", priv->nexttxdone);
688#endif
689
690 /* update statistics */
691 if ((descr.Flags & TXDSCR_FLAGS_ERR) == 0) {
692 priv->stat.tx_packets++;
693			priv->stat.tx_bytes += 65536 - descr.Len;	/* Len is 2s complement */
694 } else {
695 priv->stat.tx_errors++;
696 if ((descr.Status & TXDSCR_STATUS_UFLO) != 0) {
697 priv->stat.tx_fifo_errors++;
698 InitLANCE(dev);
699 }
700			else if ((descr.Status &
701				  TXDSCR_STATUS_LCOL) != 0)
702				priv->stat.tx_window_errors++;
703 else if ((descr.Status & TXDSCR_STATUS_LCAR) != 0)
704 priv->stat.tx_carrier_errors++;
705 else if ((descr.Status & TXDSCR_STATUS_RTRY) != 0)
706 priv->stat.tx_aborted_errors++;
707 }
708
709 /* go to next descriptor */
710 priv->nexttxdone++;
711 descraddr += sizeof(LANCE_TxDescr);
712 if (priv->nexttxdone >= TXCOUNT) {
713 priv->nexttxdone = 0;
714 descraddr = RAM_TXBASE;
715 }
716 priv->txbusy--;
717 }
718
719 /* reset TX interrupt bit */
720
721 SetLANCE(dev, LANCE_CSR0, oldcsr0 | CSR0_TINT);
722 oldcsr0 = GetLANCE(dev, LANCE_CSR0);
723
724 /* at least one descriptor is freed. Therefore we can accept
725 a new one */
726 /* inform upper layers we're in business again */
727
728 netif_wake_queue(dev);
729
730 return oldcsr0;
731}
732
733/* general interrupt entry */
734
735static irqreturn_t irq_handler(int irq, void *device)
736{
737 struct net_device *dev = (struct net_device *) device;
738 u16 csr0val;
739
740 /* read CSR0 to get interrupt cause */
741
742 csr0val = GetLANCE(dev, LANCE_CSR0);
743
744 /* in case we're not meant... */
745
746 if ((csr0val & CSR0_INTR) == 0)
747 return IRQ_NONE;
748
749#if 0
750 set_bit(LINK_STATE_RXSEM, &dev->state);
751#endif
752
753 /* loop through the interrupt bits until everything is clear */
754
755 do {
756 if ((csr0val & CSR0_IDON) != 0)
757 csr0val = irqstart_handler(dev, csr0val);
758 if ((csr0val & CSR0_RINT) != 0)
759 csr0val = irqrx_handler(dev, csr0val);
760 if ((csr0val & CSR0_MISS) != 0)
761 csr0val = irqmiss_handler(dev, csr0val);
762 if ((csr0val & CSR0_TINT) != 0)
763 csr0val = irqtx_handler(dev, csr0val);
764 if ((csr0val & CSR0_MERR) != 0) {
765 SetLANCE(dev, LANCE_CSR0, csr0val | CSR0_MERR);
766 csr0val = GetLANCE(dev, LANCE_CSR0);
767 }
768 if ((csr0val & CSR0_BABL) != 0) {
769 SetLANCE(dev, LANCE_CSR0, csr0val | CSR0_BABL);
770 csr0val = GetLANCE(dev, LANCE_CSR0);
771 }
772 }
773 while ((csr0val & CSR0_INTR) != 0);
774
775#if 0
776 clear_bit(LINK_STATE_RXSEM, &dev->state);
777#endif
778 return IRQ_HANDLED;
779}
780
781/* ------------------------------------------------------------------------
782 * driver methods
783 * ------------------------------------------------------------------------ */
784
785/* MCA info */
786
787static int skmca_getinfo(char *buf, int slot, void *d)
788{
789 int len = 0, i;
790 struct net_device *dev = (struct net_device *) d;
791 skmca_priv *priv;
792
793 /* can't say anything about an uninitialized device... */
794
795 if (dev == NULL)
796 return len;
797 priv = netdev_priv(dev);
798
799 /* print info */
800
801 len += sprintf(buf + len, "IRQ: %d\n", priv->realirq);
802 len += sprintf(buf + len, "Memory: %#lx-%#lx\n", dev->mem_start,
803 dev->mem_end - 1);
804 len +=
805 sprintf(buf + len, "Transceiver: %s\n",
806 MediaNames[priv->medium]);
807 len += sprintf(buf + len, "Device: %s\n", dev->name);
808 len += sprintf(buf + len, "MAC address:");
809 for (i = 0; i < 6; i++)
810 len += sprintf(buf + len, " %02x", dev->dev_addr[i]);
811 buf[len++] = '\n';
812 buf[len] = 0;
813
814 return len;
815}
816
817/* open driver. Means also initialization and start of LANCE */
818
819static int skmca_open(struct net_device *dev)
820{
821 int result;
822 skmca_priv *priv = netdev_priv(dev);
823
824 /* register resources - only necessary for IRQ */
825 result =
826 request_irq(priv->realirq, irq_handler,
827 IRQF_SHARED | IRQF_SAMPLE_RANDOM, "sk_mca", dev);
828 if (result != 0) {
829 printk("%s: failed to register irq %d\n", dev->name,
830		       priv->realirq);
831 return result;
832 }
833 dev->irq = priv->realirq;
834
835 /* set up the card and LANCE */
836
837 InitBoard(dev);
838
839 /* set up flags */
840
841 netif_start_queue(dev);
842
843 return 0;
844}
845
846/* close driver. Shut down board and free allocated resources */
847
848static int skmca_close(struct net_device *dev)
849{
850 /* turn off board */
851 DeinitBoard(dev);
852
853 /* release resources */
854 if (dev->irq != 0)
855 free_irq(dev->irq, dev);
856 dev->irq = 0;
857
858 return 0;
859}
860
861/* transmit a block. */
862
863static int skmca_tx(struct sk_buff *skb, struct net_device *dev)
864{
865 skmca_priv *priv = netdev_priv(dev);
866 LANCE_TxDescr descr;
867 unsigned int address;
868 int tmplen, retval = 0;
869 unsigned long flags;
870
871 /* if we get called with a NULL descriptor, the Ethernet layer thinks
872   our card is stuck and we should reset it. We'll do this completely: */
873
874 if (skb == NULL) {
875 DeinitBoard(dev);
876 InitBoard(dev);
877 return 0; /* don't try to free the block here ;-) */
878 }
879
880 /* is there space in the Tx queue ? If no, the upper layer gave us a
881 packet in spite of us not being ready and is really in trouble.
882 We'll do the dropping for him: */
883 if (priv->txbusy >= TXCOUNT) {
884 priv->stat.tx_dropped++;
885 retval = -EIO;
886 goto tx_done;
887 }
888
889 /* get TX descriptor */
890 address = RAM_TXBASE + (priv->nexttxput * sizeof(LANCE_TxDescr));
891 memcpy_fromio(&descr, priv->base + address, sizeof(LANCE_TxDescr));
892
893 /* enter packet length as 2s complement - assure minimum length */
894 tmplen = skb->len;
895 if (tmplen < 60)
896 tmplen = 60;
897 descr.Len = 65536 - tmplen;
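	/* e.g. a 1514-byte frame is stored as descr.Len = 65536 - 1514 =
	   0xfa16; the LANCE reads the field as the negated length. */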
898
899 /* copy filler into RAM - in case we're filling up...
900 we're filling a bit more than necessary, but that doesn't harm
901 since the buffer is far larger... */
902 if (tmplen > skb->len) {
903 char *fill = "NetBSD is a nice OS too! ";
904 unsigned int destoffs = 0, l = strlen(fill);
905
906 while (destoffs < tmplen) {
907 memcpy_toio(priv->base + descr.LowAddr +
908 destoffs, fill, l);
909 destoffs += l;
910 }
911 }
912
913 /* do the real data copying */
914 memcpy_toio(priv->base + descr.LowAddr, skb->data, skb->len);
915
916 /* hand descriptor over to LANCE - this is the first and last chunk */
917 descr.Flags =
918 TXDSCR_FLAGS_OWN | TXDSCR_FLAGS_STP | TXDSCR_FLAGS_ENP;
919
920#ifdef DEBUG
921 PrTime();
922 printk("Send packet on descr %d len %d\n", priv->nexttxput,
923 skb->len);
924#endif
925
926 /* one more descriptor busy */
927
928 spin_lock_irqsave(&priv->lock, flags);
929
930 priv->nexttxput++;
931 if (priv->nexttxput >= TXCOUNT)
932 priv->nexttxput = 0;
933 priv->txbusy++;
934
935 /* are we saturated ? */
936
937 if (priv->txbusy >= TXCOUNT)
938 netif_stop_queue(dev);
939
940 /* write descriptor back to RAM */
941 memcpy_toio(priv->base + address, &descr, sizeof(LANCE_TxDescr));
942
943	/* if this descriptor is the only active one (txbusy was incremented
944	   above), give the LANCE a hint to read it immediately */
945
946	if (priv->txbusy == 1)
947 SetLANCE(dev, LANCE_CSR0, CSR0_INEA | CSR0_TDMD);
948
949 spin_unlock_irqrestore(&priv->lock, flags);
950
951 tx_done:
952
953 dev_kfree_skb(skb);
954
955 return retval;
956}
957
958/* return pointer to Ethernet statistics */
959
960static struct net_device_stats *skmca_stats(struct net_device *dev)
961{
962 skmca_priv *priv = netdev_priv(dev);
963
964 return &(priv->stat);
965}
966
967/* switch receiver mode. We use the LANCE's multicast filter to prefilter
968 multicast addresses. */
969
970static void skmca_set_multicast_list(struct net_device *dev)
971{
972 skmca_priv *priv = netdev_priv(dev);
973 LANCE_InitBlock block;
974
975 /* first stop the LANCE... */
976 StopLANCE(dev);
977
978 /* ...then modify the initialization block... */
979 memcpy_fromio(&block, priv->base + RAM_INITBASE, sizeof(block));
980 if (dev->flags & IFF_PROMISC)
981 block.Mode |= LANCE_INIT_PROM;
982 else
983 block.Mode &= ~LANCE_INIT_PROM;
984
985 if (dev->flags & IFF_ALLMULTI) { /* get all multicasts */
986 memset(block.LAdrF, 0xff, sizeof(block.LAdrF));
987 } else { /* get selected/no multicasts */
988
989 struct dev_mc_list *mptr;
990 int code;
991
992 memset(block.LAdrF, 0, sizeof(block.LAdrF));
993 for (mptr = dev->mc_list; mptr != NULL; mptr = mptr->next) {
994 code = GetHash(mptr->dmi_addr);
995 block.LAdrF[(code >> 3) & 7] |= 1 << (code & 7);
996 }
997 }
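		/* Illustrative case (hypothetical hash value): GetHash()
		   returning 0x2a sets bit 2 of LAdrF[5], since
		   (0x2a >> 3) & 7 == 5 and 0x2a & 7 == 2. */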
998
999 memcpy_toio(priv->base + RAM_INITBASE, &block, sizeof(block));
1000
1001 /* ...then reinit LANCE with the correct flags */
1002 InitLANCE(dev);
1003}
1004
1005/* ------------------------------------------------------------------------
1006 * hardware check
1007 * ------------------------------------------------------------------------ */
1008
1009static int startslot; /* counts through slots when probing multiple devices */
1010
1011static void cleanup_card(struct net_device *dev)
1012{
1013 skmca_priv *priv = netdev_priv(dev);
1014 DeinitBoard(dev);
1015 if (dev->irq != 0)
1016 free_irq(dev->irq, dev);
1017 iounmap(priv->base);
1018 mca_mark_as_unused(priv->slot);
1019 mca_set_adapter_procfn(priv->slot, NULL, NULL);
1020}
1021
1022struct net_device * __init skmca_probe(int unit)
1023{
1024 struct net_device *dev;
1025 int force_detect = 0;
1026 int junior, slot, i;
1027 int base = 0, irq = 0;
1028 skmca_priv *priv;
1029 skmca_medium medium;
1030 int err;
1031
1032 /* can't work without an MCA bus ;-) */
1033
1034 if (MCA_bus == 0)
1035 return ERR_PTR(-ENODEV);
1036
1037 dev = alloc_etherdev(sizeof(skmca_priv));
1038 if (!dev)
1039 return ERR_PTR(-ENOMEM);
1040
1041 if (unit >= 0) {
1042 sprintf(dev->name, "eth%d", unit);
1043 netdev_boot_setup_check(dev);
1044 }
1045
1046 SET_MODULE_OWNER(dev);
1047
1048 /* start address of 1 --> forced detection */
1049
1050 if (dev->mem_start == 1)
1051 force_detect = 1;
1052
1053 /* search through slots */
1054
1055 base = dev->mem_start;
1056 irq = dev->base_addr;
1057 for (slot = startslot; (slot = dofind(&junior, slot)) != -1; slot++) {
1058 /* deduce card addresses */
1059
1060 getaddrs(slot, junior, &base, &irq, &medium);
1061
1062 /* slot already in use ? */
1063
1064 if (mca_is_adapter_used(slot))
1065 continue;
1066
1067 /* were we looking for something different ? */
1068
1069 if (dev->irq && dev->irq != irq)
1070 continue;
1071 if (dev->mem_start && dev->mem_start != base)
1072 continue;
1073
1074 /* found something that matches */
1075
1076 break;
1077 }
1078
1079 /* nothing found ? */
1080
1081 if (slot == -1) {
1082 free_netdev(dev);
1083 return (base || irq) ? ERR_PTR(-ENXIO) : ERR_PTR(-ENODEV);
1084 }
1085
1086 /* make procfs entries */
1087
1088 if (junior)
1089 mca_set_adapter_name(slot,
1090 "SKNET junior MC2 Ethernet Adapter");
1091 else
1092 mca_set_adapter_name(slot, "SKNET MC2+ Ethernet Adapter");
1093 mca_set_adapter_procfn(slot, (MCA_ProcFn) skmca_getinfo, dev);
1094
1095 mca_mark_as_used(slot);
1096
1097 /* announce success */
1098 printk("%s: SKNet %s adapter found in slot %d\n", dev->name,
1099 junior ? "Junior MC2" : "MC2+", slot + 1);
1100
1101 priv = netdev_priv(dev);
1102 priv->base = ioremap(base, 0x4000);
1103 if (!priv->base) {
1104 mca_set_adapter_procfn(slot, NULL, NULL);
1105 mca_mark_as_unused(slot);
1106 free_netdev(dev);
1107 return ERR_PTR(-ENOMEM);
1108 }
1109
1110 priv->slot = slot;
1111 priv->macbase = priv->base + 0x3fc0;
1112 priv->ioregaddr = priv->base + 0x3ff0;
1113 priv->ctrladdr = priv->base + 0x3ff2;
1114 priv->cmdaddr = priv->base + 0x3ff3;
1115 priv->medium = medium;
1116 memset(&priv->stat, 0, sizeof(struct net_device_stats));
1117 spin_lock_init(&priv->lock);
1118
1119 /* set base + irq for this device (irq not allocated so far) */
1120 dev->irq = 0;
1121 dev->mem_start = base;
1122 dev->mem_end = base + 0x4000;
1123
1124 /* autoprobe ? */
1125 if (irq < 0) {
1126 int nirq;
1127
1128		printk("%s: ambiguous POS bit combination, "
1129		       "must probe for IRQ...\n",
1130		       dev->name);
1131 nirq = ProbeIRQ(dev);
1132 if (nirq <= 0)
1133			printk("%s: IRQ probe failed, assuming IRQ %d\n",
1134 dev->name, priv->realirq = -irq);
1135 else
1136 priv->realirq = nirq;
1137 } else
1138 priv->realirq = irq;
1139
1140 /* set methods */
1141 dev->open = skmca_open;
1142 dev->stop = skmca_close;
1143 dev->hard_start_xmit = skmca_tx;
1144 dev->do_ioctl = NULL;
1145 dev->get_stats = skmca_stats;
1146 dev->set_multicast_list = skmca_set_multicast_list;
1147 dev->flags |= IFF_MULTICAST;
1148
1149 /* copy out MAC address */
1150 for (i = 0; i < 6; i++)
1151 dev->dev_addr[i] = readb(priv->macbase + (i << 1));
1152
1153 /* print config */
1154 printk("%s: IRQ %d, memory %#lx-%#lx, "
1155 "MAC address %02x:%02x:%02x:%02x:%02x:%02x.\n",
1156 dev->name, priv->realirq, dev->mem_start, dev->mem_end - 1,
1157 dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
1158 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
1159 printk("%s: %s medium\n", dev->name, MediaNames[priv->medium]);
1160
1161 /* reset board */
1162
1163 ResetBoard(dev);
1164
1165 startslot = slot + 1;
1166
1167 err = register_netdev(dev);
1168 if (err) {
1169 cleanup_card(dev);
1170 free_netdev(dev);
1171 dev = ERR_PTR(err);
1172 }
1173 return dev;
1174}
1175
1176/* ------------------------------------------------------------------------
1177 * modularization support
1178 * ------------------------------------------------------------------------ */
1179
1180#ifdef MODULE
1181MODULE_LICENSE("GPL");
1182
1183#define DEVMAX 5
1184
1185static struct net_device *moddevs[DEVMAX];
1186
1187int init_module(void)
1188{
1189 int z;
1190
1191 startslot = 0;
1192 for (z = 0; z < DEVMAX; z++) {
1193 struct net_device *dev = skmca_probe(-1);
1194 if (IS_ERR(dev))
1195 break;
1196 moddevs[z] = dev;
1197 }
1198 if (!z)
1199 return -EIO;
1200 return 0;
1201}
1202
1203void cleanup_module(void)
1204{
1205 int z;
1206
1207 for (z = 0; z < DEVMAX; z++) {
1208 struct net_device *dev = moddevs[z];
1209 if (dev) {
1210 unregister_netdev(dev);
1211 cleanup_card(dev);
1212 free_netdev(dev);
1213 }
1214 }
1215}
1216#endif /* MODULE */
diff --git a/drivers/net/sk_mca.h b/drivers/net/sk_mca.h
deleted file mode 100644
index 0dae056fed99..000000000000
--- a/drivers/net/sk_mca.h
+++ /dev/null
@@ -1,170 +0,0 @@
1#ifndef _SK_MCA_INCLUDE_
2#define _SK_MCA_INCLUDE_
3
4#ifdef _SK_MCA_DRIVER_
5
6/* Adapter ID's */
7#define SKNET_MCA_ID 0x6afd
8#define SKNET_JUNIOR_MCA_ID 0x6be9
9
10/* media enumeration - defined in a way that it fits onto the MC2+'s
11 POS registers... */
12
13typedef enum { Media_10Base2, Media_10BaseT,
14 Media_10Base5, Media_Unknown, Media_Count
15} skmca_medium;
16
17/* private structure */
18typedef struct {
19 unsigned int slot; /* MCA-Slot-# */
20 void __iomem *base;
21 void __iomem *macbase; /* base address of MAC address PROM */
22 void __iomem *ioregaddr;/* address of I/O-register (Lo) */
23 void __iomem *ctrladdr; /* address of control/stat register */
24 void __iomem *cmdaddr; /* address of I/O-command register */
25 int nextrx; /* index of next RX descriptor to
26 be read */
27 int nexttxput; /* index of next free TX descriptor */
28 int nexttxdone; /* index of next TX descriptor to
29 be finished */
30 int txbusy; /* # of busy TX descriptors */
31 struct net_device_stats stat; /* packet statistics */
32 int realirq; /* memorizes actual IRQ, even when
33 currently not allocated */
34	skmca_medium medium;	/* physical connector */
35 spinlock_t lock;
36} skmca_priv;
37
38/* card registers: control/status register bits */
39
40#define CTRL_ADR_DATA 0 /* Bit 0 = 0 ->access data register */
41#define CTRL_ADR_RAP 1 /* Bit 0 = 1 ->access RAP register */
42#define CTRL_RW_WRITE 0 /* Bit 1 = 0 ->write register */
43#define CTRL_RW_READ 2 /* Bit 1 = 1 ->read register */
44#define CTRL_RESET_ON 0 /* Bit 3 = 0 ->reset board */
45#define CTRL_RESET_OFF 8 /* Bit 3 = 1 ->no reset of board */
46
47#define STAT_ADR_DATA 0 /* Bit 0 of ctrl register read back */
48#define STAT_ADR_RAP 1
49#define STAT_RW_WRITE 0 /* Bit 1 of ctrl register read back */
50#define STAT_RW_READ 2
51#define STAT_RESET_ON 0 /* Bit 3 of ctrl register read back */
52#define STAT_RESET_OFF 8
53#define STAT_IRQ_ACT 0 /* interrupt pending */
54#define STAT_IRQ_NOACT 16 /* no interrupt pending */
55#define STAT_IO_NOBUSY 0 /* no transfer busy */
56#define STAT_IO_BUSY 32 /* transfer busy */
57
58/* I/O command register bits */
59
60#define IOCMD_GO 128 /* Bit 7 = 1 -> start register xfer */
61
62/* LANCE registers */
63
64#define LANCE_CSR0 0 /* Status/Control */
65
66#define CSR0_ERR 0x8000 /* general error flag */
67#define CSR0_BABL 0x4000 /* transmitter timeout */
68#define CSR0_CERR 0x2000 /* collision error */
69#define CSR0_MISS 0x1000 /* lost Rx block */
70#define CSR0_MERR 0x0800 /* memory access error */
71#define CSR0_RINT 0x0400 /* receiver interrupt */
72#define CSR0_TINT 0x0200 /* transmitter interrupt */
73#define CSR0_IDON 0x0100 /* initialization done */
74#define CSR0_INTR 0x0080 /* general interrupt flag */
75#define CSR0_INEA 0x0040 /* interrupt enable */
76#define CSR0_RXON 0x0020 /* receiver enabled */
77#define CSR0_TXON 0x0010 /* transmitter enabled */
78#define CSR0_TDMD 0x0008 /* force transmission now */
79#define CSR0_STOP 0x0004 /* stop LANCE */
80#define CSR0_STRT 0x0002 /* start LANCE */
81#define CSR0_INIT 0x0001 /* read initialization block */
82
83#define LANCE_CSR1 1 /* addr bit 0..15 of initialization */
84#define LANCE_CSR2 2 /* 16..23 block */
85
86#define LANCE_CSR3 3 /* Bus control */
87#define CSR3_BCON_HOLD 0 /* Bit 0 = 0 -> BM1,BM0,HOLD */
88#define CSR3_BCON_BUSRQ 1 /* Bit 0 = 1 -> BUSAK0,BYTE,BUSRQ */
89#define CSR3_ALE_HIGH 0 /* Bit 1 = 0 -> ALE asserted high */
90#define CSR3_ALE_LOW 2 /* Bit 1 = 1 -> ALE asserted low */
91#define CSR3_BSWAP_OFF 0 /* Bit 2 = 0 -> no byte swap */
92#define CSR3_BSWAP_ON 4 /* Bit 2 = 1 -> byte swap */
93
94/* LANCE structures */
95
96typedef struct { /* LANCE initialization block */
97 u16 Mode; /* mode flags */
98 u8 PAdr[6]; /* MAC address */
99 u8 LAdrF[8]; /* Multicast filter */
100 u32 RdrP; /* Receive descriptor */
101 u32 TdrP; /* Transmit descriptor */
102} LANCE_InitBlock;
103
104/* Mode flags init block */
105
106#define LANCE_INIT_PROM    0x8000	/* enable promiscuous mode */
107#define LANCE_INIT_INTL 0x0040 /* internal loopback */
108#define LANCE_INIT_DRTY 0x0020 /* disable retry */
109#define LANCE_INIT_COLL 0x0010 /* force collision */
110#define LANCE_INIT_DTCR 0x0008 /* disable transmit CRC */
111#define LANCE_INIT_LOOP 0x0004 /* loopback */
112#define LANCE_INIT_DTX 0x0002 /* disable transmitter */
113#define LANCE_INIT_DRX 0x0001 /* disable receiver */
114
115typedef struct { /* LANCE Tx descriptor */
116 u16 LowAddr; /* bit 0..15 of address */
117 u16 Flags; /* bit 16..23 of address + Flags */
118 u16 Len; /* 2s complement of packet length */
119 u16 Status; /* Result of transmission */
120} LANCE_TxDescr;
121
122#define TXDSCR_FLAGS_OWN 0x8000 /* LANCE owns descriptor */
123#define TXDSCR_FLAGS_ERR 0x4000 /* summary error flag */
124#define TXDSCR_FLAGS_MORE 0x1000 /* more than one retry needed? */
125#define TXDSCR_FLAGS_ONE 0x0800 /* one retry? */
126#define TXDSCR_FLAGS_DEF 0x0400 /* transmission deferred? */
127#define TXDSCR_FLAGS_STP 0x0200 /* first packet in chain? */
128#define TXDSCR_FLAGS_ENP 0x0100 /* last packet in chain? */
129
130#define TXDSCR_STATUS_BUFF 0x8000 /* buffer error? */
131#define TXDSCR_STATUS_UFLO 0x4000 /* silo underflow during transmit? */
132#define TXDSCR_STATUS_LCOL 0x1000 /* late collision? */
133#define TXDSCR_STATUS_LCAR 0x0800 /* loss of carrier? */
134#define TXDSCR_STATUS_RTRY 0x0400 /* retry error? */
135
136typedef struct { /* LANCE Rx descriptor */
137 u16 LowAddr; /* bit 0..15 of address */
138 u16 Flags; /* bit 16..23 of address + Flags */
139 u16 MaxLen; /* 2s complement of buffer length */
140 u16 Len; /* packet length */
141} LANCE_RxDescr;
142
143#define RXDSCR_FLAGS_OWN 0x8000 /* LANCE owns descriptor */
144#define RXDSCR_FLAGS_ERR 0x4000 /* summary error flag */
145#define RXDSCR_FLAGS_FRAM 0x2000 /* framing error flag */
146#define RXDSCR_FLAGS_OFLO 0x1000 /* FIFO overflow? */
147#define RXDSCR_FLAGS_CRC 0x0800 /* CRC error? */
148#define RXDSCR_FLAGS_BUFF 0x0400 /* buffer error? */
149#define RXDSCR_FLAGS_STP 0x0200 /* first packet in chain? */
150#define RXDCSR_FLAGS_ENP 0x0100 /* last packet in chain? */
151
152/* RAM layout */
153
154#define TXCOUNT 4 /* length of TX descriptor queue */
155#define LTXCOUNT 2 /* log2 of it */
156#define RXCOUNT 4 /* length of RX descriptor queue */
157#define LRXCOUNT 2 /* log2 of it */
158
159#define RAM_INITBASE 0 /* LANCE init block */
160#define RAM_TXBASE 24 /* Start of TX descriptor queue */
161#define RAM_RXBASE \
162(RAM_TXBASE + (TXCOUNT * 8)) /* Start of RX descriptor queue */
163#define RAM_DATABASE \
164(RAM_RXBASE + (RXCOUNT * 8)) /* Start of data area for frames */
165#define RAM_BUFSIZE 1580 /* max. frame size - should never be
166 reached */
167
168#endif /* _SK_MCA_DRIVER_ */
169
170#endif /* _SK_MCA_INCLUDE_ */
diff --git a/drivers/net/skfp/can.c b/drivers/net/skfp/can.c
deleted file mode 100644
index 8a49abce7961..000000000000
--- a/drivers/net/skfp/can.c
+++ /dev/null
@@ -1,83 +0,0 @@
1/******************************************************************************
2 *
3 * (C)Copyright 1998,1999 SysKonnect,
4 * a business unit of Schneider & Koch & Co. Datensysteme GmbH.
5 *
6 * See the file "skfddi.c" for further information.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * The information in this file is provided "AS IS" without warranty.
14 *
15 ******************************************************************************/
16
17#ifndef lint
18static const char xID_sccs[] = "@(#)can.c 1.5 97/04/07 (C) SK " ;
19#endif
20
21/*
22 * canonical bit order
23 */
24const u_char canonical[256] = {
25 0x00,0x80,0x40,0xc0,0x20,0xa0,0x60,0xe0,
26 0x10,0x90,0x50,0xd0,0x30,0xb0,0x70,0xf0,
27 0x08,0x88,0x48,0xc8,0x28,0xa8,0x68,0xe8,
28 0x18,0x98,0x58,0xd8,0x38,0xb8,0x78,0xf8,
29 0x04,0x84,0x44,0xc4,0x24,0xa4,0x64,0xe4,
30 0x14,0x94,0x54,0xd4,0x34,0xb4,0x74,0xf4,
31 0x0c,0x8c,0x4c,0xcc,0x2c,0xac,0x6c,0xec,
32 0x1c,0x9c,0x5c,0xdc,0x3c,0xbc,0x7c,0xfc,
33 0x02,0x82,0x42,0xc2,0x22,0xa2,0x62,0xe2,
34 0x12,0x92,0x52,0xd2,0x32,0xb2,0x72,0xf2,
35 0x0a,0x8a,0x4a,0xca,0x2a,0xaa,0x6a,0xea,
36 0x1a,0x9a,0x5a,0xda,0x3a,0xba,0x7a,0xfa,
37 0x06,0x86,0x46,0xc6,0x26,0xa6,0x66,0xe6,
38 0x16,0x96,0x56,0xd6,0x36,0xb6,0x76,0xf6,
39 0x0e,0x8e,0x4e,0xce,0x2e,0xae,0x6e,0xee,
40 0x1e,0x9e,0x5e,0xde,0x3e,0xbe,0x7e,0xfe,
41 0x01,0x81,0x41,0xc1,0x21,0xa1,0x61,0xe1,
42 0x11,0x91,0x51,0xd1,0x31,0xb1,0x71,0xf1,
43 0x09,0x89,0x49,0xc9,0x29,0xa9,0x69,0xe9,
44 0x19,0x99,0x59,0xd9,0x39,0xb9,0x79,0xf9,
45 0x05,0x85,0x45,0xc5,0x25,0xa5,0x65,0xe5,
46 0x15,0x95,0x55,0xd5,0x35,0xb5,0x75,0xf5,
47 0x0d,0x8d,0x4d,0xcd,0x2d,0xad,0x6d,0xed,
48 0x1d,0x9d,0x5d,0xdd,0x3d,0xbd,0x7d,0xfd,
49 0x03,0x83,0x43,0xc3,0x23,0xa3,0x63,0xe3,
50 0x13,0x93,0x53,0xd3,0x33,0xb3,0x73,0xf3,
51 0x0b,0x8b,0x4b,0xcb,0x2b,0xab,0x6b,0xeb,
52 0x1b,0x9b,0x5b,0xdb,0x3b,0xbb,0x7b,0xfb,
53 0x07,0x87,0x47,0xc7,0x27,0xa7,0x67,0xe7,
54 0x17,0x97,0x57,0xd7,0x37,0xb7,0x77,0xf7,
55 0x0f,0x8f,0x4f,0xcf,0x2f,0xaf,0x6f,0xef,
56 0x1f,0x9f,0x5f,0xdf,0x3f,0xbf,0x7f,0xff
57} ;
58
59#ifdef MAKE_TABLE
60int byte_reverse(x)
61int x ;
62{
63 int y = 0 ;
64
65 if (x & 0x01)
66 y |= 0x80 ;
67 if (x & 0x02)
68 y |= 0x40 ;
69 if (x & 0x04)
70 y |= 0x20 ;
71 if (x & 0x08)
72 y |= 0x10 ;
73 if (x & 0x10)
74 y |= 0x08 ;
75 if (x & 0x20)
76 y |= 0x04 ;
77 if (x & 0x40)
78 y |= 0x02 ;
79 if (x & 0x80)
80 y |= 0x01 ;
81 return(y) ;
82}
83#endif
diff --git a/drivers/net/skfp/drvfbi.c b/drivers/net/skfp/drvfbi.c
index 5b475833f645..4fe624b0dd25 100644
--- a/drivers/net/skfp/drvfbi.c
+++ b/drivers/net/skfp/drvfbi.c
@@ -23,6 +23,7 @@
 #include "h/smc.h"
 #include "h/supern_2.h"
 #include "h/skfbiinc.h"
+#include <linux/bitrev.h>
 
 #ifndef lint
 static const char ID_sccs[] = "@(#)drvfbi.c	1.63 99/02/11 (C) SK " ;
@@ -445,16 +446,14 @@ void read_address(struct s_smc *smc, u_char *mac_addr)
 	char PmdType ;
 	int	i ;
 
-	extern const u_char canonical[256] ;
-
 #if	(defined(ISA) || defined(MCA))
 	for (i = 0; i < 4 ;i++) {	/* read mac address from board */
 		smc->hw.fddi_phys_addr.a[i] =
-			canonical[(inpw(PR_A(i+SA_MAC))&0xff)] ;
+			bitrev8(inpw(PR_A(i+SA_MAC)));
 	}
 	for (i = 4; i < 6; i++) {
 		smc->hw.fddi_phys_addr.a[i] =
-			canonical[(inpw(PR_A(i+SA_MAC+PRA_OFF))&0xff)] ;
+			bitrev8(inpw(PR_A(i+SA_MAC+PRA_OFF)));
 	}
 #endif
 #ifdef	EISA
@@ -464,17 +463,17 @@ void read_address(struct s_smc *smc, u_char *mac_addr)
 	 */
 	for (i = 0; i < 4 ;i++) {	/* read mac address from board */
 		smc->hw.fddi_phys_addr.a[i] =
-			canonical[inp(PR_A(i+SA_MAC))] ;
+			bitrev8(inp(PR_A(i+SA_MAC)));
 	}
 	for (i = 4; i < 6; i++) {
 		smc->hw.fddi_phys_addr.a[i] =
-			canonical[inp(PR_A(i+SA_MAC+PRA_OFF))] ;
+			bitrev8(inp(PR_A(i+SA_MAC+PRA_OFF)));
 	}
 #endif
 #ifdef	PCI
 	for (i = 0; i < 6; i++) {	/* read mac address from board */
 		smc->hw.fddi_phys_addr.a[i] =
-			canonical[inp(ADDR(B2_MAC_0+i))] ;
+			bitrev8(inp(ADDR(B2_MAC_0+i)));
 	}
 #endif
 #ifndef	PCI
@@ -493,7 +492,7 @@ void read_address(struct s_smc *smc, u_char *mac_addr)
 	if (mac_addr) {
 		for (i = 0; i < 6 ;i++) {
 			smc->hw.fddi_canon_addr.a[i] = mac_addr[i] ;
-			smc->hw.fddi_home_addr.a[i] = canonical[mac_addr[i]] ;
+			smc->hw.fddi_home_addr.a[i] = bitrev8(mac_addr[i]);
 		}
 		return ;
 	}
@@ -501,7 +500,7 @@ void read_address(struct s_smc *smc, u_char *mac_addr)
 
 	for (i = 0; i < 6 ;i++) {
 		smc->hw.fddi_canon_addr.a[i] =
-			canonical[smc->hw.fddi_phys_addr.a[i]] ;
+			bitrev8(smc->hw.fddi_phys_addr.a[i]);
 	}
 }
 
@@ -1269,11 +1268,8 @@ void driver_get_bia(struct s_smc *smc, struct fddi_addr *bia_addr)
 {
 	int i ;
 
-	extern const u_char canonical[256] ;
-
-	for (i = 0 ; i < 6 ; i++) {
-		bia_addr->a[i] = canonical[smc->hw.fddi_phys_addr.a[i]] ;
-	}
+	for (i = 0 ; i < 6 ; i++)
+		bia_addr->a[i] = bitrev8(smc->hw.fddi_phys_addr.a[i]);
 }
 
 void smt_start_watchdog(struct s_smc *smc)
diff --git a/drivers/net/skfp/fplustm.c b/drivers/net/skfp/fplustm.c
index 0784f558ca9a..a45205da8033 100644
--- a/drivers/net/skfp/fplustm.c
+++ b/drivers/net/skfp/fplustm.c
@@ -22,7 +22,7 @@
 #include "h/fddi.h"
 #include "h/smc.h"
 #include "h/supern_2.h"
-#include "can.c"
+#include <linux/bitrev.h>
 
 #ifndef lint
 static const char ID_sccs[] = "@(#)fplustm.c	1.32 99/02/23 (C) SK " ;
@@ -1073,7 +1073,7 @@ static struct s_fpmc* mac_get_mc_table(struct s_smc *smc,
 	if (can) {
 		p = own->a ;
 		for (i = 0 ; i < 6 ; i++, p++)
-			*p = canonical[*p] ;
+			*p = bitrev8(*p);
 	}
 	slot = NULL;
 	for (i = 0, tb = smc->hw.fp.mc.table ; i < FPMAX_MULTICAST ; i++, tb++){
diff --git a/drivers/net/skfp/smt.c b/drivers/net/skfp/smt.c
index 99a776a51fb5..fe847800acdc 100644
--- a/drivers/net/skfp/smt.c
+++ b/drivers/net/skfp/smt.c
@@ -18,6 +18,7 @@
18#include "h/fddi.h" 18#include "h/fddi.h"
19#include "h/smc.h" 19#include "h/smc.h"
20#include "h/smt_p.h" 20#include "h/smt_p.h"
21#include <linux/bitrev.h>
21 22
22#define KERNEL 23#define KERNEL
23#include "h/smtstate.h" 24#include "h/smtstate.h"
@@ -26,8 +27,6 @@
26static const char ID_sccs[] = "@(#)smt.c 2.43 98/11/23 (C) SK " ; 27static const char ID_sccs[] = "@(#)smt.c 2.43 98/11/23 (C) SK " ;
27#endif 28#endif
28 29
29extern const u_char canonical[256] ;
30
31/* 30/*
32 * FC in SMbuf 31 * FC in SMbuf
33 */ 32 */
@@ -180,7 +179,7 @@ void smt_agent_init(struct s_smc *smc)
180 driver_get_bia(smc,&smc->mib.fddiSMTStationId.sid_node) ; 179 driver_get_bia(smc,&smc->mib.fddiSMTStationId.sid_node) ;
181 for (i = 0 ; i < 6 ; i ++) { 180 for (i = 0 ; i < 6 ; i ++) {
182 smc->mib.fddiSMTStationId.sid_node.a[i] = 181 smc->mib.fddiSMTStationId.sid_node.a[i] =
183 canonical[smc->mib.fddiSMTStationId.sid_node.a[i]] ; 182 bitrev8(smc->mib.fddiSMTStationId.sid_node.a[i]);
184 } 183 }
185 smc->mib.fddiSMTManufacturerData[0] = 184 smc->mib.fddiSMTManufacturerData[0] =
186 smc->mib.fddiSMTStationId.sid_node.a[0] ; 185 smc->mib.fddiSMTStationId.sid_node.a[0] ;
@@ -2049,9 +2048,8 @@ static void hwm_conv_can(struct s_smc *smc, char *data, int len)
2049 2048
2050 SK_UNUSED(smc) ; 2049 SK_UNUSED(smc) ;
2051 2050
2052 for (i = len; i ; i--, data++) { 2051 for (i = len; i ; i--, data++)
2053 *data = canonical[*(u_char *)data] ; 2052 *data = bitrev8(*data);
2054 }
2055} 2053}
2056#endif 2054#endif
2057 2055
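Note on the skfp hunks above: the driver's private canonical[256] lookup table (and the #include "can.c" hack that pulled it in) is replaced by the kernel's generic bitrev8() helper from <linux/bitrev.h>. For readers unfamiliar with FDDI addressing, a minimal sketch of what bitrev8() computes, in plain C; bitrev8_sketch and the test value are illustrative only, not part of the patch:

    #include <stdint.h>
    #include <stdio.h>

    /* FDDI carries MAC addresses in canonical (LSB-first) bit order,
     * so each byte must be bit-reversed relative to the usual
     * MSB-first notation; canonical[256] simply precomputed this
     * function for every byte value. */
    static uint8_t bitrev8_sketch(uint8_t b)
    {
            uint8_t r = 0;
            int i;

            for (i = 0; i < 8; i++)
                    if (b & (1u << i))
                            r |= (uint8_t)(1u << (7 - i));
            return r;
    }

    int main(void)
    {
            printf("%02x -> %02x\n", 0x01, bitrev8_sketch(0x01)); /* prints 01 -> 80 */
            return 0;
    }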
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 45283f3f95e4..e482e7fcbb2b 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -42,7 +42,7 @@
42#include "skge.h" 42#include "skge.h"
43 43
44#define DRV_NAME "skge" 44#define DRV_NAME "skge"
45#define DRV_VERSION "1.9" 45#define DRV_VERSION "1.10"
46#define PFX DRV_NAME " " 46#define PFX DRV_NAME " "
47 47
48#define DEFAULT_TX_RING_SIZE 128 48#define DEFAULT_TX_RING_SIZE 128
@@ -132,18 +132,93 @@ static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs,
132} 132}
133 133
134/* Wake on Lan only supported on Yukon chips with rev 1 or above */ 134/* Wake on Lan only supported on Yukon chips with rev 1 or above */
135static int wol_supported(const struct skge_hw *hw) 135static u32 wol_supported(const struct skge_hw *hw)
136{ 136{
137 return !((hw->chip_id == CHIP_ID_GENESIS || 137 if (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev != 0)
138 (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0))); 138 return WAKE_MAGIC | WAKE_PHY;
139 else
140 return 0;
141}
142
143static u32 pci_wake_enabled(struct pci_dev *dev)
144{
145 int pm = pci_find_capability(dev, PCI_CAP_ID_PM);
146 u16 value;
147
 148 /* Without PM capabilities the device cannot assert PME#, 
 149 * so report wake-up as unsupported */ 
150 if (!pm)
151 return 0;
152
153 pci_read_config_word(dev, pm + PCI_PM_PMC, &value);
154
155 value &= PCI_PM_CAP_PME_MASK;
156 value >>= ffs(PCI_PM_CAP_PME_MASK) - 1; /* First bit of mask */
157
158 return value != 0;
159}
160
161static void skge_wol_init(struct skge_port *skge)
162{
163 struct skge_hw *hw = skge->hw;
164 int port = skge->port;
165 enum pause_control save_mode;
166 u32 ctrl;
167
168 /* Bring hardware out of reset */
169 skge_write16(hw, B0_CTST, CS_RST_CLR);
170 skge_write16(hw, SK_REG(port, GMAC_LINK_CTRL), GMLC_RST_CLR);
171
172 skge_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
173 skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
174
 175 /* Force to 10/100; skge_reset will re-enable on resume */ 
176 save_mode = skge->flow_control;
177 skge->flow_control = FLOW_MODE_SYMMETRIC;
178
179 ctrl = skge->advertising;
180 skge->advertising &= ~(ADVERTISED_1000baseT_Half|ADVERTISED_1000baseT_Full);
181
182 skge_phy_reset(skge);
183
184 skge->flow_control = save_mode;
185 skge->advertising = ctrl;
186
187 /* Set GMAC to no flow control and auto update for speed/duplex */
188 gma_write16(hw, port, GM_GP_CTRL,
189 GM_GPCR_FC_TX_DIS|GM_GPCR_TX_ENA|GM_GPCR_RX_ENA|
190 GM_GPCR_DUP_FULL|GM_GPCR_FC_RX_DIS|GM_GPCR_AU_FCT_DIS);
191
192 /* Set WOL address */
193 memcpy_toio(hw->regs + WOL_REGS(port, WOL_MAC_ADDR),
194 skge->netdev->dev_addr, ETH_ALEN);
195
196 /* Turn on appropriate WOL control bits */
197 skge_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), WOL_CTL_CLEAR_RESULT);
198 ctrl = 0;
199 if (skge->wol & WAKE_PHY)
200 ctrl |= WOL_CTL_ENA_PME_ON_LINK_CHG|WOL_CTL_ENA_LINK_CHG_UNIT;
201 else
202 ctrl |= WOL_CTL_DIS_PME_ON_LINK_CHG|WOL_CTL_DIS_LINK_CHG_UNIT;
203
204 if (skge->wol & WAKE_MAGIC)
205 ctrl |= WOL_CTL_ENA_PME_ON_MAGIC_PKT|WOL_CTL_ENA_MAGIC_PKT_UNIT;
206 else
 207 ctrl |= WOL_CTL_DIS_PME_ON_MAGIC_PKT|WOL_CTL_DIS_MAGIC_PKT_UNIT; 
208
209 ctrl |= WOL_CTL_DIS_PME_ON_PATTERN|WOL_CTL_DIS_PATTERN_UNIT;
210 skge_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), ctrl);
211
212 /* block receiver */
213 skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
139} 214}
140 215
141static void skge_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 216static void skge_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
142{ 217{
143 struct skge_port *skge = netdev_priv(dev); 218 struct skge_port *skge = netdev_priv(dev);
144 219
145 wol->supported = wol_supported(skge->hw) ? WAKE_MAGIC : 0; 220 wol->supported = wol_supported(skge->hw);
146 wol->wolopts = skge->wol ? WAKE_MAGIC : 0; 221 wol->wolopts = skge->wol;
147} 222}
148 223
149static int skge_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 224static int skge_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
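The new pci_wake_enabled() above reads the PM Capabilities (PMC) register and tests the PME_Support field with a common extraction idiom: mask the field, then shift it down by the position of the mask's lowest set bit, which ffs() reports 1-based. A standalone sketch of the idiom, assuming the 0xF800 value of PCI_PM_CAP_PME_MASK (PME_Support occupies bits 15:11 of PMC):

    #include <stdint.h>
    #include <stdio.h>
    #include <strings.h>        /* ffs() */

    #define PME_MASK 0xF800u    /* assumed PCI_PM_CAP_PME_MASK */

    /* Isolate a bitfield and normalize it to bit 0; a non-zero result
     * means PME# can be asserted from at least one power state. */
    static unsigned int field_get(uint16_t reg, unsigned int mask)
    {
            return (reg & mask) >> (ffs((int)mask) - 1);
    }

    int main(void)
    {
            uint16_t pmc = 0x4000;  /* example: PME supported from D3hot */
            printf("PME field: %#x\n", field_get(pmc, PME_MASK)); /* 0x8 */
            return 0;
    }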
@@ -151,23 +226,12 @@ static int skge_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
151 struct skge_port *skge = netdev_priv(dev); 226 struct skge_port *skge = netdev_priv(dev);
152 struct skge_hw *hw = skge->hw; 227 struct skge_hw *hw = skge->hw;
153 228
154 if (wol->wolopts != WAKE_MAGIC && wol->wolopts != 0) 229 if (wol->wolopts & ~wol_supported(hw)) 
155 return -EOPNOTSUPP; 230 return -EOPNOTSUPP;
156 231
157 if (wol->wolopts == WAKE_MAGIC && !wol_supported(hw)) 232 skge->wol = wol->wolopts;
158 return -EOPNOTSUPP; 233 if (!netif_running(dev))
159 234 skge_wol_init(skge);
160 skge->wol = wol->wolopts == WAKE_MAGIC;
161
162 if (skge->wol) {
163 memcpy_toio(hw->regs + WOL_MAC_ADDR, dev->dev_addr, ETH_ALEN);
164
165 skge_write16(hw, WOL_CTRL_STAT,
166 WOL_CTL_ENA_PME_ON_MAGIC_PKT |
167 WOL_CTL_ENA_MAGIC_PKT_UNIT);
168 } else
169 skge_write16(hw, WOL_CTRL_STAT, WOL_CTL_DEFAULT);
170
171 return 0; 235 return 0;
172} 236}
173 237
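Aside on the set_wol hunk above: the validation tests against the complement of the supported mask, so any requested wake source outside it fails with -EOPNOTSUPP while any subset passes; sky2_set_wol() later in this diff uses the identical check. A tiny self-contained sketch, with the WAKE_* values as defined by linux/ethtool.h:

    #include <stdint.h>

    #define WAKE_PHY        (1 << 0)
    #define WAKE_ARP        (1 << 4)
    #define WAKE_MAGIC      (1 << 5)

    /* Reject requests that contain any unsupported wake source. */
    static int validate_wol(uint32_t wolopts, uint32_t supported)
    {
            if (wolopts & ~supported)
                    return -1;      /* -EOPNOTSUPP in the drivers */
            return 0;
    }

    /* validate_wol(WAKE_MAGIC, WAKE_MAGIC | WAKE_PHY) == 0
     * validate_wol(WAKE_ARP,   WAKE_MAGIC | WAKE_PHY) != 0 */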
@@ -2373,6 +2437,9 @@ static int skge_up(struct net_device *dev)
2373 size_t rx_size, tx_size; 2437 size_t rx_size, tx_size;
2374 int err; 2438 int err;
2375 2439
2440 if (!is_valid_ether_addr(dev->dev_addr))
2441 return -EINVAL;
2442
2376 if (netif_msg_ifup(skge)) 2443 if (netif_msg_ifup(skge))
2377 printk(KERN_INFO PFX "%s: enabling interface\n", dev->name); 2444 printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);
2378 2445
@@ -2392,7 +2459,7 @@ static int skge_up(struct net_device *dev)
2392 BUG_ON(skge->dma & 7); 2459 BUG_ON(skge->dma & 7);
2393 2460
2394 if ((u64)skge->dma >> 32 != ((u64) skge->dma + skge->mem_size) >> 32) { 2461 if ((u64)skge->dma >> 32 != ((u64) skge->dma + skge->mem_size) >> 32) {
2395 printk(KERN_ERR PFX "pci_alloc_consistent region crosses 4G boundary\n"); 2462 dev_err(&hw->pdev->dev, "pci_alloc_consistent region crosses 4G boundary\n");
2396 err = -EINVAL; 2463 err = -EINVAL;
2397 goto free_pci_mem; 2464 goto free_pci_mem;
2398 } 2465 }
@@ -3001,6 +3068,7 @@ static void skge_mac_intr(struct skge_hw *hw, int port)
3001/* Handle device specific framing and timeout interrupts */ 3068/* Handle device specific framing and timeout interrupts */
3002static void skge_error_irq(struct skge_hw *hw) 3069static void skge_error_irq(struct skge_hw *hw)
3003{ 3070{
3071 struct pci_dev *pdev = hw->pdev;
3004 u32 hwstatus = skge_read32(hw, B0_HWE_ISRC); 3072 u32 hwstatus = skge_read32(hw, B0_HWE_ISRC);
3005 3073
3006 if (hw->chip_id == CHIP_ID_GENESIS) { 3074 if (hw->chip_id == CHIP_ID_GENESIS) {
@@ -3016,12 +3084,12 @@ static void skge_error_irq(struct skge_hw *hw)
3016 } 3084 }
3017 3085
3018 if (hwstatus & IS_RAM_RD_PAR) { 3086 if (hwstatus & IS_RAM_RD_PAR) {
3019 printk(KERN_ERR PFX "Ram read data parity error\n"); 3087 dev_err(&pdev->dev, "Ram read data parity error\n");
3020 skge_write16(hw, B3_RI_CTRL, RI_CLR_RD_PERR); 3088 skge_write16(hw, B3_RI_CTRL, RI_CLR_RD_PERR);
3021 } 3089 }
3022 3090
3023 if (hwstatus & IS_RAM_WR_PAR) { 3091 if (hwstatus & IS_RAM_WR_PAR) {
3024 printk(KERN_ERR PFX "Ram write data parity error\n"); 3092 dev_err(&pdev->dev, "Ram write data parity error\n");
3025 skge_write16(hw, B3_RI_CTRL, RI_CLR_WR_PERR); 3093 skge_write16(hw, B3_RI_CTRL, RI_CLR_WR_PERR);
3026 } 3094 }
3027 3095
@@ -3032,38 +3100,38 @@ static void skge_error_irq(struct skge_hw *hw)
3032 skge_mac_parity(hw, 1); 3100 skge_mac_parity(hw, 1);
3033 3101
3034 if (hwstatus & IS_R1_PAR_ERR) { 3102 if (hwstatus & IS_R1_PAR_ERR) {
3035 printk(KERN_ERR PFX "%s: receive queue parity error\n", 3103 dev_err(&pdev->dev, "%s: receive queue parity error\n",
3036 hw->dev[0]->name); 3104 hw->dev[0]->name);
3037 skge_write32(hw, B0_R1_CSR, CSR_IRQ_CL_P); 3105 skge_write32(hw, B0_R1_CSR, CSR_IRQ_CL_P);
3038 } 3106 }
3039 3107
3040 if (hwstatus & IS_R2_PAR_ERR) { 3108 if (hwstatus & IS_R2_PAR_ERR) {
3041 printk(KERN_ERR PFX "%s: receive queue parity error\n", 3109 dev_err(&pdev->dev, "%s: receive queue parity error\n",
3042 hw->dev[1]->name); 3110 hw->dev[1]->name);
3043 skge_write32(hw, B0_R2_CSR, CSR_IRQ_CL_P); 3111 skge_write32(hw, B0_R2_CSR, CSR_IRQ_CL_P);
3044 } 3112 }
3045 3113
3046 if (hwstatus & (IS_IRQ_MST_ERR|IS_IRQ_STAT)) { 3114 if (hwstatus & (IS_IRQ_MST_ERR|IS_IRQ_STAT)) {
3047 u16 pci_status, pci_cmd; 3115 u16 pci_status, pci_cmd;
3048 3116
3049 pci_read_config_word(hw->pdev, PCI_COMMAND, &pci_cmd); 3117 pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
3050 pci_read_config_word(hw->pdev, PCI_STATUS, &pci_status); 3118 pci_read_config_word(pdev, PCI_STATUS, &pci_status);
3051 3119
3052 printk(KERN_ERR PFX "%s: PCI error cmd=%#x status=%#x\n", 3120 dev_err(&pdev->dev, "PCI error cmd=%#x status=%#x\n",
3053 pci_name(hw->pdev), pci_cmd, pci_status); 3121 pci_cmd, pci_status);
3054 3122
3055 /* Write the error bits back to clear them. */ 3123 /* Write the error bits back to clear them. */
3056 pci_status &= PCI_STATUS_ERROR_BITS; 3124 pci_status &= PCI_STATUS_ERROR_BITS;
3057 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); 3125 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3058 pci_write_config_word(hw->pdev, PCI_COMMAND, 3126 pci_write_config_word(pdev, PCI_COMMAND,
3059 pci_cmd | PCI_COMMAND_SERR | PCI_COMMAND_PARITY); 3127 pci_cmd | PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
3060 pci_write_config_word(hw->pdev, PCI_STATUS, pci_status); 3128 pci_write_config_word(pdev, PCI_STATUS, pci_status);
3061 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 3129 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3062 3130
3063 /* if error still set then just ignore it */ 3131 /* if error still set then just ignore it */
3064 hwstatus = skge_read32(hw, B0_HWE_ISRC); 3132 hwstatus = skge_read32(hw, B0_HWE_ISRC);
3065 if (hwstatus & IS_IRQ_STAT) { 3133 if (hwstatus & IS_IRQ_STAT) {
3066 printk(KERN_INFO PFX "unable to clear error (so ignoring them)\n"); 3134 dev_warn(&hw->pdev->dev, "unable to clear error (so ignoring them)\n");
3067 hw->intr_mask &= ~IS_HW_ERR; 3135 hw->intr_mask &= ~IS_HW_ERR;
3068 } 3136 }
3069 } 3137 }
@@ -3277,8 +3345,8 @@ static int skge_reset(struct skge_hw *hw)
3277 hw->phy_addr = PHY_ADDR_BCOM; 3345 hw->phy_addr = PHY_ADDR_BCOM;
3278 break; 3346 break;
3279 default: 3347 default:
3280 printk(KERN_ERR PFX "%s: unsupported phy type 0x%x\n", 3348 dev_err(&hw->pdev->dev, "unsupported phy type 0x%x\n",
3281 pci_name(hw->pdev), hw->phy_type); 3349 hw->phy_type);
3282 return -EOPNOTSUPP; 3350 return -EOPNOTSUPP;
3283 } 3351 }
3284 break; 3352 break;
@@ -3293,8 +3361,8 @@ static int skge_reset(struct skge_hw *hw)
3293 break; 3361 break;
3294 3362
3295 default: 3363 default:
3296 printk(KERN_ERR PFX "%s: unsupported chip type 0x%x\n", 3364 dev_err(&hw->pdev->dev, "unsupported chip type 0x%x\n",
3297 pci_name(hw->pdev), hw->chip_id); 3365 hw->chip_id);
3298 return -EOPNOTSUPP; 3366 return -EOPNOTSUPP;
3299 } 3367 }
3300 3368
@@ -3334,7 +3402,7 @@ static int skge_reset(struct skge_hw *hw)
3334 /* avoid boards with stuck Hardware error bits */ 3402 /* avoid boards with stuck Hardware error bits */
3335 if ((skge_read32(hw, B0_ISRC) & IS_HW_ERR) && 3403 if ((skge_read32(hw, B0_ISRC) & IS_HW_ERR) &&
3336 (skge_read32(hw, B0_HWE_ISRC) & IS_IRQ_SENSOR)) { 3404 (skge_read32(hw, B0_HWE_ISRC) & IS_IRQ_SENSOR)) {
3337 printk(KERN_WARNING PFX "stuck hardware sensor bit\n"); 3405 dev_warn(&hw->pdev->dev, "stuck hardware sensor bit\n");
3338 hw->intr_mask &= ~IS_HW_ERR; 3406 hw->intr_mask &= ~IS_HW_ERR;
3339 } 3407 }
3340 3408
@@ -3408,7 +3476,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
3408 struct net_device *dev = alloc_etherdev(sizeof(*skge)); 3476 struct net_device *dev = alloc_etherdev(sizeof(*skge));
3409 3477
3410 if (!dev) { 3478 if (!dev) {
3411 printk(KERN_ERR "skge etherdev alloc failed"); 3479 dev_err(&hw->pdev->dev, "etherdev alloc failed\n");
3412 return NULL; 3480 return NULL;
3413 } 3481 }
3414 3482
@@ -3452,6 +3520,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
3452 skge->duplex = -1; 3520 skge->duplex = -1;
3453 skge->speed = -1; 3521 skge->speed = -1;
3454 skge->advertising = skge_supported_modes(hw); 3522 skge->advertising = skge_supported_modes(hw);
3523 skge->wol = pci_wake_enabled(hw->pdev) ? wol_supported(hw) : 0;
3455 3524
3456 hw->dev[port] = dev; 3525 hw->dev[port] = dev;
3457 3526
@@ -3496,15 +3565,13 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3496 3565
3497 err = pci_enable_device(pdev); 3566 err = pci_enable_device(pdev);
3498 if (err) { 3567 if (err) {
3499 printk(KERN_ERR PFX "%s cannot enable PCI device\n", 3568 dev_err(&pdev->dev, "cannot enable PCI device\n");
3500 pci_name(pdev));
3501 goto err_out; 3569 goto err_out;
3502 } 3570 }
3503 3571
3504 err = pci_request_regions(pdev, DRV_NAME); 3572 err = pci_request_regions(pdev, DRV_NAME);
3505 if (err) { 3573 if (err) {
3506 printk(KERN_ERR PFX "%s cannot obtain PCI resources\n", 3574 dev_err(&pdev->dev, "cannot obtain PCI resources\n");
3507 pci_name(pdev));
3508 goto err_out_disable_pdev; 3575 goto err_out_disable_pdev;
3509 } 3576 }
3510 3577
@@ -3519,8 +3586,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3519 } 3586 }
3520 3587
3521 if (err) { 3588 if (err) {
3522 printk(KERN_ERR PFX "%s no usable DMA configuration\n", 3589 dev_err(&pdev->dev, "no usable DMA configuration\n");
3523 pci_name(pdev));
3524 goto err_out_free_regions; 3590 goto err_out_free_regions;
3525 } 3591 }
3526 3592
@@ -3538,8 +3604,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3538 err = -ENOMEM; 3604 err = -ENOMEM;
3539 hw = kzalloc(sizeof(*hw), GFP_KERNEL); 3605 hw = kzalloc(sizeof(*hw), GFP_KERNEL);
3540 if (!hw) { 3606 if (!hw) {
3541 printk(KERN_ERR PFX "%s: cannot allocate hardware struct\n", 3607 dev_err(&pdev->dev, "cannot allocate hardware struct\n");
3542 pci_name(pdev));
3543 goto err_out_free_regions; 3608 goto err_out_free_regions;
3544 } 3609 }
3545 3610
@@ -3550,8 +3615,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3550 3615
3551 hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000); 3616 hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
3552 if (!hw->regs) { 3617 if (!hw->regs) {
3553 printk(KERN_ERR PFX "%s: cannot map device registers\n", 3618 dev_err(&pdev->dev, "cannot map device registers\n");
3554 pci_name(pdev));
3555 goto err_out_free_hw; 3619 goto err_out_free_hw;
3556 } 3620 }
3557 3621
@@ -3567,23 +3631,19 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3567 if (!dev) 3631 if (!dev)
3568 goto err_out_led_off; 3632 goto err_out_led_off;
3569 3633
3570 if (!is_valid_ether_addr(dev->dev_addr)) { 3634 /* Some motherboards are broken and have a zero address in ROM. */ 
3571 printk(KERN_ERR PFX "%s: bad (zero?) ethernet address in rom\n", 3635 if (!is_valid_ether_addr(dev->dev_addr))
3572 pci_name(pdev)); 3636 dev_warn(&pdev->dev, "bad (zero?) ethernet address in rom\n");
3573 err = -EIO;
3574 goto err_out_free_netdev;
3575 }
3576 3637
3577 err = register_netdev(dev); 3638 err = register_netdev(dev);
3578 if (err) { 3639 if (err) {
3579 printk(KERN_ERR PFX "%s: cannot register net device\n", 3640 dev_err(&pdev->dev, "cannot register net device\n");
3580 pci_name(pdev));
3581 goto err_out_free_netdev; 3641 goto err_out_free_netdev;
3582 } 3642 }
3583 3643
3584 err = request_irq(pdev->irq, skge_intr, IRQF_SHARED, dev->name, hw); 3644 err = request_irq(pdev->irq, skge_intr, IRQF_SHARED, dev->name, hw);
3585 if (err) { 3645 if (err) {
3586 printk(KERN_ERR PFX "%s: cannot assign irq %d\n", 3646 dev_err(&pdev->dev, "%s: cannot assign irq %d\n",
3587 dev->name, pdev->irq); 3647 dev->name, pdev->irq);
3588 goto err_out_unregister; 3648 goto err_out_unregister;
3589 } 3649 }
@@ -3594,7 +3654,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3594 skge_show_addr(dev1); 3654 skge_show_addr(dev1);
3595 else { 3655 else {
3596 /* Failure to register second port need not be fatal */ 3656 /* Failure to register second port need not be fatal */
3597 printk(KERN_WARNING PFX "register of second port failed\n"); 3657 dev_warn(&pdev->dev, "register of second port failed\n");
3598 hw->dev[1] = NULL; 3658 hw->dev[1] = NULL;
3599 free_netdev(dev1); 3659 free_netdev(dev1);
3600 } 3660 }
@@ -3659,28 +3719,46 @@ static void __devexit skge_remove(struct pci_dev *pdev)
3659} 3719}
3660 3720
3661#ifdef CONFIG_PM 3721#ifdef CONFIG_PM
3722static int vaux_avail(struct pci_dev *pdev)
3723{
3724 int pm_cap;
3725
3726 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
3727 if (pm_cap) {
3728 u16 ctl;
3729 pci_read_config_word(pdev, pm_cap + PCI_PM_PMC, &ctl);
3730 if (ctl & PCI_PM_CAP_AUX_POWER)
3731 return 1;
3732 }
3733 return 0;
3734}
3735
3736
3662static int skge_suspend(struct pci_dev *pdev, pm_message_t state) 3737static int skge_suspend(struct pci_dev *pdev, pm_message_t state)
3663{ 3738{
3664 struct skge_hw *hw = pci_get_drvdata(pdev); 3739 struct skge_hw *hw = pci_get_drvdata(pdev);
3665 int i, wol = 0; 3740 int i, err, wol = 0;
3741
3742 err = pci_save_state(pdev);
3743 if (err)
3744 return err;
3666 3745
3667 pci_save_state(pdev);
3668 for (i = 0; i < hw->ports; i++) { 3746 for (i = 0; i < hw->ports; i++) {
3669 struct net_device *dev = hw->dev[i]; 3747 struct net_device *dev = hw->dev[i];
3748 struct skge_port *skge = netdev_priv(dev);
3670 3749
3671 if (netif_running(dev)) { 3750 if (netif_running(dev))
3672 struct skge_port *skge = netdev_priv(dev); 3751 skge_down(dev);
3752 if (skge->wol)
3753 skge_wol_init(skge);
3673 3754
3674 netif_carrier_off(dev); 3755 wol |= skge->wol;
3675 if (skge->wol)
3676 netif_stop_queue(dev);
3677 else
3678 skge_down(dev);
3679 wol |= skge->wol;
3680 }
3681 netif_device_detach(dev);
3682 } 3756 }
3683 3757
3758 if (wol && vaux_avail(pdev))
3759 skge_write8(hw, B0_POWER_CTRL,
3760 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF);
3761
3684 skge_write32(hw, B0_IMSK, 0); 3762 skge_write32(hw, B0_IMSK, 0);
3685 pci_enable_wake(pdev, pci_choose_state(pdev, state), wol); 3763 pci_enable_wake(pdev, pci_choose_state(pdev, state), wol);
3686 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 3764 pci_set_power_state(pdev, pci_choose_state(pdev, state));
@@ -3693,8 +3771,14 @@ static int skge_resume(struct pci_dev *pdev)
3693 struct skge_hw *hw = pci_get_drvdata(pdev); 3771 struct skge_hw *hw = pci_get_drvdata(pdev);
3694 int i, err; 3772 int i, err;
3695 3773
3696 pci_set_power_state(pdev, PCI_D0); 3774 err = pci_set_power_state(pdev, PCI_D0);
3697 pci_restore_state(pdev); 3775 if (err)
3776 goto out;
3777
3778 err = pci_restore_state(pdev);
3779 if (err)
3780 goto out;
3781
3698 pci_enable_wake(pdev, PCI_D0, 0); 3782 pci_enable_wake(pdev, PCI_D0, 0);
3699 3783
3700 err = skge_reset(hw); 3784 err = skge_reset(hw);
@@ -3704,7 +3788,6 @@ static int skge_resume(struct pci_dev *pdev)
3704 for (i = 0; i < hw->ports; i++) { 3788 for (i = 0; i < hw->ports; i++) {
3705 struct net_device *dev = hw->dev[i]; 3789 struct net_device *dev = hw->dev[i];
3706 3790
3707 netif_device_attach(dev);
3708 if (netif_running(dev)) { 3791 if (netif_running(dev)) {
3709 err = skge_up(dev); 3792 err = skge_up(dev);
3710 3793
diff --git a/drivers/net/skge.h b/drivers/net/skge.h
index f6223c533c01..17b1b479dff5 100644
--- a/drivers/net/skge.h
+++ b/drivers/net/skge.h
@@ -876,11 +876,13 @@ enum {
876 WOL_PATT_CNT_0 = 0x0f38,/* 32 bit WOL Pattern Counter 3..0 */ 876 WOL_PATT_CNT_0 = 0x0f38,/* 32 bit WOL Pattern Counter 3..0 */
877 WOL_PATT_CNT_4 = 0x0f3c,/* 24 bit WOL Pattern Counter 6..4 */ 877 WOL_PATT_CNT_4 = 0x0f3c,/* 24 bit WOL Pattern Counter 6..4 */
878}; 878};
879#define WOL_REGS(port, x) (x + (port)*0x80)
879 880
880enum { 881enum {
881 WOL_PATT_RAM_1 = 0x1000,/* WOL Pattern RAM Link 1 */ 882 WOL_PATT_RAM_1 = 0x1000,/* WOL Pattern RAM Link 1 */
882 WOL_PATT_RAM_2 = 0x1400,/* WOL Pattern RAM Link 2 */ 883 WOL_PATT_RAM_2 = 0x1400,/* WOL Pattern RAM Link 2 */
883}; 884};
885#define WOL_PATT_RAM_BASE(port) (WOL_PATT_RAM_1 + (port)*0x400)
884 886
885enum { 887enum {
886 BASE_XMAC_1 = 0x2000,/* XMAC 1 registers */ 888 BASE_XMAC_1 = 0x2000,/* XMAC 1 registers */
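The two new macros above capture the per-port banking of the WOL block: the control registers repeat at a 0x80-byte stride, the pattern RAM at 0x400. A quick illustrative check using WOL_PATT_CNT_0 (0x0f38) from the enum above:

    #include <stdio.h>

    #define WOL_REGS(port, x)       ((x) + (port) * 0x80)
    #define WOL_PATT_RAM_BASE(port) (0x1000 + (port) * 0x400)

    int main(void)
    {
            /* 0x0f38 is WOL_PATT_CNT_0 from the register enum */
            printf("port0 cnt @ %#x, port1 cnt @ %#x\n",
                   WOL_REGS(0, 0x0f38), WOL_REGS(1, 0x0f38));
            printf("port0 ram @ %#x, port1 ram @ %#x\n",
                   WOL_PATT_RAM_BASE(0), WOL_PATT_RAM_BASE(1));
            return 0;
    }

This prints 0xf38/0xfb8 and 0x1000/0x1400, the latter matching WOL_PATT_RAM_1 and WOL_PATT_RAM_2 above.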
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 822dd0b13133..f2ab3d56e565 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -49,7 +49,7 @@
49#include "sky2.h" 49#include "sky2.h"
50 50
51#define DRV_NAME "sky2" 51#define DRV_NAME "sky2"
52#define DRV_VERSION "1.10" 52#define DRV_VERSION "1.12"
53#define PFX DRV_NAME " " 53#define PFX DRV_NAME " "
54 54
55/* 55/*
@@ -105,6 +105,7 @@ static const struct pci_device_id sky2_id_table[] = {
105 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) }, /* DGE-560T */ 105 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) }, /* DGE-560T */
106 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4001) }, /* DGE-550SX */ 106 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4001) }, /* DGE-550SX */
107 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4B02) }, /* DGE-560SX */ 107 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4B02) }, /* DGE-560SX */
108 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4B03) }, /* DGE-550T */
108 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4340) }, /* 88E8021 */ 109 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4340) }, /* 88E8021 */
109 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4341) }, /* 88E8022 */ 110 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4341) }, /* 88E8022 */
110 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4342) }, /* 88E8061 */ 111 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4342) }, /* 88E8061 */
@@ -126,6 +127,9 @@ static const struct pci_device_id sky2_id_table[] = {
126 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4366) }, /* 88EC036 */ 127 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4366) }, /* 88EC036 */
127 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4367) }, /* 88EC032 */ 128 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4367) }, /* 88EC032 */
128 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4368) }, /* 88EC034 */ 129 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4368) }, /* 88EC034 */
130 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4369) }, /* 88EC042 */
131 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436A) }, /* 88E8058 */
132 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436B) }, /* 88E8071 */
129 { 0 } 133 { 0 }
130}; 134};
131 135
@@ -140,7 +144,7 @@ static const u32 portirq_msk[] = { Y2_IS_PORT_1, Y2_IS_PORT_2 };
140static const char *yukon2_name[] = { 144static const char *yukon2_name[] = {
141 "XL", /* 0xb3 */ 145 "XL", /* 0xb3 */
142 "EC Ultra", /* 0xb4 */ 146 "EC Ultra", /* 0xb4 */
143 "UNKNOWN", /* 0xb5 */ 147 "Extreme", /* 0xb5 */
144 "EC", /* 0xb6 */ 148 "EC", /* 0xb6 */
145 "FE", /* 0xb7 */ 149 "FE", /* 0xb7 */
146}; 150};
@@ -192,76 +196,52 @@ static u16 gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg)
192 return v; 196 return v;
193} 197}
194 198
195static void sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
196{
197 u16 power_control;
198 int vaux;
199
200 pr_debug("sky2_set_power_state %d\n", state);
201 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
202
203 power_control = sky2_pci_read16(hw, hw->pm_cap + PCI_PM_PMC);
204 vaux = (sky2_read16(hw, B0_CTST) & Y2_VAUX_AVAIL) &&
205 (power_control & PCI_PM_CAP_PME_D3cold);
206
207 power_control = sky2_pci_read16(hw, hw->pm_cap + PCI_PM_CTRL);
208
209 power_control |= PCI_PM_CTRL_PME_STATUS;
210 power_control &= ~(PCI_PM_CTRL_STATE_MASK);
211 199
212 switch (state) { 200static void sky2_power_on(struct sky2_hw *hw)
213 case PCI_D0: 201{
214 /* switch power to VCC (WA for VAUX problem) */ 202 /* switch power to VCC (WA for VAUX problem) */
215 sky2_write8(hw, B0_POWER_CTRL, 203 sky2_write8(hw, B0_POWER_CTRL,
216 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON); 204 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
217
218 /* disable Core Clock Division, */
219 sky2_write32(hw, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);
220
221 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
222 /* enable bits are inverted */
223 sky2_write8(hw, B2_Y2_CLK_GATE,
224 Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
225 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
226 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS);
227 else
228 sky2_write8(hw, B2_Y2_CLK_GATE, 0);
229 205
230 if (hw->chip_id == CHIP_ID_YUKON_EC_U) { 206 /* disable Core Clock Division, */
231 u32 reg1; 207 sky2_write32(hw, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);
232 208
233 sky2_pci_write32(hw, PCI_DEV_REG3, 0); 209 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
234 reg1 = sky2_pci_read32(hw, PCI_DEV_REG4); 210 /* enable bits are inverted */
235 reg1 &= P_ASPM_CONTROL_MSK; 211 sky2_write8(hw, B2_Y2_CLK_GATE,
236 sky2_pci_write32(hw, PCI_DEV_REG4, reg1); 212 Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
237 sky2_pci_write32(hw, PCI_DEV_REG5, 0); 213 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
238 } 214 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS);
215 else
216 sky2_write8(hw, B2_Y2_CLK_GATE, 0);
239 217
240 break; 218 if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX) {
219 u32 reg1;
241 220
242 case PCI_D3hot: 221 sky2_pci_write32(hw, PCI_DEV_REG3, 0);
243 case PCI_D3cold: 222 reg1 = sky2_pci_read32(hw, PCI_DEV_REG4);
244 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1) 223 reg1 &= P_ASPM_CONTROL_MSK;
245 sky2_write8(hw, B2_Y2_CLK_GATE, 0); 224 sky2_pci_write32(hw, PCI_DEV_REG4, reg1);
246 else 225 sky2_pci_write32(hw, PCI_DEV_REG5, 0);
247 /* enable bits are inverted */
248 sky2_write8(hw, B2_Y2_CLK_GATE,
249 Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
250 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
251 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS);
252
253 /* switch power to VAUX */
254 if (vaux && state != PCI_D3cold)
255 sky2_write8(hw, B0_POWER_CTRL,
256 (PC_VAUX_ENA | PC_VCC_ENA |
257 PC_VAUX_ON | PC_VCC_OFF));
258 break;
259 default:
260 printk(KERN_ERR PFX "Unknown power state %d\n", state);
261 } 226 }
227}
262 228
263 sky2_pci_write16(hw, hw->pm_cap + PCI_PM_CTRL, power_control); 229static void sky2_power_aux(struct sky2_hw *hw)
264 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 230{
231 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
232 sky2_write8(hw, B2_Y2_CLK_GATE, 0);
233 else
234 /* enable bits are inverted */
235 sky2_write8(hw, B2_Y2_CLK_GATE,
236 Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
237 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
238 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS);
239
240 /* switch power to VAUX */
241 if (sky2_read16(hw, B0_CTST) & Y2_VAUX_AVAIL)
242 sky2_write8(hw, B0_POWER_CTRL,
243 (PC_VAUX_ENA | PC_VCC_ENA |
244 PC_VAUX_ON | PC_VCC_OFF));
265} 245}
266 246
267static void sky2_gmac_reset(struct sky2_hw *hw, unsigned port) 247static void sky2_gmac_reset(struct sky2_hw *hw, unsigned port)
@@ -313,8 +293,10 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
313 struct sky2_port *sky2 = netdev_priv(hw->dev[port]); 293 struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
314 u16 ctrl, ct1000, adv, pg, ledctrl, ledover, reg; 294 u16 ctrl, ct1000, adv, pg, ledctrl, ledover, reg;
315 295
316 if (sky2->autoneg == AUTONEG_ENABLE && 296 if (sky2->autoneg == AUTONEG_ENABLE
317 !(hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U)) { 297 && !(hw->chip_id == CHIP_ID_YUKON_XL
298 || hw->chip_id == CHIP_ID_YUKON_EC_U
299 || hw->chip_id == CHIP_ID_YUKON_EX)) {
318 u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL); 300 u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);
319 301
320 ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK | 302 ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
@@ -341,8 +323,10 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
341 /* enable automatic crossover */ 323 /* enable automatic crossover */
342 ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO); 324 ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO);
343 325
344 if (sky2->autoneg == AUTONEG_ENABLE && 326 if (sky2->autoneg == AUTONEG_ENABLE
345 (hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U)) { 327 && (hw->chip_id == CHIP_ID_YUKON_XL
328 || hw->chip_id == CHIP_ID_YUKON_EC_U
329 || hw->chip_id == CHIP_ID_YUKON_EX)) {
346 ctrl &= ~PHY_M_PC_DSC_MSK; 330 ctrl &= ~PHY_M_PC_DSC_MSK;
347 ctrl |= PHY_M_PC_DSC(2) | PHY_M_PC_DOWN_S_ENA; 331 ctrl |= PHY_M_PC_DSC(2) | PHY_M_PC_DOWN_S_ENA;
348 } 332 }
@@ -497,7 +481,9 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
497 /* restore page register */ 481 /* restore page register */
498 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg); 482 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
499 break; 483 break;
484
500 case CHIP_ID_YUKON_EC_U: 485 case CHIP_ID_YUKON_EC_U:
486 case CHIP_ID_YUKON_EX:
501 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR); 487 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
502 488
503 /* select page 3 to access LED control register */ 489 /* select page 3 to access LED control register */
@@ -539,7 +525,7 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
539 525
540 /* set page register to 0 */ 526 /* set page register to 0 */
541 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg); 527 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
542 } else { 528 } else if (hw->chip_id != CHIP_ID_YUKON_EX) {
543 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl); 529 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);
544 530
545 if (sky2->autoneg == AUTONEG_DISABLE || sky2->speed == SPEED_100) { 531 if (sky2->autoneg == AUTONEG_DISABLE || sky2->speed == SPEED_100) {
@@ -591,6 +577,73 @@ static void sky2_phy_reinit(struct sky2_port *sky2)
591 spin_unlock_bh(&sky2->phy_lock); 577 spin_unlock_bh(&sky2->phy_lock);
592} 578}
593 579
580/* Put device in state to listen for Wake On Lan */
581static void sky2_wol_init(struct sky2_port *sky2)
582{
583 struct sky2_hw *hw = sky2->hw;
584 unsigned port = sky2->port;
585 enum flow_control save_mode;
586 u16 ctrl;
587 u32 reg1;
588
589 /* Bring hardware out of reset */
590 sky2_write16(hw, B0_CTST, CS_RST_CLR);
591 sky2_write16(hw, SK_REG(port, GMAC_LINK_CTRL), GMLC_RST_CLR);
592
593 sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
594 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
595
596 /* Force to 10/100
597 * sky2_reset will re-enable on resume
598 */
599 save_mode = sky2->flow_mode;
600 ctrl = sky2->advertising;
601
602 sky2->advertising &= ~(ADVERTISED_1000baseT_Half|ADVERTISED_1000baseT_Full);
603 sky2->flow_mode = FC_NONE;
604 sky2_phy_power(hw, port, 1);
605 sky2_phy_reinit(sky2);
606
607 sky2->flow_mode = save_mode;
608 sky2->advertising = ctrl;
609
610 /* Set GMAC to no flow control and auto update for speed/duplex */
611 gma_write16(hw, port, GM_GP_CTRL,
612 GM_GPCR_FC_TX_DIS|GM_GPCR_TX_ENA|GM_GPCR_RX_ENA|
613 GM_GPCR_DUP_FULL|GM_GPCR_FC_RX_DIS|GM_GPCR_AU_FCT_DIS);
614
615 /* Set WOL address */
616 memcpy_toio(hw->regs + WOL_REGS(port, WOL_MAC_ADDR),
617 sky2->netdev->dev_addr, ETH_ALEN);
618
619 /* Turn on appropriate WOL control bits */
620 sky2_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), WOL_CTL_CLEAR_RESULT);
621 ctrl = 0;
622 if (sky2->wol & WAKE_PHY)
623 ctrl |= WOL_CTL_ENA_PME_ON_LINK_CHG|WOL_CTL_ENA_LINK_CHG_UNIT;
624 else
625 ctrl |= WOL_CTL_DIS_PME_ON_LINK_CHG|WOL_CTL_DIS_LINK_CHG_UNIT;
626
627 if (sky2->wol & WAKE_MAGIC)
628 ctrl |= WOL_CTL_ENA_PME_ON_MAGIC_PKT|WOL_CTL_ENA_MAGIC_PKT_UNIT;
629 else
 630 ctrl |= WOL_CTL_DIS_PME_ON_MAGIC_PKT|WOL_CTL_DIS_MAGIC_PKT_UNIT; 
631
632 ctrl |= WOL_CTL_DIS_PME_ON_PATTERN|WOL_CTL_DIS_PATTERN_UNIT;
633 sky2_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), ctrl);
634
635 /* Turn on legacy PCI-Express PME mode */
636 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
637 reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
638 reg1 |= PCI_Y2_PME_LEGACY;
639 sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
640 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
641
642 /* block receiver */
643 sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
644
645}
646
594static void sky2_mac_init(struct sky2_hw *hw, unsigned port) 647static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
595{ 648{
596 struct sky2_port *sky2 = netdev_priv(hw->dev[port]); 649 struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
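Both wol_init routines assemble the WOL control word from explicit enable/disable pairs, so every wake-source field of the register is written rather than inherited from previous state. A reduced sketch of that pattern; all macro values here are hypothetical stand-ins, not the chip's real bit assignments:

    #include <stdint.h>

    #define ENA_ON_LINK_CHG   0x0001u   /* hypothetical bit values */
    #define DIS_ON_LINK_CHG   0x0002u
    #define ENA_ON_MAGIC_PKT  0x0004u
    #define DIS_ON_MAGIC_PKT  0x0008u
    #define DIS_ON_PATTERN    0x0010u

    static uint16_t wol_ctrl_word(int wake_phy, int wake_magic)
    {
            uint16_t ctrl = 0;

            /* each source contributes either its ENA or its DIS bits */
            ctrl |= wake_phy   ? ENA_ON_LINK_CHG  : DIS_ON_LINK_CHG;
            ctrl |= wake_magic ? ENA_ON_MAGIC_PKT : DIS_ON_MAGIC_PKT;
            ctrl |= DIS_ON_PATTERN; /* pattern wake never armed here */
            return ctrl;
    }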
@@ -684,7 +737,7 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
684 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR); 737 sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
685 sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON); 738 sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
686 739
687 if (hw->chip_id == CHIP_ID_YUKON_EC_U) { 740 if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX) {
688 sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8); 741 sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8);
689 sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8); 742 sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8);
690 if (hw->dev[port]->mtu > ETH_DATA_LEN) { 743 if (hw->dev[port]->mtu > ETH_DATA_LEN) {
@@ -1467,6 +1520,9 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
1467 if (unlikely(netif_msg_tx_done(sky2))) 1520 if (unlikely(netif_msg_tx_done(sky2)))
1468 printk(KERN_DEBUG "%s: tx done %u\n", 1521 printk(KERN_DEBUG "%s: tx done %u\n",
1469 dev->name, idx); 1522 dev->name, idx);
1523 sky2->net_stats.tx_packets++;
1524 sky2->net_stats.tx_bytes += re->skb->len;
1525
1470 dev_kfree_skb_any(re->skb); 1526 dev_kfree_skb_any(re->skb);
1471 } 1527 }
1472 1528
@@ -1641,7 +1697,9 @@ static void sky2_link_up(struct sky2_port *sky2)
1641 sky2_write8(hw, SK_REG(port, LNK_LED_REG), 1697 sky2_write8(hw, SK_REG(port, LNK_LED_REG),
1642 LINKLED_ON | LINKLED_BLINK_OFF | LINKLED_LINKSYNC_OFF); 1698 LINKLED_ON | LINKLED_BLINK_OFF | LINKLED_LINKSYNC_OFF);
1643 1699
1644 if (hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U) { 1700 if (hw->chip_id == CHIP_ID_YUKON_XL
1701 || hw->chip_id == CHIP_ID_YUKON_EC_U
1702 || hw->chip_id == CHIP_ID_YUKON_EX) {
1645 u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR); 1703 u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
1646 u16 led = PHY_M_LEDC_LOS_CTRL(1); /* link active */ 1704 u16 led = PHY_M_LEDC_LOS_CTRL(1); /* link active */
1647 1705
@@ -1734,14 +1792,16 @@ static int sky2_autoneg_done(struct sky2_port *sky2, u16 aux)
1734 sky2->duplex = (aux & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF; 1792 sky2->duplex = (aux & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;
1735 1793
1736 /* Pause bits are offset (9..8) */ 1794 /* Pause bits are offset (9..8) */
1737 if (hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U) 1795 if (hw->chip_id == CHIP_ID_YUKON_XL
1796 || hw->chip_id == CHIP_ID_YUKON_EC_U
1797 || hw->chip_id == CHIP_ID_YUKON_EX)
1738 aux >>= 6; 1798 aux >>= 6;
1739 1799
1740 sky2->flow_status = sky2_flow(aux & PHY_M_PS_RX_P_EN, 1800 sky2->flow_status = sky2_flow(aux & PHY_M_PS_RX_P_EN,
1741 aux & PHY_M_PS_TX_P_EN); 1801 aux & PHY_M_PS_TX_P_EN);
1742 1802
1743 if (sky2->duplex == DUPLEX_HALF && sky2->speed < SPEED_1000 1803 if (sky2->duplex == DUPLEX_HALF && sky2->speed < SPEED_1000
1744 && hw->chip_id != CHIP_ID_YUKON_EC_U) 1804 && !(hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX))
1745 sky2->flow_status = FC_NONE; 1805 sky2->flow_status = FC_NONE;
1746 1806
1747 if (aux & PHY_M_PS_RX_P_EN) 1807 if (aux & PHY_M_PS_RX_P_EN)
@@ -1794,48 +1854,37 @@ out:
1794} 1854}
1795 1855
1796 1856
1797/* Transmit timeout is only called if we are running, carries is up 1857/* Transmit timeout is only called if we are running, carrier is up
1798 * and tx queue is full (stopped). 1858 * and tx queue is full (stopped).
1859 * Called with netif_tx_lock held.
1799 */ 1860 */
1800static void sky2_tx_timeout(struct net_device *dev) 1861static void sky2_tx_timeout(struct net_device *dev)
1801{ 1862{
1802 struct sky2_port *sky2 = netdev_priv(dev); 1863 struct sky2_port *sky2 = netdev_priv(dev);
1803 struct sky2_hw *hw = sky2->hw; 1864 struct sky2_hw *hw = sky2->hw;
1804 unsigned txq = txqaddr[sky2->port]; 1865 u32 imask;
1805 u16 report, done;
1806 1866
1807 if (netif_msg_timer(sky2)) 1867 if (netif_msg_timer(sky2))
1808 printk(KERN_ERR PFX "%s: tx timeout\n", dev->name); 1868 printk(KERN_ERR PFX "%s: tx timeout\n", dev->name);
1809 1869
1810 report = sky2_read16(hw, sky2->port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX);
1811 done = sky2_read16(hw, Q_ADDR(txq, Q_DONE));
1812
1813 printk(KERN_DEBUG PFX "%s: transmit ring %u .. %u report=%u done=%u\n", 1870 printk(KERN_DEBUG PFX "%s: transmit ring %u .. %u report=%u done=%u\n",
1814 dev->name, 1871 dev->name, sky2->tx_cons, sky2->tx_prod,
1815 sky2->tx_cons, sky2->tx_prod, report, done); 1872 sky2_read16(hw, sky2->port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX),
1873 sky2_read16(hw, Q_ADDR(txqaddr[sky2->port], Q_DONE)));
1816 1874
1817 if (report != done) { 1875 imask = sky2_read32(hw, B0_IMSK); /* block IRQ in hw */
1818 printk(KERN_INFO PFX "status burst pending (irq moderation?)\n"); 1876 sky2_write32(hw, B0_IMSK, 0);
1819 1877 sky2_read32(hw, B0_IMSK);
1820 sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP);
1821 sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
1822 } else if (report != sky2->tx_cons) {
1823 printk(KERN_INFO PFX "status report lost?\n");
1824 1878
1825 netif_tx_lock_bh(dev); 1879 netif_poll_disable(hw->dev[0]); /* stop NAPI poll */
1826 sky2_tx_complete(sky2, report); 1880 synchronize_irq(hw->pdev->irq);
1827 netif_tx_unlock_bh(dev);
1828 } else {
1829 printk(KERN_INFO PFX "hardware hung? flushing\n");
1830 1881
1831 sky2_write32(hw, Q_ADDR(txq, Q_CSR), BMU_STOP); 1882 netif_start_queue(dev); /* don't wakeup during flush */
1832 sky2_write32(hw, Y2_QADDR(txq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET); 1883 sky2_tx_complete(sky2, sky2->tx_prod); /* Flush transmit queue */
1833 1884
1834 sky2_tx_clean(dev); 1885 sky2_write32(hw, B0_IMSK, imask);
1835 1886
1836 sky2_qset(hw, txq); 1887 sky2_phy_reinit(sky2); /* this clears flow control etc */
1837 sky2_prefetch_init(hw, txq, sky2->tx_le_map, TX_RING_SIZE - 1);
1838 }
1839} 1888}
1840 1889
1841static int sky2_change_mtu(struct net_device *dev, int new_mtu) 1890static int sky2_change_mtu(struct net_device *dev, int new_mtu)
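The reworked sky2_tx_timeout() above drops the old three-way diagnosis in favour of one quiesce-and-flush sequence: save and clear the chip's interrupt mask, stop NAPI polling, let any in-flight handler finish via synchronize_irq(), complete the whole transmit ring, then restore the mask and reinit the PHY. A hedged skeleton of that ordering; the hw_* helpers are hypothetical stand-ins for the chip-specific register writes, not real driver calls:

    #include <linux/netdevice.h>
    #include <linux/interrupt.h>

    struct hw_priv;
    u32 hw_read_irq_mask(struct hw_priv *hw);
    void hw_write_irq_mask(struct hw_priv *hw, u32 v);
    void hw_flush_tx_ring(struct hw_priv *hw);
    void hw_phy_reinit(struct hw_priv *hw);

    static void tx_timeout_sketch(struct net_device *dev, struct hw_priv *hw)
    {
            u32 imask = hw_read_irq_mask(hw);   /* save current mask */

            hw_write_irq_mask(hw, 0);           /* block IRQ in hardware */
            netif_poll_disable(dev);            /* stop NAPI poll */
            synchronize_irq(dev->irq);          /* wait out running handler */

            netif_start_queue(dev);             /* don't wake queue mid-flush */
            hw_flush_tx_ring(hw);               /* reclaim every pending entry */

            hw_write_irq_mask(hw, imask);       /* re-enable interrupts */
            hw_phy_reinit(hw);                  /* clears flow control etc */
    }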
@@ -1849,8 +1898,9 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
1849 if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU) 1898 if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
1850 return -EINVAL; 1899 return -EINVAL;
1851 1900
1901 /* TSO on Yukon Ultra and MTU > 1500 not supported */
1852 if (hw->chip_id == CHIP_ID_YUKON_EC_U && new_mtu > ETH_DATA_LEN) 1902 if (hw->chip_id == CHIP_ID_YUKON_EC_U && new_mtu > ETH_DATA_LEN)
1853 return -EINVAL; 1903 dev->features &= ~NETIF_F_TSO;
1854 1904
1855 if (!netif_running(dev)) { 1905 if (!netif_running(dev)) {
1856 dev->mtu = new_mtu; 1906 dev->mtu = new_mtu;
@@ -2089,6 +2139,8 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do)
2089 goto force_update; 2139 goto force_update;
2090 2140
2091 skb->protocol = eth_type_trans(skb, dev); 2141 skb->protocol = eth_type_trans(skb, dev);
2142 sky2->net_stats.rx_packets++;
2143 sky2->net_stats.rx_bytes += skb->len;
2092 dev->last_rx = jiffies; 2144 dev->last_rx = jiffies;
2093 2145
2094#ifdef SKY2_VLAN_TAG_USED 2146#ifdef SKY2_VLAN_TAG_USED
@@ -2218,8 +2270,8 @@ static void sky2_hw_intr(struct sky2_hw *hw)
2218 2270
2219 pci_err = sky2_pci_read16(hw, PCI_STATUS); 2271 pci_err = sky2_pci_read16(hw, PCI_STATUS);
2220 if (net_ratelimit()) 2272 if (net_ratelimit())
2221 printk(KERN_ERR PFX "%s: pci hw error (0x%x)\n", 2273 dev_err(&hw->pdev->dev, "PCI hardware error (0x%x)\n",
2222 pci_name(hw->pdev), pci_err); 2274 pci_err);
2223 2275
2224 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); 2276 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
2225 sky2_pci_write16(hw, PCI_STATUS, 2277 sky2_pci_write16(hw, PCI_STATUS,
@@ -2234,8 +2286,8 @@ static void sky2_hw_intr(struct sky2_hw *hw)
2234 pex_err = sky2_pci_read32(hw, PEX_UNC_ERR_STAT); 2286 pex_err = sky2_pci_read32(hw, PEX_UNC_ERR_STAT);
2235 2287
2236 if (net_ratelimit()) 2288 if (net_ratelimit())
2237 printk(KERN_ERR PFX "%s: pci express error (0x%x)\n", 2289 dev_err(&hw->pdev->dev, "PCI Express error (0x%x)\n",
2238 pci_name(hw->pdev), pex_err); 2290 pex_err);
2239 2291
2240 /* clear the interrupt */ 2292 /* clear the interrupt */
2241 sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); 2293 sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
@@ -2404,6 +2456,7 @@ static inline u32 sky2_mhz(const struct sky2_hw *hw)
2404 switch (hw->chip_id) { 2456 switch (hw->chip_id) {
2405 case CHIP_ID_YUKON_EC: 2457 case CHIP_ID_YUKON_EC:
2406 case CHIP_ID_YUKON_EC_U: 2458 case CHIP_ID_YUKON_EC_U:
2459 case CHIP_ID_YUKON_EX:
2407 return 125; /* 125 Mhz */ 2460 return 125; /* 125 Mhz */
2408 case CHIP_ID_YUKON_FE: 2461 case CHIP_ID_YUKON_FE:
2409 return 100; /* 100 Mhz */ 2462 return 100; /* 100 Mhz */
@@ -2423,34 +2476,62 @@ static inline u32 sky2_clk2us(const struct sky2_hw *hw, u32 clk)
2423} 2476}
2424 2477
2425 2478
2426static int sky2_reset(struct sky2_hw *hw) 2479static int __devinit sky2_init(struct sky2_hw *hw)
2427{ 2480{
2428 u16 status;
2429 u8 t8; 2481 u8 t8;
2430 int i;
2431 2482
2432 sky2_write8(hw, B0_CTST, CS_RST_CLR); 2483 sky2_write8(hw, B0_CTST, CS_RST_CLR);
2433 2484
2434 hw->chip_id = sky2_read8(hw, B2_CHIP_ID); 2485 hw->chip_id = sky2_read8(hw, B2_CHIP_ID);
2435 if (hw->chip_id < CHIP_ID_YUKON_XL || hw->chip_id > CHIP_ID_YUKON_FE) { 2486 if (hw->chip_id < CHIP_ID_YUKON_XL || hw->chip_id > CHIP_ID_YUKON_FE) {
2436 printk(KERN_ERR PFX "%s: unsupported chip type 0x%x\n", 2487 dev_err(&hw->pdev->dev, "unsupported chip type 0x%x\n",
2437 pci_name(hw->pdev), hw->chip_id); 2488 hw->chip_id);
2438 return -EOPNOTSUPP; 2489 return -EOPNOTSUPP;
2439 } 2490 }
2440 2491
2492 if (hw->chip_id == CHIP_ID_YUKON_EX)
 2493 dev_warn(&hw->pdev->dev, "this driver is not yet tested on this chip type\n" 
2494 "Please report success or failure to <netdev@vger.kernel.org>\n");
2495
2496 /* Make sure and enable all clocks */
2497 if (hw->chip_id == CHIP_ID_YUKON_EX || hw->chip_id == CHIP_ID_YUKON_EC_U)
2498 sky2_pci_write32(hw, PCI_DEV_REG3, 0);
2499
2441 hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4; 2500 hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4;
2442 2501
2443 /* This rev is really old, and requires untested workarounds */ 2502 /* This rev is really old, and requires untested workarounds */
2444 if (hw->chip_id == CHIP_ID_YUKON_EC && hw->chip_rev == CHIP_REV_YU_EC_A1) { 2503 if (hw->chip_id == CHIP_ID_YUKON_EC && hw->chip_rev == CHIP_REV_YU_EC_A1) {
2445 printk(KERN_ERR PFX "%s: unsupported revision Yukon-%s (0x%x) rev %d\n", 2504 dev_err(&hw->pdev->dev, "unsupported revision Yukon-%s (0x%x) rev %d\n",
2446 pci_name(hw->pdev), yukon2_name[hw->chip_id - CHIP_ID_YUKON_XL], 2505 yukon2_name[hw->chip_id - CHIP_ID_YUKON_XL],
2447 hw->chip_id, hw->chip_rev); 2506 hw->chip_id, hw->chip_rev);
2448 return -EOPNOTSUPP; 2507 return -EOPNOTSUPP;
2449 } 2508 }
2450 2509
2510 hw->pmd_type = sky2_read8(hw, B2_PMD_TYP);
2511 hw->ports = 1;
2512 t8 = sky2_read8(hw, B2_Y2_HW_RES);
2513 if ((t8 & CFG_DUAL_MAC_MSK) == CFG_DUAL_MAC_MSK) {
2514 if (!(sky2_read8(hw, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
2515 ++hw->ports;
2516 }
2517
2518 return 0;
2519}
2520
2521static void sky2_reset(struct sky2_hw *hw)
2522{
2523 u16 status;
2524 int i;
2525
2451 /* disable ASF */ 2526 /* disable ASF */
2452 if (hw->chip_id <= CHIP_ID_YUKON_EC) { 2527 if (hw->chip_id <= CHIP_ID_YUKON_EC) {
2453 sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET); 2528 if (hw->chip_id == CHIP_ID_YUKON_EX) {
2529 status = sky2_read16(hw, HCU_CCSR);
2530 status &= ~(HCU_CCSR_AHB_RST | HCU_CCSR_CPU_RST_MODE |
2531 HCU_CCSR_UC_STATE_MSK);
2532 sky2_write16(hw, HCU_CCSR, status);
2533 } else
2534 sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
2454 sky2_write16(hw, B0_CTST, Y2_ASF_DISABLE); 2535 sky2_write16(hw, B0_CTST, Y2_ASF_DISABLE);
2455 } 2536 }
2456 2537
@@ -2472,15 +2553,7 @@ static int sky2_reset(struct sky2_hw *hw)
2472 sky2_pci_write32(hw, PEX_UNC_ERR_STAT, 0xffffffffUL); 2553 sky2_pci_write32(hw, PEX_UNC_ERR_STAT, 0xffffffffUL);
2473 2554
2474 2555
2475 hw->pmd_type = sky2_read8(hw, B2_PMD_TYP); 2556 sky2_power_on(hw);
2476 hw->ports = 1;
2477 t8 = sky2_read8(hw, B2_Y2_HW_RES);
2478 if ((t8 & CFG_DUAL_MAC_MSK) == CFG_DUAL_MAC_MSK) {
2479 if (!(sky2_read8(hw, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
2480 ++hw->ports;
2481 }
2482
2483 sky2_set_power_state(hw, PCI_D0);
2484 2557
2485 for (i = 0; i < hw->ports; i++) { 2558 for (i = 0; i < hw->ports; i++) {
2486 sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET); 2559 sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
@@ -2563,7 +2636,37 @@ static int sky2_reset(struct sky2_hw *hw)
2563 sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START); 2636 sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
2564 sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START); 2637 sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START);
2565 sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START); 2638 sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START);
2639}
2640
2641static inline u8 sky2_wol_supported(const struct sky2_hw *hw)
2642{
2643 return sky2_is_copper(hw) ? (WAKE_PHY | WAKE_MAGIC) : 0;
2644}
2645
2646static void sky2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2647{
2648 const struct sky2_port *sky2 = netdev_priv(dev);
2649
2650 wol->supported = sky2_wol_supported(sky2->hw);
2651 wol->wolopts = sky2->wol;
2652}
2653
2654static int sky2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2655{
2656 struct sky2_port *sky2 = netdev_priv(dev);
2657 struct sky2_hw *hw = sky2->hw;
2658
2659 if (wol->wolopts & ~sky2_wol_supported(sky2->hw))
2660 return -EOPNOTSUPP;
2661
2662 sky2->wol = wol->wolopts;
2663
2664 if (hw->chip_id == CHIP_ID_YUKON_EC_U)
2665 sky2_write32(hw, B0_CTST, sky2->wol
2666 ? Y2_HW_WOL_ON : Y2_HW_WOL_OFF);
2566 2667
2668 if (!netif_running(dev))
2669 sky2_wol_init(sky2);
2567 return 0; 2670 return 0;
2568} 2671}
2569 2672
@@ -2814,25 +2917,9 @@ static void sky2_get_strings(struct net_device *dev, u32 stringset, u8 * data)
2814 } 2917 }
2815} 2918}
2816 2919
2817/* Use hardware MIB variables for critical path statistics and
2818 * transmit feedback not reported at interrupt.
2819 * Other errors are accounted for in interrupt handler.
2820 */
2821static struct net_device_stats *sky2_get_stats(struct net_device *dev) 2920static struct net_device_stats *sky2_get_stats(struct net_device *dev)
2822{ 2921{
2823 struct sky2_port *sky2 = netdev_priv(dev); 2922 struct sky2_port *sky2 = netdev_priv(dev);
2824 u64 data[13];
2825
2826 sky2_phy_stats(sky2, data, ARRAY_SIZE(data));
2827
2828 sky2->net_stats.tx_bytes = data[0];
2829 sky2->net_stats.rx_bytes = data[1];
2830 sky2->net_stats.tx_packets = data[2] + data[4] + data[6];
2831 sky2->net_stats.rx_packets = data[3] + data[5] + data[7];
2832 sky2->net_stats.multicast = data[3] + data[5];
2833 sky2->net_stats.collisions = data[10];
2834 sky2->net_stats.tx_aborted_errors = data[12];
2835
2836 return &sky2->net_stats; 2923 return &sky2->net_stats;
2837} 2924}
2838 2925
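With tx and rx counters now bumped inline at completion time (see the sky2_tx_complete() and sky2_status_intr() hunks above), sky2_get_stats() reduces to handing back the software structure instead of reading a batch of PHY MIB registers. A minimal sketch of the pattern, with a hypothetical counters struct:

    #include <stdint.h>

    struct port_stats {             /* hypothetical, like sky2->net_stats */
            uint64_t tx_packets, tx_bytes;
            uint64_t rx_packets, rx_bytes;
    };

    /* Hot path: account at completion time... */
    static void on_tx_done(struct port_stats *st, unsigned int len)
    {
            st->tx_packets++;
            st->tx_bytes += len;
    }

    /* ...so the stats query is a cheap return, with no MIB reads. */
    static const struct port_stats *get_stats(const struct port_stats *st)
    {
            return st;
    }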
@@ -3191,7 +3278,9 @@ static void sky2_get_regs(struct net_device *dev, struct ethtool_regs *regs,
3191static const struct ethtool_ops sky2_ethtool_ops = { 3278static const struct ethtool_ops sky2_ethtool_ops = {
3192 .get_settings = sky2_get_settings, 3279 .get_settings = sky2_get_settings,
3193 .set_settings = sky2_set_settings, 3280 .set_settings = sky2_set_settings,
3194 .get_drvinfo = sky2_get_drvinfo, 3281 .get_drvinfo = sky2_get_drvinfo,
3282 .get_wol = sky2_get_wol,
3283 .set_wol = sky2_set_wol,
3195 .get_msglevel = sky2_get_msglevel, 3284 .get_msglevel = sky2_get_msglevel,
3196 .set_msglevel = sky2_set_msglevel, 3285 .set_msglevel = sky2_set_msglevel,
3197 .nway_reset = sky2_nway_reset, 3286 .nway_reset = sky2_nway_reset,
@@ -3221,13 +3310,14 @@ static const struct ethtool_ops sky2_ethtool_ops = {
3221 3310
3222/* Initialize network device */ 3311/* Initialize network device */
3223static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw, 3312static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
3224 unsigned port, int highmem) 3313 unsigned port,
3314 int highmem, int wol)
3225{ 3315{
3226 struct sky2_port *sky2; 3316 struct sky2_port *sky2;
3227 struct net_device *dev = alloc_etherdev(sizeof(*sky2)); 3317 struct net_device *dev = alloc_etherdev(sizeof(*sky2));
3228 3318
3229 if (!dev) { 3319 if (!dev) {
3230 printk(KERN_ERR "sky2 etherdev alloc failed"); 3320 dev_err(&hw->pdev->dev, "etherdev alloc failed\n"); 
3231 return NULL; 3321 return NULL;
3232 } 3322 }
3233 3323
@@ -3269,6 +3359,7 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
3269 sky2->speed = -1; 3359 sky2->speed = -1;
3270 sky2->advertising = sky2_supported_modes(hw); 3360 sky2->advertising = sky2_supported_modes(hw);
3271 sky2->rx_csum = 1; 3361 sky2->rx_csum = 1;
3362 sky2->wol = wol;
3272 3363
3273 spin_lock_init(&sky2->phy_lock); 3364 spin_lock_init(&sky2->phy_lock);
3274 sky2->tx_pending = TX_DEF_PENDING; 3365 sky2->tx_pending = TX_DEF_PENDING;
@@ -3278,11 +3369,9 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
3278 3369
3279 sky2->port = port; 3370 sky2->port = port;
3280 3371
3281 if (hw->chip_id != CHIP_ID_YUKON_EC_U) 3372 dev->features |= NETIF_F_TSO | NETIF_F_IP_CSUM | NETIF_F_SG;
3282 dev->features |= NETIF_F_TSO;
3283 if (highmem) 3373 if (highmem)
3284 dev->features |= NETIF_F_HIGHDMA; 3374 dev->features |= NETIF_F_HIGHDMA;
3285 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
3286 3375
3287#ifdef SKY2_VLAN_TAG_USED 3376#ifdef SKY2_VLAN_TAG_USED
3288 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 3377 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
@@ -3343,8 +3432,7 @@ static int __devinit sky2_test_msi(struct sky2_hw *hw)
3343 3432
3344 err = request_irq(pdev->irq, sky2_test_intr, 0, DRV_NAME, hw); 3433 err = request_irq(pdev->irq, sky2_test_intr, 0, DRV_NAME, hw);
3345 if (err) { 3434 if (err) {
3346 printk(KERN_ERR PFX "%s: cannot assign irq %d\n", 3435 dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq);
3347 pci_name(pdev), pdev->irq);
3348 return err; 3436 return err;
3349 } 3437 }
3350 3438
@@ -3355,9 +3443,8 @@ static int __devinit sky2_test_msi(struct sky2_hw *hw)
3355 3443
3356 if (!hw->msi) { 3444 if (!hw->msi) {
3357 /* MSI test failed, go back to INTx mode */ 3445 /* MSI test failed, go back to INTx mode */
3358 printk(KERN_INFO PFX "%s: No interrupt generated using MSI, " 3446 dev_info(&pdev->dev, "No interrupt generated using MSI, "
3359 "switching to INTx mode.\n", 3447 "switching to INTx mode.\n");
3360 pci_name(pdev));
3361 3448
3362 err = -EOPNOTSUPP; 3449 err = -EOPNOTSUPP;
3363 sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ); 3450 sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ);
@@ -3371,62 +3458,62 @@ static int __devinit sky2_test_msi(struct sky2_hw *hw)
3371 return err; 3458 return err;
3372} 3459}
3373 3460
3461static int __devinit pci_wake_enabled(struct pci_dev *dev)
3462{
3463 int pm = pci_find_capability(dev, PCI_CAP_ID_PM);
3464 u16 value;
3465
3466 if (!pm)
3467 return 0;
3468 if (pci_read_config_word(dev, pm + PCI_PM_CTRL, &value))
3469 return 0;
3470 return value & PCI_PM_CTRL_PME_ENABLE;
3471}
3472
3374static int __devinit sky2_probe(struct pci_dev *pdev, 3473static int __devinit sky2_probe(struct pci_dev *pdev,
3375 const struct pci_device_id *ent) 3474 const struct pci_device_id *ent)
3376{ 3475{
3377 struct net_device *dev, *dev1 = NULL; 3476 struct net_device *dev;
3378 struct sky2_hw *hw; 3477 struct sky2_hw *hw;
3379 int err, pm_cap, using_dac = 0; 3478 int err, using_dac = 0, wol_default;
3380 3479
3381 err = pci_enable_device(pdev); 3480 err = pci_enable_device(pdev);
3382 if (err) { 3481 if (err) {
3383 printk(KERN_ERR PFX "%s cannot enable PCI device\n", 3482 dev_err(&pdev->dev, "cannot enable PCI device\n");
3384 pci_name(pdev));
3385 goto err_out; 3483 goto err_out;
3386 } 3484 }
3387 3485
3388 err = pci_request_regions(pdev, DRV_NAME); 3486 err = pci_request_regions(pdev, DRV_NAME);
3389 if (err) { 3487 if (err) {
3390 printk(KERN_ERR PFX "%s cannot obtain PCI resources\n", 3488 dev_err(&pdev->dev, "cannot obtain PCI resources\n");
3391 pci_name(pdev));
3392 goto err_out; 3489 goto err_out;
3393 } 3490 }
3394 3491
3395 pci_set_master(pdev); 3492 pci_set_master(pdev);
3396 3493
3397 /* Find power-management capability. */
3398 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
3399 if (pm_cap == 0) {
3400 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
3401 "aborting.\n");
3402 err = -EIO;
3403 goto err_out_free_regions;
3404 }
3405
3406 if (sizeof(dma_addr_t) > sizeof(u32) && 3494 if (sizeof(dma_addr_t) > sizeof(u32) &&
3407 !(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) { 3495 !(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
3408 using_dac = 1; 3496 using_dac = 1;
3409 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); 3497 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
3410 if (err < 0) { 3498 if (err < 0) {
3411 printk(KERN_ERR PFX "%s unable to obtain 64 bit DMA " 3499 dev_err(&pdev->dev, "unable to obtain 64 bit DMA "
3412 "for consistent allocations\n", pci_name(pdev)); 3500 "for consistent allocations\n");
3413 goto err_out_free_regions; 3501 goto err_out_free_regions;
3414 } 3502 }
3415
3416 } else { 3503 } else {
3417 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); 3504 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
3418 if (err) { 3505 if (err) {
3419 printk(KERN_ERR PFX "%s no usable DMA configuration\n", 3506 dev_err(&pdev->dev, "no usable DMA configuration\n");
3420 pci_name(pdev));
3421 goto err_out_free_regions; 3507 goto err_out_free_regions;
3422 } 3508 }
3423 } 3509 }
3424 3510
3511 wol_default = pci_wake_enabled(pdev) ? WAKE_MAGIC : 0;
3512
3425 err = -ENOMEM; 3513 err = -ENOMEM;
3426 hw = kzalloc(sizeof(*hw), GFP_KERNEL); 3514 hw = kzalloc(sizeof(*hw), GFP_KERNEL);
3427 if (!hw) { 3515 if (!hw) {
3428 printk(KERN_ERR PFX "%s: cannot allocate hardware struct\n", 3516 dev_err(&pdev->dev, "cannot allocate hardware struct\n");
3429 pci_name(pdev));
3430 goto err_out_free_regions; 3517 goto err_out_free_regions;
3431 } 3518 }
3432 3519
@@ -3434,11 +3521,9 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
3434 3521
3435 hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000); 3522 hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
3436 if (!hw->regs) { 3523 if (!hw->regs) {
3437 printk(KERN_ERR PFX "%s: cannot map device registers\n", 3524 dev_err(&pdev->dev, "cannot map device registers\n");
3438 pci_name(pdev));
3439 goto err_out_free_hw; 3525 goto err_out_free_hw;
3440 } 3526 }
3441 hw->pm_cap = pm_cap;
3442 3527
3443#ifdef __BIG_ENDIAN 3528#ifdef __BIG_ENDIAN
3444 /* The sk98lin vendor driver uses hardware byte swapping but 3529 /* The sk98lin vendor driver uses hardware byte swapping but
@@ -3458,18 +3543,22 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
3458 if (!hw->st_le) 3543 if (!hw->st_le)
3459 goto err_out_iounmap; 3544 goto err_out_iounmap;
3460 3545
3461 err = sky2_reset(hw); 3546 err = sky2_init(hw);
3462 if (err) 3547 if (err)
3463 goto err_out_iounmap; 3548 goto err_out_iounmap;
3464 3549
3465 printk(KERN_INFO PFX "v%s addr 0x%llx irq %d Yukon-%s (0x%x) rev %d\n", 3550 dev_info(&pdev->dev, "v%s addr 0x%llx irq %d Yukon-%s (0x%x) rev %d\n",
3466 DRV_VERSION, (unsigned long long)pci_resource_start(pdev, 0), 3551 DRV_VERSION, (unsigned long long)pci_resource_start(pdev, 0),
3467 pdev->irq, yukon2_name[hw->chip_id - CHIP_ID_YUKON_XL], 3552 pdev->irq, yukon2_name[hw->chip_id - CHIP_ID_YUKON_XL],
3468 hw->chip_id, hw->chip_rev); 3553 hw->chip_id, hw->chip_rev);
3469 3554
3470 dev = sky2_init_netdev(hw, 0, using_dac); 3555 sky2_reset(hw);
3471 if (!dev) 3556
3557 dev = sky2_init_netdev(hw, 0, using_dac, wol_default);
3558 if (!dev) {
3559 err = -ENOMEM;
3472 goto err_out_free_pci; 3560 goto err_out_free_pci;
3561 }
3473 3562
3474 if (!disable_msi && pci_enable_msi(pdev) == 0) { 3563 if (!disable_msi && pci_enable_msi(pdev) == 0) {
3475 err = sky2_test_msi(hw); 3564 err = sky2_test_msi(hw);
@@ -3481,32 +3570,33 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
3481 3570
3482 err = register_netdev(dev); 3571 err = register_netdev(dev);
3483 if (err) { 3572 if (err) {
3484 printk(KERN_ERR PFX "%s: cannot register net device\n", 3573 dev_err(&pdev->dev, "cannot register net device\n");
3485 pci_name(pdev));
3486 goto err_out_free_netdev; 3574 goto err_out_free_netdev;
3487 } 3575 }
3488 3576
3489 err = request_irq(pdev->irq, sky2_intr, hw->msi ? 0 : IRQF_SHARED, 3577 err = request_irq(pdev->irq, sky2_intr, hw->msi ? 0 : IRQF_SHARED,
3490 dev->name, hw); 3578 dev->name, hw);
3491 if (err) { 3579 if (err) {
3492 printk(KERN_ERR PFX "%s: cannot assign irq %d\n", 3580 dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq);
3493 pci_name(pdev), pdev->irq);
3494 goto err_out_unregister; 3581 goto err_out_unregister;
3495 } 3582 }
3496 sky2_write32(hw, B0_IMSK, Y2_IS_BASE); 3583 sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
3497 3584
3498 sky2_show_addr(dev); 3585 sky2_show_addr(dev);
3499 3586
3500 if (hw->ports > 1 && (dev1 = sky2_init_netdev(hw, 1, using_dac))) { 3587 if (hw->ports > 1) {
3501 if (register_netdev(dev1) == 0) 3588 struct net_device *dev1;
3502 sky2_show_addr(dev1); 3589
3503 else { 3590 dev1 = sky2_init_netdev(hw, 1, using_dac, wol_default);
3504 /* Failure to register second port need not be fatal */ 3591 if (!dev1)
3505 printk(KERN_WARNING PFX 3592 dev_warn(&pdev->dev, "allocation for second device failed\n");
3506 "register of second port failed\n"); 3593 else if ((err = register_netdev(dev1))) {
3594 dev_warn(&pdev->dev,
3595 "register of second port failed (%d)\n", err);
3507 hw->dev[1] = NULL; 3596 hw->dev[1] = NULL;
3508 free_netdev(dev1); 3597 free_netdev(dev1);
3509 } 3598 } else
3599 sky2_show_addr(dev1);
3510 } 3600 }
3511 3601
3512 setup_timer(&hw->idle_timer, sky2_idle, (unsigned long) hw); 3602 setup_timer(&hw->idle_timer, sky2_idle, (unsigned long) hw);
@@ -3555,7 +3645,8 @@ static void __devexit sky2_remove(struct pci_dev *pdev)
3555 unregister_netdev(dev1); 3645 unregister_netdev(dev1);
3556 unregister_netdev(dev0); 3646 unregister_netdev(dev0);
3557 3647
3558 sky2_set_power_state(hw, PCI_D3hot); 3648 sky2_power_aux(hw);
3649
3559 sky2_write16(hw, B0_Y2LED, LED_STAT_OFF); 3650 sky2_write16(hw, B0_Y2LED, LED_STAT_OFF);
3560 sky2_write8(hw, B0_CTST, CS_RST_SET); 3651 sky2_write8(hw, B0_CTST, CS_RST_SET);
3561 sky2_read8(hw, B0_CTST); 3652 sky2_read8(hw, B0_CTST);
@@ -3580,27 +3671,31 @@ static void __devexit sky2_remove(struct pci_dev *pdev)
3580static int sky2_suspend(struct pci_dev *pdev, pm_message_t state) 3671static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
3581{ 3672{
3582 struct sky2_hw *hw = pci_get_drvdata(pdev); 3673 struct sky2_hw *hw = pci_get_drvdata(pdev);
3583 int i; 3674 int i, wol = 0;
3584 pci_power_t pstate = pci_choose_state(pdev, state);
3585
3586 if (!(pstate == PCI_D3hot || pstate == PCI_D3cold))
3587 return -EINVAL;
3588 3675
3589 del_timer_sync(&hw->idle_timer); 3676 del_timer_sync(&hw->idle_timer);
3590 netif_poll_disable(hw->dev[0]); 3677 netif_poll_disable(hw->dev[0]);
3591 3678
3592 for (i = 0; i < hw->ports; i++) { 3679 for (i = 0; i < hw->ports; i++) {
3593 struct net_device *dev = hw->dev[i]; 3680 struct net_device *dev = hw->dev[i];
3681 struct sky2_port *sky2 = netdev_priv(dev);
3594 3682
3595 if (netif_running(dev)) { 3683 if (netif_running(dev))
3596 sky2_down(dev); 3684 sky2_down(dev);
3597 netif_device_detach(dev); 3685
3598 } 3686 if (sky2->wol)
3687 sky2_wol_init(sky2);
3688
3689 wol |= sky2->wol;
3599 } 3690 }
3600 3691
3601 sky2_write32(hw, B0_IMSK, 0); 3692 sky2_write32(hw, B0_IMSK, 0);
3693 sky2_power_aux(hw);
3694
3602 pci_save_state(pdev); 3695 pci_save_state(pdev);
3603 sky2_set_power_state(hw, pstate); 3696 pci_enable_wake(pdev, pci_choose_state(pdev, state), wol);
3697 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3698
3604 return 0; 3699 return 0;
3605} 3700}
3606 3701
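The reworked suspend derives the wake setting per port: each port with Wake-on-LAN enabled programs its WOL block, and the flags are OR-ed into one value that gates the PCI wake enable. The shape of the hunk above, condensed:

    int i, wol = 0;

    for (i = 0; i < hw->ports; i++) {
        struct sky2_port *sky2 = netdev_priv(hw->dev[i]);

        if (sky2->wol)
            sky2_wol_init(sky2);  /* arm the configured WOL events */
        wol |= sky2->wol;
    }

    pci_save_state(pdev);
    pci_enable_wake(pdev, pci_choose_state(pdev, state), wol);
    pci_set_power_state(pdev, pci_choose_state(pdev, state));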
@@ -3609,21 +3704,22 @@ static int sky2_resume(struct pci_dev *pdev)
3609 struct sky2_hw *hw = pci_get_drvdata(pdev); 3704 struct sky2_hw *hw = pci_get_drvdata(pdev);
3610 int i, err; 3705 int i, err;
3611 3706
3612 pci_restore_state(pdev); 3707 err = pci_set_power_state(pdev, PCI_D0);
3613 pci_enable_wake(pdev, PCI_D0, 0); 3708 if (err)
3614 sky2_set_power_state(hw, PCI_D0); 3709 goto out;
3615 3710
3616 err = sky2_reset(hw); 3711 err = pci_restore_state(pdev);
3617 if (err) 3712 if (err)
3618 goto out; 3713 goto out;
3619 3714
3715 pci_enable_wake(pdev, PCI_D0, 0);
3716 sky2_reset(hw);
3717
3620 sky2_write32(hw, B0_IMSK, Y2_IS_BASE); 3718 sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
3621 3719
3622 for (i = 0; i < hw->ports; i++) { 3720 for (i = 0; i < hw->ports; i++) {
3623 struct net_device *dev = hw->dev[i]; 3721 struct net_device *dev = hw->dev[i];
3624 if (netif_running(dev)) { 3722 if (netif_running(dev)) {
3625 netif_device_attach(dev);
3626
3627 err = sky2_up(dev); 3723 err = sky2_up(dev);
3628 if (err) { 3724 if (err) {
3629 printk(KERN_ERR PFX "%s: could not up: %d\n", 3725 printk(KERN_ERR PFX "%s: could not up: %d\n",
@@ -3636,11 +3732,43 @@ static int sky2_resume(struct pci_dev *pdev)
3636 3732
3637 netif_poll_enable(hw->dev[0]); 3733 netif_poll_enable(hw->dev[0]);
3638 sky2_idle_start(hw); 3734 sky2_idle_start(hw);
3735 return 0;
3639out: 3736out:
3737 dev_err(&pdev->dev, "resume failed (%d)\n", err);
3738 pci_disable_device(pdev);
3640 return err; 3739 return err;
3641} 3740}
3642#endif 3741#endif
3643 3742
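Note the ordering the resume rework establishes: the device is brought back to D0 before its config space is restored, since a config-space restore is only guaranteed to take effect at full power; wake enables are cleared only after that. Condensed:

    err = pci_set_power_state(pdev, PCI_D0);   /* power up first */
    if (err)
        goto out;
    err = pci_restore_state(pdev);             /* then restore config space */
    if (err)
        goto out;
    pci_enable_wake(pdev, PCI_D0, 0);          /* no wake-ups wanted while running */
    sky2_reset(hw);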
3743static void sky2_shutdown(struct pci_dev *pdev)
3744{
3745 struct sky2_hw *hw = pci_get_drvdata(pdev);
3746 int i, wol = 0;
3747
3748 del_timer_sync(&hw->idle_timer);
3749 netif_poll_disable(hw->dev[0]);
3750
3751 for (i = 0; i < hw->ports; i++) {
3752 struct net_device *dev = hw->dev[i];
3753 struct sky2_port *sky2 = netdev_priv(dev);
3754
3755 if (sky2->wol) {
3756 wol = 1;
3757 sky2_wol_init(sky2);
3758 }
3759 }
3760
3761 if (wol)
3762 sky2_power_aux(hw);
3763
3764 pci_enable_wake(pdev, PCI_D3hot, wol);
3765 pci_enable_wake(pdev, PCI_D3cold, wol);
3766
3767 pci_disable_device(pdev);
3768 pci_set_power_state(pdev, PCI_D3hot);
3769
3770}
3771
3644static struct pci_driver sky2_driver = { 3772static struct pci_driver sky2_driver = {
3645 .name = DRV_NAME, 3773 .name = DRV_NAME,
3646 .id_table = sky2_id_table, 3774 .id_table = sky2_id_table,
@@ -3650,6 +3778,7 @@ static struct pci_driver sky2_driver = {
3650 .suspend = sky2_suspend, 3778 .suspend = sky2_suspend,
3651 .resume = sky2_resume, 3779 .resume = sky2_resume,
3652#endif 3780#endif
3781 .shutdown = sky2_shutdown,
3653}; 3782};
3654 3783
3655static int __init sky2_init_module(void) 3784static int __init sky2_init_module(void)
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 6ed1d47dbbd3..3b0189569d52 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -32,6 +32,7 @@ enum pci_dev_reg_1 {
32 PCI_Y2_PHY1_COMA = 1<<28, /* Set PHY 1 to Coma Mode (YUKON-2) */ 32 PCI_Y2_PHY1_COMA = 1<<28, /* Set PHY 1 to Coma Mode (YUKON-2) */
33 PCI_Y2_PHY2_POWD = 1<<27, /* Set PHY 2 to Power Down (YUKON-2) */ 33 PCI_Y2_PHY2_POWD = 1<<27, /* Set PHY 2 to Power Down (YUKON-2) */
34 PCI_Y2_PHY1_POWD = 1<<26, /* Set PHY 1 to Power Down (YUKON-2) */ 34 PCI_Y2_PHY1_POWD = 1<<26, /* Set PHY 1 to Power Down (YUKON-2) */
35 PCI_Y2_PME_LEGACY= 1<<15, /* PCI Express legacy power management mode */
35}; 36};
36 37
37enum pci_dev_reg_2 { 38enum pci_dev_reg_2 {
@@ -370,12 +371,9 @@ enum {
370 371
371/* B2_CHIP_ID 8 bit Chip Identification Number */ 372/* B2_CHIP_ID 8 bit Chip Identification Number */
372enum { 373enum {
373 CHIP_ID_GENESIS = 0x0a, /* Chip ID for GENESIS */
374 CHIP_ID_YUKON = 0xb0, /* Chip ID for YUKON */
375 CHIP_ID_YUKON_LITE = 0xb1, /* Chip ID for YUKON-Lite (Rev. A1-A3) */
376 CHIP_ID_YUKON_LP = 0xb2, /* Chip ID for YUKON-LP */
377 CHIP_ID_YUKON_XL = 0xb3, /* Chip ID for YUKON-2 XL */ 374 CHIP_ID_YUKON_XL = 0xb3, /* Chip ID for YUKON-2 XL */
378 CHIP_ID_YUKON_EC_U = 0xb4, /* Chip ID for YUKON-2 EC Ultra */ 375 CHIP_ID_YUKON_EC_U = 0xb4, /* Chip ID for YUKON-2 EC Ultra */
376 CHIP_ID_YUKON_EX = 0xb5, /* Chip ID for YUKON-2 Extreme */
379 CHIP_ID_YUKON_EC = 0xb6, /* Chip ID for YUKON-2 EC */ 377 CHIP_ID_YUKON_EC = 0xb6, /* Chip ID for YUKON-2 EC */
380 CHIP_ID_YUKON_FE = 0xb7, /* Chip ID for YUKON-2 FE */ 378 CHIP_ID_YUKON_FE = 0xb7, /* Chip ID for YUKON-2 FE */
381 379
@@ -767,6 +765,24 @@ enum {
767 POLL_LIST_ADDR_HI= 0x0e2c,/* 32 bit Poll. List Start Addr (high) */ 765 POLL_LIST_ADDR_HI= 0x0e2c,/* 32 bit Poll. List Start Addr (high) */
768}; 766};
769 767
768enum {
769 SMB_CFG = 0x0e40, /* 32 bit SMBus Config Register */
770 SMB_CSR = 0x0e44, /* 32 bit SMBus Control/Status Register */
771};
772
773enum {
774 CPU_WDOG = 0x0e48, /* 32 bit Watchdog Register */
775 CPU_CNTR = 0x0e4C, /* 32 bit Counter Register */
776 CPU_TIM = 0x0e50, /* 32 bit Timer Compare Register */
777 CPU_AHB_ADDR = 0x0e54, /* 32 bit CPU AHB Debug Register */
778 CPU_AHB_WDATA = 0x0e58, /* 32 bit CPU AHB Debug Register */
779 CPU_AHB_RDATA = 0x0e5C, /* 32 bit CPU AHB Debug Register */
780 HCU_MAP_BASE = 0x0e60, /* 32 bit Reset Mapping Base */
781 CPU_AHB_CTRL = 0x0e64, /* 32 bit CPU AHB Debug Register */
782 HCU_CCSR = 0x0e68, /* 32 bit CPU Control and Status Register */
783 HCU_HCSR = 0x0e6C, /* 32 bit Host Control and Status Register */
784};
785
770/* ASF Subsystem Registers (Yukon-2 only) */ 786/* ASF Subsystem Registers (Yukon-2 only) */
771enum { 787enum {
772 B28_Y2_SMB_CONFIG = 0x0e40,/* 32 bit ASF SMBus Config Register */ 788 B28_Y2_SMB_CONFIG = 0x0e40,/* 32 bit ASF SMBus Config Register */
@@ -837,33 +853,27 @@ enum {
837 GMAC_LINK_CTRL = 0x0f10,/* 16 bit Link Control Reg */ 853 GMAC_LINK_CTRL = 0x0f10,/* 16 bit Link Control Reg */
838 854
839/* Wake-up Frame Pattern Match Control Registers (YUKON only) */ 855/* Wake-up Frame Pattern Match Control Registers (YUKON only) */
840
841 WOL_REG_OFFS = 0x20,/* HW-Bug: Address is + 0x20 against spec. */
842
843 WOL_CTRL_STAT = 0x0f20,/* 16 bit WOL Control/Status Reg */ 856 WOL_CTRL_STAT = 0x0f20,/* 16 bit WOL Control/Status Reg */
844 WOL_MATCH_CTL = 0x0f22,/* 8 bit WOL Match Control Reg */ 857 WOL_MATCH_CTL = 0x0f22,/* 8 bit WOL Match Control Reg */
845 WOL_MATCH_RES = 0x0f23,/* 8 bit WOL Match Result Reg */ 858 WOL_MATCH_RES = 0x0f23,/* 8 bit WOL Match Result Reg */
846 WOL_MAC_ADDR = 0x0f24,/* 32 bit WOL MAC Address */ 859 WOL_MAC_ADDR = 0x0f24,/* 32 bit WOL MAC Address */
847 WOL_PATT_PME = 0x0f2a,/* 8 bit WOL PME Match Enable (Yukon-2) */
848 WOL_PATT_ASFM = 0x0f2b,/* 8 bit WOL ASF Match Enable (Yukon-2) */
849 WOL_PATT_RPTR = 0x0f2c,/* 8 bit WOL Pattern Read Pointer */ 860 WOL_PATT_RPTR = 0x0f2c,/* 8 bit WOL Pattern Read Pointer */
850 861
851/* WOL Pattern Length Registers (YUKON only) */ 862/* WOL Pattern Length Registers (YUKON only) */
852
853 WOL_PATT_LEN_LO = 0x0f30,/* 32 bit WOL Pattern Length 3..0 */ 863 WOL_PATT_LEN_LO = 0x0f30,/* 32 bit WOL Pattern Length 3..0 */
854 WOL_PATT_LEN_HI = 0x0f34,/* 24 bit WOL Pattern Length 6..4 */ 864 WOL_PATT_LEN_HI = 0x0f34,/* 24 bit WOL Pattern Length 6..4 */
855 865
856/* WOL Pattern Counter Registers (YUKON only) */ 866/* WOL Pattern Counter Registers (YUKON only) */
857
858
859 WOL_PATT_CNT_0 = 0x0f38,/* 32 bit WOL Pattern Counter 3..0 */ 867 WOL_PATT_CNT_0 = 0x0f38,/* 32 bit WOL Pattern Counter 3..0 */
860 WOL_PATT_CNT_4 = 0x0f3c,/* 24 bit WOL Pattern Counter 6..4 */ 868 WOL_PATT_CNT_4 = 0x0f3c,/* 24 bit WOL Pattern Counter 6..4 */
861}; 869};
870#define WOL_REGS(port, x) (x + (port)*0x80)
862 871
863enum { 872enum {
864 WOL_PATT_RAM_1 = 0x1000,/* WOL Pattern RAM Link 1 */ 873 WOL_PATT_RAM_1 = 0x1000,/* WOL Pattern RAM Link 1 */
865 WOL_PATT_RAM_2 = 0x1400,/* WOL Pattern RAM Link 2 */ 874 WOL_PATT_RAM_2 = 0x1400,/* WOL Pattern RAM Link 2 */
866}; 875};
876#define WOL_PATT_RAM_BASE(port) (WOL_PATT_RAM_1 + (port)*0x400)
867 877
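The second port's WOL register block sits 0x80 above the first (and its pattern RAM 0x400 above), which is why per-port register enums can be replaced by the two stride macros above. A hypothetical usage sketch, assuming the driver's sky2_read16() register accessor:

    /* read port 1's WOL control/status register */
    u16 ctrl = sky2_read16(hw, WOL_REGS(1, WOL_CTRL_STAT));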
868enum { 878enum {
869 BASE_GMAC_1 = 0x2800,/* GMAC 1 registers */ 879 BASE_GMAC_1 = 0x2800,/* GMAC 1 registers */
@@ -1654,6 +1664,39 @@ enum {
1654 Y2_ASF_CLR_ASFI = 1<<1, /* Clear host IRQ */ 1664 Y2_ASF_CLR_ASFI = 1<<1, /* Clear host IRQ */
1655 Y2_ASF_HOST_IRQ = 1<<0, /* Issue an IRQ to HOST system */ 1665 Y2_ASF_HOST_IRQ = 1<<0, /* Issue an IRQ to HOST system */
1656}; 1666};
1667/* HCU_CCSR CPU Control and Status Register */
1668enum {
1669 HCU_CCSR_SMBALERT_MONITOR= 1<<27, /* SMBALERT pin monitor */
1670 HCU_CCSR_CPU_SLEEP = 1<<26, /* CPU sleep status */
1671 /* Clock Stretching Timeout */
1672 HCU_CCSR_CS_TO = 1<<25,
1673 HCU_CCSR_WDOG = 1<<24, /* Watchdog Reset */
1674
1675 HCU_CCSR_CLR_IRQ_HOST = 1<<17, /* Clear IRQ_HOST */
1676 HCU_CCSR_SET_IRQ_HCU = 1<<16, /* Set IRQ_HCU */
1677
1678 HCU_CCSR_AHB_RST = 1<<9, /* Reset AHB bridge */
1679 HCU_CCSR_CPU_RST_MODE = 1<<8, /* CPU Reset Mode */
1680
1681 HCU_CCSR_SET_SYNC_CPU = 1<<5,
1682 HCU_CCSR_CPU_CLK_DIVIDE_MSK = 3<<3,/* CPU Clock Divide */
1683 HCU_CCSR_CPU_CLK_DIVIDE_BASE= 1<<3,
1684 HCU_CCSR_OS_PRSNT = 1<<2, /* ASF OS Present */
1685/* Microcontroller State */
1686 HCU_CCSR_UC_STATE_MSK = 3,
1687 HCU_CCSR_UC_STATE_BASE = 1<<0,
1688 HCU_CCSR_ASF_RESET = 0,
1689 HCU_CCSR_ASF_HALTED = 1<<1,
1690 HCU_CCSR_ASF_RUNNING = 1<<0,
1691};
1692
1693/* HCU_HCSR Host Control and Status Register */
1694enum {
1695 HCU_HCSR_SET_IRQ_CPU = 1<<16, /* Set IRQ_CPU */
1696
1697 HCU_HCSR_CLR_IRQ_HCU = 1<<1, /* Clear IRQ_HCU */
1698 HCU_HCSR_SET_IRQ_HOST = 1<<0, /* Set IRQ_HOST */
1699};
1657 1700
1658/* STAT_CTRL 32 bit Status BMU control register (Yukon-2 only) */ 1701/* STAT_CTRL 32 bit Status BMU control register (Yukon-2 only) */
1659enum { 1702enum {
@@ -1715,14 +1758,17 @@ enum {
1715 GM_IS_RX_COMPL = 1<<0, /* Frame Reception Complete */ 1758 GM_IS_RX_COMPL = 1<<0, /* Frame Reception Complete */
1716 1759
1717#define GMAC_DEF_MSK GM_IS_TX_FF_UR 1760#define GMAC_DEF_MSK GM_IS_TX_FF_UR
1761};
1718 1762
1719/* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON only) */ 1763/* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON only) */
1720 /* Bits 15.. 2: reserved */ 1764enum { /* Bits 15.. 2: reserved */
1721 GMLC_RST_CLR = 1<<1, /* Clear GMAC Link Reset */ 1765 GMLC_RST_CLR = 1<<1, /* Clear GMAC Link Reset */
1722 GMLC_RST_SET = 1<<0, /* Set GMAC Link Reset */ 1766 GMLC_RST_SET = 1<<0, /* Set GMAC Link Reset */
1767};
1723 1768
1724 1769
1725/* WOL_CTRL_STAT 16 bit WOL Control/Status Reg */ 1770/* WOL_CTRL_STAT 16 bit WOL Control/Status Reg */
1771enum {
1726 WOL_CTL_LINK_CHG_OCC = 1<<15, 1772 WOL_CTL_LINK_CHG_OCC = 1<<15,
1727 WOL_CTL_MAGIC_PKT_OCC = 1<<14, 1773 WOL_CTL_MAGIC_PKT_OCC = 1<<14,
1728 WOL_CTL_PATTERN_OCC = 1<<13, 1774 WOL_CTL_PATTERN_OCC = 1<<13,
@@ -1741,17 +1787,6 @@ enum {
1741 WOL_CTL_DIS_PATTERN_UNIT = 1<<0, 1787 WOL_CTL_DIS_PATTERN_UNIT = 1<<0,
1742}; 1788};
1743 1789
1744#define WOL_CTL_DEFAULT \
1745 (WOL_CTL_DIS_PME_ON_LINK_CHG | \
1746 WOL_CTL_DIS_PME_ON_PATTERN | \
1747 WOL_CTL_DIS_PME_ON_MAGIC_PKT | \
1748 WOL_CTL_DIS_LINK_CHG_UNIT | \
1749 WOL_CTL_DIS_PATTERN_UNIT | \
1750 WOL_CTL_DIS_MAGIC_PKT_UNIT)
1751
1752/* WOL_MATCH_CTL 8 bit WOL Match Control Reg */
1753#define WOL_CTL_PATT_ENA(x) (1 << (x))
1754
1755 1790
1756/* Control flags */ 1791/* Control flags */
1757enum { 1792enum {
@@ -1875,6 +1910,7 @@ struct sky2_port {
1875 u8 autoneg; /* AUTONEG_ENABLE, AUTONEG_DISABLE */ 1910 u8 autoneg; /* AUTONEG_ENABLE, AUTONEG_DISABLE */
1876 u8 duplex; /* DUPLEX_HALF, DUPLEX_FULL */ 1911 u8 duplex; /* DUPLEX_HALF, DUPLEX_FULL */
1877 u8 rx_csum; 1912 u8 rx_csum;
1913 u8 wol;
1878 enum flow_control flow_mode; 1914 enum flow_control flow_mode;
1879 enum flow_control flow_status; 1915 enum flow_control flow_status;
1880 1916
@@ -1887,7 +1923,6 @@ struct sky2_hw {
1887 struct pci_dev *pdev; 1923 struct pci_dev *pdev;
1888 struct net_device *dev[2]; 1924 struct net_device *dev[2];
1889 1925
1890 int pm_cap;
1891 u8 chip_id; 1926 u8 chip_id;
1892 u8 chip_rev; 1927 u8 chip_rev;
1893 u8 pmd_type; 1928 u8 pmd_type;
diff --git a/drivers/net/slip.c b/drivers/net/slip.c
index a0806d262fc6..2f4b1de7a2b4 100644
--- a/drivers/net/slip.c
+++ b/drivers/net/slip.c
@@ -1343,15 +1343,12 @@ static int __init slip_init(void)
1343 printk(KERN_INFO "SLIP linefill/keepalive option.\n"); 1343 printk(KERN_INFO "SLIP linefill/keepalive option.\n");
1344#endif 1344#endif
1345 1345
1346 slip_devs = kmalloc(sizeof(struct net_device *)*slip_maxdev, GFP_KERNEL); 1346 slip_devs = kzalloc(sizeof(struct net_device *)*slip_maxdev, GFP_KERNEL);
1347 if (!slip_devs) { 1347 if (!slip_devs) {
1348 printk(KERN_ERR "SLIP: Can't allocate slip devices array! Uaargh! (-> No SLIP available)\n"); 1348 printk(KERN_ERR "SLIP: Can't allocate slip devices array! Uaargh! (-> No SLIP available)\n");
1349 return -ENOMEM; 1349 return -ENOMEM;
1350 } 1350 }
1351 1351
1352 /* Clear the pointer array, we allocate devices when we need them */
1353 memset(slip_devs, 0, sizeof(struct net_device *)*slip_maxdev);
1354
1355 /* Fill in our line protocol discipline, and register it */ 1352 /* Fill in our line protocol discipline, and register it */
1356 if ((status = tty_register_ldisc(N_SLIP, &sl_ldisc)) != 0) { 1353 if ((status = tty_register_ldisc(N_SLIP, &sl_ldisc)) != 0) {
1357 printk(KERN_ERR "SLIP: can't register line discipline (err = %d)\n", status); 1354 printk(KERN_ERR "SLIP: can't register line discipline (err = %d)\n", status);
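kzalloc() is kmalloc() plus zeroing, so the hand-rolled memset() that used to follow the allocation is redundant and dropped. The equivalence, sketched:

    /* before: allocate, then clear by hand */
    slip_devs = kmalloc(sizeof(struct net_device *) * slip_maxdev, GFP_KERNEL);
    if (slip_devs)
        memset(slip_devs, 0, sizeof(struct net_device *) * slip_maxdev);

    /* after: one call does both */
    slip_devs = kzalloc(sizeof(struct net_device *) * slip_maxdev, GFP_KERNEL);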
diff --git a/drivers/net/smc-mca.c b/drivers/net/smc-mca.c
index 7122932eac90..ae1ae343beed 100644
--- a/drivers/net/smc-mca.c
+++ b/drivers/net/smc-mca.c
@@ -482,8 +482,7 @@ static void ultramca_block_input(struct net_device *dev, int count, struct sk_bu
482 count -= semi_count; 482 count -= semi_count;
483 memcpy_fromio(skb->data + semi_count, ei_status.mem + TX_PAGES * 256, count); 483 memcpy_fromio(skb->data + semi_count, ei_status.mem + TX_PAGES * 256, count);
484 } else { 484 } else {
485 /* Packet is in one chunk -- we can copy + cksum. */ 485 memcpy_fromio(skb->data, xfer_start, count);
486 eth_io_copy_and_sum(skb, xfer_start, count, 0);
487 } 486 }
488 487
489} 488}
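Despite its name, eth_io_copy_and_sum() had long amounted to a plain copy from device memory, with the checksum computed later by the stack; the three 8390-based drivers touched here simply open-code the MMIO-safe copy. The single-chunk case:

    /* Packet is in one chunk -- plain copy; the stack checksums later. */
    memcpy_fromio(skb->data, xfer_start, count);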
diff --git a/drivers/net/smc-ultra.c b/drivers/net/smc-ultra.c
index d70bc9795346..a52b22d7db65 100644
--- a/drivers/net/smc-ultra.c
+++ b/drivers/net/smc-ultra.c
@@ -454,8 +454,7 @@ ultra_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ri
454 count -= semi_count; 454 count -= semi_count;
455 memcpy_fromio(skb->data + semi_count, ei_status.mem + TX_PAGES * 256, count); 455 memcpy_fromio(skb->data + semi_count, ei_status.mem + TX_PAGES * 256, count);
456 } else { 456 } else {
457 /* Packet is in one chunk -- we can copy + cksum. */ 457 memcpy_fromio(skb->data, xfer_start, count);
458 eth_io_copy_and_sum(skb, xfer_start, count, 0);
459 } 458 }
460 459
461 outb(0x00, dev->base_addr - ULTRA_NIC_OFFSET); /* Disable memory. */ 460 outb(0x00, dev->base_addr - ULTRA_NIC_OFFSET); /* Disable memory. */
diff --git a/drivers/net/smc-ultra32.c b/drivers/net/smc-ultra32.c
index 2c5319c62fa5..88a30e56c64c 100644
--- a/drivers/net/smc-ultra32.c
+++ b/drivers/net/smc-ultra32.c
@@ -395,8 +395,7 @@ static void ultra32_block_input(struct net_device *dev,
395 memcpy_fromio(skb->data + semi_count, ei_status.mem + TX_PAGES * 256, count); 395 memcpy_fromio(skb->data + semi_count, ei_status.mem + TX_PAGES * 256, count);
396 } 396 }
397 } else { 397 } else {
398 /* Packet is in one chunk -- we can copy + cksum. */ 398 memcpy_fromio(skb->data, xfer_start, count);
399 eth_io_copy_and_sum(skb, xfer_start, count, 0);
400 } 399 }
401} 400}
402 401
diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c
index 880d9fdd7c67..c95614131980 100644
--- a/drivers/net/smc911x.c
+++ b/drivers/net/smc911x.c
@@ -968,11 +968,11 @@ static void smc911x_phy_configure(struct work_struct *work)
968 * We should not be called if phy_type is zero. 968 * We should not be called if phy_type is zero.
969 */ 969 */
970 if (lp->phy_type == 0) 970 if (lp->phy_type == 0)
971 goto smc911x_phy_configure_exit; 971 goto smc911x_phy_configure_exit_nolock;
972 972
973 if (smc911x_phy_reset(dev, phyaddr)) { 973 if (smc911x_phy_reset(dev, phyaddr)) {
974 printk("%s: PHY reset timed out\n", dev->name); 974 printk("%s: PHY reset timed out\n", dev->name);
975 goto smc911x_phy_configure_exit; 975 goto smc911x_phy_configure_exit_nolock;
976 } 976 }
977 spin_lock_irqsave(&lp->lock, flags); 977 spin_lock_irqsave(&lp->lock, flags);
978 978
@@ -1041,6 +1041,7 @@ static void smc911x_phy_configure(struct work_struct *work)
1041 1041
1042smc911x_phy_configure_exit: 1042smc911x_phy_configure_exit:
1043 spin_unlock_irqrestore(&lp->lock, flags); 1043 spin_unlock_irqrestore(&lp->lock, flags);
1044smc911x_phy_configure_exit_nolock:
1044 lp->work_pending = 0; 1045 lp->work_pending = 0;
1045} 1046}
1046 1047
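The smc911x fix is the classic two-label exit pattern: error paths taken before the lock is acquired must jump past the unlock. The resulting shape of the function (labels shortened here for brevity):

    if (lp->phy_type == 0)
        goto exit_nolock;              /* lock not yet held */

    spin_lock_irqsave(&lp->lock, flags);
    /* ... configure the PHY ... */

exit:
    spin_unlock_irqrestore(&lp->lock, flags);
exit_nolock:
    lp->work_pending = 0;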
@@ -1658,7 +1659,7 @@ smc911x_ethtool_getdrvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1658{ 1659{
1659 strncpy(info->driver, CARDNAME, sizeof(info->driver)); 1660 strncpy(info->driver, CARDNAME, sizeof(info->driver));
1660 strncpy(info->version, version, sizeof(info->version)); 1661 strncpy(info->version, version, sizeof(info->version));
1661 strncpy(info->bus_info, dev->class_dev.dev->bus_id, sizeof(info->bus_info)); 1662 strncpy(info->bus_info, dev->dev.parent->bus_id, sizeof(info->bus_info));
1662} 1663}
1663 1664
1664static int smc911x_ethtool_nwayreset(struct net_device *dev) 1665static int smc911x_ethtool_nwayreset(struct net_device *dev)
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index e62a9586fb95..49f4b7712ebf 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -1712,7 +1712,7 @@ smc_ethtool_getdrvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1712{ 1712{
1713 strncpy(info->driver, CARDNAME, sizeof(info->driver)); 1713 strncpy(info->driver, CARDNAME, sizeof(info->driver));
1714 strncpy(info->version, version, sizeof(info->version)); 1714 strncpy(info->version, version, sizeof(info->version));
1715 strncpy(info->bus_info, dev->class_dev.dev->bus_id, sizeof(info->bus_info)); 1715 strncpy(info->bus_info, dev->dev.parent->bus_id, sizeof(info->bus_info));
1716} 1716}
1717 1717
1718static int smc_ethtool_nwayreset(struct net_device *dev) 1718static int smc_ethtool_nwayreset(struct net_device *dev)
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index ebb6aa39f9c7..64ed8ff5b03a 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -280,72 +280,67 @@ spider_net_free_chain(struct spider_net_card *card,
280{ 280{
281 struct spider_net_descr *descr; 281 struct spider_net_descr *descr;
282 282
283 for (descr = chain->tail; !descr->bus_addr; descr = descr->next) { 283 descr = chain->ring;
284 pci_unmap_single(card->pdev, descr->bus_addr, 284 do {
285 SPIDER_NET_DESCR_SIZE, PCI_DMA_BIDIRECTIONAL);
286 descr->bus_addr = 0; 285 descr->bus_addr = 0;
287 } 286 descr->next_descr_addr = 0;
287 descr = descr->next;
288 } while (descr != chain->ring);
289
290 dma_free_coherent(&card->pdev->dev, chain->num_desc,
291 chain->ring, chain->dma_addr);
288} 292}
289 293
290/** 294/**
291 * spider_net_init_chain - links descriptor chain 295 * spider_net_init_chain - alloc and link descriptor chain
292 * @card: card structure 296 * @card: card structure
293 * @chain: address of chain 297 * @chain: address of chain
294 * @start_descr: address of descriptor array
295 * @no: number of descriptors
296 * 298 *
297 * we manage a circular list that mirrors the hardware structure, 299 * We manage a circular list that mirrors the hardware structure,
298 * except that the hardware uses bus addresses. 300 * except that the hardware uses bus addresses.
299 * 301 *
300 * returns 0 on success, <0 on failure 302 * Returns 0 on success, <0 on failure
301 */ 303 */
302static int 304static int
303spider_net_init_chain(struct spider_net_card *card, 305spider_net_init_chain(struct spider_net_card *card,
304 struct spider_net_descr_chain *chain, 306 struct spider_net_descr_chain *chain)
305 struct spider_net_descr *start_descr,
306 int no)
307{ 307{
308 int i; 308 int i;
309 struct spider_net_descr *descr; 309 struct spider_net_descr *descr;
310 dma_addr_t buf; 310 dma_addr_t buf;
311 size_t alloc_size;
311 312
312 descr = start_descr; 313 alloc_size = chain->num_desc * sizeof (struct spider_net_descr);
313 memset(descr, 0, sizeof(*descr) * no);
314 314
315 /* set up the hardware pointers in each descriptor */ 315 chain->ring = dma_alloc_coherent(&card->pdev->dev, alloc_size,
316 for (i=0; i<no; i++, descr++) { 316 &chain->dma_addr, GFP_KERNEL);
317 descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE; 317
318 if (!chain->ring)
319 return -ENOMEM;
318 320
319 buf = pci_map_single(card->pdev, descr, 321 descr = chain->ring;
320 SPIDER_NET_DESCR_SIZE, 322 memset(descr, 0, alloc_size);
321 PCI_DMA_BIDIRECTIONAL);
322 323
323 if (pci_dma_mapping_error(buf)) 324 /* Set up the hardware pointers in each descriptor */
324 goto iommu_error; 325 buf = chain->dma_addr;
326 for (i=0; i < chain->num_desc; i++, descr++) {
327 descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
325 328
326 descr->bus_addr = buf; 329 descr->bus_addr = buf;
330 descr->next_descr_addr = 0;
327 descr->next = descr + 1; 331 descr->next = descr + 1;
328 descr->prev = descr - 1; 332 descr->prev = descr - 1;
329 333
334 buf += sizeof(struct spider_net_descr);
330 } 335 }
331 /* do actual circular list */ 336 /* do actual circular list */
332 (descr-1)->next = start_descr; 337 (descr-1)->next = chain->ring;
333 start_descr->prev = descr-1; 338 chain->ring->prev = descr-1;
334 339
335 spin_lock_init(&chain->lock); 340 spin_lock_init(&chain->lock);
336 chain->head = start_descr; 341 chain->head = chain->ring;
337 chain->tail = start_descr; 342 chain->tail = chain->ring;
338
339 return 0; 343 return 0;
340
341iommu_error:
342 descr = start_descr;
343 for (i=0; i < no; i++, descr++)
344 if (descr->bus_addr)
345 pci_unmap_single(card->pdev, descr->bus_addr,
346 SPIDER_NET_DESCR_SIZE,
347 PCI_DMA_BIDIRECTIONAL);
348 return -ENOMEM;
349} 344}
350 345
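The rework trades per-descriptor pci_map_single() calls, each of which could fail and needed unwinding, for a single dma_alloc_coherent() block holding the whole ring; each descriptor's bus address is then just an offset into that block. The core of the new scheme:

    alloc_size = chain->num_desc * sizeof(struct spider_net_descr);
    chain->ring = dma_alloc_coherent(&card->pdev->dev, alloc_size,
                                     &chain->dma_addr, GFP_KERNEL);
    if (!chain->ring)
        return -ENOMEM;

    descr = chain->ring;
    buf = chain->dma_addr;
    for (i = 0; i < chain->num_desc; i++, descr++) {
        descr->bus_addr = buf;         /* device-visible address */
        buf += sizeof(struct spider_net_descr);
    }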
351/** 346/**
@@ -372,21 +367,20 @@ spider_net_free_rx_chain_contents(struct spider_net_card *card)
372} 367}
373 368
374/** 369/**
375 * spider_net_prepare_rx_descr - reinitializes a rx descriptor 370 * spider_net_prepare_rx_descr - Reinitialize RX descriptor
376 * @card: card structure 371 * @card: card structure
377 * @descr: descriptor to re-init 372 * @descr: descriptor to re-init
378 * 373 *
379 * return 0 on success, <0 on failure 374 * Return 0 on success, <0 on failure.
380 * 375 *
381 * allocates a new rx skb, iommu-maps it and attaches it to the descriptor. 376 * Allocates a new rx skb, iommu-maps it and attaches it to the
382 * Activate the descriptor state-wise 377 * descriptor. Mark the descriptor as activated, ready-to-use.
383 */ 378 */
384static int 379static int
385spider_net_prepare_rx_descr(struct spider_net_card *card, 380spider_net_prepare_rx_descr(struct spider_net_card *card,
386 struct spider_net_descr *descr) 381 struct spider_net_descr *descr)
387{ 382{
388 dma_addr_t buf; 383 dma_addr_t buf;
389 int error = 0;
390 int offset; 384 int offset;
391 int bufsize; 385 int bufsize;
392 386
@@ -414,7 +408,7 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
414 (SPIDER_NET_RXBUF_ALIGN - 1); 408 (SPIDER_NET_RXBUF_ALIGN - 1);
415 if (offset) 409 if (offset)
416 skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset); 410 skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset);
417 /* io-mmu-map the skb */ 411 /* iommu-map the skb */
418 buf = pci_map_single(card->pdev, descr->skb->data, 412 buf = pci_map_single(card->pdev, descr->skb->data,
419 SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE); 413 SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
420 descr->buf_addr = buf; 414 descr->buf_addr = buf;
@@ -425,11 +419,16 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
425 card->spider_stats.rx_iommu_map_error++; 419 card->spider_stats.rx_iommu_map_error++;
426 descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE; 420 descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
427 } else { 421 } else {
422 descr->next_descr_addr = 0;
423 wmb();
428 descr->dmac_cmd_status = SPIDER_NET_DESCR_CARDOWNED | 424 descr->dmac_cmd_status = SPIDER_NET_DESCR_CARDOWNED |
429 SPIDER_NET_DMAC_NOINTR_COMPLETE; 425 SPIDER_NET_DMAC_NOINTR_COMPLETE;
426
427 wmb();
428 descr->prev->next_descr_addr = descr->bus_addr;
430 } 429 }
431 430
432 return error; 431 return 0;
433} 432}
434 433
435/** 434/**
@@ -493,10 +492,10 @@ spider_net_refill_rx_chain(struct spider_net_card *card)
493} 492}
494 493
495/** 494/**
496 * spider_net_alloc_rx_skbs - allocates rx skbs in rx descriptor chains 495 * spider_net_alloc_rx_skbs - Allocates rx skbs in rx descriptor chains
497 * @card: card structure 496 * @card: card structure
498 * 497 *
499 * returns 0 on success, <0 on failure 498 * Returns 0 on success, <0 on failure.
500 */ 499 */
501static int 500static int
502spider_net_alloc_rx_skbs(struct spider_net_card *card) 501spider_net_alloc_rx_skbs(struct spider_net_card *card)
@@ -507,16 +506,16 @@ spider_net_alloc_rx_skbs(struct spider_net_card *card)
507 result = -ENOMEM; 506 result = -ENOMEM;
508 507
509 chain = &card->rx_chain; 508 chain = &card->rx_chain;
510 /* put at least one buffer into the chain. if this fails, 509 /* Put at least one buffer into the chain. If this fails,
511 * we've got a problem. if not, spider_net_refill_rx_chain 510 * we've got a problem. If not, spider_net_refill_rx_chain
512 * will do the rest at the end of this function */ 511 * will do the rest at the end of this function. */
513 if (spider_net_prepare_rx_descr(card, chain->head)) 512 if (spider_net_prepare_rx_descr(card, chain->head))
514 goto error; 513 goto error;
515 else 514 else
516 chain->head = chain->head->next; 515 chain->head = chain->head->next;
517 516
518 /* this will allocate the rest of the rx buffers; if not, it's 517 /* This will allocate the rest of the rx buffers;
519 * business as usual later on */ 518 * if not, it's business as usual later on. */
520 spider_net_refill_rx_chain(card); 519 spider_net_refill_rx_chain(card);
521 spider_net_enable_rxdmac(card); 520 spider_net_enable_rxdmac(card);
522 return 0; 521 return 0;
@@ -707,7 +706,7 @@ spider_net_set_low_watermark(struct spider_net_card *card)
707 } 706 }
708 707
709 /* If TX queue is short, don't even bother with interrupts */ 708 /* If TX queue is short, don't even bother with interrupts */
710 if (cnt < card->num_tx_desc/4) 709 if (cnt < card->tx_chain.num_desc/4)
711 return cnt; 710 return cnt;
712 711
713 /* Set low-watermark 3/4th's of the way into the queue. */ 712 /* Set low-watermark 3/4th's of the way into the queue. */
@@ -915,16 +914,13 @@ spider_net_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
915 * spider_net_pass_skb_up - takes an skb from a descriptor and passes it on 914 * spider_net_pass_skb_up - takes an skb from a descriptor and passes it on
916 * @descr: descriptor to process 915 * @descr: descriptor to process
917 * @card: card structure 916 * @card: card structure
918 * @napi: whether caller is in NAPI context
919 *
920 * returns 1 on success, 0 if no packet was passed to the stack
921 * 917 *
922 * iommu-unmaps the skb, fills out skb structure and passes the data to the 918 * Fills out skb structure and passes the data to the stack.
923 * stack. The descriptor state is not changed. 919 * The descriptor state is not changed.
924 */ 920 */
925static int 921static void
926spider_net_pass_skb_up(struct spider_net_descr *descr, 922spider_net_pass_skb_up(struct spider_net_descr *descr,
927 struct spider_net_card *card, int napi) 923 struct spider_net_card *card)
928{ 924{
929 struct sk_buff *skb; 925 struct sk_buff *skb;
930 struct net_device *netdev; 926 struct net_device *netdev;
@@ -932,23 +928,8 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
932 928
933 data_status = descr->data_status; 929 data_status = descr->data_status;
934 data_error = descr->data_error; 930 data_error = descr->data_error;
935
936 netdev = card->netdev; 931 netdev = card->netdev;
937 932
938 /* unmap descriptor */
939 pci_unmap_single(card->pdev, descr->buf_addr, SPIDER_NET_MAX_FRAME,
940 PCI_DMA_FROMDEVICE);
941
942 /* the cases we'll throw away the packet immediately */
943 if (data_error & SPIDER_NET_DESTROY_RX_FLAGS) {
944 if (netif_msg_rx_err(card))
945 pr_err("error in received descriptor found, "
946 "data_status=x%08x, data_error=x%08x\n",
947 data_status, data_error);
948 card->spider_stats.rx_desc_error++;
949 return 0;
950 }
951
952 skb = descr->skb; 933 skb = descr->skb;
953 skb->dev = netdev; 934 skb->dev = netdev;
954 skb_put(skb, descr->valid_size); 935 skb_put(skb, descr->valid_size);
@@ -977,57 +958,72 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
977 } 958 }
978 959
979 /* pass skb up to stack */ 960 /* pass skb up to stack */
980 if (napi) 961 netif_receive_skb(skb);
981 netif_receive_skb(skb);
982 else
983 netif_rx_ni(skb);
984 962
985 /* update netdevice statistics */ 963 /* update netdevice statistics */
986 card->netdev_stats.rx_packets++; 964 card->netdev_stats.rx_packets++;
987 card->netdev_stats.rx_bytes += skb->len; 965 card->netdev_stats.rx_bytes += skb->len;
966}
988 967
989 return 1; 968#ifdef DEBUG
969static void show_rx_chain(struct spider_net_card *card)
970{
971 struct spider_net_descr_chain *chain = &card->rx_chain;
972 struct spider_net_descr *start= chain->tail;
973 struct spider_net_descr *descr= start;
974 int status;
975
976 int cnt = 0;
977 int cstat = spider_net_get_descr_status(descr);
978 printk(KERN_INFO "RX chain tail at descr=%ld\n",
979 (start - card->descr) - card->tx_chain.num_desc);
980 status = cstat;
981 do
982 {
983 status = spider_net_get_descr_status(descr);
984 if (cstat != status) {
985 printk(KERN_INFO "Have %d descrs with stat=x%08x\n", cnt, cstat);
986 cstat = status;
987 cnt = 0;
988 }
989 cnt ++;
990 descr = descr->next;
991 } while (descr != start);
992 printk(KERN_INFO "Last %d descrs with stat=x%08x\n", cnt, cstat);
990} 993}
994#endif
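With the rxram-full tasklet gone (its removal appears further down in this diff), spider_net_pass_skb_up() is only ever reached from the NAPI poll loop, so the napi flag and the conditional delivery collapse to:

    netif_receive_skb(skb);    /* always softirq context now */
    /* was: napi ? netif_receive_skb(skb) : netif_rx_ni(skb) */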
991 995
992/** 996/**
993 * spider_net_decode_one_descr - processes an rx descriptor 997 * spider_net_decode_one_descr - processes an rx descriptor
994 * @card: card structure 998 * @card: card structure
995 * @napi: whether caller is in NAPI context
996 * 999 *
997 * returns 1 if a packet has been sent to the stack, otherwise 0 1000 * Returns 1 if a packet has been sent to the stack, otherwise 0
998 * 1001 *
999 * processes an rx descriptor by iommu-unmapping the data buffer and passing 1002 * Processes an rx descriptor by iommu-unmapping the data buffer and passing
1000 * the packet up to the stack. This function is called in softirq 1003 * the packet up to the stack. This function is called in softirq
1001 * context, e.g. either bottom half from interrupt or NAPI polling context 1004 * context, e.g. either bottom half from interrupt or NAPI polling context
1002 */ 1005 */
1003static int 1006static int
1004spider_net_decode_one_descr(struct spider_net_card *card, int napi) 1007spider_net_decode_one_descr(struct spider_net_card *card)
1005{ 1008{
1006 struct spider_net_descr_chain *chain = &card->rx_chain; 1009 struct spider_net_descr_chain *chain = &card->rx_chain;
1007 struct spider_net_descr *descr = chain->tail; 1010 struct spider_net_descr *descr = chain->tail;
1008 int status; 1011 int status;
1009 int result;
1010 1012
1011 status = spider_net_get_descr_status(descr); 1013 status = spider_net_get_descr_status(descr);
1012 1014
1013 if (status == SPIDER_NET_DESCR_CARDOWNED) { 1015 /* Nothing in the descriptor, or ring must be empty */
1014 /* nothing in the descriptor yet */ 1016 if ((status == SPIDER_NET_DESCR_CARDOWNED) ||
1015 result=0; 1017 (status == SPIDER_NET_DESCR_NOT_IN_USE))
1016 goto out; 1018 return 0;
1017 }
1018
1019 if (status == SPIDER_NET_DESCR_NOT_IN_USE) {
1020 /* not initialized yet, the ring must be empty */
1021 spider_net_refill_rx_chain(card);
1022 spider_net_enable_rxdmac(card);
1023 result=0;
1024 goto out;
1025 }
1026 1019
1027 /* descriptor definitively used -- move on tail */ 1020 /* descriptor definitively used -- move on tail */
1028 chain->tail = descr->next; 1021 chain->tail = descr->next;
1029 1022
1030 result = 0; 1023 /* unmap descriptor */
1024 pci_unmap_single(card->pdev, descr->buf_addr,
1025 SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
1026
1031 if ( (status == SPIDER_NET_DESCR_RESPONSE_ERROR) || 1027 if ( (status == SPIDER_NET_DESCR_RESPONSE_ERROR) ||
1032 (status == SPIDER_NET_DESCR_PROTECTION_ERROR) || 1028 (status == SPIDER_NET_DESCR_PROTECTION_ERROR) ||
1033 (status == SPIDER_NET_DESCR_FORCE_END) ) { 1029 (status == SPIDER_NET_DESCR_FORCE_END) ) {
@@ -1035,31 +1031,55 @@ spider_net_decode_one_descr(struct spider_net_card *card, int napi)
1035 pr_err("%s: dropping RX descriptor with state %d\n", 1031 pr_err("%s: dropping RX descriptor with state %d\n",
1036 card->netdev->name, status); 1032 card->netdev->name, status);
1037 card->netdev_stats.rx_dropped++; 1033 card->netdev_stats.rx_dropped++;
1038 pci_unmap_single(card->pdev, descr->buf_addr, 1034 goto bad_desc;
1039 SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
1040 dev_kfree_skb_irq(descr->skb);
1041 goto refill;
1042 } 1035 }
1043 1036
1044 if ( (status != SPIDER_NET_DESCR_COMPLETE) && 1037 if ( (status != SPIDER_NET_DESCR_COMPLETE) &&
1045 (status != SPIDER_NET_DESCR_FRAME_END) ) { 1038 (status != SPIDER_NET_DESCR_FRAME_END) ) {
1046 if (netif_msg_rx_err(card)) { 1039 if (netif_msg_rx_err(card))
1047 pr_err("%s: RX descriptor with state %d\n", 1040 pr_err("%s: RX descriptor with unknown state %d\n",
1048 card->netdev->name, status); 1041 card->netdev->name, status);
1049 card->spider_stats.rx_desc_unk_state++; 1042 card->spider_stats.rx_desc_unk_state++;
1050 } 1043 goto bad_desc;
1051 goto refill; 1044 }
1045
1046 /* The cases we'll throw away the packet immediately */
1047 if (descr->data_error & SPIDER_NET_DESTROY_RX_FLAGS) {
1048 if (netif_msg_rx_err(card))
1049 pr_err("%s: error in received descriptor found, "
1050 "data_status=x%08x, data_error=x%08x\n",
1051 card->netdev->name,
1052 descr->data_status, descr->data_error);
1053 goto bad_desc;
1052 } 1054 }
1053 1055
1054 /* ok, we've got a packet in descr */ 1056 if (descr->dmac_cmd_status & 0xfefe) {
1055 result = spider_net_pass_skb_up(descr, card, napi); 1057 pr_err("%s: bad status, cmd_status=x%08x\n",
1056refill: 1058 card->netdev->name,
1059 descr->dmac_cmd_status);
1060 pr_err("buf_addr=x%08x\n", descr->buf_addr);
1061 pr_err("buf_size=x%08x\n", descr->buf_size);
1062 pr_err("next_descr_addr=x%08x\n", descr->next_descr_addr);
1063 pr_err("result_size=x%08x\n", descr->result_size);
1064 pr_err("valid_size=x%08x\n", descr->valid_size);
1065 pr_err("data_status=x%08x\n", descr->data_status);
1066 pr_err("data_error=x%08x\n", descr->data_error);
1067 pr_err("bus_addr=x%08x\n", descr->bus_addr);
1068 pr_err("which=%ld\n", descr - card->rx_chain.ring);
1069
1070 card->spider_stats.rx_desc_error++;
1071 goto bad_desc;
1072 }
1073
1074 /* Ok, we've got a packet in descr */
1075 spider_net_pass_skb_up(descr, card);
1057 descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE; 1076 descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
1058 /* change the descriptor state: */ 1077 return 1;
1059 if (!napi) 1078
1060 spider_net_refill_rx_chain(card); 1079bad_desc:
1061out: 1080 dev_kfree_skb_irq(descr->skb);
1062 return result; 1081 descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
1082 return 0;
1063} 1083}
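The reworked decode path claims the descriptor, unmaps its buffer once, and funnels every failure to a single bad_desc label that frees the skb and recycles the slot. A skeleton, where bad_status() is a stand-in for the three status and data-error checks above:

    chain->tail = descr->next;
    pci_unmap_single(card->pdev, descr->buf_addr,
                     SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);

    if (bad_status(status) ||
        (descr->data_error & SPIDER_NET_DESTROY_RX_FLAGS))
        goto bad_desc;

    spider_net_pass_skb_up(descr, card);
    descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
    return 1;

bad_desc:
    dev_kfree_skb_irq(descr->skb);
    descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
    return 0;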
1064 1084
1065/** 1085/**
@@ -1085,7 +1105,7 @@ spider_net_poll(struct net_device *netdev, int *budget)
1085 packets_to_do = min(*budget, netdev->quota); 1105 packets_to_do = min(*budget, netdev->quota);
1086 1106
1087 while (packets_to_do) { 1107 while (packets_to_do) {
1088 if (spider_net_decode_one_descr(card, 1)) { 1108 if (spider_net_decode_one_descr(card)) {
1089 packets_done++; 1109 packets_done++;
1090 packets_to_do--; 1110 packets_to_do--;
1091 } else { 1111 } else {
@@ -1098,6 +1118,7 @@ spider_net_poll(struct net_device *netdev, int *budget)
1098 netdev->quota -= packets_done; 1118 netdev->quota -= packets_done;
1099 *budget -= packets_done; 1119 *budget -= packets_done;
1100 spider_net_refill_rx_chain(card); 1120 spider_net_refill_rx_chain(card);
1121 spider_net_enable_rxdmac(card);
1101 1122
1102 /* if all packets are in the stack, enable interrupts and return 0 */ 1123 /* if all packets are in the stack, enable interrupts and return 0 */
1103 /* if not, return 1 */ 1124 /* if not, return 1 */
@@ -1227,24 +1248,6 @@ spider_net_set_mac(struct net_device *netdev, void *p)
1227} 1248}
1228 1249
1229/** 1250/**
1230 * spider_net_handle_rxram_full - cleans up RX ring upon RX RAM full interrupt
1231 * @card: card structure
1232 *
1233 * spider_net_handle_rxram_full empties the RX ring so that spider can put
1234 * more packets in it and empty its RX RAM. This is called in bottom half
1235 * context
1236 */
1237static void
1238spider_net_handle_rxram_full(struct spider_net_card *card)
1239{
1240 while (spider_net_decode_one_descr(card, 0))
1241 ;
1242 spider_net_enable_rxchtails(card);
1243 spider_net_enable_rxdmac(card);
1244 netif_rx_schedule(card->netdev);
1245}
1246
1247/**
1248 * spider_net_handle_error_irq - handles errors raised by an interrupt 1251 * spider_net_handle_error_irq - handles errors raised by an interrupt
1249 * @card: card structure 1252 * @card: card structure
1250 * @status_reg: interrupt status register 0 (GHIINT0STS) 1253 * @status_reg: interrupt status register 0 (GHIINT0STS)
@@ -1366,10 +1369,10 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
1366 case SPIDER_NET_GRFAFLLINT: /* fallthrough */ 1369 case SPIDER_NET_GRFAFLLINT: /* fallthrough */
1367 case SPIDER_NET_GRMFLLINT: 1370 case SPIDER_NET_GRMFLLINT:
1368 if (netif_msg_intr(card) && net_ratelimit()) 1371 if (netif_msg_intr(card) && net_ratelimit())
1369 pr_debug("Spider RX RAM full, incoming packets " 1372 pr_err("Spider RX RAM full, incoming packets "
1370 "might be discarded!\n"); 1373 "might be discarded!\n");
1371 spider_net_rx_irq_off(card); 1374 spider_net_rx_irq_off(card);
1372 tasklet_schedule(&card->rxram_full_tl); 1375 netif_rx_schedule(card->netdev);
1373 show_error = 0; 1376 show_error = 0;
1374 break; 1377 break;
1375 1378
@@ -1384,7 +1387,7 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
1384 case SPIDER_NET_GDCDCEINT: /* fallthrough */ 1387 case SPIDER_NET_GDCDCEINT: /* fallthrough */
1385 case SPIDER_NET_GDBDCEINT: /* fallthrough */ 1388 case SPIDER_NET_GDBDCEINT: /* fallthrough */
1386 case SPIDER_NET_GDADCEINT: 1389 case SPIDER_NET_GDADCEINT:
1387 if (netif_msg_intr(card)) 1390 if (netif_msg_intr(card) && net_ratelimit())
1388 pr_err("got descriptor chain end interrupt, " 1391 pr_err("got descriptor chain end interrupt, "
1389 "restarting DMAC %c.\n", 1392 "restarting DMAC %c.\n",
1390 'D'-(i-SPIDER_NET_GDDDCEINT)/3); 1393 'D'-(i-SPIDER_NET_GDDDCEINT)/3);
@@ -1455,7 +1458,7 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
1455 break; 1458 break;
1456 } 1459 }
1457 1460
1458 if ((show_error) && (netif_msg_intr(card))) 1461 if ((show_error) && (netif_msg_intr(card)) && net_ratelimit())
1459 pr_err("Got error interrupt on %s, GHIINT0STS = 0x%08x, " 1462 pr_err("Got error interrupt on %s, GHIINT0STS = 0x%08x, "
1460 "GHIINT1STS = 0x%08x, GHIINT2STS = 0x%08x\n", 1463 "GHIINT1STS = 0x%08x, GHIINT2STS = 0x%08x\n",
1461 card->netdev->name, 1464 card->netdev->name,
@@ -1651,27 +1654,18 @@ int
1651spider_net_open(struct net_device *netdev) 1654spider_net_open(struct net_device *netdev)
1652{ 1655{
1653 struct spider_net_card *card = netdev_priv(netdev); 1656 struct spider_net_card *card = netdev_priv(netdev);
1654 struct spider_net_descr *descr; 1657 int result;
1655 int i, result;
1656 1658
1657 result = -ENOMEM; 1659 result = spider_net_init_chain(card, &card->tx_chain);
1658 if (spider_net_init_chain(card, &card->tx_chain, card->descr, 1660 if (result)
1659 card->num_tx_desc))
1660 goto alloc_tx_failed; 1661 goto alloc_tx_failed;
1661
1662 card->low_watermark = NULL; 1662 card->low_watermark = NULL;
1663 1663
1664 /* rx_chain is after tx_chain, so offset is descr + tx_count */ 1664 result = spider_net_init_chain(card, &card->rx_chain);
1665 if (spider_net_init_chain(card, &card->rx_chain, 1665 if (result)
1666 card->descr + card->num_tx_desc,
1667 card->num_rx_desc))
1668 goto alloc_rx_failed; 1666 goto alloc_rx_failed;
1669 1667
1670 descr = card->rx_chain.head; 1668 /* Allocate rx skbs */
1671 for (i=0; i < card->num_rx_desc; i++, descr++)
1672 descr->next_descr_addr = descr->next->bus_addr;
1673
1674 /* allocate rx skbs */
1675 if (spider_net_alloc_rx_skbs(card)) 1669 if (spider_net_alloc_rx_skbs(card))
1676 goto alloc_skbs_failed; 1670 goto alloc_skbs_failed;
1677 1671
@@ -1902,7 +1896,6 @@ spider_net_stop(struct net_device *netdev)
1902{ 1896{
1903 struct spider_net_card *card = netdev_priv(netdev); 1897 struct spider_net_card *card = netdev_priv(netdev);
1904 1898
1905 tasklet_kill(&card->rxram_full_tl);
1906 netif_poll_disable(netdev); 1899 netif_poll_disable(netdev);
1907 netif_carrier_off(netdev); 1900 netif_carrier_off(netdev);
1908 netif_stop_queue(netdev); 1901 netif_stop_queue(netdev);
@@ -1914,7 +1907,7 @@ spider_net_stop(struct net_device *netdev)
1914 spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 0); 1907 spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 0);
1915 1908
1916 /* free_irq(netdev->irq, netdev);*/ 1909 /* free_irq(netdev->irq, netdev);*/
1917 free_irq(to_pci_dev(netdev->class_dev.dev)->irq, netdev); 1910 free_irq(to_pci_dev(netdev->dev.parent)->irq, netdev);
1918 1911
1919 spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR, 1912 spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
1920 SPIDER_NET_DMA_TX_FEND_VALUE); 1913 SPIDER_NET_DMA_TX_FEND_VALUE);
@@ -1924,6 +1917,9 @@ spider_net_stop(struct net_device *netdev)
1924 1917
1925 /* release chains */ 1918 /* release chains */
1926 spider_net_release_tx_chain(card, 1); 1919 spider_net_release_tx_chain(card, 1);
1920 spider_net_free_rx_chain_contents(card);
1927 1923
1928 spider_net_free_chain(card, &card->tx_chain); 1924 spider_net_free_chain(card, &card->tx_chain);
1929 spider_net_free_chain(card, &card->rx_chain); 1925 spider_net_free_chain(card, &card->rx_chain);
@@ -2044,9 +2040,6 @@ spider_net_setup_netdev(struct spider_net_card *card)
2044 2040
2045 pci_set_drvdata(card->pdev, netdev); 2041 pci_set_drvdata(card->pdev, netdev);
2046 2042
2047 card->rxram_full_tl.data = (unsigned long) card;
2048 card->rxram_full_tl.func =
2049 (void (*)(unsigned long)) spider_net_handle_rxram_full;
2050 init_timer(&card->tx_timer); 2043 init_timer(&card->tx_timer);
2051 card->tx_timer.function = 2044 card->tx_timer.function =
2052 (void (*)(unsigned long)) spider_net_cleanup_tx_ring; 2045 (void (*)(unsigned long)) spider_net_cleanup_tx_ring;
@@ -2055,8 +2048,8 @@ spider_net_setup_netdev(struct spider_net_card *card)
2055 2048
2056 card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT; 2049 card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT;
2057 2050
2058 card->num_tx_desc = tx_descriptors; 2051 card->tx_chain.num_desc = tx_descriptors;
2059 card->num_rx_desc = rx_descriptors; 2052 card->rx_chain.num_desc = rx_descriptors;
2060 2053
2061 spider_net_setup_netdev_ops(netdev); 2054 spider_net_setup_netdev_ops(netdev);
2062 2055
@@ -2105,12 +2098,8 @@ spider_net_alloc_card(void)
2105{ 2098{
2106 struct net_device *netdev; 2099 struct net_device *netdev;
2107 struct spider_net_card *card; 2100 struct spider_net_card *card;
2108 size_t alloc_size;
2109 2101
2110 alloc_size = sizeof (*card) + 2102 netdev = alloc_etherdev(sizeof(struct spider_net_card));
2111 sizeof (struct spider_net_descr) * rx_descriptors +
2112 sizeof (struct spider_net_descr) * tx_descriptors;
2113 netdev = alloc_etherdev(alloc_size);
2114 if (!netdev) 2103 if (!netdev)
2115 return NULL; 2104 return NULL;
2116 2105
diff --git a/drivers/net/spider_net.h b/drivers/net/spider_net.h
index 3e196df29790..2fec5cf76926 100644
--- a/drivers/net/spider_net.h
+++ b/drivers/net/spider_net.h
@@ -24,7 +24,7 @@
24#ifndef _SPIDER_NET_H 24#ifndef _SPIDER_NET_H
25#define _SPIDER_NET_H 25#define _SPIDER_NET_H
26 26
27#define VERSION "1.6 A" 27#define VERSION "1.6 B"
28 28
29#include "sungem_phy.h" 29#include "sungem_phy.h"
30 30
@@ -378,6 +378,9 @@ struct spider_net_descr_chain {
378 spinlock_t lock; 378 spinlock_t lock;
379 struct spider_net_descr *head; 379 struct spider_net_descr *head;
380 struct spider_net_descr *tail; 380 struct spider_net_descr *tail;
381 struct spider_net_descr *ring;
382 int num_desc;
383 dma_addr_t dma_addr;
381}; 384};
382 385
383/* descriptor data_status bits */ 386/* descriptor data_status bits */
@@ -397,8 +400,6 @@ struct spider_net_descr_chain {
397 * 701b8000 would be correct, but every packet gets that flag */ 400 * 701b8000 would be correct, but every packet gets that flag */
398#define SPIDER_NET_DESTROY_RX_FLAGS 0x700b8000 401#define SPIDER_NET_DESTROY_RX_FLAGS 0x700b8000
399 402
400#define SPIDER_NET_DESCR_SIZE 32
401
402/* this will be bigger some time */ 403/* this will be bigger some time */
403struct spider_net_options { 404struct spider_net_options {
404 int rx_csum; /* for rx: if 0 ip_summed=NONE, 405 int rx_csum; /* for rx: if 0 ip_summed=NONE,
@@ -441,25 +442,16 @@ struct spider_net_card {
441 struct spider_net_descr_chain rx_chain; 442 struct spider_net_descr_chain rx_chain;
442 struct spider_net_descr *low_watermark; 443 struct spider_net_descr *low_watermark;
443 444
444 struct net_device_stats netdev_stats;
445
446 struct spider_net_options options;
447
448 spinlock_t intmask_lock;
449 struct tasklet_struct rxram_full_tl;
450 struct timer_list tx_timer; 445 struct timer_list tx_timer;
451
452 struct work_struct tx_timeout_task; 446 struct work_struct tx_timeout_task;
453 atomic_t tx_timeout_task_counter; 447 atomic_t tx_timeout_task_counter;
454 wait_queue_head_t waitq; 448 wait_queue_head_t waitq;
455 449
456 /* for ethtool */ 450 /* for ethtool */
457 int msg_enable; 451 int msg_enable;
458 int num_rx_desc; 452 struct net_device_stats netdev_stats;
459 int num_tx_desc;
460 struct spider_net_extra_stats spider_stats; 453 struct spider_net_extra_stats spider_stats;
461 454 struct spider_net_options options;
462 struct spider_net_descr descr[0];
463}; 455};
464 456
465#define pr_err(fmt,arg...) \ 457#define pr_err(fmt,arg...) \
diff --git a/drivers/net/spider_net_ethtool.c b/drivers/net/spider_net_ethtool.c
index 91b995102915..6bcf03fc89be 100644
--- a/drivers/net/spider_net_ethtool.c
+++ b/drivers/net/spider_net_ethtool.c
@@ -158,9 +158,9 @@ spider_net_ethtool_get_ringparam(struct net_device *netdev,
158 struct spider_net_card *card = netdev->priv; 158 struct spider_net_card *card = netdev->priv;
159 159
160 ering->tx_max_pending = SPIDER_NET_TX_DESCRIPTORS_MAX; 160 ering->tx_max_pending = SPIDER_NET_TX_DESCRIPTORS_MAX;
161 ering->tx_pending = card->num_tx_desc; 161 ering->tx_pending = card->tx_chain.num_desc;
162 ering->rx_max_pending = SPIDER_NET_RX_DESCRIPTORS_MAX; 162 ering->rx_max_pending = SPIDER_NET_RX_DESCRIPTORS_MAX;
163 ering->rx_pending = card->num_rx_desc; 163 ering->rx_pending = card->rx_chain.num_desc;
164} 164}
165 165
166static int spider_net_get_stats_count(struct net_device *netdev) 166static int spider_net_get_stats_count(struct net_device *netdev)
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index f4bf62c2a7a5..e136bae61970 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -58,11 +58,7 @@
58#define TG3_VLAN_TAG_USED 0 58#define TG3_VLAN_TAG_USED 0
59#endif 59#endif
60 60
61#ifdef NETIF_F_TSO
62#define TG3_TSO_SUPPORT 1 61#define TG3_TSO_SUPPORT 1
63#else
64#define TG3_TSO_SUPPORT 0
65#endif
66 62
67#include "tg3.h" 63#include "tg3.h"
68 64
@@ -3384,7 +3380,7 @@ next_pkt:
3384 } 3380 }
3385next_pkt_nopost: 3381next_pkt_nopost:
3386 sw_idx++; 3382 sw_idx++;
3387 sw_idx %= TG3_RX_RCB_RING_SIZE(tp); 3383 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
3388 3384
3389 /* Refresh hw_idx to see if there is new work */ 3385 /* Refresh hw_idx to see if there is new work */
3390 if (sw_idx == hw_idx) { 3386 if (sw_idx == hw_idx) {
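TG3_RX_RCB_RING_SIZE() is always a power of two, so the index wrap can use a mask instead of a modulo and avoid a division in the per-packet hot path; the two are equivalent exactly when the size is 2^n (ring_size below stands for TG3_RX_RCB_RING_SIZE(tp)):

    sw_idx++;
    sw_idx &= (ring_size - 1);    /* same result as sw_idx %= ring_size */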
@@ -3873,7 +3869,6 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3873 3869
3874 entry = tp->tx_prod; 3870 entry = tp->tx_prod;
3875 base_flags = 0; 3871 base_flags = 0;
3876#if TG3_TSO_SUPPORT != 0
3877 mss = 0; 3872 mss = 0;
3878 if (skb->len > (tp->dev->mtu + ETH_HLEN) && 3873 if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3879 (mss = skb_shinfo(skb)->gso_size) != 0) { 3874 (mss = skb_shinfo(skb)->gso_size) != 0) {
@@ -3906,11 +3901,6 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3906 } 3901 }
3907 else if (skb->ip_summed == CHECKSUM_PARTIAL) 3902 else if (skb->ip_summed == CHECKSUM_PARTIAL)
3908 base_flags |= TXD_FLAG_TCPUDP_CSUM; 3903 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3909#else
3910 mss = 0;
3911 if (skb->ip_summed == CHECKSUM_PARTIAL)
3912 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3913#endif
3914#if TG3_VLAN_TAG_USED 3904#if TG3_VLAN_TAG_USED
3915 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb)) 3905 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3916 base_flags |= (TXD_FLAG_VLAN | 3906 base_flags |= (TXD_FLAG_VLAN |
@@ -3970,7 +3960,6 @@ out_unlock:
3970 return NETDEV_TX_OK; 3960 return NETDEV_TX_OK;
3971} 3961}
3972 3962
3973#if TG3_TSO_SUPPORT != 0
3974static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *); 3963static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
3975 3964
3976/* Use GSO to workaround a rare TSO bug that may be triggered when the 3965/* Use GSO to workaround a rare TSO bug that may be triggered when the
@@ -4002,7 +3991,6 @@ tg3_tso_bug_end:
4002 3991
4003 return NETDEV_TX_OK; 3992 return NETDEV_TX_OK;
4004} 3993}
4005#endif
4006 3994
4007/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and 3995/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
4008 * support TG3_FLG2_HW_TSO_1 or firmware TSO only. 3996 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
@@ -4036,7 +4024,6 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
4036 base_flags = 0; 4024 base_flags = 0;
4037 if (skb->ip_summed == CHECKSUM_PARTIAL) 4025 if (skb->ip_summed == CHECKSUM_PARTIAL)
4038 base_flags |= TXD_FLAG_TCPUDP_CSUM; 4026 base_flags |= TXD_FLAG_TCPUDP_CSUM;
4039#if TG3_TSO_SUPPORT != 0
4040 mss = 0; 4027 mss = 0;
4041 if (skb->len > (tp->dev->mtu + ETH_HLEN) && 4028 if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
4042 (mss = skb_shinfo(skb)->gso_size) != 0) { 4029 (mss = skb_shinfo(skb)->gso_size) != 0) {
@@ -4091,9 +4078,6 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
4091 } 4078 }
4092 } 4079 }
4093 } 4080 }
4094#else
4095 mss = 0;
4096#endif
4097#if TG3_VLAN_TAG_USED 4081#if TG3_VLAN_TAG_USED
4098 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb)) 4082 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4099 base_flags |= (TXD_FLAG_VLAN | 4083 base_flags |= (TXD_FLAG_VLAN |
@@ -5329,7 +5313,6 @@ static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5329 return 0; 5313 return 0;
5330} 5314}
5331 5315
5332#if TG3_TSO_SUPPORT != 0
5333 5316
5334#define TG3_TSO_FW_RELEASE_MAJOR 0x1 5317#define TG3_TSO_FW_RELEASE_MAJOR 0x1
5335#define TG3_TSO_FW_RELASE_MINOR 0x6 5318#define TG3_TSO_FW_RELASE_MINOR 0x6
@@ -5906,7 +5889,6 @@ static int tg3_load_tso_firmware(struct tg3 *tp)
5906 return 0; 5889 return 0;
5907} 5890}
5908 5891
5909#endif /* TG3_TSO_SUPPORT != 0 */
5910 5892
5911/* tp->lock is held. */ 5893/* tp->lock is held. */
5912static void __tg3_set_mac_addr(struct tg3 *tp) 5894static void __tg3_set_mac_addr(struct tg3 *tp)
@@ -6120,7 +6102,6 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6120 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE); 6102 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6121 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE); 6103 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6122 } 6104 }
6123#if TG3_TSO_SUPPORT != 0
6124 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) { 6105 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6125 int fw_len; 6106 int fw_len;
6126 6107
@@ -6135,7 +6116,6 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6135 tw32(BUFMGR_MB_POOL_SIZE, 6116 tw32(BUFMGR_MB_POOL_SIZE,
6136 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00); 6117 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6137 } 6118 }
6138#endif
6139 6119
6140 if (tp->dev->mtu <= ETH_DATA_LEN) { 6120 if (tp->dev->mtu <= ETH_DATA_LEN) {
6141 tw32(BUFMGR_MB_RDMA_LOW_WATER, 6121 tw32(BUFMGR_MB_RDMA_LOW_WATER,
@@ -6337,10 +6317,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6337 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) 6317 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6338 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; 6318 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6339 6319
6340#if TG3_TSO_SUPPORT != 0
6341 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) 6320 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6342 rdmac_mode |= (1 << 27); 6321 rdmac_mode |= (1 << 27);
6343#endif
6344 6322
6345 /* Receive/send statistics. */ 6323 /* Receive/send statistics. */
6346 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) { 6324 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
@@ -6511,10 +6489,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6511 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB); 6489 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6512 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ); 6490 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6513 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE); 6491 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6514#if TG3_TSO_SUPPORT != 0
6515 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) 6492 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6516 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8); 6493 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6517#endif
6518 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE); 6494 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6519 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE); 6495 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6520 6496
@@ -6524,13 +6500,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6524 return err; 6500 return err;
6525 } 6501 }
6526 6502
6527#if TG3_TSO_SUPPORT != 0
6528 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) { 6503 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6529 err = tg3_load_tso_firmware(tp); 6504 err = tg3_load_tso_firmware(tp);
6530 if (err) 6505 if (err)
6531 return err; 6506 return err;
6532 } 6507 }
6533#endif
6534 6508
6535 tp->tx_mode = TX_MODE_ENABLE; 6509 tp->tx_mode = TX_MODE_ENABLE;
6536 tw32_f(MAC_TX_MODE, tp->tx_mode); 6510 tw32_f(MAC_TX_MODE, tp->tx_mode);
@@ -8062,7 +8036,6 @@ static void tg3_set_msglevel(struct net_device *dev, u32 value)
8062 tp->msg_enable = value; 8036 tp->msg_enable = value;
8063} 8037}
8064 8038
8065#if TG3_TSO_SUPPORT != 0
8066static int tg3_set_tso(struct net_device *dev, u32 value) 8039static int tg3_set_tso(struct net_device *dev, u32 value)
8067{ 8040{
8068 struct tg3 *tp = netdev_priv(dev); 8041 struct tg3 *tp = netdev_priv(dev);
@@ -8081,7 +8054,6 @@ static int tg3_set_tso(struct net_device *dev, u32 value)
8081 } 8054 }
8082 return ethtool_op_set_tso(dev, value); 8055 return ethtool_op_set_tso(dev, value);
8083} 8056}
8084#endif
8085 8057
8086static int tg3_nway_reset(struct net_device *dev) 8058static int tg3_nway_reset(struct net_device *dev)
8087{ 8059{
@@ -9212,10 +9184,8 @@ static const struct ethtool_ops tg3_ethtool_ops = {
9212 .set_tx_csum = tg3_set_tx_csum, 9184 .set_tx_csum = tg3_set_tx_csum,
9213 .get_sg = ethtool_op_get_sg, 9185 .get_sg = ethtool_op_get_sg,
9214 .set_sg = ethtool_op_set_sg, 9186 .set_sg = ethtool_op_set_sg,
9215#if TG3_TSO_SUPPORT != 0
9216 .get_tso = ethtool_op_get_tso, 9187 .get_tso = ethtool_op_get_tso,
9217 .set_tso = tg3_set_tso, 9188 .set_tso = tg3_set_tso,
9218#endif
9219 .self_test_count = tg3_get_test_count, 9189 .self_test_count = tg3_get_test_count,
9220 .self_test = tg3_self_test, 9190 .self_test = tg3_self_test,
9221 .get_strings = tg3_get_strings, 9191 .get_strings = tg3_get_strings,
@@ -11856,7 +11826,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
11856 11826
11857 tg3_init_bufmgr_config(tp); 11827 tg3_init_bufmgr_config(tp);
11858 11828
11859#if TG3_TSO_SUPPORT != 0
11860 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) { 11829 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
11861 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE; 11830 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
11862 } 11831 }
@@ -11881,7 +11850,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
11881 dev->features |= NETIF_F_TSO6; 11850 dev->features |= NETIF_F_TSO6;
11882 } 11851 }
11883 11852
11884#endif
11885 11853
11886 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 && 11854 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
11887 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) && 11855 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
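The tg3 hunks above delete the TG3_TSO_SUPPORT compile-time guards, so the TSO paths are always built and are gated only by the runtime tg3_flags2 bits. The GSO fallback mentioned in the comment near the top works by segmenting the oversized frame in software and resubmitting each piece through the normal xmit path; a minimal sketch of that pattern, with queue-space accounting and locking omitted (the function name is illustrative, the driver's real helper also stops the queue when the ring cannot hold the worst-case segment count):

static int tso_bug_fallback(struct tg3 *tp, struct sk_buff *skb)
{
        struct sk_buff *segs, *nskb;

        /* Segment in software, with TSO masked out of the features. */
        segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
        if (IS_ERR(segs))
                goto out;                       /* fall through and drop */

        do {                                    /* send each MTU-sized segment */
                nskb = segs;
                segs = segs->next;
                nskb->next = NULL;
                tg3_start_xmit_dma_bug(nskb, tp->dev);
        } while (segs);
out:
        dev_kfree_skb(skb);                     /* original skb is consumed */
        return NETDEV_TX_OK;
}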
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 7e4b23c7c1ba..31c97a6591a4 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -1709,75 +1709,13 @@ static void adjust_link(struct net_device *dev)
1709 if (mii_info->speed != ugeth->oldspeed) { 1709 if (mii_info->speed != ugeth->oldspeed) {
1710 switch (mii_info->speed) { 1710 switch (mii_info->speed) {
1711 case 1000: 1711 case 1000:
1712#ifdef CONFIG_PPC_MPC836x 1712 ugeth->ug_info->enet_interface = ENET_1000_RGMII;
1713/* FIXME: This code is for 100Mbs BUG fixing,
1714remove this when it is fixed!!! */
1715 if (ugeth->ug_info->enet_interface ==
1716 ENET_1000_GMII)
1717 /* Run the commands which initialize the PHY */
1718 {
1719 tempval =
1720 (u32) mii_info->mdio_read(ugeth->
1721 dev, mii_info->mii_id, 0x1b);
1722 tempval |= 0x000f;
1723 mii_info->mdio_write(ugeth->dev,
1724 mii_info->mii_id, 0x1b,
1725 (u16) tempval);
1726 tempval =
1727 (u32) mii_info->mdio_read(ugeth->
1728 dev, mii_info->mii_id,
1729 MII_BMCR);
1730 mii_info->mdio_write(ugeth->dev,
1731 mii_info->mii_id, MII_BMCR,
1732 (u16) (tempval | BMCR_RESET));
1733 } else if (ugeth->ug_info->enet_interface ==
1734 ENET_1000_RGMII)
1735 /* Run the commands which initialize the PHY */
1736 {
1737 tempval =
1738 (u32) mii_info->mdio_read(ugeth->
1739 dev, mii_info->mii_id, 0x1b);
1740 tempval = (tempval & ~0x000f) | 0x000b;
1741 mii_info->mdio_write(ugeth->dev,
1742 mii_info->mii_id, 0x1b,
1743 (u16) tempval);
1744 tempval =
1745 (u32) mii_info->mdio_read(ugeth->
1746 dev, mii_info->mii_id,
1747 MII_BMCR);
1748 mii_info->mdio_write(ugeth->dev,
1749 mii_info->mii_id, MII_BMCR,
1750 (u16) (tempval | BMCR_RESET));
1751 }
1752 msleep(4000);
1753#endif /* CONFIG_MPC8360 */
1754 adjust_enet_interface(ugeth);
1755 break; 1713 break;
1756 case 100: 1714 case 100:
1757 case 10:
1758#ifdef CONFIG_PPC_MPC836x
1759/* FIXME: This code is for 100Mbs BUG fixing,
1760remove this lines when it will be fixed!!! */
1761 ugeth->ug_info->enet_interface = ENET_100_RGMII; 1715 ugeth->ug_info->enet_interface = ENET_100_RGMII;
1762 tempval = 1716 break;
1763 (u32) mii_info->mdio_read(ugeth->dev, 1717 case 10:
1764 mii_info->mii_id, 1718 ugeth->ug_info->enet_interface = ENET_10_RGMII;
1765 0x1b);
1766 tempval = (tempval & ~0x000f) | 0x000b;
1767 mii_info->mdio_write(ugeth->dev,
1768 mii_info->mii_id, 0x1b,
1769 (u16) tempval);
1770 tempval =
1771 (u32) mii_info->mdio_read(ugeth->dev,
1772 mii_info->mii_id,
1773 MII_BMCR);
1774 mii_info->mdio_write(ugeth->dev,
1775 mii_info->mii_id, MII_BMCR,
1776 (u16) (tempval |
1777 BMCR_RESET));
1778 msleep(4000);
1779#endif /* CONFIG_MPC8360 */
1780 adjust_enet_interface(ugeth);
1781 break; 1719 break;
1782 default: 1720 default:
1783 ugeth_warn 1721 ugeth_warn
@@ -1785,6 +1723,7 @@ remove this lines when it will be fixed!!! */
1785 dev->name, mii_info->speed); 1723 dev->name, mii_info->speed);
1786 break; 1724 break;
1787 } 1725 }
1726 adjust_enet_interface(ugeth);
1788 1727
1789 ugeth_info("%s: Speed %dBT", dev->name, 1728 ugeth_info("%s: Speed %dBT", dev->name,
1790 mii_info->speed); 1729 mii_info->speed);
@@ -2865,8 +2804,8 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2865 if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4) 2804 if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4)
2866 align = UCC_GETH_TX_BD_RING_ALIGNMENT; 2805 align = UCC_GETH_TX_BD_RING_ALIGNMENT;
2867 ugeth->tx_bd_ring_offset[j] = 2806 ugeth->tx_bd_ring_offset[j] =
2868 (u32) (kmalloc((u32) (length + align), 2807 kmalloc((u32) (length + align), GFP_KERNEL);
2869 GFP_KERNEL)); 2808
2870 if (ugeth->tx_bd_ring_offset[j] != 0) 2809 if (ugeth->tx_bd_ring_offset[j] != 0)
2871 ugeth->p_tx_bd_ring[j] = 2810 ugeth->p_tx_bd_ring[j] =
2872 (void*)((ugeth->tx_bd_ring_offset[j] + 2811 (void*)((ugeth->tx_bd_ring_offset[j] +
@@ -2901,7 +2840,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2901 if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4) 2840 if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4)
2902 align = UCC_GETH_RX_BD_RING_ALIGNMENT; 2841 align = UCC_GETH_RX_BD_RING_ALIGNMENT;
2903 ugeth->rx_bd_ring_offset[j] = 2842 ugeth->rx_bd_ring_offset[j] =
2904 (u32) (kmalloc((u32) (length + align), GFP_KERNEL)); 2843 kmalloc((u32) (length + align), GFP_KERNEL);
2905 if (ugeth->rx_bd_ring_offset[j] != 0) 2844 if (ugeth->rx_bd_ring_offset[j] != 0)
2906 ugeth->p_rx_bd_ring[j] = 2845 ugeth->p_rx_bd_ring[j] =
2907 (void*)((ugeth->rx_bd_ring_offset[j] + 2846 (void*)((ugeth->rx_bd_ring_offset[j] +
@@ -2927,10 +2866,9 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2927 /* Init Tx bds */ 2866 /* Init Tx bds */
2928 for (j = 0; j < ug_info->numQueuesTx; j++) { 2867 for (j = 0; j < ug_info->numQueuesTx; j++) {
2929 /* Setup the skbuff rings */ 2868 /* Setup the skbuff rings */
2930 ugeth->tx_skbuff[j] = 2869 ugeth->tx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) *
2931 (struct sk_buff **)kmalloc(sizeof(struct sk_buff *) * 2870 ugeth->ug_info->bdRingLenTx[j],
2932 ugeth->ug_info->bdRingLenTx[j], 2871 GFP_KERNEL);
2933 GFP_KERNEL);
2934 2872
2935 if (ugeth->tx_skbuff[j] == NULL) { 2873 if (ugeth->tx_skbuff[j] == NULL) {
2936 ugeth_err("%s: Could not allocate tx_skbuff", 2874 ugeth_err("%s: Could not allocate tx_skbuff",
@@ -2959,10 +2897,9 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2959 /* Init Rx bds */ 2897 /* Init Rx bds */
2960 for (j = 0; j < ug_info->numQueuesRx; j++) { 2898 for (j = 0; j < ug_info->numQueuesRx; j++) {
2961 /* Setup the skbuff rings */ 2899 /* Setup the skbuff rings */
2962 ugeth->rx_skbuff[j] = 2900 ugeth->rx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) *
2963 (struct sk_buff **)kmalloc(sizeof(struct sk_buff *) * 2901 ugeth->ug_info->bdRingLenRx[j],
2964 ugeth->ug_info->bdRingLenRx[j], 2902 GFP_KERNEL);
2965 GFP_KERNEL);
2966 2903
2967 if (ugeth->rx_skbuff[j] == NULL) { 2904 if (ugeth->rx_skbuff[j] == NULL) {
2968 ugeth_err("%s: Could not allocate rx_skbuff", 2905 ugeth_err("%s: Could not allocate rx_skbuff",
@@ -3453,8 +3390,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3453 * allocated resources can be released when the channel is freed. 3390 * allocated resources can be released when the channel is freed.
3454 */ 3391 */
3455 if (!(ugeth->p_init_enet_param_shadow = 3392 if (!(ugeth->p_init_enet_param_shadow =
3456 (struct ucc_geth_init_pram *) kmalloc(sizeof(struct ucc_geth_init_pram), 3393 kmalloc(sizeof(struct ucc_geth_init_pram), GFP_KERNEL))) {
3457 GFP_KERNEL))) {
3458 ugeth_err 3394 ugeth_err
3459 ("%s: Can not allocate memory for" 3395 ("%s: Can not allocate memory for"
3460 " p_UccInitEnetParamShadows.", __FUNCTION__); 3396 " p_UccInitEnetParamShadows.", __FUNCTION__);
@@ -4136,6 +4072,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
4136 static int mii_mng_configured = 0; 4072 static int mii_mng_configured = 0;
4137 const phandle *ph; 4073 const phandle *ph;
4138 const unsigned int *prop; 4074 const unsigned int *prop;
4075 const void *mac_addr;
4139 4076
4140 ugeth_vdbg("%s: IN", __FUNCTION__); 4077 ugeth_vdbg("%s: IN", __FUNCTION__);
4141 4078
@@ -4261,7 +4198,12 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
4261 4198
4262 ugeth->ug_info = ug_info; 4199 ugeth->ug_info = ug_info;
4263 ugeth->dev = dev; 4200 ugeth->dev = dev;
4264 memcpy(dev->dev_addr, get_property(np, "mac-address", NULL), 6); 4201
4202 mac_addr = get_property(np, "mac-address", NULL);
4203 if (mac_addr == NULL)
4204 mac_addr = get_property(np, "local-mac-address", NULL);
4205 if (mac_addr)
4206 memcpy(dev->dev_addr, mac_addr, 6);
4265 4207
4266 return 0; 4208 return 0;
4267} 4209}
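Two of the ucc_geth cleanups above are standard kernel idioms. kmalloc() returns void *, so casting its result, let alone squeezing it through (u32), is at best noise and at worst truncates a 64-bit pointer; and an OF probe should accept either MAC-address property name rather than assume one. A condensed sketch of both fixes, using the get_property() interface of this 2.6.20-era tree:

        /* kmalloc() returns void *: no cast needed on assignment. */
        ugeth->tx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) *
                                      ug_info->bdRingLenTx[j], GFP_KERNEL);

        /* Prefer "mac-address" but fall back to "local-mac-address",
         * which some device trees carry instead. */
        mac_addr = get_property(np, "mac-address", NULL);
        if (!mac_addr)
                mac_addr = get_property(np, "local-mac-address", NULL);
        if (mac_addr)
                memcpy(dev->dev_addr, mac_addr, 6);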
diff --git a/drivers/net/ucc_geth_phy.c b/drivers/net/ucc_geth_phy.c
index 3c86592ce03c..6fda6d88be49 100644
--- a/drivers/net/ucc_geth_phy.c
+++ b/drivers/net/ucc_geth_phy.c
@@ -376,6 +376,8 @@ static int marvell_init(struct ugeth_mii_info *mii_info)
376 ugphy_vdbg("%s: IN", __FUNCTION__); 376 ugphy_vdbg("%s: IN", __FUNCTION__);
377 377
378 ucc_geth_phy_write(mii_info, 0x14, 0x0cd2); 378 ucc_geth_phy_write(mii_info, 0x14, 0x0cd2);
379 ucc_geth_phy_write(mii_info, 0x1b,
380 (ucc_geth_phy_read(mii_info, 0x1b) & ~0x000f) | 0x000b);
379 ucc_geth_phy_write(mii_info, MII_BMCR, 381 ucc_geth_phy_write(mii_info, MII_BMCR,
380 ucc_geth_phy_read(mii_info, MII_BMCR) | BMCR_RESET); 382 ucc_geth_phy_read(mii_info, MII_BMCR) | BMCR_RESET);
381 msleep(4000); 383 msleep(4000);
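The two writes added to marvell_init() fold the board-specific workaround deleted from adjust_link() into one-time PHY setup: extended control register 0x1b has its low nibble set to 0xb (RGMII-to-copper mode on 88E1111-class parts, an assumption inferred from the deleted code), and a BMCR soft reset then makes the new mode take effect. The underlying read-modify-write MDIO idiom, as a sketch:

        u16 val;

        val = ucc_geth_phy_read(mii_info, 0x1b);        /* extended control */
        val = (val & ~0x000f) | 0x000b;                 /* select HWCFG mode */
        ucc_geth_phy_write(mii_info, 0x1b, val);

        /* The mode change only latches across a soft reset. */
        ucc_geth_phy_write(mii_info, MII_BMCR,
                           ucc_geth_phy_read(mii_info, MII_BMCR) | BMCR_RESET);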
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index 21f76f51c95e..61708cf4c85d 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -235,6 +235,19 @@ comment "Cyclades-PC300 MLPPP support is disabled."
235comment "Refer to the file README.mlppp, provided by PC300 package." 235comment "Refer to the file README.mlppp, provided by PC300 package."
236 depends on WAN && HDLC && PC300 && (PPP=n || !PPP_MULTILINK || PPP_SYNC_TTY=n || !HDLC_PPP) 236 depends on WAN && HDLC && PC300 && (PPP=n || !PPP_MULTILINK || PPP_SYNC_TTY=n || !HDLC_PPP)
237 237
238config PC300TOO
239 tristate "Cyclades PC300 RSV/X21 alternative support"
240 depends on HDLC && PCI
241 help
242 Alternative driver for PC300 RSV/X21 PCI cards made by
243 Cyclades, Inc. If you have such a card, say Y here and see
244 <http://www.kernel.org/pub/linux/utils/net/hdlc/>.
245
246 To compile this as a module, choose M here: the module
247 will be called pc300too.
248
249 If unsure, say N here.
250
238config N2 251config N2
239 tristate "SDL RISCom/N2 support" 252 tristate "SDL RISCom/N2 support"
240 depends on HDLC && ISA 253 depends on HDLC && ISA
@@ -344,17 +357,6 @@ config DLCI
344 To compile this driver as a module, choose M here: the 357 To compile this driver as a module, choose M here: the
345 module will be called dlci. 358 module will be called dlci.
346 359
347config DLCI_COUNT
348 int "Max open DLCI"
349 depends on DLCI
350 default "24"
351 help
352 Maximal number of logical point-to-point frame relay connections
353 (the identifiers of which are called DCLIs) that the driver can
354 handle.
355
356 The default is probably fine.
357
358config DLCI_MAX 360config DLCI_MAX
359 int "Max DLCI per device" 361 int "Max DLCI per device"
360 depends on DLCI 362 depends on DLCI
diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile
index 83ec2c87ba3f..d61fef36afc9 100644
--- a/drivers/net/wan/Makefile
+++ b/drivers/net/wan/Makefile
@@ -41,6 +41,7 @@ obj-$(CONFIG_N2) += n2.o
41obj-$(CONFIG_C101) += c101.o 41obj-$(CONFIG_C101) += c101.o
42obj-$(CONFIG_WANXL) += wanxl.o 42obj-$(CONFIG_WANXL) += wanxl.o
43obj-$(CONFIG_PCI200SYN) += pci200syn.o 43obj-$(CONFIG_PCI200SYN) += pci200syn.o
44obj-$(CONFIG_PC300TOO) += pc300too.o
44 45
45clean-files := wanxlfw.inc 46clean-files := wanxlfw.inc
46$(obj)/wanxl.o: $(obj)/wanxlfw.inc 47$(obj)/wanxl.o: $(obj)/wanxlfw.inc
diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c
index db354e0edbe5..9040d7cf651e 100644
--- a/drivers/net/wan/hdlc.c
+++ b/drivers/net/wan/hdlc.c
@@ -222,7 +222,7 @@ int hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
222 return -EINVAL; 222 return -EINVAL;
223} 223}
224 224
225void hdlc_setup(struct net_device *dev) 225static void hdlc_setup(struct net_device *dev)
226{ 226{
227 hdlc_device *hdlc = dev_to_hdlc(dev); 227 hdlc_device *hdlc = dev_to_hdlc(dev);
228 228
@@ -325,7 +325,6 @@ MODULE_LICENSE("GPL v2");
325EXPORT_SYMBOL(hdlc_open); 325EXPORT_SYMBOL(hdlc_open);
326EXPORT_SYMBOL(hdlc_close); 326EXPORT_SYMBOL(hdlc_close);
327EXPORT_SYMBOL(hdlc_ioctl); 327EXPORT_SYMBOL(hdlc_ioctl);
328EXPORT_SYMBOL(hdlc_setup);
329EXPORT_SYMBOL(alloc_hdlcdev); 328EXPORT_SYMBOL(alloc_hdlcdev);
330EXPORT_SYMBOL(unregister_hdlc_device); 329EXPORT_SYMBOL(unregister_hdlc_device);
331EXPORT_SYMBOL(register_hdlc_protocol); 330EXPORT_SYMBOL(register_hdlc_protocol);
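hdlc_setup() can become static and lose its export because every in-tree caller now allocates through alloc_hdlcdev(), which hands it to alloc_netdev() as the setup callback. Roughly, per the generic HDLC code of this era:

struct net_device *alloc_hdlcdev(void *priv)
{
        struct net_device *dev;

        /* hdlc_setup() initializes the device right here, so no
         * driver ever needs to call it directly any more. */
        dev = alloc_netdev(sizeof(hdlc_device), "hdlc%d", hdlc_setup);
        if (dev)
                dev_to_hdlc(dev)->priv = priv;
        return dev;
}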
diff --git a/drivers/net/wan/pc300too.c b/drivers/net/wan/pc300too.c
new file mode 100644
index 000000000000..bc156b51678a
--- /dev/null
+++ b/drivers/net/wan/pc300too.c
@@ -0,0 +1,565 @@
1/*
2 * Cyclades PC300 synchronous serial card driver for Linux
3 *
4 * Copyright (C) 2000-2007 Krzysztof Halasa <khc@pm.waw.pl>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of version 2 of the GNU General Public License
8 * as published by the Free Software Foundation.
9 *
10 * For information see <http://www.kernel.org/pub/linux/utils/net/hdlc/>.
11 *
12 * Sources of information:
13 * Hitachi HD64572 SCA-II User's Manual
14 * Cyclades PC300 Linux driver
15 *
16 * This driver currently supports only PC300/RSV (V.24/V.35) and
17 * PC300/X21 cards.
18 */
19
20#include <linux/module.h>
21#include <linux/kernel.h>
22#include <linux/slab.h>
23#include <linux/sched.h>
24#include <linux/types.h>
25#include <linux/fcntl.h>
26#include <linux/in.h>
27#include <linux/string.h>
28#include <linux/errno.h>
29#include <linux/init.h>
30#include <linux/ioport.h>
31#include <linux/moduleparam.h>
32#include <linux/netdevice.h>
33#include <linux/hdlc.h>
34#include <linux/pci.h>
35#include <linux/delay.h>
36#include <asm/io.h>
37
38#include "hd64572.h"
39
40static const char* version = "Cyclades PC300 driver version: 1.17";
41static const char* devname = "PC300";
42
43#undef DEBUG_PKT
44#define DEBUG_RINGS
45
46#define PC300_PLX_SIZE 0x80 /* PLX control window size (128 B) */
47#define PC300_SCA_SIZE 0x400 /* SCA window size (1 KB) */
48#define ALL_PAGES_ALWAYS_MAPPED
49#define NEED_DETECT_RAM
50#define NEED_SCA_MSCI_INTR
51#define MAX_TX_BUFFERS 10
52
53static int pci_clock_freq = 33000000;
54static int use_crystal_clock = 0;
55static unsigned int CLOCK_BASE;
56
57/* Masks to access the init_ctrl PLX register */
58#define PC300_CLKSEL_MASK (0x00000004UL)
59#define PC300_CHMEDIA_MASK(port) (0x00000020UL << ((port) * 3))
60#define PC300_CTYPE_MASK (0x00000800UL)
61
62
63enum { PC300_RSV = 1, PC300_X21, PC300_TE }; /* card types */
64
65/*
66 * PLX PCI9050-1 local configuration and shared runtime registers.
67 * This structure can be used to access 9050 registers (memory mapped).
68 */
69typedef struct {
70 u32 loc_addr_range[4]; /* 00-0Ch : Local Address Ranges */
71 u32 loc_rom_range; /* 10h : Local ROM Range */
72 u32 loc_addr_base[4]; /* 14-20h : Local Address Base Addrs */
73 u32 loc_rom_base; /* 24h : Local ROM Base */
74 u32 loc_bus_descr[4]; /* 28-34h : Local Bus Descriptors */
75 u32 rom_bus_descr; /* 38h : ROM Bus Descriptor */
76 u32 cs_base[4]; /* 3C-48h : Chip Select Base Addrs */
77 u32 intr_ctrl_stat; /* 4Ch : Interrupt Control/Status */
78 u32 init_ctrl; /* 50h : EEPROM ctrl, Init Ctrl, etc */
79}plx9050;
80
81
82
83typedef struct port_s {
84 struct net_device *dev;
85 struct card_s *card;
86 spinlock_t lock; /* TX lock */
87 sync_serial_settings settings;
88 int rxpart; /* partial frame received, next frame invalid*/
89 unsigned short encoding;
90 unsigned short parity;
91 unsigned int iface;
92 u16 rxin; /* rx ring buffer 'in' pointer */
93 u16 txin; /* tx ring buffer 'in' and 'last' pointers */
94 u16 txlast;
95 u8 rxs, txs, tmc; /* SCA registers */
96 u8 phy_node; /* physical port # - 0 or 1 */
97}port_t;
98
99
100
101typedef struct card_s {
102 int type; /* RSV, X21, etc. */
103 int n_ports; /* 1 or 2 ports */
104 u8 __iomem *rambase; /* buffer memory base (virtual) */
105 u8 __iomem *scabase; /* SCA memory base (virtual) */
106 plx9050 __iomem *plxbase; /* PLX registers memory base (virtual) */
107 u32 init_ctrl_value; /* Saved value - 9050 bug workaround */
108 u16 rx_ring_buffers; /* number of buffers in a ring */
109 u16 tx_ring_buffers;
110 u16 buff_offset; /* offset of first buffer of first channel */
111 u8 irq; /* interrupt request level */
112
113 port_t ports[2];
114}card_t;
115
116
117#define sca_in(reg, card) readb(card->scabase + (reg))
118#define sca_out(value, reg, card) writeb(value, card->scabase + (reg))
119#define sca_inw(reg, card) readw(card->scabase + (reg))
120#define sca_outw(value, reg, card) writew(value, card->scabase + (reg))
121#define sca_inl(reg, card) readl(card->scabase + (reg))
122#define sca_outl(value, reg, card) writel(value, card->scabase + (reg))
123
124#define port_to_card(port) (port->card)
125#define log_node(port) (port->phy_node)
126#define phy_node(port) (port->phy_node)
127#define winbase(card) (card->rambase)
128#define get_port(card, port) ((port) < (card)->n_ports ? \
129 (&(card)->ports[port]) : (NULL))
130
131#include "hd6457x.c"
132
133
134static void pc300_set_iface(port_t *port)
135{
136 card_t *card = port->card;
137 u32 __iomem * init_ctrl = &card->plxbase->init_ctrl;
138 u16 msci = get_msci(port);
139 u8 rxs = port->rxs & CLK_BRG_MASK;
140 u8 txs = port->txs & CLK_BRG_MASK;
141
142 sca_out(EXS_TES1, (phy_node(port) ? MSCI1_OFFSET : MSCI0_OFFSET) + EXS,
143 port_to_card(port));
144 switch(port->settings.clock_type) {
145 case CLOCK_INT:
146 rxs |= CLK_BRG; /* BRG output */
147 txs |= CLK_PIN_OUT | CLK_TX_RXCLK; /* RX clock */
148 break;
149
150 case CLOCK_TXINT:
151 rxs |= CLK_LINE; /* RXC input */
152 txs |= CLK_PIN_OUT | CLK_BRG; /* BRG output */
153 break;
154
155 case CLOCK_TXFROMRX:
156 rxs |= CLK_LINE; /* RXC input */
157 txs |= CLK_PIN_OUT | CLK_TX_RXCLK; /* RX clock */
158 break;
159
160 default: /* EXTernal clock */
161 rxs |= CLK_LINE; /* RXC input */
162 txs |= CLK_PIN_OUT | CLK_LINE; /* TXC input */
163 break;
164 }
165
166 port->rxs = rxs;
167 port->txs = txs;
168 sca_out(rxs, msci + RXS, card);
169 sca_out(txs, msci + TXS, card);
170 sca_set_port(port);
171
172 if (port->card->type == PC300_RSV) {
173 if (port->iface == IF_IFACE_V35)
174 writel(card->init_ctrl_value |
175 PC300_CHMEDIA_MASK(port->phy_node), init_ctrl);
176 else
177 writel(card->init_ctrl_value &
178 ~PC300_CHMEDIA_MASK(port->phy_node), init_ctrl);
179 }
180}
181
182
183
184static int pc300_open(struct net_device *dev)
185{
186 port_t *port = dev_to_port(dev);
187
188 int result = hdlc_open(dev);
189 if (result)
190 return result;
191
192 sca_open(dev);
193 pc300_set_iface(port);
194 return 0;
195}
196
197
198
199static int pc300_close(struct net_device *dev)
200{
201 sca_close(dev);
202 hdlc_close(dev);
203 return 0;
204}
205
206
207
208static int pc300_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
209{
210 const size_t size = sizeof(sync_serial_settings);
211 sync_serial_settings new_line;
212 sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
213 int new_type;
214 port_t *port = dev_to_port(dev);
215
216#ifdef DEBUG_RINGS
217 if (cmd == SIOCDEVPRIVATE) {
218 sca_dump_rings(dev);
219 return 0;
220 }
221#endif
222 if (cmd != SIOCWANDEV)
223 return hdlc_ioctl(dev, ifr, cmd);
224
225 if (ifr->ifr_settings.type == IF_GET_IFACE) {
226 ifr->ifr_settings.type = port->iface;
227 if (ifr->ifr_settings.size < size) {
228 ifr->ifr_settings.size = size; /* data size wanted */
229 return -ENOBUFS;
230 }
231 if (copy_to_user(line, &port->settings, size))
232 return -EFAULT;
233 return 0;
234
235 }
236
237 if (port->card->type == PC300_X21 &&
238 (ifr->ifr_settings.type == IF_IFACE_SYNC_SERIAL ||
239 ifr->ifr_settings.type == IF_IFACE_X21))
240 new_type = IF_IFACE_X21;
241
242 else if (port->card->type == PC300_RSV &&
243 (ifr->ifr_settings.type == IF_IFACE_SYNC_SERIAL ||
244 ifr->ifr_settings.type == IF_IFACE_V35))
245 new_type = IF_IFACE_V35;
246
247 else if (port->card->type == PC300_RSV &&
248 ifr->ifr_settings.type == IF_IFACE_V24)
249 new_type = IF_IFACE_V24;
250
251 else
252 return hdlc_ioctl(dev, ifr, cmd);
253
254 if (!capable(CAP_NET_ADMIN))
255 return -EPERM;
256
257 if (copy_from_user(&new_line, line, size))
258 return -EFAULT;
259
260 if (new_line.clock_type != CLOCK_EXT &&
261 new_line.clock_type != CLOCK_TXFROMRX &&
262 new_line.clock_type != CLOCK_INT &&
263 new_line.clock_type != CLOCK_TXINT)
264 return -EINVAL; /* No such clock setting */
265
266 if (new_line.loopback != 0 && new_line.loopback != 1)
267 return -EINVAL;
268
269 memcpy(&port->settings, &new_line, size); /* Update settings */
270 port->iface = new_type;
271 pc300_set_iface(port);
272 return 0;
273}
274
275
276
277static void pc300_pci_remove_one(struct pci_dev *pdev)
278{
279 int i;
280 card_t *card = pci_get_drvdata(pdev);
281
282 for (i = 0; i < 2; i++)
283 if (card->ports[i].card) {
284 struct net_device *dev = port_to_dev(&card->ports[i]);
285 unregister_hdlc_device(dev);
286 }
287
288 if (card->irq)
289 free_irq(card->irq, card);
290
291 if (card->rambase)
292 iounmap(card->rambase);
293 if (card->scabase)
294 iounmap(card->scabase);
295 if (card->plxbase)
296 iounmap(card->plxbase);
297
298 pci_release_regions(pdev);
299 pci_disable_device(pdev);
300 pci_set_drvdata(pdev, NULL);
301 if (card->ports[0].dev)
302 free_netdev(card->ports[0].dev);
303 if (card->ports[1].dev)
304 free_netdev(card->ports[1].dev);
305 kfree(card);
306}
307
308
309
310static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
311 const struct pci_device_id *ent)
312{
313 card_t *card;
314 u8 rev_id;
315 u32 __iomem *p;
316 int i;
317 u32 ramsize;
318 u32 ramphys; /* buffer memory base */
319 u32 scaphys; /* SCA memory base */
320 u32 plxphys; /* PLX registers memory base */
321
322#ifndef MODULE
323 static int printed_version;
324 if (!printed_version++)
325 printk(KERN_INFO "%s\n", version);
326#endif
327
328 i = pci_enable_device(pdev);
329 if (i)
330 return i;
331
332 i = pci_request_regions(pdev, "PC300");
333 if (i) {
334 pci_disable_device(pdev);
335 return i;
336 }
337
338 card = kmalloc(sizeof(card_t), GFP_KERNEL);
339 if (card == NULL) {
340 printk(KERN_ERR "pc300: unable to allocate memory\n");
341 pci_release_regions(pdev);
342 pci_disable_device(pdev);
343 return -ENOBUFS;
344 }
345 memset(card, 0, sizeof(card_t));
346 pci_set_drvdata(pdev, card);
347
348 if (pdev->device == PCI_DEVICE_ID_PC300_TE_1 ||
349 pdev->device == PCI_DEVICE_ID_PC300_TE_2)
350 card->type = PC300_TE; /* not fully supported */
351 else if (card->init_ctrl_value & PC300_CTYPE_MASK)
352 card->type = PC300_X21;
353 else
354 card->type = PC300_RSV;
355
356 if (pdev->device == PCI_DEVICE_ID_PC300_RX_1 ||
357 pdev->device == PCI_DEVICE_ID_PC300_TE_1)
358 card->n_ports = 1;
359 else
360 card->n_ports = 2;
361
362 for (i = 0; i < card->n_ports; i++)
363 if (!(card->ports[i].dev = alloc_hdlcdev(&card->ports[i]))) {
364 printk(KERN_ERR "pc300: unable to allocate memory\n");
365 pc300_pci_remove_one(pdev);
366 return -ENOMEM;
367 }
368
369 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
370 if (pci_resource_len(pdev, 0) != PC300_PLX_SIZE ||
371 pci_resource_len(pdev, 2) != PC300_SCA_SIZE ||
372 pci_resource_len(pdev, 3) < 16384) {
373 printk(KERN_ERR "pc300: invalid card EEPROM parameters\n");
374 pc300_pci_remove_one(pdev);
375 return -EFAULT;
376 }
377
378 plxphys = pci_resource_start(pdev,0) & PCI_BASE_ADDRESS_MEM_MASK;
379 card->plxbase = ioremap(plxphys, PC300_PLX_SIZE);
380
381 scaphys = pci_resource_start(pdev,2) & PCI_BASE_ADDRESS_MEM_MASK;
382 card->scabase = ioremap(scaphys, PC300_SCA_SIZE);
383
384 ramphys = pci_resource_start(pdev,3) & PCI_BASE_ADDRESS_MEM_MASK;
385 card->rambase = ioremap(ramphys, pci_resource_len(pdev,3));
386
387 if (card->plxbase == NULL ||
388 card->scabase == NULL ||
389 card->rambase == NULL) {
390 printk(KERN_ERR "pc300: ioremap() failed\n");
391 pc300_pci_remove_one(pdev);
392 return -EFAULT;
393 }
393
394 /* PLX PCI 9050 workaround for local configuration register read bug */
395 pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, scaphys);
396 card->init_ctrl_value = readl(&((plx9050 __iomem *)card->scabase)->init_ctrl);
397 pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, plxphys);
398
399 /* Reset PLX */
400 p = &card->plxbase->init_ctrl;
401 writel(card->init_ctrl_value | 0x40000000, p);
402 readl(p); /* Flush the write - do not use sca_flush */
403 udelay(1);
404
405 writel(card->init_ctrl_value, p);
406 readl(p); /* Flush the write - do not use sca_flush */
407 udelay(1);
408
409 /* Reload Config. Registers from EEPROM */
410 writel(card->init_ctrl_value | 0x20000000, p);
411 readl(p); /* Flush the write - do not use sca_flush */
412 udelay(1);
413
414 writel(card->init_ctrl_value, p);
415 readl(p); /* Flush the write - do not use sca_flush */
416 udelay(1);
417
418 ramsize = sca_detect_ram(card, card->rambase,
419 pci_resource_len(pdev, 3));
420
421 if (use_crystal_clock)
422 card->init_ctrl_value &= ~PC300_CLKSEL_MASK;
423 else
424 card->init_ctrl_value |= PC300_CLKSEL_MASK;
425
426 writel(card->init_ctrl_value, &card->plxbase->init_ctrl);
427 /* number of TX + RX buffers for one port */
428 i = ramsize / (card->n_ports * (sizeof(pkt_desc) + HDLC_MAX_MRU));
429 card->tx_ring_buffers = min(i / 2, MAX_TX_BUFFERS);
430 card->rx_ring_buffers = i - card->tx_ring_buffers;
431
432 card->buff_offset = card->n_ports * sizeof(pkt_desc) *
433 (card->tx_ring_buffers + card->rx_ring_buffers);
434
435 printk(KERN_INFO "pc300: PC300/%s, %u KB RAM at 0x%x, IRQ%u, "
436 "using %u TX + %u RX packets rings\n",
437 card->type == PC300_X21 ? "X21" :
438 card->type == PC300_TE ? "TE" : "RSV",
439 ramsize / 1024, ramphys, pdev->irq,
440 card->tx_ring_buffers, card->rx_ring_buffers);
441
442 if (card->tx_ring_buffers < 1) {
443 printk(KERN_ERR "pc300: RAM test failed\n");
444 pc300_pci_remove_one(pdev);
445 return -EFAULT;
446 }
447
448 /* Enable interrupts on the PCI bridge, LINTi1 active low */
449 writew(0x0041, &card->plxbase->intr_ctrl_stat);
450
451 /* Allocate IRQ */
452 if (request_irq(pdev->irq, sca_intr, IRQF_SHARED, devname, card)) {
453 printk(KERN_WARNING "pc300: could not allocate IRQ%d.\n",
454 pdev->irq);
455 pc300_pci_remove_one(pdev);
456 return -EBUSY;
457 }
458 card->irq = pdev->irq;
459
460 sca_init(card, 0);
461
462 // COTE not set - allows better TX DMA settings
463 // sca_out(sca_in(PCR, card) | PCR_COTE, PCR, card);
464
465 sca_out(0x10, BTCR, card);
466
467 for (i = 0; i < card->n_ports; i++) {
468 port_t *port = &card->ports[i];
469 struct net_device *dev = port_to_dev(port);
470 hdlc_device *hdlc = dev_to_hdlc(dev);
471 port->phy_node = i;
472
473 spin_lock_init(&port->lock);
474 SET_MODULE_OWNER(dev);
475 dev->irq = card->irq;
476 dev->mem_start = ramphys;
477 dev->mem_end = ramphys + ramsize - 1;
478 dev->tx_queue_len = 50;
479 dev->do_ioctl = pc300_ioctl;
480 dev->open = pc300_open;
481 dev->stop = pc300_close;
482 hdlc->attach = sca_attach;
483 hdlc->xmit = sca_xmit;
484 port->settings.clock_type = CLOCK_EXT;
485 port->card = card;
486 if (card->type == PC300_X21)
487 port->iface = IF_IFACE_X21;
488 else
489 port->iface = IF_IFACE_V35;
490
491 if (register_hdlc_device(dev)) {
492 printk(KERN_ERR "pc300: unable to register hdlc "
493 "device\n");
494 port->card = NULL;
495 pc300_pci_remove_one(pdev);
496 return -ENOBUFS;
497 }
498 sca_init_sync_port(port); /* Set up SCA memory */
499
500 printk(KERN_INFO "%s: PC300 node %d\n",
501 dev->name, port->phy_node);
502 }
503 return 0;
504}
505
506
507
508static struct pci_device_id pc300_pci_tbl[] __devinitdata = {
509 { PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_PC300_RX_1, PCI_ANY_ID,
510 PCI_ANY_ID, 0, 0, 0 },
511 { PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_PC300_RX_2, PCI_ANY_ID,
512 PCI_ANY_ID, 0, 0, 0 },
513 { PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_PC300_TE_1, PCI_ANY_ID,
514 PCI_ANY_ID, 0, 0, 0 },
515 { PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_PC300_TE_2, PCI_ANY_ID,
516 PCI_ANY_ID, 0, 0, 0 },
517 { 0, }
518};
519
520
521static struct pci_driver pc300_pci_driver = {
522 .name = "PC300",
523 .id_table = pc300_pci_tbl,
524 .probe = pc300_pci_init_one,
525 .remove = pc300_pci_remove_one,
526};
527
528
529static int __init pc300_init_module(void)
530{
531#ifdef MODULE
532 printk(KERN_INFO "%s\n", version);
533#endif
534 if (pci_clock_freq < 1000000 || pci_clock_freq > 80000000) {
535 printk(KERN_ERR "pc300: Invalid PCI clock frequency\n");
536 return -EINVAL;
537 }
538 if (use_crystal_clock != 0 && use_crystal_clock != 1) {
539 printk(KERN_ERR "pc300: Invalid 'use_crystal_clock' value\n");
540 return -EINVAL;
541 }
542
543 CLOCK_BASE = use_crystal_clock ? 24576000 : pci_clock_freq;
544
545 return pci_module_init(&pc300_pci_driver);
546}
547
548
549
550static void __exit pc300_cleanup_module(void)
551{
552 pci_unregister_driver(&pc300_pci_driver);
553}
554
555MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
556MODULE_DESCRIPTION("Cyclades PC300 serial port driver");
557MODULE_LICENSE("GPL v2");
558MODULE_DEVICE_TABLE(pci, pc300_pci_tbl);
559module_param(pci_clock_freq, int, 0444);
560MODULE_PARM_DESC(pci_clock_freq, "System PCI clock frequency in Hz");
561module_param(use_crystal_clock, int, 0444);
562MODULE_PARM_DESC(use_crystal_clock,
563 "Use 24.576 MHz clock instead of PCI clock");
564module_init(pc300_init_module);
565module_exit(pc300_cleanup_module);
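For orientation, the ring sizing in pc300_pci_init_one() divides the detected card RAM, per port, into descriptor-plus-buffer slots, caps TX at MAX_TX_BUFFERS and gives the remainder to RX. A worked example under assumed numbers (32 KB detected, 2 ports, a 16-byte pkt_desc and an HDLC_MAX_MRU of 1600; none of these values are guaranteed here):

        /*
         *   i               = 32768 / (2 * (16 + 1600)) = 10 buffers/port
         *   tx_ring_buffers = min(10 / 2, 10)           = 5
         *   rx_ring_buffers = 10 - 5                    = 5
         *   buff_offset     = 2 * 16 * (5 + 5)          = 320
         *
         * i.e. packet buffers start 320 bytes in, after the
         * descriptor tables for both ports.
         */

Bringing a port up then follows the usual generic-HDLC flow (module load, sethdlc for protocol and clocking, interface up), per the hdlc utilities linked from the driver header.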
diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
index 59ddd21c3958..8dbcf83bb5f3 100644
--- a/drivers/net/wan/z85230.c
+++ b/drivers/net/wan/z85230.c
@@ -331,8 +331,7 @@ static void z8530_rtsdtr(struct z8530_channel *c, int set)
331static void z8530_rx(struct z8530_channel *c) 331static void z8530_rx(struct z8530_channel *c)
332{ 332{
333 u8 ch,stat; 333 u8 ch,stat;
334 spin_lock(c->lock); 334
335
336 while(1) 335 while(1)
337 { 336 {
338 /* FIFO empty ? */ 337 /* FIFO empty ? */
@@ -390,7 +389,6 @@ static void z8530_rx(struct z8530_channel *c)
390 */ 389 */
391 write_zsctrl(c, ERR_RES); 390 write_zsctrl(c, ERR_RES);
392 write_zsctrl(c, RES_H_IUS); 391 write_zsctrl(c, RES_H_IUS);
393 spin_unlock(c->lock);
394} 392}
395 393
396 394
@@ -406,7 +404,6 @@ static void z8530_rx(struct z8530_channel *c)
406 404
407static void z8530_tx(struct z8530_channel *c) 405static void z8530_tx(struct z8530_channel *c)
408{ 406{
409 spin_lock(c->lock);
410 while(c->txcount) { 407 while(c->txcount) {
411 /* FIFO full ? */ 408 /* FIFO full ? */
412 if(!(read_zsreg(c, R0)&4)) 409 if(!(read_zsreg(c, R0)&4))
@@ -434,7 +431,6 @@ static void z8530_tx(struct z8530_channel *c)
434 431
435 z8530_tx_done(c); 432 z8530_tx_done(c);
436 write_zsctrl(c, RES_H_IUS); 433 write_zsctrl(c, RES_H_IUS);
437 spin_unlock(c->lock);
438} 434}
439 435
440/** 436/**
@@ -452,7 +448,6 @@ static void z8530_status(struct z8530_channel *chan)
452{ 448{
453 u8 status, altered; 449 u8 status, altered;
454 450
455 spin_lock(chan->lock);
456 status=read_zsreg(chan, R0); 451 status=read_zsreg(chan, R0);
457 altered=chan->status^status; 452 altered=chan->status^status;
458 453
@@ -487,7 +482,6 @@ static void z8530_status(struct z8530_channel *chan)
487 } 482 }
488 write_zsctrl(chan, RES_EXT_INT); 483 write_zsctrl(chan, RES_EXT_INT);
489 write_zsctrl(chan, RES_H_IUS); 484 write_zsctrl(chan, RES_H_IUS);
490 spin_unlock(chan->lock);
491} 485}
492 486
493struct z8530_irqhandler z8530_sync= 487struct z8530_irqhandler z8530_sync=
@@ -511,7 +505,6 @@ EXPORT_SYMBOL(z8530_sync);
511 505
512static void z8530_dma_rx(struct z8530_channel *chan) 506static void z8530_dma_rx(struct z8530_channel *chan)
513{ 507{
514 spin_lock(chan->lock);
515 if(chan->rxdma_on) 508 if(chan->rxdma_on)
516 { 509 {
517 /* Special condition check only */ 510 /* Special condition check only */
@@ -534,7 +527,6 @@ static void z8530_dma_rx(struct z8530_channel *chan)
534 /* DMA is off right now, drain the slow way */ 527 /* DMA is off right now, drain the slow way */
535 z8530_rx(chan); 528 z8530_rx(chan);
536 } 529 }
537 spin_unlock(chan->lock);
538} 530}
539 531
540/** 532/**
@@ -547,7 +539,6 @@ static void z8530_dma_rx(struct z8530_channel *chan)
547 539
548static void z8530_dma_tx(struct z8530_channel *chan) 540static void z8530_dma_tx(struct z8530_channel *chan)
549{ 541{
550 spin_lock(chan->lock);
551 if(!chan->dma_tx) 542 if(!chan->dma_tx)
552 { 543 {
553 printk(KERN_WARNING "Hey who turned the DMA off?\n"); 544 printk(KERN_WARNING "Hey who turned the DMA off?\n");
@@ -557,7 +548,6 @@ static void z8530_dma_tx(struct z8530_channel *chan)
557 /* This shouldnt occur in DMA mode */ 548 /* This shouldnt occur in DMA mode */
558 printk(KERN_ERR "DMA tx - bogus event!\n"); 549 printk(KERN_ERR "DMA tx - bogus event!\n");
559 z8530_tx(chan); 550 z8530_tx(chan);
560 spin_unlock(chan->lock);
561} 551}
562 552
563/** 553/**
@@ -596,7 +586,6 @@ static void z8530_dma_status(struct z8530_channel *chan)
596 } 586 }
597 } 587 }
598 588
599 spin_lock(chan->lock);
600 if(altered&chan->dcdcheck) 589 if(altered&chan->dcdcheck)
601 { 590 {
602 if(status&chan->dcdcheck) 591 if(status&chan->dcdcheck)
@@ -618,7 +607,6 @@ static void z8530_dma_status(struct z8530_channel *chan)
618 607
619 write_zsctrl(chan, RES_EXT_INT); 608 write_zsctrl(chan, RES_EXT_INT);
620 write_zsctrl(chan, RES_H_IUS); 609 write_zsctrl(chan, RES_H_IUS);
621 spin_unlock(chan->lock);
622} 610}
623 611
624struct z8530_irqhandler z8530_dma_sync= 612struct z8530_irqhandler z8530_dma_sync=
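The deleted spin_lock(c->lock)/spin_unlock(c->lock) pairs were taken on a lock the caller already holds: z8530_interrupt() grabs the device lock once and then dispatches into these rx/tx/status handlers, so the inner acquisitions were redundant at best and self-deadlocking at worst. Simplified shape of the caller (a sketch; the real ISR also loops and services channel B):

static irqreturn_t z8530_interrupt(int irq, void *dev_id)
{
        struct z8530_dev *dev = dev_id;
        struct z8530_irqhandler *irqs = dev->chanA.irqs;
        u8 intr;

        spin_lock(&dev->lock);                  /* one lock per ISR pass */
        intr = read_zsreg(&dev->chanA, R3);     /* pending-interrupt summary */
        if (intr & CHARxIP)
                irqs->rx(&dev->chanA);          /* handlers run locked */
        if (intr & CHATxIP)
                irqs->tx(&dev->chanA);
        if (intr & CHAEXT)
                irqs->status(&dev->chanA);
        spin_unlock(&dev->lock);
        return IRQ_HANDLED;
}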
diff --git a/drivers/net/wd.c b/drivers/net/wd.c
index 7f38012b9c92..a0326818ff2f 100644
--- a/drivers/net/wd.c
+++ b/drivers/net/wd.c
@@ -433,7 +433,7 @@ wd_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_
433 memcpy_fromio(skb->data + semi_count, ei_status.mem + TX_PAGES * 256, count); 433 memcpy_fromio(skb->data + semi_count, ei_status.mem + TX_PAGES * 256, count);
434 } else { 434 } else {
435 /* Packet is in one chunk -- we can copy + cksum. */ 435 /* Packet is in one chunk -- we can copy + cksum. */
436 eth_io_copy_and_sum(skb, xfer_start, count, 0); 436 memcpy_fromio(skb->data, xfer_start, count);
437 } 437 }
438 438
439 /* Turn off 16 bit access so that reboot works. ISA brain-damage */ 439 /* Turn off 16 bit access so that reboot works. ISA brain-damage */
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx.h b/drivers/net/wireless/bcm43xx/bcm43xx.h
index 8286678513b9..3a064def162e 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx.h
+++ b/drivers/net/wireless/bcm43xx/bcm43xx.h
@@ -352,6 +352,10 @@
352#define BCM43xx_UCODEFLAG_UNKPACTRL 0x0040 352#define BCM43xx_UCODEFLAG_UNKPACTRL 0x0040
353#define BCM43xx_UCODEFLAG_JAPAN 0x0080 353#define BCM43xx_UCODEFLAG_JAPAN 0x0080
354 354
355/* Hardware Radio Enable masks */
356#define BCM43xx_MMIO_RADIO_HWENABLED_HI_MASK (1 << 16)
357#define BCM43xx_MMIO_RADIO_HWENABLED_LO_MASK (1 << 4)
358
355/* Generic-Interrupt reasons. */ 359/* Generic-Interrupt reasons. */
356#define BCM43xx_IRQ_READY (1 << 0) 360#define BCM43xx_IRQ_READY (1 << 0)
357#define BCM43xx_IRQ_BEACON (1 << 1) 361#define BCM43xx_IRQ_BEACON (1 << 1)
@@ -758,7 +762,8 @@ struct bcm43xx_private {
758 bad_frames_preempt:1, /* Use "Bad Frames Preemption" (default off) */ 762 bad_frames_preempt:1, /* Use "Bad Frames Preemption" (default off) */
759 reg124_set_0x4:1, /* Some variable to keep track of IRQ stuff. */ 763 reg124_set_0x4:1, /* Some variable to keep track of IRQ stuff. */
760 short_preamble:1, /* TRUE, if short preamble is enabled. */ 764 short_preamble:1, /* TRUE, if short preamble is enabled. */
761 firmware_norelease:1; /* Do not release the firmware. Used on suspend. */ 765 firmware_norelease:1, /* Do not release the firmware. Used on suspend. */
766 radio_hw_enable:1; /* TRUE if radio is hardware enabled */
762 767
763 struct bcm43xx_stats stats; 768 struct bcm43xx_stats stats;
764 769
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_leds.c b/drivers/net/wireless/bcm43xx/bcm43xx_leds.c
index 7d383a27b927..8f198befba39 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_leds.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_leds.c
@@ -26,6 +26,7 @@
26*/ 26*/
27 27
28#include "bcm43xx_leds.h" 28#include "bcm43xx_leds.h"
29#include "bcm43xx_radio.h"
29#include "bcm43xx.h" 30#include "bcm43xx.h"
30 31
31#include <asm/bitops.h> 32#include <asm/bitops.h>
@@ -108,6 +109,7 @@ static void bcm43xx_led_init_hardcoded(struct bcm43xx_private *bcm,
108 switch (led_index) { 109 switch (led_index) {
109 case 0: 110 case 0:
110 led->behaviour = BCM43xx_LED_ACTIVITY; 111 led->behaviour = BCM43xx_LED_ACTIVITY;
112 led->activelow = 1;
111 if (bcm->board_vendor == PCI_VENDOR_ID_COMPAQ) 113 if (bcm->board_vendor == PCI_VENDOR_ID_COMPAQ)
112 led->behaviour = BCM43xx_LED_RADIO_ALL; 114 led->behaviour = BCM43xx_LED_RADIO_ALL;
113 break; 115 break;
@@ -199,20 +201,21 @@ void bcm43xx_leds_update(struct bcm43xx_private *bcm, int activity)
199 turn_on = activity; 201 turn_on = activity;
200 break; 202 break;
201 case BCM43xx_LED_RADIO_ALL: 203 case BCM43xx_LED_RADIO_ALL:
202 turn_on = radio->enabled; 204 turn_on = radio->enabled && bcm43xx_is_hw_radio_enabled(bcm);
203 break; 205 break;
204 case BCM43xx_LED_RADIO_A: 206 case BCM43xx_LED_RADIO_A:
205 case BCM43xx_LED_BCM4303_2: 207 case BCM43xx_LED_BCM4303_2:
206 turn_on = (radio->enabled && phy->type == BCM43xx_PHYTYPE_A); 208 turn_on = (radio->enabled && bcm43xx_is_hw_radio_enabled(bcm) &&
209 phy->type == BCM43xx_PHYTYPE_A);
207 break; 210 break;
208 case BCM43xx_LED_RADIO_B: 211 case BCM43xx_LED_RADIO_B:
209 case BCM43xx_LED_BCM4303_1: 212 case BCM43xx_LED_BCM4303_1:
210 turn_on = (radio->enabled && 213 turn_on = (radio->enabled && bcm43xx_is_hw_radio_enabled(bcm) &&
211 (phy->type == BCM43xx_PHYTYPE_B || 214 (phy->type == BCM43xx_PHYTYPE_B ||
212 phy->type == BCM43xx_PHYTYPE_G)); 215 phy->type == BCM43xx_PHYTYPE_G));
213 break; 216 break;
214 case BCM43xx_LED_MODE_BG: 217 case BCM43xx_LED_MODE_BG:
215 if (phy->type == BCM43xx_PHYTYPE_G && 218 if (phy->type == BCM43xx_PHYTYPE_G && bcm43xx_is_hw_radio_enabled(bcm) &&
216 1/*FIXME: using G rates.*/) 219 1/*FIXME: using G rates.*/)
217 turn_on = 1; 220 turn_on = 1;
218 break; 221 break;
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
index 91b752e3d07e..23aaf1ed8541 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_main.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
@@ -2441,6 +2441,9 @@ static int bcm43xx_chip_init(struct bcm43xx_private *bcm)
2441 if (err) 2441 if (err)
2442 goto err_gpio_cleanup; 2442 goto err_gpio_cleanup;
2443 bcm43xx_radio_turn_on(bcm); 2443 bcm43xx_radio_turn_on(bcm);
2444 bcm->radio_hw_enable = bcm43xx_is_hw_radio_enabled(bcm);
2445 dprintk(KERN_INFO PFX "Radio %s by hardware\n",
2446 (bcm->radio_hw_enable == 0) ? "disabled" : "enabled");
2444 2447
2445 bcm43xx_write16(bcm, 0x03E6, 0x0000); 2448 bcm43xx_write16(bcm, 0x03E6, 0x0000);
2446 err = bcm43xx_phy_init(bcm); 2449 err = bcm43xx_phy_init(bcm);
@@ -3175,9 +3178,24 @@ static void bcm43xx_periodic_every30sec(struct bcm43xx_private *bcm)
3175 3178
3176static void bcm43xx_periodic_every15sec(struct bcm43xx_private *bcm) 3179static void bcm43xx_periodic_every15sec(struct bcm43xx_private *bcm)
3177{ 3180{
3181 bcm43xx_phy_xmitpower(bcm); //FIXME: unless scanning?
3182 //TODO for APHY (temperature?)
3183}
3184
3185static void bcm43xx_periodic_every1sec(struct bcm43xx_private *bcm)
3186{
3178 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm); 3187 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
3179 struct bcm43xx_radioinfo *radio = bcm43xx_current_radio(bcm); 3188 struct bcm43xx_radioinfo *radio = bcm43xx_current_radio(bcm);
3189 int radio_hw_enable;
3180 3190
3191 /* check if radio hardware enabled status changed */
3192 radio_hw_enable = bcm43xx_is_hw_radio_enabled(bcm);
3193 if (unlikely(bcm->radio_hw_enable != radio_hw_enable)) {
3194 bcm->radio_hw_enable = radio_hw_enable;
3195 dprintk(KERN_INFO PFX "Radio hardware status changed to %s\n",
3196 (radio_hw_enable == 0) ? "disabled" : "enabled");
3197 bcm43xx_leds_update(bcm, 0);
3198 }
3181 if (phy->type == BCM43xx_PHYTYPE_G) { 3199 if (phy->type == BCM43xx_PHYTYPE_G) {
3182 //TODO: update_aci_moving_average 3200 //TODO: update_aci_moving_average
3183 if (radio->aci_enable && radio->aci_wlan_automatic) { 3201 if (radio->aci_enable && radio->aci_wlan_automatic) {
@@ -3201,21 +3219,21 @@ static void bcm43xx_periodic_every15sec(struct bcm43xx_private *bcm)
3201 //TODO: implement rev1 workaround 3219 //TODO: implement rev1 workaround
3202 } 3220 }
3203 } 3221 }
3204 bcm43xx_phy_xmitpower(bcm); //FIXME: unless scanning?
3205 //TODO for APHY (temperature?)
3206} 3222}
3207 3223
3208static void do_periodic_work(struct bcm43xx_private *bcm) 3224static void do_periodic_work(struct bcm43xx_private *bcm)
3209{ 3225{
3210 if (bcm->periodic_state % 8 == 0) 3226 if (bcm->periodic_state % 120 == 0)
3211 bcm43xx_periodic_every120sec(bcm); 3227 bcm43xx_periodic_every120sec(bcm);
3212 if (bcm->periodic_state % 4 == 0) 3228 if (bcm->periodic_state % 60 == 0)
3213 bcm43xx_periodic_every60sec(bcm); 3229 bcm43xx_periodic_every60sec(bcm);
3214 if (bcm->periodic_state % 2 == 0) 3230 if (bcm->periodic_state % 30 == 0)
3215 bcm43xx_periodic_every30sec(bcm); 3231 bcm43xx_periodic_every30sec(bcm);
3216 bcm43xx_periodic_every15sec(bcm); 3232 if (bcm->periodic_state % 15 == 0)
3233 bcm43xx_periodic_every15sec(bcm);
3234 bcm43xx_periodic_every1sec(bcm);
3217 3235
3218 schedule_delayed_work(&bcm->periodic_work, HZ * 15); 3236 schedule_delayed_work(&bcm->periodic_work, HZ);
3219} 3237}
3220 3238
3221static void bcm43xx_periodic_work_handler(struct work_struct *work) 3239static void bcm43xx_periodic_work_handler(struct work_struct *work)
@@ -3228,7 +3246,7 @@ static void bcm43xx_periodic_work_handler(struct work_struct *work)
3228 unsigned long orig_trans_start = 0; 3246 unsigned long orig_trans_start = 0;
3229 3247
3230 mutex_lock(&bcm->mutex); 3248 mutex_lock(&bcm->mutex);
3231 if (unlikely(bcm->periodic_state % 4 == 0)) { 3249 if (unlikely(bcm->periodic_state % 60 == 0)) {
3232 /* Periodic work will take a long time, so we want it to 3250 /* Periodic work will take a long time, so we want it to
3233 * be preemtible. 3251 * be preemtible.
3234 */ 3252 */
@@ -3260,7 +3278,7 @@ static void bcm43xx_periodic_work_handler(struct work_struct *work)
3260 3278
3261 do_periodic_work(bcm); 3279 do_periodic_work(bcm);
3262 3280
3263 if (unlikely(bcm->periodic_state % 4 == 0)) { 3281 if (unlikely(bcm->periodic_state % 60 == 0)) {
3264 spin_lock_irqsave(&bcm->irq_lock, flags); 3282 spin_lock_irqsave(&bcm->irq_lock, flags);
3265 tasklet_enable(&bcm->isr_tasklet); 3283 tasklet_enable(&bcm->isr_tasklet);
3266 bcm43xx_interrupt_enable(bcm, savedirqs); 3284 bcm43xx_interrupt_enable(bcm, savedirqs);
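The scheduling change above reinterprets periodic_state as a seconds counter: the work now runs every HZ instead of every 15 s, and each job fires when the counter is a multiple of its period, which is what lets the new rf-kill poll in bcm43xx_periodic_every1sec() run on every pass. The resulting cadence, spelled out (assuming the counter starts at 0; wrap handling lives elsewhere):

        /* state (s)   handlers invoked
         *    0         120s, 60s, 30s, 15s, 1s   (heavy path: % 60 == 0)
         *    1..14     1s
         *    15        15s, 1s
         *    30        30s, 15s, 1s
         *    60        60s, 30s, 15s, 1s         (tasklet disabled, preemptible)
         */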
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_radio.c b/drivers/net/wireless/bcm43xx/bcm43xx_radio.c
index bb9c484d7e19..af19a07032a3 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_radio.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_radio.c
@@ -1981,6 +1981,7 @@ void bcm43xx_radio_turn_on(struct bcm43xx_private *bcm)
1981 } 1981 }
1982 radio->enabled = 1; 1982 radio->enabled = 1;
1983 dprintk(KERN_INFO PFX "Radio turned on\n"); 1983 dprintk(KERN_INFO PFX "Radio turned on\n");
1984 bcm43xx_leds_update(bcm, 0);
1984} 1985}
1985 1986
1986void bcm43xx_radio_turn_off(struct bcm43xx_private *bcm) 1987void bcm43xx_radio_turn_off(struct bcm43xx_private *bcm)
@@ -2001,6 +2002,7 @@ void bcm43xx_radio_turn_off(struct bcm43xx_private *bcm)
2001 bcm43xx_phy_write(bcm, 0x0015, 0xAA00); 2002 bcm43xx_phy_write(bcm, 0x0015, 0xAA00);
2002 radio->enabled = 0; 2003 radio->enabled = 0;
2003 dprintk(KERN_INFO PFX "Radio turned off\n"); 2004 dprintk(KERN_INFO PFX "Radio turned off\n");
2005 bcm43xx_leds_update(bcm, 0);
2004} 2006}
2005 2007
2006void bcm43xx_radio_clear_tssi(struct bcm43xx_private *bcm) 2008void bcm43xx_radio_clear_tssi(struct bcm43xx_private *bcm)
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_radio.h b/drivers/net/wireless/bcm43xx/bcm43xx_radio.h
index 9ed18039fa3e..77a98a53a2e2 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_radio.h
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_radio.h
@@ -65,6 +65,22 @@ void bcm43xx_radio_init2060(struct bcm43xx_private *bcm);
65void bcm43xx_radio_turn_on(struct bcm43xx_private *bcm); 65void bcm43xx_radio_turn_on(struct bcm43xx_private *bcm);
66void bcm43xx_radio_turn_off(struct bcm43xx_private *bcm); 66void bcm43xx_radio_turn_off(struct bcm43xx_private *bcm);
67 67
68static inline
69int bcm43xx_is_hw_radio_enabled(struct bcm43xx_private *bcm)
70{
71 /* function to return state of hardware enable of radio
72 * returns 0 if radio disabled, 1 if radio enabled
73 */
74 if (bcm->current_core->rev >= 3)
75 return ((bcm43xx_read32(bcm, BCM43xx_MMIO_RADIO_HWENABLED_HI)
76 & BCM43xx_MMIO_RADIO_HWENABLED_HI_MASK)
77 == 0) ? 1 : 0;
78 else
79 return ((bcm43xx_read16(bcm, BCM43xx_MMIO_RADIO_HWENABLED_LO)
80 & BCM43xx_MMIO_RADIO_HWENABLED_LO_MASK)
81 == 0) ? 0 : 1;
82}
83
68int bcm43xx_radio_selectchannel(struct bcm43xx_private *bcm, u8 channel, 84int bcm43xx_radio_selectchannel(struct bcm43xx_private *bcm, u8 channel,
69 int synthetic_pu_workaround); 85 int synthetic_pu_workaround);
70 86
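Note the inverted sense between core revisions in bcm43xx_is_hw_radio_enabled(): on rev >= 3 cores the HI-register bit reads as set when the hardware switch has the radio off, while on older cores the LO-register bit reads as set when the radio is on. A flattened equivalent of the same logic:

static inline int hw_radio_enabled(struct bcm43xx_private *bcm)
{
        if (bcm->current_core->rev >= 3)        /* bit set == disabled */
                return !(bcm43xx_read32(bcm, BCM43xx_MMIO_RADIO_HWENABLED_HI)
                         & BCM43xx_MMIO_RADIO_HWENABLED_HI_MASK);
        /* older cores: bit set == enabled */
        return !!(bcm43xx_read16(bcm, BCM43xx_MMIO_RADIO_HWENABLED_LO)
                  & BCM43xx_MMIO_RADIO_HWENABLED_LO_MASK);
}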
diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c
index 04c19cefa1da..9077e6edde34 100644
--- a/drivers/net/wireless/hostap/hostap_main.c
+++ b/drivers/net/wireless/hostap/hostap_main.c
@@ -84,7 +84,7 @@ struct net_device * hostap_add_interface(struct local_info *local,
84 if (strchr(dev->name, '%')) 84 if (strchr(dev->name, '%'))
85 ret = dev_alloc_name(dev, dev->name); 85 ret = dev_alloc_name(dev, dev->name);
86 86
87 SET_NETDEV_DEV(dev, mdev->class_dev.dev); 87 SET_NETDEV_DEV(dev, mdev->dev.parent);
88 if (ret >= 0) 88 if (ret >= 0)
89 ret = register_netdevice(dev); 89 ret = register_netdevice(dev);
90 90
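This and the matching class_dev.dev to dev.parent substitutions in the drivers below track the driver-core change that replaced net_device's embedded class_device with a plain struct device; the bus device now sits in dev.parent. Drivers normally set it through the existing helper, e.g. for a PCI NIC (a generic pattern, not hostap-specific):

        /* After the conversion this assigns netdev->dev.parent,
         * where it used to assign netdev->class_dev.dev. */
        SET_NETDEV_DEV(netdev, &pdev->dev);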
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index 22cb3fb7502e..c878a2f3239c 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -9166,7 +9166,7 @@ static int ipw_wx_set_rts(struct net_device *dev,
9166{ 9166{
9167 struct ipw_priv *priv = ieee80211_priv(dev); 9167 struct ipw_priv *priv = ieee80211_priv(dev);
9168 mutex_lock(&priv->mutex); 9168 mutex_lock(&priv->mutex);
9169 if (wrqu->rts.disabled) 9169 if (wrqu->rts.disabled || !wrqu->rts.fixed)
9170 priv->rts_threshold = DEFAULT_RTS_THRESHOLD; 9170 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
9171 else { 9171 else {
9172 if (wrqu->rts.value < MIN_RTS_THRESHOLD || 9172 if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
@@ -9255,7 +9255,7 @@ static int ipw_wx_set_frag(struct net_device *dev,
9255{ 9255{
9256 struct ipw_priv *priv = ieee80211_priv(dev); 9256 struct ipw_priv *priv = ieee80211_priv(dev);
9257 mutex_lock(&priv->mutex); 9257 mutex_lock(&priv->mutex);
9258 if (wrqu->frag.disabled) 9258 if (wrqu->frag.disabled || !wrqu->frag.fixed)
9259 priv->ieee->fts = DEFAULT_FTS; 9259 priv->ieee->fts = DEFAULT_FTS;
9260 else { 9260 else {
9261 if (wrqu->frag.value < MIN_FRAG_THRESHOLD || 9261 if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
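The ipw2200 hunks adopt the wireless-extensions convention that iw_param.fixed == 0 means "auto": a request such as `iwconfig ethX rts auto` should restore the default threshold rather than be rejected as an invalid fixed value. The handler shape, as a sketch (locking and the frag variant omitted; wx_set_rts is an illustrative name, the real handlers take the full WE argument list):

static int wx_set_rts(struct net_device *dev, union iwreq_data *wrqu)
{
        struct ipw_priv *priv = ieee80211_priv(dev);

        if (wrqu->rts.disabled || !wrqu->rts.fixed) {
                priv->rts_threshold = DEFAULT_RTS_THRESHOLD;    /* auto */
                return 0;
        }
        if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
            wrqu->rts.value > MAX_RTS_THRESHOLD)
                return -EINVAL;
        priv->rts_threshold = wrqu->rts.value;
        return 0;
}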
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c
index 936c888e03e1..4e7f6cf51436 100644
--- a/drivers/net/wireless/orinoco.c
+++ b/drivers/net/wireless/orinoco.c
@@ -2059,7 +2059,7 @@ static int determine_firmware(struct net_device *dev)
2059 int err; 2059 int err;
2060 struct comp_id nic_id, sta_id; 2060 struct comp_id nic_id, sta_id;
2061 unsigned int firmver; 2061 unsigned int firmver;
2062 char tmp[SYMBOL_MAX_VER_LEN+1]; 2062 char tmp[SYMBOL_MAX_VER_LEN+1] __attribute__((aligned(2)));
2063 2063
2064 /* Get the hardware version */ 2064 /* Get the hardware version */
2065 err = HERMES_READ_RECORD(hw, USER_BAP, HERMES_RID_NICID, &nic_id); 2065 err = HERMES_READ_RECORD(hw, USER_BAP, HERMES_RID_NICID, &nic_id);
@@ -4293,8 +4293,8 @@ static void orinoco_get_drvinfo(struct net_device *dev,
4293 strncpy(info->driver, DRIVER_NAME, sizeof(info->driver) - 1); 4293 strncpy(info->driver, DRIVER_NAME, sizeof(info->driver) - 1);
4294 strncpy(info->version, DRIVER_VERSION, sizeof(info->version) - 1); 4294 strncpy(info->version, DRIVER_VERSION, sizeof(info->version) - 1);
4295 strncpy(info->fw_version, priv->fw_name, sizeof(info->fw_version) - 1); 4295 strncpy(info->fw_version, priv->fw_name, sizeof(info->fw_version) - 1);
4296 if (dev->class_dev.dev) 4296 if (dev->dev.parent)
4297 strncpy(info->bus_info, dev->class_dev.dev->bus_id, 4297 strncpy(info->bus_info, dev->dev.parent->bus_id,
4298 sizeof(info->bus_info) - 1); 4298 sizeof(info->bus_info) - 1);
4299 else 4299 else
4300 snprintf(info->bus_info, sizeof(info->bus_info) - 1, 4300 snprintf(info->bus_info, sizeof(info->bus_info) - 1,
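The aligned(2) attribute on tmp is there because the Symbol firmware-version record is copied out of the Hermes BAP in 16-bit units; a char array on the stack carries no alignment guarantee, and an odd-aligned destination can fault or corrupt data on strict-alignment architectures. The fix is just:

        /* Filled 16 bits at a time, so it must be halfword-aligned. */
        char tmp[SYMBOL_MAX_VER_LEN + 1] __attribute__((aligned(2)));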
diff --git a/drivers/net/wireless/orinoco_cs.c b/drivers/net/wireless/orinoco_cs.c
index d08ae8d2726c..d1e502236b2a 100644
--- a/drivers/net/wireless/orinoco_cs.c
+++ b/drivers/net/wireless/orinoco_cs.c
@@ -332,7 +332,7 @@ orinoco_cs_config(struct pcmcia_device *link)
332 332
333 /* Finally, report what we've done */ 333 /* Finally, report what we've done */
334 printk(KERN_DEBUG "%s: " DRIVER_NAME " at %s, irq %d, io " 334 printk(KERN_DEBUG "%s: " DRIVER_NAME " at %s, irq %d, io "
335 "0x%04x-0x%04x\n", dev->name, dev->class_dev.dev->bus_id, 335 "0x%04x-0x%04x\n", dev->name, dev->dev.parent->bus_id,
336 link->irq.AssignedIRQ, link->io.BasePort1, 336 link->irq.AssignedIRQ, link->io.BasePort1,
337 link->io.BasePort1 + link->io.NumPorts1 - 1); 337 link->io.BasePort1 + link->io.NumPorts1 - 1);
338 338
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index f057fd9fcd79..a037b11dac9d 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -21,6 +21,7 @@
21#include <linux/module.h> 21#include <linux/module.h>
22 22
23#include <linux/netdevice.h> 23#include <linux/netdevice.h>
24#include <linux/ethtool.h>
24#include <linux/pci.h> 25#include <linux/pci.h>
25#include <linux/etherdevice.h> 26#include <linux/etherdevice.h>
26#include <linux/delay.h> 27#include <linux/delay.h>
@@ -787,6 +788,17 @@ islpci_set_multicast_list(struct net_device *dev)
787} 788}
788#endif 789#endif
789 790
791static void islpci_ethtool_get_drvinfo(struct net_device *dev,
792 struct ethtool_drvinfo *info)
793{
794 strcpy(info->driver, DRV_NAME);
795 strcpy(info->version, DRV_VERSION);
796}
797
798static struct ethtool_ops islpci_ethtool_ops = {
799 .get_drvinfo = islpci_ethtool_get_drvinfo,
800};
801
790struct net_device * 802struct net_device *
791islpci_setup(struct pci_dev *pdev) 803islpci_setup(struct pci_dev *pdev)
792{ 804{
@@ -813,6 +825,7 @@ islpci_setup(struct pci_dev *pdev)
813 ndev->do_ioctl = &prism54_ioctl; 825 ndev->do_ioctl = &prism54_ioctl;
814 ndev->wireless_handlers = 826 ndev->wireless_handlers =
815 (struct iw_handler_def *) &prism54_handler_def; 827 (struct iw_handler_def *) &prism54_handler_def;
828 ndev->ethtool_ops = &islpci_ethtool_ops;
816 829
817 ndev->hard_start_xmit = &islpci_eth_transmit; 830 ndev->hard_start_xmit = &islpci_eth_transmit;
818 /* ndev->set_multicast_list = &islpci_set_multicast_list; */ 831 /* ndev->set_multicast_list = &islpci_set_multicast_list; */
diff --git a/drivers/net/wireless/prism54/islpci_dev.h b/drivers/net/wireless/prism54/islpci_dev.h
index a9aa1662eaa4..736666da6c24 100644
--- a/drivers/net/wireless/prism54/islpci_dev.h
+++ b/drivers/net/wireless/prism54/islpci_dev.h
@@ -211,4 +211,8 @@ islpci_trigger(islpci_private *priv)
211 211
212int islpci_free_memory(islpci_private *); 212int islpci_free_memory(islpci_private *);
213struct net_device *islpci_setup(struct pci_dev *); 213struct net_device *islpci_setup(struct pci_dev *);
214
215#define DRV_NAME "prism54"
216#define DRV_VERSION "1.2"
217
214#endif /* _ISLPCI_DEV_H */ 218#endif /* _ISLPCI_DEV_H */
diff --git a/drivers/net/wireless/prism54/islpci_hotplug.c b/drivers/net/wireless/prism54/islpci_hotplug.c
index 58257b40c043..3dcb13bb7d57 100644
--- a/drivers/net/wireless/prism54/islpci_hotplug.c
+++ b/drivers/net/wireless/prism54/islpci_hotplug.c
@@ -28,9 +28,6 @@
28#include "islpci_mgt.h" /* for pc_debug */ 28#include "islpci_mgt.h" /* for pc_debug */
29#include "isl_oid.h" 29#include "isl_oid.h"
30 30
31#define DRV_NAME "prism54"
32#define DRV_VERSION "1.2"
33
34MODULE_AUTHOR("[Intersil] R.Bastings and W.Termorshuizen, The prism54.org Development Team <prism54-devel@prism54.org>"); 31MODULE_AUTHOR("[Intersil] R.Bastings and W.Termorshuizen, The prism54.org Development Team <prism54-devel@prism54.org>");
35MODULE_DESCRIPTION("The Prism54 802.11 Wireless LAN adapter"); 32MODULE_DESCRIPTION("The Prism54 802.11 Wireless LAN adapter");
36MODULE_LICENSE("GPL"); 33MODULE_LICENSE("GPL");
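Moving DRV_NAME/DRV_VERSION into islpci_dev.h lets the hotplug module metadata and the new ethtool hook share one definition, so `ethtool -i` on the interface now identifies the driver the standard way. The hook needs nothing beyond:

static void islpci_ethtool_get_drvinfo(struct net_device *dev,
                                       struct ethtool_drvinfo *info)
{
        strcpy(info->driver, DRV_NAME);         /* "prism54" */
        strcpy(info->version, DRV_VERSION);     /* "1.2" */
}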
diff --git a/drivers/net/wireless/spectrum_cs.c b/drivers/net/wireless/spectrum_cs.c
index cf2d1486b01d..af70460f008a 100644
--- a/drivers/net/wireless/spectrum_cs.c
+++ b/drivers/net/wireless/spectrum_cs.c
@@ -806,7 +806,7 @@ spectrum_cs_config(struct pcmcia_device *link)
806 806
807 /* Finally, report what we've done */ 807 /* Finally, report what we've done */
808 printk(KERN_DEBUG "%s: " DRIVER_NAME " at %s, irq %d, io " 808 printk(KERN_DEBUG "%s: " DRIVER_NAME " at %s, irq %d, io "
809 "0x%04x-0x%04x\n", dev->name, dev->class_dev.dev->bus_id, 809 "0x%04x-0x%04x\n", dev->name, dev->dev.parent->bus_id,
810 link->irq.AssignedIRQ, link->io.BasePort1, 810 link->irq.AssignedIRQ, link->io.BasePort1,
811 link->io.BasePort1 + link->io.NumPorts1 - 1); 811 link->io.BasePort1 + link->io.NumPorts1 - 1);
812 812
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c
index 78ea72fb8f0c..12dfc0b6efe6 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.c
+++ b/drivers/net/wireless/zd1211rw/zd_chip.c
@@ -84,6 +84,18 @@ static void print_id(struct zd_chip *chip)
84 dev_info(zd_chip_dev(chip), "%s\n", buffer); 84 dev_info(zd_chip_dev(chip), "%s\n", buffer);
85} 85}
86 86
87static zd_addr_t inc_addr(zd_addr_t addr)
88{
89 u16 a = (u16)addr;
90 /* Control registers use byte addressing, but everything else uses word
91 * addressing. */
92 if ((a & 0xf000) == CR_START)
93 a += 2;
94 else
95 a += 1;
96 return (zd_addr_t)a;
97}
98
87/* Read a variable number of 32-bit values. Parameter count is not allowed to 99/* Read a variable number of 32-bit values. Parameter count is not allowed to
88 * exceed USB_MAX_IOREAD32_COUNT. 100 * exceed USB_MAX_IOREAD32_COUNT.
89 */ 101 */
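To make the addressing rule concrete (illustration only; the register values are made up, and CR_START is 0x9000 as defined in zd_chip.h further down):

	/* Control registers are byte addressed: consecutive 16-bit
	 * registers sit two apart. EEPROM and firmware words sit
	 * one apart. */
	zd_addr_t cr_next  = inc_addr((zd_addr_t)0x9004); /* -> 0x9006 */
	zd_addr_t e2p_next = inc_addr((zd_addr_t)0xf817); /* -> 0xf818 */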
@@ -114,7 +126,7 @@ int zd_ioread32v_locked(struct zd_chip *chip, u32 *values, const zd_addr_t *addr
114 for (i = 0; i < count; i++) { 126 for (i = 0; i < count; i++) {
115 int j = 2*i; 127 int j = 2*i;
116 /* We read the high word always first. */ 128 /* We read the high word always first. */
117 a16[j] = zd_inc_word(addr[i]); 129 a16[j] = inc_addr(addr[i]);
118 a16[j+1] = addr[i]; 130 a16[j+1] = addr[i];
119 } 131 }
120 132
@@ -163,7 +175,7 @@ int _zd_iowrite32v_locked(struct zd_chip *chip, const struct zd_ioreq32 *ioreqs,
163 j = 2*i; 175 j = 2*i;
164 /* We write the high word always first. */ 176 /* We write the high word always first. */
165 ioreqs16[j].value = ioreqs[i].value >> 16; 177 ioreqs16[j].value = ioreqs[i].value >> 16;
166 ioreqs16[j].addr = zd_inc_word(ioreqs[i].addr); 178 ioreqs16[j].addr = inc_addr(ioreqs[i].addr);
167 ioreqs16[j+1].value = ioreqs[i].value; 179 ioreqs16[j+1].value = ioreqs[i].value;
168 ioreqs16[j+1].addr = ioreqs[i].addr; 180 ioreqs16[j+1].addr = ioreqs[i].addr;
169 } 181 }
@@ -466,7 +478,8 @@ static int read_values(struct zd_chip *chip, u8 *values, size_t count,
466 478
467 ZD_ASSERT(mutex_is_locked(&chip->mutex)); 479 ZD_ASSERT(mutex_is_locked(&chip->mutex));
468 for (i = 0;;) { 480 for (i = 0;;) {
469 r = zd_ioread32_locked(chip, &v, e2p_addr+i/2); 481 r = zd_ioread32_locked(chip, &v,
482 (zd_addr_t)((u16)e2p_addr+i/2));
470 if (r) 483 if (r)
471 return r; 484 return r;
472 v -= guard; 485 v -= guard;
@@ -798,47 +811,18 @@ static int hw_reset_phy(struct zd_chip *chip)
798static int zd1211_hw_init_hmac(struct zd_chip *chip) 811static int zd1211_hw_init_hmac(struct zd_chip *chip)
799{ 812{
800 static const struct zd_ioreq32 ioreqs[] = { 813 static const struct zd_ioreq32 ioreqs[] = {
801 { CR_ACK_TIMEOUT_EXT, 0x20 },
802 { CR_ADDA_MBIAS_WARMTIME, 0x30000808 },
803 { CR_ZD1211_RETRY_MAX, 0x2 }, 814 { CR_ZD1211_RETRY_MAX, 0x2 },
804 { CR_SNIFFER_ON, 0 },
805 { CR_RX_FILTER, STA_RX_FILTER },
806 { CR_GROUP_HASH_P1, 0x00 },
807 { CR_GROUP_HASH_P2, 0x80000000 },
808 { CR_REG1, 0xa4 },
809 { CR_ADDA_PWR_DWN, 0x7f },
810 { CR_BCN_PLCP_CFG, 0x00f00401 },
811 { CR_PHY_DELAY, 0x00 },
812 { CR_ACK_TIMEOUT_EXT, 0x80 },
813 { CR_ADDA_PWR_DWN, 0x00 },
814 { CR_ACK_TIME_80211, 0x100 },
815 { CR_RX_PE_DELAY, 0x70 },
816 { CR_PS_CTRL, 0x10000000 },
817 { CR_RTS_CTS_RATE, 0x02030203 },
818 { CR_RX_THRESHOLD, 0x000c0640 }, 815 { CR_RX_THRESHOLD, 0x000c0640 },
819 { CR_AFTER_PNP, 0x1 },
820 { CR_WEP_PROTECT, 0x114 },
821 }; 816 };
822 817
823 int r;
824
825 dev_dbg_f(zd_chip_dev(chip), "\n"); 818 dev_dbg_f(zd_chip_dev(chip), "\n");
826 ZD_ASSERT(mutex_is_locked(&chip->mutex)); 819 ZD_ASSERT(mutex_is_locked(&chip->mutex));
827 r = zd_iowrite32a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); 820 return zd_iowrite32a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
828#ifdef DEBUG
829 if (r) {
830 dev_err(zd_chip_dev(chip),
831 "error in zd_iowrite32a_locked. Error number %d\n", r);
832 }
833#endif /* DEBUG */
834 return r;
835} 821}
836 822
837static int zd1211b_hw_init_hmac(struct zd_chip *chip) 823static int zd1211b_hw_init_hmac(struct zd_chip *chip)
838{ 824{
839 static const struct zd_ioreq32 ioreqs[] = { 825 static const struct zd_ioreq32 ioreqs[] = {
840 { CR_ACK_TIMEOUT_EXT, 0x20 },
841 { CR_ADDA_MBIAS_WARMTIME, 0x30000808 },
842 { CR_ZD1211B_RETRY_MAX, 0x02020202 }, 826 { CR_ZD1211B_RETRY_MAX, 0x02020202 },
843 { CR_ZD1211B_TX_PWR_CTL4, 0x007f003f }, 827 { CR_ZD1211B_TX_PWR_CTL4, 0x007f003f },
844 { CR_ZD1211B_TX_PWR_CTL3, 0x007f003f }, 828 { CR_ZD1211B_TX_PWR_CTL3, 0x007f003f },
@@ -847,6 +831,20 @@ static int zd1211b_hw_init_hmac(struct zd_chip *chip)
847 { CR_ZD1211B_AIFS_CTL1, 0x00280028 }, 831 { CR_ZD1211B_AIFS_CTL1, 0x00280028 },
848 { CR_ZD1211B_AIFS_CTL2, 0x008C003C }, 832 { CR_ZD1211B_AIFS_CTL2, 0x008C003C },
849 { CR_ZD1211B_TXOP, 0x01800824 }, 833 { CR_ZD1211B_TXOP, 0x01800824 },
834 { CR_RX_THRESHOLD, 0x000c0eff, },
835 };
836
837 dev_dbg_f(zd_chip_dev(chip), "\n");
838 ZD_ASSERT(mutex_is_locked(&chip->mutex));
839 return zd_iowrite32a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
840}
841
842static int hw_init_hmac(struct zd_chip *chip)
843{
844 int r;
845 static const struct zd_ioreq32 ioreqs[] = {
846 { CR_ACK_TIMEOUT_EXT, 0x20 },
847 { CR_ADDA_MBIAS_WARMTIME, 0x30000808 },
850 { CR_SNIFFER_ON, 0 }, 848 { CR_SNIFFER_ON, 0 },
851 { CR_RX_FILTER, STA_RX_FILTER }, 849 { CR_RX_FILTER, STA_RX_FILTER },
852 { CR_GROUP_HASH_P1, 0x00 }, 850 { CR_GROUP_HASH_P1, 0x00 },
@@ -861,25 +859,16 @@ static int zd1211b_hw_init_hmac(struct zd_chip *chip)
861 { CR_RX_PE_DELAY, 0x70 }, 859 { CR_RX_PE_DELAY, 0x70 },
862 { CR_PS_CTRL, 0x10000000 }, 860 { CR_PS_CTRL, 0x10000000 },
863 { CR_RTS_CTS_RATE, 0x02030203 }, 861 { CR_RTS_CTS_RATE, 0x02030203 },
864 { CR_RX_THRESHOLD, 0x000c0eff, },
865 { CR_AFTER_PNP, 0x1 }, 862 { CR_AFTER_PNP, 0x1 },
866 { CR_WEP_PROTECT, 0x114 }, 863 { CR_WEP_PROTECT, 0x114 },
864 { CR_IFS_VALUE, IFS_VALUE_DEFAULT },
867 }; 865 };
868 866
869 int r;
870
871 dev_dbg_f(zd_chip_dev(chip), "\n");
872 ZD_ASSERT(mutex_is_locked(&chip->mutex)); 867 ZD_ASSERT(mutex_is_locked(&chip->mutex));
873 r = zd_iowrite32a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); 868 r = zd_iowrite32a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
874 if (r) { 869 if (r)
875 dev_dbg_f(zd_chip_dev(chip), 870 return r;
876 "error in zd_iowrite32a_locked. Error number %d\n", r);
877 }
878 return r;
879}
880 871
881static int hw_init_hmac(struct zd_chip *chip)
882{
883 return chip->is_zd1211b ? 872 return chip->is_zd1211b ?
884 zd1211b_hw_init_hmac(chip) : zd1211_hw_init_hmac(chip); 873 zd1211b_hw_init_hmac(chip) : zd1211_hw_init_hmac(chip);
885} 874}
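After this split, only the registers that genuinely differ between the two chips (retry limits, TX power, AIFS/TXOP, RX threshold) stay in the per-chip helpers; everything common, now including CR_IFS_VALUE, lives in the shared table. Assuming hw_init() still reaches hw_init_hmac() as before, the resulting flow is roughly:

	/* hw_init()
	 *   -> hw_init_hmac()      writes the shared table (CR_IFS_VALUE
	 *        |                  included, see the next hunk)
	 *        -> zd1211_hw_init_hmac() or zd1211b_hw_init_hmac()
	 *   -> set_beacon_interval(chip, 100)
	 */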
@@ -974,16 +963,14 @@ static int hw_init(struct zd_chip *chip)
974 if (r) 963 if (r)
975 return r; 964 return r;
976 965
977 /* Although the vendor driver defaults to a different value during
978 * init, it overwrites the IFS value with the following every time
979 * the channel changes. We should aim to be more intelligent... */
980 r = zd_iowrite32_locked(chip, IFS_VALUE_DEFAULT, CR_IFS_VALUE);
981 if (r)
982 return r;
983
984 return set_beacon_interval(chip, 100); 966 return set_beacon_interval(chip, 100);
985} 967}
986 968
969static zd_addr_t fw_reg_addr(struct zd_chip *chip, u16 offset)
970{
971 return (zd_addr_t)((u16)chip->fw_regs_base + offset);
972}
973
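fw_reg_addr() replaces the old compile-time FW_REG() cookies: firmware registers are now plain offsets from a base address that read_fw_regs_offset() (added further down) reads out of the device during init. A typical read, mirroring print_fw_version() below; it is valid only once chip->fw_regs_base has been set:

	u16 version;
	int r;

	r = zd_ioread16_locked(chip, &version,
	                       fw_reg_addr(chip, FW_REG_FIRMWARE_VER));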
987#ifdef DEBUG 974#ifdef DEBUG
988static int dump_cr(struct zd_chip *chip, const zd_addr_t addr, 975static int dump_cr(struct zd_chip *chip, const zd_addr_t addr,
989 const char *addr_string) 976 const char *addr_string)
@@ -1018,9 +1005,11 @@ static int test_init(struct zd_chip *chip)
1018 1005
1019static void dump_fw_registers(struct zd_chip *chip) 1006static void dump_fw_registers(struct zd_chip *chip)
1020{ 1007{
1021 static const zd_addr_t addr[4] = { 1008 const zd_addr_t addr[4] = {
1022 FW_FIRMWARE_VER, FW_USB_SPEED, FW_FIX_TX_RATE, 1009 fw_reg_addr(chip, FW_REG_FIRMWARE_VER),
1023 FW_LINK_STATUS 1010 fw_reg_addr(chip, FW_REG_USB_SPEED),
1011 fw_reg_addr(chip, FW_REG_FIX_TX_RATE),
1012 fw_reg_addr(chip, FW_REG_LED_LINK_STATUS),
1024 }; 1013 };
1025 1014
1026 int r; 1015 int r;
@@ -1046,7 +1035,8 @@ static int print_fw_version(struct zd_chip *chip)
1046 int r; 1035 int r;
1047 u16 version; 1036 u16 version;
1048 1037
1049 r = zd_ioread16_locked(chip, &version, FW_FIRMWARE_VER); 1038 r = zd_ioread16_locked(chip, &version,
1039 fw_reg_addr(chip, FW_REG_FIRMWARE_VER));
1050 if (r) 1040 if (r)
1051 return r; 1041 return r;
1052 1042
@@ -1126,6 +1116,22 @@ int zd_chip_disable_hwint(struct zd_chip *chip)
1126 return r; 1116 return r;
1127} 1117}
1128 1118
1119static int read_fw_regs_offset(struct zd_chip *chip)
1120{
1121 int r;
1122
1123 ZD_ASSERT(mutex_is_locked(&chip->mutex));
 1124	r = zd_ioread16_locked(chip, (u16 *)&chip->fw_regs_base,
1125 FWRAW_REGS_ADDR);
1126 if (r)
1127 return r;
1128 dev_dbg_f(zd_chip_dev(chip), "fw_regs_base: %#06hx\n",
1129 (u16)chip->fw_regs_base);
1130
1131 return 0;
1132}
1133
1134
1129int zd_chip_init_hw(struct zd_chip *chip, u8 device_type) 1135int zd_chip_init_hw(struct zd_chip *chip, u8 device_type)
1130{ 1136{
1131 int r; 1137 int r;
@@ -1145,7 +1151,7 @@ int zd_chip_init_hw(struct zd_chip *chip, u8 device_type)
1145 if (r) 1151 if (r)
1146 goto out; 1152 goto out;
1147 1153
1148 r = zd_usb_init_hw(&chip->usb); 1154 r = read_fw_regs_offset(chip);
1149 if (r) 1155 if (r)
1150 goto out; 1156 goto out;
1151 1157
@@ -1325,15 +1331,15 @@ u8 zd_chip_get_channel(struct zd_chip *chip)
1325 1331
1326int zd_chip_control_leds(struct zd_chip *chip, enum led_status status) 1332int zd_chip_control_leds(struct zd_chip *chip, enum led_status status)
1327{ 1333{
1328 static const zd_addr_t a[] = { 1334 const zd_addr_t a[] = {
1329 FW_LINK_STATUS, 1335 fw_reg_addr(chip, FW_REG_LED_LINK_STATUS),
1330 CR_LED, 1336 CR_LED,
1331 }; 1337 };
1332 1338
1333 int r; 1339 int r;
1334 u16 v[ARRAY_SIZE(a)]; 1340 u16 v[ARRAY_SIZE(a)];
1335 struct zd_ioreq16 ioreqs[ARRAY_SIZE(a)] = { 1341 struct zd_ioreq16 ioreqs[ARRAY_SIZE(a)] = {
1336 [0] = { FW_LINK_STATUS }, 1342 [0] = { fw_reg_addr(chip, FW_REG_LED_LINK_STATUS) },
1337 [1] = { CR_LED }, 1343 [1] = { CR_LED },
1338 }; 1344 };
1339 u16 other_led; 1345 u16 other_led;
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.h b/drivers/net/wireless/zd1211rw/zd_chip.h
index a4e3cee9b59d..b07569e391ee 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.h
+++ b/drivers/net/wireless/zd1211rw/zd_chip.h
@@ -18,7 +18,6 @@
18#ifndef _ZD_CHIP_H 18#ifndef _ZD_CHIP_H
19#define _ZD_CHIP_H 19#define _ZD_CHIP_H
20 20
21#include "zd_types.h"
22#include "zd_rf.h" 21#include "zd_rf.h"
23#include "zd_usb.h" 22#include "zd_usb.h"
24 23
@@ -27,6 +26,37 @@
27 * adds a processor for handling the USB protocol. 26 * adds a processor for handling the USB protocol.
28 */ 27 */
29 28
29/* Address space */
30enum {
31 /* CONTROL REGISTERS */
32 CR_START = 0x9000,
33
34
35 /* FIRMWARE */
36 FW_START = 0xee00,
37
38
39 /* EEPROM */
40 E2P_START = 0xf800,
41 E2P_LEN = 0x800,
42
43 /* EEPROM layout */
44 E2P_LOAD_CODE_LEN = 0xe, /* base 0xf800 */
45 E2P_LOAD_VECT_LEN = 0x9, /* base 0xf80e */
46 /* E2P_DATA indexes into this */
47 E2P_DATA_LEN = 0x7e, /* base 0xf817 */
48 E2P_BOOT_CODE_LEN = 0x760, /* base 0xf895 */
49 E2P_INTR_VECT_LEN = 0xb, /* base 0xfff5 */
50
51 /* Some precomputed offsets into the EEPROM */
52 E2P_DATA_OFFSET = E2P_LOAD_CODE_LEN + E2P_LOAD_VECT_LEN,
53 E2P_BOOT_CODE_OFFSET = E2P_DATA_OFFSET + E2P_DATA_LEN,
54};
55
56#define CTL_REG(offset) ((zd_addr_t)(CR_START + (offset)))
57#define E2P_DATA(offset) ((zd_addr_t)(E2P_START + E2P_DATA_OFFSET + (offset)))
58#define FWRAW_DATA(offset) ((zd_addr_t)(FW_START + (offset)))
59
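Working one case through the layout above (all values from the enum):

	/* E2P_DATA_OFFSET      = 0xe  + 0x9  = 0x17  (data block at 0xf817)
	 * E2P_BOOT_CODE_OFFSET = 0x17 + 0x7e = 0x95  (boot code at 0xf895)
	 * E2P_DATA(0x04)       = 0xf800 + 0x17 + 0x04 = 0xf81b,
	 * which is E2P_MAC_ADDR_P1 further down in this header.
	 */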
30/* 8-bit hardware registers */ 60/* 8-bit hardware registers */
31#define CR0 CTL_REG(0x0000) 61#define CR0 CTL_REG(0x0000)
32#define CR1 CTL_REG(0x0004) 62#define CR1 CTL_REG(0x0004)
@@ -302,7 +332,7 @@
302 332
303#define CR_MAX_PHY_REG 255 333#define CR_MAX_PHY_REG 255
304 334
305/* Taken from the ZYDAS driver, not all of them are relevant for the ZSD1211 335/* Taken from the ZYDAS driver, not all of them are relevant for the ZD1211
306 * driver. 336 * driver.
307 */ 337 */
308 338
@@ -594,81 +624,71 @@
594/* 624/*
595 * Upper 16 bit contains the regulatory domain. 625 * Upper 16 bit contains the regulatory domain.
596 */ 626 */
597#define E2P_SUBID E2P_REG(0x00) 627#define E2P_SUBID E2P_DATA(0x00)
598#define E2P_POD E2P_REG(0x02) 628#define E2P_POD E2P_DATA(0x02)
599#define E2P_MAC_ADDR_P1 E2P_REG(0x04) 629#define E2P_MAC_ADDR_P1 E2P_DATA(0x04)
600#define E2P_MAC_ADDR_P2 E2P_REG(0x06) 630#define E2P_MAC_ADDR_P2 E2P_DATA(0x06)
601#define E2P_PWR_CAL_VALUE1 E2P_REG(0x08) 631#define E2P_PWR_CAL_VALUE1 E2P_DATA(0x08)
602#define E2P_PWR_CAL_VALUE2 E2P_REG(0x0a) 632#define E2P_PWR_CAL_VALUE2 E2P_DATA(0x0a)
603#define E2P_PWR_CAL_VALUE3 E2P_REG(0x0c) 633#define E2P_PWR_CAL_VALUE3 E2P_DATA(0x0c)
604#define E2P_PWR_CAL_VALUE4 E2P_REG(0x0e) 634#define E2P_PWR_CAL_VALUE4 E2P_DATA(0x0e)
605#define E2P_PWR_INT_VALUE1 E2P_REG(0x10) 635#define E2P_PWR_INT_VALUE1 E2P_DATA(0x10)
606#define E2P_PWR_INT_VALUE2 E2P_REG(0x12) 636#define E2P_PWR_INT_VALUE2 E2P_DATA(0x12)
607#define E2P_PWR_INT_VALUE3 E2P_REG(0x14) 637#define E2P_PWR_INT_VALUE3 E2P_DATA(0x14)
608#define E2P_PWR_INT_VALUE4 E2P_REG(0x16) 638#define E2P_PWR_INT_VALUE4 E2P_DATA(0x16)
609 639
610/* Contains a bit for each allowed channel. It gives for Europe (ETSI 0x30) 640/* Contains a bit for each allowed channel. It gives for Europe (ETSI 0x30)
611 * also only 11 channels. */ 641 * also only 11 channels. */
612#define E2P_ALLOWED_CHANNEL E2P_REG(0x18) 642#define E2P_ALLOWED_CHANNEL E2P_DATA(0x18)
613 643
614#define E2P_PHY_REG E2P_REG(0x1a) 644#define E2P_PHY_REG E2P_DATA(0x1a)
615#define E2P_DEVICE_VER E2P_REG(0x20) 645#define E2P_DEVICE_VER E2P_DATA(0x20)
616#define E2P_36M_CAL_VALUE1 E2P_REG(0x28) 646#define E2P_36M_CAL_VALUE1 E2P_DATA(0x28)
617#define E2P_36M_CAL_VALUE2 E2P_REG(0x2a) 647#define E2P_36M_CAL_VALUE2 E2P_DATA(0x2a)
618#define E2P_36M_CAL_VALUE3 E2P_REG(0x2c) 648#define E2P_36M_CAL_VALUE3 E2P_DATA(0x2c)
619#define E2P_36M_CAL_VALUE4 E2P_REG(0x2e) 649#define E2P_36M_CAL_VALUE4 E2P_DATA(0x2e)
620#define E2P_11A_INT_VALUE1 E2P_REG(0x30) 650#define E2P_11A_INT_VALUE1 E2P_DATA(0x30)
621#define E2P_11A_INT_VALUE2 E2P_REG(0x32) 651#define E2P_11A_INT_VALUE2 E2P_DATA(0x32)
622#define E2P_11A_INT_VALUE3 E2P_REG(0x34) 652#define E2P_11A_INT_VALUE3 E2P_DATA(0x34)
623#define E2P_11A_INT_VALUE4 E2P_REG(0x36) 653#define E2P_11A_INT_VALUE4 E2P_DATA(0x36)
624#define E2P_48M_CAL_VALUE1 E2P_REG(0x38) 654#define E2P_48M_CAL_VALUE1 E2P_DATA(0x38)
625#define E2P_48M_CAL_VALUE2 E2P_REG(0x3a) 655#define E2P_48M_CAL_VALUE2 E2P_DATA(0x3a)
626#define E2P_48M_CAL_VALUE3 E2P_REG(0x3c) 656#define E2P_48M_CAL_VALUE3 E2P_DATA(0x3c)
627#define E2P_48M_CAL_VALUE4 E2P_REG(0x3e) 657#define E2P_48M_CAL_VALUE4 E2P_DATA(0x3e)
628#define E2P_48M_INT_VALUE1 E2P_REG(0x40) 658#define E2P_48M_INT_VALUE1 E2P_DATA(0x40)
629#define E2P_48M_INT_VALUE2 E2P_REG(0x42) 659#define E2P_48M_INT_VALUE2 E2P_DATA(0x42)
630#define E2P_48M_INT_VALUE3 E2P_REG(0x44) 660#define E2P_48M_INT_VALUE3 E2P_DATA(0x44)
631#define E2P_48M_INT_VALUE4 E2P_REG(0x46) 661#define E2P_48M_INT_VALUE4 E2P_DATA(0x46)
632#define E2P_54M_CAL_VALUE1 E2P_REG(0x48) /* ??? */ 662#define E2P_54M_CAL_VALUE1 E2P_DATA(0x48) /* ??? */
633#define E2P_54M_CAL_VALUE2 E2P_REG(0x4a) 663#define E2P_54M_CAL_VALUE2 E2P_DATA(0x4a)
634#define E2P_54M_CAL_VALUE3 E2P_REG(0x4c) 664#define E2P_54M_CAL_VALUE3 E2P_DATA(0x4c)
635#define E2P_54M_CAL_VALUE4 E2P_REG(0x4e) 665#define E2P_54M_CAL_VALUE4 E2P_DATA(0x4e)
636#define E2P_54M_INT_VALUE1 E2P_REG(0x50) 666#define E2P_54M_INT_VALUE1 E2P_DATA(0x50)
637#define E2P_54M_INT_VALUE2 E2P_REG(0x52) 667#define E2P_54M_INT_VALUE2 E2P_DATA(0x52)
638#define E2P_54M_INT_VALUE3 E2P_REG(0x54) 668#define E2P_54M_INT_VALUE3 E2P_DATA(0x54)
639#define E2P_54M_INT_VALUE4 E2P_REG(0x56) 669#define E2P_54M_INT_VALUE4 E2P_DATA(0x56)
640 670
641/* All 16 bit values */ 671/* This word contains the base address of the FW_REG_ registers below */
642#define FW_FIRMWARE_VER FW_REG(0) 672#define FWRAW_REGS_ADDR FWRAW_DATA(0x1d)
643/* non-zero if USB high speed connection */ 673
644#define FW_USB_SPEED FW_REG(1) 674/* All 16 bit values, offset from the address in FWRAW_REGS_ADDR */
645#define FW_FIX_TX_RATE FW_REG(2) 675enum {
646/* Seems to be able to control LEDs over the firmware */ 676 FW_REG_FIRMWARE_VER = 0,
647#define FW_LINK_STATUS FW_REG(3) 677 /* non-zero if USB high speed connection */
648#define FW_SOFT_RESET FW_REG(4) 678 FW_REG_USB_SPEED = 1,
649#define FW_FLASH_CHK FW_REG(5) 679 FW_REG_FIX_TX_RATE = 2,
680 /* Seems to be able to control LEDs over the firmware */
681 FW_REG_LED_LINK_STATUS = 3,
682 FW_REG_SOFT_RESET = 4,
683 FW_REG_FLASH_CHK = 5,
684};
650 685
686/* Values for FW_LINK_STATUS */
651#define FW_LINK_OFF 0x0 687#define FW_LINK_OFF 0x0
652#define FW_LINK_TX 0x1 688#define FW_LINK_TX 0x1
653/* 0x2 - link led on? */ 689/* 0x2 - link led on? */
654 690
655enum { 691enum {
656 CR_BASE_OFFSET = 0x9000,
657 FW_START_OFFSET = 0xee00,
658 FW_BASE_ADDR_OFFSET = FW_START_OFFSET + 0x1d,
659 EEPROM_START_OFFSET = 0xf800,
660 EEPROM_SIZE = 0x800, /* words */
661 LOAD_CODE_SIZE = 0xe, /* words */
662 LOAD_VECT_SIZE = 0x10000 - 0xfff7, /* words */
663 EEPROM_REGS_OFFSET = LOAD_CODE_SIZE + LOAD_VECT_SIZE,
664 EEPROM_REGS_SIZE = 0x7e, /* words */
665 E2P_BASE_OFFSET = EEPROM_START_OFFSET +
666 EEPROM_REGS_OFFSET,
667};
668
669#define FW_REG_TABLE_ADDR USB_ADDR(FW_START_OFFSET + 0x1d)
670
671enum {
672 /* indices for ofdm_cal_values */ 692 /* indices for ofdm_cal_values */
673 OFDM_36M_INDEX = 0, 693 OFDM_36M_INDEX = 0,
674 OFDM_48M_INDEX = 1, 694 OFDM_48M_INDEX = 1,
@@ -679,6 +699,8 @@ struct zd_chip {
679 struct zd_usb usb; 699 struct zd_usb usb;
680 struct zd_rf rf; 700 struct zd_rf rf;
681 struct mutex mutex; 701 struct mutex mutex;
702 /* Base address of FW_REG_ registers */
703 zd_addr_t fw_regs_base;
682 u8 e2p_mac[ETH_ALEN]; 704 u8 e2p_mac[ETH_ALEN];
683 /* EepSetPoint in the vendor driver */ 705 /* EepSetPoint in the vendor driver */
684 u8 pwr_cal_values[E2P_CHANNEL_COUNT]; 706 u8 pwr_cal_values[E2P_CHANNEL_COUNT];
diff --git a/drivers/net/wireless/zd1211rw/zd_def.h b/drivers/net/wireless/zd1211rw/zd_def.h
index fb22f62cf1f3..deb99d1eaa77 100644
--- a/drivers/net/wireless/zd1211rw/zd_def.h
+++ b/drivers/net/wireless/zd1211rw/zd_def.h
@@ -23,6 +23,8 @@
23#include <linux/device.h> 23#include <linux/device.h>
24#include <linux/kernel.h> 24#include <linux/kernel.h>
25 25
26typedef u16 __nocast zd_addr_t;
27
26#define dev_printk_f(level, dev, fmt, args...) \ 28#define dev_printk_f(level, dev, fmt, args...) \
27 dev_printk(level, dev, "%s() " fmt, __func__, ##args) 29 dev_printk(level, dev, "%s() " fmt, __func__, ##args)
28 30
diff --git a/drivers/net/wireless/zd1211rw/zd_ieee80211.h b/drivers/net/wireless/zd1211rw/zd_ieee80211.h
index 26b8298dff8c..c4f36d39642b 100644
--- a/drivers/net/wireless/zd1211rw/zd_ieee80211.h
+++ b/drivers/net/wireless/zd1211rw/zd_ieee80211.h
@@ -2,7 +2,6 @@
2#define _ZD_IEEE80211_H 2#define _ZD_IEEE80211_H
3 3
4#include <net/ieee80211.h> 4#include <net/ieee80211.h>
5#include "zd_types.h"
6 5
7/* Additional definitions from the standards. 6/* Additional definitions from the standards.
8 */ 7 */
diff --git a/drivers/net/wireless/zd1211rw/zd_rf.h b/drivers/net/wireless/zd1211rw/zd_rf.h
index 676b3734f1ed..a57732eb69e1 100644
--- a/drivers/net/wireless/zd1211rw/zd_rf.h
+++ b/drivers/net/wireless/zd1211rw/zd_rf.h
@@ -18,8 +18,6 @@
18#ifndef _ZD_RF_H 18#ifndef _ZD_RF_H
19#define _ZD_RF_H 19#define _ZD_RF_H
20 20
21#include "zd_types.h"
22
23#define UW2451_RF 0x2 21#define UW2451_RF 0x2
24#define UCHIP_RF 0x3 22#define UCHIP_RF 0x3
25#define AL2230_RF 0x4 23#define AL2230_RF 0x4
diff --git a/drivers/net/wireless/zd1211rw/zd_types.h b/drivers/net/wireless/zd1211rw/zd_types.h
deleted file mode 100644
index 0155a1584ed3..000000000000
--- a/drivers/net/wireless/zd1211rw/zd_types.h
+++ /dev/null
@@ -1,71 +0,0 @@
1/* zd_types.h
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2 of the License, or
6 * (at your option) any later version.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
16 */
17
18#ifndef _ZD_TYPES_H
19#define _ZD_TYPES_H
20
21#include <linux/types.h>
22
23/* We have three register spaces mapped into the overall USB address space of
24 * 64K words (16-bit values). There is the control register space of
25 * double-word registers, the eeprom register space and the firmware register
26 * space. The control register space is byte mapped, the others are word
27 * mapped.
28 *
29 * For that reason, we are using byte offsets for control registers and word
30 * offsets for everything else.
31 */
32
33typedef u32 __nocast zd_addr_t;
34
35enum {
36 ADDR_BASE_MASK = 0xff000000,
37 ADDR_OFFSET_MASK = 0x0000ffff,
38 ADDR_ZERO_MASK = 0x00ff0000,
39 NULL_BASE = 0x00000000,
40 USB_BASE = 0x01000000,
41 CR_BASE = 0x02000000,
42 CR_MAX_OFFSET = 0x0b30,
43 E2P_BASE = 0x03000000,
44 E2P_MAX_OFFSET = 0x007e,
45 FW_BASE = 0x04000000,
46 FW_MAX_OFFSET = 0x0005,
47};
48
49#define ZD_ADDR_BASE(addr) ((u32)(addr) & ADDR_BASE_MASK)
50#define ZD_OFFSET(addr) ((u32)(addr) & ADDR_OFFSET_MASK)
51
52#define ZD_ADDR(base, offset) \
53 ((zd_addr_t)(((base) & ADDR_BASE_MASK) | ((offset) & ADDR_OFFSET_MASK)))
54
55#define ZD_NULL_ADDR ((zd_addr_t)0)
56#define USB_REG(offset) ZD_ADDR(USB_BASE, offset) /* word addressing */
57#define CTL_REG(offset) ZD_ADDR(CR_BASE, offset) /* byte addressing */
58#define E2P_REG(offset) ZD_ADDR(E2P_BASE, offset) /* word addressing */
59#define FW_REG(offset) ZD_ADDR(FW_BASE, offset) /* word addressing */
60
61static inline zd_addr_t zd_inc_word(zd_addr_t addr)
62{
63 u32 base = ZD_ADDR_BASE(addr);
64 u32 offset = ZD_OFFSET(addr);
65
66 offset += base == CR_BASE ? 2 : 1;
67
68 return base | offset;
69}
70
71#endif /* _ZD_TYPES_H */
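The deleted header is the key to the rest of the series: zd_addr_t used to be a 32-bit cookie whose top byte selected a register space, and usb_addr() translated it into a real device address on every I/O. After this patch zd_addr_t is a plain __nocast u16 holding the device address itself, so the translation layer disappears:

	/* Before: 32-bit cookie, resolved at I/O time.
	 *   E2P_REG(0x04)  == 0x03000004  ->  usb_addr()  ->  0xf81b
	 * After: the macro yields the 16-bit device address directly.
	 *   E2P_DATA(0x04) == 0xf81b
	 * Only the firmware block still needs runtime translation,
	 * now handled by fw_reg_addr() and chip->fw_regs_base. */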
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 605e96e74057..75ef55624d7f 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -58,6 +58,10 @@ static struct usb_device_id usb_ids[] = {
58 { USB_DEVICE(0x079b, 0x0062), .driver_info = DEVICE_ZD1211B }, 58 { USB_DEVICE(0x079b, 0x0062), .driver_info = DEVICE_ZD1211B },
59 { USB_DEVICE(0x1582, 0x6003), .driver_info = DEVICE_ZD1211B }, 59 { USB_DEVICE(0x1582, 0x6003), .driver_info = DEVICE_ZD1211B },
60 { USB_DEVICE(0x050d, 0x705c), .driver_info = DEVICE_ZD1211B }, 60 { USB_DEVICE(0x050d, 0x705c), .driver_info = DEVICE_ZD1211B },
61 { USB_DEVICE(0x083a, 0x4505), .driver_info = DEVICE_ZD1211B },
62 { USB_DEVICE(0x0471, 0x1236), .driver_info = DEVICE_ZD1211B },
63 { USB_DEVICE(0x13b1, 0x0024), .driver_info = DEVICE_ZD1211B },
64 { USB_DEVICE(0x0586, 0x340f), .driver_info = DEVICE_ZD1211B },
61 /* "Driverless" devices that need ejecting */ 65 /* "Driverless" devices that need ejecting */
62 { USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER }, 66 { USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER },
63 {} 67 {}
@@ -73,96 +77,6 @@ MODULE_DEVICE_TABLE(usb, usb_ids);
73#define FW_ZD1211_PREFIX "zd1211/zd1211_" 77#define FW_ZD1211_PREFIX "zd1211/zd1211_"
74#define FW_ZD1211B_PREFIX "zd1211/zd1211b_" 78#define FW_ZD1211B_PREFIX "zd1211/zd1211b_"
75 79
76/* register address handling */
77
78#ifdef DEBUG
79static int check_addr(struct zd_usb *usb, zd_addr_t addr)
80{
81 u32 base = ZD_ADDR_BASE(addr);
82 u32 offset = ZD_OFFSET(addr);
83
84 if ((u32)addr & ADDR_ZERO_MASK)
85 goto invalid_address;
86 switch (base) {
87 case USB_BASE:
88 break;
89 case CR_BASE:
90 if (offset > CR_MAX_OFFSET) {
91 dev_dbg(zd_usb_dev(usb),
92 "CR offset %#010x larger than"
93 " CR_MAX_OFFSET %#10x\n",
94 offset, CR_MAX_OFFSET);
95 goto invalid_address;
96 }
97 if (offset & 1) {
98 dev_dbg(zd_usb_dev(usb),
99 "CR offset %#010x is not a multiple of 2\n",
100 offset);
101 goto invalid_address;
102 }
103 break;
104 case E2P_BASE:
105 if (offset > E2P_MAX_OFFSET) {
106 dev_dbg(zd_usb_dev(usb),
107 "E2P offset %#010x larger than"
108 " E2P_MAX_OFFSET %#010x\n",
109 offset, E2P_MAX_OFFSET);
110 goto invalid_address;
111 }
112 break;
113 case FW_BASE:
114 if (!usb->fw_base_offset) {
115 dev_dbg(zd_usb_dev(usb),
116 "ERROR: fw base offset has not been set\n");
117 return -EAGAIN;
118 }
119 if (offset > FW_MAX_OFFSET) {
120 dev_dbg(zd_usb_dev(usb),
121 "FW offset %#10x is larger than"
122 " FW_MAX_OFFSET %#010x\n",
123 offset, FW_MAX_OFFSET);
124 goto invalid_address;
125 }
126 break;
127 default:
128 dev_dbg(zd_usb_dev(usb),
129 "address has unsupported base %#010x\n", addr);
130 goto invalid_address;
131 }
132
133 return 0;
134invalid_address:
135 dev_dbg(zd_usb_dev(usb),
136 "ERROR: invalid address: %#010x\n", addr);
137 return -EINVAL;
138}
139#endif /* DEBUG */
140
141static u16 usb_addr(struct zd_usb *usb, zd_addr_t addr)
142{
143 u32 base;
144 u16 offset;
145
146 base = ZD_ADDR_BASE(addr);
147 offset = ZD_OFFSET(addr);
148
149 ZD_ASSERT(check_addr(usb, addr) == 0);
150
151 switch (base) {
152 case CR_BASE:
153 offset += CR_BASE_OFFSET;
154 break;
155 case E2P_BASE:
156 offset += E2P_BASE_OFFSET;
157 break;
158 case FW_BASE:
159 offset += usb->fw_base_offset;
160 break;
161 }
162
163 return offset;
164}
165
166/* USB device initialization */ 80/* USB device initialization */
167 81
168static int request_fw_file( 82static int request_fw_file(
@@ -295,14 +209,13 @@ static int handle_version_mismatch(struct usb_device *udev, u8 device_type,
295 if (r) 209 if (r)
296 goto error; 210 goto error;
297 211
298 r = upload_code(udev, ur_fw->data, ur_fw->size, FW_START_OFFSET, 212 r = upload_code(udev, ur_fw->data, ur_fw->size, FW_START, REBOOT);
299 REBOOT);
300 if (r) 213 if (r)
301 goto error; 214 goto error;
302 215
303 offset = ((EEPROM_REGS_OFFSET + EEPROM_REGS_SIZE) * sizeof(u16)); 216 offset = (E2P_BOOT_CODE_OFFSET * sizeof(u16));
304 r = upload_code(udev, ub_fw->data + offset, ub_fw->size - offset, 217 r = upload_code(udev, ub_fw->data + offset, ub_fw->size - offset,
305 E2P_BASE_OFFSET + EEPROM_REGS_SIZE, REBOOT); 218 E2P_START + E2P_BOOT_CODE_OFFSET, REBOOT);
306 219
307 /* At this point, the vendor driver downloads the whole firmware 220 /* At this point, the vendor driver downloads the whole firmware
308 * image, hacks around with version IDs, and uploads it again, 221 * image, hacks around with version IDs, and uploads it again,
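The new offset falls straight out of the EEPROM layout enum in zd_chip.h:

	/* offset = E2P_BOOT_CODE_OFFSET * sizeof(u16)
	 *        = 0x95 * 2 = 0x12a bytes skipped in the image;
	 * the remainder is uploaded starting at device word
	 * E2P_START + E2P_BOOT_CODE_OFFSET = 0xf800 + 0x95 = 0xf895. */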
@@ -331,7 +244,7 @@ static int upload_firmware(struct usb_device *udev, u8 device_type)
331 if (r) 244 if (r)
332 goto error; 245 goto error;
333 246
334 fw_bcdDevice = get_word(ub_fw->data, EEPROM_REGS_OFFSET); 247 fw_bcdDevice = get_word(ub_fw->data, E2P_DATA_OFFSET);
335 248
336 if (fw_bcdDevice != bcdDevice) { 249 if (fw_bcdDevice != bcdDevice) {
337 dev_info(&udev->dev, 250 dev_info(&udev->dev,
@@ -357,8 +270,7 @@ static int upload_firmware(struct usb_device *udev, u8 device_type)
357 if (r) 270 if (r)
358 goto error; 271 goto error;
359 272
360 r = upload_code(udev, uph_fw->data, uph_fw->size, FW_START_OFFSET, 273 r = upload_code(udev, uph_fw->data, uph_fw->size, FW_START, REBOOT);
361 REBOOT);
362 if (r) { 274 if (r) {
363 dev_err(&udev->dev, 275 dev_err(&udev->dev,
364 "Could not upload firmware code uph. Error number %d\n", 276 "Could not upload firmware code uph. Error number %d\n",
@@ -858,7 +770,7 @@ static inline void init_usb_interrupt(struct zd_usb *usb)
858 spin_lock_init(&intr->lock); 770 spin_lock_init(&intr->lock);
859 intr->interval = int_urb_interval(zd_usb_to_usbdev(usb)); 771 intr->interval = int_urb_interval(zd_usb_to_usbdev(usb));
860 init_completion(&intr->read_regs.completion); 772 init_completion(&intr->read_regs.completion);
861 intr->read_regs.cr_int_addr = cpu_to_le16(usb_addr(usb, CR_INTERRUPT)); 773 intr->read_regs.cr_int_addr = cpu_to_le16((u16)CR_INTERRUPT);
862} 774}
863 775
864static inline void init_usb_rx(struct zd_usb *usb) 776static inline void init_usb_rx(struct zd_usb *usb)
@@ -890,22 +802,6 @@ void zd_usb_init(struct zd_usb *usb, struct net_device *netdev,
890 init_usb_rx(usb); 802 init_usb_rx(usb);
891} 803}
892 804
893int zd_usb_init_hw(struct zd_usb *usb)
894{
895 int r;
896 struct zd_chip *chip = zd_usb_to_chip(usb);
897
898 ZD_ASSERT(mutex_is_locked(&chip->mutex));
899 r = zd_ioread16_locked(chip, &usb->fw_base_offset,
900 USB_REG((u16)FW_BASE_ADDR_OFFSET));
901 if (r)
902 return r;
903 dev_dbg_f(zd_usb_dev(usb), "fw_base_offset: %#06hx\n",
904 usb->fw_base_offset);
905
906 return 0;
907}
908
909void zd_usb_clear(struct zd_usb *usb) 805void zd_usb_clear(struct zd_usb *usb)
910{ 806{
911 usb_set_intfdata(usb->intf, NULL); 807 usb_set_intfdata(usb->intf, NULL);
@@ -1253,7 +1149,7 @@ int zd_usb_ioread16v(struct zd_usb *usb, u16 *values,
1253 return -ENOMEM; 1149 return -ENOMEM;
1254 req->id = cpu_to_le16(USB_REQ_READ_REGS); 1150 req->id = cpu_to_le16(USB_REQ_READ_REGS);
1255 for (i = 0; i < count; i++) 1151 for (i = 0; i < count; i++)
1256 req->addr[i] = cpu_to_le16(usb_addr(usb, addresses[i])); 1152 req->addr[i] = cpu_to_le16((u16)addresses[i]);
1257 1153
1258 udev = zd_usb_to_usbdev(usb); 1154 udev = zd_usb_to_usbdev(usb);
1259 prepare_read_regs_int(usb); 1155 prepare_read_regs_int(usb);
@@ -1318,7 +1214,7 @@ int zd_usb_iowrite16v(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
1318 req->id = cpu_to_le16(USB_REQ_WRITE_REGS); 1214 req->id = cpu_to_le16(USB_REQ_WRITE_REGS);
1319 for (i = 0; i < count; i++) { 1215 for (i = 0; i < count; i++) {
1320 struct reg_data *rw = &req->reg_writes[i]; 1216 struct reg_data *rw = &req->reg_writes[i];
1321 rw->addr = cpu_to_le16(usb_addr(usb, ioreqs[i].addr)); 1217 rw->addr = cpu_to_le16((u16)ioreqs[i].addr);
1322 rw->value = cpu_to_le16(ioreqs[i].value); 1218 rw->value = cpu_to_le16(ioreqs[i].value);
1323 } 1219 }
1324 1220
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.h b/drivers/net/wireless/zd1211rw/zd_usb.h
index 317d37c36679..506ea6a74393 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.h
+++ b/drivers/net/wireless/zd1211rw/zd_usb.h
@@ -25,7 +25,6 @@
25#include <linux/usb.h> 25#include <linux/usb.h>
26 26
27#include "zd_def.h" 27#include "zd_def.h"
28#include "zd_types.h"
29 28
30enum devicetype { 29enum devicetype {
31 DEVICE_ZD1211 = 0, 30 DEVICE_ZD1211 = 0,
@@ -181,15 +180,14 @@ struct zd_usb_tx {
181 spinlock_t lock; 180 spinlock_t lock;
182}; 181};
183 182
184/* Contains the usb parts. The structure doesn't require a lock, because intf 183/* Contains the usb parts. The structure doesn't require a lock because intf
185 * and fw_base_offset, will not be changed after initialization. 184 * will not be changed after initialization.
186 */ 185 */
187struct zd_usb { 186struct zd_usb {
188 struct zd_usb_interrupt intr; 187 struct zd_usb_interrupt intr;
189 struct zd_usb_rx rx; 188 struct zd_usb_rx rx;
190 struct zd_usb_tx tx; 189 struct zd_usb_tx tx;
191 struct usb_interface *intf; 190 struct usb_interface *intf;
192 u16 fw_base_offset;
193}; 191};
194 192
195#define zd_usb_dev(usb) (&usb->intf->dev) 193#define zd_usb_dev(usb) (&usb->intf->dev)
diff --git a/drivers/pci/hotplug/Kconfig b/drivers/pci/hotplug/Kconfig
index adce4204d87d..be92695a7833 100644
--- a/drivers/pci/hotplug/Kconfig
+++ b/drivers/pci/hotplug/Kconfig
@@ -145,15 +145,6 @@ config HOTPLUG_PCI_SHPC
145 145
146 When in doubt, say N. 146 When in doubt, say N.
147 147
148config HOTPLUG_PCI_SHPC_POLL_EVENT_MODE
149 bool "Use polling mechanism for hot-plug events (for testing purpose)"
150 depends on HOTPLUG_PCI_SHPC
151 help
152 Say Y here if you want to use the polling mechanism for hot-plug
153 events for early platform testing.
154
155 When in doubt, say N.
156
157config HOTPLUG_PCI_RPA 148config HOTPLUG_PCI_RPA
158 tristate "RPA PCI Hotplug driver" 149 tristate "RPA PCI Hotplug driver"
159 depends on HOTPLUG_PCI && PPC_PSERIES && PPC64 && !HOTPLUG_PCI_FAKE 150 depends on HOTPLUG_PCI && PPC_PSERIES && PPC64 && !HOTPLUG_PCI_FAKE
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index bd1faebf61a0..fca978fb158e 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -773,13 +773,13 @@ static int get_gsi_base(acpi_handle handle, u32 *gsi_base)
773 goto out; 773 goto out;
774 774
775 table = obj->buffer.pointer; 775 table = obj->buffer.pointer;
776 switch (((acpi_table_entry_header *)table)->type) { 776 switch (((struct acpi_subtable_header *)table)->type) {
777 case ACPI_MADT_IOSAPIC: 777 case ACPI_MADT_TYPE_IO_SAPIC:
778 *gsi_base = ((struct acpi_table_iosapic *)table)->global_irq_base; 778 *gsi_base = ((struct acpi_madt_io_sapic *)table)->global_irq_base;
779 result = 0; 779 result = 0;
780 break; 780 break;
781 case ACPI_MADT_IOAPIC: 781 case ACPI_MADT_TYPE_IO_APIC:
782 *gsi_base = ((struct acpi_table_ioapic *)table)->global_irq_base; 782 *gsi_base = ((struct acpi_madt_io_apic *)table)->global_irq_base;
783 result = 0; 783 result = 0;
784 break; 784 break;
785 default: 785 default:
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 4fb12fcda563..d19fcae8a7c0 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -44,15 +44,20 @@ extern int pciehp_poll_time;
44extern int pciehp_debug; 44extern int pciehp_debug;
45extern int pciehp_force; 45extern int pciehp_force;
46 46
47/*#define dbg(format, arg...) do { if (pciehp_debug) printk(KERN_DEBUG "%s: " format, MY_NAME , ## arg); } while (0)*/ 47#define dbg(format, arg...) \
48#define dbg(format, arg...) do { if (pciehp_debug) printk("%s: " format, MY_NAME , ## arg); } while (0) 48 do { \
49#define err(format, arg...) printk(KERN_ERR "%s: " format, MY_NAME , ## arg) 49 if (pciehp_debug) \
50#define info(format, arg...) printk(KERN_INFO "%s: " format, MY_NAME , ## arg) 50 printk("%s: " format, MY_NAME , ## arg); \
51#define warn(format, arg...) printk(KERN_WARNING "%s: " format, MY_NAME , ## arg) 51 } while (0)
52 52#define err(format, arg...) \
53 printk(KERN_ERR "%s: " format, MY_NAME , ## arg)
54#define info(format, arg...) \
55 printk(KERN_INFO "%s: " format, MY_NAME , ## arg)
56#define warn(format, arg...) \
57 printk(KERN_WARNING "%s: " format, MY_NAME , ## arg)
53 58
59#define SLOT_NAME_SIZE 10
54struct slot { 60struct slot {
55 struct slot *next;
56 u8 bus; 61 u8 bus;
57 u8 device; 62 u8 device;
58 u32 number; 63 u32 number;
@@ -63,6 +68,8 @@ struct slot {
63 struct hpc_ops *hpc_ops; 68 struct hpc_ops *hpc_ops;
64 struct hotplug_slot *hotplug_slot; 69 struct hotplug_slot *hotplug_slot;
65 struct list_head slot_list; 70 struct list_head slot_list;
71 char name[SLOT_NAME_SIZE];
72 unsigned long last_emi_toggle;
66}; 73};
67 74
68struct event_info { 75struct event_info {
@@ -70,34 +77,15 @@ struct event_info {
70 u8 hp_slot; 77 u8 hp_slot;
71}; 78};
72 79
73typedef u8(*php_intr_callback_t) (u8 hp_slot, void *instance_id);
74
75struct php_ctlr_state_s {
76 struct php_ctlr_state_s *pnext;
77 struct pci_dev *pci_dev;
78 unsigned int irq;
79 unsigned long flags; /* spinlock's */
80 u32 slot_device_offset;
81 u32 num_slots;
82 struct timer_list int_poll_timer; /* Added for poll event */
83 php_intr_callback_t attention_button_callback;
84 php_intr_callback_t switch_change_callback;
85 php_intr_callback_t presence_change_callback;
86 php_intr_callback_t power_fault_callback;
87 void *callback_instance_id;
88 struct ctrl_reg *creg; /* Ptr to controller register space */
89};
90
91#define MAX_EVENTS 10 80#define MAX_EVENTS 10
92struct controller { 81struct controller {
93 struct controller *next; 82 struct controller *next;
94 struct mutex crit_sect; /* critical section mutex */ 83 struct mutex crit_sect; /* critical section mutex */
95 struct mutex ctrl_lock; /* controller lock */ 84 struct mutex ctrl_lock; /* controller lock */
96 struct php_ctlr_state_s *hpc_ctlr_handle; /* HPC controller handle */
97 int num_slots; /* Number of slots on ctlr */ 85 int num_slots; /* Number of slots on ctlr */
98 int slot_num_inc; /* 1 or -1 */ 86 int slot_num_inc; /* 1 or -1 */
99 struct pci_dev *pci_dev; 87 struct pci_dev *pci_dev;
100 struct pci_bus *pci_bus; 88 struct list_head slot_list;
101 struct event_info event_queue[MAX_EVENTS]; 89 struct event_info event_queue[MAX_EVENTS];
102 struct slot *slot; 90 struct slot *slot;
103 struct hpc_ops *hpc_ops; 91 struct hpc_ops *hpc_ops;
@@ -112,6 +100,8 @@ struct controller {
112 u8 ctrlcap; 100 u8 ctrlcap;
113 u16 vendor_id; 101 u16 vendor_id;
114 u8 cap_base; 102 u8 cap_base;
103 struct timer_list poll_timer;
104 volatile int cmd_busy;
115}; 105};
116 106
117#define INT_BUTTON_IGNORE 0 107#define INT_BUTTON_IGNORE 0
@@ -131,8 +121,6 @@ struct controller {
131#define POWERON_STATE 3 121#define POWERON_STATE 3
132#define POWEROFF_STATE 4 122#define POWEROFF_STATE 4
133 123
134#define PCI_TO_PCI_BRIDGE_CLASS 0x00060400
135
136/* Error messages */ 124/* Error messages */
137#define INTERLOCK_OPEN 0x00000002 125#define INTERLOCK_OPEN 0x00000002
138#define ADD_NOT_SUPPORTED 0x00000003 126#define ADD_NOT_SUPPORTED 0x00000003
@@ -144,10 +132,6 @@ struct controller {
144#define WRONG_BUS_FREQUENCY 0x0000000D 132#define WRONG_BUS_FREQUENCY 0x0000000D
145#define POWER_FAILURE 0x0000000E 133#define POWER_FAILURE 0x0000000E
146 134
147#define REMOVE_NOT_SUPPORTED 0x00000003
148
149#define DISABLE_CARD 1
150
151/* Field definitions in Slot Capabilities Register */ 135/* Field definitions in Slot Capabilities Register */
152#define ATTN_BUTTN_PRSN 0x00000001 136#define ATTN_BUTTN_PRSN 0x00000001
153#define PWR_CTRL_PRSN 0x00000002 137#define PWR_CTRL_PRSN 0x00000002
@@ -155,6 +139,7 @@ struct controller {
155#define ATTN_LED_PRSN 0x00000008 139#define ATTN_LED_PRSN 0x00000008
156#define PWR_LED_PRSN 0x00000010 140#define PWR_LED_PRSN 0x00000010
157#define HP_SUPR_RM_SUP 0x00000020 141#define HP_SUPR_RM_SUP 0x00000020
142#define EMI_PRSN 0x00020000
158 143
159#define ATTN_BUTTN(cap) (cap & ATTN_BUTTN_PRSN) 144#define ATTN_BUTTN(cap) (cap & ATTN_BUTTN_PRSN)
160#define POWER_CTRL(cap) (cap & PWR_CTRL_PRSN) 145#define POWER_CTRL(cap) (cap & PWR_CTRL_PRSN)
@@ -162,130 +147,65 @@ struct controller {
162#define ATTN_LED(cap) (cap & ATTN_LED_PRSN) 147#define ATTN_LED(cap) (cap & ATTN_LED_PRSN)
163#define PWR_LED(cap) (cap & PWR_LED_PRSN) 148#define PWR_LED(cap) (cap & PWR_LED_PRSN)
164#define HP_SUPR_RM(cap) (cap & HP_SUPR_RM_SUP) 149#define HP_SUPR_RM(cap) (cap & HP_SUPR_RM_SUP)
165 150#define EMI(cap) (cap & EMI_PRSN)
166/* 151
167 * error Messages 152extern int pciehp_event_start_thread(void);
168 */ 153extern void pciehp_event_stop_thread(void);
169#define msg_initialization_err "Initialization failure, error=%d\n" 154extern int pciehp_enable_slot(struct slot *slot);
170#define msg_button_on "PCI slot #%s - powering on due to button press.\n" 155extern int pciehp_disable_slot(struct slot *slot);
171#define msg_button_off "PCI slot #%s - powering off due to button press.\n" 156extern u8 pciehp_handle_attention_button(u8 hp_slot, struct controller *ctrl);
172#define msg_button_cancel "PCI slot #%s - action canceled due to button press.\n" 157extern u8 pciehp_handle_switch_change(u8 hp_slot, struct controller *ctrl);
173#define msg_button_ignore "PCI slot #%s - button press ignored. (action in progress...)\n" 158extern u8 pciehp_handle_presence_change(u8 hp_slot, struct controller *ctrl);
174 159extern u8 pciehp_handle_power_fault(u8 hp_slot, struct controller *ctrl);
175/* controller functions */ 160extern int pciehp_configure_device(struct slot *p_slot);
176extern int pciehp_event_start_thread (void); 161extern int pciehp_unconfigure_device(struct slot *p_slot);
177extern void pciehp_event_stop_thread (void); 162int pcie_init(struct controller *ctrl, struct pcie_device *dev);
178extern int pciehp_enable_slot (struct slot *slot);
179extern int pciehp_disable_slot (struct slot *slot);
180
181extern u8 pciehp_handle_attention_button (u8 hp_slot, void *inst_id);
182extern u8 pciehp_handle_switch_change (u8 hp_slot, void *inst_id);
183extern u8 pciehp_handle_presence_change (u8 hp_slot, void *inst_id);
184extern u8 pciehp_handle_power_fault (u8 hp_slot, void *inst_id);
185/* extern void long_delay (int delay); */
186
187/* pci functions */
188extern int pciehp_configure_device (struct slot *p_slot);
189extern int pciehp_unconfigure_device (struct slot *p_slot);
190
191
192 163
193/* Global variables */ 164/* Global variables */
194extern struct controller *pciehp_ctrl_list; 165extern struct controller *pciehp_ctrl_list;
195 166
196/* Inline functions */
197
198static inline struct slot *pciehp_find_slot(struct controller *ctrl, u8 device) 167static inline struct slot *pciehp_find_slot(struct controller *ctrl, u8 device)
199{ 168{
200 struct slot *p_slot, *tmp_slot = NULL; 169 struct slot *slot;
201
202 p_slot = ctrl->slot;
203 170
204 while (p_slot && (p_slot->device != device)) { 171 list_for_each_entry(slot, &ctrl->slot_list, slot_list) {
205 tmp_slot = p_slot; 172 if (slot->device == device)
206 p_slot = p_slot->next; 173 return slot;
207 } 174 }
208 if (p_slot == NULL) {
209 err("ERROR: pciehp_find_slot device=0x%x\n", device);
210 p_slot = tmp_slot;
211 }
212
213 return p_slot;
214}
215
216static inline int wait_for_ctrl_irq(struct controller *ctrl)
217{
218 int retval = 0;
219
220 DECLARE_WAITQUEUE(wait, current);
221
222 add_wait_queue(&ctrl->queue, &wait);
223 if (!pciehp_poll_mode)
224 /* Sleep for up to 1 second */
225 msleep_interruptible(1000);
226 else
227 msleep_interruptible(2500);
228
229 remove_wait_queue(&ctrl->queue, &wait);
230 if (signal_pending(current))
231 retval = -EINTR;
232
233 return retval;
234}
235
236#define SLOT_NAME_SIZE 10
237 175
238static inline void make_slot_name(char *buffer, int buffer_size, struct slot *slot) 176 err("%s: slot (device=0x%x) not found\n", __FUNCTION__, device);
239{ 177 return NULL;
240 snprintf(buffer, buffer_size, "%04d_%04d", slot->bus, slot->number);
241} 178}
242 179
243enum php_ctlr_type {
244 PCI,
245 ISA,
246 ACPI
247};
248
249int pcie_init(struct controller *ctrl, struct pcie_device *dev);
250
251/* This has no meaning for PCI Express, as there is only 1 slot per port */
252int pcie_get_ctlr_slot_config(struct controller *ctrl,
253 int *num_ctlr_slots,
254 int *first_device_num,
255 int *physical_slot_num,
256 u8 *ctrlcap);
257
258struct hpc_ops { 180struct hpc_ops {
259 int (*power_on_slot) (struct slot *slot); 181 int (*power_on_slot)(struct slot *slot);
260 int (*power_off_slot) (struct slot *slot); 182 int (*power_off_slot)(struct slot *slot);
261 int (*get_power_status) (struct slot *slot, u8 *status); 183 int (*get_power_status)(struct slot *slot, u8 *status);
262 int (*get_attention_status) (struct slot *slot, u8 *status); 184 int (*get_attention_status)(struct slot *slot, u8 *status);
263 int (*set_attention_status) (struct slot *slot, u8 status); 185 int (*set_attention_status)(struct slot *slot, u8 status);
264 int (*get_latch_status) (struct slot *slot, u8 *status); 186 int (*get_latch_status)(struct slot *slot, u8 *status);
265 int (*get_adapter_status) (struct slot *slot, u8 *status); 187 int (*get_adapter_status)(struct slot *slot, u8 *status);
266 188 int (*get_emi_status)(struct slot *slot, u8 *status);
267 int (*get_max_bus_speed) (struct slot *slot, enum pci_bus_speed *speed); 189 int (*toggle_emi)(struct slot *slot);
268 int (*get_cur_bus_speed) (struct slot *slot, enum pci_bus_speed *speed); 190 int (*get_max_bus_speed)(struct slot *slot, enum pci_bus_speed *speed);
269 191 int (*get_cur_bus_speed)(struct slot *slot, enum pci_bus_speed *speed);
270 int (*get_max_lnk_width) (struct slot *slot, enum pcie_link_width *value); 192 int (*get_max_lnk_width)(struct slot *slot, enum pcie_link_width *val);
271 int (*get_cur_lnk_width) (struct slot *slot, enum pcie_link_width *value); 193 int (*get_cur_lnk_width)(struct slot *slot, enum pcie_link_width *val);
272 194 int (*query_power_fault)(struct slot *slot);
273 int (*query_power_fault) (struct slot *slot); 195 void (*green_led_on)(struct slot *slot);
274 void (*green_led_on) (struct slot *slot); 196 void (*green_led_off)(struct slot *slot);
275 void (*green_led_off) (struct slot *slot); 197 void (*green_led_blink)(struct slot *slot);
276 void (*green_led_blink) (struct slot *slot); 198 void (*release_ctlr)(struct controller *ctrl);
277 void (*release_ctlr) (struct controller *ctrl); 199 int (*check_lnk_status)(struct controller *ctrl);
278 int (*check_lnk_status) (struct controller *ctrl);
279}; 200};
280 201
281
282#ifdef CONFIG_ACPI 202#ifdef CONFIG_ACPI
283#include <acpi/acpi.h> 203#include <acpi/acpi.h>
284#include <acpi/acpi_bus.h> 204#include <acpi/acpi_bus.h>
285#include <acpi/actypes.h> 205#include <acpi/actypes.h>
286#include <linux/pci-acpi.h> 206#include <linux/pci-acpi.h>
287 207
288#define pciehp_get_hp_hw_control_from_firmware(dev) \ 208#define pciehp_get_hp_hw_control_from_firmware(dev) \
289 pciehp_acpi_get_hp_hw_control_from_firmware(dev) 209 pciehp_acpi_get_hp_hw_control_from_firmware(dev)
290static inline int pciehp_get_hp_params_from_firmware(struct pci_dev *dev, 210static inline int pciehp_get_hp_params_from_firmware(struct pci_dev *dev,
291 struct hotplug_params *hpp) 211 struct hotplug_params *hpp)
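Two structural changes above are worth noting: slots now hang off ctrl->slot_list through the standard list helpers instead of a hand-rolled ->next chain, and the slot name is embedded in struct slot (char name[SLOT_NAME_SIZE]) instead of being kmalloc'd separately. Any walk over a controller's slots then follows the usual pattern, as in the new pciehp_find_slot(); a sketch:

	struct slot *slot;

	/* Visit every slot registered on this controller. */
	list_for_each_entry(slot, &ctrl->slot_list, slot_list)
		dbg("slot %s: bus %x device %x\n",
		    slot->name, slot->bus, slot->device);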
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index f13f31323e85..a92eda6e02f6 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -34,6 +34,7 @@
34#include <linux/pci.h> 34#include <linux/pci.h>
35#include "pciehp.h" 35#include "pciehp.h"
36#include <linux/interrupt.h> 36#include <linux/interrupt.h>
37#include <linux/time.h>
37 38
38/* Global variables */ 39/* Global variables */
39int pciehp_debug; 40int pciehp_debug;
@@ -87,6 +88,95 @@ static struct hotplug_slot_ops pciehp_hotplug_slot_ops = {
87 .get_cur_bus_speed = get_cur_bus_speed, 88 .get_cur_bus_speed = get_cur_bus_speed,
88}; 89};
89 90
91/*
92 * Check the status of the Electro Mechanical Interlock (EMI)
93 */
94static int get_lock_status(struct hotplug_slot *hotplug_slot, u8 *value)
95{
96 struct slot *slot = hotplug_slot->private;
 97	return slot->hpc_ops->get_emi_status(slot, value);
98}
99
100/*
101 * sysfs interface for the Electro Mechanical Interlock (EMI)
102 * 1 == locked, 0 == unlocked
103 */
104static ssize_t lock_read_file(struct hotplug_slot *slot, char *buf)
105{
106 int retval;
107 u8 value;
108
109 retval = get_lock_status(slot, &value);
110 if (retval)
111 goto lock_read_exit;
 112	retval = sprintf(buf, "%d\n", value);
113
114lock_read_exit:
115 return retval;
116}
117
118/*
 119 * Change the status of the Electro Mechanical Interlock (EMI).
 120 * This is a toggle; at least one second must elapse
 121 * between toggles.
122 */
123static int set_lock_status(struct hotplug_slot *hotplug_slot, u8 status)
124{
125 struct slot *slot = hotplug_slot->private;
 126	int retval = -EINVAL;
127 u8 value;
128
129 mutex_lock(&slot->ctrl->crit_sect);
130
131 /* has it been >1 sec since our last toggle? */
132 if ((get_seconds() - slot->last_emi_toggle) < 1)
 133		goto set_lock_exit;
134
135 /* see what our current state is */
136 retval = get_lock_status(hotplug_slot, &value);
137 if (retval || (value == status))
138 goto set_lock_exit;
139
140 slot->hpc_ops->toggle_emi(slot);
141set_lock_exit:
142 mutex_unlock(&slot->ctrl->crit_sect);
 143	return retval;
144}
145
146/*
147 * sysfs interface which allows the user to toggle the Electro Mechanical
148 * Interlock. Valid values are either 0 or 1. 0 == unlock, 1 == lock
149 */
150static ssize_t lock_write_file(struct hotplug_slot *slot, const char *buf,
151 size_t count)
152{
153 unsigned long llock;
154 u8 lock;
155 int retval = 0;
156
157 llock = simple_strtoul(buf, NULL, 10);
158 lock = (u8)(llock & 0xff);
159
160 switch (lock) {
161 case 0:
162 case 1:
163 retval = set_lock_status(slot, lock);
164 break;
165 default:
 166		err("%d is an invalid lock value\n", lock);
167 retval = -EINVAL;
168 }
169 if (retval)
170 return retval;
171 return count;
172}
173
174static struct hotplug_slot_attribute hotplug_slot_attr_lock = {
175 .attr = {.name = "lock", .mode = S_IFREG | S_IRUGO | S_IWUSR},
176 .show = lock_read_file,
177 .store = lock_write_file
178};
179
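Because init_slots() in pciehp_core.c (below) creates the attribute per slot, the lock file appears under the slot's sysfs directory; slot names follow the "%04d_%04d" (bus, number) format. A hypothetical view, with the exact path depending on where the kernel registers hotplug slots:

	/* /sys/bus/pci/slots/0002_0005/lock
	 *   read:  "1" = locked, "0" = unlocked
	 *   write: "0" or "1" toggles the interlock when it differs from
	 *          the current state, rate-limited to one toggle per
	 *          second via slot->last_emi_toggle. */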
90/** 180/**
91 * release_slot - free up the memory used by a slot 181 * release_slot - free up the memory used by a slot
92 * @hotplug_slot: slot to free 182 * @hotplug_slot: slot to free
@@ -98,148 +188,108 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
98 dbg("%s - physical_slot = %s\n", __FUNCTION__, hotplug_slot->name); 188 dbg("%s - physical_slot = %s\n", __FUNCTION__, hotplug_slot->name);
99 189
100 kfree(slot->hotplug_slot->info); 190 kfree(slot->hotplug_slot->info);
101 kfree(slot->hotplug_slot->name);
102 kfree(slot->hotplug_slot); 191 kfree(slot->hotplug_slot);
103 kfree(slot); 192 kfree(slot);
104} 193}
105 194
195static void make_slot_name(struct slot *slot)
196{
197 snprintf(slot->hotplug_slot->name, SLOT_NAME_SIZE, "%04d_%04d",
198 slot->bus, slot->number);
199}
200
106static int init_slots(struct controller *ctrl) 201static int init_slots(struct controller *ctrl)
107{ 202{
108 struct slot *slot; 203 struct slot *slot;
109 struct hpc_ops *hpc_ops;
110 struct hotplug_slot *hotplug_slot; 204 struct hotplug_slot *hotplug_slot;
111 struct hotplug_slot_info *hotplug_slot_info; 205 struct hotplug_slot_info *info;
112 u8 number_of_slots; 206 int retval = -ENOMEM;
113 u8 slot_device; 207 int i;
114 u32 slot_number;
115 int result = -ENOMEM;
116 208
117 number_of_slots = ctrl->num_slots; 209 for (i = 0; i < ctrl->num_slots; i++) {
118 slot_device = ctrl->slot_device_offset;
119 slot_number = ctrl->first_slot;
120
121 while (number_of_slots) {
122 slot = kzalloc(sizeof(*slot), GFP_KERNEL); 210 slot = kzalloc(sizeof(*slot), GFP_KERNEL);
123 if (!slot) 211 if (!slot)
124 goto error; 212 goto error;
125 213
126 slot->hotplug_slot = 214 hotplug_slot = kzalloc(sizeof(*hotplug_slot), GFP_KERNEL);
127 kzalloc(sizeof(*(slot->hotplug_slot)), 215 if (!hotplug_slot)
128 GFP_KERNEL);
129 if (!slot->hotplug_slot)
130 goto error_slot; 216 goto error_slot;
131 hotplug_slot = slot->hotplug_slot; 217 slot->hotplug_slot = hotplug_slot;
132 218
133 hotplug_slot->info = 219 info = kzalloc(sizeof(*info), GFP_KERNEL);
134 kzalloc(sizeof(*(hotplug_slot->info)), 220 if (!info)
135 GFP_KERNEL);
136 if (!hotplug_slot->info)
137 goto error_hpslot; 221 goto error_hpslot;
138 hotplug_slot_info = hotplug_slot->info; 222 hotplug_slot->info = info;
139 hotplug_slot->name = kmalloc(SLOT_NAME_SIZE, GFP_KERNEL);
140 if (!hotplug_slot->name)
141 goto error_info;
142 223
143 slot->ctrl = ctrl; 224 hotplug_slot->name = slot->name;
144 slot->bus = ctrl->slot_bus;
145 slot->device = slot_device;
146 slot->hpc_ops = hpc_ops = ctrl->hpc_ops;
147 225
226 slot->hp_slot = i;
227 slot->ctrl = ctrl;
228 slot->bus = ctrl->pci_dev->subordinate->number;
229 slot->device = ctrl->slot_device_offset + i;
230 slot->hpc_ops = ctrl->hpc_ops;
148 slot->number = ctrl->first_slot; 231 slot->number = ctrl->first_slot;
149 slot->hp_slot = slot_device - ctrl->slot_device_offset;
150 232
151 /* register this slot with the hotplug pci core */ 233 /* register this slot with the hotplug pci core */
152 hotplug_slot->private = slot; 234 hotplug_slot->private = slot;
153 hotplug_slot->release = &release_slot; 235 hotplug_slot->release = &release_slot;
154 make_slot_name(hotplug_slot->name, SLOT_NAME_SIZE, slot); 236 make_slot_name(slot);
155 hotplug_slot->ops = &pciehp_hotplug_slot_ops; 237 hotplug_slot->ops = &pciehp_hotplug_slot_ops;
156 238
157 hpc_ops->get_power_status(slot, 239 get_power_status(hotplug_slot, &info->power_status);
158 &(hotplug_slot_info->power_status)); 240 get_attention_status(hotplug_slot, &info->attention_status);
159 hpc_ops->get_attention_status(slot, 241 get_latch_status(hotplug_slot, &info->latch_status);
160 &(hotplug_slot_info->attention_status)); 242 get_adapter_status(hotplug_slot, &info->adapter_status);
161 hpc_ops->get_latch_status(slot,
162 &(hotplug_slot_info->latch_status));
163 hpc_ops->get_adapter_status(slot,
164 &(hotplug_slot_info->adapter_status));
165 243
166 dbg("Registering bus=%x dev=%x hp_slot=%x sun=%x " 244 dbg("Registering bus=%x dev=%x hp_slot=%x sun=%x "
167 "slot_device_offset=%x\n", 245 "slot_device_offset=%x\n", slot->bus, slot->device,
168 slot->bus, slot->device, slot->hp_slot, slot->number, 246 slot->hp_slot, slot->number, ctrl->slot_device_offset);
169 ctrl->slot_device_offset); 247 retval = pci_hp_register(hotplug_slot);
170 result = pci_hp_register(hotplug_slot); 248 if (retval) {
171 if (result) { 249 err ("pci_hp_register failed with error %d\n", retval);
172 err ("pci_hp_register failed with error %d\n", result); 250 goto error_info;
173 goto error_name; 251 }
252 /* create additional sysfs entries */
253 if (EMI(ctrl->ctrlcap)) {
254 retval = sysfs_create_file(&hotplug_slot->kobj,
255 &hotplug_slot_attr_lock.attr);
256 if (retval) {
257 pci_hp_deregister(hotplug_slot);
258 err("cannot create additional sysfs entries\n");
259 goto error_info;
260 }
174 } 261 }
175 262
176 slot->next = ctrl->slot; 263 list_add(&slot->slot_list, &ctrl->slot_list);
177 ctrl->slot = slot;
178
179 number_of_slots--;
180 slot_device++;
181 slot_number += ctrl->slot_num_inc;
182 } 264 }
183 265
184 return 0; 266 return 0;
185
186error_name:
187 kfree(hotplug_slot->name);
188error_info: 267error_info:
189 kfree(hotplug_slot_info); 268 kfree(info);
190error_hpslot: 269error_hpslot:
191 kfree(hotplug_slot); 270 kfree(hotplug_slot);
192error_slot: 271error_slot:
193 kfree(slot); 272 kfree(slot);
194error: 273error:
195 return result; 274 return retval;
196}
197
198
199static int cleanup_slots (struct controller * ctrl)
200{
201 struct slot *old_slot, *next_slot;
202
203 old_slot = ctrl->slot;
204 ctrl->slot = NULL;
205
206 while (old_slot) {
207 next_slot = old_slot->next;
208 pci_hp_deregister (old_slot->hotplug_slot);
209 old_slot = next_slot;
210 }
211
212
213 return(0);
214} 275}
215 276
216static int get_ctlr_slot_config(struct controller *ctrl) 277static void cleanup_slots(struct controller *ctrl)
217{ 278{
218 int num_ctlr_slots; /* Not needed; PCI Express has 1 slot per port*/ 279 struct list_head *tmp;
219 int first_device_num; /* Not needed */ 280 struct list_head *next;
220 int physical_slot_num; 281 struct slot *slot;
221 u8 ctrlcap;
222 int rc;
223 282
224 rc = pcie_get_ctlr_slot_config(ctrl, &num_ctlr_slots, &first_device_num, &physical_slot_num, &ctrlcap); 283 list_for_each_safe(tmp, next, &ctrl->slot_list) {
225 if (rc) { 284 slot = list_entry(tmp, struct slot, slot_list);
226 err("%s: get_ctlr_slot_config fail for b:d (%x:%x)\n", __FUNCTION__, ctrl->bus, ctrl->device); 285 list_del(&slot->slot_list);
227 return (-1); 286 if (EMI(ctrl->ctrlcap))
287 sysfs_remove_file(&slot->hotplug_slot->kobj,
288 &hotplug_slot_attr_lock.attr);
289 pci_hp_deregister(slot->hotplug_slot);
228 } 290 }
229
230 ctrl->num_slots = num_ctlr_slots; /* PCI Express has 1 slot per port */
231 ctrl->slot_device_offset = first_device_num;
232 ctrl->first_slot = physical_slot_num;
233 ctrl->ctrlcap = ctrlcap;
234
235 dbg("%s: bus(0x%x) num_slot(0x%x) 1st_dev(0x%x) psn(0x%x) ctrlcap(%x) for b:d (%x:%x)\n",
236 __FUNCTION__, ctrl->slot_bus, num_ctlr_slots, first_device_num, physical_slot_num, ctrlcap,
237 ctrl->bus, ctrl->device);
238
239 return (0);
240} 291}
241 292
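cleanup_slots() above uses the untyped list_for_each_safe() plus list_entry(); the typed iterator expresses the same teardown more compactly (an equivalent sketch, not part of this patch):

	struct slot *slot, *next;

	list_for_each_entry_safe(slot, next, &ctrl->slot_list, slot_list) {
		list_del(&slot->slot_list);
		if (EMI(ctrl->ctrlcap))
			sysfs_remove_file(&slot->hotplug_slot->kobj,
					  &hotplug_slot_attr_lock.attr);
		pci_hp_deregister(slot->hotplug_slot);
	}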
242
243/* 293/*
244 * set_attention_status - Turns the Amber LED for a slot on, off or blink 294 * set_attention_status - Turns the Amber LED for a slot on, off or blink
245 */ 295 */
@@ -378,8 +428,6 @@ static int pciehp_probe(struct pcie_device *dev, const struct pcie_port_service_
378 int rc; 428 int rc;
379 struct controller *ctrl; 429 struct controller *ctrl;
380 struct slot *t_slot; 430 struct slot *t_slot;
381 int first_device_num = 0 ; /* first PCI device number supported by this PCIE */
382 int num_ctlr_slots; /* number of slots supported by this HPC */
383 u8 value; 431 u8 value;
384 struct pci_dev *pdev; 432 struct pci_dev *pdev;
385 433
@@ -388,6 +436,7 @@ static int pciehp_probe(struct pcie_device *dev, const struct pcie_port_service_
388 err("%s : out of memory\n", __FUNCTION__); 436 err("%s : out of memory\n", __FUNCTION__);
389 goto err_out_none; 437 goto err_out_none;
390 } 438 }
439 INIT_LIST_HEAD(&ctrl->slot_list);
391 440
392 pdev = dev->port; 441 pdev = dev->port;
393 ctrl->pci_dev = pdev; 442 ctrl->pci_dev = pdev;
@@ -400,13 +449,6 @@ static int pciehp_probe(struct pcie_device *dev, const struct pcie_port_service_
400 449
401 pci_set_drvdata(pdev, ctrl); 450 pci_set_drvdata(pdev, ctrl);
402 451
403 ctrl->pci_bus = kmalloc(sizeof(*ctrl->pci_bus), GFP_KERNEL);
404 if (!ctrl->pci_bus) {
405 err("%s: out of memory\n", __FUNCTION__);
406 rc = -ENOMEM;
407 goto err_out_unmap_mmio_region;
408 }
409 memcpy (ctrl->pci_bus, pdev->bus, sizeof (*ctrl->pci_bus));
410 ctrl->bus = pdev->bus->number; /* ctrl bus */ 452 ctrl->bus = pdev->bus->number; /* ctrl bus */
411 ctrl->slot_bus = pdev->subordinate->number; /* bus controlled by this HPC */ 453 ctrl->slot_bus = pdev->subordinate->number; /* bus controlled by this HPC */
412 454
@@ -415,26 +457,14 @@ static int pciehp_probe(struct pcie_device *dev, const struct pcie_port_service_
415 dbg("%s: ctrl bus=0x%x, device=%x, function=%x, irq=%x\n", __FUNCTION__, 457 dbg("%s: ctrl bus=0x%x, device=%x, function=%x, irq=%x\n", __FUNCTION__,
416 ctrl->bus, ctrl->device, ctrl->function, pdev->irq); 458 ctrl->bus, ctrl->device, ctrl->function, pdev->irq);
417 459
418 /*
419 * Save configuration headers for this and subordinate PCI buses
420 */
421
422 rc = get_ctlr_slot_config(ctrl);
423 if (rc) {
424 err(msg_initialization_err, rc);
425 goto err_out_free_ctrl_bus;
426 }
427 first_device_num = ctrl->slot_device_offset;
428 num_ctlr_slots = ctrl->num_slots;
429
430 /* Setup the slot information structures */ 460 /* Setup the slot information structures */
431 rc = init_slots(ctrl); 461 rc = init_slots(ctrl);
432 if (rc) { 462 if (rc) {
-433		err(msg_initialization_err, 6);
-434		goto err_out_free_ctrl_slot;
+463		err("%s: slot initialization failed\n", PCIE_MODULE_NAME);
+464		goto err_out_release_ctlr;
435 } 465 }
436 466
-437	t_slot = pciehp_find_slot(ctrl, first_device_num);
+467	t_slot = pciehp_find_slot(ctrl, ctrl->slot_device_offset);
438 468
439 /* Finish setting up the hot plug ctrl device */ 469 /* Finish setting up the hot plug ctrl device */
440 ctrl->next_event = 0; 470 ctrl->next_event = 0;
@@ -447,32 +477,18 @@ static int pciehp_probe(struct pcie_device *dev, const struct pcie_port_service_
447 pciehp_ctrl_list = ctrl; 477 pciehp_ctrl_list = ctrl;
448 } 478 }
449 479
450 /* Wait for exclusive access to hardware */
451 mutex_lock(&ctrl->ctrl_lock);
452
453 t_slot->hpc_ops->get_adapter_status(t_slot, &value); /* Check if slot is occupied */ 480 t_slot->hpc_ops->get_adapter_status(t_slot, &value); /* Check if slot is occupied */
454
455 if ((POWER_CTRL(ctrl->ctrlcap)) && !value) { 481 if ((POWER_CTRL(ctrl->ctrlcap)) && !value) {
456 rc = t_slot->hpc_ops->power_off_slot(t_slot); /* Power off slot if not occupied*/ 482 rc = t_slot->hpc_ops->power_off_slot(t_slot); /* Power off slot if not occupied*/
-457		if (rc) {
+483		if (rc)
458 /* Done with exclusive hardware access */
459 mutex_unlock(&ctrl->ctrl_lock);
460 goto err_out_free_ctrl_slot; 484 goto err_out_free_ctrl_slot;
461 } else
462 /* Wait for the command to complete */
463 wait_for_ctrl_irq (ctrl);
464 } 485 }
465 486
466 /* Done with exclusive hardware access */
467 mutex_unlock(&ctrl->ctrl_lock);
468
469 return 0; 487 return 0;
470 488
471err_out_free_ctrl_slot: 489err_out_free_ctrl_slot:
472 cleanup_slots(ctrl); 490 cleanup_slots(ctrl);
-473 err_out_free_ctrl_bus:
+491 err_out_release_ctlr:
474 kfree(ctrl->pci_bus);
475err_out_unmap_mmio_region:
476 ctrl->hpc_ops->release_ctlr(ctrl); 492 ctrl->hpc_ops->release_ctlr(ctrl);
477err_out_free_ctrl: 493err_out_free_ctrl:
478 kfree(ctrl); 494 kfree(ctrl);
@@ -506,8 +522,6 @@ static void __exit unload_pciehpd(void)
506 while (ctrl) { 522 while (ctrl) {
507 cleanup_slots(ctrl); 523 cleanup_slots(ctrl);
508 524
509 kfree (ctrl->pci_bus);
510
511 ctrl->hpc_ops->release_ctlr(ctrl); 525 ctrl->hpc_ops->release_ctlr(ctrl);
512 526
513 tctrl = ctrl; 527 tctrl = ctrl;
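The probe error path above keeps the kernel's goto-unwind convention: each failure jumps to the label that releases exactly what has been acquired so far, and the labels run in reverse acquisition order. A compact sketch of the convention, with hypothetical resource names standing in for the controller state:

#include <stdio.h>
#include <stdlib.h>

/* hypothetical resources; the labels unwind in reverse acquisition order */
static int probe(int fail_hw)
{
	int rc = -1;
	char *ctrl = malloc(16);
	if (!ctrl)
		goto err_out_none;
	char *slots = malloc(16);
	if (!slots)
		goto err_out_free_ctrl;
	if (fail_hw)			/* stand-in for a hardware setup failure */
		goto err_out_free_slots;
	free(slots);			/* success path */
	free(ctrl);
	return 0;

err_out_free_slots:
	free(slots);
err_out_free_ctrl:
	free(ctrl);
err_out_none:
	return rc;
}

int main(void)
{
	printf("probe(ok)   = %d\n", probe(0));
	printf("probe(fail) = %d\n", probe(1));
	return 0;
}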
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index 372c63e35aa9..4283ef56dbd9 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -48,9 +48,8 @@ static inline char *slot_name(struct slot *p_slot)
48 return p_slot->hotplug_slot->name; 48 return p_slot->hotplug_slot->name;
49} 49}
50 50
-51 u8 pciehp_handle_attention_button(u8 hp_slot, void *inst_id)
+51 u8 pciehp_handle_attention_button(u8 hp_slot, struct controller *ctrl)
52{ 52{
53 struct controller *ctrl = (struct controller *) inst_id;
54 struct slot *p_slot; 53 struct slot *p_slot;
55 u8 rc = 0; 54 u8 rc = 0;
56 u8 getstatus; 55 u8 getstatus;
@@ -101,9 +100,8 @@ u8 pciehp_handle_attention_button(u8 hp_slot, void *inst_id)
101 100
102} 101}
103 102
-104 u8 pciehp_handle_switch_change(u8 hp_slot, void *inst_id)
+103 u8 pciehp_handle_switch_change(u8 hp_slot, struct controller *ctrl)
105{ 104{
106 struct controller *ctrl = (struct controller *) inst_id;
107 struct slot *p_slot; 105 struct slot *p_slot;
108 u8 rc = 0; 106 u8 rc = 0;
109 u8 getstatus; 107 u8 getstatus;
@@ -143,9 +141,8 @@ u8 pciehp_handle_switch_change(u8 hp_slot, void *inst_id)
143 return rc; 141 return rc;
144} 142}
145 143
-146 u8 pciehp_handle_presence_change(u8 hp_slot, void *inst_id)
+144 u8 pciehp_handle_presence_change(u8 hp_slot, struct controller *ctrl)
147{ 145{
148 struct controller *ctrl = (struct controller *) inst_id;
149 struct slot *p_slot; 146 struct slot *p_slot;
150 u8 presence_save, rc = 0; 147 u8 presence_save, rc = 0;
151 struct event_info *taskInfo; 148 struct event_info *taskInfo;
@@ -187,9 +184,8 @@ u8 pciehp_handle_presence_change(u8 hp_slot, void *inst_id)
187 return rc; 184 return rc;
188} 185}
189 186
-190 u8 pciehp_handle_power_fault(u8 hp_slot, void *inst_id)
+187 u8 pciehp_handle_power_fault(u8 hp_slot, struct controller *ctrl)
191{ 188{
192 struct controller *ctrl = (struct controller *) inst_id;
193 struct slot *p_slot; 189 struct slot *p_slot;
194 u8 rc = 0; 190 u8 rc = 0;
195 struct event_info *taskInfo; 191 struct event_info *taskInfo;
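The handler signatures above trade the opaque void *inst_id, which every handler had to cast back, for a typed struct controller * that the compiler can check at each call site. A small sketch of the before/after shape, with types reduced for illustration:

#include <stdio.h>

struct controller { int bus; };

/* before: the context is a void * and must be cast back, unchecked */
static unsigned char handle_old(unsigned char hp_slot, void *inst_id)
{
	struct controller *ctrl = (struct controller *)inst_id;
	printf("old-style: slot %d on bus %d\n", hp_slot, ctrl->bus);
	return 0;
}

/* after: the context parameter carries its real type */
static unsigned char handle_new(unsigned char hp_slot, struct controller *ctrl)
{
	printf("new-style: slot %d on bus %d\n", hp_slot, ctrl->bus);
	return 0;
}

int main(void)
{
	struct controller ctrl = { .bus = 3 };
	handle_old(0, &ctrl);	/* compiles even if the wrong pointer is passed */
	handle_new(0, &ctrl);	/* wrong pointer type is now a compile error */
	return 0;
}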
@@ -233,35 +229,25 @@ u8 pciehp_handle_power_fault(u8 hp_slot, void *inst_id)
233 229
234static void set_slot_off(struct controller *ctrl, struct slot * pslot) 230static void set_slot_off(struct controller *ctrl, struct slot * pslot)
235{ 231{
236 /* Wait for exclusive access to hardware */
237 mutex_lock(&ctrl->ctrl_lock);
238
239 /* turn off slot, turn on Amber LED, turn off Green LED if supported*/ 232 /* turn off slot, turn on Amber LED, turn off Green LED if supported*/
240 if (POWER_CTRL(ctrl->ctrlcap)) { 233 if (POWER_CTRL(ctrl->ctrlcap)) {
241 if (pslot->hpc_ops->power_off_slot(pslot)) { 234 if (pslot->hpc_ops->power_off_slot(pslot)) {
-242			err("%s: Issue of Slot Power Off command failed\n", __FUNCTION__);
-243			mutex_unlock(&ctrl->ctrl_lock);
+235			err("%s: Issue of Slot Power Off command failed\n",
+236				__FUNCTION__);
244 return; 237 return;
245 } 238 }
246 wait_for_ctrl_irq (ctrl);
247 } 239 }
248 240
-249	if (PWR_LED(ctrl->ctrlcap)) {
+241	if (PWR_LED(ctrl->ctrlcap))
250 pslot->hpc_ops->green_led_off(pslot); 242 pslot->hpc_ops->green_led_off(pslot);
251 wait_for_ctrl_irq (ctrl);
252 }
253 243
254 if (ATTN_LED(ctrl->ctrlcap)) { 244 if (ATTN_LED(ctrl->ctrlcap)) {
255 if (pslot->hpc_ops->set_attention_status(pslot, 1)) { 245 if (pslot->hpc_ops->set_attention_status(pslot, 1)) {
-256			err("%s: Issue of Set Attention Led command failed\n", __FUNCTION__);
-257			mutex_unlock(&ctrl->ctrl_lock);
+246			err("%s: Issue of Set Attention Led command failed\n",
+247				__FUNCTION__);
258 return; 248 return;
259 } 249 }
260 wait_for_ctrl_irq (ctrl);
261 } 250 }
262
263 /* Done with exclusive hardware access */
264 mutex_unlock(&ctrl->ctrl_lock);
265} 251}
266 252
267/** 253/**
@@ -274,7 +260,7 @@ static void set_slot_off(struct controller *ctrl, struct slot * pslot)
274static int board_added(struct slot *p_slot) 260static int board_added(struct slot *p_slot)
275{ 261{
276 u8 hp_slot; 262 u8 hp_slot;
-277	int rc = 0;
+263	int retval = 0;
278 struct controller *ctrl = p_slot->ctrl; 264 struct controller *ctrl = p_slot->ctrl;
279 265
280 hp_slot = p_slot->device - ctrl->slot_device_offset; 266 hp_slot = p_slot->device - ctrl->slot_device_offset;
@@ -283,53 +269,38 @@ static int board_added(struct slot *p_slot)
283 __FUNCTION__, p_slot->device, 269 __FUNCTION__, p_slot->device,
284 ctrl->slot_device_offset, hp_slot); 270 ctrl->slot_device_offset, hp_slot);
285 271
286 /* Wait for exclusive access to hardware */
287 mutex_lock(&ctrl->ctrl_lock);
288
289 if (POWER_CTRL(ctrl->ctrlcap)) { 272 if (POWER_CTRL(ctrl->ctrlcap)) {
290 /* Power on slot */ 273 /* Power on slot */
-291		rc = p_slot->hpc_ops->power_on_slot(p_slot);
-292		if (rc) {
-293			mutex_unlock(&ctrl->ctrl_lock);
-294			return -1;
-295		}
+274		retval = p_slot->hpc_ops->power_on_slot(p_slot);
+275		if (retval)
+276			return retval;
296
297 /* Wait for the command to complete */
298 wait_for_ctrl_irq (ctrl);
299 } 277 }
300 278
-301	if (PWR_LED(ctrl->ctrlcap)) {
+279	if (PWR_LED(ctrl->ctrlcap))
302 p_slot->hpc_ops->green_led_blink(p_slot); 280 p_slot->hpc_ops->green_led_blink(p_slot);
303
304 /* Wait for the command to complete */
305 wait_for_ctrl_irq (ctrl);
306 }
307
308 /* Done with exclusive hardware access */
309 mutex_unlock(&ctrl->ctrl_lock);
310 281
311 /* Wait for ~1 second */ 282 /* Wait for ~1 second */
-312	wait_for_ctrl_irq (ctrl);
+283	msleep(1000);
313 284
314 /* Check link training status */ 285 /* Check link training status */
-315	rc = p_slot->hpc_ops->check_lnk_status(ctrl);
-316	if (rc) {
+286	retval = p_slot->hpc_ops->check_lnk_status(ctrl);
+287	if (retval) {
317 err("%s: Failed to check link status\n", __FUNCTION__); 288 err("%s: Failed to check link status\n", __FUNCTION__);
318 set_slot_off(ctrl, p_slot); 289 set_slot_off(ctrl, p_slot);
-319		return rc;
+290		return retval;
320 } 291 }
321 292
322 /* Check for a power fault */ 293 /* Check for a power fault */
323 if (p_slot->hpc_ops->query_power_fault(p_slot)) { 294 if (p_slot->hpc_ops->query_power_fault(p_slot)) {
324 dbg("%s: power fault detected\n", __FUNCTION__); 295 dbg("%s: power fault detected\n", __FUNCTION__);
-325		rc = POWER_FAILURE;
+296		retval = POWER_FAILURE;
326 goto err_exit; 297 goto err_exit;
327 } 298 }
328 299
-329	rc = pciehp_configure_device(p_slot);
-330	if (rc) {
+300	retval = pciehp_configure_device(p_slot);
+301	if (retval) {
331 err("Cannot add device 0x%x:%x\n", p_slot->bus, 302 err("Cannot add device 0x%x:%x\n", p_slot->bus,
332 p_slot->device); 303 p_slot->device);
333 goto err_exit; 304 goto err_exit;
334 } 305 }
335 306
@@ -338,26 +309,16 @@ static int board_added(struct slot *p_slot)
338 */ 309 */
339 if (pcie_mch_quirk) 310 if (pcie_mch_quirk)
340 pci_fixup_device(pci_fixup_final, ctrl->pci_dev); 311 pci_fixup_device(pci_fixup_final, ctrl->pci_dev);
-341	if (PWR_LED(ctrl->ctrlcap)) {
+312	if (PWR_LED(ctrl->ctrlcap))
342 /* Wait for exclusive access to hardware */
343 mutex_lock(&ctrl->ctrl_lock);
344
345 p_slot->hpc_ops->green_led_on(p_slot); 313 p_slot->hpc_ops->green_led_on(p_slot);
346 314
347 /* Wait for the command to complete */
348 wait_for_ctrl_irq (ctrl);
349
350 /* Done with exclusive hardware access */
351 mutex_unlock(&ctrl->ctrl_lock);
352 }
353 return 0; 315 return 0;
354 316
355err_exit: 317err_exit:
356 set_slot_off(ctrl, p_slot); 318 set_slot_off(ctrl, p_slot);
-357	return -1;
+319	return retval;
358} 320}
359 321
360
361/** 322/**
362 * remove_board - Turns off slot and LED's 323 * remove_board - Turns off slot and LED's
363 * 324 *
@@ -366,44 +327,32 @@ static int remove_board(struct slot *p_slot)
366{ 327{
367 u8 device; 328 u8 device;
368 u8 hp_slot; 329 u8 hp_slot;
-369	int rc;
+330	int retval = 0;
370 struct controller *ctrl = p_slot->ctrl; 331 struct controller *ctrl = p_slot->ctrl;
371 332
-372	if (pciehp_unconfigure_device(p_slot))
-373		return 1;
+333	retval = pciehp_unconfigure_device(p_slot);
+334	if (retval)
335 return retval;
374 336
375 device = p_slot->device; 337 device = p_slot->device;
376
377 hp_slot = p_slot->device - ctrl->slot_device_offset; 338 hp_slot = p_slot->device - ctrl->slot_device_offset;
378 p_slot = pciehp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset); 339 p_slot = pciehp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
379 340
380 dbg("In %s, hp_slot = %d\n", __FUNCTION__, hp_slot); 341 dbg("In %s, hp_slot = %d\n", __FUNCTION__, hp_slot);
381 342
382 /* Wait for exclusive access to hardware */
383 mutex_lock(&ctrl->ctrl_lock);
384
385 if (POWER_CTRL(ctrl->ctrlcap)) { 343 if (POWER_CTRL(ctrl->ctrlcap)) {
386 /* power off slot */ 344 /* power off slot */
-387		rc = p_slot->hpc_ops->power_off_slot(p_slot);
-388		if (rc) {
-389			err("%s: Issue of Slot Disable command failed\n", __FUNCTION__);
-390			mutex_unlock(&ctrl->ctrl_lock);
-391			return rc;
+345		retval = p_slot->hpc_ops->power_off_slot(p_slot);
+346		if (retval) {
+347			err("%s: Issue of Slot Disable command failed\n",
+348				__FUNCTION__);
+349			return retval;
392 } 350 }
393 /* Wait for the command to complete */
394 wait_for_ctrl_irq (ctrl);
395 } 351 }
396 352
-397	if (PWR_LED(ctrl->ctrlcap)) {
+353	if (PWR_LED(ctrl->ctrlcap))
398 /* turn off Green LED */ 354 /* turn off Green LED */
399 p_slot->hpc_ops->green_led_off(p_slot); 355 p_slot->hpc_ops->green_led_off(p_slot);
400
401 /* Wait for the command to complete */
402 wait_for_ctrl_irq (ctrl);
403 }
404
405 /* Done with exclusive hardware access */
406 mutex_unlock(&ctrl->ctrl_lock);
407 356
408 return 0; 357 return 0;
409} 358}
@@ -448,18 +397,10 @@ static void pciehp_pushbutton_thread(unsigned long slot)
448 dbg("%s: adding bus:device(%x:%x)\n", __FUNCTION__, 397 dbg("%s: adding bus:device(%x:%x)\n", __FUNCTION__,
449 p_slot->bus, p_slot->device); 398 p_slot->bus, p_slot->device);
450 399
-451		if (pciehp_enable_slot(p_slot) && PWR_LED(p_slot->ctrl->ctrlcap)) {
-452			/* Wait for exclusive access to hardware */
+400		if (pciehp_enable_slot(p_slot) &&
+401		    PWR_LED(p_slot->ctrl->ctrlcap))
453 mutex_lock(&p_slot->ctrl->ctrl_lock);
454
455 p_slot->hpc_ops->green_led_off(p_slot); 402 p_slot->hpc_ops->green_led_off(p_slot);
456 403
457 /* Wait for the command to complete */
458 wait_for_ctrl_irq (p_slot->ctrl);
459
460 /* Done with exclusive hardware access */
461 mutex_unlock(&p_slot->ctrl->ctrl_lock);
462 }
463 p_slot->state = STATIC_STATE; 404 p_slot->state = STATIC_STATE;
464 } 405 }
465 406
@@ -498,18 +439,10 @@ static void pciehp_surprise_rm_thread(unsigned long slot)
498 dbg("%s: adding bus:device(%x:%x)\n", 439 dbg("%s: adding bus:device(%x:%x)\n",
499 __FUNCTION__, p_slot->bus, p_slot->device); 440 __FUNCTION__, p_slot->bus, p_slot->device);
500 441
-501		if (pciehp_enable_slot(p_slot) && PWR_LED(p_slot->ctrl->ctrlcap)) {
-502			/* Wait for exclusive access to hardware */
+442		if (pciehp_enable_slot(p_slot) &&
+443		    PWR_LED(p_slot->ctrl->ctrlcap))
503 mutex_lock(&p_slot->ctrl->ctrl_lock);
504
505 p_slot->hpc_ops->green_led_off(p_slot); 444 p_slot->hpc_ops->green_led_off(p_slot);
506 445
507 /* Wait for the command to complete */
508 wait_for_ctrl_irq (p_slot->ctrl);
509
510 /* Done with exclusive hardware access */
511 mutex_unlock(&p_slot->ctrl->ctrl_lock);
512 }
513 p_slot->state = STATIC_STATE; 446 p_slot->state = STATIC_STATE;
514 } 447 }
515 448
@@ -620,46 +553,24 @@ static void interrupt_event_handler(struct controller *ctrl)
620 553
621 switch (p_slot->state) { 554 switch (p_slot->state) {
622 case BLINKINGOFF_STATE: 555 case BLINKINGOFF_STATE:
-623			/* Wait for exclusive access to hardware */
+556			if (PWR_LED(ctrl->ctrlcap))
624 mutex_lock(&ctrl->ctrl_lock);
625
626 if (PWR_LED(ctrl->ctrlcap)) {
627 p_slot->hpc_ops->green_led_on(p_slot); 557 p_slot->hpc_ops->green_led_on(p_slot);
628 /* Wait for the command to complete */
629 wait_for_ctrl_irq (ctrl);
630 }
631 if (ATTN_LED(ctrl->ctrlcap)) {
632 p_slot->hpc_ops->set_attention_status(p_slot, 0);
633 558
-634				/* Wait for the command to complete */
-635				wait_for_ctrl_irq (ctrl);
+559			if (ATTN_LED(ctrl->ctrlcap))
+560				p_slot->hpc_ops->set_attention_status(p_slot, 0);
636 }
637 /* Done with exclusive hardware access */
638 mutex_unlock(&ctrl->ctrl_lock);
639 break; 561 break;
640 case BLINKINGON_STATE: 562 case BLINKINGON_STATE:
-641			/* Wait for exclusive access to hardware */
+563			if (PWR_LED(ctrl->ctrlcap))
642 mutex_lock(&ctrl->ctrl_lock);
643
644 if (PWR_LED(ctrl->ctrlcap)) {
645 p_slot->hpc_ops->green_led_off(p_slot); 564 p_slot->hpc_ops->green_led_off(p_slot);
646 /* Wait for the command to complete */
647 wait_for_ctrl_irq (ctrl);
648 }
649 if (ATTN_LED(ctrl->ctrlcap)){
650 p_slot->hpc_ops->set_attention_status(p_slot, 0);
651 /* Wait for the command to complete */
652 wait_for_ctrl_irq (ctrl);
653 }
654 /* Done with exclusive hardware access */
655 mutex_unlock(&ctrl->ctrl_lock);
656 565
566 if (ATTN_LED(ctrl->ctrlcap))
567 p_slot->hpc_ops->set_attention_status(p_slot, 0);
657 break; 568 break;
658 default: 569 default:
659 warn("Not a valid state\n"); 570 warn("Not a valid state\n");
660 return; 571 return;
661 } 572 }
-662		info(msg_button_cancel, slot_name(p_slot));
+573		info("PCI slot #%s - action canceled due to button press.\n", slot_name(p_slot));
663 p_slot->state = STATIC_STATE; 574 p_slot->state = STATIC_STATE;
664 } 575 }
665 /* ***********Button Pressed (No action on 1st press...) */ 576 /* ***********Button Pressed (No action on 1st press...) */
@@ -672,34 +583,21 @@ static void interrupt_event_handler(struct controller *ctrl)
672 /* slot is on */ 583 /* slot is on */
673 dbg("slot is on\n"); 584 dbg("slot is on\n");
674 p_slot->state = BLINKINGOFF_STATE; 585 p_slot->state = BLINKINGOFF_STATE;
-675				info(msg_button_off, slot_name(p_slot));
+586				info("PCI slot #%s - powering off due to button press.\n", slot_name(p_slot));
676 } else { 587 } else {
677 /* slot is off */ 588 /* slot is off */
678 dbg("slot is off\n"); 589 dbg("slot is off\n");
679 p_slot->state = BLINKINGON_STATE; 590 p_slot->state = BLINKINGON_STATE;
-680				info(msg_button_on, slot_name(p_slot));
+591				info("PCI slot #%s - powering on due to button press.\n", slot_name(p_slot));
681 } 592 }
682 593
683 /* Wait for exclusive access to hardware */
684 mutex_lock(&ctrl->ctrl_lock);
685
686 /* blink green LED and turn off amber */ 594 /* blink green LED and turn off amber */
-687			if (PWR_LED(ctrl->ctrlcap)) {
+595			if (PWR_LED(ctrl->ctrlcap))
688 p_slot->hpc_ops->green_led_blink(p_slot); 596 p_slot->hpc_ops->green_led_blink(p_slot);
689 /* Wait for the command to complete */
690 wait_for_ctrl_irq (ctrl);
691 }
692 597
-693			if (ATTN_LED(ctrl->ctrlcap)) {
+598			if (ATTN_LED(ctrl->ctrlcap))
694 p_slot->hpc_ops->set_attention_status(p_slot, 0); 599 p_slot->hpc_ops->set_attention_status(p_slot, 0);
695 600
696 /* Wait for the command to complete */
697 wait_for_ctrl_irq (ctrl);
698 }
699
700 /* Done with exclusive hardware access */
701 mutex_unlock(&ctrl->ctrl_lock);
702
703 init_timer(&p_slot->task_event); 601 init_timer(&p_slot->task_event);
704 p_slot->task_event.expires = jiffies + 5 * HZ; /* 5 second delay */ 602 p_slot->task_event.expires = jiffies + 5 * HZ; /* 5 second delay */
705 p_slot->task_event.function = (void (*)(unsigned long)) pushbutton_helper_thread; 603 p_slot->task_event.function = (void (*)(unsigned long)) pushbutton_helper_thread;
@@ -712,21 +610,11 @@ static void interrupt_event_handler(struct controller *ctrl)
712 else if (ctrl->event_queue[loop].event_type == INT_POWER_FAULT) { 610 else if (ctrl->event_queue[loop].event_type == INT_POWER_FAULT) {
713 if (POWER_CTRL(ctrl->ctrlcap)) { 611 if (POWER_CTRL(ctrl->ctrlcap)) {
714 dbg("power fault\n"); 612 dbg("power fault\n");
-715				/* Wait for exclusive access to hardware */
+613				if (ATTN_LED(ctrl->ctrlcap))
716 mutex_lock(&ctrl->ctrl_lock);
717
718 if (ATTN_LED(ctrl->ctrlcap)) {
719 p_slot->hpc_ops->set_attention_status(p_slot, 1); 614 p_slot->hpc_ops->set_attention_status(p_slot, 1);
720 wait_for_ctrl_irq (ctrl);
721 }
722 615
-723				if (PWR_LED(ctrl->ctrlcap)) {
+616				if (PWR_LED(ctrl->ctrlcap))
724 p_slot->hpc_ops->green_led_off(p_slot); 617 p_slot->hpc_ops->green_led_off(p_slot);
725 wait_for_ctrl_irq (ctrl);
726 }
727
728 /* Done with exclusive hardware access */
729 mutex_unlock(&ctrl->ctrl_lock);
730 } 618 }
731 } 619 }
732 /***********SURPRISE REMOVAL********************/ 620 /***********SURPRISE REMOVAL********************/
@@ -754,7 +642,6 @@ static void interrupt_event_handler(struct controller *ctrl)
754 } 642 }
755} 643}
756 644
757
758int pciehp_enable_slot(struct slot *p_slot) 645int pciehp_enable_slot(struct slot *p_slot)
759{ 646{
760 u8 getstatus = 0; 647 u8 getstatus = 0;
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 25d3aadfddbf..fbc64aa2dd68 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -35,6 +35,7 @@
35#include <linux/timer.h> 35#include <linux/timer.h>
36#include <linux/pci.h> 36#include <linux/pci.h>
37#include <linux/interrupt.h> 37#include <linux/interrupt.h>
38#include <linux/time.h>
38 39
39#include "../pci.h" 40#include "../pci.h"
40#include "pciehp.h" 41#include "pciehp.h"
@@ -105,34 +106,30 @@ enum ctrl_offsets {
105 ROOTCTRL = offsetof(struct ctrl_reg, root_ctrl), 106 ROOTCTRL = offsetof(struct ctrl_reg, root_ctrl),
106 ROOTSTATUS = offsetof(struct ctrl_reg, root_status), 107 ROOTSTATUS = offsetof(struct ctrl_reg, root_status),
107}; 108};
-108 static int pcie_cap_base = 0;	/* Base of the PCI Express capability item structure */
-109
-110 #define PCIE_CAP_ID(cb)	( cb + PCIECAPID )
-111 #define NXT_CAP_PTR(cb)	( cb + NXTCAPPTR )
-112 #define CAP_REG(cb)	( cb + CAPREG )
-113 #define DEV_CAP(cb)	( cb + DEVCAP )
-114 #define DEV_CTRL(cb)	( cb + DEVCTRL )
-115 #define DEV_STATUS(cb)	( cb + DEVSTATUS )
-116 #define LNK_CAP(cb)	( cb + LNKCAP )
-117 #define LNK_CTRL(cb)	( cb + LNKCTRL )
-118 #define LNK_STATUS(cb)	( cb + LNKSTATUS )
-119 #define SLOT_CAP(cb)	( cb + SLOTCAP )
-120 #define SLOT_CTRL(cb)	( cb + SLOTCTRL )
-121 #define SLOT_STATUS(cb)	( cb + SLOTSTATUS )
-122 #define ROOT_CTRL(cb)	( cb + ROOTCTRL )
-123 #define ROOT_STATUS(cb)	( cb + ROOTSTATUS )
-124
-125 #define hp_register_read_word(pdev, reg , value) \
-126 	pci_read_config_word(pdev, reg, &value)
-127
-128 #define hp_register_read_dword(pdev, reg , value) \
-129 	pci_read_config_dword(pdev, reg, &value)
-130
-131 #define hp_register_write_word(pdev, reg , value) \
-132 	pci_write_config_word(pdev, reg, value)
-133
-134 #define hp_register_dwrite_word(pdev, reg , value) \
-135 	pci_write_config_dword(pdev, reg, value)
+109
+110 static inline int pciehp_readw(struct controller *ctrl, int reg, u16 *value)
+111 {
+112 	struct pci_dev *dev = ctrl->pci_dev;
+113 	return pci_read_config_word(dev, ctrl->cap_base + reg, value);
+114 }
+115
+116 static inline int pciehp_readl(struct controller *ctrl, int reg, u32 *value)
+117 {
+118 	struct pci_dev *dev = ctrl->pci_dev;
+119 	return pci_read_config_dword(dev, ctrl->cap_base + reg, value);
+120 }
+121
+122 static inline int pciehp_writew(struct controller *ctrl, int reg, u16 value)
+123 {
+124 	struct pci_dev *dev = ctrl->pci_dev;
+125 	return pci_write_config_word(dev, ctrl->cap_base + reg, value);
+126 }
+127
+128 static inline int pciehp_writel(struct controller *ctrl, int reg, u32 value)
+129 {
+130 	struct pci_dev *dev = ctrl->pci_dev;
+131 	return pci_write_config_dword(dev, ctrl->cap_base + reg, value);
+132 }
136 133
137/* Field definitions in PCI Express Capabilities Register */ 134/* Field definitions in PCI Express Capabilities Register */
138#define CAP_VER 0x000F 135#define CAP_VER 0x000F
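The pciehp_readw/readl/writew/writel helpers above replace the per-register macros with one accessor per access width; each adds ctrl->cap_base to a register offset, so call sites pass symbolic offsets such as SLOTSTATUS. A user-space sketch of the idea, with pci_read_config_word() stubbed out and the offset value illustrative rather than taken from the spec:

#include <stdio.h>
#include <stdint.h>

struct pci_dev { uint16_t config[128]; };		/* fake config space */
struct controller { struct pci_dev *pci_dev; int cap_base; };

static int pci_read_config_word(struct pci_dev *dev, int where, uint16_t *val)
{
	*val = dev->config[where / 2];			/* stub for the real accessor */
	return 0;
}

static inline int pciehp_readw(struct controller *ctrl, int reg, uint16_t *value)
{
	return pci_read_config_word(ctrl->pci_dev, ctrl->cap_base + reg, value);
}

#define SLOTSTATUS 0x1a		/* offset within the capability; illustrative */

int main(void)
{
	struct pci_dev dev = { .config = { 0 } };
	struct controller ctrl = { .pci_dev = &dev, .cap_base = 0x40 };
	uint16_t status;

	dev.config[(0x40 + SLOTSTATUS) / 2] = 0x0040;	/* presence detect set */
	if (!pciehp_readw(&ctrl, SLOTSTATUS, &status))
		printf("slot status = 0x%04x\n", status);
	return 0;
}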
@@ -196,6 +193,7 @@ static int pcie_cap_base = 0; /* Base of the PCI Express capability item struct
196#define ATTN_LED_CTRL 0x00C0 193#define ATTN_LED_CTRL 0x00C0
197#define PWR_LED_CTRL 0x0300 194#define PWR_LED_CTRL 0x0300
198#define PWR_CTRL 0x0400 195#define PWR_CTRL 0x0400
196#define EMI_CTRL 0x0800
199 197
200/* Attention indicator and Power indicator states */ 198/* Attention indicator and Power indicator states */
201#define LED_ON 0x01 199#define LED_ON 0x01
@@ -206,6 +204,10 @@ static int pcie_cap_base = 0; /* Base of the PCI Express capability item struct
206#define POWER_ON 0 204#define POWER_ON 0
207#define POWER_OFF 0x0400 205#define POWER_OFF 0x0400
208 206
207/* EMI Status defines */
208#define EMI_DISENGAGED 0
209#define EMI_ENGAGED 1
210
209/* Field definitions in Slot Status Register */ 211/* Field definitions in Slot Status Register */
210#define ATTN_BUTTN_PRESSED 0x0001 212#define ATTN_BUTTN_PRESSED 0x0001
211#define PWR_FAULT_DETECTED 0x0002 213#define PWR_FAULT_DETECTED 0x0002
@@ -214,114 +216,117 @@ static int pcie_cap_base = 0; /* Base of the PCI Express capability item struct
214#define CMD_COMPLETED 0x0010 216#define CMD_COMPLETED 0x0010
215#define MRL_STATE 0x0020 217#define MRL_STATE 0x0020
216#define PRSN_STATE 0x0040 218#define PRSN_STATE 0x0040
219#define EMI_STATE 0x0080
220#define EMI_STATUS_BIT 7
217 221
218static spinlock_t hpc_event_lock; 222static spinlock_t hpc_event_lock;
219 223
220DEFINE_DBG_BUFFER /* Debug string buffer for entire HPC defined here */ 224DEFINE_DBG_BUFFER /* Debug string buffer for entire HPC defined here */
221static struct php_ctlr_state_s *php_ctlr_list_head; /* HPC state linked list */
222static int ctlr_seq_num = 0; /* Controller sequence # */ 225static int ctlr_seq_num = 0; /* Controller sequence # */
223static spinlock_t list_lock;
224
225static irqreturn_t pcie_isr(int IRQ, void *dev_id);
226 226
-227 static void start_int_poll_timer(struct php_ctlr_state_s *php_ctlr, int seconds);
+227 static irqreturn_t pcie_isr(int irq, void *dev_id);
228static void start_int_poll_timer(struct controller *ctrl, int sec);
228 229
229/* This is the interrupt polling timeout function. */ 230/* This is the interrupt polling timeout function. */
-230 static void int_poll_timeout(unsigned long lphp_ctlr)
+231 static void int_poll_timeout(unsigned long data)
231{ 232{
-232	struct php_ctlr_state_s *php_ctlr = (struct php_ctlr_state_s *)lphp_ctlr;
+233	struct controller *ctrl = (struct controller *)data;
233 234
234 DBG_ENTER_ROUTINE 235 DBG_ENTER_ROUTINE
235 236
236 if ( !php_ctlr ) {
237 err("%s: Invalid HPC controller handle!\n", __FUNCTION__);
238 return;
239 }
240
241 /* Poll for interrupt events. regs == NULL => polling */ 237 /* Poll for interrupt events. regs == NULL => polling */
-242	pcie_isr( 0, (void *)php_ctlr );
+238	pcie_isr(0, ctrl);
243
244 init_timer(&php_ctlr->int_poll_timer);
245 239
240 init_timer(&ctrl->poll_timer);
246 if (!pciehp_poll_time) 241 if (!pciehp_poll_time)
247 pciehp_poll_time = 2; /* reset timer to poll in 2 secs if user doesn't specify at module installation*/ 242 pciehp_poll_time = 2; /* reset timer to poll in 2 secs if user doesn't specify at module installation*/
248 243
-249	start_int_poll_timer(php_ctlr, pciehp_poll_time);
+244	start_int_poll_timer(ctrl, pciehp_poll_time);
250
251 return;
252} 245}
253 246
254/* This function starts the interrupt polling timer. */ 247/* This function starts the interrupt polling timer. */
-255 static void start_int_poll_timer(struct php_ctlr_state_s *php_ctlr, int seconds)
+248 static void start_int_poll_timer(struct controller *ctrl, int sec)
256{ 249{
-257	if (!php_ctlr) {
-258		err("%s: Invalid HPC controller handle!\n", __FUNCTION__);
-259		return;
-260	}
+250	/* Clamp to sane value */
+251	if ((sec <= 0) || (sec > 60))
+252		sec = 2;
+253
254 ctrl->poll_timer.function = &int_poll_timeout;
255 ctrl->poll_timer.data = (unsigned long)ctrl;
256 ctrl->poll_timer.expires = jiffies + sec * HZ;
257 add_timer(&ctrl->poll_timer);
258}
261 259
-262	if ( ( seconds <= 0 ) || ( seconds > 60 ) )
-263		seconds = 2;	/* Clamp to sane value */
+260 static inline int pcie_wait_cmd(struct controller *ctrl)
+261 {
262 int retval = 0;
263 unsigned int msecs = pciehp_poll_mode ? 2500 : 1000;
264 unsigned long timeout = msecs_to_jiffies(msecs);
265 int rc;
264 266
-265	php_ctlr->int_poll_timer.function = &int_poll_timeout;
-266	php_ctlr->int_poll_timer.data = (unsigned long)php_ctlr;	/* Instance data */
-267	php_ctlr->int_poll_timer.expires = jiffies + seconds * HZ;
-268	add_timer(&php_ctlr->int_poll_timer);
+267	rc = wait_event_interruptible_timeout(ctrl->queue,
+268				!ctrl->cmd_busy, timeout);
+269	if (!rc)
+270		dbg("Command not completed in 1000 msec\n");
271 else if (rc < 0) {
272 retval = -EINTR;
273 info("Command was interrupted by a signal\n");
274 }
269 275
-270	return;
+276	return retval;
271} 277}
272 278
273static int pcie_write_cmd(struct slot *slot, u16 cmd) 279static int pcie_write_cmd(struct slot *slot, u16 cmd)
274{ 280{
-275	struct php_ctlr_state_s *php_ctlr = slot->ctrl->hpc_ctlr_handle;
+281	struct controller *ctrl = slot->ctrl;
276 int retval = 0; 282 int retval = 0;
277 u16 slot_status; 283 u16 slot_status;
278 284
279 DBG_ENTER_ROUTINE 285 DBG_ENTER_ROUTINE
280
281 if (!php_ctlr) {
282 err("%s: Invalid HPC controller handle!\n", __FUNCTION__);
283 return -1;
284 }
285 286
-286	retval = hp_register_read_word(php_ctlr->pci_dev, SLOT_STATUS(slot->ctrl->cap_base), slot_status);
+287	mutex_lock(&ctrl->ctrl_lock);
288
289 retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
287 if (retval) { 290 if (retval) {
-288		err("%s : hp_register_read_word SLOT_STATUS failed\n", __FUNCTION__);
-289		return retval;
+291		err("%s: Cannot read SLOTSTATUS register\n", __FUNCTION__);
+292		goto out;
290 } 293 }
291 294
292 if ((slot_status & CMD_COMPLETED) == CMD_COMPLETED ) { 295 if ((slot_status & CMD_COMPLETED) == CMD_COMPLETED ) {
-293		/* After 1 sec and CMD_COMPLETED still not set, just proceed forward to issue
-294		   the next command according to spec. Just print out the error message */
-295		dbg("%s : CMD_COMPLETED not clear after 1 sec.\n", __FUNCTION__);
+296		/* After 1 sec and CMD_COMPLETED still not set, just
+297		   proceed forward to issue the next command according
+298		   to spec. Just print out the error message */
299 dbg("%s: CMD_COMPLETED not clear after 1 sec.\n",
300 __FUNCTION__);
296 } 301 }
297 302
-298	retval = hp_register_write_word(php_ctlr->pci_dev, SLOT_CTRL(slot->ctrl->cap_base), cmd | CMD_CMPL_INTR_ENABLE);
+303	ctrl->cmd_busy = 1;
304 retval = pciehp_writew(ctrl, SLOTCTRL, (cmd | CMD_CMPL_INTR_ENABLE));
299 if (retval) { 305 if (retval) {
-300		err("%s : hp_register_write_word SLOT_CTRL failed\n", __FUNCTION__);
-301		return retval;
+306		err("%s: Cannot write to SLOTCTRL register\n", __FUNCTION__);
+307		goto out;
302 } 308 }
303 309
310 /*
311 * Wait for command completion.
312 */
313 retval = pcie_wait_cmd(ctrl);
314 out:
315 mutex_unlock(&ctrl->ctrl_lock);
304 DBG_LEAVE_ROUTINE 316 DBG_LEAVE_ROUTINE
305 return retval; 317 return retval;
306} 318}
307 319
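pcie_wait_cmd() above gives pcie_write_cmd() an issue-then-wait protocol: set cmd_busy, write the command, then sleep on ctrl->queue until the command-complete interrupt clears the flag or a timeout expires. A rough user-space analogue of wait_event_interruptible_timeout() using a condition variable; names and timings are illustrative:

/* build with: cc wait_cmd.c -lpthread */
#include <stdio.h>
#include <pthread.h>
#include <time.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t queue = PTHREAD_COND_INITIALIZER;
static int cmd_busy;

static void *fake_isr(void *unused)
{
	usleep(100 * 1000);			/* hardware latency stand-in */
	pthread_mutex_lock(&lock);
	cmd_busy = 0;				/* "command complete" event */
	pthread_cond_signal(&queue);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t isr;
	struct timespec deadline;

	pthread_mutex_lock(&lock);
	cmd_busy = 1;				/* issue the command */
	pthread_create(&isr, NULL, fake_isr, NULL);

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 1;			/* 1000 msec, as in pcie_wait_cmd() */
	while (cmd_busy)
		if (pthread_cond_timedwait(&queue, &lock, &deadline))
			break;			/* timed out: proceed anyway */
	printf("%s\n", cmd_busy ? "Command not completed in 1000 msec"
				: "command completed");
	pthread_mutex_unlock(&lock);
	pthread_join(isr, NULL);
	return 0;
}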
308static int hpc_check_lnk_status(struct controller *ctrl) 320static int hpc_check_lnk_status(struct controller *ctrl)
309{ 321{
310 struct php_ctlr_state_s *php_ctlr = ctrl->hpc_ctlr_handle;
311 u16 lnk_status; 322 u16 lnk_status;
312 int retval = 0; 323 int retval = 0;
313 324
314 DBG_ENTER_ROUTINE 325 DBG_ENTER_ROUTINE
315 326
-316	if (!php_ctlr) {
+327	retval = pciehp_readw(ctrl, LNKSTATUS, &lnk_status);
317 err("%s: Invalid HPC controller handle!\n", __FUNCTION__);
318 return -1;
319 }
320
321 retval = hp_register_read_word(php_ctlr->pci_dev, LNK_STATUS(ctrl->cap_base), lnk_status);
322
323 if (retval) { 328 if (retval) {
-324		err("%s : hp_register_read_word LNK_STATUS failed\n", __FUNCTION__);
+329		err("%s: Cannot read LNKSTATUS register\n", __FUNCTION__);
325 return retval; 330 return retval;
326 } 331 }
327 332
@@ -340,26 +345,21 @@ static int hpc_check_lnk_status(struct controller *ctrl)
340 345
341static int hpc_get_attention_status(struct slot *slot, u8 *status) 346static int hpc_get_attention_status(struct slot *slot, u8 *status)
342{ 347{
-343	struct php_ctlr_state_s *php_ctlr = slot->ctrl->hpc_ctlr_handle;
+348	struct controller *ctrl = slot->ctrl;
344 u16 slot_ctrl; 349 u16 slot_ctrl;
345 u8 atten_led_state; 350 u8 atten_led_state;
346 int retval = 0; 351 int retval = 0;
347 352
348 DBG_ENTER_ROUTINE 353 DBG_ENTER_ROUTINE
349 354
-350	if (!php_ctlr) {
+355	retval = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl);
351 err("%s: Invalid HPC controller handle!\n", __FUNCTION__);
352 return -1;
353 }
354
355 retval = hp_register_read_word(php_ctlr->pci_dev, SLOT_CTRL(slot->ctrl->cap_base), slot_ctrl);
356
357 if (retval) { 356 if (retval) {
-358		err("%s : hp_register_read_word SLOT_CTRL failed\n", __FUNCTION__);
+357		err("%s: Cannot read SLOTCTRL register\n", __FUNCTION__);
359 return retval; 358 return retval;
360 } 359 }
361 360
-362	dbg("%s: SLOT_CTRL %x, value read %x\n", __FUNCTION__,SLOT_CTRL(slot->ctrl->cap_base), slot_ctrl);
+361	dbg("%s: SLOTCTRL %x, value read %x\n",
362 __FUNCTION__, ctrl->cap_base + SLOTCTRL, slot_ctrl);
363 363
364 atten_led_state = (slot_ctrl & ATTN_LED_CTRL) >> 6; 364 atten_led_state = (slot_ctrl & ATTN_LED_CTRL) >> 6;
365 365
@@ -385,27 +385,22 @@ static int hpc_get_attention_status(struct slot *slot, u8 *status)
385 return 0; 385 return 0;
386} 386}
387 387
-388 static int hpc_get_power_status(struct slot * slot, u8 *status)
+388 static int hpc_get_power_status(struct slot *slot, u8 *status)
389{ 389{
-390	struct php_ctlr_state_s *php_ctlr = slot->ctrl->hpc_ctlr_handle;
+390	struct controller *ctrl = slot->ctrl;
391 u16 slot_ctrl; 391 u16 slot_ctrl;
392 u8 pwr_state; 392 u8 pwr_state;
393 int retval = 0; 393 int retval = 0;
394 394
395 DBG_ENTER_ROUTINE 395 DBG_ENTER_ROUTINE
396 396
-397	if (!php_ctlr) {
+397	retval = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl);
398 err("%s: Invalid HPC controller handle!\n", __FUNCTION__);
399 return -1;
400 }
401
402 retval = hp_register_read_word(php_ctlr->pci_dev, SLOT_CTRL(slot->ctrl->cap_base), slot_ctrl);
403
404 if (retval) { 398 if (retval) {
-405		err("%s : hp_register_read_word SLOT_CTRL failed\n", __FUNCTION__);
+399		err("%s: Cannot read SLOTCTRL register\n", __FUNCTION__);
406 return retval; 400 return retval;
407 } 401 }
-408	dbg("%s: SLOT_CTRL %x value read %x\n", __FUNCTION__, SLOT_CTRL(slot->ctrl->cap_base), slot_ctrl);
+402	dbg("%s: SLOTCTRL %x value read %x\n",
403 __FUNCTION__, ctrl->cap_base + SLOTCTRL, slot_ctrl);
409 404
410 pwr_state = (slot_ctrl & PWR_CTRL) >> 10; 405 pwr_state = (slot_ctrl & PWR_CTRL) >> 10;
411 406
@@ -428,21 +423,15 @@ static int hpc_get_power_status(struct slot * slot, u8 *status)
428 423
429static int hpc_get_latch_status(struct slot *slot, u8 *status) 424static int hpc_get_latch_status(struct slot *slot, u8 *status)
430{ 425{
-431	struct php_ctlr_state_s *php_ctlr = slot->ctrl->hpc_ctlr_handle;
+426	struct controller *ctrl = slot->ctrl;
432 u16 slot_status; 427 u16 slot_status;
433 int retval = 0; 428 int retval = 0;
434 429
435 DBG_ENTER_ROUTINE 430 DBG_ENTER_ROUTINE
436 431
-437	if (!php_ctlr) {
+432	retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
438 err("%s: Invalid HPC controller handle!\n", __FUNCTION__);
439 return -1;
440 }
441
442 retval = hp_register_read_word(php_ctlr->pci_dev, SLOT_STATUS(slot->ctrl->cap_base), slot_status);
443
444 if (retval) { 433 if (retval) {
-445		err("%s : hp_register_read_word SLOT_STATUS failed\n", __FUNCTION__);
+434		err("%s: Cannot read SLOTSTATUS register\n", __FUNCTION__);
446 return retval; 435 return retval;
447 } 436 }
448 437
@@ -454,22 +443,16 @@ static int hpc_get_latch_status(struct slot *slot, u8 *status)
454 443
455static int hpc_get_adapter_status(struct slot *slot, u8 *status) 444static int hpc_get_adapter_status(struct slot *slot, u8 *status)
456{ 445{
-457	struct php_ctlr_state_s *php_ctlr = slot->ctrl->hpc_ctlr_handle;
+446	struct controller *ctrl = slot->ctrl;
458 u16 slot_status; 447 u16 slot_status;
459 u8 card_state; 448 u8 card_state;
460 int retval = 0; 449 int retval = 0;
461 450
462 DBG_ENTER_ROUTINE 451 DBG_ENTER_ROUTINE
463 452
-464	if (!php_ctlr) {
+453	retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
465 err("%s: Invalid HPC controller handle!\n", __FUNCTION__);
466 return -1;
467 }
468
469 retval = hp_register_read_word(php_ctlr->pci_dev, SLOT_STATUS(slot->ctrl->cap_base), slot_status);
470
471 if (retval) { 454 if (retval) {
-472		err("%s : hp_register_read_word SLOT_STATUS failed\n", __FUNCTION__);
+455		err("%s: Cannot read SLOTSTATUS register\n", __FUNCTION__);
473 return retval; 456 return retval;
474 } 457 }
475 card_state = (u8)((slot_status & PRSN_STATE) >> 6); 458 card_state = (u8)((slot_status & PRSN_STATE) >> 6);
@@ -479,24 +462,18 @@ static int hpc_get_adapter_status(struct slot *slot, u8 *status)
479 return 0; 462 return 0;
480} 463}
481 464
-482 static int hpc_query_power_fault(struct slot * slot)
+465 static int hpc_query_power_fault(struct slot *slot)
483{ 466{
-484	struct php_ctlr_state_s *php_ctlr = slot->ctrl->hpc_ctlr_handle;
+467	struct controller *ctrl = slot->ctrl;
485 u16 slot_status; 468 u16 slot_status;
486 u8 pwr_fault; 469 u8 pwr_fault;
487 int retval = 0; 470 int retval = 0;
488 471
489 DBG_ENTER_ROUTINE 472 DBG_ENTER_ROUTINE
490 473
-491	if (!php_ctlr) {
+474	retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
492 err("%s: Invalid HPC controller handle!\n", __FUNCTION__);
493 return -1;
494 }
495
496 retval = hp_register_read_word(php_ctlr->pci_dev, SLOT_STATUS(slot->ctrl->cap_base), slot_status);
497
498 if (retval) { 475 if (retval) {
-499		err("%s : Cannot check for power fault\n", __FUNCTION__);
+476		err("%s: Cannot check for power fault\n", __FUNCTION__);
500 return retval; 477 return retval;
501 } 478 }
502 pwr_fault = (u8)((slot_status & PWR_FAULT_DETECTED) >> 1); 479 pwr_fault = (u8)((slot_status & PWR_FAULT_DETECTED) >> 1);
@@ -505,28 +482,63 @@ static int hpc_query_power_fault(struct slot * slot)
505 return pwr_fault; 482 return pwr_fault;
506} 483}
507 484
-508 static int hpc_set_attention_status(struct slot *slot, u8 value)
+485 static int hpc_get_emi_status(struct slot *slot, u8 *status)
509{ 486{
-510	struct php_ctlr_state_s *php_ctlr = slot->ctrl->hpc_ctlr_handle;
+487	struct controller *ctrl = slot->ctrl;
488 u16 slot_status;
489 int retval = 0;
490
491 DBG_ENTER_ROUTINE
492
493 retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
494 if (retval) {
495 err("%s : Cannot check EMI status\n", __FUNCTION__);
496 return retval;
497 }
498 *status = (slot_status & EMI_STATE) >> EMI_STATUS_BIT;
499
500 DBG_LEAVE_ROUTINE
501 return retval;
502}
503
504static int hpc_toggle_emi(struct slot *slot)
505{
506 struct controller *ctrl = slot->ctrl;
511 u16 slot_cmd = 0; 507 u16 slot_cmd = 0;
512 u16 slot_ctrl; 508 u16 slot_ctrl;
513 int rc = 0; 509 int rc = 0;
514 510
515 DBG_ENTER_ROUTINE 511 DBG_ENTER_ROUTINE
516 512
-517	if (!php_ctlr) {
-518		err("%s: Invalid HPC controller handle!\n", __FUNCTION__);
-519		return -1;
+513	rc = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl);
+514	if (rc) {
+515		err("%s : hp_register_read_word SLOT_CTRL failed\n",
516 __FUNCTION__);
517 return rc;
520 } 518 }
521 519
-522	if (slot->hp_slot >= php_ctlr->num_slots) {
-523		err("%s: Invalid HPC slot number!\n", __FUNCTION__);
-524		return -1;
+520	slot_cmd = (slot_ctrl | EMI_CTRL);
+521	if (!pciehp_poll_mode)
+522		slot_cmd = slot_cmd | HP_INTR_ENABLE;
525 }
526 rc = hp_register_read_word(php_ctlr->pci_dev, SLOT_CTRL(slot->ctrl->cap_base), slot_ctrl);
527 523
524 pcie_write_cmd(slot, slot_cmd);
525 slot->last_emi_toggle = get_seconds();
526 DBG_LEAVE_ROUTINE
527 return rc;
528}
529
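hpc_toggle_emi() above is a plain read-modify-write: read SLOTCTRL, OR in EMI_CTRL, and issue the result through pcie_write_cmd(); the lock state then shows up in the EMI_STATE bit of SLOTSTATUS. A toy sketch with the two registers simulated in memory (the toggle-on-write behavior is an assumption of the model, not a statement about real hardware):

#include <stdio.h>
#include <stdint.h>

#define EMI_CTRL	0x0800	/* Slot Control: toggle electromechanical lock */
#define EMI_STATE	0x0080	/* Slot Status: current lock state */
#define EMI_STATUS_BIT	7

static uint16_t slot_ctrl, slot_status;		/* simulated registers */

static void pcie_write_cmd(uint16_t cmd)
{
	slot_ctrl = cmd;
	if (cmd & EMI_CTRL)
		slot_status ^= EMI_STATE;	/* model: hardware flips the lock */
}

int main(void)
{
	pcie_write_cmd(slot_ctrl | EMI_CTRL);	/* hpc_toggle_emi()'s write */
	printf("EMI %s\n", (slot_status & EMI_STATE) >> EMI_STATUS_BIT ?
	       "engaged" : "disengaged");
	return 0;
}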
530static int hpc_set_attention_status(struct slot *slot, u8 value)
531{
532 struct controller *ctrl = slot->ctrl;
533 u16 slot_cmd = 0;
534 u16 slot_ctrl;
535 int rc = 0;
536
537 DBG_ENTER_ROUTINE
538
539 rc = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl);
528 if (rc) { 540 if (rc) {
-529		err("%s : hp_register_read_word SLOT_CTRL failed\n", __FUNCTION__);
+541		err("%s: Cannot read SLOTCTRL register\n", __FUNCTION__);
530 return rc; 542 return rc;
531 } 543 }
532 544
@@ -547,7 +559,8 @@ static int hpc_set_attention_status(struct slot *slot, u8 value)
547 slot_cmd = slot_cmd | HP_INTR_ENABLE; 559 slot_cmd = slot_cmd | HP_INTR_ENABLE;
548 560
549 pcie_write_cmd(slot, slot_cmd); 561 pcie_write_cmd(slot, slot_cmd);
-550	dbg("%s: SLOT_CTRL %x write cmd %x\n", __FUNCTION__, SLOT_CTRL(slot->ctrl->cap_base), slot_cmd);
+562	dbg("%s: SLOTCTRL %x write cmd %x\n",
563 __FUNCTION__, ctrl->cap_base + SLOTCTRL, slot_cmd);
551 564
552 DBG_LEAVE_ROUTINE 565 DBG_LEAVE_ROUTINE
553 return rc; 566 return rc;
@@ -556,27 +569,16 @@ static int hpc_set_attention_status(struct slot *slot, u8 value)
556 569
557static void hpc_set_green_led_on(struct slot *slot) 570static void hpc_set_green_led_on(struct slot *slot)
558{ 571{
-559	struct php_ctlr_state_s *php_ctlr = slot->ctrl->hpc_ctlr_handle;
+572	struct controller *ctrl = slot->ctrl;
560 u16 slot_cmd; 573 u16 slot_cmd;
561 u16 slot_ctrl; 574 u16 slot_ctrl;
562 int rc = 0; 575 int rc = 0;
563 576
564 DBG_ENTER_ROUTINE 577 DBG_ENTER_ROUTINE
565 578
-566	if (!php_ctlr) {
+579	rc = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl);
567 err("%s: Invalid HPC controller handle!\n", __FUNCTION__);
568 return ;
569 }
570
571 if (slot->hp_slot >= php_ctlr->num_slots) {
572 err("%s: Invalid HPC slot number!\n", __FUNCTION__);
573 return ;
574 }
575
576 rc = hp_register_read_word(php_ctlr->pci_dev, SLOT_CTRL(slot->ctrl->cap_base), slot_ctrl);
577
578 if (rc) { 580 if (rc) {
-579		err("%s : hp_register_read_word SLOT_CTRL failed\n", __FUNCTION__);
+581		err("%s: Cannot read SLOTCTRL register\n", __FUNCTION__);
580 return; 582 return;
581 } 583 }
582 slot_cmd = (slot_ctrl & ~PWR_LED_CTRL) | 0x0100; 584 slot_cmd = (slot_ctrl & ~PWR_LED_CTRL) | 0x0100;
@@ -585,34 +587,24 @@ static void hpc_set_green_led_on(struct slot *slot)
585 587
586 pcie_write_cmd(slot, slot_cmd); 588 pcie_write_cmd(slot, slot_cmd);
587 589
-588	dbg("%s: SLOT_CTRL %x write cmd %x\n",__FUNCTION__, SLOT_CTRL(slot->ctrl->cap_base), slot_cmd);
+590	dbg("%s: SLOTCTRL %x write cmd %x\n",
591 __FUNCTION__, ctrl->cap_base + SLOTCTRL, slot_cmd);
589 DBG_LEAVE_ROUTINE 592 DBG_LEAVE_ROUTINE
590 return; 593 return;
591} 594}
592 595
593static void hpc_set_green_led_off(struct slot *slot) 596static void hpc_set_green_led_off(struct slot *slot)
594{ 597{
-595	struct php_ctlr_state_s *php_ctlr = slot->ctrl->hpc_ctlr_handle;
+598	struct controller *ctrl = slot->ctrl;
596 u16 slot_cmd; 599 u16 slot_cmd;
597 u16 slot_ctrl; 600 u16 slot_ctrl;
598 int rc = 0; 601 int rc = 0;
599 602
600 DBG_ENTER_ROUTINE 603 DBG_ENTER_ROUTINE
601 604
-602	if (!php_ctlr) {
+605	rc = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl);
603 err("%s: Invalid HPC controller handle!\n", __FUNCTION__);
604 return ;
605 }
606
607 if (slot->hp_slot >= php_ctlr->num_slots) {
608 err("%s: Invalid HPC slot number!\n", __FUNCTION__);
609 return ;
610 }
611
612 rc = hp_register_read_word(php_ctlr->pci_dev, SLOT_CTRL(slot->ctrl->cap_base), slot_ctrl);
613
614 if (rc) { 606 if (rc) {
-615		err("%s : hp_register_read_word SLOT_CTRL failed\n", __FUNCTION__);
+607		err("%s: Cannot read SLOTCTRL register\n", __FUNCTION__);
616 return; 608 return;
617 } 609 }
618 610
@@ -621,7 +613,8 @@ static void hpc_set_green_led_off(struct slot *slot)
621 if (!pciehp_poll_mode) 613 if (!pciehp_poll_mode)
622 slot_cmd = slot_cmd | HP_INTR_ENABLE; 614 slot_cmd = slot_cmd | HP_INTR_ENABLE;
623 pcie_write_cmd(slot, slot_cmd); 615 pcie_write_cmd(slot, slot_cmd);
-624	dbg("%s: SLOT_CTRL %x write cmd %x\n", __FUNCTION__, SLOT_CTRL(slot->ctrl->cap_base), slot_cmd);
+616	dbg("%s: SLOTCTRL %x write cmd %x\n",
617 __FUNCTION__, ctrl->cap_base + SLOTCTRL, slot_cmd);
625 618
626 DBG_LEAVE_ROUTINE 619 DBG_LEAVE_ROUTINE
627 return; 620 return;
@@ -629,27 +622,16 @@ static void hpc_set_green_led_off(struct slot *slot)
629 622
630static void hpc_set_green_led_blink(struct slot *slot) 623static void hpc_set_green_led_blink(struct slot *slot)
631{ 624{
-632	struct php_ctlr_state_s *php_ctlr = slot->ctrl->hpc_ctlr_handle;
+625	struct controller *ctrl = slot->ctrl;
633 u16 slot_cmd; 626 u16 slot_cmd;
634 u16 slot_ctrl; 627 u16 slot_ctrl;
635 int rc = 0; 628 int rc = 0;
636 629
637 DBG_ENTER_ROUTINE 630 DBG_ENTER_ROUTINE
638 631
-639	if (!php_ctlr) {
+632	rc = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl);
640 err("%s: Invalid HPC controller handle!\n", __FUNCTION__);
641 return ;
642 }
643
644 if (slot->hp_slot >= php_ctlr->num_slots) {
645 err("%s: Invalid HPC slot number!\n", __FUNCTION__);
646 return ;
647 }
648
649 rc = hp_register_read_word(php_ctlr->pci_dev, SLOT_CTRL(slot->ctrl->cap_base), slot_ctrl);
650
651 if (rc) { 633 if (rc) {
-652		err("%s : hp_register_read_word SLOT_CTRL failed\n", __FUNCTION__);
+634		err("%s: Cannot read SLOTCTRL register\n", __FUNCTION__);
653 return; 635 return;
654 } 636 }
655 637
@@ -659,126 +641,54 @@ static void hpc_set_green_led_blink(struct slot *slot)
659 slot_cmd = slot_cmd | HP_INTR_ENABLE; 641 slot_cmd = slot_cmd | HP_INTR_ENABLE;
660 pcie_write_cmd(slot, slot_cmd); 642 pcie_write_cmd(slot, slot_cmd);
661 643
-662	dbg("%s: SLOT_CTRL %x write cmd %x\n",__FUNCTION__, SLOT_CTRL(slot->ctrl->cap_base), slot_cmd);
+644	dbg("%s: SLOTCTRL %x write cmd %x\n",
645 __FUNCTION__, ctrl->cap_base + SLOTCTRL, slot_cmd);
663 DBG_LEAVE_ROUTINE 646 DBG_LEAVE_ROUTINE
664 return; 647 return;
665} 648}
666 649
667int pcie_get_ctlr_slot_config(struct controller *ctrl,
668 int *num_ctlr_slots, /* number of slots in this HPC; only 1 in PCIE */
669 int *first_device_num, /* PCI dev num of the first slot in this PCIE */
670 int *physical_slot_num, /* phy slot num of the first slot in this PCIE */
671 u8 *ctrlcap)
672{
673 struct php_ctlr_state_s *php_ctlr = ctrl->hpc_ctlr_handle;
674 u32 slot_cap;
675 int rc = 0;
676
677 DBG_ENTER_ROUTINE
678
679 if (!php_ctlr) {
680 err("%s: Invalid HPC controller handle!\n", __FUNCTION__);
681 return -1;
682 }
683
684 *first_device_num = 0;
685 *num_ctlr_slots = 1;
686
687 rc = hp_register_read_dword(php_ctlr->pci_dev, SLOT_CAP(ctrl->cap_base), slot_cap);
688
689 if (rc) {
690 err("%s : hp_register_read_dword SLOT_CAP failed\n", __FUNCTION__);
691 return -1;
692 }
693
694 *physical_slot_num = slot_cap >> 19;
695 dbg("%s: PSN %d \n", __FUNCTION__, *physical_slot_num);
696
697 *ctrlcap = slot_cap & 0x0000007f;
698
699 DBG_LEAVE_ROUTINE
700 return 0;
701}
702
703static void hpc_release_ctlr(struct controller *ctrl) 650static void hpc_release_ctlr(struct controller *ctrl)
704{ 651{
705 struct php_ctlr_state_s *php_ctlr = ctrl->hpc_ctlr_handle;
706 struct php_ctlr_state_s *p, *p_prev;
707
708 DBG_ENTER_ROUTINE 652 DBG_ENTER_ROUTINE
709 653
-710	if (!php_ctlr) {
-711		err("%s: Invalid HPC controller handle!\n", __FUNCTION__);
-712		return ;
-713	}
+654	if (pciehp_poll_mode)
+655		del_timer(&ctrl->poll_timer);
+656	else
+657		free_irq(ctrl->pci_dev->irq, ctrl);
714
715 if (pciehp_poll_mode) {
716 del_timer(&php_ctlr->int_poll_timer);
717 } else {
718 if (php_ctlr->irq) {
719 free_irq(php_ctlr->irq, ctrl);
720 php_ctlr->irq = 0;
721 }
722 }
723 if (php_ctlr->pci_dev)
724 php_ctlr->pci_dev = NULL;
725
726 spin_lock(&list_lock);
727 p = php_ctlr_list_head;
728 p_prev = NULL;
729 while (p) {
730 if (p == php_ctlr) {
731 if (p_prev)
732 p_prev->pnext = p->pnext;
733 else
734 php_ctlr_list_head = p->pnext;
735 break;
736 } else {
737 p_prev = p;
738 p = p->pnext;
739 }
740 }
741 spin_unlock(&list_lock);
742
743 kfree(php_ctlr);
744 658
745 DBG_LEAVE_ROUTINE 659 DBG_LEAVE_ROUTINE
746
747} 660}
748 661
749static int hpc_power_on_slot(struct slot * slot) 662static int hpc_power_on_slot(struct slot * slot)
750{ 663{
-751	struct php_ctlr_state_s *php_ctlr = slot->ctrl->hpc_ctlr_handle;
+664	struct controller *ctrl = slot->ctrl;
752 u16 slot_cmd; 665 u16 slot_cmd;
753 u16 slot_ctrl, slot_status; 666 u16 slot_ctrl, slot_status;
754
755 int retval = 0; 667 int retval = 0;
756 668
757 DBG_ENTER_ROUTINE 669 DBG_ENTER_ROUTINE
758 670
759 if (!php_ctlr) {
760 err("%s: Invalid HPC controller handle!\n", __FUNCTION__);
761 return -1;
762 }
763
764 dbg("%s: slot->hp_slot %x\n", __FUNCTION__, slot->hp_slot); 671 dbg("%s: slot->hp_slot %x\n", __FUNCTION__, slot->hp_slot);
765 if (slot->hp_slot >= php_ctlr->num_slots) {
766 err("%s: Invalid HPC slot number!\n", __FUNCTION__);
767 return -1;
768 }
769 672
770 /* Clear sticky power-fault bit from previous power failures */ 673 /* Clear sticky power-fault bit from previous power failures */
-771	hp_register_read_word(php_ctlr->pci_dev,
-772			SLOT_STATUS(slot->ctrl->cap_base), slot_status);
+674	retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
+675	if (retval) {
676 err("%s: Cannot read SLOTSTATUS register\n", __FUNCTION__);
677 return retval;
678 }
773 slot_status &= PWR_FAULT_DETECTED; 679 slot_status &= PWR_FAULT_DETECTED;
-774	if (slot_status)
+680	if (slot_status) {
-775		hp_register_write_word(php_ctlr->pci_dev,
-776				SLOT_STATUS(slot->ctrl->cap_base), slot_status);
-777
-778	retval = hp_register_read_word(php_ctlr->pci_dev, SLOT_CTRL(slot->ctrl->cap_base), slot_ctrl);
+681		retval = pciehp_writew(ctrl, SLOTSTATUS, slot_status);
+682		if (retval) {
+683			err("%s: Cannot write to SLOTSTATUS register\n",
+684				__FUNCTION__);
685 return retval;
686 }
687 }
779 688
689 retval = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl);
780 if (retval) { 690 if (retval) {
-781		err("%s : hp_register_read_word SLOT_CTRL failed\n", __FUNCTION__);
+691		err("%s: Cannot read SLOTCTRL register\n", __FUNCTION__);
782 return retval; 692 return retval;
783 } 693 }
784 694
@@ -798,7 +708,8 @@ static int hpc_power_on_slot(struct slot * slot)
798 err("%s: Write %x command failed!\n", __FUNCTION__, slot_cmd); 708 err("%s: Write %x command failed!\n", __FUNCTION__, slot_cmd);
799 return -1; 709 return -1;
800 } 710 }
-801	dbg("%s: SLOT_CTRL %x write cmd %x\n",__FUNCTION__, SLOT_CTRL(slot->ctrl->cap_base), slot_cmd);
+711	dbg("%s: SLOTCTRL %x write cmd %x\n",
712 __FUNCTION__, ctrl->cap_base + SLOTCTRL, slot_cmd);
802 713
803 DBG_LEAVE_ROUTINE 714 DBG_LEAVE_ROUTINE
804 715
@@ -807,29 +718,18 @@ static int hpc_power_on_slot(struct slot * slot)
807 718
808static int hpc_power_off_slot(struct slot * slot) 719static int hpc_power_off_slot(struct slot * slot)
809{ 720{
-810	struct php_ctlr_state_s *php_ctlr = slot->ctrl->hpc_ctlr_handle;
+721	struct controller *ctrl = slot->ctrl;
811 u16 slot_cmd; 722 u16 slot_cmd;
812 u16 slot_ctrl; 723 u16 slot_ctrl;
813
814 int retval = 0; 724 int retval = 0;
815 725
816 DBG_ENTER_ROUTINE 726 DBG_ENTER_ROUTINE
817 727
818 if (!php_ctlr) {
819 err("%s: Invalid HPC controller handle!\n", __FUNCTION__);
820 return -1;
821 }
822
823 dbg("%s: slot->hp_slot %x\n", __FUNCTION__, slot->hp_slot); 728 dbg("%s: slot->hp_slot %x\n", __FUNCTION__, slot->hp_slot);
824 slot->hp_slot = 0;
825 if (slot->hp_slot >= php_ctlr->num_slots) {
826 err("%s: Invalid HPC slot number!\n", __FUNCTION__);
827 return -1;
828 }
829 retval = hp_register_read_word(php_ctlr->pci_dev, SLOT_CTRL(slot->ctrl->cap_base), slot_ctrl);
830 729
730 retval = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl);
831 if (retval) { 731 if (retval) {
-832		err("%s : hp_register_read_word SLOT_CTRL failed\n", __FUNCTION__);
+732		err("%s: Cannot read SLOTCTRL register\n", __FUNCTION__);
833 return retval; 733 return retval;
834 } 734 }
835 735
@@ -854,47 +754,25 @@ static int hpc_power_off_slot(struct slot * slot)
854 err("%s: Write command failed!\n", __FUNCTION__); 754 err("%s: Write command failed!\n", __FUNCTION__);
855 return -1; 755 return -1;
856 } 756 }
-857	dbg("%s: SLOT_CTRL %x write cmd %x\n",__FUNCTION__, SLOT_CTRL(slot->ctrl->cap_base), slot_cmd);
+757	dbg("%s: SLOTCTRL %x write cmd %x\n",
758 __FUNCTION__, ctrl->cap_base + SLOTCTRL, slot_cmd);
858 759
859 DBG_LEAVE_ROUTINE 760 DBG_LEAVE_ROUTINE
860 761
861 return retval; 762 return retval;
862} 763}
863 764
-864 static irqreturn_t pcie_isr(int IRQ, void *dev_id)
+765 static irqreturn_t pcie_isr(int irq, void *dev_id)
865{ 766{
-866	struct controller *ctrl = NULL;
+767	struct controller *ctrl = (struct controller *)dev_id;
867 struct php_ctlr_state_s *php_ctlr;
868 u8 schedule_flag = 0;
869 u16 slot_status, intr_detect, intr_loc; 768 u16 slot_status, intr_detect, intr_loc;
870 u16 temp_word; 769 u16 temp_word;
871 int hp_slot = 0; /* only 1 slot per PCI Express port */ 770 int hp_slot = 0; /* only 1 slot per PCI Express port */
872 int rc = 0; 771 int rc = 0;
873 772
-874	if (!dev_id)
+773	rc = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
875 return IRQ_NONE;
876
877 if (!pciehp_poll_mode) {
878 ctrl = dev_id;
879 php_ctlr = ctrl->hpc_ctlr_handle;
880 } else {
881 php_ctlr = dev_id;
882 ctrl = (struct controller *)php_ctlr->callback_instance_id;
883 }
884
885 if (!ctrl) {
886 dbg("%s: dev_id %p ctlr == NULL\n", __FUNCTION__, (void*) dev_id);
887 return IRQ_NONE;
888 }
889
890 if (!php_ctlr) {
891 dbg("%s: php_ctlr == NULL\n", __FUNCTION__);
892 return IRQ_NONE;
893 }
894
895 rc = hp_register_read_word(php_ctlr->pci_dev, SLOT_STATUS(ctrl->cap_base), slot_status);
896 if (rc) { 774 if (rc) {
-897		err("%s : hp_register_read_word SLOT_STATUS failed\n", __FUNCTION__);
+775		err("%s: Cannot read SLOTSTATUS register\n", __FUNCTION__);
898 return IRQ_NONE; 776 return IRQ_NONE;
899 } 777 }
900 778
@@ -910,33 +788,38 @@ static irqreturn_t pcie_isr(int IRQ, void *dev_id)
910 dbg("%s: intr_loc %x\n", __FUNCTION__, intr_loc); 788 dbg("%s: intr_loc %x\n", __FUNCTION__, intr_loc);
911 /* Mask Hot-plug Interrupt Enable */ 789 /* Mask Hot-plug Interrupt Enable */
912 if (!pciehp_poll_mode) { 790 if (!pciehp_poll_mode) {
-913		rc = hp_register_read_word(php_ctlr->pci_dev, SLOT_CTRL(ctrl->cap_base), temp_word);
+791		rc = pciehp_readw(ctrl, SLOTCTRL, &temp_word);
914 if (rc) { 792 if (rc) {
-915			err("%s : hp_register_read_word SLOT_CTRL failed\n", __FUNCTION__);
+793			err("%s: Cannot read SLOT_CTRL register\n",
794 __FUNCTION__);
916 return IRQ_NONE; 795 return IRQ_NONE;
917 } 796 }
918 797
-919		dbg("%s: hp_register_read_word SLOT_CTRL with value %x\n", __FUNCTION__, temp_word);
+798		dbg("%s: pciehp_readw(SLOTCTRL) with value %x\n",
799 __FUNCTION__, temp_word);
920 temp_word = (temp_word & ~HP_INTR_ENABLE & ~CMD_CMPL_INTR_ENABLE) | 0x00; 800 temp_word = (temp_word & ~HP_INTR_ENABLE & ~CMD_CMPL_INTR_ENABLE) | 0x00;
921 801 rc = pciehp_writew(ctrl, SLOTCTRL, temp_word);
922 rc = hp_register_write_word(php_ctlr->pci_dev, SLOT_CTRL(ctrl->cap_base), temp_word);
923 if (rc) { 802 if (rc) {
-924			err("%s : hp_register_write_word SLOT_CTRL failed\n", __FUNCTION__);
+803			err("%s: Cannot write to SLOTCTRL register\n",
804 __FUNCTION__);
925 return IRQ_NONE; 805 return IRQ_NONE;
926 } 806 }
927 807
-928		rc = hp_register_read_word(php_ctlr->pci_dev, SLOT_STATUS(ctrl->cap_base), slot_status);
+808		rc = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
929 if (rc) { 809 if (rc) {
-930			err("%s : hp_register_read_word SLOT_STATUS failed\n", __FUNCTION__);
+810			err("%s: Cannot read SLOT_STATUS register\n",
811 __FUNCTION__);
931 return IRQ_NONE; 812 return IRQ_NONE;
932 } 813 }
-933		dbg("%s: hp_register_read_word SLOT_STATUS with value %x\n", __FUNCTION__, slot_status);
+814		dbg("%s: pciehp_readw(SLOTSTATUS) with value %x\n",
815 __FUNCTION__, slot_status);
934 816
935 /* Clear command complete interrupt caused by this write */ 817 /* Clear command complete interrupt caused by this write */
936 temp_word = 0x1f; 818 temp_word = 0x1f;
-937		rc = hp_register_write_word(php_ctlr->pci_dev, SLOT_STATUS(ctrl->cap_base), temp_word);
+819		rc = pciehp_writew(ctrl, SLOTSTATUS, temp_word);
938 if (rc) { 820 if (rc) {
-939			err("%s : hp_register_write_word SLOT_STATUS failed\n", __FUNCTION__);
+821			err("%s: Cannot write to SLOTSTATUS register\n",
822 __FUNCTION__);
940 return IRQ_NONE; 823 return IRQ_NONE;
941 } 824 }
942 } 825 }
@@ -945,60 +828,65 @@ static irqreturn_t pcie_isr(int IRQ, void *dev_id)
945 /* 828 /*
946 * Command Complete Interrupt Pending 829 * Command Complete Interrupt Pending
947 */ 830 */
831 ctrl->cmd_busy = 0;
948 wake_up_interruptible(&ctrl->queue); 832 wake_up_interruptible(&ctrl->queue);
949 } 833 }
950 834
-951	if ((php_ctlr->switch_change_callback) && (intr_loc & MRL_SENS_CHANGED))
-952		schedule_flag += php_ctlr->switch_change_callback(
-953			hp_slot, php_ctlr->callback_instance_id);
-954	if ((php_ctlr->attention_button_callback) && (intr_loc & ATTN_BUTTN_PRESSED))
-955		schedule_flag += php_ctlr->attention_button_callback(
-956			hp_slot, php_ctlr->callback_instance_id);
-957	if ((php_ctlr->presence_change_callback) && (intr_loc & PRSN_DETECT_CHANGED))
-958		schedule_flag += php_ctlr->presence_change_callback(
-959			hp_slot , php_ctlr->callback_instance_id);
-960	if ((php_ctlr->power_fault_callback) && (intr_loc & PWR_FAULT_DETECTED))
-961		schedule_flag += php_ctlr->power_fault_callback(
-962			hp_slot, php_ctlr->callback_instance_id);
+835	if (intr_loc & MRL_SENS_CHANGED)
+836		pciehp_handle_switch_change(hp_slot, ctrl);
+837
+838	if (intr_loc & ATTN_BUTTN_PRESSED)
+839		pciehp_handle_attention_button(hp_slot, ctrl);
+840
+841	if (intr_loc & PRSN_DETECT_CHANGED)
+842		pciehp_handle_presence_change(hp_slot, ctrl);
+843
+844	if (intr_loc & PWR_FAULT_DETECTED)
+845		pciehp_handle_power_fault(hp_slot, ctrl);
963 846
964 /* Clear all events after serving them */ 847 /* Clear all events after serving them */
965 temp_word = 0x1F; 848 temp_word = 0x1F;
-966	rc = hp_register_write_word(php_ctlr->pci_dev, SLOT_STATUS(ctrl->cap_base), temp_word);
+849	rc = pciehp_writew(ctrl, SLOTSTATUS, temp_word);
967 if (rc) { 850 if (rc) {
-968		err("%s : hp_register_write_word SLOT_STATUS failed\n", __FUNCTION__);
+851		err("%s: Cannot write to SLOTSTATUS register\n", __FUNCTION__);
969 return IRQ_NONE; 852 return IRQ_NONE;
970 } 853 }
971 /* Unmask Hot-plug Interrupt Enable */ 854 /* Unmask Hot-plug Interrupt Enable */
972 if (!pciehp_poll_mode) { 855 if (!pciehp_poll_mode) {
973 rc = hp_register_read_word(php_ctlr->pci_dev, SLOT_CTRL(ctrl->cap_base), temp_word); 856 rc = pciehp_readw(ctrl, SLOTCTRL, &temp_word);
974 if (rc) { 857 if (rc) {
975 err("%s : hp_register_read_word SLOT_CTRL failed\n", __FUNCTION__); 858 err("%s: Cannot read SLOTCTRL register\n",
859 __FUNCTION__);
976 return IRQ_NONE; 860 return IRQ_NONE;
977 } 861 }
978 862
979 dbg("%s: Unmask Hot-plug Interrupt Enable\n", __FUNCTION__); 863 dbg("%s: Unmask Hot-plug Interrupt Enable\n", __FUNCTION__);
980 temp_word = (temp_word & ~HP_INTR_ENABLE) | HP_INTR_ENABLE; 864 temp_word = (temp_word & ~HP_INTR_ENABLE) | HP_INTR_ENABLE;
981 865
982 rc = hp_register_write_word(php_ctlr->pci_dev, SLOT_CTRL(ctrl->cap_base), temp_word); 866 rc = pciehp_writew(ctrl, SLOTCTRL, temp_word);
983 if (rc) { 867 if (rc) {
984 err("%s : hp_register_write_word SLOT_CTRL failed\n", __FUNCTION__); 868 err("%s: Cannot write to SLOTCTRL register\n",
869 __FUNCTION__);
985 return IRQ_NONE; 870 return IRQ_NONE;
986 } 871 }
987 872
988 rc = hp_register_read_word(php_ctlr->pci_dev, SLOT_STATUS(ctrl->cap_base), slot_status); 873 rc = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
989 if (rc) { 874 if (rc) {
990 err("%s : hp_register_read_word SLOT_STATUS failed\n", __FUNCTION__); 875 err("%s: Cannot read SLOT_STATUS register\n",
876 __FUNCTION__);
991 return IRQ_NONE; 877 return IRQ_NONE;
992 } 878 }
993 879
994 /* Clear command complete interrupt caused by this write */ 880 /* Clear command complete interrupt caused by this write */
995 temp_word = 0x1F; 881 temp_word = 0x1F;
996 rc = hp_register_write_word(php_ctlr->pci_dev, SLOT_STATUS(ctrl->cap_base), temp_word); 882 rc = pciehp_writew(ctrl, SLOTSTATUS, temp_word);
997 if (rc) { 883 if (rc) {
998 err("%s : hp_register_write_word SLOT_STATUS failed\n", __FUNCTION__); 884 err("%s: Cannot write to SLOTSTATUS failed\n",
885 __FUNCTION__);
999 return IRQ_NONE; 886 return IRQ_NONE;
1000 } 887 }
1001 dbg("%s: hp_register_write_word SLOT_STATUS with value %x\n", __FUNCTION__, temp_word); 888 dbg("%s: pciehp_writew(SLOTSTATUS) with value %x\n",
889 __FUNCTION__, temp_word);
1002 } 890 }
1003 891
1004 return IRQ_HANDLED; 892 return IRQ_HANDLED;
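The 0x1f/0x1F writes to SLOTSTATUS in this handler lean on the PCI Express rule that slot status event bits are write-one-to-clear. A minimal sketch of the idiom, assuming the pciehp_writew() accessor used by this patch; SLOT_EVENT_MASK is a hypothetical name for the 0x1f value above:

/* Sketch only: acknowledge all pending slot events by writing 1s back.
 * Writing a 1 to a set status bit clears it; 0s leave bits untouched. */
#define SLOT_EVENT_MASK 0x001f

static int pciehp_ack_events(struct controller *ctrl)
{
	return pciehp_writew(ctrl, SLOTSTATUS, SLOT_EVENT_MASK);
}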
@@ -1006,27 +894,16 @@ static irqreturn_t pcie_isr(int IRQ, void *dev_id)
1006 894
1007static int hpc_get_max_lnk_speed (struct slot *slot, enum pci_bus_speed *value) 895static int hpc_get_max_lnk_speed (struct slot *slot, enum pci_bus_speed *value)
1008{ 896{
1009 struct php_ctlr_state_s *php_ctlr = slot->ctrl->hpc_ctlr_handle; 897 struct controller *ctrl = slot->ctrl;
1010 enum pcie_link_speed lnk_speed; 898 enum pcie_link_speed lnk_speed;
1011 u32 lnk_cap; 899 u32 lnk_cap;
1012 int retval = 0; 900 int retval = 0;
1013 901
1014 DBG_ENTER_ROUTINE 902 DBG_ENTER_ROUTINE
1015 903
1016 if (!php_ctlr) { 904 retval = pciehp_readl(ctrl, LNKCAP, &lnk_cap);
1017 err("%s: Invalid HPC controller handle!\n", __FUNCTION__);
1018 return -1;
1019 }
1020
1021 if (slot->hp_slot >= php_ctlr->num_slots) {
1022 err("%s: Invalid HPC slot number!\n", __FUNCTION__);
1023 return -1;
1024 }
1025
1026 retval = hp_register_read_dword(php_ctlr->pci_dev, LNK_CAP(slot->ctrl->cap_base), lnk_cap);
1027
1028 if (retval) { 905 if (retval) {
1029 err("%s : hp_register_read_dword LNK_CAP failed\n", __FUNCTION__); 906 err("%s: Cannot read LNKCAP register\n", __FUNCTION__);
1030 return retval; 907 return retval;
1031 } 908 }
1032 909
@@ -1047,27 +924,16 @@ static int hpc_get_max_lnk_speed (struct slot *slot, enum pci_bus_speed *value)
1047 924
1048static int hpc_get_max_lnk_width (struct slot *slot, enum pcie_link_width *value) 925static int hpc_get_max_lnk_width (struct slot *slot, enum pcie_link_width *value)
1049{ 926{
1050 struct php_ctlr_state_s *php_ctlr = slot->ctrl->hpc_ctlr_handle; 927 struct controller *ctrl = slot->ctrl;
1051 enum pcie_link_width lnk_wdth; 928 enum pcie_link_width lnk_wdth;
1052 u32 lnk_cap; 929 u32 lnk_cap;
1053 int retval = 0; 930 int retval = 0;
1054 931
1055 DBG_ENTER_ROUTINE 932 DBG_ENTER_ROUTINE
1056 933
1057 if (!php_ctlr) { 934 retval = pciehp_readl(ctrl, LNKCAP, &lnk_cap);
1058 err("%s: Invalid HPC controller handle!\n", __FUNCTION__);
1059 return -1;
1060 }
1061
1062 if (slot->hp_slot >= php_ctlr->num_slots) {
1063 err("%s: Invalid HPC slot number!\n", __FUNCTION__);
1064 return -1;
1065 }
1066
1067 retval = hp_register_read_dword(php_ctlr->pci_dev, LNK_CAP(slot->ctrl->cap_base), lnk_cap);
1068
1069 if (retval) { 935 if (retval) {
1070 err("%s : hp_register_read_dword LNK_CAP failed\n", __FUNCTION__); 936 err("%s: Cannot read LNKCAP register\n", __FUNCTION__);
1071 return retval; 937 return retval;
1072 } 938 }
1073 939
@@ -1109,27 +975,16 @@ static int hpc_get_max_lnk_width (struct slot *slot, enum pcie_link_width *value
1109 975
1110static int hpc_get_cur_lnk_speed (struct slot *slot, enum pci_bus_speed *value) 976static int hpc_get_cur_lnk_speed (struct slot *slot, enum pci_bus_speed *value)
1111{ 977{
1112 struct php_ctlr_state_s *php_ctlr = slot->ctrl->hpc_ctlr_handle; 978 struct controller *ctrl = slot->ctrl;
1113 enum pcie_link_speed lnk_speed = PCI_SPEED_UNKNOWN; 979 enum pcie_link_speed lnk_speed = PCI_SPEED_UNKNOWN;
1114 int retval = 0; 980 int retval = 0;
1115 u16 lnk_status; 981 u16 lnk_status;
1116 982
1117 DBG_ENTER_ROUTINE 983 DBG_ENTER_ROUTINE
1118 984
1119 if (!php_ctlr) { 985 retval = pciehp_readw(ctrl, LNKSTATUS, &lnk_status);
1120 err("%s: Invalid HPC controller handle!\n", __FUNCTION__);
1121 return -1;
1122 }
1123
1124 if (slot->hp_slot >= php_ctlr->num_slots) {
1125 err("%s: Invalid HPC slot number!\n", __FUNCTION__);
1126 return -1;
1127 }
1128
1129 retval = hp_register_read_word(php_ctlr->pci_dev, LNK_STATUS(slot->ctrl->cap_base), lnk_status);
1130
1131 if (retval) { 986 if (retval) {
1132 err("%s : hp_register_read_word LNK_STATUS failed\n", __FUNCTION__); 987 err("%s: Cannot read LNKSTATUS register\n", __FUNCTION__);
1133 return retval; 988 return retval;
1134 } 989 }
1135 990
@@ -1150,27 +1005,16 @@ static int hpc_get_cur_lnk_speed (struct slot *slot, enum pci_bus_speed *value)
1150 1005
1151static int hpc_get_cur_lnk_width (struct slot *slot, enum pcie_link_width *value) 1006static int hpc_get_cur_lnk_width (struct slot *slot, enum pcie_link_width *value)
1152{ 1007{
1153 struct php_ctlr_state_s *php_ctlr = slot->ctrl->hpc_ctlr_handle; 1008 struct controller *ctrl = slot->ctrl;
1154 enum pcie_link_width lnk_wdth = PCIE_LNK_WIDTH_UNKNOWN; 1009 enum pcie_link_width lnk_wdth = PCIE_LNK_WIDTH_UNKNOWN;
1155 int retval = 0; 1010 int retval = 0;
1156 u16 lnk_status; 1011 u16 lnk_status;
1157 1012
1158 DBG_ENTER_ROUTINE 1013 DBG_ENTER_ROUTINE
1159 1014
1160 if (!php_ctlr) { 1015 retval = pciehp_readw(ctrl, LNKSTATUS, &lnk_status);
1161 err("%s: Invalid HPC controller handle!\n", __FUNCTION__);
1162 return -1;
1163 }
1164
1165 if (slot->hp_slot >= php_ctlr->num_slots) {
1166 err("%s: Invalid HPC slot number!\n", __FUNCTION__);
1167 return -1;
1168 }
1169
1170 retval = hp_register_read_word(php_ctlr->pci_dev, LNK_STATUS(slot->ctrl->cap_base), lnk_status);
1171
1172 if (retval) { 1016 if (retval) {
1173 err("%s : hp_register_read_word LNK_STATUS failed\n", __FUNCTION__); 1017 err("%s: Cannot read LNKSTATUS register\n", __FUNCTION__);
1174 return retval; 1018 return retval;
1175 } 1019 }
1176 1020
@@ -1218,6 +1062,8 @@ static struct hpc_ops pciehp_hpc_ops = {
1218 .get_attention_status = hpc_get_attention_status, 1062 .get_attention_status = hpc_get_attention_status,
1219 .get_latch_status = hpc_get_latch_status, 1063 .get_latch_status = hpc_get_latch_status,
1220 .get_adapter_status = hpc_get_adapter_status, 1064 .get_adapter_status = hpc_get_adapter_status,
1065 .get_emi_status = hpc_get_emi_status,
1066 .toggle_emi = hpc_toggle_emi,
1221 1067
1222 .get_max_bus_speed = hpc_get_max_lnk_speed, 1068 .get_max_bus_speed = hpc_get_max_lnk_speed,
1223 .get_cur_bus_speed = hpc_get_cur_lnk_speed, 1069 .get_cur_bus_speed = hpc_get_cur_lnk_speed,
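The get_emi_status and toggle_emi entries added to pciehp_hpc_ops are reached like every other callback in the table, through the slot's hpc_ops pointer. A hypothetical caller (the helper name and the -ENODEV guard are illustrative, not part of this patch):

/* Illustrative only: toggle the electromechanical interlock (EMI)
 * via the per-slot ops table, skipping controllers without EMI. */
static int example_toggle_emi(struct slot *slot)
{
	if (!slot->hpc_ops->toggle_emi)
		return -ENODEV;
	return slot->hpc_ops->toggle_emi(slot);
}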
@@ -1305,38 +1151,24 @@ int pciehp_acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev)
1305 1151
1306int pcie_init(struct controller * ctrl, struct pcie_device *dev) 1152int pcie_init(struct controller * ctrl, struct pcie_device *dev)
1307{ 1153{
1308 struct php_ctlr_state_s *php_ctlr, *p;
1309 void *instance_id = ctrl;
1310 int rc; 1154 int rc;
1311 static int first = 1; 1155 static int first = 1;
1312 u16 temp_word; 1156 u16 temp_word;
1313 u16 cap_reg; 1157 u16 cap_reg;
1314 u16 intr_enable = 0; 1158 u16 intr_enable = 0;
1315 u32 slot_cap; 1159 u32 slot_cap;
1316 int cap_base, saved_cap_base; 1160 int cap_base;
1317 u16 slot_status, slot_ctrl; 1161 u16 slot_status, slot_ctrl;
1318 struct pci_dev *pdev; 1162 struct pci_dev *pdev;
1319 1163
1320 DBG_ENTER_ROUTINE 1164 DBG_ENTER_ROUTINE
1321 1165
1322 spin_lock_init(&list_lock);
1323 php_ctlr = kmalloc(sizeof(struct php_ctlr_state_s), GFP_KERNEL);
1324
1325 if (!php_ctlr) { /* allocate controller state data */
1326 err("%s: HPC controller memory allocation error!\n", __FUNCTION__);
1327 goto abort;
1328 }
1329
1330 memset(php_ctlr, 0, sizeof(struct php_ctlr_state_s));
1331
1332 pdev = dev->port; 1166 pdev = dev->port;
1333 php_ctlr->pci_dev = pdev; /* save pci_dev in context */ 1167 ctrl->pci_dev = pdev; /* save pci_dev in context */
1334 1168
1335 dbg("%s: hotplug controller vendor id 0x%x device id 0x%x\n", 1169 dbg("%s: hotplug controller vendor id 0x%x device id 0x%x\n",
1336 __FUNCTION__, pdev->vendor, pdev->device); 1170 __FUNCTION__, pdev->vendor, pdev->device);
1337 1171
1338 saved_cap_base = pcie_cap_base;
1339
1340 if ((cap_base = pci_find_capability(pdev, PCI_CAP_ID_EXP)) == 0) { 1172 if ((cap_base = pci_find_capability(pdev, PCI_CAP_ID_EXP)) == 0) {
1341 dbg("%s: Can't find PCI_CAP_ID_EXP (0x10)\n", __FUNCTION__); 1173 dbg("%s: Can't find PCI_CAP_ID_EXP (0x10)\n", __FUNCTION__);
1342 goto abort_free_ctlr; 1174 goto abort_free_ctlr;
@@ -1344,14 +1176,15 @@ int pcie_init(struct controller * ctrl, struct pcie_device *dev)
1344 1176
1345 ctrl->cap_base = cap_base; 1177 ctrl->cap_base = cap_base;
1346 1178
1347 dbg("%s: pcie_cap_base %x\n", __FUNCTION__, pcie_cap_base); 1179 dbg("%s: pcie_cap_base %x\n", __FUNCTION__, cap_base);
1348 1180
1349 rc = hp_register_read_word(pdev, CAP_REG(ctrl->cap_base), cap_reg); 1181 rc = pciehp_readw(ctrl, CAPREG, &cap_reg);
1350 if (rc) { 1182 if (rc) {
1351 err("%s : hp_register_read_word CAP_REG failed\n", __FUNCTION__); 1183 err("%s: Cannot read CAPREG register\n", __FUNCTION__);
1352 goto abort_free_ctlr; 1184 goto abort_free_ctlr;
1353 } 1185 }
1354 dbg("%s: CAP_REG offset %x cap_reg %x\n", __FUNCTION__, CAP_REG(ctrl->cap_base), cap_reg); 1186 dbg("%s: CAPREG offset %x cap_reg %x\n",
1187 __FUNCTION__, ctrl->cap_base + CAPREG, cap_reg);
1355 1188
1356 if (((cap_reg & SLOT_IMPL) == 0) || (((cap_reg & DEV_PORT_TYPE) != 0x0040) 1189 if (((cap_reg & SLOT_IMPL) == 0) || (((cap_reg & DEV_PORT_TYPE) != 0x0040)
1357 && ((cap_reg & DEV_PORT_TYPE) != 0x0060))) { 1190 && ((cap_reg & DEV_PORT_TYPE) != 0x0060))) {
@@ -1359,31 +1192,34 @@ int pcie_init(struct controller * ctrl, struct pcie_device *dev)
1359 goto abort_free_ctlr; 1192 goto abort_free_ctlr;
1360 } 1193 }
1361 1194
1362 rc = hp_register_read_dword(php_ctlr->pci_dev, SLOT_CAP(ctrl->cap_base), slot_cap); 1195 rc = pciehp_readl(ctrl, SLOTCAP, &slot_cap);
1363 if (rc) { 1196 if (rc) {
1364 err("%s : hp_register_read_word CAP_REG failed\n", __FUNCTION__); 1197 err("%s: Cannot read SLOTCAP register\n", __FUNCTION__);
1365 goto abort_free_ctlr; 1198 goto abort_free_ctlr;
1366 } 1199 }
1367 dbg("%s: SLOT_CAP offset %x slot_cap %x\n", __FUNCTION__, SLOT_CAP(ctrl->cap_base), slot_cap); 1200 dbg("%s: SLOTCAP offset %x slot_cap %x\n",
1201 __FUNCTION__, ctrl->cap_base + SLOTCAP, slot_cap);
1368 1202
1369 if (!(slot_cap & HP_CAP)) { 1203 if (!(slot_cap & HP_CAP)) {
1370 dbg("%s : This slot is not hot-plug capable\n", __FUNCTION__); 1204 dbg("%s : This slot is not hot-plug capable\n", __FUNCTION__);
1371 goto abort_free_ctlr; 1205 goto abort_free_ctlr;
1372 } 1206 }
1373 	/* For debugging purposes */ 1207 	/* For debugging purposes */
1374 rc = hp_register_read_word(php_ctlr->pci_dev, SLOT_STATUS(ctrl->cap_base), slot_status); 1208 rc = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
1375 if (rc) { 1209 if (rc) {
1376 err("%s : hp_register_read_word SLOT_STATUS failed\n", __FUNCTION__); 1210 err("%s: Cannot read SLOTSTATUS register\n", __FUNCTION__);
1377 goto abort_free_ctlr; 1211 goto abort_free_ctlr;
1378 } 1212 }
1379 dbg("%s: SLOT_STATUS offset %x slot_status %x\n", __FUNCTION__, SLOT_STATUS(ctrl->cap_base), slot_status); 1213 dbg("%s: SLOTSTATUS offset %x slot_status %x\n",
1214 __FUNCTION__, ctrl->cap_base + SLOTSTATUS, slot_status);
1380 1215
1381 rc = hp_register_read_word(php_ctlr->pci_dev, SLOT_CTRL(ctrl->cap_base), slot_ctrl); 1216 rc = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl);
1382 if (rc) { 1217 if (rc) {
1383 err("%s : hp_register_read_word SLOT_CTRL failed\n", __FUNCTION__); 1218 err("%s: Cannot read SLOTCTRL register\n", __FUNCTION__);
1384 goto abort_free_ctlr; 1219 goto abort_free_ctlr;
1385 } 1220 }
1386 dbg("%s: SLOT_CTRL offset %x slot_ctrl %x\n", __FUNCTION__, SLOT_CTRL(ctrl->cap_base), slot_ctrl); 1221 dbg("%s: SLOTCTRL offset %x slot_ctrl %x\n",
1222 __FUNCTION__, ctrl->cap_base + SLOTCTRL, slot_ctrl);
1387 1223
1388 if (first) { 1224 if (first) {
1389 spin_lock_init(&hpc_event_lock); 1225 spin_lock_init(&hpc_event_lock);
@@ -1405,69 +1241,64 @@ int pcie_init(struct controller * ctrl, struct pcie_device *dev)
1405 /* setup wait queue */ 1241 /* setup wait queue */
1406 init_waitqueue_head(&ctrl->queue); 1242 init_waitqueue_head(&ctrl->queue);
1407 1243
1408 /* find the IRQ */
1409 php_ctlr->irq = dev->irq;
1410
1411 /* Save interrupt callback info */
1412 php_ctlr->attention_button_callback = pciehp_handle_attention_button;
1413 php_ctlr->switch_change_callback = pciehp_handle_switch_change;
1414 php_ctlr->presence_change_callback = pciehp_handle_presence_change;
1415 php_ctlr->power_fault_callback = pciehp_handle_power_fault;
1416 php_ctlr->callback_instance_id = instance_id;
1417
1418 /* return PCI Controller Info */ 1244 /* return PCI Controller Info */
1419 php_ctlr->slot_device_offset = 0; 1245 ctrl->slot_device_offset = 0;
1420 php_ctlr->num_slots = 1; 1246 ctrl->num_slots = 1;
1247 ctrl->first_slot = slot_cap >> 19;
1248 ctrl->ctrlcap = slot_cap & 0x0000007f;
1421 1249
1422 /* Mask Hot-plug Interrupt Enable */ 1250 /* Mask Hot-plug Interrupt Enable */
1423 rc = hp_register_read_word(pdev, SLOT_CTRL(ctrl->cap_base), temp_word); 1251 rc = pciehp_readw(ctrl, SLOTCTRL, &temp_word);
1424 if (rc) { 1252 if (rc) {
1425 err("%s : hp_register_read_word SLOT_CTRL failed\n", __FUNCTION__); 1253 err("%s: Cannot read SLOTCTRL register\n", __FUNCTION__);
1426 goto abort_free_ctlr; 1254 goto abort_free_ctlr;
1427 } 1255 }
1428 1256
1429 dbg("%s: SLOT_CTRL %x value read %x\n", __FUNCTION__, SLOT_CTRL(ctrl->cap_base), temp_word); 1257 dbg("%s: SLOTCTRL %x value read %x\n",
1258 __FUNCTION__, ctrl->cap_base + SLOTCTRL, temp_word);
1430 temp_word = (temp_word & ~HP_INTR_ENABLE & ~CMD_CMPL_INTR_ENABLE) | 0x00; 1259 temp_word = (temp_word & ~HP_INTR_ENABLE & ~CMD_CMPL_INTR_ENABLE) | 0x00;
1431 1260
1432 rc = hp_register_write_word(pdev, SLOT_CTRL(ctrl->cap_base), temp_word); 1261 rc = pciehp_writew(ctrl, SLOTCTRL, temp_word);
1433 if (rc) { 1262 if (rc) {
1434 err("%s : hp_register_write_word SLOT_CTRL failed\n", __FUNCTION__); 1263 err("%s: Cannot write to SLOTCTRL register\n", __FUNCTION__);
1435 goto abort_free_ctlr; 1264 goto abort_free_ctlr;
1436 } 1265 }
1437 1266
1438 rc = hp_register_read_word(php_ctlr->pci_dev, SLOT_STATUS(ctrl->cap_base), slot_status); 1267 rc = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
1439 if (rc) { 1268 if (rc) {
1440 err("%s : hp_register_read_word SLOT_STATUS failed\n", __FUNCTION__); 1269 err("%s: Cannot read SLOTSTATUS register\n", __FUNCTION__);
1441 goto abort_free_ctlr; 1270 goto abort_free_ctlr;
1442 } 1271 }
1443 1272
1444 temp_word = 0x1F; /* Clear all events */ 1273 temp_word = 0x1F; /* Clear all events */
1445 rc = hp_register_write_word(php_ctlr->pci_dev, SLOT_STATUS(ctrl->cap_base), temp_word); 1274 rc = pciehp_writew(ctrl, SLOTSTATUS, temp_word);
1446 if (rc) { 1275 if (rc) {
1447 err("%s : hp_register_write_word SLOT_STATUS failed\n", __FUNCTION__); 1276 err("%s: Cannot write to SLOTSTATUS register\n", __FUNCTION__);
1448 goto abort_free_ctlr; 1277 goto abort_free_ctlr;
1449 } 1278 }
1450 1279
1451 if (pciehp_poll_mode) {/* Install interrupt polling code */ 1280 if (pciehp_poll_mode) {
1452 /* Install and start the interrupt polling timer */ 1281 /* Install interrupt polling timer. Start with 10 sec delay */
1453 init_timer(&php_ctlr->int_poll_timer); 1282 init_timer(&ctrl->poll_timer);
1454 start_int_poll_timer( php_ctlr, 10 ); /* start with 10 second delay */ 1283 start_int_poll_timer(ctrl, 10);
1455 } else { 1284 } else {
1456 /* Installs the interrupt handler */ 1285 /* Installs the interrupt handler */
1457 rc = request_irq(php_ctlr->irq, pcie_isr, IRQF_SHARED, MY_NAME, (void *) ctrl); 1286 rc = request_irq(ctrl->pci_dev->irq, pcie_isr, IRQF_SHARED,
1458 dbg("%s: request_irq %d for hpc%d (returns %d)\n", __FUNCTION__, php_ctlr->irq, ctlr_seq_num, rc); 1287 MY_NAME, (void *)ctrl);
1288 dbg("%s: request_irq %d for hpc%d (returns %d)\n",
1289 __FUNCTION__, ctrl->pci_dev->irq, ctlr_seq_num, rc);
1459 if (rc) { 1290 if (rc) {
1460 err("Can't get irq %d for the hotplug controller\n", php_ctlr->irq); 1291 err("Can't get irq %d for the hotplug controller\n",
1292 ctrl->pci_dev->irq);
1461 goto abort_free_ctlr; 1293 goto abort_free_ctlr;
1462 } 1294 }
1463 } 1295 }
1464
1465 dbg("pciehp ctrl b:d:f:irq=0x%x:%x:%x:%x\n", pdev->bus->number, 1296 dbg("pciehp ctrl b:d:f:irq=0x%x:%x:%x:%x\n", pdev->bus->number,
1466 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), dev->irq); 1297 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), dev->irq);
1467 1298
1468 rc = hp_register_read_word(pdev, SLOT_CTRL(ctrl->cap_base), temp_word); 1299 rc = pciehp_readw(ctrl, SLOTCTRL, &temp_word);
1469 if (rc) { 1300 if (rc) {
1470 err("%s : hp_register_read_word SLOT_CTRL failed\n", __FUNCTION__); 1301 err("%s: Cannot read SLOTCTRL register\n", __FUNCTION__);
1471 goto abort_free_irq; 1302 goto abort_free_irq;
1472 } 1303 }
1473 1304
@@ -1491,21 +1322,21 @@ int pcie_init(struct controller * ctrl, struct pcie_device *dev)
1491 } 1322 }
1492 1323
1493 /* Unmask Hot-plug Interrupt Enable for the interrupt notification mechanism case */ 1324 /* Unmask Hot-plug Interrupt Enable for the interrupt notification mechanism case */
1494 rc = hp_register_write_word(pdev, SLOT_CTRL(ctrl->cap_base), temp_word); 1325 rc = pciehp_writew(ctrl, SLOTCTRL, temp_word);
1495 if (rc) { 1326 if (rc) {
1496 err("%s : hp_register_write_word SLOT_CTRL failed\n", __FUNCTION__); 1327 err("%s: Cannot write to SLOTCTRL register\n", __FUNCTION__);
1497 goto abort_free_irq; 1328 goto abort_free_irq;
1498 } 1329 }
1499 rc = hp_register_read_word(php_ctlr->pci_dev, SLOT_STATUS(ctrl->cap_base), slot_status); 1330 rc = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
1500 if (rc) { 1331 if (rc) {
1501 err("%s : hp_register_read_word SLOT_STATUS failed\n", __FUNCTION__); 1332 err("%s: Cannot read SLOTSTATUS register\n", __FUNCTION__);
1502 goto abort_disable_intr; 1333 goto abort_disable_intr;
1503 } 1334 }
1504 1335
1505 temp_word = 0x1F; /* Clear all events */ 1336 temp_word = 0x1F; /* Clear all events */
1506 rc = hp_register_write_word(php_ctlr->pci_dev, SLOT_STATUS(ctrl->cap_base), temp_word); 1337 rc = pciehp_writew(ctrl, SLOTSTATUS, temp_word);
1507 if (rc) { 1338 if (rc) {
1508 err("%s : hp_register_write_word SLOT_STATUS failed\n", __FUNCTION__); 1339 err("%s: Cannot write to SLOTSTATUS register\n", __FUNCTION__);
1509 goto abort_disable_intr; 1340 goto abort_disable_intr;
1510 } 1341 }
1511 1342
@@ -1518,24 +1349,7 @@ int pcie_init(struct controller * ctrl, struct pcie_device *dev)
1518 goto abort_disable_intr; 1349 goto abort_disable_intr;
1519 } 1350 }
1520 1351
1521 /* Add this HPC instance into the HPC list */
1522 spin_lock(&list_lock);
1523 if (php_ctlr_list_head == 0) {
1524 php_ctlr_list_head = php_ctlr;
1525 p = php_ctlr_list_head;
1526 p->pnext = NULL;
1527 } else {
1528 p = php_ctlr_list_head;
1529
1530 while (p->pnext)
1531 p = p->pnext;
1532
1533 p->pnext = php_ctlr;
1534 }
1535 spin_unlock(&list_lock);
1536
1537 ctlr_seq_num++; 1352 ctlr_seq_num++;
1538 ctrl->hpc_ctlr_handle = php_ctlr;
1539 ctrl->hpc_ops = &pciehp_hpc_ops; 1353 ctrl->hpc_ops = &pciehp_hpc_ops;
1540 1354
1541 DBG_LEAVE_ROUTINE 1355 DBG_LEAVE_ROUTINE
@@ -1543,24 +1357,21 @@ int pcie_init(struct controller * ctrl, struct pcie_device *dev)
1543 1357
1544 /* We end up here for the many possible ways to fail this API. */ 1358 /* We end up here for the many possible ways to fail this API. */
1545abort_disable_intr: 1359abort_disable_intr:
1546 rc = hp_register_read_word(pdev, SLOT_CTRL(ctrl->cap_base), temp_word); 1360 rc = pciehp_readw(ctrl, SLOTCTRL, &temp_word);
1547 if (!rc) { 1361 if (!rc) {
1548 temp_word &= ~(intr_enable | HP_INTR_ENABLE); 1362 temp_word &= ~(intr_enable | HP_INTR_ENABLE);
1549 rc = hp_register_write_word(pdev, SLOT_CTRL(ctrl->cap_base), temp_word); 1363 rc = pciehp_writew(ctrl, SLOTCTRL, temp_word);
1550 } 1364 }
1551 if (rc) 1365 if (rc)
1552 err("%s : disabling interrupts failed\n", __FUNCTION__); 1366 err("%s : disabling interrupts failed\n", __FUNCTION__);
1553 1367
1554abort_free_irq: 1368abort_free_irq:
1555 if (pciehp_poll_mode) 1369 if (pciehp_poll_mode)
1556 del_timer_sync(&php_ctlr->int_poll_timer); 1370 del_timer_sync(&ctrl->poll_timer);
1557 else 1371 else
1558 free_irq(php_ctlr->irq, ctrl); 1372 free_irq(ctrl->pci_dev->irq, ctrl);
1559 1373
1560abort_free_ctlr: 1374abort_free_ctlr:
1561 pcie_cap_base = saved_cap_base;
1562 kfree(php_ctlr);
1563abort:
1564 DBG_LEAVE_ROUTINE 1375 DBG_LEAVE_ROUTINE
1565 return -1; 1376 return -1;
1566} 1377}
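Throughout pciehp_hpc.c this patch swaps hp_register_read_word()/hp_register_write_word(), which took a pci_dev plus a macro-computed offset, for pciehp_readw()/pciehp_writew() taking the controller and a bare register offset. A minimal sketch of what such accessors are assumed to look like; the real definitions live elsewhere in the driver and may differ:

/* Sketch: thin config-space accessors that fold in the controller's
 * saved PCI Express capability offset (ctrl->cap_base). */
static inline int pciehp_readw(struct controller *ctrl, int reg, u16 *value)
{
	return pci_read_config_word(ctrl->pci_dev, ctrl->cap_base + reg, value);
}

static inline int pciehp_writew(struct controller *ctrl, int reg, u16 value)
{
	return pci_write_config_word(ctrl->pci_dev, ctrl->cap_base + reg, value);
}

static inline int pciehp_readl(struct controller *ctrl, int reg, u32 *value)
{
	return pci_read_config_dword(ctrl->pci_dev, ctrl->cap_base + reg, value);
}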
diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c
index 5d188c558386..78cf0711d1fa 100644
--- a/drivers/pci/hotplug/sgi_hotplug.c
+++ b/drivers/pci/hotplug/sgi_hotplug.c
@@ -28,6 +28,8 @@
28#include <asm/sn/sn_feature_sets.h> 28#include <asm/sn/sn_feature_sets.h>
29#include <asm/sn/sn_sal.h> 29#include <asm/sn/sn_sal.h>
30#include <asm/sn/types.h> 30#include <asm/sn/types.h>
31#include <linux/acpi.h>
32#include <asm/sn/acpi.h>
31 33
32#include "../pci.h" 34#include "../pci.h"
33 35
@@ -35,14 +37,17 @@ MODULE_LICENSE("GPL");
35MODULE_AUTHOR("SGI (prarit@sgi.com, dickie@sgi.com, habeck@sgi.com)"); 37MODULE_AUTHOR("SGI (prarit@sgi.com, dickie@sgi.com, habeck@sgi.com)");
36MODULE_DESCRIPTION("SGI Altix Hot Plug PCI Controller Driver"); 38MODULE_DESCRIPTION("SGI Altix Hot Plug PCI Controller Driver");
37 39
38#define PCIIO_ASIC_TYPE_TIOCA 4 40
41/* SAL call error codes. Keep in sync with prom header io/include/pcibr.h */
39#define PCI_SLOT_ALREADY_UP 2 /* slot already up */ 42#define PCI_SLOT_ALREADY_UP 2 /* slot already up */
40#define PCI_SLOT_ALREADY_DOWN 3 /* slot already down */ 43#define PCI_SLOT_ALREADY_DOWN 3 /* slot already down */
41#define PCI_L1_ERR 7 /* L1 console command error */ 44#define PCI_L1_ERR 7 /* L1 console command error */
42#define PCI_EMPTY_33MHZ 15 /* empty 33 MHz bus */ 45#define PCI_EMPTY_33MHZ 15 /* empty 33 MHz bus */
46
47
48#define PCIIO_ASIC_TYPE_TIOCA 4
43#define PCI_L1_QSIZE 128 /* our L1 message buffer size */ 49#define PCI_L1_QSIZE 128 /* our L1 message buffer size */
44#define SN_MAX_HP_SLOTS 32 /* max hotplug slots */ 50#define SN_MAX_HP_SLOTS 32 /* max hotplug slots */
45#define SGI_HOTPLUG_PROM_REV 0x0430 /* Min. required PROM version */
46#define SN_SLOT_NAME_SIZE 33 /* size of name string */ 51#define SN_SLOT_NAME_SIZE 33 /* size of name string */
47 52
48/* internal list head */ 53/* internal list head */
@@ -227,7 +232,7 @@ static void sn_bus_free_data(struct pci_dev *dev)
227} 232}
228 233
229static int sn_slot_enable(struct hotplug_slot *bss_hotplug_slot, 234static int sn_slot_enable(struct hotplug_slot *bss_hotplug_slot,
230 int device_num) 235 int device_num, char **ssdt)
231{ 236{
232 struct slot *slot = bss_hotplug_slot->private; 237 struct slot *slot = bss_hotplug_slot->private;
233 struct pcibus_info *pcibus_info; 238 struct pcibus_info *pcibus_info;
@@ -240,7 +245,8 @@ static int sn_slot_enable(struct hotplug_slot *bss_hotplug_slot,
240 * Power-on and initialize the slot in the SN 245 * Power-on and initialize the slot in the SN
241 * PCI infrastructure. 246 * PCI infrastructure.
242 */ 247 */
243 rc = sal_pcibr_slot_enable(pcibus_info, device_num, &resp); 248 rc = sal_pcibr_slot_enable(pcibus_info, device_num, &resp, ssdt);
249
244 250
245 if (rc == PCI_SLOT_ALREADY_UP) { 251 if (rc == PCI_SLOT_ALREADY_UP) {
246 dev_dbg(slot->pci_bus->self, "is already active\n"); 252 dev_dbg(slot->pci_bus->self, "is already active\n");
@@ -335,6 +341,7 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
335 int func, num_funcs; 341 int func, num_funcs;
336 int new_ppb = 0; 342 int new_ppb = 0;
337 int rc; 343 int rc;
344 char *ssdt = NULL;
338 void pcibios_fixup_device_resources(struct pci_dev *); 345 void pcibios_fixup_device_resources(struct pci_dev *);
339 346
340 /* Serialize the Linux PCI infrastructure */ 347 /* Serialize the Linux PCI infrastructure */
@@ -342,14 +349,29 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
342 349
343 /* 350 /*
344 * Power-on and initialize the slot in the SN 351 * Power-on and initialize the slot in the SN
345 * PCI infrastructure. 352 * PCI infrastructure. Also, retrieve the ACPI SSDT
353 * table for the slot (if ACPI capable PROM).
346 */ 354 */
347 rc = sn_slot_enable(bss_hotplug_slot, slot->device_num); 355 rc = sn_slot_enable(bss_hotplug_slot, slot->device_num, &ssdt);
348 if (rc) { 356 if (rc) {
349 mutex_unlock(&sn_hotplug_mutex); 357 mutex_unlock(&sn_hotplug_mutex);
350 return rc; 358 return rc;
351 } 359 }
352 360
361 if (ssdt)
362 ssdt = __va(ssdt);
363 /* Add the new SSDT for the slot to the ACPI namespace */
364 if (SN_ACPI_BASE_SUPPORT() && ssdt) {
365 acpi_status ret;
366
367 ret = acpi_load_table((struct acpi_table_header *)ssdt);
368 if (ACPI_FAILURE(ret)) {
369 printk(KERN_ERR "%s: acpi_load_table failed (0x%x)\n",
370 __FUNCTION__, ret);
371 /* try to continue on */
372 }
373 }
374
353 num_funcs = pci_scan_slot(slot->pci_bus, 375 num_funcs = pci_scan_slot(slot->pci_bus,
354 PCI_DEVFN(slot->device_num + 1, 0)); 376 PCI_DEVFN(slot->device_num + 1, 0));
355 if (!num_funcs) { 377 if (!num_funcs) {
@@ -374,7 +396,10 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
374 * pdi_host_pcidev_info). 396 * pdi_host_pcidev_info).
375 */ 397 */
376 pcibios_fixup_device_resources(dev); 398 pcibios_fixup_device_resources(dev);
377 sn_pci_fixup_slot(dev); 399 if (SN_ACPI_BASE_SUPPORT())
400 sn_acpi_slot_fixup(dev);
401 else
402 sn_io_slot_fixup(dev);
378 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) { 403 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
379 unsigned char sec_bus; 404 unsigned char sec_bus;
380 pci_read_config_byte(dev, PCI_SECONDARY_BUS, 405 pci_read_config_byte(dev, PCI_SECONDARY_BUS,
@@ -388,6 +413,63 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
388 } 413 }
389 } 414 }
390 415
 416 	/* Add the slot's devices to the
 417 	 * ACPI infrastructure */
418 if (SN_ACPI_BASE_SUPPORT() && ssdt) {
419 unsigned long adr;
420 struct acpi_device *pdevice;
421 struct acpi_device *device;
422 acpi_handle phandle;
423 acpi_handle chandle = NULL;
424 acpi_handle rethandle;
425 acpi_status ret;
426
427 phandle = PCI_CONTROLLER(slot->pci_bus)->acpi_handle;
428
429 if (acpi_bus_get_device(phandle, &pdevice)) {
430 dev_dbg(slot->pci_bus->self,
431 "no parent device, assuming NULL\n");
432 pdevice = NULL;
433 }
434
435 /*
436 * Walk the rootbus node's immediate children looking for
437 * the slot's device node(s). There can be more than
438 * one for multifunction devices.
439 */
440 for (;;) {
441 rethandle = NULL;
442 ret = acpi_get_next_object(ACPI_TYPE_DEVICE,
443 phandle, chandle,
444 &rethandle);
445
446 if (ret == AE_NOT_FOUND || rethandle == NULL)
447 break;
448
449 chandle = rethandle;
450
451 ret = acpi_evaluate_integer(chandle, METHOD_NAME__ADR,
452 NULL, &adr);
453
454 if (ACPI_SUCCESS(ret) &&
455 (adr>>16) == (slot->device_num + 1)) {
456
457 ret = acpi_bus_add(&device, pdevice, chandle,
458 ACPI_BUS_TYPE_DEVICE);
459 if (ACPI_FAILURE(ret)) {
460 printk(KERN_ERR "%s: acpi_bus_add "
461 "failed (0x%x) for slot %d "
462 "func %d\n", __FUNCTION__,
463 ret, (int)(adr>>16),
464 (int)(adr&0xffff));
465 /* try to continue on */
466 } else {
467 acpi_bus_start(device);
468 }
469 }
470 }
471 }
472
391 /* Call the driver for the new device */ 473 /* Call the driver for the new device */
392 pci_bus_add_devices(slot->pci_bus); 474 pci_bus_add_devices(slot->pci_bus);
393 /* Call the drivers for the new devices subordinate to PPB */ 475 /* Call the drivers for the new devices subordinate to PPB */
@@ -412,6 +494,7 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
412 struct pci_dev *dev; 494 struct pci_dev *dev;
413 int func; 495 int func;
414 int rc; 496 int rc;
497 acpi_owner_id ssdt_id = 0;
415 498
416 /* Acquire update access to the bus */ 499 /* Acquire update access to the bus */
417 mutex_lock(&sn_hotplug_mutex); 500 mutex_lock(&sn_hotplug_mutex);
@@ -422,6 +505,52 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
422 if (rc) 505 if (rc)
423 goto leaving; 506 goto leaving;
424 507
508 /* free the ACPI resources for the slot */
509 if (SN_ACPI_BASE_SUPPORT() &&
510 PCI_CONTROLLER(slot->pci_bus)->acpi_handle) {
511 unsigned long adr;
512 struct acpi_device *device;
513 acpi_handle phandle;
514 acpi_handle chandle = NULL;
515 acpi_handle rethandle;
516 acpi_status ret;
517
518 /* Get the rootbus node pointer */
519 phandle = PCI_CONTROLLER(slot->pci_bus)->acpi_handle;
520
521 /*
522 * Walk the rootbus node's immediate children looking for
523 * the slot's device node(s). There can be more than
524 * one for multifunction devices.
525 */
526 for (;;) {
527 rethandle = NULL;
528 ret = acpi_get_next_object(ACPI_TYPE_DEVICE,
529 phandle, chandle,
530 &rethandle);
531
532 if (ret == AE_NOT_FOUND || rethandle == NULL)
533 break;
534
535 chandle = rethandle;
536
537 ret = acpi_evaluate_integer(chandle,
538 METHOD_NAME__ADR,
539 NULL, &adr);
540 if (ACPI_SUCCESS(ret) &&
541 (adr>>16) == (slot->device_num + 1)) {
542 /* retain the owner id */
543 acpi_get_id(chandle, &ssdt_id);
544
545 ret = acpi_bus_get_device(chandle,
546 &device);
547 if (ACPI_SUCCESS(ret))
548 acpi_bus_trim(device, 1);
549 }
550 }
551
552 }
553
425 /* Free the SN resources assigned to the Linux device.*/ 554 /* Free the SN resources assigned to the Linux device.*/
426 for (func = 0; func < 8; func++) { 555 for (func = 0; func < 8; func++) {
427 dev = pci_get_slot(slot->pci_bus, 556 dev = pci_get_slot(slot->pci_bus,
@@ -434,6 +563,18 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
434 } 563 }
435 } 564 }
436 565
566 /* Remove the SSDT for the slot from the ACPI namespace */
567 if (SN_ACPI_BASE_SUPPORT() && ssdt_id) {
568 acpi_status ret;
569 ret = acpi_unload_table_id(ssdt_id);
570 if (ACPI_FAILURE(ret)) {
571 printk(KERN_ERR "%s: acpi_unload_table_id "
572 "failed (0x%x) for id %d\n",
573 __FUNCTION__, ret, ssdt_id);
574 /* try to continue on */
575 }
576 }
577
437 /* free the collected sysdata pointers */ 578 /* free the collected sysdata pointers */
438 sn_bus_free_sysdata(); 579 sn_bus_free_sysdata();
439 580
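Both namespace walks above, in enable_slot() and disable_slot(), match child devices by evaluating _ADR, which for PCI packs the device number in the upper 16 bits and the function number in the lower 16; hence the (adr>>16) == (slot->device_num + 1) test. A small sketch of the decoding, with hypothetical helper names:

/* Sketch: decode an ACPI _ADR value for a PCI device.
 * Per the ACPI spec, _ADR here is (device << 16) | function. */
static inline unsigned int adr_to_device(unsigned long adr)
{
	return (adr >> 16) & 0xffff;
}

static inline unsigned int adr_to_function(unsigned long adr)
{
	return adr & 0xffff;
}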
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index 3ca6a4f574b3..01d31a1f697c 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -106,7 +106,7 @@ struct controller {
106}; 106};
107 107
108/* Define AMD SHPC ID */ 108/* Define AMD SHPC ID */
109#define PCI_DEVICE_ID_AMD_GOLAM_7450 0x7450 109#define PCI_DEVICE_ID_AMD_GOLAM_7450 0x7450
110#define PCI_DEVICE_ID_AMD_POGO_7458 0x7458 110#define PCI_DEVICE_ID_AMD_POGO_7458 0x7458
111 111
112/* AMD PCIX bridge registers */ 112/* AMD PCIX bridge registers */
@@ -221,7 +221,7 @@ enum ctrl_offsets {
221}; 221};
222 222
223static inline struct slot *get_slot(struct hotplug_slot *hotplug_slot) 223static inline struct slot *get_slot(struct hotplug_slot *hotplug_slot)
224{ 224{
225 return hotplug_slot->private; 225 return hotplug_slot->private;
226} 226}
227 227
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index 590cd3cbe010..5f4bc08a633a 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -401,10 +401,6 @@ static int __init shpcd_init(void)
401{ 401{
402 int retval = 0; 402 int retval = 0;
403 403
404#ifdef CONFIG_HOTPLUG_PCI_SHPC_POLL_EVENT_MODE
405 shpchp_poll_mode = 1;
406#endif
407
408 retval = pci_register_driver(&shpc_driver); 404 retval = pci_register_driver(&shpc_driver);
409 dbg("%s: pci_register_driver = %d\n", __FUNCTION__, retval); 405 dbg("%s: pci_register_driver = %d\n", __FUNCTION__, retval);
410 info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); 406 info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
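With the CONFIG_HOTPLUG_PCI_SHPC_POLL_EVENT_MODE override gone, polling is presumably selected only through the driver's existing module parameter. A sketch of that mechanism as it is assumed to appear near the top of shpchp_core.c (exact permissions and description text may differ):

/* Assumed declaration: poll mode chosen at load time, e.g.
 * "modprobe shpchp shpchp_poll_mode=1". */
int shpchp_poll_mode;
module_param(shpchp_poll_mode, bool, 0644);
MODULE_PARM_DESC(shpchp_poll_mode, "Poll for hot-plug events instead of using interrupts");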
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
index 6bb84734cd6c..b746bd265bc6 100644
--- a/drivers/pci/hotplug/shpchp_ctrl.c
+++ b/drivers/pci/hotplug/shpchp_ctrl.c
@@ -64,7 +64,7 @@ u8 shpchp_handle_attention_button(u8 hp_slot, struct controller *ctrl)
64 64
65 /* Attention Button Change */ 65 /* Attention Button Change */
66 dbg("shpchp: Attention button interrupt received.\n"); 66 dbg("shpchp: Attention button interrupt received.\n");
67 67
68 p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset); 68 p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
69 p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save)); 69 p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save));
70 70
@@ -128,7 +128,7 @@ u8 shpchp_handle_presence_change(u8 hp_slot, struct controller *ctrl)
128 128
129 p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset); 129 p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
130 130
131 /* 131 /*
132 * Save the presence state 132 * Save the presence state
133 */ 133 */
134 p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save)); 134 p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save));
@@ -184,12 +184,12 @@ u8 shpchp_handle_power_fault(u8 hp_slot, struct controller *ctrl)
184 return 1; 184 return 1;
185} 185}
186 186
187/* The following routines constitute the bulk of the 187/* The following routines constitute the bulk of the
188 hotplug controller logic 188 hotplug controller logic
189 */ 189 */
190static int change_bus_speed(struct controller *ctrl, struct slot *p_slot, 190static int change_bus_speed(struct controller *ctrl, struct slot *p_slot,
191 enum pci_bus_speed speed) 191 enum pci_bus_speed speed)
192{ 192{
193 int rc = 0; 193 int rc = 0;
194 194
195 dbg("%s: change to speed %d\n", __FUNCTION__, speed); 195 dbg("%s: change to speed %d\n", __FUNCTION__, speed);
@@ -204,7 +204,7 @@ static int change_bus_speed(struct controller *ctrl, struct slot *p_slot,
204static int fix_bus_speed(struct controller *ctrl, struct slot *pslot, 204static int fix_bus_speed(struct controller *ctrl, struct slot *pslot,
205 u8 flag, enum pci_bus_speed asp, enum pci_bus_speed bsp, 205 u8 flag, enum pci_bus_speed asp, enum pci_bus_speed bsp,
206 enum pci_bus_speed msp) 206 enum pci_bus_speed msp)
207{ 207{
208 int rc = 0; 208 int rc = 0;
209 209
210 /* 210 /*
@@ -257,23 +257,23 @@ static int board_added(struct slot *p_slot)
257 err("%s: Failed to power on slot\n", __FUNCTION__); 257 err("%s: Failed to power on slot\n", __FUNCTION__);
258 return -1; 258 return -1;
259 } 259 }
260 260
261 if ((ctrl->pci_dev->vendor == 0x8086) && (ctrl->pci_dev->device == 0x0332)) { 261 if ((ctrl->pci_dev->vendor == 0x8086) && (ctrl->pci_dev->device == 0x0332)) {
262 if (slots_not_empty) 262 if (slots_not_empty)
263 return WRONG_BUS_FREQUENCY; 263 return WRONG_BUS_FREQUENCY;
264 264
265 if ((rc = p_slot->hpc_ops->set_bus_speed_mode(p_slot, PCI_SPEED_33MHz))) { 265 if ((rc = p_slot->hpc_ops->set_bus_speed_mode(p_slot, PCI_SPEED_33MHz))) {
266 err("%s: Issue of set bus speed mode command failed\n", __FUNCTION__); 266 err("%s: Issue of set bus speed mode command failed\n", __FUNCTION__);
267 return WRONG_BUS_FREQUENCY; 267 return WRONG_BUS_FREQUENCY;
268 } 268 }
269 269
270 /* turn on board, blink green LED, turn off Amber LED */ 270 /* turn on board, blink green LED, turn off Amber LED */
271 if ((rc = p_slot->hpc_ops->slot_enable(p_slot))) { 271 if ((rc = p_slot->hpc_ops->slot_enable(p_slot))) {
272 err("%s: Issue of Slot Enable command failed\n", __FUNCTION__); 272 err("%s: Issue of Slot Enable command failed\n", __FUNCTION__);
273 return rc; 273 return rc;
274 } 274 }
275 } 275 }
276 276
277 rc = p_slot->hpc_ops->get_adapter_speed(p_slot, &asp); 277 rc = p_slot->hpc_ops->get_adapter_speed(p_slot, &asp);
278 if (rc) { 278 if (rc) {
279 err("%s: Can't get adapter speed or bus mode mismatch\n", 279 err("%s: Can't get adapter speed or bus mode mismatch\n",
@@ -378,7 +378,7 @@ static int remove_board(struct slot *p_slot)
378 err("%s: Issue of Slot Disable command failed\n", __FUNCTION__); 378 err("%s: Issue of Slot Disable command failed\n", __FUNCTION__);
379 return rc; 379 return rc;
380 } 380 }
381 381
382 rc = p_slot->hpc_ops->set_attention_status(p_slot, 0); 382 rc = p_slot->hpc_ops->set_attention_status(p_slot, 0);
383 if (rc) { 383 if (rc) {
384 err("%s: Issue of Set Attention command failed\n", __FUNCTION__); 384 err("%s: Issue of Set Attention command failed\n", __FUNCTION__);
diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c
index b7bede4b7c27..5183a45d45b5 100644
--- a/drivers/pci/hotplug/shpchp_hpc.c
+++ b/drivers/pci/hotplug/shpchp_hpc.c
@@ -35,38 +35,6 @@
35 35
36#include "shpchp.h" 36#include "shpchp.h"
37 37
38#ifdef DEBUG
39#define DBG_K_TRACE_ENTRY ((unsigned int)0x00000001) /* On function entry */
40#define DBG_K_TRACE_EXIT ((unsigned int)0x00000002) /* On function exit */
41#define DBG_K_INFO ((unsigned int)0x00000004) /* Info messages */
42#define DBG_K_ERROR ((unsigned int)0x00000008) /* Error messages */
43#define DBG_K_TRACE (DBG_K_TRACE_ENTRY|DBG_K_TRACE_EXIT)
44#define DBG_K_STANDARD (DBG_K_INFO|DBG_K_ERROR|DBG_K_TRACE)
45/* Redefine this flagword to set debug level */
46#define DEBUG_LEVEL DBG_K_STANDARD
47
48#define DEFINE_DBG_BUFFER char __dbg_str_buf[256];
49
50#define DBG_PRINT( dbg_flags, args... ) \
51 do { \
52 if ( DEBUG_LEVEL & ( dbg_flags ) ) \
53 { \
54 int len; \
55 len = sprintf( __dbg_str_buf, "%s:%d: %s: ", \
56 __FILE__, __LINE__, __FUNCTION__ ); \
57 sprintf( __dbg_str_buf + len, args ); \
58 printk( KERN_NOTICE "%s\n", __dbg_str_buf ); \
59 } \
60 } while (0)
61
62#define DBG_ENTER_ROUTINE DBG_PRINT (DBG_K_TRACE_ENTRY, "%s", "[Entry]");
63#define DBG_LEAVE_ROUTINE DBG_PRINT (DBG_K_TRACE_EXIT, "%s", "[Exit]");
64#else
65#define DEFINE_DBG_BUFFER
66#define DBG_ENTER_ROUTINE
67#define DBG_LEAVE_ROUTINE
68#endif /* DEBUG */
69
70/* Slot Available Register I field definition */ 38/* Slot Available Register I field definition */
71#define SLOT_33MHZ 0x0000001f 39#define SLOT_33MHZ 0x0000001f
72#define SLOT_66MHZ_PCIX 0x00001f00 40#define SLOT_66MHZ_PCIX 0x00001f00
@@ -211,7 +179,6 @@
211#define SLOT_EVENT_LATCH 0x2 179#define SLOT_EVENT_LATCH 0x2
212#define SLOT_SERR_INT_MASK 0x3 180#define SLOT_SERR_INT_MASK 0x3
213 181
214DEFINE_DBG_BUFFER /* Debug string buffer for entire HPC defined here */
215static atomic_t shpchp_num_controllers = ATOMIC_INIT(0); 182static atomic_t shpchp_num_controllers = ATOMIC_INIT(0);
216 183
217static irqreturn_t shpc_isr(int irq, void *dev_id); 184static irqreturn_t shpc_isr(int irq, void *dev_id);
@@ -268,8 +235,6 @@ static void int_poll_timeout(unsigned long data)
268{ 235{
269 struct controller *ctrl = (struct controller *)data; 236 struct controller *ctrl = (struct controller *)data;
270 237
271 DBG_ENTER_ROUTINE
272
273 /* Poll for interrupt events. regs == NULL => polling */ 238 /* Poll for interrupt events. regs == NULL => polling */
274 shpc_isr(0, ctrl); 239 shpc_isr(0, ctrl);
275 240
@@ -278,8 +243,6 @@ static void int_poll_timeout(unsigned long data)
278 shpchp_poll_time = 2; /* default polling interval is 2 sec */ 243 shpchp_poll_time = 2; /* default polling interval is 2 sec */
279 244
280 start_int_poll_timer(ctrl, shpchp_poll_time); 245 start_int_poll_timer(ctrl, shpchp_poll_time);
281
282 DBG_LEAVE_ROUTINE
283} 246}
284 247
285/* 248/*
@@ -353,8 +316,6 @@ static int shpc_write_cmd(struct slot *slot, u8 t_slot, u8 cmd)
353 int retval = 0; 316 int retval = 0;
354 u16 temp_word; 317 u16 temp_word;
355 318
356 DBG_ENTER_ROUTINE
357
358 mutex_lock(&slot->ctrl->cmd_lock); 319 mutex_lock(&slot->ctrl->cmd_lock);
359 320
360 if (!shpc_poll_ctrl_busy(ctrl)) { 321 if (!shpc_poll_ctrl_busy(ctrl)) {
@@ -368,9 +329,9 @@ static int shpc_write_cmd(struct slot *slot, u8 t_slot, u8 cmd)
368 ++t_slot; 329 ++t_slot;
369 temp_word = (t_slot << 8) | (cmd & 0xFF); 330 temp_word = (t_slot << 8) | (cmd & 0xFF);
370 dbg("%s: t_slot %x cmd %x\n", __FUNCTION__, t_slot, cmd); 331 dbg("%s: t_slot %x cmd %x\n", __FUNCTION__, t_slot, cmd);
371 332
372 /* To make sure the Controller Busy bit is 0 before we send out the 333 /* To make sure the Controller Busy bit is 0 before we send out the
373 * command. 334 * command.
374 */ 335 */
375 shpc_writew(ctrl, CMD, temp_word); 336 shpc_writew(ctrl, CMD, temp_word);
376 337
@@ -389,20 +350,14 @@ static int shpc_write_cmd(struct slot *slot, u8 t_slot, u8 cmd)
389 } 350 }
390 out: 351 out:
391 mutex_unlock(&slot->ctrl->cmd_lock); 352 mutex_unlock(&slot->ctrl->cmd_lock);
392
393 DBG_LEAVE_ROUTINE
394 return retval; 353 return retval;
395} 354}
396 355
397static int hpc_check_cmd_status(struct controller *ctrl) 356static int hpc_check_cmd_status(struct controller *ctrl)
398{ 357{
399 u16 cmd_status;
400 int retval = 0; 358 int retval = 0;
359 u16 cmd_status = shpc_readw(ctrl, CMD_STATUS) & 0x000F;
401 360
402 DBG_ENTER_ROUTINE
403
404 cmd_status = shpc_readw(ctrl, CMD_STATUS) & 0x000F;
405
406 switch (cmd_status >> 1) { 361 switch (cmd_status >> 1) {
407 case 0: 362 case 0:
408 retval = 0; 363 retval = 0;
@@ -423,7 +378,6 @@ static int hpc_check_cmd_status(struct controller *ctrl)
423 retval = cmd_status; 378 retval = cmd_status;
424 } 379 }
425 380
426 DBG_LEAVE_ROUTINE
427 return retval; 381 return retval;
428} 382}
429 383
@@ -431,13 +385,8 @@ static int hpc_check_cmd_status(struct controller *ctrl)
431static int hpc_get_attention_status(struct slot *slot, u8 *status) 385static int hpc_get_attention_status(struct slot *slot, u8 *status)
432{ 386{
433 struct controller *ctrl = slot->ctrl; 387 struct controller *ctrl = slot->ctrl;
434 u32 slot_reg; 388 u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
435 u8 state; 389 u8 state = (slot_reg & ATN_LED_STATE_MASK) >> ATN_LED_STATE_SHIFT;
436
437 DBG_ENTER_ROUTINE
438
439 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
440 state = (slot_reg & ATN_LED_STATE_MASK) >> ATN_LED_STATE_SHIFT;
441 390
442 switch (state) { 391 switch (state) {
443 case ATN_LED_STATE_ON: 392 case ATN_LED_STATE_ON:
@@ -454,20 +403,14 @@ static int hpc_get_attention_status(struct slot *slot, u8 *status)
454 break; 403 break;
455 } 404 }
456 405
457 DBG_LEAVE_ROUTINE
458 return 0; 406 return 0;
459} 407}
460 408
461static int hpc_get_power_status(struct slot * slot, u8 *status) 409static int hpc_get_power_status(struct slot * slot, u8 *status)
462{ 410{
463 struct controller *ctrl = slot->ctrl; 411 struct controller *ctrl = slot->ctrl;
464 u32 slot_reg; 412 u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
465 u8 state; 413 u8 state = (slot_reg & SLOT_STATE_MASK) >> SLOT_STATE_SHIFT;
466
467 DBG_ENTER_ROUTINE
468
469 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
470 state = (slot_reg & SLOT_STATE_MASK) >> SLOT_STATE_SHIFT;
471 414
472 switch (state) { 415 switch (state) {
473 case SLOT_STATE_PWRONLY: 416 case SLOT_STATE_PWRONLY:
@@ -484,7 +427,6 @@ static int hpc_get_power_status(struct slot * slot, u8 *status)
484 break; 427 break;
485 } 428 }
486 429
487 DBG_LEAVE_ROUTINE
488 return 0; 430 return 0;
489} 431}
490 432
@@ -492,30 +434,21 @@ static int hpc_get_power_status(struct slot * slot, u8 *status)
492static int hpc_get_latch_status(struct slot *slot, u8 *status) 434static int hpc_get_latch_status(struct slot *slot, u8 *status)
493{ 435{
494 struct controller *ctrl = slot->ctrl; 436 struct controller *ctrl = slot->ctrl;
495 u32 slot_reg; 437 u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
496
497 DBG_ENTER_ROUTINE
498 438
499 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
500 *status = !!(slot_reg & MRL_SENSOR); /* 0 -> close; 1 -> open */ 439 *status = !!(slot_reg & MRL_SENSOR); /* 0 -> close; 1 -> open */
501 440
502 DBG_LEAVE_ROUTINE
503 return 0; 441 return 0;
504} 442}
505 443
506static int hpc_get_adapter_status(struct slot *slot, u8 *status) 444static int hpc_get_adapter_status(struct slot *slot, u8 *status)
507{ 445{
508 struct controller *ctrl = slot->ctrl; 446 struct controller *ctrl = slot->ctrl;
509 u32 slot_reg; 447 u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
510 u8 state; 448 u8 state = (slot_reg & PRSNT_MASK) >> PRSNT_SHIFT;
511
512 DBG_ENTER_ROUTINE
513 449
514 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
515 state = (slot_reg & PRSNT_MASK) >> PRSNT_SHIFT;
516 *status = (state != 0x3) ? 1 : 0; 450 *status = (state != 0x3) ? 1 : 0;
517 451
518 DBG_LEAVE_ROUTINE
519 return 0; 452 return 0;
520} 453}
521 454
@@ -523,11 +456,8 @@ static int hpc_get_prog_int(struct slot *slot, u8 *prog_int)
523{ 456{
524 struct controller *ctrl = slot->ctrl; 457 struct controller *ctrl = slot->ctrl;
525 458
526 DBG_ENTER_ROUTINE
527
528 *prog_int = shpc_readb(ctrl, PROG_INTERFACE); 459 *prog_int = shpc_readb(ctrl, PROG_INTERFACE);
529 460
530 DBG_LEAVE_ROUTINE
531 return 0; 461 return 0;
532} 462}
533 463
@@ -539,8 +469,6 @@ static int hpc_get_adapter_speed(struct slot *slot, enum pci_bus_speed *value)
539 u8 m66_cap = !!(slot_reg & MHZ66_CAP); 469 u8 m66_cap = !!(slot_reg & MHZ66_CAP);
540 u8 pi, pcix_cap; 470 u8 pi, pcix_cap;
541 471
542 DBG_ENTER_ROUTINE
543
544 if ((retval = hpc_get_prog_int(slot, &pi))) 472 if ((retval = hpc_get_prog_int(slot, &pi)))
545 return retval; 473 return retval;
546 474
@@ -582,21 +510,15 @@ static int hpc_get_adapter_speed(struct slot *slot, enum pci_bus_speed *value)
582 } 510 }
583 511
584 dbg("Adapter speed = %d\n", *value); 512 dbg("Adapter speed = %d\n", *value);
585 DBG_LEAVE_ROUTINE
586 return retval; 513 return retval;
587} 514}
588 515
589static int hpc_get_mode1_ECC_cap(struct slot *slot, u8 *mode) 516static int hpc_get_mode1_ECC_cap(struct slot *slot, u8 *mode)
590{ 517{
591 struct controller *ctrl = slot->ctrl;
592 u16 sec_bus_status;
593 u8 pi;
594 int retval = 0; 518 int retval = 0;
595 519 struct controller *ctrl = slot->ctrl;
596 DBG_ENTER_ROUTINE 520 u16 sec_bus_status = shpc_readw(ctrl, SEC_BUS_CONFIG);
597 521 u8 pi = shpc_readb(ctrl, PROG_INTERFACE);
598 pi = shpc_readb(ctrl, PROG_INTERFACE);
599 sec_bus_status = shpc_readw(ctrl, SEC_BUS_CONFIG);
600 522
601 if (pi == 2) { 523 if (pi == 2) {
602 *mode = (sec_bus_status & 0x0100) >> 8; 524 *mode = (sec_bus_status & 0x0100) >> 8;
@@ -605,21 +527,14 @@ static int hpc_get_mode1_ECC_cap(struct slot *slot, u8 *mode)
605 } 527 }
606 528
607 dbg("Mode 1 ECC cap = %d\n", *mode); 529 dbg("Mode 1 ECC cap = %d\n", *mode);
608
609 DBG_LEAVE_ROUTINE
610 return retval; 530 return retval;
611} 531}
612 532
613static int hpc_query_power_fault(struct slot * slot) 533static int hpc_query_power_fault(struct slot * slot)
614{ 534{
615 struct controller *ctrl = slot->ctrl; 535 struct controller *ctrl = slot->ctrl;
616 u32 slot_reg; 536 u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
617
618 DBG_ENTER_ROUTINE
619
620 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
621 537
622 DBG_LEAVE_ROUTINE
623 /* Note: Logic 0 => fault */ 538 /* Note: Logic 0 => fault */
624 return !(slot_reg & POWER_FAULT); 539 return !(slot_reg & POWER_FAULT);
625} 540}
@@ -629,7 +544,7 @@ static int hpc_set_attention_status(struct slot *slot, u8 value)
629 u8 slot_cmd = 0; 544 u8 slot_cmd = 0;
630 545
631 switch (value) { 546 switch (value) {
632 case 0 : 547 case 0 :
633 slot_cmd = SET_ATTN_OFF; /* OFF */ 548 slot_cmd = SET_ATTN_OFF; /* OFF */
634 break; 549 break;
635 case 1: 550 case 1:
@@ -666,8 +581,6 @@ static void hpc_release_ctlr(struct controller *ctrl)
666 int i; 581 int i;
667 u32 slot_reg, serr_int; 582 u32 slot_reg, serr_int;
668 583
669 DBG_ENTER_ROUTINE
670
671 /* 584 /*
672 * Mask event interrupts and SERRs of all slots 585 * Mask event interrupts and SERRs of all slots
673 */ 586 */
@@ -708,61 +621,43 @@ static void hpc_release_ctlr(struct controller *ctrl)
708 */ 621 */
709 if (atomic_dec_and_test(&shpchp_num_controllers)) 622 if (atomic_dec_and_test(&shpchp_num_controllers))
710 destroy_workqueue(shpchp_wq); 623 destroy_workqueue(shpchp_wq);
711
712 DBG_LEAVE_ROUTINE
713} 624}
714 625
715static int hpc_power_on_slot(struct slot * slot) 626static int hpc_power_on_slot(struct slot * slot)
716{ 627{
717 int retval; 628 int retval;
718 629
719 DBG_ENTER_ROUTINE
720
721 retval = shpc_write_cmd(slot, slot->hp_slot, SET_SLOT_PWR); 630 retval = shpc_write_cmd(slot, slot->hp_slot, SET_SLOT_PWR);
722 if (retval) { 631 if (retval)
723 err("%s: Write command failed!\n", __FUNCTION__); 632 err("%s: Write command failed!\n", __FUNCTION__);
724 return retval;
725 }
726
727 DBG_LEAVE_ROUTINE
728 633
729 return 0; 634 return retval;
730} 635}
731 636
732static int hpc_slot_enable(struct slot * slot) 637static int hpc_slot_enable(struct slot * slot)
733{ 638{
734 int retval; 639 int retval;
735 640
736 DBG_ENTER_ROUTINE
737
738 /* Slot - Enable, Power Indicator - Blink, Attention Indicator - Off */ 641 /* Slot - Enable, Power Indicator - Blink, Attention Indicator - Off */
739 retval = shpc_write_cmd(slot, slot->hp_slot, 642 retval = shpc_write_cmd(slot, slot->hp_slot,
740 SET_SLOT_ENABLE | SET_PWR_BLINK | SET_ATTN_OFF); 643 SET_SLOT_ENABLE | SET_PWR_BLINK | SET_ATTN_OFF);
741 if (retval) { 644 if (retval)
742 err("%s: Write command failed!\n", __FUNCTION__); 645 err("%s: Write command failed!\n", __FUNCTION__);
743 return retval;
744 }
745 646
746 DBG_LEAVE_ROUTINE 647 return retval;
747 return 0;
748} 648}
749 649
750static int hpc_slot_disable(struct slot * slot) 650static int hpc_slot_disable(struct slot * slot)
751{ 651{
752 int retval; 652 int retval;
753 653
754 DBG_ENTER_ROUTINE
755
756 /* Slot - Disable, Power Indicator - Off, Attention Indicator - On */ 654 /* Slot - Disable, Power Indicator - Off, Attention Indicator - On */
757 retval = shpc_write_cmd(slot, slot->hp_slot, 655 retval = shpc_write_cmd(slot, slot->hp_slot,
758 SET_SLOT_DISABLE | SET_PWR_OFF | SET_ATTN_ON); 656 SET_SLOT_DISABLE | SET_PWR_OFF | SET_ATTN_ON);
759 if (retval) { 657 if (retval)
760 err("%s: Write command failed!\n", __FUNCTION__); 658 err("%s: Write command failed!\n", __FUNCTION__);
761 return retval;
762 }
763 659
764 DBG_LEAVE_ROUTINE 660 return retval;
765 return 0;
766} 661}
767 662
768static int hpc_set_bus_speed_mode(struct slot * slot, enum pci_bus_speed value) 663static int hpc_set_bus_speed_mode(struct slot * slot, enum pci_bus_speed value)
@@ -771,8 +666,6 @@ static int hpc_set_bus_speed_mode(struct slot * slot, enum pci_bus_speed value)
771 struct controller *ctrl = slot->ctrl; 666 struct controller *ctrl = slot->ctrl;
772 u8 pi, cmd; 667 u8 pi, cmd;
773 668
774 DBG_ENTER_ROUTINE
775
776 pi = shpc_readb(ctrl, PROG_INTERFACE); 669 pi = shpc_readb(ctrl, PROG_INTERFACE);
777 if ((pi == 1) && (value > PCI_SPEED_133MHz_PCIX)) 670 if ((pi == 1) && (value > PCI_SPEED_133MHz_PCIX))
778 return -EINVAL; 671 return -EINVAL;
@@ -828,7 +721,6 @@ static int hpc_set_bus_speed_mode(struct slot * slot, enum pci_bus_speed value)
828 if (retval) 721 if (retval)
829 err("%s: Write command failed!\n", __FUNCTION__); 722 err("%s: Write command failed!\n", __FUNCTION__);
830 723
831 DBG_LEAVE_ROUTINE
832 return retval; 724 return retval;
833} 725}
834 726
@@ -843,7 +735,7 @@ static irqreturn_t shpc_isr(int irq, void *dev_id)
843 if (!intr_loc) 735 if (!intr_loc)
844 return IRQ_NONE; 736 return IRQ_NONE;
845 737
846 dbg("%s: intr_loc = %x\n",__FUNCTION__, intr_loc); 738 dbg("%s: intr_loc = %x\n",__FUNCTION__, intr_loc);
847 739
848 if(!shpchp_poll_mode) { 740 if(!shpchp_poll_mode) {
849 /* 741 /*
@@ -856,12 +748,12 @@ static irqreturn_t shpc_isr(int irq, void *dev_id)
856 shpc_writel(ctrl, SERR_INTR_ENABLE, serr_int); 748 shpc_writel(ctrl, SERR_INTR_ENABLE, serr_int);
857 749
858 intr_loc2 = shpc_readl(ctrl, INTR_LOC); 750 intr_loc2 = shpc_readl(ctrl, INTR_LOC);
859 dbg("%s: intr_loc2 = %x\n",__FUNCTION__, intr_loc2); 751 dbg("%s: intr_loc2 = %x\n",__FUNCTION__, intr_loc2);
860 } 752 }
861 753
862 if (intr_loc & CMD_INTR_PENDING) { 754 if (intr_loc & CMD_INTR_PENDING) {
863 /* 755 /*
864 * Command Complete Interrupt Pending 756 * Command Complete Interrupt Pending
865 * RO only - clear by writing 1 to the Command Completion 757 * RO only - clear by writing 1 to the Command Completion
866 * Detect bit in Controller SERR-INT register 758 * Detect bit in Controller SERR-INT register
867 */ 759 */
@@ -875,7 +767,7 @@ static irqreturn_t shpc_isr(int irq, void *dev_id)
875 if (!(intr_loc & ~CMD_INTR_PENDING)) 767 if (!(intr_loc & ~CMD_INTR_PENDING))
876 goto out; 768 goto out;
877 769
878 for (hp_slot = 0; hp_slot < ctrl->num_slots; hp_slot++) { 770 for (hp_slot = 0; hp_slot < ctrl->num_slots; hp_slot++) {
879 /* To find out which slot has interrupt pending */ 771 /* To find out which slot has interrupt pending */
880 if (!(intr_loc & SLOT_INTR_PENDING(hp_slot))) 772 if (!(intr_loc & SLOT_INTR_PENDING(hp_slot)))
881 continue; 773 continue;
@@ -907,7 +799,7 @@ static irqreturn_t shpc_isr(int irq, void *dev_id)
907 serr_int &= ~(GLOBAL_INTR_MASK | SERR_INTR_RSVDZ_MASK); 799 serr_int &= ~(GLOBAL_INTR_MASK | SERR_INTR_RSVDZ_MASK);
908 shpc_writel(ctrl, SERR_INTR_ENABLE, serr_int); 800 shpc_writel(ctrl, SERR_INTR_ENABLE, serr_int);
909 } 801 }
910 802
911 return IRQ_HANDLED; 803 return IRQ_HANDLED;
912} 804}
913 805
@@ -920,8 +812,6 @@ static int hpc_get_max_bus_speed (struct slot *slot, enum pci_bus_speed *value)
920 u32 slot_avail1 = shpc_readl(ctrl, SLOT_AVAIL1); 812 u32 slot_avail1 = shpc_readl(ctrl, SLOT_AVAIL1);
921 u32 slot_avail2 = shpc_readl(ctrl, SLOT_AVAIL2); 813 u32 slot_avail2 = shpc_readl(ctrl, SLOT_AVAIL2);
922 814
923 DBG_ENTER_ROUTINE
924
925 if (pi == 2) { 815 if (pi == 2) {
926 if (slot_avail2 & SLOT_133MHZ_PCIX_533) 816 if (slot_avail2 & SLOT_133MHZ_PCIX_533)
927 bus_speed = PCI_SPEED_133MHz_PCIX_533; 817 bus_speed = PCI_SPEED_133MHz_PCIX_533;
@@ -954,7 +844,7 @@ static int hpc_get_max_bus_speed (struct slot *slot, enum pci_bus_speed *value)
954 844
955 *value = bus_speed; 845 *value = bus_speed;
956 dbg("Max bus speed = %d\n", bus_speed); 846 dbg("Max bus speed = %d\n", bus_speed);
957 DBG_LEAVE_ROUTINE 847
958 return retval; 848 return retval;
959} 849}
960 850
@@ -967,8 +857,6 @@ static int hpc_get_cur_bus_speed (struct slot *slot, enum pci_bus_speed *value)
967 u8 pi = shpc_readb(ctrl, PROG_INTERFACE); 857 u8 pi = shpc_readb(ctrl, PROG_INTERFACE);
968 u8 speed_mode = (pi == 2) ? (sec_bus_reg & 0xF) : (sec_bus_reg & 0x7); 858 u8 speed_mode = (pi == 2) ? (sec_bus_reg & 0xF) : (sec_bus_reg & 0x7);
969 859
970 DBG_ENTER_ROUTINE
971
972 if ((pi == 1) && (speed_mode > 4)) { 860 if ((pi == 1) && (speed_mode > 4)) {
973 *value = PCI_SPEED_UNKNOWN; 861 *value = PCI_SPEED_UNKNOWN;
974 return -ENODEV; 862 return -ENODEV;
@@ -1024,7 +912,6 @@ static int hpc_get_cur_bus_speed (struct slot *slot, enum pci_bus_speed *value)
1024 } 912 }
1025 913
1026 dbg("Current bus speed = %d\n", bus_speed); 914 dbg("Current bus speed = %d\n", bus_speed);
1027 DBG_LEAVE_ROUTINE
1028 return retval; 915 return retval;
1029} 916}
1030 917
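The shpc_readb/shpc_readw/shpc_readl helpers used throughout these getters are shpchp's counterparts to the pciehp accessors, except that the SHPC working register set is memory-mapped rather than in config space. A sketch, assuming ctrl->creg holds the ioremap()ed base of the register block:

/* Sketch: MMIO accessors for the SHPC working register set.
 * ctrl->creg is assumed to be a void __iomem * register base. */
static inline u32 shpc_readl(struct controller *ctrl, int reg)
{
	return readl(ctrl->creg + reg);
}

static inline void shpc_writel(struct controller *ctrl, int reg, u32 val)
{
	writel(val, ctrl->creg + reg);
}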
@@ -1032,7 +919,7 @@ static struct hpc_ops shpchp_hpc_ops = {
1032 .power_on_slot = hpc_power_on_slot, 919 .power_on_slot = hpc_power_on_slot,
1033 .slot_enable = hpc_slot_enable, 920 .slot_enable = hpc_slot_enable,
1034 .slot_disable = hpc_slot_disable, 921 .slot_disable = hpc_slot_disable,
1035 .set_bus_speed_mode = hpc_set_bus_speed_mode, 922 .set_bus_speed_mode = hpc_set_bus_speed_mode,
1036 .set_attention_status = hpc_set_attention_status, 923 .set_attention_status = hpc_set_attention_status,
1037 .get_power_status = hpc_get_power_status, 924 .get_power_status = hpc_get_power_status,
1038 .get_attention_status = hpc_get_attention_status, 925 .get_attention_status = hpc_get_attention_status,
@@ -1049,7 +936,7 @@ static struct hpc_ops shpchp_hpc_ops = {
1049 .green_led_on = hpc_set_green_led_on, 936 .green_led_on = hpc_set_green_led_on,
1050 .green_led_off = hpc_set_green_led_off, 937 .green_led_off = hpc_set_green_led_off,
1051 .green_led_blink = hpc_set_green_led_blink, 938 .green_led_blink = hpc_set_green_led_blink,
1052 939
1053 .release_ctlr = hpc_release_ctlr, 940 .release_ctlr = hpc_release_ctlr,
1054}; 941};
1055 942
@@ -1061,8 +948,6 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
1061 u32 tempdword, slot_reg, slot_config; 948 u32 tempdword, slot_reg, slot_config;
1062 u8 i; 949 u8 i;
1063 950
1064 DBG_ENTER_ROUTINE
1065
1066 ctrl->pci_dev = pdev; /* pci_dev of the P2P bridge */ 951 ctrl->pci_dev = pdev; /* pci_dev of the P2P bridge */
1067 952
1068 if ((pdev->vendor == PCI_VENDOR_ID_AMD) || (pdev->device == 953 if ((pdev->vendor == PCI_VENDOR_ID_AMD) || (pdev->device ==
@@ -1108,9 +993,9 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
1108 ctrl->mmio_size = 0x24 + 0x4 * num_slots; 993 ctrl->mmio_size = 0x24 + 0x4 * num_slots;
1109 } 994 }
1110 995
1111 info("HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n", pdev->vendor, pdev->device, pdev->subsystem_vendor, 996 info("HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n", pdev->vendor, pdev->device, pdev->subsystem_vendor,
1112 pdev->subsystem_device); 997 pdev->subsystem_device);
1113 998
1114 rc = pci_enable_device(pdev); 999 rc = pci_enable_device(pdev);
1115 if (rc) { 1000 if (rc) {
1116 err("%s: pci_enable_device failed\n", __FUNCTION__); 1001 err("%s: pci_enable_device failed\n", __FUNCTION__);
@@ -1172,7 +1057,7 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
1172 slot_reg &= ~SLOT_REG_RSVDZ_MASK; 1057 slot_reg &= ~SLOT_REG_RSVDZ_MASK;
1173 shpc_writel(ctrl, SLOT_REG(hp_slot), slot_reg); 1058 shpc_writel(ctrl, SLOT_REG(hp_slot), slot_reg);
1174 } 1059 }
1175 1060
1176 if (shpchp_poll_mode) { 1061 if (shpchp_poll_mode) {
1177 /* Install interrupt polling timer. Start with 10 sec delay */ 1062 /* Install interrupt polling timer. Start with 10 sec delay */
1178 init_timer(&ctrl->poll_timer); 1063 init_timer(&ctrl->poll_timer);
@@ -1184,7 +1069,7 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
1184 info("Can't get msi for the hotplug controller\n"); 1069 info("Can't get msi for the hotplug controller\n");
1185 info("Use INTx for the hotplug controller\n"); 1070 info("Use INTx for the hotplug controller\n");
1186 } 1071 }
1187 1072
1188 rc = request_irq(ctrl->pci_dev->irq, shpc_isr, IRQF_SHARED, 1073 rc = request_irq(ctrl->pci_dev->irq, shpc_isr, IRQF_SHARED,
1189 MY_NAME, (void *)ctrl); 1074 MY_NAME, (void *)ctrl);
1190 dbg("%s: request_irq %d for hpc%d (returns %d)\n", 1075 dbg("%s: request_irq %d for hpc%d (returns %d)\n",
@@ -1235,13 +1120,11 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
1235 dbg("%s: SERR_INTR_ENABLE = %x\n", __FUNCTION__, tempdword); 1120 dbg("%s: SERR_INTR_ENABLE = %x\n", __FUNCTION__, tempdword);
1236 } 1121 }
1237 1122
1238 DBG_LEAVE_ROUTINE
1239 return 0; 1123 return 0;
1240 1124
1241 /* We end up here for the many possible ways to fail this API. */ 1125 /* We end up here for the many possible ways to fail this API. */
1242abort_iounmap: 1126abort_iounmap:
1243 iounmap(ctrl->creg); 1127 iounmap(ctrl->creg);
1244abort: 1128abort:
1245 DBG_LEAVE_ROUTINE
1246 return rc; 1129 return rc;
1247} 1130}
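
The shpchp_hpc_ops table above is the only interface the shpchp core uses to reach the controller hardware. A minimal caller sketch (hypothetical locals, assuming the slot carries the ops pointer as elsewhere in shpchp):

        /* dispatch through the ops table wired up in shpc_init() */
        enum pci_bus_speed speed;

        if (slot->hpc_ops->get_max_bus_speed(slot, &speed) == 0)
                dbg("slot max bus speed %d\n", speed);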
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index ed3f7e1a563c..68555c11f556 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -24,8 +24,6 @@
24#include "pci.h" 24#include "pci.h"
25#include "msi.h" 25#include "msi.h"
26 26
27static DEFINE_SPINLOCK(msi_lock);
28static struct msi_desc* msi_desc[NR_IRQS] = { [0 ... NR_IRQS-1] = NULL };
29static struct kmem_cache* msi_cachep; 27static struct kmem_cache* msi_cachep;
30 28
31static int pci_msi_enable = 1; 29static int pci_msi_enable = 1;
@@ -44,13 +42,13 @@ static void msi_set_mask_bit(unsigned int irq, int flag)
44{ 42{
45 struct msi_desc *entry; 43 struct msi_desc *entry;
46 44
47 entry = msi_desc[irq]; 45 entry = get_irq_msi(irq);
48 BUG_ON(!entry || !entry->dev); 46 BUG_ON(!entry || !entry->dev);
49 switch (entry->msi_attrib.type) { 47 switch (entry->msi_attrib.type) {
50 case PCI_CAP_ID_MSI: 48 case PCI_CAP_ID_MSI:
51 if (entry->msi_attrib.maskbit) { 49 if (entry->msi_attrib.maskbit) {
52 int pos; 50 int pos;
53 u32 mask_bits; 51 u32 mask_bits;
54 52
55 pos = (long)entry->mask_base; 53 pos = (long)entry->mask_base;
56 pci_read_config_dword(entry->dev, pos, &mask_bits); 54 pci_read_config_dword(entry->dev, pos, &mask_bits);
@@ -74,7 +72,7 @@ static void msi_set_mask_bit(unsigned int irq, int flag)
74 72
75void read_msi_msg(unsigned int irq, struct msi_msg *msg) 73void read_msi_msg(unsigned int irq, struct msi_msg *msg)
76{ 74{
77 struct msi_desc *entry = get_irq_data(irq); 75 struct msi_desc *entry = get_irq_msi(irq);
78 switch(entry->msi_attrib.type) { 76 switch(entry->msi_attrib.type) {
79 case PCI_CAP_ID_MSI: 77 case PCI_CAP_ID_MSI:
80 { 78 {
@@ -113,7 +111,7 @@ void read_msi_msg(unsigned int irq, struct msi_msg *msg)
113 111
114void write_msi_msg(unsigned int irq, struct msi_msg *msg) 112void write_msi_msg(unsigned int irq, struct msi_msg *msg)
115{ 113{
116 struct msi_desc *entry = get_irq_data(irq); 114 struct msi_desc *entry = get_irq_msi(irq);
117 switch (entry->msi_attrib.type) { 115 switch (entry->msi_attrib.type) {
118 case PCI_CAP_ID_MSI: 116 case PCI_CAP_ID_MSI:
119 { 117 {
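
The read_msi_msg()/write_msi_msg() hunks above swap the untyped get_irq_data() for get_irq_msi(), which fetches the msi_desc the genirq layer now keeps per irq. A simplified sketch of the accessor pair this patch assumes (the real helpers live in the irq core and take the appropriate locking):

        /* simplified sketch, not the kernel's exact definitions */
        static inline struct msi_desc *get_irq_msi(unsigned int irq)
        {
                return irq_desc[irq].msi_desc;
        }

        static inline void set_irq_msi(unsigned int irq, struct msi_desc *entry)
        {
                irq_desc[irq].msi_desc = entry;
        }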
@@ -162,6 +160,7 @@ void unmask_msi_irq(unsigned int irq)
162} 160}
163 161
164static int msi_free_irq(struct pci_dev* dev, int irq); 162static int msi_free_irq(struct pci_dev* dev, int irq);
163
165static int msi_init(void) 164static int msi_init(void)
166{ 165{
167 static int status = -ENOMEM; 166 static int status = -ENOMEM;
@@ -169,13 +168,6 @@ static int msi_init(void)
169 if (!status) 168 if (!status)
170 return status; 169 return status;
171 170
172 if (pci_msi_quirk) {
173 pci_msi_enable = 0;
174 printk(KERN_WARNING "PCI: MSI quirk detected. MSI disabled.\n");
175 status = -EINVAL;
176 return status;
177 }
178
179 status = msi_cache_init(); 171 status = msi_cache_init();
180 if (status < 0) { 172 if (status < 0) {
181 pci_msi_enable = 0; 173 pci_msi_enable = 0;
@@ -200,46 +192,6 @@ static struct msi_desc* alloc_msi_entry(void)
200 return entry; 192 return entry;
201} 193}
202 194
203static void attach_msi_entry(struct msi_desc *entry, int irq)
204{
205 unsigned long flags;
206
207 spin_lock_irqsave(&msi_lock, flags);
208 msi_desc[irq] = entry;
209 spin_unlock_irqrestore(&msi_lock, flags);
210}
211
212static int create_msi_irq(void)
213{
214 struct msi_desc *entry;
215 int irq;
216
217 entry = alloc_msi_entry();
218 if (!entry)
219 return -ENOMEM;
220
221 irq = create_irq();
222 if (irq < 0) {
223 kmem_cache_free(msi_cachep, entry);
224 return -EBUSY;
225 }
226
227 set_irq_data(irq, entry);
228
229 return irq;
230}
231
232static void destroy_msi_irq(unsigned int irq)
233{
234 struct msi_desc *entry;
235
236 entry = get_irq_data(irq);
237 set_irq_chip(irq, NULL);
238 set_irq_data(irq, NULL);
239 destroy_irq(irq);
240 kmem_cache_free(msi_cachep, entry);
241}
242
243static void enable_msi_mode(struct pci_dev *dev, int pos, int type) 195static void enable_msi_mode(struct pci_dev *dev, int pos, int type)
244{ 196{
245 u16 control; 197 u16 control;
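
With create_msi_irq() and destroy_msi_irq() removed, descriptor allocation and irq allocation are split between layers: the MSI core allocates the msi_desc, and arch_setup_msi_irq() picks the irq and programs the vector. The resulting setup order, condensed from msi_capability_init() later in this patch:

        entry = alloc_msi_entry();
        if (!entry)
                return -ENOMEM;
        /* ... fill in entry->msi_attrib ... */
        irq = arch_setup_msi_irq(dev, entry);   /* arch code allocates the irq */
        if (irq < 0) {
                kmem_cache_free(msi_cachep, entry);
                return irq;
        }
        dev->first_msi_irq = irq;
        set_irq_msi(irq, entry);                /* bind descriptor to the irq */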
@@ -278,36 +230,8 @@ void disable_msi_mode(struct pci_dev *dev, int pos, int type)
278 pci_intx(dev, 1); /* enable intx */ 230 pci_intx(dev, 1); /* enable intx */
279} 231}
280 232
281static int msi_lookup_irq(struct pci_dev *dev, int type)
282{
283 int irq;
284 unsigned long flags;
285
286 spin_lock_irqsave(&msi_lock, flags);
287 for (irq = 0; irq < NR_IRQS; irq++) {
288 if (!msi_desc[irq] || msi_desc[irq]->dev != dev ||
289 msi_desc[irq]->msi_attrib.type != type ||
290 msi_desc[irq]->msi_attrib.default_irq != dev->irq)
291 continue;
292 spin_unlock_irqrestore(&msi_lock, flags);
293 /* This pre-assigned MSI irq for this device
294 already exits. Override dev->irq with this irq */
295 dev->irq = irq;
296 return 0;
297 }
298 spin_unlock_irqrestore(&msi_lock, flags);
299
300 return -EACCES;
301}
302
303void pci_scan_msi_device(struct pci_dev *dev)
304{
305 if (!dev)
306 return;
307}
308
309#ifdef CONFIG_PM 233#ifdef CONFIG_PM
310int pci_save_msi_state(struct pci_dev *dev) 234static int __pci_save_msi_state(struct pci_dev *dev)
311{ 235{
312 int pos, i = 0; 236 int pos, i = 0;
313 u16 control; 237 u16 control;
@@ -345,7 +269,7 @@ int pci_save_msi_state(struct pci_dev *dev)
345 return 0; 269 return 0;
346} 270}
347 271
348void pci_restore_msi_state(struct pci_dev *dev) 272static void __pci_restore_msi_state(struct pci_dev *dev)
349{ 273{
350 int i = 0, pos; 274 int i = 0, pos;
351 u16 control; 275 u16 control;
@@ -373,14 +297,16 @@ void pci_restore_msi_state(struct pci_dev *dev)
373 kfree(save_state); 297 kfree(save_state);
374} 298}
375 299
376int pci_save_msix_state(struct pci_dev *dev) 300static int __pci_save_msix_state(struct pci_dev *dev)
377{ 301{
378 int pos; 302 int pos;
379 int temp;
380 int irq, head, tail = 0; 303 int irq, head, tail = 0;
381 u16 control; 304 u16 control;
382 struct pci_cap_saved_state *save_state; 305 struct pci_cap_saved_state *save_state;
383 306
307 if (!dev->msix_enabled)
308 return 0;
309
384 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); 310 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
385 if (pos <= 0 || dev->no_msi) 311 if (pos <= 0 || dev->no_msi)
386 return 0; 312 return 0;
@@ -398,38 +324,46 @@ int pci_save_msix_state(struct pci_dev *dev)
398 *((u16 *)&save_state->data[0]) = control; 324 *((u16 *)&save_state->data[0]) = control;
399 325
400 /* save the table */ 326 /* save the table */
401 temp = dev->irq; 327 irq = head = dev->first_msi_irq;
402 if (msi_lookup_irq(dev, PCI_CAP_ID_MSIX)) {
403 kfree(save_state);
404 return -EINVAL;
405 }
406
407 irq = head = dev->irq;
408 while (head != tail) { 328 while (head != tail) {
409 struct msi_desc *entry; 329 struct msi_desc *entry;
410 330
411 entry = msi_desc[irq]; 331 entry = get_irq_msi(irq);
412 read_msi_msg(irq, &entry->msg_save); 332 read_msi_msg(irq, &entry->msg_save);
413 333
414 tail = msi_desc[irq]->link.tail; 334 tail = entry->link.tail;
415 irq = tail; 335 irq = tail;
416 } 336 }
417 dev->irq = temp;
418 337
419 save_state->cap_nr = PCI_CAP_ID_MSIX; 338 save_state->cap_nr = PCI_CAP_ID_MSIX;
420 pci_add_saved_cap(dev, save_state); 339 pci_add_saved_cap(dev, save_state);
421 return 0; 340 return 0;
422} 341}
423 342
424void pci_restore_msix_state(struct pci_dev *dev) 343int pci_save_msi_state(struct pci_dev *dev)
344{
345 int rc;
346
347 rc = __pci_save_msi_state(dev);
348 if (rc)
349 return rc;
350
351 rc = __pci_save_msix_state(dev);
352
353 return rc;
354}
355
356static void __pci_restore_msix_state(struct pci_dev *dev)
425{ 357{
426 u16 save; 358 u16 save;
427 int pos; 359 int pos;
428 int irq, head, tail = 0; 360 int irq, head, tail = 0;
429 struct msi_desc *entry; 361 struct msi_desc *entry;
430 int temp;
431 struct pci_cap_saved_state *save_state; 362 struct pci_cap_saved_state *save_state;
432 363
364 if (!dev->msix_enabled)
365 return;
366
433 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSIX); 367 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_MSIX);
434 if (!save_state) 368 if (!save_state)
435 return; 369 return;
@@ -442,23 +376,25 @@ void pci_restore_msix_state(struct pci_dev *dev)
442 return; 376 return;
443 377
444 /* route the table */ 378 /* route the table */
445 temp = dev->irq; 379 irq = head = dev->first_msi_irq;
446 if (msi_lookup_irq(dev, PCI_CAP_ID_MSIX))
447 return;
448 irq = head = dev->irq;
449 while (head != tail) { 380 while (head != tail) {
450 entry = msi_desc[irq]; 381 entry = get_irq_msi(irq);
451 write_msi_msg(irq, &entry->msg_save); 382 write_msi_msg(irq, &entry->msg_save);
452 383
453 tail = msi_desc[irq]->link.tail; 384 tail = entry->link.tail;
454 irq = tail; 385 irq = tail;
455 } 386 }
456 dev->irq = temp;
457 387
458 pci_write_config_word(dev, msi_control_reg(pos), save); 388 pci_write_config_word(dev, msi_control_reg(pos), save);
459 enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX); 389 enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
460} 390}
461#endif 391
392void pci_restore_msi_state(struct pci_dev *dev)
393{
394 __pci_restore_msi_state(dev);
395 __pci_restore_msix_state(dev);
396}
397#endif /* CONFIG_PM */
462 398
463/** 399/**
464 * msi_capability_init - configure device's MSI capability structure 400 * msi_capability_init - configure device's MSI capability structure
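
Both the save and restore paths above walk the device's MSI-X vectors through the circular list rooted at dev->first_msi_irq rather than scanning a global table. The walk they share, extracted:

        int irq, head, tail = 0;

        irq = head = dev->first_msi_irq;
        while (head != tail) {
                struct msi_desc *entry = get_irq_msi(irq);

                /* read_msi_msg() on save, write_msi_msg() on restore */
                tail = entry->link.tail;
                irq = tail;
        }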
@@ -471,7 +407,6 @@ void pci_restore_msix_state(struct pci_dev *dev)
471 **/ 407 **/
472static int msi_capability_init(struct pci_dev *dev) 408static int msi_capability_init(struct pci_dev *dev)
473{ 409{
474 int status;
475 struct msi_desc *entry; 410 struct msi_desc *entry;
476 int pos, irq; 411 int pos, irq;
477 u16 control; 412 u16 control;
@@ -479,13 +414,10 @@ static int msi_capability_init(struct pci_dev *dev)
479 pos = pci_find_capability(dev, PCI_CAP_ID_MSI); 414 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
480 pci_read_config_word(dev, msi_control_reg(pos), &control); 415 pci_read_config_word(dev, msi_control_reg(pos), &control);
481 /* MSI Entry Initialization */ 416 /* MSI Entry Initialization */
482 irq = create_msi_irq(); 417 entry = alloc_msi_entry();
483 if (irq < 0) 418 if (!entry)
484 return irq; 419 return -ENOMEM;
485 420
486 entry = get_irq_data(irq);
487 entry->link.head = irq;
488 entry->link.tail = irq;
489 entry->msi_attrib.type = PCI_CAP_ID_MSI; 421 entry->msi_attrib.type = PCI_CAP_ID_MSI;
490 entry->msi_attrib.is_64 = is_64bit_address(control); 422 entry->msi_attrib.is_64 = is_64bit_address(control);
491 entry->msi_attrib.entry_nr = 0; 423 entry->msi_attrib.entry_nr = 0;
@@ -511,13 +443,16 @@ static int msi_capability_init(struct pci_dev *dev)
511 maskbits); 443 maskbits);
512 } 444 }
513 /* Configure MSI capability structure */ 445 /* Configure MSI capability structure */
514 status = arch_setup_msi_irq(irq, dev); 446 irq = arch_setup_msi_irq(dev, entry);
515 if (status < 0) { 447 if (irq < 0) {
516 destroy_msi_irq(irq); 448 kmem_cache_free(msi_cachep, entry);
517 return status; 449 return irq;
518 } 450 }
451 entry->link.head = irq;
452 entry->link.tail = irq;
453 dev->first_msi_irq = irq;
454 set_irq_msi(irq, entry);
519 455
520 attach_msi_entry(entry, irq);
521 /* Set MSI enabled bits */ 456 /* Set MSI enabled bits */
522 enable_msi_mode(dev, pos, PCI_CAP_ID_MSI); 457 enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
523 458
@@ -539,7 +474,6 @@ static int msix_capability_init(struct pci_dev *dev,
539 struct msix_entry *entries, int nvec) 474 struct msix_entry *entries, int nvec)
540{ 475{
541 struct msi_desc *head = NULL, *tail = NULL, *entry = NULL; 476 struct msi_desc *head = NULL, *tail = NULL, *entry = NULL;
542 int status;
543 int irq, pos, i, j, nr_entries, temp = 0; 477 int irq, pos, i, j, nr_entries, temp = 0;
544 unsigned long phys_addr; 478 unsigned long phys_addr;
545 u32 table_offset; 479 u32 table_offset;
@@ -562,13 +496,11 @@ static int msix_capability_init(struct pci_dev *dev,
562 496
563 /* MSI-X Table Initialization */ 497 /* MSI-X Table Initialization */
564 for (i = 0; i < nvec; i++) { 498 for (i = 0; i < nvec; i++) {
565 irq = create_msi_irq(); 499 entry = alloc_msi_entry();
566 if (irq < 0) 500 if (!entry)
567 break; 501 break;
568 502
569 entry = get_irq_data(irq);
570 j = entries[i].entry; 503 j = entries[i].entry;
571 entries[i].vector = irq;
572 entry->msi_attrib.type = PCI_CAP_ID_MSIX; 504 entry->msi_attrib.type = PCI_CAP_ID_MSIX;
573 entry->msi_attrib.is_64 = 1; 505 entry->msi_attrib.is_64 = 1;
574 entry->msi_attrib.entry_nr = j; 506 entry->msi_attrib.entry_nr = j;
@@ -577,6 +509,14 @@ static int msix_capability_init(struct pci_dev *dev,
577 entry->msi_attrib.pos = pos; 509 entry->msi_attrib.pos = pos;
578 entry->dev = dev; 510 entry->dev = dev;
579 entry->mask_base = base; 511 entry->mask_base = base;
512
513 /* Configure MSI-X capability structure */
514 irq = arch_setup_msi_irq(dev, entry);
515 if (irq < 0) {
516 kmem_cache_free(msi_cachep, entry);
517 break;
518 }
519 entries[i].vector = irq;
580 if (!head) { 520 if (!head) {
581 entry->link.head = irq; 521 entry->link.head = irq;
582 entry->link.tail = irq; 522 entry->link.tail = irq;
@@ -589,14 +529,8 @@ static int msix_capability_init(struct pci_dev *dev,
589 } 529 }
590 temp = irq; 530 temp = irq;
591 tail = entry; 531 tail = entry;
592 /* Configure MSI-X capability structure */
593 status = arch_setup_msi_irq(irq, dev);
594 if (status < 0) {
595 destroy_msi_irq(irq);
596 break;
597 }
598 532
599 attach_msi_entry(entry, irq); 533 set_irq_msi(irq, entry);
600 } 534 }
601 if (i != nvec) { 535 if (i != nvec) {
602 int avail = i - 1; 536 int avail = i - 1;
@@ -613,6 +547,7 @@ static int msix_capability_init(struct pci_dev *dev,
613 avail = -EBUSY; 547 avail = -EBUSY;
614 return avail; 548 return avail;
615 } 549 }
550 dev->first_msi_irq = entries[0].vector;
616 /* Set MSI-X enabled bits */ 551 /* Set MSI-X enabled bits */
617 enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX); 552 enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
618 553
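
For reference, the driver-visible side of msix_capability_init() is unchanged by this rework; the usual pci_enable_msix() call pattern of this era (my_isr and mydev are hypothetical names):

        struct msix_entry entries[4];
        int i, rc;

        for (i = 0; i < 4; i++)
                entries[i].entry = i;           /* table slots to allocate */
        rc = pci_enable_msix(pdev, entries, 4);
        if (rc == 0)    /* entries[i].vector now holds an allocated irq */
                rc = request_irq(entries[0].vector, my_isr, 0, "mydev", mydev);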
@@ -660,13 +595,11 @@ int pci_msi_supported(struct pci_dev * dev)
660 **/ 595 **/
661int pci_enable_msi(struct pci_dev* dev) 596int pci_enable_msi(struct pci_dev* dev)
662{ 597{
663 int pos, temp, status; 598 int pos, status;
664 599
665 if (pci_msi_supported(dev) < 0) 600 if (pci_msi_supported(dev) < 0)
666 return -EINVAL; 601 return -EINVAL;
667 602
668 temp = dev->irq;
669
670 status = msi_init(); 603 status = msi_init();
671 if (status < 0) 604 if (status < 0)
672 return status; 605 return status;
@@ -675,15 +608,14 @@ int pci_enable_msi(struct pci_dev* dev)
675 if (!pos) 608 if (!pos)
676 return -EINVAL; 609 return -EINVAL;
677 610
678 WARN_ON(!msi_lookup_irq(dev, PCI_CAP_ID_MSI)); 611 WARN_ON(!!dev->msi_enabled);
679 612
680 /* Check whether the driver already requested MSI-X irqs */ 613 /* Check whether the driver already requested MSI-X irqs */
681 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); 614 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
682 if (pos > 0 && !msi_lookup_irq(dev, PCI_CAP_ID_MSIX)) { 615 if (pos > 0 && dev->msix_enabled) {
683 printk(KERN_INFO "PCI: %s: Can't enable MSI. " 616 printk(KERN_INFO "PCI: %s: Can't enable MSI. "
684 "Device already has MSI-X irq assigned\n", 617 "Device already has MSI-X enabled\n",
685 pci_name(dev)); 618 pci_name(dev));
686 dev->irq = temp;
687 return -EINVAL; 619 return -EINVAL;
688 } 620 }
689 status = msi_capability_init(dev); 621 status = msi_capability_init(dev);
@@ -695,13 +627,15 @@ void pci_disable_msi(struct pci_dev* dev)
695 struct msi_desc *entry; 627 struct msi_desc *entry;
696 int pos, default_irq; 628 int pos, default_irq;
697 u16 control; 629 u16 control;
698 unsigned long flags;
699 630
700 if (!pci_msi_enable) 631 if (!pci_msi_enable)
701 return; 632 return;
702 if (!dev) 633 if (!dev)
703 return; 634 return;
704 635
636 if (!dev->msi_enabled)
637 return;
638
705 pos = pci_find_capability(dev, PCI_CAP_ID_MSI); 639 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
706 if (!pos) 640 if (!pos)
707 return; 641 return;
@@ -710,28 +644,26 @@ void pci_disable_msi(struct pci_dev* dev)
710 if (!(control & PCI_MSI_FLAGS_ENABLE)) 644 if (!(control & PCI_MSI_FLAGS_ENABLE))
711 return; 645 return;
712 646
647
713 disable_msi_mode(dev, pos, PCI_CAP_ID_MSI); 648 disable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
714 649
715 spin_lock_irqsave(&msi_lock, flags); 650 entry = get_irq_msi(dev->first_msi_irq);
716 entry = msi_desc[dev->irq];
717 if (!entry || !entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) { 651 if (!entry || !entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) {
718 spin_unlock_irqrestore(&msi_lock, flags);
719 return; 652 return;
720 } 653 }
721 if (irq_has_action(dev->irq)) { 654 if (irq_has_action(dev->first_msi_irq)) {
722 spin_unlock_irqrestore(&msi_lock, flags);
723 printk(KERN_WARNING "PCI: %s: pci_disable_msi() called without " 655 printk(KERN_WARNING "PCI: %s: pci_disable_msi() called without "
724 "free_irq() on MSI irq %d\n", 656 "free_irq() on MSI irq %d\n",
725 pci_name(dev), dev->irq); 657 pci_name(dev), dev->first_msi_irq);
726 BUG_ON(irq_has_action(dev->irq)); 658 BUG_ON(irq_has_action(dev->first_msi_irq));
727 } else { 659 } else {
728 default_irq = entry->msi_attrib.default_irq; 660 default_irq = entry->msi_attrib.default_irq;
729 spin_unlock_irqrestore(&msi_lock, flags); 661 msi_free_irq(dev, dev->first_msi_irq);
730 msi_free_irq(dev, dev->irq);
731 662
732 /* Restore dev->irq to its default pin-assertion irq */ 663 /* Restore dev->irq to its default pin-assertion irq */
733 dev->irq = default_irq; 664 dev->irq = default_irq;
734 } 665 }
666 dev->first_msi_irq = 0;
735} 667}
736 668
737static int msi_free_irq(struct pci_dev* dev, int irq) 669static int msi_free_irq(struct pci_dev* dev, int irq)
@@ -739,27 +671,20 @@ static int msi_free_irq(struct pci_dev* dev, int irq)
739 struct msi_desc *entry; 671 struct msi_desc *entry;
740 int head, entry_nr, type; 672 int head, entry_nr, type;
741 void __iomem *base; 673 void __iomem *base;
742 unsigned long flags;
743 674
744 arch_teardown_msi_irq(irq); 675 entry = get_irq_msi(irq);
745
746 spin_lock_irqsave(&msi_lock, flags);
747 entry = msi_desc[irq];
748 if (!entry || entry->dev != dev) { 676 if (!entry || entry->dev != dev) {
749 spin_unlock_irqrestore(&msi_lock, flags);
750 return -EINVAL; 677 return -EINVAL;
751 } 678 }
752 type = entry->msi_attrib.type; 679 type = entry->msi_attrib.type;
753 entry_nr = entry->msi_attrib.entry_nr; 680 entry_nr = entry->msi_attrib.entry_nr;
754 head = entry->link.head; 681 head = entry->link.head;
755 base = entry->mask_base; 682 base = entry->mask_base;
756 msi_desc[entry->link.head]->link.tail = entry->link.tail; 683 get_irq_msi(entry->link.head)->link.tail = entry->link.tail;
757 msi_desc[entry->link.tail]->link.head = entry->link.head; 684 get_irq_msi(entry->link.tail)->link.head = entry->link.head;
758 entry->dev = NULL;
759 msi_desc[irq] = NULL;
760 spin_unlock_irqrestore(&msi_lock, flags);
761 685
762 destroy_msi_irq(irq); 686 arch_teardown_msi_irq(irq);
687 kmem_cache_free(msi_cachep, entry);
763 688
764 if (type == PCI_CAP_ID_MSIX) { 689 if (type == PCI_CAP_ID_MSIX) {
765 writel(1, base + entry_nr * PCI_MSIX_ENTRY_SIZE + 690 writel(1, base + entry_nr * PCI_MSIX_ENTRY_SIZE +
@@ -790,7 +715,7 @@ static int msi_free_irq(struct pci_dev* dev, int irq)
790int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec) 715int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
791{ 716{
792 int status, pos, nr_entries; 717 int status, pos, nr_entries;
793 int i, j, temp; 718 int i, j;
794 u16 control; 719 u16 control;
795 720
796 if (!entries || pci_msi_supported(dev) < 0) 721 if (!entries || pci_msi_supported(dev) < 0)
@@ -818,16 +743,14 @@ int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
818 return -EINVAL; /* duplicate entry */ 743 return -EINVAL; /* duplicate entry */
819 } 744 }
820 } 745 }
821 temp = dev->irq; 746 WARN_ON(!!dev->msix_enabled);
822 WARN_ON(!msi_lookup_irq(dev, PCI_CAP_ID_MSIX));
823 747
824 /* Check whether the driver already requested an MSI irq */ 748 /* Check whether the driver already requested an MSI irq */
825 if (pci_find_capability(dev, PCI_CAP_ID_MSI) > 0 && 749 if (pci_find_capability(dev, PCI_CAP_ID_MSI) > 0 &&
826 !msi_lookup_irq(dev, PCI_CAP_ID_MSI)) { 750 dev->msi_enabled) {
827 printk(KERN_INFO "PCI: %s: Can't enable MSI-X. " 751 printk(KERN_INFO "PCI: %s: Can't enable MSI-X. "
828 "Device already has an MSI irq assigned\n", 752 "Device already has an MSI irq assigned\n",
829 pci_name(dev)); 753 pci_name(dev));
830 dev->irq = temp;
831 return -EINVAL; 754 return -EINVAL;
832 } 755 }
833 status = msix_capability_init(dev, entries, nvec); 756 status = msix_capability_init(dev, entries, nvec);
@@ -836,7 +759,8 @@ int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
836 759
837void pci_disable_msix(struct pci_dev* dev) 760void pci_disable_msix(struct pci_dev* dev)
838{ 761{
839 int pos, temp; 762 int irq, head, tail = 0, warning = 0;
763 int pos;
840 u16 control; 764 u16 control;
841 765
842 if (!pci_msi_enable) 766 if (!pci_msi_enable)
@@ -844,6 +768,9 @@ void pci_disable_msix(struct pci_dev* dev)
844 if (!dev) 768 if (!dev)
845 return; 769 return;
846 770
771 if (!dev->msix_enabled)
772 return;
773
847 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); 774 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
848 if (!pos) 775 if (!pos)
849 return; 776 return;
@@ -854,31 +781,23 @@ void pci_disable_msix(struct pci_dev* dev)
854 781
855 disable_msi_mode(dev, pos, PCI_CAP_ID_MSIX); 782 disable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
856 783
857 temp = dev->irq; 784 irq = head = dev->first_msi_irq;
858 if (!msi_lookup_irq(dev, PCI_CAP_ID_MSIX)) { 785 while (head != tail) {
859 int irq, head, tail = 0, warning = 0; 786 tail = get_irq_msi(irq)->link.tail;
860 unsigned long flags; 787 if (irq_has_action(irq))
861 788 warning = 1;
862 irq = head = dev->irq; 789 else if (irq != head) /* Release MSI-X irq */
863 dev->irq = temp; /* Restore pin IRQ */ 790 msi_free_irq(dev, irq);
864 while (head != tail) { 791 irq = tail;
865 spin_lock_irqsave(&msi_lock, flags); 792 }
866 tail = msi_desc[irq]->link.tail; 793 msi_free_irq(dev, irq);
867 spin_unlock_irqrestore(&msi_lock, flags); 794 if (warning) {
868 if (irq_has_action(irq)) 795 printk(KERN_WARNING "PCI: %s: pci_disable_msix() called without "
869 warning = 1; 796 "free_irq() on all MSI-X irqs\n",
870 else if (irq != head) /* Release MSI-X irq */ 797 pci_name(dev));
871 msi_free_irq(dev, irq); 798 BUG_ON(warning > 0);
872 irq = tail;
873 }
874 msi_free_irq(dev, irq);
875 if (warning) {
876 printk(KERN_WARNING "PCI: %s: pci_disable_msix() called without "
877 "free_irq() on all MSI-X irqs\n",
878 pci_name(dev));
879 BUG_ON(warning > 0);
880 }
881 } 799 }
800 dev->first_msi_irq = 0;
882} 801}
883 802
884/** 803/**
@@ -892,35 +811,26 @@ void pci_disable_msix(struct pci_dev* dev)
892 **/ 811 **/
893void msi_remove_pci_irq_vectors(struct pci_dev* dev) 812void msi_remove_pci_irq_vectors(struct pci_dev* dev)
894{ 813{
895 int pos, temp;
896 unsigned long flags;
897
898 if (!pci_msi_enable || !dev) 814 if (!pci_msi_enable || !dev)
899 return; 815 return;
900 816
901 temp = dev->irq; /* Save IOAPIC IRQ */ 817 if (dev->msi_enabled) {
902 pos = pci_find_capability(dev, PCI_CAP_ID_MSI); 818 if (irq_has_action(dev->first_msi_irq)) {
903 if (pos > 0 && !msi_lookup_irq(dev, PCI_CAP_ID_MSI)) {
904 if (irq_has_action(dev->irq)) {
905 printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() " 819 printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() "
906 "called without free_irq() on MSI irq %d\n", 820 "called without free_irq() on MSI irq %d\n",
907 pci_name(dev), dev->irq); 821 pci_name(dev), dev->first_msi_irq);
908 BUG_ON(irq_has_action(dev->irq)); 822 BUG_ON(irq_has_action(dev->first_msi_irq));
909 } else /* Release MSI irq assigned to this device */ 823 } else /* Release MSI irq assigned to this device */
910 msi_free_irq(dev, dev->irq); 824 msi_free_irq(dev, dev->first_msi_irq);
911 dev->irq = temp; /* Restore IOAPIC IRQ */
912 } 825 }
913 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); 826 if (dev->msix_enabled) {
914 if (pos > 0 && !msi_lookup_irq(dev, PCI_CAP_ID_MSIX)) {
915 int irq, head, tail = 0, warning = 0; 827 int irq, head, tail = 0, warning = 0;
916 void __iomem *base = NULL; 828 void __iomem *base = NULL;
917 829
918 irq = head = dev->irq; 830 irq = head = dev->first_msi_irq;
919 while (head != tail) { 831 while (head != tail) {
920 spin_lock_irqsave(&msi_lock, flags); 832 tail = get_irq_msi(irq)->link.tail;
921 tail = msi_desc[irq]->link.tail; 833 base = get_irq_msi(irq)->mask_base;
922 base = msi_desc[irq]->mask_base;
923 spin_unlock_irqrestore(&msi_lock, flags);
924 if (irq_has_action(irq)) 834 if (irq_has_action(irq))
925 warning = 1; 835 warning = 1;
926 else if (irq != head) /* Release MSI-X irq */ 836 else if (irq != head) /* Release MSI-X irq */
@@ -935,7 +845,6 @@ void msi_remove_pci_irq_vectors(struct pci_dev* dev)
935 pci_name(dev)); 845 pci_name(dev));
936 BUG_ON(warning > 0); 846 BUG_ON(warning > 0);
937 } 847 }
938 dev->irq = temp; /* Restore IOAPIC IRQ */
939 } 848 }
940} 849}
941 850
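
Net effect of the msi.c changes: per-device MSI state now lives in dev->msi_enabled, dev->msix_enabled and dev->first_msi_irq instead of the global msi_desc[]/msi_lock pair. A single-vector driver still sees the familiar flow (my_isr and mydev are hypothetical names):

        if (pci_enable_msi(pdev) == 0) {
                /* dev->irq is now the MSI vector; first_msi_irq records it */
                rc = request_irq(pdev->irq, my_isr, 0, "mydev", mydev);
                if (rc)
                        pci_disable_msi(pdev);
        }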
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 92d5e8db0de7..4438ae1ede4f 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -324,8 +324,7 @@ static int pci_default_resume(struct pci_dev *pci_dev)
324 /* restore the PCI config space */ 324 /* restore the PCI config space */
325 pci_restore_state(pci_dev); 325 pci_restore_state(pci_dev);
326 /* if the device was enabled before suspend, reenable */ 326 /* if the device was enabled before suspend, reenable */
327 if (atomic_read(&pci_dev->enable_cnt)) 327 retval = __pci_reenable_device(pci_dev);
328 retval = __pci_enable_device(pci_dev);
329 /* if the device was busmaster before the suspend, make it busmaster again */ 328 /* if the device was busmaster before the suspend, make it busmaster again */
330 if (pci_dev->is_busmaster) 329 if (pci_dev->is_busmaster)
331 pci_set_master(pci_dev); 330 pci_set_master(pci_dev);
@@ -422,7 +421,8 @@ static struct kobj_type pci_driver_kobj_type = {
422 * If no error occurred, the driver remains registered even if 421 * If no error occurred, the driver remains registered even if
423 * no device was claimed during registration. 422 * no device was claimed during registration.
424 */ 423 */
425int __pci_register_driver(struct pci_driver *drv, struct module *owner) 424int __pci_register_driver(struct pci_driver *drv, struct module *owner,
425 const char *mod_name)
426{ 426{
427 int error; 427 int error;
428 428
@@ -430,6 +430,7 @@ int __pci_register_driver(struct pci_driver *drv, struct module *owner)
430 drv->driver.name = drv->name; 430 drv->driver.name = drv->name;
431 drv->driver.bus = &pci_bus_type; 431 drv->driver.bus = &pci_bus_type;
432 drv->driver.owner = owner; 432 drv->driver.owner = owner;
433 drv->driver.mod_name = mod_name;
433 drv->driver.kobj.ktype = &pci_driver_kobj_type; 434 drv->driver.kobj.ktype = &pci_driver_kobj_type;
434 435
435 if (pci_multithread_probe) 436 if (pci_multithread_probe)
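
The extra mod_name argument lets the driver core report which module owns a driver; existing callers do not change, because the wrapper macro supplies it implicitly (assumed wrapper, matching the pci.h change that accompanies this series):

        #define pci_register_driver(driver)     \
                __pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)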
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 206c834d263a..8b44cff2c176 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -392,6 +392,14 @@ pci_set_power_state(struct pci_dev *dev, pci_power_t state)
392 if (state > PCI_D3hot) 392 if (state > PCI_D3hot)
393 state = PCI_D3hot; 393 state = PCI_D3hot;
394 394
395 /*
396 * If the device or the parent bridge can't support PCI PM, ignore
397 * the request if we're doing anything besides putting it into D0
398 * (which would only happen on boot).
399 */
400 if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
401 return 0;
402
395 /* Validate current state: 403 /* Validate current state:
396 * Can enter D0 from any state, but we can only go deeper 404
397 * to sleep if we're already in a low power state 405 * to sleep if we're already in a low power state
@@ -403,13 +411,6 @@ pci_set_power_state(struct pci_dev *dev, pci_power_t state)
403 } else if (dev->current_state == state) 411 } else if (dev->current_state == state)
404 return 0; /* we're already there */ 412 return 0; /* we're already there */
405 413
406 /*
407 * If the device or the parent bridge can't support PCI PM, ignore
408 * the request if we're doing anything besides putting it into D0
409 * (which would only happen on boot).
410 */
411 if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
412 return 0;
413 414
414 /* find PCI PM capability in list */ 415 /* find PCI PM capability in list */
415 pm = pci_find_capability(dev, PCI_CAP_ID_PM); 416 pm = pci_find_capability(dev, PCI_CAP_ID_PM);
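
Moving the pci_no_d1d2() test ahead of the state validation makes a D1/D2 request on hardware without those states succeed as an early no-op instead of first being measured against dev->current_state:

        /* sketch of the observable behavior after the reordering */
        error = pci_set_power_state(pdev, PCI_D1);
        /* error == 0 whenever pci_no_d1d2(pdev), regardless of current state */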
@@ -633,8 +634,6 @@ pci_save_state(struct pci_dev *dev)
633 pci_read_config_dword(dev, i * 4,&dev->saved_config_space[i]); 634 pci_read_config_dword(dev, i * 4,&dev->saved_config_space[i]);
634 if ((i = pci_save_msi_state(dev)) != 0) 635 if ((i = pci_save_msi_state(dev)) != 0)
635 return i; 636 return i;
636 if ((i = pci_save_msix_state(dev)) != 0)
637 return i;
638 if ((i = pci_save_pcie_state(dev)) != 0) 637 if ((i = pci_save_pcie_state(dev)) != 0)
639 return i; 638 return i;
640 if ((i = pci_save_pcix_state(dev)) != 0) 639 if ((i = pci_save_pcix_state(dev)) != 0)
@@ -672,22 +671,11 @@ pci_restore_state(struct pci_dev *dev)
672 } 671 }
673 pci_restore_pcix_state(dev); 672 pci_restore_pcix_state(dev);
674 pci_restore_msi_state(dev); 673 pci_restore_msi_state(dev);
675 pci_restore_msix_state(dev); 674
676 return 0; 675 return 0;
677} 676}
678 677
679/** 678static int do_pci_enable_device(struct pci_dev *dev, int bars)
680 * pci_enable_device_bars - Initialize some of a device for use
681 * @dev: PCI device to be initialized
682 * @bars: bitmask of BAR's that must be configured
683 *
684 * Initialize device before it's used by a driver. Ask low-level code
685 * to enable selected I/O and memory resources. Wake up the device if it
686 * was suspended. Beware, this function can fail.
687 */
688
689int
690pci_enable_device_bars(struct pci_dev *dev, int bars)
691{ 679{
692 int err; 680 int err;
693 681
@@ -697,30 +685,47 @@ pci_enable_device_bars(struct pci_dev *dev, int bars)
697 err = pcibios_enable_device(dev, bars); 685 err = pcibios_enable_device(dev, bars);
698 if (err < 0) 686 if (err < 0)
699 return err; 687 return err;
688 pci_fixup_device(pci_fixup_enable, dev);
689
690 return 0;
691}
692
693/**
694 * __pci_reenable_device - Resume abandoned device
695 * @dev: PCI device to be resumed
696 *
697 * Note this function is a backend of pci_default_resume and is not supposed
698 * to be called by normal code; write a proper resume handler and use that instead.
699 */
700int
701__pci_reenable_device(struct pci_dev *dev)
702{
703 if (atomic_read(&dev->enable_cnt))
704 return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
700 return 0; 705 return 0;
701} 706}
702 707
703/** 708/**
704 * __pci_enable_device - Initialize device before it's used by a driver. 709 * pci_enable_device_bars - Initialize some of a device for use
705 * @dev: PCI device to be initialized 710 * @dev: PCI device to be initialized
711 * @bars: bitmask of BAR's that must be configured
706 * 712 *
707 * Initialize device before it's used by a driver. Ask low-level code 713 * Initialize device before it's used by a driver. Ask low-level code
708 * to enable I/O and memory. Wake up the device if it was suspended. 714 * to enable selected I/O and memory resources. Wake up the device if it
709 * Beware, this function can fail. 715 * was suspended. Beware, this function can fail.
710 *
711 * Note this function is a backend and is not supposed to be called by
712 * normal code, use pci_enable_device() instead.
713 */ 716 */
714int 717int
715__pci_enable_device(struct pci_dev *dev) 718pci_enable_device_bars(struct pci_dev *dev, int bars)
716{ 719{
717 int err; 720 int err;
718 721
719 err = pci_enable_device_bars(dev, (1 << PCI_NUM_RESOURCES) - 1); 722 if (atomic_add_return(1, &dev->enable_cnt) > 1)
720 if (err) 723 return 0; /* already enabled */
721 return err; 724
722 pci_fixup_device(pci_fixup_enable, dev); 725 err = do_pci_enable_device(dev, bars);
723 return 0; 726 if (err < 0)
727 atomic_dec(&dev->enable_cnt);
728 return err;
724} 729}
725 730
726/** 731/**
@@ -736,13 +741,105 @@ __pci_enable_device(struct pci_dev *dev)
736 */ 741 */
737int pci_enable_device(struct pci_dev *dev) 742int pci_enable_device(struct pci_dev *dev)
738{ 743{
739 int result; 744 return pci_enable_device_bars(dev, (1 << PCI_NUM_RESOURCES) - 1);
740 if (atomic_add_return(1, &dev->enable_cnt) > 1) 745}
741 return 0; /* already enabled */ 746
742 result = __pci_enable_device(dev); 747/*
743 if (result < 0) 748 * Managed PCI resources. This manages device on/off, intx/msi/msix
744 atomic_dec(&dev->enable_cnt); 749 * on/off and BAR regions. pci_dev itself records msi/msix status, so
745 return result; 750 * there's no need to track it separately. pci_devres is initialized
751 * when a device is enabled using managed PCI device enable interface.
752 */
753struct pci_devres {
754 unsigned int disable:1;
755 unsigned int orig_intx:1;
756 unsigned int restore_intx:1;
757 u32 region_mask;
758};
759
760static void pcim_release(struct device *gendev, void *res)
761{
762 struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
763 struct pci_devres *this = res;
764 int i;
765
766 if (dev->msi_enabled)
767 pci_disable_msi(dev);
768 if (dev->msix_enabled)
769 pci_disable_msix(dev);
770
771 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
772 if (this->region_mask & (1 << i))
773 pci_release_region(dev, i);
774
775 if (this->restore_intx)
776 pci_intx(dev, this->orig_intx);
777
778 if (this->disable)
779 pci_disable_device(dev);
780}
781
782static struct pci_devres * get_pci_dr(struct pci_dev *pdev)
783{
784 struct pci_devres *dr, *new_dr;
785
786 dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
787 if (dr)
788 return dr;
789
790 new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
791 if (!new_dr)
792 return NULL;
793 return devres_get(&pdev->dev, new_dr, NULL, NULL);
794}
795
796static struct pci_devres * find_pci_dr(struct pci_dev *pdev)
797{
798 if (pci_is_managed(pdev))
799 return devres_find(&pdev->dev, pcim_release, NULL, NULL);
800 return NULL;
801}
802
803/**
804 * pcim_enable_device - Managed pci_enable_device()
805 * @pdev: PCI device to be initialized
806 *
807 * Managed pci_enable_device().
808 */
809int pcim_enable_device(struct pci_dev *pdev)
810{
811 struct pci_devres *dr;
812 int rc;
813
814 dr = get_pci_dr(pdev);
815 if (unlikely(!dr))
816 return -ENOMEM;
817 WARN_ON(!!dr->disable);
818
819 rc = pci_enable_device(pdev);
820 if (!rc) {
821 pdev->is_managed = 1;
822 dr->disable = 1;
823 }
824 return rc;
825}
826
827/**
828 * pcim_pin_device - Pin managed PCI device
829 * @pdev: PCI device to pin
830 *
831 * Pin managed PCI device @pdev. A pinned device won't be disabled on
832 * driver detach. @pdev must have been enabled with
833 * pcim_enable_device().
834 */
835void pcim_pin_device(struct pci_dev *pdev)
836{
837 struct pci_devres *dr;
838
839 dr = find_pci_dr(pdev);
840 WARN_ON(!dr || !dr->disable);
841 if (dr)
842 dr->disable = 0;
746} 843}
747 844
748/** 845/**
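
A managed driver built on these helpers needs no explicit teardown: everything acquired after pcim_enable_device() is undone by pcim_release() when the driver detaches. A minimal probe sketch (hypothetical driver names):

        static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        {
                int rc;

                rc = pcim_enable_device(pdev);  /* marks pdev as managed */
                if (rc)
                        return rc;

                rc = pci_request_region(pdev, 0, "mydev");
                if (rc)
                        return rc;      /* region and enable released on detach */

                return 0;               /* no matching cleanup in remove() needed */
        }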
@@ -768,8 +865,13 @@ void __attribute__ ((weak)) pcibios_disable_device (struct pci_dev *dev) {}
768void 865void
769pci_disable_device(struct pci_dev *dev) 866pci_disable_device(struct pci_dev *dev)
770{ 867{
868 struct pci_devres *dr;
771 u16 pci_command; 869 u16 pci_command;
772 870
871 dr = find_pci_dr(dev);
872 if (dr)
873 dr->disable = 0;
874
773 if (atomic_sub_return(1, &dev->enable_cnt) != 0) 875 if (atomic_sub_return(1, &dev->enable_cnt) != 0)
774 return; 876 return;
775 877
@@ -868,6 +970,8 @@ pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
868 */ 970 */
869void pci_release_region(struct pci_dev *pdev, int bar) 971void pci_release_region(struct pci_dev *pdev, int bar)
870{ 972{
973 struct pci_devres *dr;
974
871 if (pci_resource_len(pdev, bar) == 0) 975 if (pci_resource_len(pdev, bar) == 0)
872 return; 976 return;
873 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) 977 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
@@ -876,6 +980,10 @@ void pci_release_region(struct pci_dev *pdev, int bar)
876 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) 980 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
877 release_mem_region(pci_resource_start(pdev, bar), 981 release_mem_region(pci_resource_start(pdev, bar),
878 pci_resource_len(pdev, bar)); 982 pci_resource_len(pdev, bar));
983
984 dr = find_pci_dr(pdev);
985 if (dr)
986 dr->region_mask &= ~(1 << bar);
879} 987}
880 988
881/** 989/**
@@ -894,6 +1002,8 @@ void pci_release_region(struct pci_dev *pdev, int bar)
894 */ 1002 */
895int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name) 1003int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
896{ 1004{
1005 struct pci_devres *dr;
1006
897 if (pci_resource_len(pdev, bar) == 0) 1007 if (pci_resource_len(pdev, bar) == 0)
898 return 0; 1008 return 0;
899 1009
@@ -907,7 +1017,11 @@ int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
907 pci_resource_len(pdev, bar), res_name)) 1017 pci_resource_len(pdev, bar), res_name))
908 goto err_out; 1018 goto err_out;
909 } 1019 }
910 1020
1021 dr = find_pci_dr(pdev);
1022 if (dr)
1023 dr->region_mask |= 1 << bar;
1024
911 return 0; 1025 return 0;
912 1026
913err_out: 1027err_out:
@@ -921,6 +1035,47 @@ err_out:
921 return -EBUSY; 1035 return -EBUSY;
922} 1036}
923 1037
1038/**
1039 * pci_release_selected_regions - Release selected PCI I/O and memory resources
1040 * @pdev: PCI device whose resources were previously reserved
1041 * @bars: Bitmask of BARs to be released
1042 *
1043 * Release selected PCI I/O and memory resources previously reserved.
1044 * Call this function only after all use of the PCI regions has ceased.
1045 */
1046void pci_release_selected_regions(struct pci_dev *pdev, int bars)
1047{
1048 int i;
1049
1050 for (i = 0; i < 6; i++)
1051 if (bars & (1 << i))
1052 pci_release_region(pdev, i);
1053}
1054
1055/**
1056 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
1057 * @pdev: PCI device whose resources are to be reserved
1058 * @bars: Bitmask of BARs to be requested
1059 * @res_name: Name to be associated with resource
1060 */
1061int pci_request_selected_regions(struct pci_dev *pdev, int bars,
1062 const char *res_name)
1063{
1064 int i;
1065
1066 for (i = 0; i < 6; i++)
1067 if (bars & (1 << i))
1068 if (pci_request_region(pdev, i, res_name))
1069 goto err_out;
1070 return 0;
1071
1072err_out:
1073 while (--i >= 0)
1074 if (bars & (1 << i))
1075 pci_release_region(pdev, i);
1076
1077 return -EBUSY;
1078}
924 1079
925/** 1080/**
926 * pci_release_regions - Release reserved PCI I/O and memory resources 1081 * pci_release_regions - Release reserved PCI I/O and memory resources
@@ -933,10 +1088,7 @@ err_out:
933 1088
934void pci_release_regions(struct pci_dev *pdev) 1089void pci_release_regions(struct pci_dev *pdev)
935{ 1090{
936 int i; 1091 pci_release_selected_regions(pdev, (1 << 6) - 1);
937
938 for (i = 0; i < 6; i++)
939 pci_release_region(pdev, i);
940} 1092}
941 1093
942/** 1094/**
@@ -954,18 +1106,7 @@ void pci_release_regions(struct pci_dev *pdev)
954 */ 1106 */
955int pci_request_regions(struct pci_dev *pdev, const char *res_name) 1107int pci_request_regions(struct pci_dev *pdev, const char *res_name)
956{ 1108{
957 int i; 1109 return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
958
959 for (i = 0; i < 6; i++)
960 if(pci_request_region(pdev, i, res_name))
961 goto err_out;
962 return 0;
963
964err_out:
965 while(--i >= 0)
966 pci_release_region(pdev, i);
967
968 return -EBUSY;
969} 1110}
970 1111
971/** 1112/**
@@ -1118,7 +1259,15 @@ pci_intx(struct pci_dev *pdev, int enable)
1118 } 1259 }
1119 1260
1120 if (new != pci_command) { 1261 if (new != pci_command) {
1262 struct pci_devres *dr;
1263
1121 pci_write_config_word(pdev, PCI_COMMAND, new); 1264 pci_write_config_word(pdev, PCI_COMMAND, new);
1265
1266 dr = find_pci_dr(pdev);
1267 if (dr && !dr->restore_intx) {
1268 dr->restore_intx = 1;
1269 dr->orig_intx = !enable;
1270 }
1122 } 1271 }
1123} 1272}
1124 1273
@@ -1148,7 +1297,23 @@ pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
1148 return 0; 1297 return 0;
1149} 1298}
1150#endif 1299#endif
1151 1300
1301/**
1302 * pci_select_bars - Make a BAR mask from the resource type
1303 * @dev: the PCI device for which the BAR mask is made
1304 * @flags: resource type mask to be selected
1305 *
1306 * This helper routine makes a BAR mask from the given resource type.
1307 */
1308int pci_select_bars(struct pci_dev *dev, unsigned long flags)
1309{
1310 int i, bars = 0;
1311 for (i = 0; i < PCI_NUM_RESOURCES; i++)
1312 if (pci_resource_flags(dev, i) & flags)
1313 bars |= (1 << i);
1314 return bars;
1315}
1316
1152static int __devinit pci_init(void) 1317static int __devinit pci_init(void)
1153{ 1318{
1154 struct pci_dev *dev = NULL; 1319 struct pci_dev *dev = NULL;
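
pci_select_bars() is meant to feed pci_request_selected_regions() above, so a driver can claim just one class of BARs (sketch; "mydev" is a placeholder name):

        /* request only the memory BARs; any I/O BARs stay unclaimed */
        int bars = pci_select_bars(pdev, IORESOURCE_MEM);
        int rc = pci_request_selected_regions(pdev, bars, "mydev");

        if (rc)
                return rc;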
@@ -1181,15 +1346,11 @@ early_param("pci", pci_setup);
1181 1346
1182device_initcall(pci_init); 1347device_initcall(pci_init);
1183 1348
1184#if defined(CONFIG_ISA) || defined(CONFIG_EISA)
1185/* FIXME: Some boxes have multiple ISA bridges! */
1186struct pci_dev *isa_bridge;
1187EXPORT_SYMBOL(isa_bridge);
1188#endif
1189
1190EXPORT_SYMBOL_GPL(pci_restore_bars); 1349EXPORT_SYMBOL_GPL(pci_restore_bars);
1191EXPORT_SYMBOL(pci_enable_device_bars); 1350EXPORT_SYMBOL(pci_enable_device_bars);
1192EXPORT_SYMBOL(pci_enable_device); 1351EXPORT_SYMBOL(pci_enable_device);
1352EXPORT_SYMBOL(pcim_enable_device);
1353EXPORT_SYMBOL(pcim_pin_device);
1193EXPORT_SYMBOL(pci_disable_device); 1354EXPORT_SYMBOL(pci_disable_device);
1194EXPORT_SYMBOL(pci_find_capability); 1355EXPORT_SYMBOL(pci_find_capability);
1195EXPORT_SYMBOL(pci_bus_find_capability); 1356EXPORT_SYMBOL(pci_bus_find_capability);
@@ -1197,6 +1358,8 @@ EXPORT_SYMBOL(pci_release_regions);
1197EXPORT_SYMBOL(pci_request_regions); 1358EXPORT_SYMBOL(pci_request_regions);
1198EXPORT_SYMBOL(pci_release_region); 1359EXPORT_SYMBOL(pci_release_region);
1199EXPORT_SYMBOL(pci_request_region); 1360EXPORT_SYMBOL(pci_request_region);
1361EXPORT_SYMBOL(pci_release_selected_regions);
1362EXPORT_SYMBOL(pci_request_selected_regions);
1200EXPORT_SYMBOL(pci_set_master); 1363EXPORT_SYMBOL(pci_set_master);
1201EXPORT_SYMBOL(pci_set_mwi); 1364EXPORT_SYMBOL(pci_set_mwi);
1202EXPORT_SYMBOL(pci_clear_mwi); 1365EXPORT_SYMBOL(pci_clear_mwi);
@@ -1205,13 +1368,10 @@ EXPORT_SYMBOL(pci_set_dma_mask);
1205EXPORT_SYMBOL(pci_set_consistent_dma_mask); 1368EXPORT_SYMBOL(pci_set_consistent_dma_mask);
1206EXPORT_SYMBOL(pci_assign_resource); 1369EXPORT_SYMBOL(pci_assign_resource);
1207EXPORT_SYMBOL(pci_find_parent_resource); 1370EXPORT_SYMBOL(pci_find_parent_resource);
1371EXPORT_SYMBOL(pci_select_bars);
1208 1372
1209EXPORT_SYMBOL(pci_set_power_state); 1373EXPORT_SYMBOL(pci_set_power_state);
1210EXPORT_SYMBOL(pci_save_state); 1374EXPORT_SYMBOL(pci_save_state);
1211EXPORT_SYMBOL(pci_restore_state); 1375EXPORT_SYMBOL(pci_restore_state);
1212EXPORT_SYMBOL(pci_enable_wake); 1376EXPORT_SYMBOL(pci_enable_wake);
1213 1377
1214/* Quirk info */
1215
1216EXPORT_SYMBOL(isa_dma_bridge_buggy);
1217EXPORT_SYMBOL(pci_pci_problems);
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 398852f526a6..a4f2d580625e 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -1,6 +1,6 @@
1/* Functions internal to the PCI core code */ 1/* Functions internal to the PCI core code */
2 2
3extern int __must_check __pci_enable_device(struct pci_dev *); 3extern int __must_check __pci_reenable_device(struct pci_dev *);
4extern int pci_uevent(struct device *dev, char **envp, int num_envp, 4extern int pci_uevent(struct device *dev, char **envp, int num_envp,
5 char *buffer, int buffer_size); 5 char *buffer, int buffer_size);
6extern int pci_create_sysfs_dev_files(struct pci_dev *pdev); 6extern int pci_create_sysfs_dev_files(struct pci_dev *pdev);
@@ -43,12 +43,8 @@ extern void pci_remove_legacy_files(struct pci_bus *bus);
43/* Lock for read/write access to pci device and bus lists */ 43/* Lock for read/write access to pci device and bus lists */
44extern struct rw_semaphore pci_bus_sem; 44extern struct rw_semaphore pci_bus_sem;
45 45
46#ifdef CONFIG_PCI_MSI
47extern int pci_msi_quirk;
48#else
49#define pci_msi_quirk 0
50#endif
51extern unsigned int pci_pm_d3_delay; 46extern unsigned int pci_pm_d3_delay;
47
52#ifdef CONFIG_PCI_MSI 48#ifdef CONFIG_PCI_MSI
53void disable_msi_mode(struct pci_dev *dev, int pos, int type); 49void disable_msi_mode(struct pci_dev *dev, int pos, int type);
54void pci_no_msi(void); 50void pci_no_msi(void);
@@ -56,17 +52,15 @@ void pci_no_msi(void);
56static inline void disable_msi_mode(struct pci_dev *dev, int pos, int type) { } 52static inline void disable_msi_mode(struct pci_dev *dev, int pos, int type) { }
57static inline void pci_no_msi(void) { } 53static inline void pci_no_msi(void) { }
58#endif 54#endif
55
59#if defined(CONFIG_PCI_MSI) && defined(CONFIG_PM) 56#if defined(CONFIG_PCI_MSI) && defined(CONFIG_PM)
60int pci_save_msi_state(struct pci_dev *dev); 57int pci_save_msi_state(struct pci_dev *dev);
61int pci_save_msix_state(struct pci_dev *dev);
62void pci_restore_msi_state(struct pci_dev *dev); 58void pci_restore_msi_state(struct pci_dev *dev);
63void pci_restore_msix_state(struct pci_dev *dev);
64#else 59#else
65static inline int pci_save_msi_state(struct pci_dev *dev) { return 0; } 60static inline int pci_save_msi_state(struct pci_dev *dev) { return 0; }
66static inline int pci_save_msix_state(struct pci_dev *dev) { return 0; }
67static inline void pci_restore_msi_state(struct pci_dev *dev) {} 61static inline void pci_restore_msi_state(struct pci_dev *dev) {}
68static inline void pci_restore_msix_state(struct pci_dev *dev) {}
69#endif 62#endif
63
70static inline int pci_no_d1d2(struct pci_dev *dev) 64static inline int pci_no_d1d2(struct pci_dev *dev)
71{ 65{
72 unsigned int parent_dstates = 0; 66 unsigned int parent_dstates = 0;
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 0e0401dd02cb..2fe1d690eb13 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -144,6 +144,32 @@ static u32 pci_size(u32 base, u32 maxbase, u32 mask)
144 return size; 144 return size;
145} 145}
146 146
147static u64 pci_size64(u64 base, u64 maxbase, u64 mask)
148{
149 u64 size = mask & maxbase; /* Find the significant bits */
150 if (!size)
151 return 0;
152
153 /* Get the lowest of them to find the decode size, and
154 from that the extent. */
155 size = (size & ~(size-1)) - 1;
156
157 /* base == maxbase can be valid only if the BAR has
158 already been programmed with all 1s. */
159 if (base == maxbase && ((base | size) & mask) != mask)
160 return 0;
161
162 return size;
163}
164
165static inline int is_64bit_memory(u32 mask)
166{
167 if ((mask & (PCI_BASE_ADDRESS_SPACE|PCI_BASE_ADDRESS_MEM_TYPE_MASK)) ==
168 (PCI_BASE_ADDRESS_SPACE_MEMORY|PCI_BASE_ADDRESS_MEM_TYPE_64))
169 return 1;
170 return 0;
171}
172
147static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom) 173static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
148{ 174{
149 unsigned int pos, reg, next; 175 unsigned int pos, reg, next;
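
A worked example of the pci_size64() math for a hypothetical 8 GiB 64-bit memory BAR: after ~0 is written to both halves, the combined read-back decodes as follows:

        u64 maxbase = 0xfffffffe00000000ULL;    /* read-back, low type bits ignored */
        u64 size = maxbase & PCI_BASE_ADDRESS_MEM_MASK;

        size = (size & ~(size - 1)) - 1;        /* lowest set bit is the decode size */
        /* size == 0x1ffffffff, so res->end = res->start + sz64 spans 8 GiB */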
@@ -151,6 +177,10 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
151 struct resource *res; 177 struct resource *res;
152 178
153 for(pos=0; pos<howmany; pos = next) { 179 for(pos=0; pos<howmany; pos = next) {
180 u64 l64;
181 u64 sz64;
182 u32 raw_sz;
183
154 next = pos+1; 184 next = pos+1;
155 res = &dev->resource[pos]; 185 res = &dev->resource[pos];
156 res->name = pci_name(dev); 186 res->name = pci_name(dev);
@@ -163,9 +193,16 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
163 continue; 193 continue;
164 if (l == 0xffffffff) 194 if (l == 0xffffffff)
165 l = 0; 195 l = 0;
166 if ((l & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_MEMORY) { 196 raw_sz = sz;
197 if ((l & PCI_BASE_ADDRESS_SPACE) ==
198 PCI_BASE_ADDRESS_SPACE_MEMORY) {
167 sz = pci_size(l, sz, (u32)PCI_BASE_ADDRESS_MEM_MASK); 199 sz = pci_size(l, sz, (u32)PCI_BASE_ADDRESS_MEM_MASK);
168 if (!sz) 200 /*
201 * For 64-bit prefetchable memory, sz could be 0 if the
202 * real size is bigger than 4G, so we need to check
203 * szhi for that.
204 */
205 if (!is_64bit_memory(l) && !sz)
169 continue; 206 continue;
170 res->start = l & PCI_BASE_ADDRESS_MEM_MASK; 207 res->start = l & PCI_BASE_ADDRESS_MEM_MASK;
171 res->flags |= l & ~PCI_BASE_ADDRESS_MEM_MASK; 208 res->flags |= l & ~PCI_BASE_ADDRESS_MEM_MASK;
@@ -178,30 +215,36 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
178 } 215 }
179 res->end = res->start + (unsigned long) sz; 216 res->end = res->start + (unsigned long) sz;
180 res->flags |= pci_calc_resource_flags(l); 217 res->flags |= pci_calc_resource_flags(l);
181 if ((l & (PCI_BASE_ADDRESS_SPACE | PCI_BASE_ADDRESS_MEM_TYPE_MASK)) 218 if (is_64bit_memory(l)) {
182 == (PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64)) {
183 u32 szhi, lhi; 219 u32 szhi, lhi;
220
184 pci_read_config_dword(dev, reg+4, &lhi); 221 pci_read_config_dword(dev, reg+4, &lhi);
185 pci_write_config_dword(dev, reg+4, ~0); 222 pci_write_config_dword(dev, reg+4, ~0);
186 pci_read_config_dword(dev, reg+4, &szhi); 223 pci_read_config_dword(dev, reg+4, &szhi);
187 pci_write_config_dword(dev, reg+4, lhi); 224 pci_write_config_dword(dev, reg+4, lhi);
188 szhi = pci_size(lhi, szhi, 0xffffffff); 225 sz64 = ((u64)szhi << 32) | raw_sz;
226 l64 = ((u64)lhi << 32) | l;
227 sz64 = pci_size64(l64, sz64, PCI_BASE_ADDRESS_MEM_MASK);
189 next++; 228 next++;
190#if BITS_PER_LONG == 64 229#if BITS_PER_LONG == 64
191 res->start |= ((unsigned long) lhi) << 32; 230 if (!sz64) {
192 res->end = res->start + sz; 231 res->start = 0;
193 if (szhi) { 232 res->end = 0;
194 /* This BAR needs > 4GB? Wow. */ 233 res->flags = 0;
195 res->end |= (unsigned long)szhi<<32; 234 continue;
196 } 235 }
236 res->start = l64 & PCI_BASE_ADDRESS_MEM_MASK;
237 res->end = res->start + sz64;
197#else 238#else
198 if (szhi) { 239 if (sz64 > 0x100000000ULL) {
199 printk(KERN_ERR "PCI: Unable to handle 64-bit BAR for device %s\n", pci_name(dev)); 240 printk(KERN_ERR "PCI: Unable to handle 64-bit "
241 "BAR for device %s\n", pci_name(dev));
200 res->start = 0; 242 res->start = 0;
201 res->flags = 0; 243 res->flags = 0;
202 } else if (lhi) { 244 } else if (lhi) {
203 /* 64-bit wide address, treat as disabled */ 245 /* 64-bit wide address, treat as disabled */
204 pci_write_config_dword(dev, reg, l & ~(u32)PCI_BASE_ADDRESS_MEM_MASK); 246 pci_write_config_dword(dev, reg,
247 l & ~(u32)PCI_BASE_ADDRESS_MEM_MASK);
205 pci_write_config_dword(dev, reg+4, 0); 248 pci_write_config_dword(dev, reg+4, 0);
206 res->start = 0; 249 res->start = 0;
207 res->end = sz; 250 res->end = sz;
@@ -902,7 +945,6 @@ pci_scan_single_device(struct pci_bus *bus, int devfn)
902 return NULL; 945 return NULL;
903 946
904 pci_device_add(dev, bus); 947 pci_device_add(dev, bus);
905 pci_scan_msi_device(dev);
906 948
907 return dev; 949 return dev;
908} 950}
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 16945c2ba2ca..1e6eda25c0d8 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -61,7 +61,8 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_p
61 61
62 This appears to be BIOS not version dependent. So presumably there is a 62 This appears to be BIOS not version dependent. So presumably there is a
63 chipset level fix */ 63 chipset level fix */
64int isa_dma_bridge_buggy; /* Exported */ 64int isa_dma_bridge_buggy;
65EXPORT_SYMBOL(isa_dma_bridge_buggy);
65 66
66static void __devinit quirk_isa_dma_hangs(struct pci_dev *dev) 67static void __devinit quirk_isa_dma_hangs(struct pci_dev *dev)
67{ 68{
@@ -83,6 +84,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_2, quirk_isa_d
83DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs ); 84DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs );
84 85
85int pci_pci_problems; 86int pci_pci_problems;
87EXPORT_SYMBOL(pci_pci_problems);
86 88
87/* 89/*
88 * Chipsets where PCI->PCI transfers vanish or hang 90 * Chipsets where PCI->PCI transfers vanish or hang
@@ -94,6 +96,8 @@ static void __devinit quirk_nopcipci(struct pci_dev *dev)
94 pci_pci_problems |= PCIPCI_FAIL; 96 pci_pci_problems |= PCIPCI_FAIL;
95 } 97 }
96} 98}
99DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5597, quirk_nopcipci );
100DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496, quirk_nopcipci );
97 101
98static void __devinit quirk_nopciamd(struct pci_dev *dev) 102static void __devinit quirk_nopciamd(struct pci_dev *dev)
99{ 103{
@@ -105,9 +109,6 @@ static void __devinit quirk_nopciamd(struct pci_dev *dev)
105 pci_pci_problems |= PCIAGP_FAIL; 109 pci_pci_problems |= PCIAGP_FAIL;
106 } 110 }
107} 111}
108
109DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5597, quirk_nopcipci );
110DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496, quirk_nopcipci );
111DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8151_0, quirk_nopciamd ); 112DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8151_0, quirk_nopciamd );
112 113
113/* 114/*
@@ -661,9 +662,11 @@ static void quirk_via_bridge(struct pci_dev *dev)
661 /* See what bridge we have and find the device ranges */ 662 /* See what bridge we have and find the device ranges */
662 switch (dev->device) { 663 switch (dev->device) {
663 case PCI_DEVICE_ID_VIA_82C686: 664 case PCI_DEVICE_ID_VIA_82C686:
664 		/* 82C686 is special */
665 		via_vlink_dev_lo = 7;
666 		via_vlink_dev_hi = 7;

665 		/* The VT82C686 is special, it attaches to PCI and can have
666 		   any device number. All its subdevices are functions of
667 		   that single device. */
668 		via_vlink_dev_lo = PCI_SLOT(dev->devfn);
669 		via_vlink_dev_hi = PCI_SLOT(dev->devfn);
667 break; 670 break;
668 case PCI_DEVICE_ID_VIA_8237: 671 case PCI_DEVICE_ID_VIA_8237:
669 case PCI_DEVICE_ID_VIA_8237A: 672 case PCI_DEVICE_ID_VIA_8237A:
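The fix above stops hard-coding slot 7: a VT82C686 can enumerate at any device number, so the quirk now derives the slot from the bridge's own devfn. PCI_SLOT() is plain bit surgery, since devfn packs the slot in bits 7:3 and the function in bits 2:0. As a stand-alone illustration (helper names are made up):

/* What PCI_SLOT() and PCI_FUNC() expand to: devfn = (slot << 3) | func */
static unsigned int example_slot(unsigned int devfn)
{
	return (devfn >> 3) & 0x1f;	/* 32 slots per bus */
}

static unsigned int example_func(unsigned int devfn)
{
	return devfn & 0x07;		/* 8 functions per slot */
}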
@@ -868,7 +871,7 @@ static void __devinit quirk_sb600_sata(struct pci_dev *pdev)
868 pci_write_config_byte(pdev, 0xa, 6); 871 pci_write_config_byte(pdev, 0xa, 6);
869 pci_write_config_byte(pdev, 0x40, tmp); 872 pci_write_config_byte(pdev, 0x40, tmp);
870 873
871 pdev->class = 0x010601; 874 pdev->class = PCI_CLASS_STORAGE_SATA_AHCI;
872 } 875 }
873} 876}
874DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_sb600_sata); 877DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_sb600_sata);
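Swapping the magic 0x010601 for PCI_CLASS_STORAGE_SATA_AHCI changes nothing at runtime; the constant only gains a name. The 24-bit class word is three packed bytes, which a couple of throwaway macros make explicit (illustrative names, not kernel API):

/* 0x010601: base class 0x01 (mass storage), sub-class 0x06 (SATA),
 * programming interface 0x01 (AHCI) */
#define EXAMPLE_BASE_CLASS(c)	(((c) >> 16) & 0xff)
#define EXAMPLE_SUB_CLASS(c)	(((c) >>  8) & 0xff)
#define EXAMPLE_PROG_IF(c)	((c) & 0xff)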
@@ -974,52 +977,51 @@ static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev)
974 case 0x1626: /* L3C notebook */ 977 case 0x1626: /* L3C notebook */
975 asus_hides_smbus = 1; 978 asus_hides_smbus = 1;
976 } 979 }
977 if (dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) 980 else if (dev->device == PCI_DEVICE_ID_INTEL_82845G_HB)
978 switch(dev->subsystem_device) { 981 switch(dev->subsystem_device) {
979 case 0x80b1: /* P4GE-V */ 982 case 0x80b1: /* P4GE-V */
980 case 0x80b2: /* P4PE */ 983 case 0x80b2: /* P4PE */
981 case 0x8093: /* P4B533-V */ 984 case 0x8093: /* P4B533-V */
982 asus_hides_smbus = 1; 985 asus_hides_smbus = 1;
983 } 986 }
984 if (dev->device == PCI_DEVICE_ID_INTEL_82850_HB) 987 else if (dev->device == PCI_DEVICE_ID_INTEL_82850_HB)
985 switch(dev->subsystem_device) { 988 switch(dev->subsystem_device) {
986 case 0x8030: /* P4T533 */ 989 case 0x8030: /* P4T533 */
987 asus_hides_smbus = 1; 990 asus_hides_smbus = 1;
988 } 991 }
989 if (dev->device == PCI_DEVICE_ID_INTEL_7205_0) 992 else if (dev->device == PCI_DEVICE_ID_INTEL_7205_0)
990 switch (dev->subsystem_device) { 993 switch (dev->subsystem_device) {
991 case 0x8070: /* P4G8X Deluxe */ 994 case 0x8070: /* P4G8X Deluxe */
992 asus_hides_smbus = 1; 995 asus_hides_smbus = 1;
993 } 996 }
994 if (dev->device == PCI_DEVICE_ID_INTEL_E7501_MCH) 997 else if (dev->device == PCI_DEVICE_ID_INTEL_E7501_MCH)
995 switch (dev->subsystem_device) { 998 switch (dev->subsystem_device) {
996 case 0x80c9: /* PU-DLS */ 999 case 0x80c9: /* PU-DLS */
997 asus_hides_smbus = 1; 1000 asus_hides_smbus = 1;
998 } 1001 }
999 if (dev->device == PCI_DEVICE_ID_INTEL_82855GM_HB) 1002 else if (dev->device == PCI_DEVICE_ID_INTEL_82855GM_HB)
1000 switch (dev->subsystem_device) { 1003 switch (dev->subsystem_device) {
1001 case 0x1751: /* M2N notebook */ 1004 case 0x1751: /* M2N notebook */
1002 case 0x1821: /* M5N notebook */ 1005 case 0x1821: /* M5N notebook */
1003 asus_hides_smbus = 1; 1006 asus_hides_smbus = 1;
1004 } 1007 }
1005 if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB) 1008 else if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
1006 switch (dev->subsystem_device) { 1009 switch (dev->subsystem_device) {
1007 case 0x184b: /* W1N notebook */ 1010 case 0x184b: /* W1N notebook */
1008 case 0x186a: /* M6Ne notebook */ 1011 case 0x186a: /* M6Ne notebook */
1009 asus_hides_smbus = 1; 1012 asus_hides_smbus = 1;
1010 } 1013 }
1011 if (dev->device == PCI_DEVICE_ID_INTEL_82865_HB) 1014 else if (dev->device == PCI_DEVICE_ID_INTEL_82865_HB)
1012 switch (dev->subsystem_device) { 1015 switch (dev->subsystem_device) {
1013 case 0x80f2: /* P4P800-X */ 1016 case 0x80f2: /* P4P800-X */
1014 asus_hides_smbus = 1; 1017 asus_hides_smbus = 1;
1015 } 1018 }
1016 if (dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB) { 1019 else if (dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB)
1017 switch (dev->subsystem_device) { 1020 switch (dev->subsystem_device) {
1018 case 0x1882: /* M6V notebook */ 1021 case 0x1882: /* M6V notebook */
1019 case 0x1977: /* A6VA notebook */ 1022 case 0x1977: /* A6VA notebook */
1020 asus_hides_smbus = 1; 1023 asus_hides_smbus = 1;
1021 } 1024 }
1022 }
1023 } else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_HP)) { 1025 } else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_HP)) {
1024 if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB) 1026 if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
1025 switch(dev->subsystem_device) { 1027 switch(dev->subsystem_device) {
@@ -1027,25 +1029,24 @@ static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev)
1027 case 0x0890: /* HP Compaq nc6000 */ 1029 case 0x0890: /* HP Compaq nc6000 */
1028 asus_hides_smbus = 1; 1030 asus_hides_smbus = 1;
1029 } 1031 }
1030 if (dev->device == PCI_DEVICE_ID_INTEL_82865_HB) 1032 else if (dev->device == PCI_DEVICE_ID_INTEL_82865_HB)
1031 switch (dev->subsystem_device) { 1033 switch (dev->subsystem_device) {
1032 case 0x12bc: /* HP D330L */ 1034 case 0x12bc: /* HP D330L */
1033 case 0x12bd: /* HP D530 */ 1035 case 0x12bd: /* HP D530 */
1034 asus_hides_smbus = 1; 1036 asus_hides_smbus = 1;
1035 } 1037 }
1036 if (dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB) { 1038 else if (dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB)
1037 switch (dev->subsystem_device) { 1039 switch (dev->subsystem_device) {
1038 case 0x099c: /* HP Compaq nx6110 */ 1040 case 0x099c: /* HP Compaq nx6110 */
1039 asus_hides_smbus = 1; 1041 asus_hides_smbus = 1;
1040 } 1042 }
1041 }
1042 } else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_TOSHIBA)) { 1043 } else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_TOSHIBA)) {
1043 if (dev->device == PCI_DEVICE_ID_INTEL_82855GM_HB) 1044 if (dev->device == PCI_DEVICE_ID_INTEL_82855GM_HB)
1044 switch(dev->subsystem_device) { 1045 switch(dev->subsystem_device) {
1045 case 0x0001: /* Toshiba Satellite A40 */ 1046 case 0x0001: /* Toshiba Satellite A40 */
1046 asus_hides_smbus = 1; 1047 asus_hides_smbus = 1;
1047 } 1048 }
1048 if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB) 1049 else if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
1049 switch(dev->subsystem_device) { 1050 switch(dev->subsystem_device) {
1050 case 0x0001: /* Toshiba Tecra M2 */ 1051 case 0x0001: /* Toshiba Tecra M2 */
1051 asus_hides_smbus = 1; 1052 asus_hides_smbus = 1;
@@ -1134,6 +1135,14 @@ static void quirk_sis_96x_smbus(struct pci_dev *dev)
1134 pci_write_config_byte(dev, 0x77, val & ~0x10); 1135 pci_write_config_byte(dev, 0x77, val & ~0x10);
1135 } 1136 }
1136} 1137}
1138DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_961, quirk_sis_96x_smbus );
1139DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_962, quirk_sis_96x_smbus );
1140DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_963, quirk_sis_96x_smbus );
1141DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_sis_96x_smbus );
1142DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_961, quirk_sis_96x_smbus );
1143DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_962, quirk_sis_96x_smbus );
1144DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_963, quirk_sis_96x_smbus );
1145DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_sis_96x_smbus );
1137 1146
1138/* 1147/*
1139 * ... This is further complicated by the fact that some SiS96x south 1148 * ... This is further complicated by the fact that some SiS96x south
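Keeping the DECLARE_PCI_FIXUP_HEADER/RESUME lines next to quirk_sis_96x_smbus() (they previously sat hundreds of lines away; the old copies are removed further down) also makes the registration pattern visible: one entry per (pass, device ID) pair, with HEADER covering initial probing and RESUME re-applying the fix after suspend. Roughly, each macro emits a table entry into a pass-specific linker section that pci_fixup_device() walks. A simplified sketch of that machinery (the real macro also uniquifies the symbol name):

#include <linux/pci.h>

struct example_pci_fixup {		/* sketch of struct pci_fixup */
	u16 vendor;
	u16 device;
	void (*hook)(struct pci_dev *dev);
};

#define EXAMPLE_DECLARE_FIXUP(sec, v, d, fn)			\
	static const struct example_pci_fixup __fixup_##fn	\
	__attribute__((used, section(sec))) = { (v), (d), (fn) }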
@@ -1143,8 +1152,6 @@ static void quirk_sis_96x_smbus(struct pci_dev *dev)
1143 * 1152 *
1144 * We can also enable the sis96x bit in the discovery register.. 1153 * We can also enable the sis96x bit in the discovery register..
1145 */ 1154 */
1146static int __devinitdata sis_96x_compatible = 0;
1147
1148#define SIS_DETECT_REGISTER 0x40 1155#define SIS_DETECT_REGISTER 0x40
1149 1156
1150static void quirk_sis_503(struct pci_dev *dev) 1157static void quirk_sis_503(struct pci_dev *dev)
@@ -1160,9 +1167,6 @@ static void quirk_sis_503(struct pci_dev *dev)
1160 return; 1167 return;
1161 } 1168 }
1162 1169
1163 /* Make people aware that we changed the config.. */
1164 printk(KERN_WARNING "Uncovering SIS%x that hid as a SIS503 (compatible=%d)\n", devid, sis_96x_compatible);
1165
1166 /* 1170 /*
1167 * Ok, it now shows up as a 96x.. run the 96x quirk by 1171 * Ok, it now shows up as a 96x.. run the 96x quirk by
1168 * hand in case it has already been processed. 1172 * hand in case it has already been processed.
@@ -1171,20 +1175,10 @@ static void quirk_sis_503(struct pci_dev *dev)
1171 dev->device = devid; 1175 dev->device = devid;
1172 quirk_sis_96x_smbus(dev); 1176 quirk_sis_96x_smbus(dev);
1173} 1177}
1174
1175static void __init quirk_sis_96x_compatible(struct pci_dev *dev)
1176{
1177 sis_96x_compatible = 1;
1178}
1179DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_645, quirk_sis_96x_compatible );
1180DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_646, quirk_sis_96x_compatible );
1181DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_648, quirk_sis_96x_compatible );
1182DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_650, quirk_sis_96x_compatible );
1183DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_651, quirk_sis_96x_compatible );
1184DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_735, quirk_sis_96x_compatible );
1185
1186DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503 ); 1178DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503 );
1187DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503 ); 1179DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503 );
1180
1181
1188/* 1182/*
1189 * On ASUS A8V and A8V Deluxe boards, the onboard AC97 audio controller 1183 * On ASUS A8V and A8V Deluxe boards, the onboard AC97 audio controller
1190 * and MC97 modem controller are disabled when a second PCI soundcard is 1184 * and MC97 modem controller are disabled when a second PCI soundcard is
@@ -1215,21 +1209,8 @@ static void asus_hides_ac97_lpc(struct pci_dev *dev)
1215 } 1209 }
1216} 1210}
1217DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc ); 1211DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc );
1218
1219
1220DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_961, quirk_sis_96x_smbus );
1221DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_962, quirk_sis_96x_smbus );
1222DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_963, quirk_sis_96x_smbus );
1223DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_sis_96x_smbus );
1224
1225DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc ); 1212DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc );
1226 1213
1227
1228DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_961, quirk_sis_96x_smbus );
1229DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_962, quirk_sis_96x_smbus );
1230DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_963, quirk_sis_96x_smbus );
1231DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_sis_96x_smbus );
1232
1233#if defined(CONFIG_ATA) || defined(CONFIG_ATA_MODULE) 1214#if defined(CONFIG_ATA) || defined(CONFIG_ATA_MODULE)
1234 1215
1235/* 1216/*
@@ -1260,8 +1241,8 @@ static void quirk_jmicron_dualfn(struct pci_dev *pdev)
1260 pci_read_config_dword(pdev, 0x40, &conf); 1241 pci_read_config_dword(pdev, 0x40, &conf);
1261 /* Enable dual function mode, AHCI on fn 0, IDE fn1 */ 1242 /* Enable dual function mode, AHCI on fn 0, IDE fn1 */
1262 /* Set the class codes correctly and then direct IDE 0 */ 1243 /* Set the class codes correctly and then direct IDE 0 */
1263 conf &= ~0x000F0200; /* Clear bit 9 and 16-19 */ 1244 conf &= ~0x000FF200; /* Clear bit 9 and 12-19 */
1264 conf |= 0x00C20002; /* Set bit 1, 17, 22, 23 */ 1245 conf |= 0x00C2A102; /* Set 1, 8, 13, 15, 17, 22, 23 */
1265 pci_write_config_dword(pdev, 0x40, conf); 1246 pci_write_config_dword(pdev, 0x40, conf);
1266 1247
1267 /* Reconfigure so that the PCI scanner discovers the 1248 /* Reconfigure so that the PCI scanner discovers the
@@ -1274,7 +1255,6 @@ static void quirk_jmicron_dualfn(struct pci_dev *pdev)
1274 break; 1255 break;
1275 } 1256 }
1276} 1257}
1277
1278DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, quirk_jmicron_dualfn); 1258DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, quirk_jmicron_dualfn);
1279DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, quirk_jmicron_dualfn); 1259DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, quirk_jmicron_dualfn);
1280 1260
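The new mask constants are easy to misread, so it is worth verifying that the updated comments and the hex values actually agree. A self-contained user-space check (nothing kernel-specific):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t clear = 1u << 9;		/* "Clear bit 9 and 12-19" */
	uint32_t set;
	int bit;

	for (bit = 12; bit <= 19; bit++)
		clear |= 1u << bit;
	assert(clear == 0x000FF200u);

	/* "Set 1, 8, 13, 15, 17, 22, 23" */
	set = (1u << 1) | (1u << 8) | (1u << 13) | (1u << 15)
	    | (1u << 17) | (1u << 22) | (1u << 23);
	assert(set == 0x00C2A102u);

	return 0;
}

Both asserts hold, so the widened clear mask and the new set mask match their comments.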
@@ -1418,6 +1398,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, quirk_intel_ide_co
1418 1398
1419 1399
1420int pcie_mch_quirk; 1400int pcie_mch_quirk;
1401EXPORT_SYMBOL(pcie_mch_quirk);
1421 1402
1422static void __devinit quirk_pcie_mch(struct pci_dev *pdev) 1403static void __devinit quirk_pcie_mch(struct pci_dev *pdev)
1423{ 1404{
@@ -1479,6 +1460,24 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2609, quirk_intel_pcie_pm);
1479DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260a, quirk_intel_pcie_pm); 1460DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260a, quirk_intel_pcie_pm);
1480DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260b, quirk_intel_pcie_pm); 1461DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260b, quirk_intel_pcie_pm);
1481 1462
1463/*
1464 * Toshiba TC86C001 IDE controller reports the standard 8-byte BAR0 size
1465 * but the PIO transfers won't work if BAR0 falls at the odd 8 bytes.
1466 * Re-allocate the region if needed...
1467 */
1468static void __init quirk_tc86c001_ide(struct pci_dev *dev)
1469{
1470 struct resource *r = &dev->resource[0];
1471
1472 if (r->start & 0x8) {
1473 r->start = 0;
1474 r->end = 0xf;
1475 }
1476}
1477DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA_2,
1478 PCI_DEVICE_ID_TOSHIBA_TC86C001_IDE,
1479 quirk_tc86c001_ide);
1480
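The quirk relies on what appears to be a PCI-core convention: a resource left with start == 0 but a preserved size (end = 0xf keeps the 16-byte span) is treated as unassigned, so the allocator later places BAR0 at a fresh, properly aligned address. The trigger is a single alignment test on bit 3 of the start address; as a sketch:

#include <stdbool.h>
#include <stdint.h>

/* True when BAR0 sits at the "odd 8 bytes" of a 16-byte window,
 * i.e. bit 3 of its bus address is set */
static bool tc86c001_bar0_misaligned(uint64_t start)
{
	return (start & 0x8) != 0;
}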
1482static void __devinit quirk_netmos(struct pci_dev *dev) 1481static void __devinit quirk_netmos(struct pci_dev *dev)
1483{ 1482{
1484 unsigned int num_parallel = (dev->subsystem_device & 0xf0) >> 4; 1483 unsigned int num_parallel = (dev->subsystem_device & 0xf0) >> 4;
@@ -1644,6 +1643,7 @@ void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
1644 } 1643 }
1645 pci_do_fixups(dev, start, end); 1644 pci_do_fixups(dev, start, end);
1646} 1645}
1646EXPORT_SYMBOL(pci_fixup_device);
1647 1647
1648/* Enable 1k I/O space granularity on the Intel P64H2 */ 1648/* Enable 1k I/O space granularity on the Intel P64H2 */
1649static void __devinit quirk_p64h2_1k_io(struct pci_dev *dev) 1649static void __devinit quirk_p64h2_1k_io(struct pci_dev *dev)
@@ -1671,6 +1671,31 @@ static void __devinit quirk_p64h2_1k_io(struct pci_dev *dev)
1671} 1671}
1672DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1460, quirk_p64h2_1k_io); 1672DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1460, quirk_p64h2_1k_io);
1673 1673
1674/* Fix the IOBL_ADR for 1k I/O space granularity on the Intel P64H2
1675 * The IOBL_ADR gets re-written to 4k boundaries in pci_setup_bridge()
1676 * in drivers/pci/setup-bus.c
1677 */
1678static void __devinit quirk_p64h2_1k_io_fix_iobl(struct pci_dev *dev)
1679{
1680 u16 en1k, iobl_adr, iobl_adr_1k;
1681 struct resource *res = dev->resource + PCI_BRIDGE_RESOURCES;
1682
1683 pci_read_config_word(dev, 0x40, &en1k);
1684
1685 if (en1k & 0x200) {
1686 pci_read_config_word(dev, PCI_IO_BASE, &iobl_adr);
1687
1688 iobl_adr_1k = iobl_adr | (res->start >> 8) | (res->end & 0xfc00);
1689
1690 if (iobl_adr != iobl_adr_1k) {
1691 printk(KERN_INFO "PCI: Fixing P64H2 IOBL_ADR from 0x%x to 0x%x for 1 KB Granularity\n",
1692 iobl_adr,iobl_adr_1k);
1693 pci_write_config_word(dev, PCI_IO_BASE, iobl_adr_1k);
1694 }
1695 }
1696}
1697DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1460, quirk_p64h2_1k_io_fix_iobl);
1698
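The packing in quirk_p64h2_1k_io_fix_iobl() is dense: PCI_IO_BASE is a 16-bit register whose low byte holds the I/O base and whose high byte holds the I/O limit. With the EN1K bit set the P64H2 decodes on 1 KB rather than 4 KB boundaries, so (res->start >> 8) appears to drop base address bits 15:10 into bits 7:2 of the low byte, while (res->end & 0xfc00) keeps limit bits 15:10 in place in the high byte. A quick user-space illustration with an assumed 1 KB window:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical bridge window: 0x1400-0x17ff (one 1 KB block) */
	uint32_t start = 0x1400, end = 0x17ff;
	uint16_t iobl = (uint16_t)((start >> 8) | (end & 0xfc00));

	printf("IOBL_ADR = 0x%04x\n", (unsigned)iobl);	/* prints 0x1414 */
	return 0;
}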
1674/* Under some circumstances, AER is not linked with extended capabilities. 1699/* Under some circumstances, AER is not linked with extended capabilities.
1675 * Force it to be linked by setting the corresponding control bit in the 1700 * Force it to be linked by setting the corresponding control bit in the
1676 * config space. 1701 * config space.
@@ -1693,9 +1718,6 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
1693 quirk_nvidia_ck804_pcie_aer_ext_cap); 1718 quirk_nvidia_ck804_pcie_aer_ext_cap);
1694 1719
1695#ifdef CONFIG_PCI_MSI 1720#ifdef CONFIG_PCI_MSI
1696/* To disable MSI globally */
1697int pci_msi_quirk;
1698
1699/* The Serverworks PCI-X chipset does not support MSI. We cannot easily rely 1721/* The Serverworks PCI-X chipset does not support MSI. We cannot easily rely
1700 * on setting PCI_BUS_FLAGS_NO_MSI in its bus flags because there are actually 1722 * on setting PCI_BUS_FLAGS_NO_MSI in its bus flags because there are actually
1701 * some other busses controlled by the chipset even if Linux is not aware of it. 1723 * some other busses controlled by the chipset even if Linux is not aware of it.
@@ -1704,8 +1726,8 @@ int pci_msi_quirk;
1704 */ 1726 */
1705static void __init quirk_svw_msi(struct pci_dev *dev) 1727static void __init quirk_svw_msi(struct pci_dev *dev)
1706{ 1728{
1707 pci_msi_quirk = 1; 1729 pci_no_msi();
1708 printk(KERN_WARNING "PCI: MSI quirk detected. pci_msi_quirk set.\n"); 1730 printk(KERN_WARNING "PCI: MSI quirk detected. MSI deactivated.\n");
1709} 1731}
1710DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_GCNB_LE, quirk_svw_msi); 1732DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_GCNB_LE, quirk_svw_msi);
1711 1733
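Replacing the exported pci_msi_quirk flag with a pci_no_msi() call centralizes the off switch: rather than every MSI path testing a shared global, the quirk flips core-private state that pci_enable_msi() consults. The likely shape of that pattern, sketched with illustrative internals (only the pci_no_msi() name is taken from the patch):

#include <linux/pci.h>

static int example_msi_enable = 1;	/* core-private, sketch only */

void example_pci_no_msi(void)		/* stand-in for pci_no_msi() */
{
	example_msi_enable = 0;
}

int example_enable_msi(struct pci_dev *dev)
{
	if (!example_msi_enable)
		return -EINVAL;		/* MSI globally disabled */
	/* ... proceed with vector setup ... */
	return 0;
}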
@@ -1786,8 +1808,3 @@ static void __devinit quirk_nvidia_ck804_msi_ht_cap(struct pci_dev *dev)
1786DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE, 1808DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
1787 quirk_nvidia_ck804_msi_ht_cap); 1809 quirk_nvidia_ck804_msi_ht_cap);
1788#endif /* CONFIG_PCI_MSI */ 1810#endif /* CONFIG_PCI_MSI */
1789
1790EXPORT_SYMBOL(pcie_mch_quirk);
1791#ifdef CONFIG_HOTPLUG
1792EXPORT_SYMBOL(pci_fixup_device);
1793#endif
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index fab381ed853c..ff98eaddaa73 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -200,11 +200,8 @@ static struct pci_dev * pci_find_subsys(unsigned int vendor,
200 * can cause some machines to crash. So here we detect and flag that 200 * can cause some machines to crash. So here we detect and flag that
201 * situation and bail out early. 201 * situation and bail out early.
202 */ 202 */
203 if (unlikely(list_empty(&pci_devices))) { 203 if (unlikely(list_empty(&pci_devices)))
204 printk(KERN_INFO "pci_find_subsys() called while pci_devices "
205 "is still empty\n");
206 return NULL; 204 return NULL;
207 }
208 down_read(&pci_bus_sem); 205 down_read(&pci_bus_sem);
209 n = from ? from->global_list.next : pci_devices.next; 206 n = from ? from->global_list.next : pci_devices.next;
210 207
@@ -278,11 +275,8 @@ pci_get_subsys(unsigned int vendor, unsigned int device,
278 * can cause some machines to crash. So here we detect and flag that 275 * can cause some machines to crash. So here we detect and flag that
279 * situation and bail out early. 276 * situation and bail out early.
280 */ 277 */
281 if (unlikely(list_empty(&pci_devices))) { 278 if (unlikely(list_empty(&pci_devices)))
282 printk(KERN_NOTICE "pci_get_subsys() called while pci_devices "
283 "is still empty\n");
284 return NULL; 279 return NULL;
285 }
286 down_read(&pci_bus_sem); 280 down_read(&pci_bus_sem);
287 n = from ? from->global_list.next : pci_devices.next; 281 n = from ? from->global_list.next : pci_devices.next;
288 282
@@ -364,43 +358,6 @@ exit:
364} 358}
365 359
366/** 360/**
367 * pci_find_device_reverse - begin or continue searching for a PCI device by vendor/device id
368 * @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids
369 * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids
370 * @from: Previous PCI device found in search, or %NULL for new search.
371 *
372 * Iterates through the list of known PCI devices in the reverse order of
373 * pci_find_device().
374 * If a PCI device is found with a matching @vendor and @device, a pointer to
375 * its device structure is returned. Otherwise, %NULL is returned.
376 * A new search is initiated by passing %NULL as the @from argument.
377 * Otherwise if @from is not %NULL, searches continue from previous device
378 * on the global list.
379 */
380struct pci_dev *
381pci_find_device_reverse(unsigned int vendor, unsigned int device, const struct pci_dev *from)
382{
383 struct list_head *n;
384 struct pci_dev *dev;
385
386 WARN_ON(in_interrupt());
387 down_read(&pci_bus_sem);
388 n = from ? from->global_list.prev : pci_devices.prev;
389
390 while (n && (n != &pci_devices)) {
391 dev = pci_dev_g(n);
392 if ((vendor == PCI_ANY_ID || dev->vendor == vendor) &&
393 (device == PCI_ANY_ID || dev->device == device))
394 goto exit;
395 n = n->prev;
396 }
397 dev = NULL;
398exit:
399 up_read(&pci_bus_sem);
400 return dev;
401}
402
403/**
404 * pci_get_class - begin or continue searching for a PCI device by class 361 * pci_get_class - begin or continue searching for a PCI device by class
405 * @class: search for a PCI device with this class designation 362 * @class: search for a PCI device with this class designation
406 * @from: Previous PCI device found in search, or %NULL for new search. 363 * @from: Previous PCI device found in search, or %NULL for new search.
@@ -475,7 +432,6 @@ EXPORT_SYMBOL(pci_dev_present);
475EXPORT_SYMBOL(pci_find_present); 432EXPORT_SYMBOL(pci_find_present);
476 433
477EXPORT_SYMBOL(pci_find_device); 434EXPORT_SYMBOL(pci_find_device);
478EXPORT_SYMBOL(pci_find_device_reverse);
479EXPORT_SYMBOL(pci_find_slot); 435EXPORT_SYMBOL(pci_find_slot);
480/* For boot time work */ 436/* For boot time work */
481EXPORT_SYMBOL(pci_find_bus); 437EXPORT_SYMBOL(pci_find_bus);
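pci_find_device_reverse() is deleted outright along with its export; anything that still needs to walk matching devices can use the forward, reference-counted iterator instead. The usual replacement pattern (the vendor ID below is just an example):

#include <linux/pci.h>

static void walk_matching_devices(void)
{
	struct pci_dev *dev = NULL;

	/* pci_get_device() drops the reference on the previous hit at
	 * each step; a mid-loop break must pair with pci_dev_put() */
	while ((dev = pci_get_device(0x8086, PCI_ANY_ID, dev)) != NULL) {
		/* ... inspect dev ... */
	}
}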
diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c
index 606a46740338..ac004248324a 100644
--- a/drivers/pcmcia/cs.c
+++ b/drivers/pcmcia/cs.c
@@ -110,7 +110,7 @@ int pcmcia_socket_dev_suspend(struct device *dev, pm_message_t state)
110 110
111 down_read(&pcmcia_socket_list_rwsem); 111 down_read(&pcmcia_socket_list_rwsem);
112 list_for_each_entry(socket, &pcmcia_socket_list, socket_list) { 112 list_for_each_entry(socket, &pcmcia_socket_list, socket_list) {
113 if (socket->dev.dev != dev) 113 if (socket->dev.parent != dev)
114 continue; 114 continue;
115 mutex_lock(&socket->skt_mutex); 115 mutex_lock(&socket->skt_mutex);
116 socket_suspend(socket); 116 socket_suspend(socket);
@@ -128,7 +128,7 @@ int pcmcia_socket_dev_resume(struct device *dev)
128 128
129 down_read(&pcmcia_socket_list_rwsem); 129 down_read(&pcmcia_socket_list_rwsem);
130 list_for_each_entry(socket, &pcmcia_socket_list, socket_list) { 130 list_for_each_entry(socket, &pcmcia_socket_list, socket_list) {
131 if (socket->dev.dev != dev) 131 if (socket->dev.parent != dev)
132 continue; 132 continue;
133 mutex_lock(&socket->skt_mutex); 133 mutex_lock(&socket->skt_mutex);
134 socket_resume(socket); 134 socket_resume(socket);
@@ -143,12 +143,12 @@ EXPORT_SYMBOL(pcmcia_socket_dev_resume);
143 143
144struct pcmcia_socket * pcmcia_get_socket(struct pcmcia_socket *skt) 144struct pcmcia_socket * pcmcia_get_socket(struct pcmcia_socket *skt)
145{ 145{
146 struct class_device *cl_dev = class_device_get(&skt->dev); 146 struct device *dev = get_device(&skt->dev);
147 if (!cl_dev) 147 if (!dev)
148 return NULL; 148 return NULL;
149 skt = class_get_devdata(cl_dev); 149 skt = dev_get_drvdata(dev);
150 if (!try_module_get(skt->owner)) { 150 if (!try_module_get(skt->owner)) {
151 class_device_put(&skt->dev); 151 put_device(&skt->dev);
152 return NULL; 152 return NULL;
153 } 153 }
154 return (skt); 154 return (skt);
@@ -159,14 +159,14 @@ EXPORT_SYMBOL(pcmcia_get_socket);
159void pcmcia_put_socket(struct pcmcia_socket *skt) 159void pcmcia_put_socket(struct pcmcia_socket *skt)
160{ 160{
161 module_put(skt->owner); 161 module_put(skt->owner);
162 class_device_put(&skt->dev); 162 put_device(&skt->dev);
163} 163}
164EXPORT_SYMBOL(pcmcia_put_socket); 164EXPORT_SYMBOL(pcmcia_put_socket);
165 165
166 166
167static void pcmcia_release_socket(struct class_device *class_dev) 167static void pcmcia_release_socket(struct device *dev)
168{ 168{
169 struct pcmcia_socket *socket = class_get_devdata(class_dev); 169 struct pcmcia_socket *socket = dev_get_drvdata(dev);
170 170
171 complete(&socket->socket_released); 171 complete(&socket->socket_released);
172} 172}
@@ -181,7 +181,7 @@ int pcmcia_register_socket(struct pcmcia_socket *socket)
181 struct task_struct *tsk; 181 struct task_struct *tsk;
182 int ret; 182 int ret;
183 183
184 if (!socket || !socket->ops || !socket->dev.dev || !socket->resource_ops) 184 if (!socket || !socket->ops || !socket->dev.parent || !socket->resource_ops)
185 return -EINVAL; 185 return -EINVAL;
186 186
187 cs_dbg(socket, 0, "pcmcia_register_socket(0x%p)\n", socket->ops); 187 cs_dbg(socket, 0, "pcmcia_register_socket(0x%p)\n", socket->ops);
@@ -226,9 +226,9 @@ int pcmcia_register_socket(struct pcmcia_socket *socket)
226#endif 226#endif
227 227
228 /* set proper values in socket->dev */ 228 /* set proper values in socket->dev */
229 socket->dev.class_data = socket; 229 dev_set_drvdata(&socket->dev, socket);
230 socket->dev.class = &pcmcia_socket_class; 230 socket->dev.class = &pcmcia_socket_class;
231 snprintf(socket->dev.class_id, BUS_ID_SIZE, "pcmcia_socket%u", socket->sock); 231 snprintf(socket->dev.bus_id, BUS_ID_SIZE, "pcmcia_socket%u", socket->sock);
232 232
233 /* base address = 0, map = 0 */ 233 /* base address = 0, map = 0 */
234 socket->cis_mem.flags = 0; 234 socket->cis_mem.flags = 0;
@@ -640,7 +640,7 @@ static int pccardd(void *__skt)
640 skt->ops->set_socket(skt, &skt->socket); 640 skt->ops->set_socket(skt, &skt->socket);
641 641
642 /* register with the device core */ 642 /* register with the device core */
643 ret = class_device_register(&skt->dev); 643 ret = device_register(&skt->dev);
644 if (ret) { 644 if (ret) {
645 printk(KERN_WARNING "PCMCIA: unable to register socket 0x%p\n", 645 printk(KERN_WARNING "PCMCIA: unable to register socket 0x%p\n",
646 skt); 646 skt);
@@ -689,7 +689,7 @@ static int pccardd(void *__skt)
689 remove_wait_queue(&skt->thread_wait, &wait); 689 remove_wait_queue(&skt->thread_wait, &wait);
690 690
691 /* remove from the device core */ 691 /* remove from the device core */
692 class_device_unregister(&skt->dev); 692 device_unregister(&skt->dev);
693 693
694 return 0; 694 return 0;
695} 695}
@@ -904,7 +904,7 @@ int pcmcia_insert_card(struct pcmcia_socket *skt)
904EXPORT_SYMBOL(pcmcia_insert_card); 904EXPORT_SYMBOL(pcmcia_insert_card);
905 905
906 906
907static int pcmcia_socket_uevent(struct class_device *dev, char **envp, 907static int pcmcia_socket_uevent(struct device *dev, char **envp,
908 int num_envp, char *buffer, int buffer_size) 908 int num_envp, char *buffer, int buffer_size)
909{ 909{
910 struct pcmcia_socket *s = container_of(dev, struct pcmcia_socket, dev); 910 struct pcmcia_socket *s = container_of(dev, struct pcmcia_socket, dev);
@@ -930,8 +930,8 @@ static void pcmcia_release_socket_class(struct class *data)
930 930
931struct class pcmcia_socket_class = { 931struct class pcmcia_socket_class = {
932 .name = "pcmcia_socket", 932 .name = "pcmcia_socket",
933 .uevent = pcmcia_socket_uevent, 933 .dev_uevent = pcmcia_socket_uevent,
934 .release = pcmcia_release_socket, 934 .dev_release = pcmcia_release_socket,
935 .class_release = pcmcia_release_socket_class, 935 .class_release = pcmcia_release_socket_class,
936}; 936};
937EXPORT_SYMBOL(pcmcia_socket_class); 937EXPORT_SYMBOL(pcmcia_socket_class);
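The cs.c hunks are one instance of the tree-wide retirement of class_device: the socket now embeds a plain struct device, private data moves to drvdata, and the class supplies dev_uevent/dev_release callbacks. The same substitutions recur throughout the rest of this patch:

    class_device_get()/class_device_put()  ->  get_device()/put_device()
    class_get_devdata()                    ->  dev_get_drvdata()
    dev.class_data = p                     ->  dev_set_drvdata(&dev, p)
    class_device_register()/_unregister()  ->  device_register()/_unregister()
    dev.class_id                           ->  dev.bus_id
    socket->dev.dev                        ->  socket->dev.parent

After the conversion the get/use/put idiom looks like this (a sketch of the pcmcia_get_socket() shape above):

#include <linux/device.h>
#include <pcmcia/ss.h>

static struct pcmcia_socket *example_get(struct pcmcia_socket *skt)
{
	struct device *d = get_device(&skt->dev);	/* take a ref */

	if (!d)
		return NULL;
	return dev_get_drvdata(d);	/* caller must put_device() later */
}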
diff --git a/drivers/pcmcia/cs_internal.h b/drivers/pcmcia/cs_internal.h
index f573ea04db6f..9fa207e3c7b3 100644
--- a/drivers/pcmcia/cs_internal.h
+++ b/drivers/pcmcia/cs_internal.h
@@ -142,7 +142,7 @@ struct pcmcia_callback{
142 142
143int pccard_register_pcmcia(struct pcmcia_socket *s, struct pcmcia_callback *c); 143int pccard_register_pcmcia(struct pcmcia_socket *s, struct pcmcia_callback *c);
144 144
145#define cs_socket_name(skt) ((skt)->dev.class_id) 145#define cs_socket_name(skt) ((skt)->dev.bus_id)
146 146
147#ifdef DEBUG 147#ifdef DEBUG
148extern int cs_debug_level(int); 148extern int cs_debug_level(int);
@@ -158,6 +158,6 @@ extern int cs_debug_level(int);
158#endif 158#endif
159 159
160#define cs_err(skt, fmt, arg...) \ 160#define cs_err(skt, fmt, arg...) \
161 printk(KERN_ERR "cs: %s: " fmt, (skt)->dev.class_id , ## arg) 161 printk(KERN_ERR "cs: %s: " fmt, (skt)->dev.bus_id , ## arg)
162 162
163#endif /* _LINUX_CS_INTERNAL_H */ 163#endif /* _LINUX_CS_INTERNAL_H */
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index 7355eb455a88..18e111e12339 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -572,7 +572,7 @@ struct pcmcia_device * pcmcia_device_add(struct pcmcia_socket *s, unsigned int f
572 p_dev->func = function; 572 p_dev->func = function;
573 573
574 p_dev->dev.bus = &pcmcia_bus_type; 574 p_dev->dev.bus = &pcmcia_bus_type;
575 p_dev->dev.parent = s->dev.dev; 575 p_dev->dev.parent = s->dev.parent;
576 p_dev->dev.release = pcmcia_release_dev; 576 p_dev->dev.release = pcmcia_release_dev;
577 bus_id_len = sprintf (p_dev->dev.bus_id, "%d.%d", p_dev->socket->sock, p_dev->device_no); 577 bus_id_len = sprintf (p_dev->dev.bus_id, "%d.%d", p_dev->socket->sock, p_dev->device_no);
578 578
@@ -1328,10 +1328,10 @@ static struct pcmcia_callback pcmcia_bus_callback = {
1328 .resume = pcmcia_bus_resume, 1328 .resume = pcmcia_bus_resume,
1329}; 1329};
1330 1330
1331static int __devinit pcmcia_bus_add_socket(struct class_device *class_dev, 1331static int __devinit pcmcia_bus_add_socket(struct device *dev,
1332 struct class_interface *class_intf) 1332 struct class_interface *class_intf)
1333{ 1333{
1334 struct pcmcia_socket *socket = class_get_devdata(class_dev); 1334 struct pcmcia_socket *socket = dev_get_drvdata(dev);
1335 int ret; 1335 int ret;
1336 1336
1337 socket = pcmcia_get_socket(socket); 1337 socket = pcmcia_get_socket(socket);
@@ -1364,10 +1364,10 @@ static int __devinit pcmcia_bus_add_socket(struct class_device *class_dev,
1364 return 0; 1364 return 0;
1365} 1365}
1366 1366
1367static void pcmcia_bus_remove_socket(struct class_device *class_dev, 1367static void pcmcia_bus_remove_socket(struct device *dev,
1368 struct class_interface *class_intf) 1368 struct class_interface *class_intf)
1369{ 1369{
1370 struct pcmcia_socket *socket = class_get_devdata(class_dev); 1370 struct pcmcia_socket *socket = dev_get_drvdata(dev);
1371 1371
1372 if (!socket) 1372 if (!socket)
1373 return; 1373 return;
@@ -1389,8 +1389,8 @@ static void pcmcia_bus_remove_socket(struct class_device *class_dev,
1389/* the pcmcia_bus_interface is used to handle pcmcia socket devices */ 1389/* the pcmcia_bus_interface is used to handle pcmcia socket devices */
1390static struct class_interface pcmcia_bus_interface = { 1390static struct class_interface pcmcia_bus_interface = {
1391 .class = &pcmcia_socket_class, 1391 .class = &pcmcia_socket_class,
1392 .add = &pcmcia_bus_add_socket, 1392 .add_dev = &pcmcia_bus_add_socket,
1393 .remove = &pcmcia_bus_remove_socket, 1393 .remove_dev = &pcmcia_bus_remove_socket,
1394}; 1394};
1395 1395
1396 1396
diff --git a/drivers/pcmcia/i82092.c b/drivers/pcmcia/i82092.c
index c2ea07aa7a12..df21e2d16f87 100644
--- a/drivers/pcmcia/i82092.c
+++ b/drivers/pcmcia/i82092.c
@@ -161,7 +161,7 @@ static int __devinit i82092aa_pci_probe(struct pci_dev *dev, const struct pci_de
161 pci_set_drvdata(dev, &sockets[i].socket); 161 pci_set_drvdata(dev, &sockets[i].socket);
162 162
163 for (i = 0; i<socket_count; i++) { 163 for (i = 0; i<socket_count; i++) {
164 sockets[i].socket.dev.dev = &dev->dev; 164 sockets[i].socket.dev.parent = &dev->dev;
165 sockets[i].socket.ops = &i82092aa_operations; 165 sockets[i].socket.ops = &i82092aa_operations;
166 sockets[i].socket.resource_ops = &pccard_nonstatic_ops; 166 sockets[i].socket.resource_ops = &pccard_nonstatic_ops;
167 ret = pcmcia_register_socket(&sockets[i].socket); 167 ret = pcmcia_register_socket(&sockets[i].socket);
diff --git a/drivers/pcmcia/i82365.c b/drivers/pcmcia/i82365.c
index ea74f98a7350..72ff2f615b33 100644
--- a/drivers/pcmcia/i82365.c
+++ b/drivers/pcmcia/i82365.c
@@ -1298,7 +1298,7 @@ static int __init init_i82365(void)
1298 1298
1299 /* register sockets with the pcmcia core */ 1299 /* register sockets with the pcmcia core */
1300 for (i = 0; i < sockets; i++) { 1300 for (i = 0; i < sockets; i++) {
1301 socket[i].socket.dev.dev = &i82365_device->dev; 1301 socket[i].socket.dev.parent = &i82365_device->dev;
1302 socket[i].socket.ops = &pcic_operations; 1302 socket[i].socket.ops = &pcic_operations;
1303 socket[i].socket.resource_ops = &pccard_nonstatic_ops; 1303 socket[i].socket.resource_ops = &pccard_nonstatic_ops;
1304 socket[i].socket.owner = THIS_MODULE; 1304 socket[i].socket.owner = THIS_MODULE;
diff --git a/drivers/pcmcia/m32r_pcc.c b/drivers/pcmcia/m32r_pcc.c
index bbf025874d0c..4dbef0762376 100644
--- a/drivers/pcmcia/m32r_pcc.c
+++ b/drivers/pcmcia/m32r_pcc.c
@@ -722,7 +722,7 @@ static int __init init_m32r_pcc(void)
722 /* Set up interrupt handler(s) */ 722 /* Set up interrupt handler(s) */
723 723
724 for (i = 0 ; i < pcc_sockets ; i++) { 724 for (i = 0 ; i < pcc_sockets ; i++) {
725 socket[i].socket.dev.dev = &pcc_device.dev; 725 socket[i].socket.dev.parent = &pcc_device.dev;
726 socket[i].socket.ops = &pcc_operations; 726 socket[i].socket.ops = &pcc_operations;
727 socket[i].socket.resource_ops = &pccard_static_ops; 727 socket[i].socket.resource_ops = &pccard_static_ops;
728 socket[i].socket.owner = THIS_MODULE; 728 socket[i].socket.owner = THIS_MODULE;
diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c
index 327372b7a54e..88494149e910 100644
--- a/drivers/pcmcia/pcmcia_ioctl.c
+++ b/drivers/pcmcia/pcmcia_ioctl.c
@@ -59,7 +59,6 @@ typedef struct user_info_t {
59 59
60#ifdef DEBUG 60#ifdef DEBUG
61extern int ds_pc_debug; 61extern int ds_pc_debug;
62#define cs_socket_name(skt) ((skt)->dev.class_id)
63 62
64#define ds_dbg(lvl, fmt, arg...) do { \ 63#define ds_dbg(lvl, fmt, arg...) do { \
65 if (ds_pc_debug >= lvl) \ 64 if (ds_pc_debug >= lvl) \
diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c
index b9201c2ec38b..0ce39de834c4 100644
--- a/drivers/pcmcia/pcmcia_resource.c
+++ b/drivers/pcmcia/pcmcia_resource.c
@@ -48,7 +48,6 @@ static u8 pcmcia_used_irq[NR_IRQS];
48 48
49#ifdef DEBUG 49#ifdef DEBUG
50extern int ds_pc_debug; 50extern int ds_pc_debug;
51#define cs_socket_name(skt) ((skt)->dev.class_id)
52 51
53#define ds_dbg(skt, lvl, fmt, arg...) do { \ 52#define ds_dbg(skt, lvl, fmt, arg...) do { \
54 if (ds_pc_debug >= lvl) \ 53 if (ds_pc_debug >= lvl) \
diff --git a/drivers/pcmcia/pd6729.c b/drivers/pcmcia/pd6729.c
index 360c24896548..dd0ddf19ee57 100644
--- a/drivers/pcmcia/pd6729.c
+++ b/drivers/pcmcia/pd6729.c
@@ -682,7 +682,7 @@ static int __devinit pd6729_pci_probe(struct pci_dev *dev,
682 682
683 socket[i].socket.ops = &pd6729_operations; 683 socket[i].socket.ops = &pd6729_operations;
684 socket[i].socket.resource_ops = &pccard_nonstatic_ops; 684 socket[i].socket.resource_ops = &pccard_nonstatic_ops;
685 socket[i].socket.dev.dev = &dev->dev; 685 socket[i].socket.dev.parent = &dev->dev;
686 socket[i].socket.driver_data = &socket[i]; 686 socket[i].socket.driver_data = &socket[i];
687 } 687 }
688 688
diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c
index c3176b16b7be..bfcaad6021cf 100644
--- a/drivers/pcmcia/rsrc_nonstatic.c
+++ b/drivers/pcmcia/rsrc_nonstatic.c
@@ -616,7 +616,7 @@ static int nonstatic_adjust_io_region(struct resource *res, unsigned long r_star
616static struct resource *nonstatic_find_io_region(unsigned long base, int num, 616static struct resource *nonstatic_find_io_region(unsigned long base, int num,
617 unsigned long align, struct pcmcia_socket *s) 617 unsigned long align, struct pcmcia_socket *s)
618{ 618{
619 struct resource *res = make_resource(0, num, IORESOURCE_IO, s->dev.class_id); 619 struct resource *res = make_resource(0, num, IORESOURCE_IO, s->dev.bus_id);
620 struct socket_data *s_data = s->resource_data; 620 struct socket_data *s_data = s->resource_data;
621 struct pcmcia_align_data data; 621 struct pcmcia_align_data data;
622 unsigned long min = base; 622 unsigned long min = base;
@@ -650,7 +650,7 @@ static struct resource *nonstatic_find_io_region(unsigned long base, int num,
650static struct resource * nonstatic_find_mem_region(u_long base, u_long num, 650static struct resource * nonstatic_find_mem_region(u_long base, u_long num,
651 u_long align, int low, struct pcmcia_socket *s) 651 u_long align, int low, struct pcmcia_socket *s)
652{ 652{
653 struct resource *res = make_resource(0, num, IORESOURCE_MEM, s->dev.class_id); 653 struct resource *res = make_resource(0, num, IORESOURCE_MEM, s->dev.bus_id);
654 struct socket_data *s_data = s->resource_data; 654 struct socket_data *s_data = s->resource_data;
655 struct pcmcia_align_data data; 655 struct pcmcia_align_data data;
656 unsigned long min, max; 656 unsigned long min, max;
@@ -897,9 +897,10 @@ EXPORT_SYMBOL(pccard_nonstatic_ops);
897 897
898/* sysfs interface to the resource database */ 898/* sysfs interface to the resource database */
899 899
900static ssize_t show_io_db(struct class_device *class_dev, char *buf) 900static ssize_t show_io_db(struct device *dev,
901 struct device_attribute *attr, char *buf)
901{ 902{
902 struct pcmcia_socket *s = class_get_devdata(class_dev); 903 struct pcmcia_socket *s = dev_get_drvdata(dev);
903 struct socket_data *data; 904 struct socket_data *data;
904 struct resource_map *p; 905 struct resource_map *p;
905 ssize_t ret = 0; 906 ssize_t ret = 0;
@@ -920,9 +921,11 @@ static ssize_t show_io_db(struct class_device *class_dev, char *buf)
920 return (ret); 921 return (ret);
921} 922}
922 923
923static ssize_t store_io_db(struct class_device *class_dev, const char *buf, size_t count) 924static ssize_t store_io_db(struct device *dev,
925 struct device_attribute *attr,
926 const char *buf, size_t count)
924{ 927{
925 struct pcmcia_socket *s = class_get_devdata(class_dev); 928 struct pcmcia_socket *s = dev_get_drvdata(dev);
926 unsigned long start_addr, end_addr; 929 unsigned long start_addr, end_addr;
927 unsigned int add = ADD_MANAGED_RESOURCE; 930 unsigned int add = ADD_MANAGED_RESOURCE;
928 ssize_t ret = 0; 931 ssize_t ret = 0;
@@ -947,11 +950,12 @@ static ssize_t store_io_db(struct class_device *class_dev, const char *buf, size
947 950
948 return ret ? ret : count; 951 return ret ? ret : count;
949} 952}
950static CLASS_DEVICE_ATTR(available_resources_io, 0600, show_io_db, store_io_db); 953static DEVICE_ATTR(available_resources_io, 0600, show_io_db, store_io_db);
951 954
952static ssize_t show_mem_db(struct class_device *class_dev, char *buf) 955static ssize_t show_mem_db(struct device *dev,
956 struct device_attribute *attr, char *buf)
953{ 957{
954 struct pcmcia_socket *s = class_get_devdata(class_dev); 958 struct pcmcia_socket *s = dev_get_drvdata(dev);
955 struct socket_data *data; 959 struct socket_data *data;
956 struct resource_map *p; 960 struct resource_map *p;
957 ssize_t ret = 0; 961 ssize_t ret = 0;
@@ -972,9 +976,11 @@ static ssize_t show_mem_db(struct class_device *class_dev, char *buf)
972 return (ret); 976 return (ret);
973} 977}
974 978
975static ssize_t store_mem_db(struct class_device *class_dev, const char *buf, size_t count) 979static ssize_t store_mem_db(struct device *dev,
980 struct device_attribute *attr,
981 const char *buf, size_t count)
976{ 982{
977 struct pcmcia_socket *s = class_get_devdata(class_dev); 983 struct pcmcia_socket *s = dev_get_drvdata(dev);
978 unsigned long start_addr, end_addr; 984 unsigned long start_addr, end_addr;
979 unsigned int add = ADD_MANAGED_RESOURCE; 985 unsigned int add = ADD_MANAGED_RESOURCE;
980 ssize_t ret = 0; 986 ssize_t ret = 0;
@@ -999,25 +1005,25 @@ static ssize_t store_mem_db(struct class_device *class_dev, const char *buf, siz
999 1005
1000 return ret ? ret : count; 1006 return ret ? ret : count;
1001} 1007}
1002static CLASS_DEVICE_ATTR(available_resources_mem, 0600, show_mem_db, store_mem_db); 1008static DEVICE_ATTR(available_resources_mem, 0600, show_mem_db, store_mem_db);
1003 1009
1004static struct class_device_attribute *pccard_rsrc_attributes[] = { 1010static struct device_attribute *pccard_rsrc_attributes[] = {
1005 &class_device_attr_available_resources_io, 1011 &dev_attr_available_resources_io,
1006 &class_device_attr_available_resources_mem, 1012 &dev_attr_available_resources_mem,
1007 NULL, 1013 NULL,
1008}; 1014};
1009 1015
1010static int __devinit pccard_sysfs_add_rsrc(struct class_device *class_dev, 1016static int __devinit pccard_sysfs_add_rsrc(struct device *dev,
1011 struct class_interface *class_intf) 1017 struct class_interface *class_intf)
1012{ 1018{
1013 struct pcmcia_socket *s = class_get_devdata(class_dev); 1019 struct pcmcia_socket *s = dev_get_drvdata(dev);
1014 struct class_device_attribute **attr; 1020 struct device_attribute **attr;
1015 int ret = 0; 1021 int ret = 0;
1016 if (s->resource_ops != &pccard_nonstatic_ops) 1022 if (s->resource_ops != &pccard_nonstatic_ops)
1017 return 0; 1023 return 0;
1018 1024
1019 for (attr = pccard_rsrc_attributes; *attr; attr++) { 1025 for (attr = pccard_rsrc_attributes; *attr; attr++) {
1020 ret = class_device_create_file(class_dev, *attr); 1026 ret = device_create_file(dev, *attr);
1021 if (ret) 1027 if (ret)
1022 break; 1028 break;
1023 } 1029 }
@@ -1025,23 +1031,23 @@ static int __devinit pccard_sysfs_add_rsrc(struct class_device *class_dev,
1025 return ret; 1031 return ret;
1026} 1032}
1027 1033
1028static void __devexit pccard_sysfs_remove_rsrc(struct class_device *class_dev, 1034static void __devexit pccard_sysfs_remove_rsrc(struct device *dev,
1029 struct class_interface *class_intf) 1035 struct class_interface *class_intf)
1030{ 1036{
1031 struct pcmcia_socket *s = class_get_devdata(class_dev); 1037 struct pcmcia_socket *s = dev_get_drvdata(dev);
1032 struct class_device_attribute **attr; 1038 struct device_attribute **attr;
1033 1039
1034 if (s->resource_ops != &pccard_nonstatic_ops) 1040 if (s->resource_ops != &pccard_nonstatic_ops)
1035 return; 1041 return;
1036 1042
1037 for (attr = pccard_rsrc_attributes; *attr; attr++) 1043 for (attr = pccard_rsrc_attributes; *attr; attr++)
1038 class_device_remove_file(class_dev, *attr); 1044 device_remove_file(dev, *attr);
1039} 1045}
1040 1046
1041static struct class_interface pccard_rsrc_interface = { 1047static struct class_interface pccard_rsrc_interface = {
1042 .class = &pcmcia_socket_class, 1048 .class = &pcmcia_socket_class,
1043 .add = &pccard_sysfs_add_rsrc, 1049 .add_dev = &pccard_sysfs_add_rsrc,
1044 .remove = __devexit_p(&pccard_sysfs_remove_rsrc), 1050 .remove_dev = __devexit_p(&pccard_sysfs_remove_rsrc),
1045}; 1051};
1046 1052
1047static int __init nonstatic_sysfs_init(void) 1053static int __init nonstatic_sysfs_init(void)
diff --git a/drivers/pcmcia/soc_common.c b/drivers/pcmcia/soc_common.c
index e433704e026a..d2a3bea55de2 100644
--- a/drivers/pcmcia/soc_common.c
+++ b/drivers/pcmcia/soc_common.c
@@ -478,10 +478,10 @@ dump_bits(char **p, const char *prefix, unsigned int val, struct bittbl *bits, i
478 * 478 *
479 * Returns: the number of characters added to the buffer 479 * Returns: the number of characters added to the buffer
480 */ 480 */
481static ssize_t show_status(struct class_device *class_dev, char *buf) 481static ssize_t show_status(struct device *dev, char *buf)
482{ 482{
483 struct soc_pcmcia_socket *skt = 483 struct soc_pcmcia_socket *skt =
484 container_of(class_dev, struct soc_pcmcia_socket, socket.dev); 484 container_of(dev, struct soc_pcmcia_socket, socket.dev);
485 char *p = buf; 485 char *p = buf;
486 486
487 p+=sprintf(p, "slot : %d\n", skt->nr); 487 p+=sprintf(p, "slot : %d\n", skt->nr);
@@ -747,7 +747,7 @@ int soc_common_drv_pcmcia_probe(struct device *dev, struct pcmcia_low_level *ops
747 747
748 add_timer(&skt->poll_timer); 748 add_timer(&skt->poll_timer);
749 749
750 class_device_create_file(&skt->socket.dev, &class_device_attr_status); 750 device_create_file(&skt->socket.dev, &device_attr_status);
751 } 751 }
752 752
753 dev_set_drvdata(dev, sinfo); 753 dev_set_drvdata(dev, sinfo);
diff --git a/drivers/pcmcia/socket_sysfs.c b/drivers/pcmcia/socket_sysfs.c
index b005602d6b53..ea5765c3bdc0 100644
--- a/drivers/pcmcia/socket_sysfs.c
+++ b/drivers/pcmcia/socket_sysfs.c
@@ -40,7 +40,8 @@
40 40
41#define to_socket(_dev) container_of(_dev, struct pcmcia_socket, dev) 41#define to_socket(_dev) container_of(_dev, struct pcmcia_socket, dev)
42 42
43static ssize_t pccard_show_type(struct class_device *dev, char *buf) 43static ssize_t pccard_show_type(struct device *dev, struct device_attribute *attr,
44 char *buf)
44{ 45{
45 struct pcmcia_socket *s = to_socket(dev); 46 struct pcmcia_socket *s = to_socket(dev);
46 47
@@ -50,9 +51,10 @@ static ssize_t pccard_show_type(struct class_device *dev, char *buf)
50 return sprintf(buf, "32-bit\n"); 51 return sprintf(buf, "32-bit\n");
51 return sprintf(buf, "16-bit\n"); 52 return sprintf(buf, "16-bit\n");
52} 53}
53static CLASS_DEVICE_ATTR(card_type, 0444, pccard_show_type, NULL); 54static DEVICE_ATTR(card_type, 0444, pccard_show_type, NULL);
54 55
55static ssize_t pccard_show_voltage(struct class_device *dev, char *buf) 56static ssize_t pccard_show_voltage(struct device *dev, struct device_attribute *attr,
57 char *buf)
56{ 58{
57 struct pcmcia_socket *s = to_socket(dev); 59 struct pcmcia_socket *s = to_socket(dev);
58 60
@@ -63,28 +65,31 @@ static ssize_t pccard_show_voltage(struct class_device *dev, char *buf)
63 s->socket.Vcc % 10); 65 s->socket.Vcc % 10);
64 return sprintf(buf, "X.XV\n"); 66 return sprintf(buf, "X.XV\n");
65} 67}
66static CLASS_DEVICE_ATTR(card_voltage, 0444, pccard_show_voltage, NULL); 68static DEVICE_ATTR(card_voltage, 0444, pccard_show_voltage, NULL);
67 69
68static ssize_t pccard_show_vpp(struct class_device *dev, char *buf) 70static ssize_t pccard_show_vpp(struct device *dev, struct device_attribute *attr,
71 char *buf)
69{ 72{
70 struct pcmcia_socket *s = to_socket(dev); 73 struct pcmcia_socket *s = to_socket(dev);
71 if (!(s->state & SOCKET_PRESENT)) 74 if (!(s->state & SOCKET_PRESENT))
72 return -ENODEV; 75 return -ENODEV;
73 return sprintf(buf, "%d.%dV\n", s->socket.Vpp / 10, s->socket.Vpp % 10); 76 return sprintf(buf, "%d.%dV\n", s->socket.Vpp / 10, s->socket.Vpp % 10);
74} 77}
75static CLASS_DEVICE_ATTR(card_vpp, 0444, pccard_show_vpp, NULL); 78static DEVICE_ATTR(card_vpp, 0444, pccard_show_vpp, NULL);
76 79
77static ssize_t pccard_show_vcc(struct class_device *dev, char *buf) 80static ssize_t pccard_show_vcc(struct device *dev, struct device_attribute *attr,
81 char *buf)
78{ 82{
79 struct pcmcia_socket *s = to_socket(dev); 83 struct pcmcia_socket *s = to_socket(dev);
80 if (!(s->state & SOCKET_PRESENT)) 84 if (!(s->state & SOCKET_PRESENT))
81 return -ENODEV; 85 return -ENODEV;
82 return sprintf(buf, "%d.%dV\n", s->socket.Vcc / 10, s->socket.Vcc % 10); 86 return sprintf(buf, "%d.%dV\n", s->socket.Vcc / 10, s->socket.Vcc % 10);
83} 87}
84static CLASS_DEVICE_ATTR(card_vcc, 0444, pccard_show_vcc, NULL); 88static DEVICE_ATTR(card_vcc, 0444, pccard_show_vcc, NULL);
85 89
86 90
87static ssize_t pccard_store_insert(struct class_device *dev, const char *buf, size_t count) 91static ssize_t pccard_store_insert(struct device *dev, struct device_attribute *attr,
92 const char *buf, size_t count)
88{ 93{
89 ssize_t ret; 94 ssize_t ret;
90 struct pcmcia_socket *s = to_socket(dev); 95 struct pcmcia_socket *s = to_socket(dev);
@@ -96,16 +101,20 @@ static ssize_t pccard_store_insert(struct class_device *dev, const char *buf, si
96 101
97 return ret ? ret : count; 102 return ret ? ret : count;
98} 103}
99static CLASS_DEVICE_ATTR(card_insert, 0200, NULL, pccard_store_insert); 104static DEVICE_ATTR(card_insert, 0200, NULL, pccard_store_insert);
100 105
101 106
102static ssize_t pccard_show_card_pm_state(struct class_device *dev, char *buf) 107static ssize_t pccard_show_card_pm_state(struct device *dev,
108 struct device_attribute *attr,
109 char *buf)
103{ 110{
104 struct pcmcia_socket *s = to_socket(dev); 111 struct pcmcia_socket *s = to_socket(dev);
105 return sprintf(buf, "%s\n", s->state & SOCKET_SUSPEND ? "off" : "on"); 112 return sprintf(buf, "%s\n", s->state & SOCKET_SUSPEND ? "off" : "on");
106} 113}
107 114
108static ssize_t pccard_store_card_pm_state(struct class_device *dev, const char *buf, size_t count) 115static ssize_t pccard_store_card_pm_state(struct device *dev,
116 struct device_attribute *attr,
117 const char *buf, size_t count)
109{ 118{
110 ssize_t ret = -EINVAL; 119 ssize_t ret = -EINVAL;
111 struct pcmcia_socket *s = to_socket(dev); 120 struct pcmcia_socket *s = to_socket(dev);
@@ -120,9 +129,11 @@ static ssize_t pccard_store_card_pm_state(struct class_device *dev, const char *
120 129
121 return ret ? -ENODEV : count; 130 return ret ? -ENODEV : count;
122} 131}
123static CLASS_DEVICE_ATTR(card_pm_state, 0644, pccard_show_card_pm_state, pccard_store_card_pm_state); 132static DEVICE_ATTR(card_pm_state, 0644, pccard_show_card_pm_state, pccard_store_card_pm_state);
124 133
125static ssize_t pccard_store_eject(struct class_device *dev, const char *buf, size_t count) 134static ssize_t pccard_store_eject(struct device *dev,
135 struct device_attribute *attr,
136 const char *buf, size_t count)
126{ 137{
127 ssize_t ret; 138 ssize_t ret;
128 struct pcmcia_socket *s = to_socket(dev); 139 struct pcmcia_socket *s = to_socket(dev);
@@ -134,16 +145,20 @@ static ssize_t pccard_store_eject(struct class_device *dev, const char *buf, siz
134 145
135 return ret ? ret : count; 146 return ret ? ret : count;
136} 147}
137static CLASS_DEVICE_ATTR(card_eject, 0200, NULL, pccard_store_eject); 148static DEVICE_ATTR(card_eject, 0200, NULL, pccard_store_eject);
138 149
139 150
140static ssize_t pccard_show_irq_mask(struct class_device *dev, char *buf) 151static ssize_t pccard_show_irq_mask(struct device *dev,
152 struct device_attribute *attr,
153 char *buf)
141{ 154{
142 struct pcmcia_socket *s = to_socket(dev); 155 struct pcmcia_socket *s = to_socket(dev);
143 return sprintf(buf, "0x%04x\n", s->irq_mask); 156 return sprintf(buf, "0x%04x\n", s->irq_mask);
144} 157}
145 158
146static ssize_t pccard_store_irq_mask(struct class_device *dev, const char *buf, size_t count) 159static ssize_t pccard_store_irq_mask(struct device *dev,
160 struct device_attribute *attr,
161 const char *buf, size_t count)
147{ 162{
148 ssize_t ret; 163 ssize_t ret;
149 struct pcmcia_socket *s = to_socket(dev); 164 struct pcmcia_socket *s = to_socket(dev);
@@ -161,16 +176,19 @@ static ssize_t pccard_store_irq_mask(struct class_device *dev, const char *buf,
161 176
162 return ret ? ret : count; 177 return ret ? ret : count;
163} 178}
164static CLASS_DEVICE_ATTR(card_irq_mask, 0600, pccard_show_irq_mask, pccard_store_irq_mask); 179static DEVICE_ATTR(card_irq_mask, 0600, pccard_show_irq_mask, pccard_store_irq_mask);
165 180
166 181
167static ssize_t pccard_show_resource(struct class_device *dev, char *buf) 182static ssize_t pccard_show_resource(struct device *dev,
183 struct device_attribute *attr, char *buf)
168{ 184{
169 struct pcmcia_socket *s = to_socket(dev); 185 struct pcmcia_socket *s = to_socket(dev);
170 return sprintf(buf, "%s\n", s->resource_setup_done ? "yes" : "no"); 186 return sprintf(buf, "%s\n", s->resource_setup_done ? "yes" : "no");
171} 187}
172 188
173static ssize_t pccard_store_resource(struct class_device *dev, const char *buf, size_t count) 189static ssize_t pccard_store_resource(struct device *dev,
190 struct device_attribute *attr,
191 const char *buf, size_t count)
174{ 192{
175 unsigned long flags; 193 unsigned long flags;
176 struct pcmcia_socket *s = to_socket(dev); 194 struct pcmcia_socket *s = to_socket(dev);
@@ -196,7 +214,7 @@ static ssize_t pccard_store_resource(struct class_device *dev, const char *buf,
196 214
197 return count; 215 return count;
198} 216}
199static CLASS_DEVICE_ATTR(available_resources_setup_done, 0600, pccard_show_resource, pccard_store_resource); 217static DEVICE_ATTR(available_resources_setup_done, 0600, pccard_show_resource, pccard_store_resource);
200 218
201 219
202static ssize_t pccard_extract_cis(struct pcmcia_socket *s, char *buf, loff_t off, size_t count) 220static ssize_t pccard_extract_cis(struct pcmcia_socket *s, char *buf, loff_t off, size_t count)
@@ -279,7 +297,7 @@ static ssize_t pccard_show_cis(struct kobject *kobj, char *buf, loff_t off, size
279 if (off + count > size) 297 if (off + count > size)
280 count = size - off; 298 count = size - off;
281 299
282 s = to_socket(container_of(kobj, struct class_device, kobj)); 300 s = to_socket(container_of(kobj, struct device, kobj));
283 301
284 if (!(s->state & SOCKET_PRESENT)) 302 if (!(s->state & SOCKET_PRESENT))
285 return -ENODEV; 303 return -ENODEV;
@@ -296,7 +314,7 @@ static ssize_t pccard_show_cis(struct kobject *kobj, char *buf, loff_t off, size
296 314
297static ssize_t pccard_store_cis(struct kobject *kobj, char *buf, loff_t off, size_t count) 315static ssize_t pccard_store_cis(struct kobject *kobj, char *buf, loff_t off, size_t count)
298{ 316{
299 struct pcmcia_socket *s = to_socket(container_of(kobj, struct class_device, kobj)); 317 struct pcmcia_socket *s = to_socket(container_of(kobj, struct device, kobj));
300 cisdump_t *cis; 318 cisdump_t *cis;
301 int error; 319 int error;
302 320
@@ -335,16 +353,16 @@ static ssize_t pccard_store_cis(struct kobject *kobj, char *buf, loff_t off, siz
335} 353}
336 354
337 355
338static struct class_device_attribute *pccard_socket_attributes[] = { 356static struct device_attribute *pccard_socket_attributes[] = {
339 &class_device_attr_card_type, 357 &dev_attr_card_type,
340 &class_device_attr_card_voltage, 358 &dev_attr_card_voltage,
341 &class_device_attr_card_vpp, 359 &dev_attr_card_vpp,
342 &class_device_attr_card_vcc, 360 &dev_attr_card_vcc,
343 &class_device_attr_card_insert, 361 &dev_attr_card_insert,
344 &class_device_attr_card_pm_state, 362 &dev_attr_card_pm_state,
345 &class_device_attr_card_eject, 363 &dev_attr_card_eject,
346 &class_device_attr_card_irq_mask, 364 &dev_attr_card_irq_mask,
347 &class_device_attr_available_resources_setup_done, 365 &dev_attr_available_resources_setup_done,
348 NULL, 366 NULL,
349}; 367};
350 368
@@ -355,35 +373,35 @@ static struct bin_attribute pccard_cis_attr = {
355 .write = pccard_store_cis, 373 .write = pccard_store_cis,
356}; 374};
357 375
358static int __devinit pccard_sysfs_add_socket(struct class_device *class_dev, 376static int __devinit pccard_sysfs_add_socket(struct device *dev,
359 struct class_interface *class_intf) 377 struct class_interface *class_intf)
360{ 378{
361 struct class_device_attribute **attr; 379 struct device_attribute **attr;
362 int ret = 0; 380 int ret = 0;
363 381
364 for (attr = pccard_socket_attributes; *attr; attr++) { 382 for (attr = pccard_socket_attributes; *attr; attr++) {
365 ret = class_device_create_file(class_dev, *attr); 383 ret = device_create_file(dev, *attr);
366 if (ret) 384 if (ret)
367 break; 385 break;
368 } 386 }
369 if (!ret) 387 if (!ret)
370 ret = sysfs_create_bin_file(&class_dev->kobj, &pccard_cis_attr); 388 ret = sysfs_create_bin_file(&dev->kobj, &pccard_cis_attr);
371 389
372 return ret; 390 return ret;
373} 391}
374 392
375static void __devexit pccard_sysfs_remove_socket(struct class_device *class_dev, 393static void __devexit pccard_sysfs_remove_socket(struct device *dev,
376 struct class_interface *class_intf) 394 struct class_interface *class_intf)
377{ 395{
378 struct class_device_attribute **attr; 396 struct device_attribute **attr;
379 397
380 sysfs_remove_bin_file(&class_dev->kobj, &pccard_cis_attr); 398 sysfs_remove_bin_file(&dev->kobj, &pccard_cis_attr);
381 for (attr = pccard_socket_attributes; *attr; attr++) 399 for (attr = pccard_socket_attributes; *attr; attr++)
382 class_device_remove_file(class_dev, *attr); 400 device_remove_file(dev, *attr);
383} 401}
384 402
385struct class_interface pccard_sysfs_interface = { 403struct class_interface pccard_sysfs_interface = {
386 .class = &pcmcia_socket_class, 404 .class = &pcmcia_socket_class,
387 .add = &pccard_sysfs_add_socket, 405 .add_dev = &pccard_sysfs_add_socket,
388 .remove = __devexit_p(&pccard_sysfs_remove_socket), 406 .remove_dev = __devexit_p(&pccard_sysfs_remove_socket),
389}; 407};
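
The socket_sysfs.c hunks above are a mechanical class_device-to-device conversion: attributes become struct device_attribute, file creation goes through device_create_file(), and the class_interface callbacks move to .add_dev/.remove_dev. A minimal hedged sketch of the target idiom (all example_* names are hypothetical, not from the patch):

    #include <linux/device.h>
    #include <linux/kernel.h>

    /* Sketch of the post-conversion sysfs attribute pattern. */
    static ssize_t example_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
    {
            return sprintf(buf, "example\n");   /* report a fixed value */
    }
    static DEVICE_ATTR(example, 0444, example_show, NULL);

    static int example_add(struct device *dev)
    {
            /* device_create_file() replaces class_device_create_file() */
            return device_create_file(dev, &dev_attr_example);
    }

    static void example_remove(struct device *dev)
    {
            device_remove_file(dev, &dev_attr_example);
    }
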
diff --git a/drivers/pcmcia/tcic.c b/drivers/pcmcia/tcic.c
index 2d2f415f80a8..c158cf38b9dd 100644
--- a/drivers/pcmcia/tcic.c
+++ b/drivers/pcmcia/tcic.c
@@ -512,7 +512,7 @@ static int __init init_tcic(void)
512 for (i = 0; i < sockets; i++) { 512 for (i = 0; i < sockets; i++) {
513 socket_table[i].socket.ops = &tcic_operations; 513 socket_table[i].socket.ops = &tcic_operations;
514 socket_table[i].socket.resource_ops = &pccard_nonstatic_ops; 514 socket_table[i].socket.resource_ops = &pccard_nonstatic_ops;
515 socket_table[i].socket.dev.dev = &tcic_device.dev; 515 socket_table[i].socket.dev.parent = &tcic_device.dev;
516 ret = pcmcia_register_socket(&socket_table[i].socket); 516 ret = pcmcia_register_socket(&socket_table[i].socket);
517 if (ret && i) 517 if (ret && i)
518 pcmcia_unregister_socket(&socket_table[0].socket); 518 pcmcia_unregister_socket(&socket_table[0].socket);
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
index da471bddc972..a61d768f6e0e 100644
--- a/drivers/pcmcia/yenta_socket.c
+++ b/drivers/pcmcia/yenta_socket.c
@@ -1104,7 +1104,7 @@ static int __devinit yenta_probe (struct pci_dev *dev, const struct pci_device_i
1104 /* prepare pcmcia_socket */ 1104 /* prepare pcmcia_socket */
1105 socket->socket.ops = &yenta_socket_operations; 1105 socket->socket.ops = &yenta_socket_operations;
1106 socket->socket.resource_ops = &pccard_nonstatic_ops; 1106 socket->socket.resource_ops = &pccard_nonstatic_ops;
1107 socket->socket.dev.dev = &dev->dev; 1107 socket->socket.dev.parent = &dev->dev;
1108 socket->socket.driver_data = socket; 1108 socket->socket.driver_data = socket;
1109 socket->socket.owner = THIS_MODULE; 1109 socket->socket.owner = THIS_MODULE;
1110 socket->socket.features = SS_CAP_PAGE_REGS | SS_CAP_PCCARD; 1110 socket->socket.features = SS_CAP_PAGE_REGS | SS_CAP_PCCARD;
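
tcic.c and yenta_socket.c follow the same conversion: the socket's owner is now recorded in the generic dev.parent pointer instead of a driver-managed dev.dev field. A hedged sketch of the registration shape (struct layout as after this patch; the attach helper is hypothetical):

    #include <linux/pci.h>
    #include <pcmcia/ss.h>

    /* Sketch: wire the socket to its parent PCI device, then register. */
    static int example_attach(struct pci_dev *pdev,
                              struct pcmcia_socket *sock)
    {
            sock->dev.parent = &pdev->dev;  /* was: sock->dev.dev = ... */
            return pcmcia_register_socket(sock);
    }
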
diff --git a/drivers/pnp/pnpacpi/Kconfig b/drivers/pnp/pnpacpi/Kconfig
index b1854171b963..ad27e5e0101f 100644
--- a/drivers/pnp/pnpacpi/Kconfig
+++ b/drivers/pnp/pnpacpi/Kconfig
@@ -2,8 +2,8 @@
2# Plug and Play ACPI configuration 2# Plug and Play ACPI configuration
3# 3#
4config PNPACPI 4config PNPACPI
5 bool "Plug and Play ACPI support (EXPERIMENTAL)" 5 bool "Plug and Play ACPI support"
6 depends on PNP && ACPI && EXPERIMENTAL 6 depends on PNP && ACPI
7 default y 7 default y
8 ---help--- 8 ---help---
9 Linux uses the PNPACPI to autodetect built-in 9 Linux uses the PNPACPI to autodetect built-in
diff --git a/drivers/pnp/system.c b/drivers/pnp/system.c
index d42015c382af..2065e74bb63f 100644
--- a/drivers/pnp/system.c
+++ b/drivers/pnp/system.c
@@ -3,7 +3,8 @@
3 * 3 *
4 * Some code is based on pnpbios_core.c 4 * Some code is based on pnpbios_core.c
5 * Copyright 2002 Adam Belay <ambx1@neo.rr.com> 5 * Copyright 2002 Adam Belay <ambx1@neo.rr.com>
6 * 6 * (c) Copyright 2007 Hewlett-Packard Development Company, L.P.
7 * Bjorn Helgaas <bjorn.helgaas@hp.com>
7 */ 8 */
8 9
9#include <linux/pnp.h> 10#include <linux/pnp.h>
@@ -21,18 +22,21 @@ static const struct pnp_device_id pnp_dev_table[] = {
21 { "", 0 } 22 { "", 0 }
22}; 23};
23 24
24static void reserve_ioport_range(char *pnpid, int start, int end) 25static void reserve_range(char *pnpid, int start, int end, int port)
25{ 26{
26 struct resource *res; 27 struct resource *res;
27 char *regionid; 28 char *regionid;
28 29
29 regionid = kmalloc(16, GFP_KERNEL); 30 regionid = kmalloc(16, GFP_KERNEL);
30 if ( regionid == NULL ) 31 if (regionid == NULL)
31 return; 32 return;
32 snprintf(regionid, 16, "pnp %s", pnpid); 33 snprintf(regionid, 16, "pnp %s", pnpid);
33 res = request_region(start,end-start+1,regionid); 34 if (port)
34 if ( res == NULL ) 35 res = request_region(start,end-start+1,regionid);
35 kfree( regionid ); 36 else
37 res = request_mem_region(start,end-start+1,regionid);
38 if (res == NULL)
39 kfree(regionid);
36 else 40 else
37 res->flags &= ~IORESOURCE_BUSY; 41 res->flags &= ~IORESOURCE_BUSY;
38 /* 42 /*
@@ -41,26 +45,20 @@ static void reserve_ioport_range(char *pnpid, int start, int end)
41 * have double reservations. 45 * have double reservations.
42 */ 46 */
43 printk(KERN_INFO 47 printk(KERN_INFO
44 "pnp: %s: ioport range 0x%x-0x%x %s reserved\n", 48 "pnp: %s: %s range 0x%x-0x%x %s reserved\n",
45 pnpid, start, end, 49 pnpid, port ? "ioport" : "iomem", start, end,
46 NULL != res ? "has been" : "could not be" 50 NULL != res ? "has been" : "could not be");
47 );
48
49 return;
50} 51}
51 52
52static void reserve_resources_of_dev( struct pnp_dev *dev ) 53static void reserve_resources_of_dev(struct pnp_dev *dev)
53{ 54{
54 int i; 55 int i;
55 56
56 for (i=0;i<PNP_MAX_PORT;i++) { 57 for (i = 0; i < PNP_MAX_PORT; i++) {
57 if (!pnp_port_valid(dev, i)) 58 if (!pnp_port_valid(dev, i))
58 /* end of resources */
59 continue; 59 continue;
60 if (pnp_port_start(dev, i) == 0) 60 if (pnp_port_start(dev, i) == 0)
61 /* disabled */ 61 continue; /* disabled */
62 /* Do nothing */
63 continue;
64 if (pnp_port_start(dev, i) < 0x100) 62 if (pnp_port_start(dev, i) < 0x100)
65 /* 63 /*
66 * Below 0x100 is only standard PC hardware 64 * Below 0x100 is only standard PC hardware
@@ -72,14 +70,18 @@ static void reserve_resources_of_dev( struct pnp_dev *dev )
72 */ 70 */
73 continue; 71 continue;
74 if (pnp_port_end(dev, i) < pnp_port_start(dev, i)) 72 if (pnp_port_end(dev, i) < pnp_port_start(dev, i))
75 /* invalid endpoint */ 73 continue; /* invalid */
76 /* Do nothing */ 74
75 reserve_range(dev->dev.bus_id, pnp_port_start(dev, i),
76 pnp_port_end(dev, i), 1);
77 }
78
79 for (i = 0; i < PNP_MAX_MEM; i++) {
80 if (!pnp_mem_valid(dev, i))
77 continue; 81 continue;
78 reserve_ioport_range( 82
79 dev->dev.bus_id, 83 reserve_range(dev->dev.bus_id, pnp_mem_start(dev, i),
80 pnp_port_start(dev, i), 84 pnp_mem_end(dev, i), 0);
81 pnp_port_end(dev, i)
82 );
83 } 85 }
84 86
85 return; 87 return;
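
reserve_ioport_range() is generalized into reserve_range(), which now covers PNP memory resources as well; the port flag selects the resource tree. A self-contained hedged sketch of the same helper shape outside the PNP core:

    #include <linux/ioport.h>
    #include <linux/kernel.h>

    /* Sketch: one reservation helper for both I/O ports and memory. */
    static void example_reserve(const char *id, unsigned long start,
                                unsigned long end, int port)
    {
            struct resource *res;

            res = port ? request_region(start, end - start + 1, id)
                       : request_mem_region(start, end - start + 1, id);
            if (res)
                    /* reserved, but let a real driver claim it later */
                    res->flags &= ~IORESOURCE_BUSY;
            printk(KERN_INFO "pnp: %s: %s range 0x%lx-0x%lx %s reserved\n",
                   id, port ? "ioport" : "iomem", start, end,
                   res ? "has been" : "could not be");
    }
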
diff --git a/drivers/ps3/Makefile b/drivers/ps3/Makefile
index 8433eb7562cb..d547cf50ca9d 100644
--- a/drivers/ps3/Makefile
+++ b/drivers/ps3/Makefile
@@ -1,2 +1 @@
1obj-y += system-bus.o
2obj-$(CONFIG_PS3_VUART) += vuart.o obj-$(CONFIG_PS3_VUART) += vuart.o
diff --git a/drivers/ps3/system-bus.c b/drivers/ps3/system-bus.c
deleted file mode 100644
index d79f949bcb2a..000000000000
--- a/drivers/ps3/system-bus.c
+++ /dev/null
@@ -1,362 +0,0 @@
1/*
2 * PS3 system bus driver.
3 *
4 * Copyright (C) 2006 Sony Computer Entertainment Inc.
5 * Copyright 2006 Sony Corp.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#include <linux/kernel.h>
22#include <linux/init.h>
23#include <linux/module.h>
24#include <linux/dma-mapping.h>
25#include <linux/err.h>
26
27#include <asm/udbg.h>
28#include <asm/ps3.h>
29#include <asm/lv1call.h>
30#include <asm/firmware.h>
31
32#define dump_mmio_region(_a) _dump_mmio_region(_a, __func__, __LINE__)
33static void _dump_mmio_region(const struct ps3_mmio_region* r,
34 const char* func, int line)
35{
36 pr_debug("%s:%d: dev %u:%u\n", func, line, r->did.bus_id,
37 r->did.dev_id);
38 pr_debug("%s:%d: bus_addr %lxh\n", func, line, r->bus_addr);
39 pr_debug("%s:%d: len %lxh\n", func, line, r->len);
40 pr_debug("%s:%d: lpar_addr %lxh\n", func, line, r->lpar_addr);
41}
42
43int ps3_mmio_region_create(struct ps3_mmio_region *r)
44{
45 int result;
46
47 result = lv1_map_device_mmio_region(r->did.bus_id, r->did.dev_id,
48 r->bus_addr, r->len, r->page_size, &r->lpar_addr);
49
50 if (result) {
51 pr_debug("%s:%d: lv1_map_device_mmio_region failed: %s\n",
52 __func__, __LINE__, ps3_result(result));
53 r->lpar_addr = r->len = r->bus_addr = 0;
54 }
55
56 dump_mmio_region(r);
57 return result;
58}
59
60int ps3_free_mmio_region(struct ps3_mmio_region *r)
61{
62 int result;
63
64 result = lv1_unmap_device_mmio_region(r->did.bus_id, r->did.dev_id,
65 r->bus_addr);
66
67 if (result)
68 pr_debug("%s:%d: lv1_unmap_device_mmio_region failed: %s\n",
69 __func__, __LINE__, ps3_result(result));
70
71 r->lpar_addr = r->len = r->bus_addr = 0;
72 return result;
73}
74
75static int ps3_system_bus_match(struct device *_dev,
76 struct device_driver *_drv)
77{
78 int result;
79 struct ps3_system_bus_driver *drv = to_ps3_system_bus_driver(_drv);
80 struct ps3_system_bus_device *dev = to_ps3_system_bus_device(_dev);
81
82 result = dev->match_id == drv->match_id;
83
84 pr_info("%s:%d: dev=%u(%s), drv=%u(%s): %s\n", __func__, __LINE__,
85 dev->match_id, dev->core.bus_id, drv->match_id, drv->core.name,
86 (result ? "match" : "miss"));
87 return result;
88}
89
90static int ps3_system_bus_probe(struct device *_dev)
91{
92 int result;
93 struct ps3_system_bus_device *dev = to_ps3_system_bus_device(_dev);
94 struct ps3_system_bus_driver *drv =
95 to_ps3_system_bus_driver(_dev->driver);
96
97 result = lv1_open_device(dev->did.bus_id, dev->did.dev_id, 0);
98
99 if (result) {
100 pr_debug("%s:%d: lv1_open_device failed (%d)\n",
101 __func__, __LINE__, result);
102 result = -EACCES;
103 goto clean_none;
104 }
105
106 if (dev->d_region->did.bus_id) {
107 result = ps3_dma_region_create(dev->d_region);
108
109 if (result) {
110 pr_debug("%s:%d: ps3_dma_region_create failed (%d)\n",
111 __func__, __LINE__, result);
112 BUG_ON("check region type");
113 result = -EINVAL;
114 goto clean_device;
115 }
116 }
117
118 BUG_ON(!drv);
119
120 if (drv->probe)
121 result = drv->probe(dev);
122 else
123 pr_info("%s:%d: %s no probe method\n", __func__, __LINE__,
124 dev->core.bus_id);
125
126 if (result) {
127 pr_debug("%s:%d: drv->probe failed\n", __func__, __LINE__);
128 goto clean_dma;
129 }
130
131 return result;
132
133clean_dma:
134 ps3_dma_region_free(dev->d_region);
135clean_device:
136 lv1_close_device(dev->did.bus_id, dev->did.dev_id);
137clean_none:
138 return result;
139}
140
141static int ps3_system_bus_remove(struct device *_dev)
142{
143 struct ps3_system_bus_device *dev = to_ps3_system_bus_device(_dev);
144 struct ps3_system_bus_driver *drv =
145 to_ps3_system_bus_driver(_dev->driver);
146
147 if (drv->remove)
148 drv->remove(dev);
149 else
150 pr_info("%s:%d: %s no remove method\n", __func__, __LINE__,
151 dev->core.bus_id);
152
153 ps3_dma_region_free(dev->d_region);
154 ps3_free_mmio_region(dev->m_region);
155 lv1_close_device(dev->did.bus_id, dev->did.dev_id);
156
157 return 0;
158}
159
160struct bus_type ps3_system_bus_type = {
161 .name = "ps3_system_bus",
162 .match = ps3_system_bus_match,
163 .probe = ps3_system_bus_probe,
164 .remove = ps3_system_bus_remove,
165};
166
167int __init ps3_system_bus_init(void)
168{
169 int result;
170
171 if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
172 return 0;
173
174 result = bus_register(&ps3_system_bus_type);
175 BUG_ON(result);
176 return result;
177}
178
179core_initcall(ps3_system_bus_init);
180
181/* Allocates a contiguous real buffer and creates mappings over it.
182 * Returns the virtual address of the buffer and sets dma_handle
183 * to the dma address (mapping) of the first page.
184 */
185
186static void * ps3_alloc_coherent(struct device *_dev, size_t size,
187 dma_addr_t *dma_handle, gfp_t flag)
188{
189 int result;
190 struct ps3_system_bus_device *dev = to_ps3_system_bus_device(_dev);
191 unsigned long virt_addr;
192
193 BUG_ON(!dev->d_region->bus_addr);
194
195 flag &= ~(__GFP_DMA | __GFP_HIGHMEM);
196 flag |= __GFP_ZERO;
197
198 virt_addr = __get_free_pages(flag, get_order(size));
199
200 if (!virt_addr) {
201 pr_debug("%s:%d: get_free_pages failed\n", __func__, __LINE__);
202 goto clean_none;
203 }
204
205 result = ps3_dma_map(dev->d_region, virt_addr, size, dma_handle);
206
207 if (result) {
208 pr_debug("%s:%d: ps3_dma_map failed (%d)\n",
209 __func__, __LINE__, result);
210 BUG_ON("check region type");
211 goto clean_alloc;
212 }
213
214 return (void*)virt_addr;
215
216clean_alloc:
217 free_pages(virt_addr, get_order(size));
218clean_none:
219 dma_handle = NULL;
220 return NULL;
221}
222
223static void ps3_free_coherent(struct device *_dev, size_t size, void *vaddr,
224 dma_addr_t dma_handle)
225{
226 struct ps3_system_bus_device *dev = to_ps3_system_bus_device(_dev);
227
228 ps3_dma_unmap(dev->d_region, dma_handle, size);
229 free_pages((unsigned long)vaddr, get_order(size));
230}
231
232/* Creates TCEs for a user provided buffer. The user buffer must be
233 * contiguous real kernel storage (not vmalloc). The address of the buffer
234 * passed here is the kernel (virtual) address of the buffer. The buffer
235 * need not be page aligned, the dma_addr_t returned will point to the same
236 * byte within the page as vaddr.
237 */
238
239static dma_addr_t ps3_map_single(struct device *_dev, void *ptr, size_t size,
240 enum dma_data_direction direction)
241{
242 struct ps3_system_bus_device *dev = to_ps3_system_bus_device(_dev);
243 int result;
244 unsigned long bus_addr;
245
246 result = ps3_dma_map(dev->d_region, (unsigned long)ptr, size,
247 &bus_addr);
248
249 if (result) {
250 pr_debug("%s:%d: ps3_dma_map failed (%d)\n",
251 __func__, __LINE__, result);
252 }
253
254 return bus_addr;
255}
256
257static void ps3_unmap_single(struct device *_dev, dma_addr_t dma_addr,
258 size_t size, enum dma_data_direction direction)
259{
260 struct ps3_system_bus_device *dev = to_ps3_system_bus_device(_dev);
261 int result;
262
263 result = ps3_dma_unmap(dev->d_region, dma_addr, size);
264
265 if (result) {
266 pr_debug("%s:%d: ps3_dma_unmap failed (%d)\n",
267 __func__, __LINE__, result);
268 }
269}
270
271static int ps3_map_sg(struct device *_dev, struct scatterlist *sg, int nents,
272 enum dma_data_direction direction)
273{
274#if defined(CONFIG_PS3_DYNAMIC_DMA)
275 BUG_ON("do");
276#endif
277 return 0;
278}
279
280static void ps3_unmap_sg(struct device *_dev, struct scatterlist *sg,
281 int nents, enum dma_data_direction direction)
282{
283#if defined(CONFIG_PS3_DYNAMIC_DMA)
284 BUG_ON("do");
285#endif
286}
287
288static int ps3_dma_supported(struct device *_dev, u64 mask)
289{
290 return 1;
291}
292
293static struct dma_mapping_ops ps3_dma_ops = {
294 .alloc_coherent = ps3_alloc_coherent,
295 .free_coherent = ps3_free_coherent,
296 .map_single = ps3_map_single,
297 .unmap_single = ps3_unmap_single,
298 .map_sg = ps3_map_sg,
299 .unmap_sg = ps3_unmap_sg,
300 .dma_supported = ps3_dma_supported
301};
302
303/**
304 * ps3_system_bus_release_device - remove a device from the system bus
305 */
306
307static void ps3_system_bus_release_device(struct device *_dev)
308{
309 struct ps3_system_bus_device *dev = to_ps3_system_bus_device(_dev);
310 kfree(dev);
311}
312
313/**
314 * ps3_system_bus_device_register - add a device to the system bus
315 *
316 * ps3_system_bus_device_register() expects the dev object to be allocated
317 * dynamically by the caller. The system bus takes ownership of the dev
318 * object and frees the object in ps3_system_bus_release_device().
319 */
320
321int ps3_system_bus_device_register(struct ps3_system_bus_device *dev)
322{
323 int result;
324 static unsigned int dev_count = 1;
325
326 dev->core.parent = NULL;
327 dev->core.bus = &ps3_system_bus_type;
328 dev->core.release = ps3_system_bus_release_device;
329
330 dev->core.archdata.of_node = NULL;
331 dev->core.archdata.dma_ops = &ps3_dma_ops;
332 dev->core.archdata.numa_node = 0;
333
334 snprintf(dev->core.bus_id, sizeof(dev->core.bus_id), "sb_%02x",
335 dev_count++);
336
337 pr_debug("%s:%d add %s\n", __func__, __LINE__, dev->core.bus_id);
338
339 result = device_register(&dev->core);
340 return result;
341}
342
343EXPORT_SYMBOL_GPL(ps3_system_bus_device_register);
344
345int ps3_system_bus_driver_register(struct ps3_system_bus_driver *drv)
346{
347 int result;
348
349 drv->core.bus = &ps3_system_bus_type;
350
351 result = driver_register(&drv->core);
352 return result;
353}
354
355EXPORT_SYMBOL_GPL(ps3_system_bus_driver_register);
356
357void ps3_system_bus_driver_unregister(struct ps3_system_bus_driver *drv)
358{
359 driver_unregister(&drv->core);
360}
361
362EXPORT_SYMBOL_GPL(ps3_system_bus_driver_unregister);
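
The deleted system-bus.c, removed from drivers/ here, is a compact example of a custom struct bus_type with match/probe/remove hooks. A minimal hedged sketch of the bus-registration pattern it used, with hypothetical names:

    #include <linux/device.h>
    #include <linux/init.h>

    static int example_bus_match(struct device *dev,
                                 struct device_driver *drv)
    {
            /* real buses compare device and driver IDs here */
            return 1;
    }

    static struct bus_type example_bus_type = {
            .name  = "example_bus",
            .match = example_bus_match,
    };

    static int __init example_bus_init(void)
    {
            return bus_register(&example_bus_type);
    }
    core_initcall(example_bus_init);
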
diff --git a/drivers/ps3/vuart.c b/drivers/ps3/vuart.c
index 6974f65bcda5..a72da8f651f8 100644
--- a/drivers/ps3/vuart.c
+++ b/drivers/ps3/vuart.c
@@ -783,8 +783,8 @@ static int ps3_vuart_probe(struct device *_dev)
783 783
784 vuart_private.in_use++; 784 vuart_private.in_use++;
785 if (vuart_private.in_use == 1) { 785 if (vuart_private.in_use == 1) {
786 result = ps3_alloc_vuart_irq((void*)&vuart_private.bmp.status, 786 result = ps3_alloc_vuart_irq(PS3_BINDING_CPU_ANY,
787 &vuart_private.virq); 787 (void*)&vuart_private.bmp.status, &vuart_private.virq);
788 788
789 if (result) { 789 if (result) {
790 dev_dbg(&dev->core, 790 dev_dbg(&dev->core,
diff --git a/drivers/ps3/vuart.h b/drivers/ps3/vuart.h
index 28fd89f0c8aa..11c421cf7a03 100644
--- a/drivers/ps3/vuart.h
+++ b/drivers/ps3/vuart.h
@@ -21,37 +21,6 @@
21#if !defined(_PS3_VUART_H) 21#if !defined(_PS3_VUART_H)
22#define _PS3_VUART_H 22#define _PS3_VUART_H
23 23
24struct ps3_vuart_stats {
25 unsigned long bytes_written;
26 unsigned long bytes_read;
27 unsigned long tx_interrupts;
28 unsigned long rx_interrupts;
29 unsigned long disconnect_interrupts;
30};
31
32/**
33 * struct ps3_vuart_port_device - a device on a vuart port
34 */
35
36struct ps3_vuart_port_device {
37 enum ps3_match_id match_id;
38 struct device core;
39
40 /* private driver variables */
41 unsigned int port_number;
42 unsigned long interrupt_mask;
43 struct {
44 spinlock_t lock;
45 struct list_head head;
46 } tx_list;
47 struct {
48 unsigned long bytes_held;
49 spinlock_t lock;
50 struct list_head head;
51 } rx_list;
52 struct ps3_vuart_stats stats;
53};
54
55/** 24/**
56 * struct ps3_vuart_port_driver - a driver for a device on a vuart port 25 * struct ps3_vuart_port_driver - a driver for a device on a vuart port
57 */ 26 */
@@ -68,9 +37,9 @@ struct ps3_vuart_port_driver {
68 /* int (*resume)(struct ps3_vuart_port_device *); */ 37 /* int (*resume)(struct ps3_vuart_port_device *); */
69}; 38};
70 39
71int ps3_vuart_port_device_register(struct ps3_vuart_port_device *dev);
72int ps3_vuart_port_driver_register(struct ps3_vuart_port_driver *drv); 40int ps3_vuart_port_driver_register(struct ps3_vuart_port_driver *drv);
73void ps3_vuart_port_driver_unregister(struct ps3_vuart_port_driver *drv); 41void ps3_vuart_port_driver_unregister(struct ps3_vuart_port_driver *drv);
42
74int ps3_vuart_write(struct ps3_vuart_port_device *dev, 43int ps3_vuart_write(struct ps3_vuart_port_device *dev,
75 const void* buf, unsigned int bytes); 44 const void* buf, unsigned int bytes);
76int ps3_vuart_read(struct ps3_vuart_port_device *dev, void* buf, 45int ps3_vuart_read(struct ps3_vuart_port_device *dev, void* buf,
@@ -86,9 +55,4 @@ static inline struct ps3_vuart_port_device *to_ps3_vuart_port_device(
86 return container_of(_dev, struct ps3_vuart_port_device, core); 55 return container_of(_dev, struct ps3_vuart_port_device, core);
87} 56}
88 57
89int ps3_vuart_write(struct ps3_vuart_port_device *dev, const void* buf,
90 unsigned int bytes);
91int ps3_vuart_read(struct ps3_vuart_port_device *dev, void* buf,
92 unsigned int bytes);
93
94#endif 58#endif
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index 94d3df62a5fa..82f2ac87ccd4 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -305,7 +305,7 @@ static int rtc_dev_ioctl(struct inode *inode, struct file *file,
305 305
306 case RTC_IRQP_READ: 306 case RTC_IRQP_READ:
307 if (ops->irq_set_freq) 307 if (ops->irq_set_freq)
308 err = put_user(rtc->irq_freq, (unsigned long *) arg); 308 err = put_user(rtc->irq_freq, (unsigned long __user *)uarg);
309 break; 309 break;
310 310
311 case RTC_IRQP_SET: 311 case RTC_IRQP_SET:
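
The rtc-dev fix replaces the raw arg cast with the properly __user-annotated uarg, so sparse can type-check the user-space store. A hedged sketch of the annotation idiom:

    #include <linux/uaccess.h>

    /* Sketch: returning a value to user space from an ioctl handler.
     * uarg plays the same role as in the patched code. */
    static int example_read_freq(unsigned long freq, void __user *uarg)
    {
            /* put_user() wants a __user destination; the annotation is
             * what lets sparse catch kernel/user pointer mixups */
            return put_user(freq, (unsigned long __user *)uarg);
    }
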
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
index 4b72b8ef5d66..038118bbfaea 100644
--- a/drivers/rtc/rtc-pcf8563.c
+++ b/drivers/rtc/rtc-pcf8563.c
@@ -53,6 +53,25 @@ I2C_CLIENT_INSMOD;
53#define PCF8563_SC_LV 0x80 /* low voltage */ 53#define PCF8563_SC_LV 0x80 /* low voltage */
54#define PCF8563_MO_C 0x80 /* century */ 54#define PCF8563_MO_C 0x80 /* century */
55 55
56struct pcf8563 {
57 struct i2c_client client;
58 /*
 59 * The meaning of the MO_C bit varies by chip type.
60 * From PCF8563 datasheet: this bit is toggled when the years
61 * register overflows from 99 to 00
62 * 0 indicates the century is 20xx
63 * 1 indicates the century is 19xx
64 * From RTC8564 datasheet: this bit indicates change of
65 * century. When the year digit data overflows from 99 to 00,
66 * this bit is set. By presetting it to 0 while still in the
67 * 20th century, it will be set in year 2000, ...
68 * There seems no reliable way to know how the system use this
69 * bit. So let's do it heuristically, assuming we are live in
70 * 1970...2069.
71 */
72 int c_polarity; /* 0: MO_C=1 means 19xx, otherwise MO_C=1 means 20xx */
73};
74
56static int pcf8563_probe(struct i2c_adapter *adapter, int address, int kind); 75static int pcf8563_probe(struct i2c_adapter *adapter, int address, int kind);
57static int pcf8563_detach(struct i2c_client *client); 76static int pcf8563_detach(struct i2c_client *client);
58 77
@@ -62,6 +81,7 @@ static int pcf8563_detach(struct i2c_client *client);
62 */ 81 */
63static int pcf8563_get_datetime(struct i2c_client *client, struct rtc_time *tm) 82static int pcf8563_get_datetime(struct i2c_client *client, struct rtc_time *tm)
64{ 83{
84 struct pcf8563 *pcf8563 = container_of(client, struct pcf8563, client);
65 unsigned char buf[13] = { PCF8563_REG_ST1 }; 85 unsigned char buf[13] = { PCF8563_REG_ST1 };
66 86
67 struct i2c_msg msgs[] = { 87 struct i2c_msg msgs[] = {
@@ -94,8 +114,12 @@ static int pcf8563_get_datetime(struct i2c_client *client, struct rtc_time *tm)
94 tm->tm_mday = BCD2BIN(buf[PCF8563_REG_DM] & 0x3F); 114 tm->tm_mday = BCD2BIN(buf[PCF8563_REG_DM] & 0x3F);
95 tm->tm_wday = buf[PCF8563_REG_DW] & 0x07; 115 tm->tm_wday = buf[PCF8563_REG_DW] & 0x07;
96 tm->tm_mon = BCD2BIN(buf[PCF8563_REG_MO] & 0x1F) - 1; /* rtc mn 1-12 */ 116 tm->tm_mon = BCD2BIN(buf[PCF8563_REG_MO] & 0x1F) - 1; /* rtc mn 1-12 */
97 tm->tm_year = BCD2BIN(buf[PCF8563_REG_YR]) 117 tm->tm_year = BCD2BIN(buf[PCF8563_REG_YR]);
98 + (buf[PCF8563_REG_MO] & PCF8563_MO_C ? 0 : 100); 118 if (tm->tm_year < 70)
119 tm->tm_year += 100; /* assume we are in 1970...2069 */
120 /* detect the polarity heuristically. see note above. */
121 pcf8563->c_polarity = (buf[PCF8563_REG_MO] & PCF8563_MO_C) ?
122 (tm->tm_year >= 100) : (tm->tm_year < 100);
99 123
100 dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d, " 124 dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d, "
101 "mday=%d, mon=%d, year=%d, wday=%d\n", 125 "mday=%d, mon=%d, year=%d, wday=%d\n",
@@ -114,6 +138,7 @@ static int pcf8563_get_datetime(struct i2c_client *client, struct rtc_time *tm)
114 138
115static int pcf8563_set_datetime(struct i2c_client *client, struct rtc_time *tm) 139static int pcf8563_set_datetime(struct i2c_client *client, struct rtc_time *tm)
116{ 140{
141 struct pcf8563 *pcf8563 = container_of(client, struct pcf8563, client);
117 int i, err; 142 int i, err;
118 unsigned char buf[9]; 143 unsigned char buf[9];
119 144
@@ -135,7 +160,7 @@ static int pcf8563_set_datetime(struct i2c_client *client, struct rtc_time *tm)
135 160
136 /* year and century */ 161 /* year and century */
137 buf[PCF8563_REG_YR] = BIN2BCD(tm->tm_year % 100); 162 buf[PCF8563_REG_YR] = BIN2BCD(tm->tm_year % 100);
138 if (tm->tm_year < 100) 163 if (pcf8563->c_polarity ? (tm->tm_year >= 100) : (tm->tm_year < 100))
139 buf[PCF8563_REG_MO] |= PCF8563_MO_C; 164 buf[PCF8563_REG_MO] |= PCF8563_MO_C;
140 165
141 buf[PCF8563_REG_DW] = tm->tm_wday & 0x07; 166 buf[PCF8563_REG_DW] = tm->tm_wday & 0x07;
@@ -248,6 +273,7 @@ static struct i2c_driver pcf8563_driver = {
248 273
249static int pcf8563_probe(struct i2c_adapter *adapter, int address, int kind) 274static int pcf8563_probe(struct i2c_adapter *adapter, int address, int kind)
250{ 275{
276 struct pcf8563 *pcf8563;
251 struct i2c_client *client; 277 struct i2c_client *client;
252 struct rtc_device *rtc; 278 struct rtc_device *rtc;
253 279
@@ -260,11 +286,12 @@ static int pcf8563_probe(struct i2c_adapter *adapter, int address, int kind)
260 goto exit; 286 goto exit;
261 } 287 }
262 288
263 if (!(client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL))) { 289 if (!(pcf8563 = kzalloc(sizeof(struct pcf8563), GFP_KERNEL))) {
264 err = -ENOMEM; 290 err = -ENOMEM;
265 goto exit; 291 goto exit;
266 } 292 }
267 293
294 client = &pcf8563->client;
268 client->addr = address; 295 client->addr = address;
269 client->driver = &pcf8563_driver; 296 client->driver = &pcf8563_driver;
270 client->adapter = adapter; 297 client->adapter = adapter;
@@ -301,7 +328,7 @@ exit_detach:
301 i2c_detach_client(client); 328 i2c_detach_client(client);
302 329
303exit_kfree: 330exit_kfree:
304 kfree(client); 331 kfree(pcf8563);
305 332
306exit: 333exit:
307 return err; 334 return err;
@@ -309,6 +336,7 @@ exit:
309 336
310static int pcf8563_detach(struct i2c_client *client) 337static int pcf8563_detach(struct i2c_client *client)
311{ 338{
339 struct pcf8563 *pcf8563 = container_of(client, struct pcf8563, client);
312 int err; 340 int err;
313 struct rtc_device *rtc = i2c_get_clientdata(client); 341 struct rtc_device *rtc = i2c_get_clientdata(client);
314 342
@@ -318,7 +346,7 @@ static int pcf8563_detach(struct i2c_client *client)
318 if ((err = i2c_detach_client(client))) 346 if ((err = i2c_detach_client(client)))
319 return err; 347 return err;
320 348
321 kfree(client); 349 kfree(pcf8563);
322 350
323 return 0; 351 return 0;
324} 352}
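
Because the PCF8563 and RTC8564 datasheets give the MO_C century bit opposite meanings, the driver learns the polarity from the first successful read and mirrors it on writes. A standalone hedged sketch of the heuristic (hypothetical names; plain C):

    /* Sketch: assume dates in 1970..2069, learn MO_C polarity on read,
     * reuse the learned polarity on write. */
    struct example_rtc {
            int c_polarity; /* 0: MO_C=1 means 19xx; 1: means 20xx */
    };

    static void example_learn_polarity(struct example_rtc *rtc,
                                       int mo_c_set, int year)
    {
            /* year counts from 1900; widen two-digit values below 70 */
            if (year < 70)
                    year += 100;    /* 00..69 -> 2000..2069 */
            rtc->c_polarity = mo_c_set ? (year >= 100) : (year < 100);
    }

    static int example_mo_c_on_write(struct example_rtc *rtc, int year)
    {
            return rtc->c_polarity ? (year >= 100) : (year < 100);
    }
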
diff --git a/drivers/s390/Kconfig b/drivers/s390/Kconfig
index ae89b9b88743..165af398fdea 100644
--- a/drivers/s390/Kconfig
+++ b/drivers/s390/Kconfig
@@ -103,14 +103,8 @@ config CCW_CONSOLE
103 depends on TN3215_CONSOLE || TN3270_CONSOLE 103 depends on TN3215_CONSOLE || TN3270_CONSOLE
104 default y 104 default y
105 105
106config SCLP
107 bool "Support for SCLP"
108 help
109 Include support for the SCLP interface to the service element.
110
111config SCLP_TTY 106config SCLP_TTY
112 bool "Support for SCLP line mode terminal" 107 bool "Support for SCLP line mode terminal"
113 depends on SCLP
114 help 108 help
115 Include support for IBM SCLP line-mode terminals. 109 Include support for IBM SCLP line-mode terminals.
116 110
@@ -123,7 +117,6 @@ config SCLP_CONSOLE
123 117
124config SCLP_VT220_TTY 118config SCLP_VT220_TTY
125 bool "Support for SCLP VT220-compatible terminal" 119 bool "Support for SCLP VT220-compatible terminal"
126 depends on SCLP
127 help 120 help
128 Include support for an IBM SCLP VT220-compatible terminal. 121 Include support for an IBM SCLP VT220-compatible terminal.
129 122
@@ -136,7 +129,6 @@ config SCLP_VT220_CONSOLE
136 129
137config SCLP_CPI 130config SCLP_CPI
138 tristate "Control-Program Identification" 131 tristate "Control-Program Identification"
139 depends on SCLP
140 help 132 help
141 This option enables the hardware console interface for system 133 This option enables the hardware console interface for system
142 identification. This is commonly used for workload management and 134 identification. This is commonly used for workload management and
diff --git a/drivers/s390/Makefile b/drivers/s390/Makefile
index 9803c9352d78..5a888704a8d0 100644
--- a/drivers/s390/Makefile
+++ b/drivers/s390/Makefile
@@ -2,6 +2,8 @@
2# Makefile for the S/390 specific device drivers 2# Makefile for the S/390 specific device drivers
3# 3#
4 4
5CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w
6
5obj-y += s390mach.o sysinfo.o s390_rdev.o 7obj-y += s390mach.o sysinfo.o s390_rdev.o
6obj-y += cio/ block/ char/ crypto/ net/ scsi/ 8obj-y += cio/ block/ char/ crypto/ net/ scsi/
7 9
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 492b68bcd7cc..eb5dc62f0d9c 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -37,6 +37,7 @@
37 */ 37 */
38debug_info_t *dasd_debug_area; 38debug_info_t *dasd_debug_area;
39struct dasd_discipline *dasd_diag_discipline_pointer; 39struct dasd_discipline *dasd_diag_discipline_pointer;
40void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
40 41
41MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>"); 42MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
42MODULE_DESCRIPTION("Linux on S/390 DASD device driver," 43MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
@@ -51,7 +52,6 @@ static int dasd_alloc_queue(struct dasd_device * device);
51static void dasd_setup_queue(struct dasd_device * device); 52static void dasd_setup_queue(struct dasd_device * device);
52static void dasd_free_queue(struct dasd_device * device); 53static void dasd_free_queue(struct dasd_device * device);
53static void dasd_flush_request_queue(struct dasd_device *); 54static void dasd_flush_request_queue(struct dasd_device *);
54static void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
55static int dasd_flush_ccw_queue(struct dasd_device *, int); 55static int dasd_flush_ccw_queue(struct dasd_device *, int);
56static void dasd_tasklet(struct dasd_device *); 56static void dasd_tasklet(struct dasd_device *);
57static void do_kick_device(struct work_struct *); 57static void do_kick_device(struct work_struct *);
@@ -483,7 +483,7 @@ unsigned int dasd_profile_level = DASD_PROFILE_OFF;
483/* 483/*
484 * Add profiling information for cqr before execution. 484 * Add profiling information for cqr before execution.
485 */ 485 */
486static inline void 486static void
487dasd_profile_start(struct dasd_device *device, struct dasd_ccw_req * cqr, 487dasd_profile_start(struct dasd_device *device, struct dasd_ccw_req * cqr,
488 struct request *req) 488 struct request *req)
489{ 489{
@@ -505,7 +505,7 @@ dasd_profile_start(struct dasd_device *device, struct dasd_ccw_req * cqr,
505/* 505/*
506 * Add profiling information for cqr after execution. 506 * Add profiling information for cqr after execution.
507 */ 507 */
508static inline void 508static void
509dasd_profile_end(struct dasd_device *device, struct dasd_ccw_req * cqr, 509dasd_profile_end(struct dasd_device *device, struct dasd_ccw_req * cqr,
510 struct request *req) 510 struct request *req)
511{ 511{
@@ -1022,8 +1022,6 @@ dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
1022 irb->scsw.cstat == 0 && 1022 irb->scsw.cstat == 0 &&
1023 !irb->esw.esw0.erw.cons) 1023 !irb->esw.esw0.erw.cons)
1024 era = dasd_era_none; 1024 era = dasd_era_none;
1025 else if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags))
1026 era = dasd_era_fatal; /* don't recover this request */
1027 else if (irb->esw.esw0.erw.cons) 1025 else if (irb->esw.esw0.erw.cons)
1028 era = device->discipline->examine_error(cqr, irb); 1026 era = device->discipline->examine_error(cqr, irb);
1029 else 1027 else
@@ -1104,7 +1102,7 @@ __dasd_process_erp(struct dasd_device *device, struct dasd_ccw_req *cqr)
1104/* 1102/*
1105 * Process ccw request queue. 1103 * Process ccw request queue.
1106 */ 1104 */
1107static inline void 1105static void
1108__dasd_process_ccw_queue(struct dasd_device * device, 1106__dasd_process_ccw_queue(struct dasd_device * device,
1109 struct list_head *final_queue) 1107 struct list_head *final_queue)
1110{ 1108{
@@ -1127,7 +1125,9 @@ restart:
1127 cqr->status = DASD_CQR_FAILED; 1125 cqr->status = DASD_CQR_FAILED;
1128 cqr->stopclk = get_clock(); 1126 cqr->stopclk = get_clock();
1129 } else { 1127 } else {
1130 if (cqr->irb.esw.esw0.erw.cons) { 1128 if (cqr->irb.esw.esw0.erw.cons &&
1129 test_bit(DASD_CQR_FLAGS_USE_ERP,
1130 &cqr->flags)) {
1131 erp_fn = device->discipline-> 1131 erp_fn = device->discipline->
1132 erp_action(cqr); 1132 erp_action(cqr);
1133 erp_fn(cqr); 1133 erp_fn(cqr);
@@ -1181,7 +1181,7 @@ dasd_end_request_cb(struct dasd_ccw_req * cqr, void *data)
1181/* 1181/*
1182 * Fetch requests from the block device queue. 1182 * Fetch requests from the block device queue.
1183 */ 1183 */
1184static inline void 1184static void
1185__dasd_process_blk_queue(struct dasd_device * device) 1185__dasd_process_blk_queue(struct dasd_device * device)
1186{ 1186{
1187 request_queue_t *queue; 1187 request_queue_t *queue;
@@ -1232,6 +1232,19 @@ __dasd_process_blk_queue(struct dasd_device * device)
1232 if (IS_ERR(cqr)) { 1232 if (IS_ERR(cqr)) {
1233 if (PTR_ERR(cqr) == -ENOMEM) 1233 if (PTR_ERR(cqr) == -ENOMEM)
1234 break; /* terminate request queue loop */ 1234 break; /* terminate request queue loop */
1235 if (PTR_ERR(cqr) == -EAGAIN) {
1236 /*
 1237 * The current request cannot be built right
 1238 * now; we have to try again later. If this request
 1239 * is the head of the queue, we stop the device
1240 * for 1/2 second.
1241 */
1242 if (!list_empty(&device->ccw_queue))
1243 break;
1244 device->stopped |= DASD_STOPPED_PENDING;
1245 dasd_set_timer(device, HZ/2);
1246 break;
1247 }
1235 DBF_DEV_EVENT(DBF_ERR, device, 1248 DBF_DEV_EVENT(DBF_ERR, device,
1236 "CCW creation failed (rc=%ld) " 1249 "CCW creation failed (rc=%ld) "
1237 "on request %p", 1250 "on request %p",
@@ -1254,7 +1267,7 @@ __dasd_process_blk_queue(struct dasd_device * device)
1254 * Take a look at the first request on the ccw queue and check 1267 * Take a look at the first request on the ccw queue and check
1255 * if it reached its expire time. If so, terminate the IO. 1268 * if it reached its expire time. If so, terminate the IO.
1256 */ 1269 */
1257static inline void 1270static void
1258__dasd_check_expire(struct dasd_device * device) 1271__dasd_check_expire(struct dasd_device * device)
1259{ 1272{
1260 struct dasd_ccw_req *cqr; 1273 struct dasd_ccw_req *cqr;
@@ -1285,7 +1298,7 @@ __dasd_check_expire(struct dasd_device * device)
1285 * Take a look at the first request on the ccw queue and check 1298 * Take a look at the first request on the ccw queue and check
1286 * if it needs to be started. 1299 * if it needs to be started.
1287 */ 1300 */
1288static inline void 1301static void
1289__dasd_start_head(struct dasd_device * device) 1302__dasd_start_head(struct dasd_device * device)
1290{ 1303{
1291 struct dasd_ccw_req *cqr; 1304 struct dasd_ccw_req *cqr;
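
The new -EAGAIN branch in __dasd_process_blk_queue() lets request construction fail transiently without failing the I/O: if the unbuildable request would sit at the head of an otherwise empty CCW queue, the device is parked for half a second and the build is retried. A hedged sketch of that control flow (example_* names are hypothetical):

    #include <linux/err.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>

    struct example_dev;
    struct example_req;

    /* hypothetical helpers, declared so the sketch stays compilable */
    extern struct example_req *example_build_next(struct example_dev *);
    extern int  example_queue_empty(struct example_dev *);
    extern void example_stop_for(struct example_dev *, int ticks);
    extern void example_start_io(struct example_req *);

    static void example_process_queue(struct example_dev *dev)
    {
            struct example_req *req;

            while ((req = example_build_next(dev)) != NULL) {
                    if (IS_ERR(req)) {
                            if (PTR_ERR(req) == -EAGAIN) {
                                    /* transient: only stall when this
                                     * would be the head of the queue */
                                    if (example_queue_empty(dev))
                                            example_stop_for(dev, HZ / 2);
                                    break;  /* retry on the next pass */
                            }
                            break;          /* hard error for this pass */
                    }
                    example_start_io(req);
            }
    }
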
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index 4d01040c2c63..8b9d68f6e016 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -170,7 +170,6 @@ dasd_3990_erp_examine(struct dasd_ccw_req * cqr, struct irb * irb)
170 /* log the erp chain if fatal error occurred */ 170 /* log the erp chain if fatal error occurred */
171 if ((era == dasd_era_fatal) && (device->state >= DASD_STATE_READY)) { 171 if ((era == dasd_era_fatal) && (device->state >= DASD_STATE_READY)) {
172 dasd_log_sense(cqr, irb); 172 dasd_log_sense(cqr, irb);
173 dasd_log_ccw(cqr, 0, irb->scsw.cpa);
174 } 173 }
175 174
176 return era; 175 return era;
@@ -2640,7 +2639,6 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
2640 2639
2641 struct dasd_ccw_req *erp = NULL; 2640 struct dasd_ccw_req *erp = NULL;
2642 struct dasd_device *device = cqr->device; 2641 struct dasd_device *device = cqr->device;
2643 __u32 cpa = cqr->irb.scsw.cpa;
2644 struct dasd_ccw_req *temp_erp = NULL; 2642 struct dasd_ccw_req *temp_erp = NULL;
2645 2643
2646 if (device->features & DASD_FEATURE_ERPLOG) { 2644 if (device->features & DASD_FEATURE_ERPLOG) {
@@ -2706,9 +2704,6 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
2706 } 2704 }
2707 } 2705 }
2708 2706
2709 if (erp->status == DASD_CQR_FAILED)
2710 dasd_log_ccw(erp, 1, cpa);
2711
2712 /* enqueue added ERP request */ 2707 /* enqueue added ERP request */
2713 if (erp->status == DASD_CQR_FILLED) { 2708 if (erp->status == DASD_CQR_FILLED) {
2714 erp->status = DASD_CQR_QUEUED; 2709 erp->status = DASD_CQR_QUEUED;
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 5943266152f5..ed70852cc915 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -136,7 +136,7 @@ __setup ("dasd=", dasd_call_setup);
136/* 136/*
137 * Read a device busid/devno from a string. 137 * Read a device busid/devno from a string.
138 */ 138 */
139static inline int 139static int
140dasd_busid(char **str, int *id0, int *id1, int *devno) 140dasd_busid(char **str, int *id0, int *id1, int *devno)
141{ 141{
142 int val, old_style; 142 int val, old_style;
@@ -182,7 +182,7 @@ dasd_busid(char **str, int *id0, int *id1, int *devno)
182 * only one: "ro" for read-only devices. The default feature set 182 * only one: "ro" for read-only devices. The default feature set
183 * is empty (value 0). 183 * is empty (value 0).
184 */ 184 */
185static inline int 185static int
186dasd_feature_list(char *str, char **endp) 186dasd_feature_list(char *str, char **endp)
187{ 187{
188 int features, len, rc; 188 int features, len, rc;
@@ -341,7 +341,7 @@ dasd_parse_range( char *parsestring ) {
341 return ERR_PTR(-EINVAL); 341 return ERR_PTR(-EINVAL);
342} 342}
343 343
344static inline char * 344static char *
345dasd_parse_next_element( char *parsestring ) { 345dasd_parse_next_element( char *parsestring ) {
346 char * residual_str; 346 char * residual_str;
347 residual_str = dasd_parse_keyword(parsestring); 347 residual_str = dasd_parse_keyword(parsestring);
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 53db58a68617..ab782bb46ac1 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -43,7 +43,7 @@ MODULE_LICENSE("GPL");
43#define DIAG_MAX_RETRIES 32 43#define DIAG_MAX_RETRIES 32
44#define DIAG_TIMEOUT 50 * HZ 44#define DIAG_TIMEOUT 50 * HZ
45 45
46struct dasd_discipline dasd_diag_discipline; 46static struct dasd_discipline dasd_diag_discipline;
47 47
48struct dasd_diag_private { 48struct dasd_diag_private {
49 struct dasd_diag_characteristics rdc_data; 49 struct dasd_diag_characteristics rdc_data;
@@ -90,7 +90,7 @@ static inline int dia250(void *iob, int cmd)
90 * block offset. On success, return zero and set end_block to contain the 90 * block offset. On success, return zero and set end_block to contain the
91 * number of blocks on the device minus the specified offset. Return non-zero 91 * number of blocks on the device minus the specified offset. Return non-zero
92 * otherwise. */ 92 * otherwise. */
93static __inline__ int 93static inline int
94mdsk_init_io(struct dasd_device *device, unsigned int blocksize, 94mdsk_init_io(struct dasd_device *device, unsigned int blocksize,
95 blocknum_t offset, blocknum_t *end_block) 95 blocknum_t offset, blocknum_t *end_block)
96{ 96{
@@ -117,7 +117,7 @@ mdsk_init_io(struct dasd_device *device, unsigned int blocksize,
117 117
118/* Remove block I/O environment for device. Return zero on success, non-zero 118/* Remove block I/O environment for device. Return zero on success, non-zero
119 * otherwise. */ 119 * otherwise. */
120static __inline__ int 120static inline int
121mdsk_term_io(struct dasd_device * device) 121mdsk_term_io(struct dasd_device * device)
122{ 122{
123 struct dasd_diag_private *private; 123 struct dasd_diag_private *private;
@@ -576,7 +576,7 @@ dasd_diag_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
576 "dump sense not available for DIAG data"); 576 "dump sense not available for DIAG data");
577} 577}
578 578
579struct dasd_discipline dasd_diag_discipline = { 579static struct dasd_discipline dasd_diag_discipline = {
580 .owner = THIS_MODULE, 580 .owner = THIS_MODULE,
581 .name = "DIAG", 581 .name = "DIAG",
582 .ebcname = "DIAG", 582 .ebcname = "DIAG",
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index fdaa471e845f..cecab2274a6e 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -134,44 +134,7 @@ ceil_quot(unsigned int d1, unsigned int d2)
134 return (d1 + (d2 - 1)) / d2; 134 return (d1 + (d2 - 1)) / d2;
135} 135}
136 136
137static inline int 137static unsigned int
138bytes_per_record(struct dasd_eckd_characteristics *rdc, int kl, int dl)
139{
140 unsigned int fl1, fl2, int1, int2;
141 int bpr;
142
143 switch (rdc->formula) {
144 case 0x01:
145 fl1 = round_up_multiple(ECKD_F2(rdc) + dl, ECKD_F1(rdc));
146 fl2 = round_up_multiple(kl ? ECKD_F2(rdc) + kl : 0,
147 ECKD_F1(rdc));
148 bpr = fl1 + fl2;
149 break;
150 case 0x02:
151 int1 = ceil_quot(dl + ECKD_F6(rdc), ECKD_F5(rdc) << 1);
152 int2 = ceil_quot(kl + ECKD_F6(rdc), ECKD_F5(rdc) << 1);
153 fl1 = round_up_multiple(ECKD_F1(rdc) * ECKD_F2(rdc) + dl +
154 ECKD_F6(rdc) + ECKD_F4(rdc) * int1,
155 ECKD_F1(rdc));
156 fl2 = round_up_multiple(ECKD_F1(rdc) * ECKD_F3(rdc) + kl +
157 ECKD_F6(rdc) + ECKD_F4(rdc) * int2,
158 ECKD_F1(rdc));
159 bpr = fl1 + fl2;
160 break;
161 default:
162 bpr = 0;
163 break;
164 }
165 return bpr;
166}
167
168static inline unsigned int
169bytes_per_track(struct dasd_eckd_characteristics *rdc)
170{
171 return *(unsigned int *) (rdc->byte_per_track) >> 8;
172}
173
174static inline unsigned int
175recs_per_track(struct dasd_eckd_characteristics * rdc, 138recs_per_track(struct dasd_eckd_characteristics * rdc,
176 unsigned int kl, unsigned int dl) 139 unsigned int kl, unsigned int dl)
177{ 140{
@@ -204,37 +167,39 @@ recs_per_track(struct dasd_eckd_characteristics * rdc,
204 return 0; 167 return 0;
205} 168}
206 169
207static inline void 170static int
208check_XRC (struct ccw1 *de_ccw, 171check_XRC (struct ccw1 *de_ccw,
209 struct DE_eckd_data *data, 172 struct DE_eckd_data *data,
210 struct dasd_device *device) 173 struct dasd_device *device)
211{ 174{
212 struct dasd_eckd_private *private; 175 struct dasd_eckd_private *private;
176 int rc;
213 177
214 private = (struct dasd_eckd_private *) device->private; 178 private = (struct dasd_eckd_private *) device->private;
179 if (!private->rdc_data.facilities.XRC_supported)
180 return 0;
215 181
216 /* switch on System Time Stamp - needed for XRC Support */ 182 /* switch on System Time Stamp - needed for XRC Support */
217 if (private->rdc_data.facilities.XRC_supported) { 183 data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid' */
218 184 data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */
219 data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid' */
220 data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */
221
222 data->ep_sys_time = get_clock ();
223
224 de_ccw->count = sizeof (struct DE_eckd_data);
225 de_ccw->flags |= CCW_FLAG_SLI;
226 }
227 185
228 return; 186 rc = get_sync_clock(&data->ep_sys_time);
187 /* Ignore return code if sync clock is switched off. */
188 if (rc == -ENOSYS || rc == -EACCES)
189 rc = 0;
229 190
230} /* end check_XRC */ 191 de_ccw->count = sizeof (struct DE_eckd_data);
192 de_ccw->flags |= CCW_FLAG_SLI;
193 return rc;
194}
231 195
232static inline void 196static int
233define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk, 197define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk,
234 int totrk, int cmd, struct dasd_device * device) 198 int totrk, int cmd, struct dasd_device * device)
235{ 199{
236 struct dasd_eckd_private *private; 200 struct dasd_eckd_private *private;
237 struct ch_t geo, beg, end; 201 struct ch_t geo, beg, end;
202 int rc = 0;
238 203
239 private = (struct dasd_eckd_private *) device->private; 204 private = (struct dasd_eckd_private *) device->private;
240 205
@@ -263,12 +228,12 @@ define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk,
263 case DASD_ECKD_CCW_WRITE_KD_MT: 228 case DASD_ECKD_CCW_WRITE_KD_MT:
264 data->mask.perm = 0x02; 229 data->mask.perm = 0x02;
265 data->attributes.operation = private->attrib.operation; 230 data->attributes.operation = private->attrib.operation;
266 check_XRC (ccw, data, device); 231 rc = check_XRC (ccw, data, device);
267 break; 232 break;
268 case DASD_ECKD_CCW_WRITE_CKD: 233 case DASD_ECKD_CCW_WRITE_CKD:
269 case DASD_ECKD_CCW_WRITE_CKD_MT: 234 case DASD_ECKD_CCW_WRITE_CKD_MT:
270 data->attributes.operation = DASD_BYPASS_CACHE; 235 data->attributes.operation = DASD_BYPASS_CACHE;
271 check_XRC (ccw, data, device); 236 rc = check_XRC (ccw, data, device);
272 break; 237 break;
273 case DASD_ECKD_CCW_ERASE: 238 case DASD_ECKD_CCW_ERASE:
274 case DASD_ECKD_CCW_WRITE_HOME_ADDRESS: 239 case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
@@ -276,7 +241,7 @@ define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk,
276 data->mask.perm = 0x3; 241 data->mask.perm = 0x3;
277 data->mask.auth = 0x1; 242 data->mask.auth = 0x1;
278 data->attributes.operation = DASD_BYPASS_CACHE; 243 data->attributes.operation = DASD_BYPASS_CACHE;
279 check_XRC (ccw, data, device); 244 rc = check_XRC (ccw, data, device);
280 break; 245 break;
281 default: 246 default:
282 DEV_MESSAGE(KERN_ERR, device, "unknown opcode 0x%x", cmd); 247 DEV_MESSAGE(KERN_ERR, device, "unknown opcode 0x%x", cmd);
@@ -312,9 +277,10 @@ define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk,
312 data->beg_ext.head = beg.head; 277 data->beg_ext.head = beg.head;
313 data->end_ext.cyl = end.cyl; 278 data->end_ext.cyl = end.cyl;
314 data->end_ext.head = end.head; 279 data->end_ext.head = end.head;
280 return rc;
315} 281}
316 282
317static inline void 283static void
318locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, int trk, 284locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, int trk,
319 int rec_on_trk, int no_rec, int cmd, 285 int rec_on_trk, int no_rec, int cmd,
320 struct dasd_device * device, int reclen) 286 struct dasd_device * device, int reclen)
@@ -548,7 +514,7 @@ dasd_eckd_read_conf(struct dasd_device *device)
548/* 514/*
549 * Build CP for Perform Subsystem Function - SSC. 515 * Build CP for Perform Subsystem Function - SSC.
550 */ 516 */
551struct dasd_ccw_req * 517static struct dasd_ccw_req *
552dasd_eckd_build_psf_ssc(struct dasd_device *device) 518dasd_eckd_build_psf_ssc(struct dasd_device *device)
553{ 519{
554 struct dasd_ccw_req *cqr; 520 struct dasd_ccw_req *cqr;
@@ -1200,7 +1166,12 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
1200 return cqr; 1166 return cqr;
1201 ccw = cqr->cpaddr; 1167 ccw = cqr->cpaddr;
1202 /* First ccw is define extent. */ 1168 /* First ccw is define extent. */
1203 define_extent(ccw++, cqr->data, first_trk, last_trk, cmd, device); 1169 if (define_extent(ccw++, cqr->data, first_trk,
1170 last_trk, cmd, device) == -EAGAIN) {
1171 /* Clock not in sync and XRC is enabled. Try again later. */
1172 dasd_sfree_request(cqr, device);
1173 return ERR_PTR(-EAGAIN);
1174 }
1204 /* Build locate_record+read/write/ccws. */ 1175 /* Build locate_record+read/write/ccws. */
1205 idaws = (unsigned long *) (cqr->data + sizeof(struct DE_eckd_data)); 1176 idaws = (unsigned long *) (cqr->data + sizeof(struct DE_eckd_data));
1206 LO_data = (struct LO_eckd_data *) (idaws + cidaw); 1177 LO_data = (struct LO_eckd_data *) (idaws + cidaw);
@@ -1380,7 +1351,7 @@ dasd_eckd_release(struct dasd_device *device)
1380 cqr->device = device; 1351 cqr->device = device;
1381 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 1352 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
1382 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 1353 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
1383 cqr->retries = 0; 1354 cqr->retries = 2; /* set retry counter to enable basic ERP */
1384 cqr->expires = 2 * HZ; 1355 cqr->expires = 2 * HZ;
1385 cqr->buildclk = get_clock(); 1356 cqr->buildclk = get_clock();
1386 cqr->status = DASD_CQR_FILLED; 1357 cqr->status = DASD_CQR_FILLED;
@@ -1420,7 +1391,7 @@ dasd_eckd_reserve(struct dasd_device *device)
1420 cqr->device = device; 1391 cqr->device = device;
1421 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 1392 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
1422 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 1393 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
1423 cqr->retries = 0; 1394 cqr->retries = 2; /* set retry counter to enable basic ERP */
1424 cqr->expires = 2 * HZ; 1395 cqr->expires = 2 * HZ;
1425 cqr->buildclk = get_clock(); 1396 cqr->buildclk = get_clock();
1426 cqr->status = DASD_CQR_FILLED; 1397 cqr->status = DASD_CQR_FILLED;
@@ -1459,7 +1430,7 @@ dasd_eckd_steal_lock(struct dasd_device *device)
1459 cqr->device = device; 1430 cqr->device = device;
1460 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 1431 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
1461 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 1432 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
1462 cqr->retries = 0; 1433 cqr->retries = 2; /* set retry counter to enable basic ERP */
1463 cqr->expires = 2 * HZ; 1434 cqr->expires = 2 * HZ;
1464 cqr->buildclk = get_clock(); 1435 cqr->buildclk = get_clock();
1465 cqr->status = DASD_CQR_FILLED; 1436 cqr->status = DASD_CQR_FILLED;
@@ -1609,7 +1580,7 @@ dasd_eckd_ioctl(struct dasd_device *device, unsigned int cmd, void __user *argp)
1609 * Dump the range of CCWs into 'page' buffer 1580 * Dump the range of CCWs into 'page' buffer
1610 * and return number of printed chars. 1581 * and return number of printed chars.
1611 */ 1582 */
1612static inline int 1583static int
1613dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page) 1584dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
1614{ 1585{
1615 int len, count; 1586 int len, count;
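
check_XRC() now reports a result so define_extent() and the ECKD build path can surface -EAGAIN when the time-of-day clock is not yet in sync, feeding the deferred-build handling added in dasd.c. A hedged sketch of the propagation shape (get_sync_clock() is the s390 call the patch uses; the rest is hypothetical):

    #include <linux/errno.h>
    #include <asm/timex.h>          /* s390: get_sync_clock() */

    struct example_data {
            unsigned long long timestamp;
    };

    /* Sketch: optional-feature setup that forwards transient errors. */
    static int example_check_xrc(struct example_data *data, int supported)
    {
            int rc;

            if (!supported)
                    return 0;       /* feature absent: nothing to do */

            rc = get_sync_clock(&data->timestamp);
            /* a switched-off sync clock is not an error */
            if (rc == -ENOSYS || rc == -EACCES)
                    rc = 0;
            return rc;              /* -EAGAIN bubbles up to the caller */
    }
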
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index e0bf30ebb215..6cedc914077e 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -658,18 +658,24 @@ static struct file_operations dasd_eer_fops = {
658 .owner = THIS_MODULE, 658 .owner = THIS_MODULE,
659}; 659};
660 660
661static struct miscdevice dasd_eer_dev = { 661static struct miscdevice *dasd_eer_dev = NULL;
662 .minor = MISC_DYNAMIC_MINOR,
663 .name = "dasd_eer",
664 .fops = &dasd_eer_fops,
665};
666 662
667int __init dasd_eer_init(void) 663int __init dasd_eer_init(void)
668{ 664{
669 int rc; 665 int rc;
670 666
671 rc = misc_register(&dasd_eer_dev); 667 dasd_eer_dev = kzalloc(sizeof(*dasd_eer_dev), GFP_KERNEL);
668 if (!dasd_eer_dev)
669 return -ENOMEM;
670
671 dasd_eer_dev->minor = MISC_DYNAMIC_MINOR;
672 dasd_eer_dev->name = "dasd_eer";
673 dasd_eer_dev->fops = &dasd_eer_fops;
674
675 rc = misc_register(dasd_eer_dev);
672 if (rc) { 676 if (rc) {
677 kfree(dasd_eer_dev);
678 dasd_eer_dev = NULL;
673 MESSAGE(KERN_ERR, "%s", "dasd_eer_init could not " 679 MESSAGE(KERN_ERR, "%s", "dasd_eer_init could not "
674 "register misc device"); 680 "register misc device");
675 return rc; 681 return rc;
@@ -680,5 +686,9 @@ int __init dasd_eer_init(void)
680 686
681void dasd_eer_exit(void) 687void dasd_eer_exit(void)
682{ 688{
683 WARN_ON(misc_deregister(&dasd_eer_dev) != 0); 689 if (dasd_eer_dev) {
690 WARN_ON(misc_deregister(dasd_eer_dev) != 0);
691 kfree(dasd_eer_dev);
692 dasd_eer_dev = NULL;
693 }
684} 694}
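
dasd_eer.c switches from a static miscdevice to one allocated at init time, so the object's lifetime is managed explicitly around register/deregister. A hedged sketch of that allocate/register/cleanup shape (example_fops is a placeholder):

    #include <linux/fs.h>
    #include <linux/init.h>
    #include <linux/miscdevice.h>
    #include <linux/slab.h>

    static struct file_operations example_fops;   /* sketch placeholder */
    static struct miscdevice *example_dev;

    static int __init example_init(void)
    {
            int rc;

            example_dev = kzalloc(sizeof(*example_dev), GFP_KERNEL);
            if (!example_dev)
                    return -ENOMEM;
            example_dev->minor = MISC_DYNAMIC_MINOR;
            example_dev->name  = "example";
            example_dev->fops  = &example_fops;

            rc = misc_register(example_dev);
            if (rc) {
                    kfree(example_dev);
                    example_dev = NULL;
            }
            return rc;
    }

    static void example_exit(void)
    {
            if (example_dev) {
                    misc_deregister(example_dev);
                    kfree(example_dev);
                    example_dev = NULL;
            }
    }
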
diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c
index 58a65097922b..caa5d91420f8 100644
--- a/drivers/s390/block/dasd_erp.c
+++ b/drivers/s390/block/dasd_erp.c
@@ -152,25 +152,6 @@ dasd_default_erp_postaction(struct dasd_ccw_req * cqr)
152 152
153} /* end default_erp_postaction */ 153} /* end default_erp_postaction */
154 154
155/*
156 * Print the hex dump of the memory used by a request. This includes
 157 * all error recovery ccws that have been chained in front of the
158 * real request.
159 */
160static inline void
161hex_dump_memory(struct dasd_device *device, void *data, int len)
162{
163 int *pint;
164
165 pint = (int *) data;
166 while (len > 0) {
167 DEV_MESSAGE(KERN_ERR, device, "%p: %08x %08x %08x %08x",
168 pint, pint[0], pint[1], pint[2], pint[3]);
169 pint += 4;
170 len -= 16;
171 }
172}
173
174void 155void
175dasd_log_sense(struct dasd_ccw_req *cqr, struct irb *irb) 156dasd_log_sense(struct dasd_ccw_req *cqr, struct irb *irb)
176{ 157{
@@ -182,69 +163,8 @@ dasd_log_sense(struct dasd_ccw_req *cqr, struct irb *irb)
182 device->discipline->dump_sense(device, cqr, irb); 163 device->discipline->dump_sense(device, cqr, irb);
183} 164}
184 165
185void
186dasd_log_ccw(struct dasd_ccw_req * cqr, int caller, __u32 cpa)
187{
188 struct dasd_device *device;
189 struct dasd_ccw_req *lcqr;
190 struct ccw1 *ccw;
191 int cplength;
192
193 device = cqr->device;
194 /* log the channel program */
195 for (lcqr = cqr; lcqr != NULL; lcqr = lcqr->refers) {
196 DEV_MESSAGE(KERN_ERR, device,
197 "(%s) ERP chain report for req: %p",
198 caller == 0 ? "EXAMINE" : "ACTION", lcqr);
199 hex_dump_memory(device, lcqr, sizeof(struct dasd_ccw_req));
200
201 cplength = 1;
202 ccw = lcqr->cpaddr;
203 while (ccw++->flags & (CCW_FLAG_DC | CCW_FLAG_CC))
204 cplength++;
205
206 if (cplength > 40) { /* log only parts of the CP */
207 DEV_MESSAGE(KERN_ERR, device, "%s",
208 "Start of channel program:");
209 hex_dump_memory(device, lcqr->cpaddr,
210 40*sizeof(struct ccw1));
211
212 DEV_MESSAGE(KERN_ERR, device, "%s",
213 "End of channel program:");
214 hex_dump_memory(device, lcqr->cpaddr + cplength - 10,
215 10*sizeof(struct ccw1));
216 } else { /* log the whole CP */
217 DEV_MESSAGE(KERN_ERR, device, "%s",
218 "Channel program (complete):");
219 hex_dump_memory(device, lcqr->cpaddr,
220 cplength*sizeof(struct ccw1));
221 }
222
223 if (lcqr != cqr)
224 continue;
225
226 /*
 227 * Log bytes around the failed CCW, but only if we did
 228 * not log the whole CP or the CCW is outside the
229 * logged CP.
230 */
231 if (cplength > 40 ||
232 ((addr_t) cpa < (addr_t) lcqr->cpaddr &&
233 (addr_t) cpa > (addr_t) (lcqr->cpaddr + cplength + 4))) {
234
235 DEV_MESSAGE(KERN_ERR, device,
236 "Failed CCW (%p) (area):",
237 (void *) (long) cpa);
238 hex_dump_memory(device, cqr->cpaddr - 10,
239 20*sizeof(struct ccw1));
240 }
241 }
242
243} /* end log_erp_chain */
244
245EXPORT_SYMBOL(dasd_default_erp_action); 166EXPORT_SYMBOL(dasd_default_erp_action);
246EXPORT_SYMBOL(dasd_default_erp_postaction); 167EXPORT_SYMBOL(dasd_default_erp_postaction);
247EXPORT_SYMBOL(dasd_alloc_erp_request); 168EXPORT_SYMBOL(dasd_alloc_erp_request);
248EXPORT_SYMBOL(dasd_free_erp_request); 169EXPORT_SYMBOL(dasd_free_erp_request);
249EXPORT_SYMBOL(dasd_log_sense); 170EXPORT_SYMBOL(dasd_log_sense);
250EXPORT_SYMBOL(dasd_log_ccw);
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index b857fd5893fd..be0909e39226 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -75,7 +75,7 @@ static struct ccw_driver dasd_fba_driver = {
75 .notify = dasd_generic_notify, 75 .notify = dasd_generic_notify,
76}; 76};
77 77
78static inline void 78static void
79define_extent(struct ccw1 * ccw, struct DE_fba_data *data, int rw, 79define_extent(struct ccw1 * ccw, struct DE_fba_data *data, int rw,
80 int blksize, int beg, int nr) 80 int blksize, int beg, int nr)
81{ 81{
@@ -95,7 +95,7 @@ define_extent(struct ccw1 * ccw, struct DE_fba_data *data, int rw,
95 data->ext_end = nr - 1; 95 data->ext_end = nr - 1;
96} 96}
97 97
98static inline void 98static void
99locate_record(struct ccw1 * ccw, struct LO_fba_data *data, int rw, 99locate_record(struct ccw1 * ccw, struct LO_fba_data *data, int rw,
100 int block_nr, int block_ct) 100 int block_nr, int block_ct)
101{ 101{
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index d163632101d2..47ba4462708d 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -147,7 +147,7 @@ dasd_destroy_partitions(struct dasd_device * device)
147 */ 147 */
148 memset(&bpart, 0, sizeof(struct blkpg_partition)); 148 memset(&bpart, 0, sizeof(struct blkpg_partition));
149 memset(&barg, 0, sizeof(struct blkpg_ioctl_arg)); 149 memset(&barg, 0, sizeof(struct blkpg_ioctl_arg));
150 barg.data = (void __user *) &bpart; 150 barg.data = (void __force __user *) &bpart;
151 barg.op = BLKPG_DEL_PARTITION; 151 barg.op = BLKPG_DEL_PARTITION;
152 for (bpart.pno = device->gdp->minors - 1; bpart.pno > 0; bpart.pno--) 152 for (bpart.pno = device->gdp->minors - 1; bpart.pno > 0; bpart.pno--)
153 ioctl_by_bdev(bdev, BLKPG, (unsigned long) &barg); 153 ioctl_by_bdev(bdev, BLKPG, (unsigned long) &barg);
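
The dasd_genhd cast gains __force because a kernel pointer is deliberately passed where a __user pointer is expected, which is safe here since ioctl_by_bdev() switches to the kernel address space for the call. A hedged sketch of the annotation:

    #include <linux/compiler.h>

    struct example_arg {
            int pno;
    };

    /* Sketch: intentional kernel->user pointer cast, made explicit so
     * sparse does not flag it as an accident. */
    static void __user *example_as_user_ptr(struct example_arg *karg)
    {
            return (void __force __user *)karg;
    }
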
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index fb725e3b08fe..a2cc69e11410 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -559,7 +559,6 @@ struct dasd_ccw_req *dasd_alloc_erp_request(char *, int, int,
559 struct dasd_device *); 559 struct dasd_device *);
560void dasd_free_erp_request(struct dasd_ccw_req *, struct dasd_device *); 560void dasd_free_erp_request(struct dasd_ccw_req *, struct dasd_device *);
561void dasd_log_sense(struct dasd_ccw_req *, struct irb *); 561void dasd_log_sense(struct dasd_ccw_req *, struct irb *);
562void dasd_log_ccw(struct dasd_ccw_req *, int, __u32);
563 562
564/* externals in dasd_3370_erp.c */ 563/* externals in dasd_3370_erp.c */
565dasd_era_t dasd_3370_erp_examine(struct dasd_ccw_req *, struct irb *); 564dasd_era_t dasd_3370_erp_examine(struct dasd_ccw_req *, struct irb *);
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
index bfa010f6dab2..8b7e11815d70 100644
--- a/drivers/s390/block/dasd_proc.c
+++ b/drivers/s390/block/dasd_proc.c
@@ -28,7 +28,7 @@ static struct proc_dir_entry *dasd_proc_root_entry = NULL;
28static struct proc_dir_entry *dasd_devices_entry = NULL; 28static struct proc_dir_entry *dasd_devices_entry = NULL;
29static struct proc_dir_entry *dasd_statistics_entry = NULL; 29static struct proc_dir_entry *dasd_statistics_entry = NULL;
30 30
31static inline char * 31static char *
32dasd_get_user_string(const char __user *user_buf, size_t user_len) 32dasd_get_user_string(const char __user *user_buf, size_t user_len)
33{ 33{
34 char *buffer; 34 char *buffer;
@@ -154,7 +154,7 @@ static struct file_operations dasd_devices_file_ops = {
154 .release = seq_release, 154 .release = seq_release,
155}; 155};
156 156
157static inline int 157static int
158dasd_calc_metrics(char *page, char **start, off_t off, 158dasd_calc_metrics(char *page, char **start, off_t off,
159 int count, int *eof, int len) 159 int count, int *eof, int len)
160{ 160{
@@ -167,8 +167,8 @@ dasd_calc_metrics(char *page, char **start, off_t off,
167 return len; 167 return len;
168} 168}
169 169
170static inline char * 170static char *
171dasd_statistics_array(char *str, int *array, int shift) 171dasd_statistics_array(char *str, unsigned int *array, int shift)
172{ 172{
173 int i; 173 int i;
174 174
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index be9b05347b4f..1340451ea408 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -102,7 +102,7 @@ dcssblk_release_segment(struct device *dev)
102 * device needs to be enqueued before the semaphore is 102 * device needs to be enqueued before the semaphore is
103 * freed. 103 * freed.
104 */ 104 */
105static inline int 105static int
106dcssblk_assign_free_minor(struct dcssblk_dev_info *dev_info) 106dcssblk_assign_free_minor(struct dcssblk_dev_info *dev_info)
107{ 107{
108 int minor, found; 108 int minor, found;
@@ -230,7 +230,7 @@ dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const ch
230 SEGMENT_SHARED); 230 SEGMENT_SHARED);
231 if (rc < 0) { 231 if (rc < 0) {
232 BUG_ON(rc == -EINVAL); 232 BUG_ON(rc == -EINVAL);
233 if (rc == -EIO || rc == -ENOENT) 233 if (rc != -EAGAIN)
234 goto removeseg; 234 goto removeseg;
235 } else { 235 } else {
236 dev_info->is_shared = 1; 236 dev_info->is_shared = 1;
@@ -253,7 +253,7 @@ dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const ch
253 SEGMENT_EXCLUSIVE); 253 SEGMENT_EXCLUSIVE);
254 if (rc < 0) { 254 if (rc < 0) {
255 BUG_ON(rc == -EINVAL); 255 BUG_ON(rc == -EINVAL);
256 if (rc == -EIO || rc == -ENOENT) 256 if (rc != -EAGAIN)
257 goto removeseg; 257 goto removeseg;
258 } else { 258 } else {
259 dev_info->is_shared = 0; 259 dev_info->is_shared = 0;
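Both hunks above widen the fatal-error test when switching a DCSS segment
between shared and exclusive mode: instead of removing the segment only on
-EIO or -ENOENT, every failure except -EAGAIN now takes the removeseg
path. Restated as a sketch (segment_modify_shared() is the assumed callee,
as elsewhere in this driver):

	rc = segment_modify_shared(dev_info->segment_name, SEGMENT_SHARED);
	if (rc < 0) {
		BUG_ON(rc == -EINVAL);	/* would be a caller bug */
		if (rc != -EAGAIN)	/* segment unusable: drop it */
			goto removeseg;
		/* -EAGAIN: segment busy, leave it registered */
	}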
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index c3e97b4fc186..293e667b50f2 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -2,7 +2,8 @@
2# S/390 character devices 2# S/390 character devices
3# 3#
4 4
5obj-y += ctrlchar.o keyboard.o defkeymap.o 5obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \
6 sclp_info.o
6 7
7obj-$(CONFIG_TN3270) += raw3270.o 8obj-$(CONFIG_TN3270) += raw3270.o
8obj-$(CONFIG_TN3270_CONSOLE) += con3270.o 9obj-$(CONFIG_TN3270_CONSOLE) += con3270.o
@@ -11,7 +12,6 @@ obj-$(CONFIG_TN3270_FS) += fs3270.o
11 12
12obj-$(CONFIG_TN3215) += con3215.o 13obj-$(CONFIG_TN3215) += con3215.o
13 14
14obj-$(CONFIG_SCLP) += sclp.o sclp_rw.o sclp_quiesce.o
15obj-$(CONFIG_SCLP_TTY) += sclp_tty.o 15obj-$(CONFIG_SCLP_TTY) += sclp_tty.o
16obj-$(CONFIG_SCLP_CONSOLE) += sclp_con.o 16obj-$(CONFIG_SCLP_CONSOLE) += sclp_con.o
17obj-$(CONFIG_SCLP_VT220_TTY) += sclp_vt220.o 17obj-$(CONFIG_SCLP_VT220_TTY) += sclp_vt220.o
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 25b5d7a66417..9a328f14a641 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -1121,7 +1121,7 @@ static const struct tty_operations tty3215_ops = {
1121 * 3215 tty registration code called from tty_init(). 1121 * 3215 tty registration code called from tty_init().
1122 * Most kernel services (incl. kmalloc) are available at this point. 1122 * Most kernel services (incl. kmalloc) are available at this point.
1123 */ 1123 */
1124int __init 1124static int __init
1125tty3215_init(void) 1125tty3215_init(void)
1126{ 1126{
1127 struct tty_driver *driver; 1127 struct tty_driver *driver;
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index 7566be890688..8e7f2d7633d6 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -69,8 +69,7 @@ static void con3270_update(struct con3270 *);
69/* 69/*
70 * Setup timeout for a device. On timeout trigger an update. 70 * Setup timeout for a device. On timeout trigger an update.
71 */ 71 */
72void 72static void con3270_set_timer(struct con3270 *cp, int expires)
73con3270_set_timer(struct con3270 *cp, int expires)
74{ 73{
75 if (expires == 0) { 74 if (expires == 0) {
76 if (timer_pending(&cp->timer)) 75 if (timer_pending(&cp->timer))
diff --git a/drivers/s390/char/defkeymap.c b/drivers/s390/char/defkeymap.c
index 17027d918cf7..564baca01b7c 100644
--- a/drivers/s390/char/defkeymap.c
+++ b/drivers/s390/char/defkeymap.c
@@ -5,6 +5,8 @@
5#include <linux/types.h> 5#include <linux/types.h>
6#include <linux/keyboard.h> 6#include <linux/keyboard.h>
7#include <linux/kd.h> 7#include <linux/kd.h>
8#include <linux/kbd_kern.h>
9#include <linux/kbd_diacr.h>
8 10
9u_short plain_map[NR_KEYS] = { 11u_short plain_map[NR_KEYS] = {
10 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 12 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index 0893d306ae80..e1a746269c4c 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -23,7 +23,7 @@
23#include "raw3270.h" 23#include "raw3270.h"
24#include "ctrlchar.h" 24#include "ctrlchar.h"
25 25
26struct raw3270_fn fs3270_fn; 26static struct raw3270_fn fs3270_fn;
27 27
28struct fs3270 { 28struct fs3270 {
29 struct raw3270_view view; 29 struct raw3270_view view;
@@ -401,7 +401,7 @@ fs3270_release(struct raw3270_view *view)
401} 401}
402 402
403/* View to a 3270 device. Can be console, tty or fullscreen. */ 403/* View to a 3270 device. Can be console, tty or fullscreen. */
404struct raw3270_fn fs3270_fn = { 404static struct raw3270_fn fs3270_fn = {
405 .activate = fs3270_activate, 405 .activate = fs3270_activate,
406 .deactivate = fs3270_deactivate, 406 .deactivate = fs3270_deactivate,
407 .intv = (void *) fs3270_irq, 407 .intv = (void *) fs3270_irq,
diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c
index 3e86fd1756e5..f62f9a4e8950 100644
--- a/drivers/s390/char/keyboard.c
+++ b/drivers/s390/char/keyboard.c
@@ -148,6 +148,7 @@ kbd_ascebc(struct kbd_data *kbd, unsigned char *ascebc)
148 } 148 }
149} 149}
150 150
151#if 0
151/* 152/*
152 * Generate ebcdic -> ascii translation table from kbd_data. 153 * Generate ebcdic -> ascii translation table from kbd_data.
153 */ 154 */
@@ -173,6 +174,7 @@ kbd_ebcasc(struct kbd_data *kbd, unsigned char *ebcasc)
173 } 174 }
174 } 175 }
175} 176}
177#endif
176 178
177/* 179/*
178 * We have a combining character DIACR here, followed by the character CH. 180 * We have a combining character DIACR here, followed by the character CH.
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index a138b1510093..3a1a958fb5f2 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Character device driver for reading z/VM *MONITOR service records. 4 * Character device driver for reading z/VM *MONITOR service records.
5 * 5 *
6 * Copyright (C) 2004 IBM Corporation, IBM Deutschland Entwicklung GmbH. 6 * Copyright 2004 IBM Corporation, IBM Deutschland Entwicklung GmbH.
7 * 7 *
8 * Author: Gerald Schaefer <geraldsc@de.ibm.com> 8 * Author: Gerald Schaefer <geraldsc@de.ibm.com>
9 */ 9 */
@@ -22,7 +22,7 @@
22#include <asm/ebcdic.h> 22#include <asm/ebcdic.h>
23#include <asm/extmem.h> 23#include <asm/extmem.h>
24#include <linux/poll.h> 24#include <linux/poll.h>
25#include "../net/iucv.h" 25#include <net/iucv/iucv.h>
26 26
27 27
28//#define MON_DEBUG /* Debug messages on/off */ 28//#define MON_DEBUG /* Debug messages on/off */
@@ -50,14 +50,13 @@ static char mon_dcss_name[9] = "MONDCSS\0";
50struct mon_msg { 50struct mon_msg {
51 u32 pos; 51 u32 pos;
52 u32 mca_offset; 52 u32 mca_offset;
53 iucv_MessagePending local_eib; 53 struct iucv_message msg;
54 char msglim_reached; 54 char msglim_reached;
55 char replied_msglim; 55 char replied_msglim;
56}; 56};
57 57
58struct mon_private { 58struct mon_private {
59 u16 pathid; 59 struct iucv_path *path;
60 iucv_handle_t iucv_handle;
61 struct mon_msg *msg_array[MON_MSGLIM]; 60 struct mon_msg *msg_array[MON_MSGLIM];
62 unsigned int write_index; 61 unsigned int write_index;
63 unsigned int read_index; 62 unsigned int read_index;
@@ -75,8 +74,6 @@ static unsigned long mon_dcss_end;
75static DECLARE_WAIT_QUEUE_HEAD(mon_read_wait_queue); 74static DECLARE_WAIT_QUEUE_HEAD(mon_read_wait_queue);
76static DECLARE_WAIT_QUEUE_HEAD(mon_conn_wait_queue); 75static DECLARE_WAIT_QUEUE_HEAD(mon_conn_wait_queue);
77 76
78static u8 iucv_host[8] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
79
80static u8 user_data_connect[16] = { 77static u8 user_data_connect[16] = {
81 /* Version code, must be 0x01 for shared mode */ 78 /* Version code, must be 0x01 for shared mode */
82 0x01, 79 0x01,
@@ -100,8 +97,7 @@ static u8 user_data_sever[16] = {
100 * Create the 8-byte EBCDIC DCSS segment name from 97 * Create the 8-byte EBCDIC DCSS segment name from
101 * an ASCII name, incl. padding 98 * an ASCII name, incl. padding
102 */ 99 */
103static inline void 100static inline void dcss_mkname(char *ascii_name, char *ebcdic_name)
104dcss_mkname(char *ascii_name, char *ebcdic_name)
105{ 101{
106 int i; 102 int i;
107 103
@@ -119,8 +115,7 @@ dcss_mkname(char *ascii_name, char *ebcdic_name)
119 * print appropriate error message for segment_load()/segment_type() 115 * print appropriate error message for segment_load()/segment_type()
120 * return code 116 * return code
121 */ 117 */
122static void 118static void mon_segment_warn(int rc, char* seg_name)
123mon_segment_warn(int rc, char* seg_name)
124{ 119{
125 switch (rc) { 120 switch (rc) {
126 case -ENOENT: 121 case -ENOENT:
@@ -166,44 +161,37 @@ mon_segment_warn(int rc, char* seg_name)
166 } 161 }
167} 162}
168 163
169static inline unsigned long 164static inline unsigned long mon_mca_start(struct mon_msg *monmsg)
170mon_mca_start(struct mon_msg *monmsg)
171{ 165{
172 return monmsg->local_eib.ln1msg1.iprmmsg1_u32; 166 return *(u32 *) &monmsg->msg.rmmsg;
173} 167}
174 168
175static inline unsigned long 169static inline unsigned long mon_mca_end(struct mon_msg *monmsg)
176mon_mca_end(struct mon_msg *monmsg)
177{ 170{
178 return monmsg->local_eib.ln1msg2.ipbfln1f; 171 return *(u32 *) &monmsg->msg.rmmsg[4];
179} 172}
180 173
181static inline u8 174static inline u8 mon_mca_type(struct mon_msg *monmsg, u8 index)
182mon_mca_type(struct mon_msg *monmsg, u8 index)
183{ 175{
184 return *((u8 *) mon_mca_start(monmsg) + monmsg->mca_offset + index); 176 return *((u8 *) mon_mca_start(monmsg) + monmsg->mca_offset + index);
185} 177}
186 178
187static inline u32 179static inline u32 mon_mca_size(struct mon_msg *monmsg)
188mon_mca_size(struct mon_msg *monmsg)
189{ 180{
190 return mon_mca_end(monmsg) - mon_mca_start(monmsg) + 1; 181 return mon_mca_end(monmsg) - mon_mca_start(monmsg) + 1;
191} 182}
192 183
193static inline u32 184static inline u32 mon_rec_start(struct mon_msg *monmsg)
194mon_rec_start(struct mon_msg *monmsg)
195{ 185{
196 return *((u32 *) (mon_mca_start(monmsg) + monmsg->mca_offset + 4)); 186 return *((u32 *) (mon_mca_start(monmsg) + monmsg->mca_offset + 4));
197} 187}
198 188
199static inline u32 189static inline u32 mon_rec_end(struct mon_msg *monmsg)
200mon_rec_end(struct mon_msg *monmsg)
201{ 190{
202 return *((u32 *) (mon_mca_start(monmsg) + monmsg->mca_offset + 8)); 191 return *((u32 *) (mon_mca_start(monmsg) + monmsg->mca_offset + 8));
203} 192}
204 193
205static inline int 194static inline int mon_check_mca(struct mon_msg *monmsg)
206mon_check_mca(struct mon_msg *monmsg)
207{ 195{
208 if ((mon_rec_end(monmsg) <= mon_rec_start(monmsg)) || 196 if ((mon_rec_end(monmsg) <= mon_rec_start(monmsg)) ||
209 (mon_rec_start(monmsg) < mon_dcss_start) || 197 (mon_rec_start(monmsg) < mon_dcss_start) ||
@@ -221,20 +209,17 @@ mon_check_mca(struct mon_msg *monmsg)
221 return 0; 209 return 0;
222} 210}
223 211
224static inline int 212static inline int mon_send_reply(struct mon_msg *monmsg,
225mon_send_reply(struct mon_msg *monmsg, struct mon_private *monpriv) 213 struct mon_private *monpriv)
226{ 214{
227 u8 prmmsg[8];
228 int rc; 215 int rc;
229 216
230 P_DEBUG("read, REPLY: pathid = 0x%04X, msgid = 0x%08X, trgcls = " 217 P_DEBUG("read, REPLY: pathid = 0x%04X, msgid = 0x%08X, trgcls = "
231 "0x%08X\n\n", 218 "0x%08X\n\n",
232 monmsg->local_eib.ippathid, monmsg->local_eib.ipmsgid, 219 monpriv->path->pathid, monmsg->msg.id, monmsg->msg.class);
233 monmsg->local_eib.iptrgcls); 220
234 rc = iucv_reply_prmmsg(monmsg->local_eib.ippathid, 221 rc = iucv_message_reply(monpriv->path, &monmsg->msg,
235 monmsg->local_eib.ipmsgid, 222 IUCV_IPRMDATA, NULL, 0);
236 monmsg->local_eib.iptrgcls,
237 0, prmmsg);
238 atomic_dec(&monpriv->msglim_count); 223 atomic_dec(&monpriv->msglim_count);
239 if (likely(!monmsg->msglim_reached)) { 224 if (likely(!monmsg->msglim_reached)) {
240 monmsg->pos = 0; 225 monmsg->pos = 0;
@@ -251,10 +236,19 @@ mon_send_reply(struct mon_msg *monmsg, struct mon_private *monpriv)
251 return 0; 236 return 0;
252} 237}
253 238
254static inline struct mon_private * 239static inline void mon_free_mem(struct mon_private *monpriv)
255mon_alloc_mem(void) 240{
241 int i;
242
243 for (i = 0; i < MON_MSGLIM; i++)
244 if (monpriv->msg_array[i])
245 kfree(monpriv->msg_array[i]);
246 kfree(monpriv);
247}
248
249static inline struct mon_private *mon_alloc_mem(void)
256{ 250{
257 int i,j; 251 int i;
258 struct mon_private *monpriv; 252 struct mon_private *monpriv;
259 253
260 monpriv = kzalloc(sizeof(struct mon_private), GFP_KERNEL); 254 monpriv = kzalloc(sizeof(struct mon_private), GFP_KERNEL);
@@ -267,16 +261,15 @@ mon_alloc_mem(void)
267 GFP_KERNEL); 261 GFP_KERNEL);
268 if (!monpriv->msg_array[i]) { 262 if (!monpriv->msg_array[i]) {
269 P_ERROR("open, no memory for msg_array\n"); 263 P_ERROR("open, no memory for msg_array\n");
270 for (j = 0; j < i; j++) 264 mon_free_mem(monpriv);
271 kfree(monpriv->msg_array[j]);
272 return NULL; 265 return NULL;
273 } 266 }
274 } 267 }
275 return monpriv; 268 return monpriv;
276} 269}
277 270
278static inline void 271static inline void mon_read_debug(struct mon_msg *monmsg,
279mon_read_debug(struct mon_msg *monmsg, struct mon_private *monpriv) 272 struct mon_private *monpriv)
280{ 273{
281#ifdef MON_DEBUG 274#ifdef MON_DEBUG
282 u8 msg_type[2], mca_type; 275 u8 msg_type[2], mca_type;
@@ -284,7 +277,7 @@ mon_read_debug(struct mon_msg *monmsg, struct mon_private *monpriv)
284 277
285 records_len = mon_rec_end(monmsg) - mon_rec_start(monmsg) + 1; 278 records_len = mon_rec_end(monmsg) - mon_rec_start(monmsg) + 1;
286 279
287 memcpy(msg_type, &monmsg->local_eib.iptrgcls, 2); 280 memcpy(msg_type, &monmsg->msg.class, 2);
288 EBCASC(msg_type, 2); 281 EBCASC(msg_type, 2);
289 mca_type = mon_mca_type(monmsg, 0); 282 mca_type = mon_mca_type(monmsg, 0);
290 EBCASC(&mca_type, 1); 283 EBCASC(&mca_type, 1);
@@ -292,8 +285,7 @@ mon_read_debug(struct mon_msg *monmsg, struct mon_private *monpriv)
292 P_DEBUG("read, mon_read_index = %i, mon_write_index = %i\n", 285 P_DEBUG("read, mon_read_index = %i, mon_write_index = %i\n",
293 monpriv->read_index, monpriv->write_index); 286 monpriv->read_index, monpriv->write_index);
294 P_DEBUG("read, pathid = 0x%04X, msgid = 0x%08X, trgcls = 0x%08X\n", 287 P_DEBUG("read, pathid = 0x%04X, msgid = 0x%08X, trgcls = 0x%08X\n",
295 monmsg->local_eib.ippathid, monmsg->local_eib.ipmsgid, 288 monpriv->path->pathid, monmsg->msg.id, monmsg->msg.class);
296 monmsg->local_eib.iptrgcls);
297 P_DEBUG("read, msg_type = '%c%c', mca_type = '%c' / 0x%X / 0x%X\n", 289 P_DEBUG("read, msg_type = '%c%c', mca_type = '%c' / 0x%X / 0x%X\n",
298 msg_type[0], msg_type[1], mca_type ? mca_type : 'X', 290 msg_type[0], msg_type[1], mca_type ? mca_type : 'X',
299 mon_mca_type(monmsg, 1), mon_mca_type(monmsg, 2)); 291 mon_mca_type(monmsg, 1), mon_mca_type(monmsg, 2));
@@ -306,8 +298,7 @@ mon_read_debug(struct mon_msg *monmsg, struct mon_private *monpriv)
306#endif 298#endif
307} 299}
308 300
309static inline void 301static inline void mon_next_mca(struct mon_msg *monmsg)
310mon_next_mca(struct mon_msg *monmsg)
311{ 302{
312 if (likely((mon_mca_size(monmsg) - monmsg->mca_offset) == 12)) 303 if (likely((mon_mca_size(monmsg) - monmsg->mca_offset) == 12))
313 return; 304 return;
@@ -316,8 +307,7 @@ mon_next_mca(struct mon_msg *monmsg)
316 monmsg->pos = 0; 307 monmsg->pos = 0;
317} 308}
318 309
319static inline struct mon_msg * 310static inline struct mon_msg *mon_next_message(struct mon_private *monpriv)
320mon_next_message(struct mon_private *monpriv)
321{ 311{
322 struct mon_msg *monmsg; 312 struct mon_msg *monmsg;
323 313
@@ -342,39 +332,37 @@ mon_next_message(struct mon_private *monpriv)
342/****************************************************************************** 332/******************************************************************************
343 * IUCV handler * 333 * IUCV handler *
344 *****************************************************************************/ 334 *****************************************************************************/
345static void 335static void mon_iucv_path_complete(struct iucv_path *path, u8 ipuser[16])
346mon_iucv_ConnectionComplete(iucv_ConnectionComplete *eib, void *pgm_data)
347{ 336{
348 struct mon_private *monpriv = (struct mon_private *) pgm_data; 337 struct mon_private *monpriv = path->private;
349 338
350 P_DEBUG("IUCV connection completed\n"); 339 P_DEBUG("IUCV connection completed\n");
351 P_DEBUG("IUCV ACCEPT (from *MONITOR): Version = 0x%02X, Event = " 340 P_DEBUG("IUCV ACCEPT (from *MONITOR): Version = 0x%02X, Event = "
352 "0x%02X, Sample = 0x%02X\n", 341 "0x%02X, Sample = 0x%02X\n",
353 eib->ipuser[0], eib->ipuser[1], eib->ipuser[2]); 342 ipuser[0], ipuser[1], ipuser[2]);
354 atomic_set(&monpriv->iucv_connected, 1); 343 atomic_set(&monpriv->iucv_connected, 1);
355 wake_up(&mon_conn_wait_queue); 344 wake_up(&mon_conn_wait_queue);
356} 345}
357 346
358static void 347static void mon_iucv_path_severed(struct iucv_path *path, u8 ipuser[16])
359mon_iucv_ConnectionSevered(iucv_ConnectionSevered *eib, void *pgm_data)
360{ 348{
361 struct mon_private *monpriv = (struct mon_private *) pgm_data; 349 struct mon_private *monpriv = path->private;
362 350
363 P_ERROR("IUCV connection severed with rc = 0x%X\n", 351 P_ERROR("IUCV connection severed with rc = 0x%X\n", ipuser[0]);
364 (u8) eib->ipuser[0]); 352 iucv_path_sever(path, NULL);
365 atomic_set(&monpriv->iucv_severed, 1); 353 atomic_set(&monpriv->iucv_severed, 1);
366 wake_up(&mon_conn_wait_queue); 354 wake_up(&mon_conn_wait_queue);
367 wake_up_interruptible(&mon_read_wait_queue); 355 wake_up_interruptible(&mon_read_wait_queue);
368} 356}
369 357
370static void 358static void mon_iucv_message_pending(struct iucv_path *path,
371mon_iucv_MessagePending(iucv_MessagePending *eib, void *pgm_data) 359 struct iucv_message *msg)
372{ 360{
373 struct mon_private *monpriv = (struct mon_private *) pgm_data; 361 struct mon_private *monpriv = path->private;
374 362
375 P_DEBUG("IUCV message pending\n"); 363 P_DEBUG("IUCV message pending\n");
376 memcpy(&monpriv->msg_array[monpriv->write_index]->local_eib, eib, 364 memcpy(&monpriv->msg_array[monpriv->write_index]->msg,
377 sizeof(iucv_MessagePending)); 365 msg, sizeof(*msg));
378 if (atomic_inc_return(&monpriv->msglim_count) == MON_MSGLIM) { 366 if (atomic_inc_return(&monpriv->msglim_count) == MON_MSGLIM) {
379 P_WARNING("IUCV message pending, message limit (%i) reached\n", 367 P_WARNING("IUCV message pending, message limit (%i) reached\n",
380 MON_MSGLIM); 368 MON_MSGLIM);
@@ -385,54 +373,45 @@ mon_iucv_MessagePending(iucv_MessagePending *eib, void *pgm_data)
385 wake_up_interruptible(&mon_read_wait_queue); 373 wake_up_interruptible(&mon_read_wait_queue);
386} 374}
387 375
388static iucv_interrupt_ops_t mon_iucvops = { 376static struct iucv_handler monreader_iucv_handler = {
389 .ConnectionComplete = mon_iucv_ConnectionComplete, 377 .path_complete = mon_iucv_path_complete,
390 .ConnectionSevered = mon_iucv_ConnectionSevered, 378 .path_severed = mon_iucv_path_severed,
391 .MessagePending = mon_iucv_MessagePending, 379 .message_pending = mon_iucv_message_pending,
392}; 380};
393 381
394/****************************************************************************** 382/******************************************************************************
395 * file operations * 383 * file operations *
396 *****************************************************************************/ 384 *****************************************************************************/
397static int 385static int mon_open(struct inode *inode, struct file *filp)
398mon_open(struct inode *inode, struct file *filp)
399{ 386{
400 int rc, i;
401 struct mon_private *monpriv; 387 struct mon_private *monpriv;
388 int rc;
402 389
403 /* 390 /*
404 * only one user allowed 391 * only one user allowed
405 */ 392 */
393 rc = -EBUSY;
406 if (test_and_set_bit(MON_IN_USE, &mon_in_use)) 394 if (test_and_set_bit(MON_IN_USE, &mon_in_use))
407 return -EBUSY; 395 goto out;
408 396
397 rc = -ENOMEM;
409 monpriv = mon_alloc_mem(); 398 monpriv = mon_alloc_mem();
410 if (!monpriv) 399 if (!monpriv)
411 return -ENOMEM; 400 goto out_use;
412 401
413 /* 402 /*
414 * Register with IUCV and connect to *MONITOR service 403 * Connect to *MONITOR service
415 */ 404 */
416 monpriv->iucv_handle = iucv_register_program("my_monreader ", 405 monpriv->path = iucv_path_alloc(MON_MSGLIM, IUCV_IPRMDATA, GFP_KERNEL);
417 MON_SERVICE, 406 if (!monpriv->path)
418 NULL, 407 goto out_priv;
419 &mon_iucvops, 408 rc = iucv_path_connect(monpriv->path, &monreader_iucv_handler,
420 monpriv); 409 MON_SERVICE, NULL, user_data_connect, monpriv);
421 if (!monpriv->iucv_handle) {
422 P_ERROR("failed to register with iucv driver\n");
423 rc = -EIO;
424 goto out_error;
425 }
426 P_INFO("open, registered with IUCV\n");
427
428 rc = iucv_connect(&monpriv->pathid, MON_MSGLIM, user_data_connect,
429 MON_SERVICE, iucv_host, IPRMDATA, NULL, NULL,
430 monpriv->iucv_handle, NULL);
431 if (rc) { 410 if (rc) {
432 P_ERROR("iucv connection to *MONITOR failed with " 411 P_ERROR("iucv connection to *MONITOR failed with "
433 "IPUSER SEVER code = %i\n", rc); 412 "IPUSER SEVER code = %i\n", rc);
434 rc = -EIO; 413 rc = -EIO;
435 goto out_unregister; 414 goto out_path;
436 } 415 }
437 /* 416 /*
438 * Wait for connection confirmation 417 * Wait for connection confirmation
@@ -444,24 +423,23 @@ mon_open(struct inode *inode, struct file *filp)
444 atomic_set(&monpriv->iucv_severed, 0); 423 atomic_set(&monpriv->iucv_severed, 0);
445 atomic_set(&monpriv->iucv_connected, 0); 424 atomic_set(&monpriv->iucv_connected, 0);
446 rc = -EIO; 425 rc = -EIO;
447 goto out_unregister; 426 goto out_path;
448 } 427 }
449 P_INFO("open, established connection to *MONITOR service\n\n"); 428 P_INFO("open, established connection to *MONITOR service\n\n");
450 filp->private_data = monpriv; 429 filp->private_data = monpriv;
451 return nonseekable_open(inode, filp); 430 return nonseekable_open(inode, filp);
452 431
453out_unregister: 432out_path:
454 iucv_unregister_program(monpriv->iucv_handle); 433 kfree(monpriv->path);
455out_error: 434out_priv:
456 for (i = 0; i < MON_MSGLIM; i++) 435 mon_free_mem(monpriv);
457 kfree(monpriv->msg_array[i]); 436out_use:
458 kfree(monpriv);
459 clear_bit(MON_IN_USE, &mon_in_use); 437 clear_bit(MON_IN_USE, &mon_in_use);
438out:
460 return rc; 439 return rc;
461} 440}
462 441
463static int 442static int mon_close(struct inode *inode, struct file *filp)
464mon_close(struct inode *inode, struct file *filp)
465{ 443{
466 int rc, i; 444 int rc, i;
467 struct mon_private *monpriv = filp->private_data; 445 struct mon_private *monpriv = filp->private_data;
@@ -469,18 +447,12 @@ mon_close(struct inode *inode, struct file *filp)
469 /* 447 /*
470 * Close IUCV connection and unregister 448 * Close IUCV connection and unregister
471 */ 449 */
472 rc = iucv_sever(monpriv->pathid, user_data_sever); 450 rc = iucv_path_sever(monpriv->path, user_data_sever);
473 if (rc) 451 if (rc)
474 P_ERROR("close, iucv_sever failed with rc = %i\n", rc); 452 P_ERROR("close, iucv_sever failed with rc = %i\n", rc);
475 else 453 else
476 P_INFO("close, terminated connection to *MONITOR service\n"); 454 P_INFO("close, terminated connection to *MONITOR service\n");
477 455
478 rc = iucv_unregister_program(monpriv->iucv_handle);
479 if (rc)
480 P_ERROR("close, iucv_unregister failed with rc = %i\n", rc);
481 else
482 P_INFO("close, unregistered with IUCV\n");
483
484 atomic_set(&monpriv->iucv_severed, 0); 456 atomic_set(&monpriv->iucv_severed, 0);
485 atomic_set(&monpriv->iucv_connected, 0); 457 atomic_set(&monpriv->iucv_connected, 0);
486 atomic_set(&monpriv->read_ready, 0); 458 atomic_set(&monpriv->read_ready, 0);
@@ -495,8 +467,8 @@ mon_close(struct inode *inode, struct file *filp)
495 return 0; 467 return 0;
496} 468}
497 469
498static ssize_t 470static ssize_t mon_read(struct file *filp, char __user *data,
499mon_read(struct file *filp, char __user *data, size_t count, loff_t *ppos) 471 size_t count, loff_t *ppos)
500{ 472{
501 struct mon_private *monpriv = filp->private_data; 473 struct mon_private *monpriv = filp->private_data;
502 struct mon_msg *monmsg; 474 struct mon_msg *monmsg;
@@ -563,8 +535,7 @@ out_copy:
563 return count; 535 return count;
564} 536}
565 537
566static unsigned int 538static unsigned int mon_poll(struct file *filp, struct poll_table_struct *p)
567mon_poll(struct file *filp, struct poll_table_struct *p)
568{ 539{
569 struct mon_private *monpriv = filp->private_data; 540 struct mon_private *monpriv = filp->private_data;
570 541
@@ -593,8 +564,7 @@ static struct miscdevice mon_dev = {
593/****************************************************************************** 564/******************************************************************************
594 * module init/exit * 565 * module init/exit *
595 *****************************************************************************/ 566 *****************************************************************************/
596static int __init 567static int __init mon_init(void)
597mon_init(void)
598{ 568{
599 int rc; 569 int rc;
600 570
@@ -603,22 +573,34 @@ mon_init(void)
603 return -ENODEV; 573 return -ENODEV;
604 } 574 }
605 575
576 /*
577 * Register with IUCV and connect to *MONITOR service
578 */
579 rc = iucv_register(&monreader_iucv_handler, 1);
580 if (rc) {
581 P_ERROR("failed to register with iucv driver\n");
582 return rc;
583 }
584 P_INFO("open, registered with IUCV\n");
585
606 rc = segment_type(mon_dcss_name); 586 rc = segment_type(mon_dcss_name);
607 if (rc < 0) { 587 if (rc < 0) {
608 mon_segment_warn(rc, mon_dcss_name); 588 mon_segment_warn(rc, mon_dcss_name);
609 return rc; 589 goto out_iucv;
610 } 590 }
611 if (rc != SEG_TYPE_SC) { 591 if (rc != SEG_TYPE_SC) {
612 P_ERROR("segment %s has unsupported type, should be SC\n", 592 P_ERROR("segment %s has unsupported type, should be SC\n",
613 mon_dcss_name); 593 mon_dcss_name);
614 return -EINVAL; 594 rc = -EINVAL;
595 goto out_iucv;
615 } 596 }
616 597
617 rc = segment_load(mon_dcss_name, SEGMENT_SHARED, 598 rc = segment_load(mon_dcss_name, SEGMENT_SHARED,
618 &mon_dcss_start, &mon_dcss_end); 599 &mon_dcss_start, &mon_dcss_end);
619 if (rc < 0) { 600 if (rc < 0) {
620 mon_segment_warn(rc, mon_dcss_name); 601 mon_segment_warn(rc, mon_dcss_name);
621 return -EINVAL; 602 rc = -EINVAL;
603 goto out_iucv;
622 } 604 }
623 dcss_mkname(mon_dcss_name, &user_data_connect[8]); 605 dcss_mkname(mon_dcss_name, &user_data_connect[8]);
624 606
@@ -634,14 +616,16 @@ mon_init(void)
634 616
635out: 617out:
636 segment_unload(mon_dcss_name); 618 segment_unload(mon_dcss_name);
619out_iucv:
620 iucv_unregister(&monreader_iucv_handler, 1);
637 return rc; 621 return rc;
638} 622}
639 623
640static void __exit 624static void __exit mon_exit(void)
641mon_exit(void)
642{ 625{
643 segment_unload(mon_dcss_name); 626 segment_unload(mon_dcss_name);
644 WARN_ON(misc_deregister(&mon_dev) != 0); 627 WARN_ON(misc_deregister(&mon_dev) != 0);
628 iucv_unregister(&monreader_iucv_handler, 1);
645 return; 629 return;
646} 630}
647 631
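The monreader conversion above moves the driver from the old s390-private
IUCV interface (iucv_register_program()/iucv_connect() with an
iucv_interrupt_ops_t callback table and raw pathids) to the common kernel
IUCV API from <net/iucv/iucv.h>. The essential shape of the new API,
reduced to a sketch (MON_MSGLIM, MON_SERVICE and user_data_connect are the
driver's own names; error unwinding trimmed):

	#include <linux/gfp.h>
	#include <net/iucv/iucv.h>

	static void my_path_complete(struct iucv_path *path, u8 ipuser[16]) { }
	static void my_path_severed(struct iucv_path *path, u8 ipuser[16]) { }
	static void my_message_pending(struct iucv_path *path,
				       struct iucv_message *msg) { }

	static struct iucv_handler my_handler = {
		.path_complete	 = my_path_complete,
		.path_severed	 = my_path_severed,
		.message_pending = my_message_pending,
	};

	static int my_connect(void *priv)
	{
		struct iucv_path *path;
		int rc;

		rc = iucv_register(&my_handler, 1);	/* once, at init */
		if (rc)
			return rc;
		path = iucv_path_alloc(MON_MSGLIM, IUCV_IPRMDATA, GFP_KERNEL);
		if (!path)
			return -ENOMEM;
		/* priv is handed back as path->private in the callbacks */
		return iucv_path_connect(path, &my_handler, MON_SERVICE,
					 NULL, user_data_connect, priv);
	}

Replies and teardown follow the same pattern: iucv_message_reply(path,
&msg, IUCV_IPRMDATA, NULL, 0) answers a pending message, iucv_path_sever()
closes the path, and iucv_unregister(&my_handler, 1) drops the handler.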
diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c
index cdb24f528112..9e451acc6491 100644
--- a/drivers/s390/char/monwriter.c
+++ b/drivers/s390/char/monwriter.c
@@ -67,8 +67,8 @@ static int monwrite_diag(struct monwrite_hdr *myhdr, char *buffer, int fcn)
67 return -EINVAL; 67 return -EINVAL;
68} 68}
69 69
70static inline struct mon_buf *monwrite_find_hdr(struct mon_private *monpriv, 70static struct mon_buf *monwrite_find_hdr(struct mon_private *monpriv,
71 struct monwrite_hdr *monhdr) 71 struct monwrite_hdr *monhdr)
72{ 72{
73 struct mon_buf *entry, *next; 73 struct mon_buf *entry, *next;
74 74
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 7a84014f2037..8facd14adb7c 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -29,7 +29,7 @@
29#include <linux/device.h> 29#include <linux/device.h>
30#include <linux/mutex.h> 30#include <linux/mutex.h>
31 31
32struct class *class3270; 32static struct class *class3270;
33 33
34/* The main 3270 data structure. */ 34/* The main 3270 data structure. */
35struct raw3270 { 35struct raw3270 {
@@ -86,7 +86,7 @@ DECLARE_WAIT_QUEUE_HEAD(raw3270_wait_queue);
86/* 86/*
87 * Encode array for 12 bit 3270 addresses. 87 * Encode array for 12 bit 3270 addresses.
88 */ 88 */
89unsigned char raw3270_ebcgraf[64] = { 89static unsigned char raw3270_ebcgraf[64] = {
90 0x40, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 90 0x40, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
91 0xc8, 0xc9, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 91 0xc8, 0xc9, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
92 0x50, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 92 0x50, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7,
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index 8a056df09d6b..f171de3b0b11 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -59,7 +59,8 @@ static volatile enum sclp_init_state_t {
59/* Internal state: is a request active at the sclp? */ 59/* Internal state: is a request active at the sclp? */
60static volatile enum sclp_running_state_t { 60static volatile enum sclp_running_state_t {
61 sclp_running_state_idle, 61 sclp_running_state_idle,
62 sclp_running_state_running 62 sclp_running_state_running,
63 sclp_running_state_reset_pending
63} sclp_running_state = sclp_running_state_idle; 64} sclp_running_state = sclp_running_state_idle;
64 65
65/* Internal state: is a read request pending? */ 66/* Internal state: is a read request pending? */
@@ -88,15 +89,15 @@ static volatile enum sclp_mask_state_t {
88 89
89/* Timeout intervals in seconds.*/ 90/* Timeout intervals in seconds.*/
90#define SCLP_BUSY_INTERVAL 10 91#define SCLP_BUSY_INTERVAL 10
91#define SCLP_RETRY_INTERVAL 15 92#define SCLP_RETRY_INTERVAL 30
92 93
93static void sclp_process_queue(void); 94static void sclp_process_queue(void);
94static int sclp_init_mask(int calculate); 95static int sclp_init_mask(int calculate);
95static int sclp_init(void); 96static int sclp_init(void);
96 97
97/* Perform service call. Return 0 on success, non-zero otherwise. */ 98/* Perform service call. Return 0 on success, non-zero otherwise. */
98static int 99int
99service_call(sclp_cmdw_t command, void *sccb) 100sclp_service_call(sclp_cmdw_t command, void *sccb)
100{ 101{
101 int cc; 102 int cc;
102 103
@@ -113,19 +114,17 @@ service_call(sclp_cmdw_t command, void *sccb)
113 return 0; 114 return 0;
114} 115}
115 116
116/* Request timeout handler. Restart the request queue. If DATA is non-zero, 117static inline void __sclp_make_read_req(void);
117 * force restart of running request. */ 118
118static void 119static void
119sclp_request_timeout(unsigned long data) 120__sclp_queue_read_req(void)
120{ 121{
121 unsigned long flags; 122 if (sclp_reading_state == sclp_reading_state_idle) {
122 123 sclp_reading_state = sclp_reading_state_reading;
123 if (data) { 124 __sclp_make_read_req();
124 spin_lock_irqsave(&sclp_lock, flags); 125 /* Add request to head of queue */
125 sclp_running_state = sclp_running_state_idle; 126 list_add(&sclp_read_req.list, &sclp_req_queue);
126 spin_unlock_irqrestore(&sclp_lock, flags);
127 } 127 }
128 sclp_process_queue();
129} 128}
130 129
131/* Set up request retry timer. Called while sclp_lock is locked. */ 130/* Set up request retry timer. Called while sclp_lock is locked. */
@@ -140,6 +139,29 @@ __sclp_set_request_timer(unsigned long time, void (*function)(unsigned long),
140 add_timer(&sclp_request_timer); 139 add_timer(&sclp_request_timer);
141} 140}
142 141
142/* Request timeout handler. Restart the request queue. If DATA is non-zero,
143 * force restart of running request. */
144static void
145sclp_request_timeout(unsigned long data)
146{
147 unsigned long flags;
148
149 spin_lock_irqsave(&sclp_lock, flags);
150 if (data) {
151 if (sclp_running_state == sclp_running_state_running) {
152 /* Break running state and queue NOP read event request
153 * to get a defined interface state. */
154 __sclp_queue_read_req();
155 sclp_running_state = sclp_running_state_idle;
156 }
157 } else {
158 __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
159 sclp_request_timeout, 0);
160 }
161 spin_unlock_irqrestore(&sclp_lock, flags);
162 sclp_process_queue();
163}
164
143/* Try to start a request. Return zero if the request was successfully 165/* Try to start a request. Return zero if the request was successfully
144 * started or if it will be started at a later time. Return non-zero otherwise. 166 * started or if it will be started at a later time. Return non-zero otherwise.
145 * Called while sclp_lock is locked. */ 167 * Called while sclp_lock is locked. */
@@ -151,7 +173,7 @@ __sclp_start_request(struct sclp_req *req)
151 if (sclp_running_state != sclp_running_state_idle) 173 if (sclp_running_state != sclp_running_state_idle)
152 return 0; 174 return 0;
153 del_timer(&sclp_request_timer); 175 del_timer(&sclp_request_timer);
154 rc = service_call(req->command, req->sccb); 176 rc = sclp_service_call(req->command, req->sccb);
155 req->start_count++; 177 req->start_count++;
156 178
157 if (rc == 0) { 179 if (rc == 0) {
@@ -191,7 +213,15 @@ sclp_process_queue(void)
191 rc = __sclp_start_request(req); 213 rc = __sclp_start_request(req);
192 if (rc == 0) 214 if (rc == 0)
193 break; 215 break;
194 /* Request failed. */ 216 /* Request failed */
217 if (req->start_count > 1) {
218 /* Cannot abort already submitted request - could still
219 * be active at the SCLP */
220 __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
221 sclp_request_timeout, 0);
222 break;
223 }
224 /* Post-processing for aborted request */
195 list_del(&req->list); 225 list_del(&req->list);
196 if (req->callback) { 226 if (req->callback) {
197 spin_unlock_irqrestore(&sclp_lock, flags); 227 spin_unlock_irqrestore(&sclp_lock, flags);
@@ -221,7 +251,8 @@ sclp_add_request(struct sclp_req *req)
221 list_add_tail(&req->list, &sclp_req_queue); 251 list_add_tail(&req->list, &sclp_req_queue);
222 rc = 0; 252 rc = 0;
223 /* Start if request is first in list */ 253 /* Start if request is first in list */
224 if (req->list.prev == &sclp_req_queue) { 254 if (sclp_running_state == sclp_running_state_idle &&
255 req->list.prev == &sclp_req_queue) {
225 rc = __sclp_start_request(req); 256 rc = __sclp_start_request(req);
226 if (rc) 257 if (rc)
227 list_del(&req->list); 258 list_del(&req->list);
@@ -294,7 +325,7 @@ __sclp_make_read_req(void)
294 sccb = (struct sccb_header *) sclp_read_sccb; 325 sccb = (struct sccb_header *) sclp_read_sccb;
295 clear_page(sccb); 326 clear_page(sccb);
296 memset(&sclp_read_req, 0, sizeof(struct sclp_req)); 327 memset(&sclp_read_req, 0, sizeof(struct sclp_req));
297 sclp_read_req.command = SCLP_CMDW_READDATA; 328 sclp_read_req.command = SCLP_CMDW_READ_EVENT_DATA;
298 sclp_read_req.status = SCLP_REQ_QUEUED; 329 sclp_read_req.status = SCLP_REQ_QUEUED;
299 sclp_read_req.start_count = 0; 330 sclp_read_req.start_count = 0;
300 sclp_read_req.callback = sclp_read_cb; 331 sclp_read_req.callback = sclp_read_cb;
@@ -334,6 +365,8 @@ sclp_interrupt_handler(__u16 code)
334 finished_sccb = S390_lowcore.ext_params & 0xfffffff8; 365 finished_sccb = S390_lowcore.ext_params & 0xfffffff8;
335 evbuf_pending = S390_lowcore.ext_params & 0x3; 366 evbuf_pending = S390_lowcore.ext_params & 0x3;
336 if (finished_sccb) { 367 if (finished_sccb) {
368 del_timer(&sclp_request_timer);
369 sclp_running_state = sclp_running_state_reset_pending;
337 req = __sclp_find_req(finished_sccb); 370 req = __sclp_find_req(finished_sccb);
338 if (req) { 371 if (req) {
339 /* Request post-processing */ 372 /* Request post-processing */
@@ -348,13 +381,8 @@ sclp_interrupt_handler(__u16 code)
348 sclp_running_state = sclp_running_state_idle; 381 sclp_running_state = sclp_running_state_idle;
349 } 382 }
350 if (evbuf_pending && sclp_receive_mask != 0 && 383 if (evbuf_pending && sclp_receive_mask != 0 &&
351 sclp_reading_state == sclp_reading_state_idle && 384 sclp_activation_state == sclp_activation_state_active)
352 sclp_activation_state == sclp_activation_state_active ) { 385 __sclp_queue_read_req();
353 sclp_reading_state = sclp_reading_state_reading;
354 __sclp_make_read_req();
355 /* Add request to head of queue */
356 list_add(&sclp_read_req.list, &sclp_req_queue);
357 }
358 spin_unlock(&sclp_lock); 386 spin_unlock(&sclp_lock);
359 sclp_process_queue(); 387 sclp_process_queue();
360} 388}
@@ -374,6 +402,7 @@ sclp_sync_wait(void)
374 unsigned long flags; 402 unsigned long flags;
375 unsigned long cr0, cr0_sync; 403 unsigned long cr0, cr0_sync;
376 u64 timeout; 404 u64 timeout;
405 int irq_context;
377 406
378 /* We'll be disabling timer interrupts, so we need a custom timeout 407 /* We'll be disabling timer interrupts, so we need a custom timeout
379 * mechanism */ 408 * mechanism */
@@ -386,7 +415,9 @@ sclp_sync_wait(void)
386 } 415 }
387 local_irq_save(flags); 416 local_irq_save(flags);
388 /* Prevent bottom half from executing once we force interrupts open */ 417 /* Prevent bottom half from executing once we force interrupts open */
389 local_bh_disable(); 418 irq_context = in_interrupt();
419 if (!irq_context)
420 local_bh_disable();
390 /* Enable service-signal interruption, disable timer interrupts */ 421 /* Enable service-signal interruption, disable timer interrupts */
391 trace_hardirqs_on(); 422 trace_hardirqs_on();
392 __ctl_store(cr0, 0, 0); 423 __ctl_store(cr0, 0, 0);
@@ -402,19 +433,19 @@ sclp_sync_wait(void)
402 get_clock() > timeout && 433 get_clock() > timeout &&
403 del_timer(&sclp_request_timer)) 434 del_timer(&sclp_request_timer))
404 sclp_request_timer.function(sclp_request_timer.data); 435 sclp_request_timer.function(sclp_request_timer.data);
405 barrier();
406 cpu_relax(); 436 cpu_relax();
407 } 437 }
408 local_irq_disable(); 438 local_irq_disable();
409 __ctl_load(cr0, 0, 0); 439 __ctl_load(cr0, 0, 0);
410 _local_bh_enable(); 440 if (!irq_context)
441 _local_bh_enable();
411 local_irq_restore(flags); 442 local_irq_restore(flags);
412} 443}
413 444
414EXPORT_SYMBOL(sclp_sync_wait); 445EXPORT_SYMBOL(sclp_sync_wait);
415 446
416/* Dispatch changes in send and receive mask to registered listeners. */ 447/* Dispatch changes in send and receive mask to registered listeners. */
417static inline void 448static void
418sclp_dispatch_state_change(void) 449sclp_dispatch_state_change(void)
419{ 450{
420 struct list_head *l; 451 struct list_head *l;
@@ -597,7 +628,7 @@ __sclp_make_init_req(u32 receive_mask, u32 send_mask)
597 sccb = (struct init_sccb *) sclp_init_sccb; 628 sccb = (struct init_sccb *) sclp_init_sccb;
598 clear_page(sccb); 629 clear_page(sccb);
599 memset(&sclp_init_req, 0, sizeof(struct sclp_req)); 630 memset(&sclp_init_req, 0, sizeof(struct sclp_req));
600 sclp_init_req.command = SCLP_CMDW_WRITEMASK; 631 sclp_init_req.command = SCLP_CMDW_WRITE_EVENT_MASK;
601 sclp_init_req.status = SCLP_REQ_FILLED; 632 sclp_init_req.status = SCLP_REQ_FILLED;
602 sclp_init_req.start_count = 0; 633 sclp_init_req.start_count = 0;
603 sclp_init_req.callback = NULL; 634 sclp_init_req.callback = NULL;
@@ -800,7 +831,7 @@ sclp_check_interface(void)
800 for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) { 831 for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) {
801 __sclp_make_init_req(0, 0); 832 __sclp_make_init_req(0, 0);
802 sccb = (struct init_sccb *) sclp_init_req.sccb; 833 sccb = (struct init_sccb *) sclp_init_req.sccb;
803 rc = service_call(sclp_init_req.command, sccb); 834 rc = sclp_service_call(sclp_init_req.command, sccb);
804 if (rc == -EIO) 835 if (rc == -EIO)
805 break; 836 break;
806 sclp_init_req.status = SCLP_REQ_RUNNING; 837 sclp_init_req.status = SCLP_REQ_RUNNING;
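The sclp.c changes above harden the request engine in three ways: the
interrupt handler now parks the engine in the new
sclp_running_state_reset_pending state while a finished SCCB is
post-processed, so no new request is started in between; a timeout of a
running request queues a read event request via __sclp_queue_read_req() to
bring the interface back to a defined state; and a request that already
went out once is never aborted, since it may still be active at the SCLP.
The retry rule in sclp_process_queue(), restated as a sketch:

	rc = __sclp_start_request(req);
	if (rc == 0)
		break;			/* started; wait for the interrupt */
	if (req->start_count > 1) {
		/* already submitted once - cannot abort, keep it queued
		 * and retry after the busy interval */
		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
					 sclp_request_timeout, 0);
		break;
	}
	/* first attempt failed: dequeue and complete with an error */
	list_del(&req->list);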
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index 2c71d6ee7b5b..7d29ab45a6ed 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -12,7 +12,7 @@
12 12
13#include <linux/types.h> 13#include <linux/types.h>
14#include <linux/list.h> 14#include <linux/list.h>
15 15#include <asm/sclp.h>
16#include <asm/ebcdic.h> 16#include <asm/ebcdic.h>
17 17
18/* maximum number of pages concerning our own memory management */ 18/* maximum number of pages concerning our own memory management */
@@ -49,9 +49,11 @@
49 49
50typedef unsigned int sclp_cmdw_t; 50typedef unsigned int sclp_cmdw_t;
51 51
52#define SCLP_CMDW_READDATA 0x00770005 52#define SCLP_CMDW_READ_EVENT_DATA 0x00770005
53#define SCLP_CMDW_WRITEDATA 0x00760005 53#define SCLP_CMDW_WRITE_EVENT_DATA 0x00760005
54#define SCLP_CMDW_WRITEMASK 0x00780005 54#define SCLP_CMDW_WRITE_EVENT_MASK 0x00780005
55#define SCLP_CMDW_READ_SCP_INFO 0x00020001
56#define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001
55 57
56#define GDS_ID_MDSMU 0x1310 58#define GDS_ID_MDSMU 0x1310
57#define GDS_ID_MDSRouteInfo 0x1311 59#define GDS_ID_MDSRouteInfo 0x1311
@@ -66,13 +68,6 @@ typedef unsigned int sclp_cmdw_t;
66 68
67typedef u32 sccb_mask_t; /* ATTENTION: assumes 32bit mask !!! */ 69typedef u32 sccb_mask_t; /* ATTENTION: assumes 32bit mask !!! */
68 70
69struct sccb_header {
70 u16 length;
71 u8 function_code;
72 u8 control_mask[3];
73 u16 response_code;
74} __attribute__((packed));
75
76struct gds_subvector { 71struct gds_subvector {
77 u8 length; 72 u8 length;
78 u8 key; 73 u8 key;
@@ -131,6 +126,7 @@ void sclp_unregister(struct sclp_register *reg);
131int sclp_remove_processed(struct sccb_header *sccb); 126int sclp_remove_processed(struct sccb_header *sccb);
132int sclp_deactivate(void); 127int sclp_deactivate(void);
133int sclp_reactivate(void); 128int sclp_reactivate(void);
129int sclp_service_call(sclp_cmdw_t command, void *sccb);
134 130
135/* useful inlines */ 131/* useful inlines */
136 132
diff --git a/drivers/s390/char/sclp_con.c b/drivers/s390/char/sclp_con.c
index 86864f641716..ead1043d788e 100644
--- a/drivers/s390/char/sclp_con.c
+++ b/drivers/s390/char/sclp_con.c
@@ -66,7 +66,7 @@ sclp_conbuf_callback(struct sclp_buffer *buffer, int rc)
66 } while (buffer && sclp_emit_buffer(buffer, sclp_conbuf_callback)); 66 } while (buffer && sclp_emit_buffer(buffer, sclp_conbuf_callback));
67} 67}
68 68
69static inline void 69static void
70sclp_conbuf_emit(void) 70sclp_conbuf_emit(void)
71{ 71{
72 struct sclp_buffer* buffer; 72 struct sclp_buffer* buffer;
diff --git a/drivers/s390/char/sclp_cpi.c b/drivers/s390/char/sclp_cpi.c
index 4f873ae148b7..65aa2c85737f 100644
--- a/drivers/s390/char/sclp_cpi.c
+++ b/drivers/s390/char/sclp_cpi.c
@@ -169,7 +169,7 @@ cpi_prepare_req(void)
169 } 169 }
170 170
171 /* prepare request data structure presented to SCLP driver */ 171 /* prepare request data structure presented to SCLP driver */
172 req->command = SCLP_CMDW_WRITEDATA; 172 req->command = SCLP_CMDW_WRITE_EVENT_DATA;
173 req->sccb = sccb; 173 req->sccb = sccb;
174 req->status = SCLP_REQ_FILLED; 174 req->status = SCLP_REQ_FILLED;
175 req->callback = cpi_callback; 175 req->callback = cpi_callback;
diff --git a/drivers/s390/char/sclp_info.c b/drivers/s390/char/sclp_info.c
new file mode 100644
index 000000000000..7bcbe643b087
--- /dev/null
+++ b/drivers/s390/char/sclp_info.c
@@ -0,0 +1,57 @@
1/*
2 * drivers/s390/char/sclp_info.c
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
6 */
7
8#include <linux/init.h>
9#include <linux/errno.h>
10#include <linux/string.h>
11#include <asm/sclp.h>
12#include "sclp.h"
13
14struct sclp_readinfo_sccb s390_readinfo_sccb;
15
16void __init sclp_readinfo_early(void)
17{
18 sclp_cmdw_t command;
19 struct sccb_header *sccb;
20 int ret;
21
22 __ctl_set_bit(0, 9); /* enable service signal subclass mask */
23
24 sccb = &s390_readinfo_sccb.header;
25 command = SCLP_CMDW_READ_SCP_INFO_FORCED;
26 while (1) {
27 u16 response;
28
29 memset(&s390_readinfo_sccb, 0, sizeof(s390_readinfo_sccb));
30 sccb->length = sizeof(s390_readinfo_sccb);
31 sccb->control_mask[2] = 0x80;
32
33 ret = sclp_service_call(command, &s390_readinfo_sccb);
34
35 if (ret == -EIO)
36 goto out;
37 if (ret == -EBUSY)
38 continue;
39
40 __load_psw_mask(PSW_BASE_BITS | PSW_MASK_EXT |
41 PSW_MASK_WAIT | PSW_DEFAULT_KEY);
42 local_irq_disable();
43 barrier();
44
45 response = sccb->response_code;
46
47 if (response == 0x10)
48 break;
49
50 if (response != 0x1f0 || command == SCLP_CMDW_READ_SCP_INFO)
51 break;
52
53 command = SCLP_CMDW_READ_SCP_INFO;
54 }
55out:
56 __ctl_clear_bit(0, 9); /* disable service signal subclass mask */
57}
diff --git a/drivers/s390/char/sclp_rw.c b/drivers/s390/char/sclp_rw.c
index 0c92d3909cca..2486783ea58e 100644
--- a/drivers/s390/char/sclp_rw.c
+++ b/drivers/s390/char/sclp_rw.c
@@ -460,7 +460,7 @@ sclp_emit_buffer(struct sclp_buffer *buffer,
460 sccb->msg_buf.header.type = EvTyp_PMsgCmd; 460 sccb->msg_buf.header.type = EvTyp_PMsgCmd;
461 else 461 else
462 return -ENOSYS; 462 return -ENOSYS;
463 buffer->request.command = SCLP_CMDW_WRITEDATA; 463 buffer->request.command = SCLP_CMDW_WRITE_EVENT_DATA;
464 buffer->request.status = SCLP_REQ_FILLED; 464 buffer->request.status = SCLP_REQ_FILLED;
465 buffer->request.callback = sclp_writedata_callback; 465 buffer->request.callback = sclp_writedata_callback;
466 buffer->request.callback_data = buffer; 466 buffer->request.callback_data = buffer;
diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c
index 2d173e5c8a09..90536f60bf50 100644
--- a/drivers/s390/char/sclp_tty.c
+++ b/drivers/s390/char/sclp_tty.c
@@ -721,7 +721,7 @@ static const struct tty_operations sclp_ops = {
721 .ioctl = sclp_tty_ioctl, 721 .ioctl = sclp_tty_ioctl,
722}; 722};
723 723
724int __init 724static int __init
725sclp_tty_init(void) 725sclp_tty_init(void)
726{ 726{
727 struct tty_driver *driver; 727 struct tty_driver *driver;
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index 723bf4191bfe..544f137d70d7 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -207,7 +207,7 @@ __sclp_vt220_emit(struct sclp_vt220_request *request)
207 request->sclp_req.status = SCLP_REQ_FAILED; 207 request->sclp_req.status = SCLP_REQ_FAILED;
208 return -EIO; 208 return -EIO;
209 } 209 }
210 request->sclp_req.command = SCLP_CMDW_WRITEDATA; 210 request->sclp_req.command = SCLP_CMDW_WRITE_EVENT_DATA;
211 request->sclp_req.status = SCLP_REQ_FILLED; 211 request->sclp_req.status = SCLP_REQ_FILLED;
212 request->sclp_req.callback = sclp_vt220_callback; 212 request->sclp_req.callback = sclp_vt220_callback;
213 request->sclp_req.callback_data = (void *) request; 213 request->sclp_req.callback_data = (void *) request;
@@ -669,7 +669,7 @@ static const struct tty_operations sclp_vt220_ops = {
669/* 669/*
670 * Register driver with SCLP and Linux and initialize internal tty structures. 670 * Register driver with SCLP and Linux and initialize internal tty structures.
671 */ 671 */
672int __init 672static int __init
673sclp_vt220_tty_init(void) 673sclp_vt220_tty_init(void)
674{ 674{
675 struct tty_driver *driver; 675 struct tty_driver *driver;
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h
index c9f1c4c8bb13..bb4ff537729d 100644
--- a/drivers/s390/char/tape.h
+++ b/drivers/s390/char/tape.h
@@ -3,7 +3,7 @@
3 * tape device driver for 3480/3490E/3590 tapes. 3 * tape device driver for 3480/3490E/3590 tapes.
4 * 4 *
5 * S390 and zSeries version 5 * S390 and zSeries version
6 * Copyright (C) 2001,2005 IBM Deutschland Entwicklung GmbH, IBM Corporation 6 * Copyright IBM Corp. 2001,2006
7 * Author(s): Carsten Otte <cotte@de.ibm.com> 7 * Author(s): Carsten Otte <cotte@de.ibm.com>
8 * Tuan Ngo-Anh <ngoanh@de.ibm.com> 8 * Tuan Ngo-Anh <ngoanh@de.ibm.com>
9 * Martin Schwidefsky <schwidefsky@de.ibm.com> 9 * Martin Schwidefsky <schwidefsky@de.ibm.com>
@@ -99,7 +99,11 @@ enum tape_op {
99 TO_DIS, /* Tape display */ 99 TO_DIS, /* Tape display */
100 TO_ASSIGN, /* Assign tape to channel path */ 100 TO_ASSIGN, /* Assign tape to channel path */
101 TO_UNASSIGN, /* Unassign tape from channel path */ 101 TO_UNASSIGN, /* Unassign tape from channel path */
102 TO_SIZE /* #entries in tape_op_t */ 102 TO_CRYPT_ON, /* Enable encrpytion */
103 TO_CRYPT_OFF, /* Disable encrpytion */
104 TO_KEKL_SET, /* Set KEK label */
105 TO_KEKL_QUERY, /* Query KEK label */
106 TO_SIZE, /* #entries in tape_op_t */
103}; 107};
104 108
105/* Forward declaration */ 109/* Forward declaration */
@@ -112,6 +116,7 @@ enum tape_request_status {
112 TAPE_REQUEST_IN_IO, /* request is currently in IO */ 116 TAPE_REQUEST_IN_IO, /* request is currently in IO */
113 TAPE_REQUEST_DONE, /* request is completed. */ 117 TAPE_REQUEST_DONE, /* request is completed. */
114 TAPE_REQUEST_CANCEL, /* request should be canceled. */ 118 TAPE_REQUEST_CANCEL, /* request should be canceled. */
119 TAPE_REQUEST_LONG_BUSY, /* request has to be restarted after long busy */
115}; 120};
116 121
117/* Tape CCW request */ 122/* Tape CCW request */
@@ -164,10 +169,11 @@ struct tape_discipline {
164 * The discipline irq function either returns an error code (<0) which 169 * The discipline irq function either returns an error code (<0) which
165 * means that the request has failed with an error or one of the following: 170 * means that the request has failed with an error or one of the following:
166 */ 171 */
167#define TAPE_IO_SUCCESS 0 /* request successful */ 172#define TAPE_IO_SUCCESS 0 /* request successful */
168#define TAPE_IO_PENDING 1 /* request still running */ 173#define TAPE_IO_PENDING 1 /* request still running */
169#define TAPE_IO_RETRY 2 /* retry to current request */ 174#define TAPE_IO_RETRY 2 /* retry to current request */
170#define TAPE_IO_STOP 3 /* stop the running request */ 175#define TAPE_IO_STOP 3 /* stop the running request */
176#define TAPE_IO_LONG_BUSY 4 /* delay the running request */
171 177
172/* Char Frontend Data */ 178/* Char Frontend Data */
173struct tape_char_data { 179struct tape_char_data {
@@ -242,6 +248,10 @@ struct tape_device {
242 248
243 /* Function to start or stop the next request later. */ 249 /* Function to start or stop the next request later. */
244 struct delayed_work tape_dnr; 250 struct delayed_work tape_dnr;
251
252 /* Timer for long busy */
253 struct timer_list lb_timeout;
254
245}; 255};
246 256
247/* Externals from tape_core.c */ 257/* Externals from tape_core.c */
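The four new tape operations cover 3592 drive encryption: TO_CRYPT_ON and
TO_CRYPT_OFF toggle encryption on the drive, while TO_KEKL_SET and
TO_KEKL_QUERY write and read the key-encrypting-key (KEK) labels stored
with a cartridge. From user space this surfaces as tape ioctls; a sketch,
assuming the TAPE390_KEKL_QUERY ioctl and struct tape390_kekl_pair come
from the patch series' tape390 header update:

	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <asm/tape390.h>	/* assumed: TAPE390_KEKL_* ioctls */

	static int show_kekls(int fd)
	{
		struct tape390_kekl_pair kekls;

		if (ioctl(fd, TAPE390_KEKL_QUERY, &kekls) < 0)
			return -1;
		printf("kekl 1: %s\nkekl 2: %s\n",
		       kekls.kekl[0].label, kekls.kekl[1].label);
		return 0;
	}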
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index 9df912f63188..50f5edab83d7 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -2,7 +2,7 @@
2 * drivers/s390/char/tape_3590.c 2 * drivers/s390/char/tape_3590.c
3 * tape device discipline for 3590 tapes. 3 * tape device discipline for 3590 tapes.
4 * 4 *
5 * Copyright (C) IBM Corp. 2001,2006 5 * Copyright IBM Corp. 2001,2006
6 * Author(s): Stefan Bader <shbader@de.ibm.com> 6 * Author(s): Stefan Bader <shbader@de.ibm.com>
7 * Michael Holzheu <holzheu@de.ibm.com> 7 * Michael Holzheu <holzheu@de.ibm.com>
8 * Martin Schwidefsky <schwidefsky@de.ibm.com> 8 * Martin Schwidefsky <schwidefsky@de.ibm.com>
@@ -11,6 +11,7 @@
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/bio.h> 13#include <linux/bio.h>
14#include <asm/ebcdic.h>
14 15
15#define TAPE_DBF_AREA tape_3590_dbf 16#define TAPE_DBF_AREA tape_3590_dbf
16 17
@@ -30,7 +31,7 @@ EXPORT_SYMBOL(TAPE_DBF_AREA);
30 * - Read Device (buffered) log: BRA 31 * - Read Device (buffered) log: BRA
31 * - Read Library log: BRA 32 * - Read Library log: BRA
32 * - Swap Devices: BRA 33 * - Swap Devices: BRA
33 * - Long Busy: BRA 34 * - Long Busy: implemented
34 * - Special Intercept: BRA 35 * - Special Intercept: BRA
35 * - Read Alternate: implemented 36 * - Read Alternate: implemented
36 *******************************************************************/ 37 *******************************************************************/
@@ -94,6 +95,332 @@ static const char *tape_3590_msg[TAPE_3590_MAX_MSG] = {
94 [0xae] = "Subsystem environmental alert", 95 [0xae] = "Subsystem environmental alert",
95}; 96};
96 97
98static int crypt_supported(struct tape_device *device)
99{
100 return TAPE390_CRYPT_SUPPORTED(TAPE_3590_CRYPT_INFO(device));
101}
102
103static int crypt_enabled(struct tape_device *device)
104{
105 return TAPE390_CRYPT_ON(TAPE_3590_CRYPT_INFO(device));
106}
107
108static void ext_to_int_kekl(struct tape390_kekl *in,
109 struct tape3592_kekl *out)
110{
111 int i;
112
113 memset(out, 0, sizeof(*out));
114 if (in->type == TAPE390_KEKL_TYPE_HASH)
115 out->flags |= 0x40;
116 if (in->type_on_tape == TAPE390_KEKL_TYPE_HASH)
117 out->flags |= 0x80;
118 strncpy(out->label, in->label, 64);
119 for (i = strlen(in->label); i < sizeof(out->label); i++)
120 out->label[i] = ' ';
121 ASCEBC(out->label, sizeof(out->label));
122}
123
124static void int_to_ext_kekl(struct tape3592_kekl *in,
125 struct tape390_kekl *out)
126{
127 memset(out, 0, sizeof(*out));
128 if (in->flags & 0x40)
129 out->type = TAPE390_KEKL_TYPE_HASH;
130 else
131 out->type = TAPE390_KEKL_TYPE_LABEL;
132 if (in->flags & 0x80)
133 out->type_on_tape = TAPE390_KEKL_TYPE_HASH;
134 else
135 out->type_on_tape = TAPE390_KEKL_TYPE_LABEL;
136 memcpy(out->label, in->label, sizeof(in->label));
137 EBCASC(out->label, sizeof(in->label));
138 strstrip(out->label);
139}
140
141static void int_to_ext_kekl_pair(struct tape3592_kekl_pair *in,
142 struct tape390_kekl_pair *out)
143{
144 if (in->count == 0) {
145 out->kekl[0].type = TAPE390_KEKL_TYPE_NONE;
146 out->kekl[0].type_on_tape = TAPE390_KEKL_TYPE_NONE;
147 out->kekl[1].type = TAPE390_KEKL_TYPE_NONE;
148 out->kekl[1].type_on_tape = TAPE390_KEKL_TYPE_NONE;
149 } else if (in->count == 1) {
150 int_to_ext_kekl(&in->kekl[0], &out->kekl[0]);
151 out->kekl[1].type = TAPE390_KEKL_TYPE_NONE;
152 out->kekl[1].type_on_tape = TAPE390_KEKL_TYPE_NONE;
153 } else if (in->count == 2) {
154 int_to_ext_kekl(&in->kekl[0], &out->kekl[0]);
155 int_to_ext_kekl(&in->kekl[1], &out->kekl[1]);
156 } else {
157 printk("Invalid KEKL number: %d\n", in->count);
158 BUG();
159 }
160}
161
162static int check_ext_kekl(struct tape390_kekl *kekl)
163{
164 if (kekl->type == TAPE390_KEKL_TYPE_NONE)
165 goto invalid;
166 if (kekl->type > TAPE390_KEKL_TYPE_HASH)
167 goto invalid;
168 if (kekl->type_on_tape == TAPE390_KEKL_TYPE_NONE)
169 goto invalid;
170 if (kekl->type_on_tape > TAPE390_KEKL_TYPE_HASH)
171 goto invalid;
172 if ((kekl->type == TAPE390_KEKL_TYPE_HASH) &&
173 (kekl->type_on_tape == TAPE390_KEKL_TYPE_LABEL))
174 goto invalid;
175
176 return 0;
177invalid:
178 return -EINVAL;
179}
180
181static int check_ext_kekl_pair(struct tape390_kekl_pair *kekls)
182{
183 if (check_ext_kekl(&kekls->kekl[0]))
184 goto invalid;
185 if (check_ext_kekl(&kekls->kekl[1]))
186 goto invalid;
187
188 return 0;
189invalid:
190 return -EINVAL;
191}
192
193/*
194 * Query KEKLs
195 */
196static int tape_3592_kekl_query(struct tape_device *device,
197 struct tape390_kekl_pair *ext_kekls)
198{
199 struct tape_request *request;
200 struct tape3592_kekl_query_order *order;
201 struct tape3592_kekl_query_data *int_kekls;
202 int rc;
203
204 DBF_EVENT(6, "tape3592_kekl_query\n");
205 int_kekls = kmalloc(sizeof(*int_kekls), GFP_KERNEL|GFP_DMA);
206 if (!int_kekls)
207 return -ENOMEM;
208 request = tape_alloc_request(2, sizeof(*order));
209 if (IS_ERR(request)) {
210 rc = PTR_ERR(request);
211 goto fail_malloc;
212 }
213 order = request->cpdata;
 214 memset(order, 0, sizeof(*order));
215 order->code = 0xe2;
216 order->max_count = 2;
217 request->op = TO_KEKL_QUERY;
218 tape_ccw_cc(request->cpaddr, PERF_SUBSYS_FUNC, sizeof(*order), order);
219 tape_ccw_end(request->cpaddr + 1, READ_SS_DATA, sizeof(*int_kekls),
220 int_kekls);
221 rc = tape_do_io(device, request);
222 if (rc)
223 goto fail_request;
224 int_to_ext_kekl_pair(&int_kekls->kekls, ext_kekls);
225
226 rc = 0;
227fail_request:
228 tape_free_request(request);
229fail_malloc:
230 kfree(int_kekls);
231 return rc;
232}
233
234/*
235 * IOCTL: Query KEKLs
236 */
237static int tape_3592_ioctl_kekl_query(struct tape_device *device,
238 unsigned long arg)
239{
240 int rc;
241 struct tape390_kekl_pair *ext_kekls;
242
243 DBF_EVENT(6, "tape_3592_ioctl_kekl_query\n");
244 if (!crypt_supported(device))
245 return -ENOSYS;
246 if (!crypt_enabled(device))
247 return -EUNATCH;
248 ext_kekls = kmalloc(sizeof(*ext_kekls), GFP_KERNEL);
249 if (!ext_kekls)
250 return -ENOMEM;
251 rc = tape_3592_kekl_query(device, ext_kekls);
252 if (rc != 0)
253 goto fail;
254 if (copy_to_user((char __user *) arg, ext_kekls, sizeof(*ext_kekls))) {
255 rc = -EFAULT;
256 goto fail;
257 }
258 rc = 0;
259fail:
260 kfree(ext_kekls);
261 return rc;
262}
263
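
An illustrative sketch (not part of the change) of how user space would drive the query path above. It assumes the TAPE390_* ioctl numbers and structures are exported through <asm/tape390.h> and that the tape character node is named /dev/ntibm0; both are assumptions, not confirmed by this diff.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/tape390.h>	/* assumed location of the TAPE390_* ABI */

int main(void)
{
	struct tape390_kekl_pair kekls;
	int fd, rc;

	fd = open("/dev/ntibm0", O_RDONLY);	/* assumed node name */
	if (fd < 0) {
		perror("open");
		return 1;
	}
	rc = ioctl(fd, TAPE390_KEKL_QUERY, &kekls);
	if (rc < 0)
		perror("TAPE390_KEKL_QUERY");	/* e.g. EUNATCH: crypt off */
	else if (kekls.kekl[0].type != TAPE390_KEKL_TYPE_NONE)
		/* labels come back space-stripped, at most 64 bytes */
		printf("KEKL 1: %.64s\n", kekls.kekl[0].label);
	close(fd);
	return rc < 0;
}
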
264static int tape_3590_mttell(struct tape_device *device, int mt_count);
265
266/*
267 * Set KEKLs
268 */
269static int tape_3592_kekl_set(struct tape_device *device,
270 struct tape390_kekl_pair *ext_kekls)
271{
272 struct tape_request *request;
273 struct tape3592_kekl_set_order *order;
274
275 DBF_EVENT(6, "tape3592_kekl_set\n");
276 if (check_ext_kekl_pair(ext_kekls)) {
277 DBF_EVENT(6, "invalid kekls\n");
278 return -EINVAL;
279 }
280 if (tape_3590_mttell(device, 0) != 0)
281 return -EBADSLT;
282 request = tape_alloc_request(1, sizeof(*order));
283 if (IS_ERR(request))
284 return PTR_ERR(request);
285 order = request->cpdata;
286 memset(order, 0, sizeof(*order));
287 order->code = 0xe3;
288 order->kekls.count = 2;
289 ext_to_int_kekl(&ext_kekls->kekl[0], &order->kekls.kekl[0]);
290 ext_to_int_kekl(&ext_kekls->kekl[1], &order->kekls.kekl[1]);
291 request->op = TO_KEKL_SET;
292 tape_ccw_end(request->cpaddr, PERF_SUBSYS_FUNC, sizeof(*order), order);
293
294 return tape_do_io_free(device, request);
295}
296
297/*
298 * IOCTL: Set KEKLs
299 */
300static int tape_3592_ioctl_kekl_set(struct tape_device *device,
301 unsigned long arg)
302{
303 int rc;
304 struct tape390_kekl_pair *ext_kekls;
305
306 DBF_EVENT(6, "tape_3592_ioctl_kekl_set\n");
307 if (!crypt_supported(device))
308 return -ENOSYS;
309 if (!crypt_enabled(device))
310 return -EUNATCH;
311 ext_kekls = kmalloc(sizeof(*ext_kekls), GFP_KERNEL);
312 if (!ext_kekls)
313 return -ENOMEM;
314 if (copy_from_user(ext_kekls, (char __user *)arg, sizeof(*ext_kekls))) {
315 rc = -EFAULT;
316 goto out;
317 }
318 rc = tape_3592_kekl_set(device, ext_kekls);
319out:
320 kfree(ext_kekls);
321 return rc;
322}
323
324/*
325 * Enable encryption
326 */
327static int tape_3592_enable_crypt(struct tape_device *device)
328{
329 struct tape_request *request;
330 char *data;
331
332 DBF_EVENT(6, "tape_3592_enable_crypt\n");
333 if (!crypt_supported(device))
334 return -ENOSYS;
335 request = tape_alloc_request(2, 72);
336 if (IS_ERR(request))
337 return PTR_ERR(request);
338 data = request->cpdata;
 339 memset(data, 0, 72);
340
341 data[0] = 0x05;
342 data[36 + 0] = 0x03;
343 data[36 + 1] = 0x03;
344 data[36 + 4] = 0x40;
345 data[36 + 6] = 0x01;
346 data[36 + 14] = 0x2f;
347 data[36 + 18] = 0xc3;
348 data[36 + 35] = 0x72;
349 request->op = TO_CRYPT_ON;
350 tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data);
351 tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36);
352 return tape_do_io_free(device, request);
353}
354
355/*
356 * Disable encryption
357 */
358static int tape_3592_disable_crypt(struct tape_device *device)
359{
360 struct tape_request *request;
361 char *data;
362
363 DBF_EVENT(6, "tape_3592_disable_crypt\n");
364 if (!crypt_supported(device))
365 return -ENOSYS;
366 request = tape_alloc_request(2, 72);
367 if (IS_ERR(request))
368 return PTR_ERR(request);
369 data = request->cpdata;
 370 memset(data, 0, 72);
371
372 data[0] = 0x05;
373 data[36 + 0] = 0x03;
374 data[36 + 1] = 0x03;
375 data[36 + 35] = 0x32;
376
377 request->op = TO_CRYPT_OFF;
378 tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data);
379 tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36);
380
381 return tape_do_io_free(device, request);
382}
383
384/*
385 * IOCTL: Set encryption status
386 */
387static int tape_3592_ioctl_crypt_set(struct tape_device *device,
388 unsigned long arg)
389{
390 struct tape390_crypt_info info;
391
392 DBF_EVENT(6, "tape_3592_ioctl_crypt_set\n");
393 if (!crypt_supported(device))
394 return -ENOSYS;
395 if (copy_from_user(&info, (char __user *)arg, sizeof(info)))
396 return -EFAULT;
397 if (info.status & ~TAPE390_CRYPT_ON_MASK)
398 return -EINVAL;
399 if (info.status & TAPE390_CRYPT_ON_MASK)
400 return tape_3592_enable_crypt(device);
401 else
402 return tape_3592_disable_crypt(device);
403}
404
405static int tape_3590_sense_medium(struct tape_device *device);
406
407/*
 408 * IOCTL: Query encryption status
409 */
410static int tape_3592_ioctl_crypt_query(struct tape_device *device,
411 unsigned long arg)
412{
413 DBF_EVENT(6, "tape_3592_ioctl_crypt_query\n");
414 if (!crypt_supported(device))
415 return -ENOSYS;
416 tape_3590_sense_medium(device);
417 if (copy_to_user((char __user *) arg, &TAPE_3590_CRYPT_INFO(device),
418 sizeof(TAPE_3590_CRYPT_INFO(device))))
419 return -EFAULT;
420 else
421 return 0;
422}
423
97/* 424/*
98 * 3590 IOCTL Overload 425 * 3590 IOCTL Overload
99 */ 426 */
@@ -109,6 +436,14 @@ tape_3590_ioctl(struct tape_device *device, unsigned int cmd, unsigned long arg)
109 436
110 return tape_std_display(device, &disp); 437 return tape_std_display(device, &disp);
111 } 438 }
439 case TAPE390_KEKL_SET:
440 return tape_3592_ioctl_kekl_set(device, arg);
441 case TAPE390_KEKL_QUERY:
442 return tape_3592_ioctl_kekl_query(device, arg);
443 case TAPE390_CRYPT_SET:
444 return tape_3592_ioctl_crypt_set(device, arg);
445 case TAPE390_CRYPT_QUERY:
446 return tape_3592_ioctl_crypt_query(device, arg);
112 default: 447 default:
113 return -EINVAL; /* no additional ioctls */ 448 return -EINVAL; /* no additional ioctls */
114 } 449 }
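
With the four ioctls wired up above, switching drive encryption on from user space reduces to a query/set sequence. A sketch (same assumptions as before about <asm/tape390.h> and the node name; note that tape_3592_ioctl_crypt_set() rejects any status bit other than TAPE390_CRYPT_ON_MASK):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/tape390.h>

int main(void)
{
	struct tape390_crypt_info info;
	int fd;

	fd = open("/dev/ntibm0", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, TAPE390_CRYPT_QUERY, &info) < 0) {
		perror("TAPE390_CRYPT_QUERY");	/* ENOSYS: no crypto support */
		close(fd);
		return 1;
	}
	info.status = TAPE390_CRYPT_ON_MASK;	/* only this bit may be set */
	if (ioctl(fd, TAPE390_CRYPT_SET, &info) < 0)
		perror("TAPE390_CRYPT_SET");
	close(fd);
	return 0;
}
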
@@ -248,6 +583,12 @@ tape_3590_work_handler(struct work_struct *work)
248 case TO_READ_ATTMSG: 583 case TO_READ_ATTMSG:
249 tape_3590_read_attmsg(p->device); 584 tape_3590_read_attmsg(p->device);
250 break; 585 break;
586 case TO_CRYPT_ON:
587 tape_3592_enable_crypt(p->device);
588 break;
589 case TO_CRYPT_OFF:
590 tape_3592_disable_crypt(p->device);
591 break;
251 default: 592 default:
252 DBF_EVENT(3, "T3590: work handler undefined for " 593 DBF_EVENT(3, "T3590: work handler undefined for "
253 "operation 0x%02x\n", p->op); 594 "operation 0x%02x\n", p->op);
@@ -365,6 +706,33 @@ tape_3590_check_locate(struct tape_device *device, struct tape_request *request)
365} 706}
366#endif 707#endif
367 708
709static void tape_3590_med_state_set(struct tape_device *device,
710 struct tape_3590_med_sense *sense)
711{
712 struct tape390_crypt_info *c_info;
713
714 c_info = &TAPE_3590_CRYPT_INFO(device);
715
716 if (sense->masst == MSENSE_UNASSOCIATED) {
717 tape_med_state_set(device, MS_UNLOADED);
 718 c_info->medium_status = 0;
719 return;
720 }
721 if (sense->masst != MSENSE_ASSOCIATED_MOUNT) {
722 PRINT_ERR("Unknown medium state: %x\n", sense->masst);
723 return;
724 }
725 tape_med_state_set(device, MS_LOADED);
726 c_info->medium_status |= TAPE390_MEDIUM_LOADED_MASK;
727 if (sense->flags & MSENSE_CRYPT_MASK) {
728 PRINT_INFO("Medium is encrypted (%04x)\n", sense->flags);
729 c_info->medium_status |= TAPE390_MEDIUM_ENCRYPTED_MASK;
730 } else {
731 DBF_EVENT(6, "Medium is not encrypted %04x\n", sense->flags);
732 c_info->medium_status &= ~TAPE390_MEDIUM_ENCRYPTED_MASK;
733 }
734}
735
368/* 736/*
369 * The done handler is called at device/channel end and wakes up the sleeping 737 * The done handler is called at device/channel end and wakes up the sleeping
370 * process 738 * process
@@ -372,9 +740,10 @@ tape_3590_check_locate(struct tape_device *device, struct tape_request *request)
372static int 740static int
373tape_3590_done(struct tape_device *device, struct tape_request *request) 741tape_3590_done(struct tape_device *device, struct tape_request *request)
374{ 742{
375 struct tape_3590_med_sense *sense; 743 struct tape_3590_disc_data *disc_data;
376 744
377 DBF_EVENT(6, "%s done\n", tape_op_verbose[request->op]); 745 DBF_EVENT(6, "%s done\n", tape_op_verbose[request->op]);
746 disc_data = device->discdata;
378 747
379 switch (request->op) { 748 switch (request->op) {
380 case TO_BSB: 749 case TO_BSB:
@@ -394,13 +763,20 @@ tape_3590_done(struct tape_device *device, struct tape_request *request)
394 break; 763 break;
395 case TO_RUN: 764 case TO_RUN:
396 tape_med_state_set(device, MS_UNLOADED); 765 tape_med_state_set(device, MS_UNLOADED);
766 tape_3590_schedule_work(device, TO_CRYPT_OFF);
397 break; 767 break;
398 case TO_MSEN: 768 case TO_MSEN:
399 sense = (struct tape_3590_med_sense *) request->cpdata; 769 tape_3590_med_state_set(device, request->cpdata);
400 if (sense->masst == MSENSE_UNASSOCIATED) 770 break;
401 tape_med_state_set(device, MS_UNLOADED); 771 case TO_CRYPT_ON:
402 if (sense->masst == MSENSE_ASSOCIATED_MOUNT) 772 TAPE_3590_CRYPT_INFO(device).status
403 tape_med_state_set(device, MS_LOADED); 773 |= TAPE390_CRYPT_ON_MASK;
774 *(device->modeset_byte) |= 0x03;
775 break;
776 case TO_CRYPT_OFF:
777 TAPE_3590_CRYPT_INFO(device).status
778 &= ~TAPE390_CRYPT_ON_MASK;
779 *(device->modeset_byte) &= ~0x03;
404 break; 780 break;
405 case TO_RBI: /* RBI seems to succeed even without medium loaded. */ 781 case TO_RBI: /* RBI seems to succeed even without medium loaded. */
406 case TO_NOP: /* Same to NOP. */ 782 case TO_NOP: /* Same to NOP. */
@@ -409,8 +785,9 @@ tape_3590_done(struct tape_device *device, struct tape_request *request)
409 case TO_DIS: 785 case TO_DIS:
410 case TO_ASSIGN: 786 case TO_ASSIGN:
411 case TO_UNASSIGN: 787 case TO_UNASSIGN:
412 break;
413 case TO_SIZE: 788 case TO_SIZE:
789 case TO_KEKL_SET:
790 case TO_KEKL_QUERY:
414 break; 791 break;
415 } 792 }
416 return TAPE_IO_SUCCESS; 793 return TAPE_IO_SUCCESS;
@@ -540,10 +917,8 @@ static int
540tape_3590_erp_long_busy(struct tape_device *device, 917tape_3590_erp_long_busy(struct tape_device *device,
541 struct tape_request *request, struct irb *irb) 918 struct tape_request *request, struct irb *irb)
542{ 919{
543 /* FIXME: how about WAITING for a minute ? */ 920 DBF_EVENT(6, "Device is busy\n");
544 PRINT_WARN("(%s): Device is busy! Please wait a minute!\n", 921 return TAPE_IO_LONG_BUSY;
545 device->cdev->dev.bus_id);
546 return tape_3590_erp_basic(device, request, irb, -EBUSY);
547} 922}
548 923
549/* 924/*
@@ -951,6 +1326,34 @@ tape_3590_print_era_msg(struct tape_device *device, struct irb *irb)
951 device->cdev->dev.bus_id, sense->mc); 1326 device->cdev->dev.bus_id, sense->mc);
952} 1327}
953 1328
1329static int tape_3590_crypt_error(struct tape_device *device,
1330 struct tape_request *request, struct irb *irb)
1331{
1332 u8 cu_rc, ekm_rc1;
1333 u16 ekm_rc2;
1334 u32 drv_rc;
1335 char *bus_id, *sense;
1336
1337 sense = ((struct tape_3590_sense *) irb->ecw)->fmt.data;
1338 bus_id = device->cdev->dev.bus_id;
1339 cu_rc = sense[0];
 1340 drv_rc = *((u32 *) &sense[5]) & 0xffffff;
 1341 ekm_rc1 = sense[9];
 1342 ekm_rc2 = *((u16 *) &sense[10]);
1343 if ((cu_rc == 0) && (ekm_rc2 == 0xee31))
1344 /* key not defined on EKM */
1345 return tape_3590_erp_basic(device, request, irb, -EKEYREJECTED);
1346 if ((cu_rc == 1) || (cu_rc == 2))
1347 /* No connection to EKM */
1348 return tape_3590_erp_basic(device, request, irb, -ENOTCONN);
1349
1350 PRINT_ERR("(%s): Unable to get encryption key from EKM\n", bus_id);
1351 PRINT_ERR("(%s): CU=%02X DRIVE=%06X EKM=%02X:%04X\n", bus_id, cu_rc,
1352 drv_rc, ekm_rc1, ekm_rc2);
1353
1354 return tape_3590_erp_basic(device, request, irb, -ENOKEY);
1355}
1356
954/* 1357/*
955 * 3590 error Recovery routine: 1358 * 3590 error Recovery routine:
956 * If possible, it tries to recover from the error. If this is not possible, 1359 * If possible, it tries to recover from the error. If this is not possible,
@@ -979,6 +1382,8 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request,
979 1382
980 sense = (struct tape_3590_sense *) irb->ecw; 1383 sense = (struct tape_3590_sense *) irb->ecw;
981 1384
1385 DBF_EVENT(6, "Unit Check: RQC = %x\n", sense->rc_rqc);
1386
982 /* 1387 /*
983 * First check all RC-QRCs where we want to do something special 1388 * First check all RC-QRCs where we want to do something special
984 * - "break": basic error recovery is done 1389 * - "break": basic error recovery is done
@@ -999,6 +1404,8 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request,
999 case 0x2231: 1404 case 0x2231:
1000 tape_3590_print_era_msg(device, irb); 1405 tape_3590_print_era_msg(device, irb);
1001 return tape_3590_erp_special_interrupt(device, request, irb); 1406 return tape_3590_erp_special_interrupt(device, request, irb);
1407 case 0x2240:
1408 return tape_3590_crypt_error(device, request, irb);
1002 1409
1003 case 0x3010: 1410 case 0x3010:
1004 DBF_EVENT(2, "(%08x): Backward at Beginning of Partition\n", 1411 DBF_EVENT(2, "(%08x): Backward at Beginning of Partition\n",
@@ -1020,6 +1427,7 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request,
1020 DBF_EVENT(2, "(%08x): Rewind Unload complete\n", 1427 DBF_EVENT(2, "(%08x): Rewind Unload complete\n",
1021 device->cdev_id); 1428 device->cdev_id);
1022 tape_med_state_set(device, MS_UNLOADED); 1429 tape_med_state_set(device, MS_UNLOADED);
1430 tape_3590_schedule_work(device, TO_CRYPT_OFF);
1023 return tape_3590_erp_basic(device, request, irb, 0); 1431 return tape_3590_erp_basic(device, request, irb, 0);
1024 1432
1025 case 0x4010: 1433 case 0x4010:
@@ -1030,9 +1438,15 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request,
1030 PRINT_WARN("(%s): Tape operation when medium not loaded\n", 1438 PRINT_WARN("(%s): Tape operation when medium not loaded\n",
1031 device->cdev->dev.bus_id); 1439 device->cdev->dev.bus_id);
1032 tape_med_state_set(device, MS_UNLOADED); 1440 tape_med_state_set(device, MS_UNLOADED);
1441 tape_3590_schedule_work(device, TO_CRYPT_OFF);
1033 return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM); 1442 return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM);
1034 case 0x4012: /* Device Long Busy */ 1443 case 0x4012: /* Device Long Busy */
1444 /* XXX: Also use long busy handling here? */
1445 DBF_EVENT(6, "(%08x): LONG BUSY\n", device->cdev_id);
1035 tape_3590_print_era_msg(device, irb); 1446 tape_3590_print_era_msg(device, irb);
1447 return tape_3590_erp_basic(device, request, irb, -EBUSY);
1448 case 0x4014:
1449 DBF_EVENT(6, "(%08x): Crypto LONG BUSY\n", device->cdev_id);
1036 return tape_3590_erp_long_busy(device, request, irb); 1450 return tape_3590_erp_long_busy(device, request, irb);
1037 1451
1038 case 0x5010: 1452 case 0x5010:
@@ -1064,6 +1478,7 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request,
1064 case 0x5120: 1478 case 0x5120:
1065 case 0x1120: 1479 case 0x1120:
1066 tape_med_state_set(device, MS_UNLOADED); 1480 tape_med_state_set(device, MS_UNLOADED);
1481 tape_3590_schedule_work(device, TO_CRYPT_OFF);
1067 return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM); 1482 return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM);
1068 1483
1069 case 0x6020: 1484 case 0x6020:
@@ -1142,21 +1557,47 @@ tape_3590_setup_device(struct tape_device *device)
1142{ 1557{
1143 int rc; 1558 int rc;
1144 struct tape_3590_disc_data *data; 1559 struct tape_3590_disc_data *data;
1560 char *rdc_data;
1145 1561
1146 DBF_EVENT(6, "3590 device setup\n"); 1562 DBF_EVENT(6, "3590 device setup\n");
1147 data = kmalloc(sizeof(struct tape_3590_disc_data), 1563 data = kzalloc(sizeof(struct tape_3590_disc_data), GFP_KERNEL | GFP_DMA);
1148 GFP_KERNEL | GFP_DMA);
1149 if (data == NULL) 1564 if (data == NULL)
1150 return -ENOMEM; 1565 return -ENOMEM;
1151 data->read_back_op = READ_PREVIOUS; 1566 data->read_back_op = READ_PREVIOUS;
1152 device->discdata = data; 1567 device->discdata = data;
1153 1568
1154 if ((rc = tape_std_assign(device)) == 0) { 1569 rdc_data = kmalloc(64, GFP_KERNEL | GFP_DMA);
1155 /* Try to find out if medium is loaded */ 1570 if (!rdc_data) {
1156 if ((rc = tape_3590_sense_medium(device)) != 0) 1571 rc = -ENOMEM;
1157 DBF_LH(3, "3590 medium sense returned %d\n", rc); 1572 goto fail_kmalloc;
1573 }
 1574 rc = read_dev_chars(device->cdev, (void **)&rdc_data, 64);
 1575 if (rc) {
 1576 DBF_LH(3, "Read device characteristics failed!\n");
 1577 goto fail_rdc_data;
1578 }
1579 rc = tape_std_assign(device);
1580 if (rc)
1581 goto fail_rdc_data;
1582 if (rdc_data[31] == 0x13) {
1583 PRINT_INFO("Device has crypto support\n");
1584 data->crypt_info.capability |= TAPE390_CRYPT_SUPPORTED_MASK;
1585 tape_3592_disable_crypt(device);
1586 } else {
1587 DBF_EVENT(6, "Device has NO crypto support\n");
1158 } 1588 }
1589 /* Try to find out if medium is loaded */
1590 rc = tape_3590_sense_medium(device);
1591 if (rc) {
1592 DBF_LH(3, "3590 medium sense returned %d\n", rc);
1593 goto fail_rdc_data;
1594 }
 1595 kfree(rdc_data);
 1596 return 0;
1159 1596
1597fail_rdc_data:
1598 kfree(rdc_data);
1599fail_kmalloc:
1600 kfree(data);
1160 return rc; 1601 return rc;
1161} 1602}
1162 1603
diff --git a/drivers/s390/char/tape_3590.h b/drivers/s390/char/tape_3590.h
index cf274b9445a6..aa5138807af1 100644
--- a/drivers/s390/char/tape_3590.h
+++ b/drivers/s390/char/tape_3590.h
@@ -2,7 +2,7 @@
2 * drivers/s390/char/tape_3590.h 2 * drivers/s390/char/tape_3590.h
3 * tape device discipline for 3590 tapes. 3 * tape device discipline for 3590 tapes.
4 * 4 *
5 * Copyright (C) IBM Corp. 2001,2006 5 * Copyright IBM Corp. 2001,2006
6 * Author(s): Stefan Bader <shbader@de.ibm.com> 6 * Author(s): Stefan Bader <shbader@de.ibm.com>
7 * Michael Holzheu <holzheu@de.ibm.com> 7 * Michael Holzheu <holzheu@de.ibm.com>
8 * Martin Schwidefsky <schwidefsky@de.ibm.com> 8 * Martin Schwidefsky <schwidefsky@de.ibm.com>
@@ -38,16 +38,22 @@
38#define MSENSE_UNASSOCIATED 0x00 38#define MSENSE_UNASSOCIATED 0x00
39#define MSENSE_ASSOCIATED_MOUNT 0x01 39#define MSENSE_ASSOCIATED_MOUNT 0x01
40#define MSENSE_ASSOCIATED_UMOUNT 0x02 40#define MSENSE_ASSOCIATED_UMOUNT 0x02
41#define MSENSE_CRYPT_MASK 0x00000010
41 42
42#define TAPE_3590_MAX_MSG 0xb0 43#define TAPE_3590_MAX_MSG 0xb0
43 44
44/* Datatypes */ 45/* Datatypes */
45 46
46struct tape_3590_disc_data { 47struct tape_3590_disc_data {
47 unsigned char modeset_byte; 48 struct tape390_crypt_info crypt_info;
48 int read_back_op; 49 int read_back_op;
49}; 50};
50 51
52#define TAPE_3590_CRYPT_INFO(device) \
53 ((struct tape_3590_disc_data*)(device->discdata))->crypt_info
54#define TAPE_3590_READ_BACK_OP(device) \
55 ((struct tape_3590_disc_data*)(device->discdata))->read_back_op
56
51struct tape_3590_sense { 57struct tape_3590_sense {
52 58
53 unsigned int command_rej:1; 59 unsigned int command_rej:1;
@@ -118,7 +124,48 @@ struct tape_3590_sense {
118struct tape_3590_med_sense { 124struct tape_3590_med_sense {
119 unsigned int macst:4; 125 unsigned int macst:4;
120 unsigned int masst:4; 126 unsigned int masst:4;
121 char pad[127]; 127 char pad1[7];
128 unsigned int flags;
129 char pad2[116];
130} __attribute__ ((packed));
131
132/* Datastructures for 3592 encryption support */
133
134struct tape3592_kekl {
135 __u8 flags;
136 char label[64];
137} __attribute__ ((packed));
138
139struct tape3592_kekl_pair {
140 __u8 count;
141 struct tape3592_kekl kekl[2];
142} __attribute__ ((packed));
143
144struct tape3592_kekl_query_data {
145 __u16 len;
146 __u8 fmt;
147 __u8 mc;
148 __u32 id;
149 __u8 flags;
150 struct tape3592_kekl_pair kekls;
151 char reserved[116];
152} __attribute__ ((packed));
153
154struct tape3592_kekl_query_order {
155 __u8 code;
156 __u8 flags;
157 char reserved1[2];
158 __u8 max_count;
159 char reserved2[35];
160} __attribute__ ((packed));
161
162struct tape3592_kekl_set_order {
163 __u8 code;
164 __u8 flags;
165 char reserved1[2];
166 __u8 op;
167 struct tape3592_kekl_pair kekls;
168 char reserved2[120];
122} __attribute__ ((packed)); 169} __attribute__ ((packed));
123 170
124#endif /* _TAPE_3590_H */ 171#endif /* _TAPE_3590_H */
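
All of the packed structures above mirror fixed-format data exchanged with the control unit, so their sizes are load-bearing: from the field widths shown, tape_3590_med_sense must be 128 bytes, tape3592_kekl 65, and both the query data and the set order come out at exactly 256. A compile-time check along the following lines, placed for instance in the driver's init path, would pin the layout (a sketch, not part of the change; BUILD_BUG_ON is available in this tree):

	BUILD_BUG_ON(sizeof(struct tape_3590_med_sense) != 128);
	BUILD_BUG_ON(sizeof(struct tape3592_kekl) != 65);
	BUILD_BUG_ON(sizeof(struct tape3592_kekl_query_data) != 256);
	BUILD_BUG_ON(sizeof(struct tape3592_kekl_set_order) != 256);
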
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index c8a89b3b87d4..dd0ecaed592e 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -73,7 +73,7 @@ tapeblock_trigger_requeue(struct tape_device *device)
73/* 73/*
74 * Post finished request. 74 * Post finished request.
75 */ 75 */
76static inline void 76static void
77tapeblock_end_request(struct request *req, int uptodate) 77tapeblock_end_request(struct request *req, int uptodate)
78{ 78{
79 if (end_that_request_first(req, uptodate, req->hard_nr_sectors)) 79 if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
@@ -108,7 +108,7 @@ __tapeblock_end_request(struct tape_request *ccw_req, void *data)
108/* 108/*
109 * Feed the tape device CCW queue with requests supplied in a list. 109 * Feed the tape device CCW queue with requests supplied in a list.
110 */ 110 */
111static inline int 111static int
112tapeblock_start_request(struct tape_device *device, struct request *req) 112tapeblock_start_request(struct tape_device *device, struct request *req)
113{ 113{
114 struct tape_request * ccw_req; 114 struct tape_request * ccw_req;
diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c
index 31198c8f2718..9faea04e11e9 100644
--- a/drivers/s390/char/tape_char.c
+++ b/drivers/s390/char/tape_char.c
@@ -3,7 +3,7 @@
3 * character device frontend for tape device driver 3 * character device frontend for tape device driver
4 * 4 *
5 * S390 and zSeries version 5 * S390 and zSeries version
6 * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation 6 * Copyright IBM Corp. 2001,2006
7 * Author(s): Carsten Otte <cotte@de.ibm.com> 7 * Author(s): Carsten Otte <cotte@de.ibm.com>
8 * Michael Holzheu <holzheu@de.ibm.com> 8 * Michael Holzheu <holzheu@de.ibm.com>
9 * Tuan Ngo-Anh <ngoanh@de.ibm.com> 9 * Tuan Ngo-Anh <ngoanh@de.ibm.com>
@@ -89,22 +89,7 @@ tapechar_cleanup_device(struct tape_device *device)
89 device->nt = NULL; 89 device->nt = NULL;
90} 90}
91 91
92/* 92static int
93 * Terminate write command (we write two TMs and skip backward over last)
94 * This ensures that the tape is always correctly terminated.
95 * When the user writes afterwards a new file, he will overwrite the
96 * second TM and therefore one TM will remain to separate the
97 * two files on the tape...
98 */
99static inline void
100tapechar_terminate_write(struct tape_device *device)
101{
102 if (tape_mtop(device, MTWEOF, 1) == 0 &&
103 tape_mtop(device, MTWEOF, 1) == 0)
104 tape_mtop(device, MTBSR, 1);
105}
106
107static inline int
108tapechar_check_idalbuffer(struct tape_device *device, size_t block_size) 93tapechar_check_idalbuffer(struct tape_device *device, size_t block_size)
109{ 94{
110 struct idal_buffer *new; 95 struct idal_buffer *new;
@@ -137,7 +122,7 @@ tapechar_check_idalbuffer(struct tape_device *device, size_t block_size)
137/* 122/*
138 * Tape device read function 123 * Tape device read function
139 */ 124 */
140ssize_t 125static ssize_t
141tapechar_read(struct file *filp, char __user *data, size_t count, loff_t *ppos) 126tapechar_read(struct file *filp, char __user *data, size_t count, loff_t *ppos)
142{ 127{
143 struct tape_device *device; 128 struct tape_device *device;
@@ -201,7 +186,7 @@ tapechar_read(struct file *filp, char __user *data, size_t count, loff_t *ppos)
201/* 186/*
202 * Tape device write function 187 * Tape device write function
203 */ 188 */
204ssize_t 189static ssize_t
205tapechar_write(struct file *filp, const char __user *data, size_t count, loff_t *ppos) 190tapechar_write(struct file *filp, const char __user *data, size_t count, loff_t *ppos)
206{ 191{
207 struct tape_device *device; 192 struct tape_device *device;
@@ -291,7 +276,7 @@ tapechar_write(struct file *filp, const char __user *data, size_t count, loff_t
291/* 276/*
292 * Character frontend tape device open function. 277 * Character frontend tape device open function.
293 */ 278 */
294int 279static int
295tapechar_open (struct inode *inode, struct file *filp) 280tapechar_open (struct inode *inode, struct file *filp)
296{ 281{
297 struct tape_device *device; 282 struct tape_device *device;
@@ -326,7 +311,7 @@ tapechar_open (struct inode *inode, struct file *filp)
326 * Character frontend tape device release function. 311 * Character frontend tape device release function.
327 */ 312 */
328 313
329int 314static int
330tapechar_release(struct inode *inode, struct file *filp) 315tapechar_release(struct inode *inode, struct file *filp)
331{ 316{
332 struct tape_device *device; 317 struct tape_device *device;
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index c6c2e918b990..e2a8a1a04bab 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -3,7 +3,7 @@
3 * basic function of the tape device driver 3 * basic function of the tape device driver
4 * 4 *
5 * S390 and zSeries version 5 * S390 and zSeries version
6 * Copyright (C) 2001,2005 IBM Deutschland Entwicklung GmbH, IBM Corporation 6 * Copyright IBM Corp. 2001,2006
7 * Author(s): Carsten Otte <cotte@de.ibm.com> 7 * Author(s): Carsten Otte <cotte@de.ibm.com>
8 * Michael Holzheu <holzheu@de.ibm.com> 8 * Michael Holzheu <holzheu@de.ibm.com>
9 * Tuan Ngo-Anh <ngoanh@de.ibm.com> 9 * Tuan Ngo-Anh <ngoanh@de.ibm.com>
@@ -26,9 +26,11 @@
26#include "tape_std.h" 26#include "tape_std.h"
27 27
28#define PRINTK_HEADER "TAPE_CORE: " 28#define PRINTK_HEADER "TAPE_CORE: "
29#define LONG_BUSY_TIMEOUT 180 /* seconds */
29 30
30static void __tape_do_irq (struct ccw_device *, unsigned long, struct irb *); 31static void __tape_do_irq (struct ccw_device *, unsigned long, struct irb *);
31static void tape_delayed_next_request(struct work_struct *); 32static void tape_delayed_next_request(struct work_struct *);
33static void tape_long_busy_timeout(unsigned long data);
32 34
33/* 35/*
34 * One list to contain all tape devices of all disciplines, so 36 * One list to contain all tape devices of all disciplines, so
@@ -69,10 +71,12 @@ const char *tape_op_verbose[TO_SIZE] =
69 [TO_LOAD] = "LOA", [TO_READ_CONFIG] = "RCF", 71 [TO_LOAD] = "LOA", [TO_READ_CONFIG] = "RCF",
70 [TO_READ_ATTMSG] = "RAT", 72 [TO_READ_ATTMSG] = "RAT",
71 [TO_DIS] = "DIS", [TO_ASSIGN] = "ASS", 73 [TO_DIS] = "DIS", [TO_ASSIGN] = "ASS",
72 [TO_UNASSIGN] = "UAS" 74 [TO_UNASSIGN] = "UAS", [TO_CRYPT_ON] = "CON",
75 [TO_CRYPT_OFF] = "COF", [TO_KEKL_SET] = "KLS",
76 [TO_KEKL_QUERY] = "KLQ",
73}; 77};
74 78
75static inline int 79static int
76busid_to_int(char *bus_id) 80busid_to_int(char *bus_id)
77{ 81{
78 int dec; 82 int dec;
@@ -252,7 +256,7 @@ tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate)
252/* 256/*
253 * Stop running ccw. Has to be called with the device lock held. 257 * Stop running ccw. Has to be called with the device lock held.
254 */ 258 */
255static inline int 259static int
256__tape_cancel_io(struct tape_device *device, struct tape_request *request) 260__tape_cancel_io(struct tape_device *device, struct tape_request *request)
257{ 261{
258 int retries; 262 int retries;
@@ -346,6 +350,9 @@ tape_generic_online(struct tape_device *device,
346 return -EINVAL; 350 return -EINVAL;
347 } 351 }
348 352
353 init_timer(&device->lb_timeout);
354 device->lb_timeout.function = tape_long_busy_timeout;
355
349 /* Let the discipline have a go at the device. */ 356 /* Let the discipline have a go at the device. */
350 device->discipline = discipline; 357 device->discipline = discipline;
351 if (!try_module_get(discipline->owner)) { 358 if (!try_module_get(discipline->owner)) {
@@ -385,7 +392,7 @@ out:
385 return rc; 392 return rc;
386} 393}
387 394
388static inline void 395static void
389tape_cleanup_device(struct tape_device *device) 396tape_cleanup_device(struct tape_device *device)
390{ 397{
391 tapeblock_cleanup_device(device); 398 tapeblock_cleanup_device(device);
@@ -563,7 +570,7 @@ tape_generic_probe(struct ccw_device *cdev)
563 return ret; 570 return ret;
564} 571}
565 572
566static inline void 573static void
567__tape_discard_requests(struct tape_device *device) 574__tape_discard_requests(struct tape_device *device)
568{ 575{
569 struct tape_request * request; 576 struct tape_request * request;
@@ -703,7 +710,7 @@ tape_free_request (struct tape_request * request)
703 kfree(request); 710 kfree(request);
704} 711}
705 712
706static inline int 713static int
707__tape_start_io(struct tape_device *device, struct tape_request *request) 714__tape_start_io(struct tape_device *device, struct tape_request *request)
708{ 715{
709 int rc; 716 int rc;
@@ -733,7 +740,7 @@ __tape_start_io(struct tape_device *device, struct tape_request *request)
733 return rc; 740 return rc;
734} 741}
735 742
736static inline void 743static void
737__tape_start_next_request(struct tape_device *device) 744__tape_start_next_request(struct tape_device *device)
738{ 745{
739 struct list_head *l, *n; 746 struct list_head *l, *n;
@@ -801,7 +808,23 @@ tape_delayed_next_request(struct work_struct *work)
801 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 808 spin_unlock_irq(get_ccwdev_lock(device->cdev));
802} 809}
803 810
804static inline void 811static void tape_long_busy_timeout(unsigned long data)
812{
813 struct tape_request *request;
814 struct tape_device *device;
815
816 device = (struct tape_device *) data;
817 spin_lock_irq(get_ccwdev_lock(device->cdev));
818 request = list_entry(device->req_queue.next, struct tape_request, list);
 819 BUG_ON(request->status != TAPE_REQUEST_LONG_BUSY);
821 DBF_LH(6, "%08x: Long busy timeout.\n", device->cdev_id);
822 __tape_start_next_request(device);
823 device->lb_timeout.data = (unsigned long) tape_put_device(device);
824 spin_unlock_irq(get_ccwdev_lock(device->cdev));
825}
826
827static void
805__tape_end_request( 828__tape_end_request(
806 struct tape_device * device, 829 struct tape_device * device,
807 struct tape_request * request, 830 struct tape_request * request,
@@ -878,7 +901,7 @@ tape_dump_sense_dbf(struct tape_device *device, struct tape_request *request,
878 * and starts it if the tape is idle. Has to be called with 901 * and starts it if the tape is idle. Has to be called with
879 * the device lock held. 902 * the device lock held.
880 */ 903 */
881static inline int 904static int
882__tape_start_request(struct tape_device *device, struct tape_request *request) 905__tape_start_request(struct tape_device *device, struct tape_request *request)
883{ 906{
884 int rc; 907 int rc;
@@ -1094,7 +1117,22 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1094 /* May be an unsolicited irq */ 1117 /* May be an unsolicited irq */
1095 if(request != NULL) 1118 if(request != NULL)
1096 request->rescnt = irb->scsw.count; 1119 request->rescnt = irb->scsw.count;
1097 1120 else if ((irb->scsw.dstat == 0x85 || irb->scsw.dstat == 0x80) &&
1121 !list_empty(&device->req_queue)) {
1122 /* Not Ready to Ready after long busy ? */
1123 struct tape_request *req;
1124 req = list_entry(device->req_queue.next,
1125 struct tape_request, list);
1126 if (req->status == TAPE_REQUEST_LONG_BUSY) {
1127 DBF_EVENT(3, "(%08x): del timer\n", device->cdev_id);
1128 if (del_timer(&device->lb_timeout)) {
1129 device->lb_timeout.data = (unsigned long)
1130 tape_put_device(device);
1131 __tape_start_next_request(device);
1132 }
1133 return;
1134 }
1135 }
1098 if (irb->scsw.dstat != 0x0c) { 1136 if (irb->scsw.dstat != 0x0c) {
1099 /* Set the 'ONLINE' flag depending on sense byte 1 */ 1137 /* Set the 'ONLINE' flag depending on sense byte 1 */
1100 if(*(((__u8 *) irb->ecw) + 1) & SENSE_DRIVE_ONLINE) 1138 if(*(((__u8 *) irb->ecw) + 1) & SENSE_DRIVE_ONLINE)
@@ -1142,6 +1180,15 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1142 break; 1180 break;
1143 case TAPE_IO_PENDING: 1181 case TAPE_IO_PENDING:
1144 break; 1182 break;
1183 case TAPE_IO_LONG_BUSY:
1184 device->lb_timeout.data =
1185 (unsigned long)tape_get_device_reference(device);
1186 device->lb_timeout.expires = jiffies +
1187 LONG_BUSY_TIMEOUT * HZ;
1188 DBF_EVENT(3, "(%08x): add timer\n", device->cdev_id);
1189 add_timer(&device->lb_timeout);
1190 request->status = TAPE_REQUEST_LONG_BUSY;
1191 break;
1145 case TAPE_IO_RETRY: 1192 case TAPE_IO_RETRY:
1146 rc = __tape_start_io(device, request); 1193 rc = __tape_start_io(device, request);
1147 if (rc) 1194 if (rc)
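
Pulled together, the long-busy support added across tape_generic_online(), __tape_do_irq() and tape_long_busy_timeout() gives the timer the following life cycle. This is a consolidated sketch of the hunks above (all names are from the patch itself; it uses the pre-hrtimer init_timer/add_timer API of this tree):

/* 1. Online: prepare the per-device timer (tape_generic_online). */
init_timer(&device->lb_timeout);
device->lb_timeout.function = tape_long_busy_timeout;

/* 2. A request ends with TAPE_IO_LONG_BUSY: arm the timer, taking a
 *    device reference so the device cannot vanish (__tape_do_irq). */
device->lb_timeout.data = (unsigned long) tape_get_device_reference(device);
device->lb_timeout.expires = jiffies + LONG_BUSY_TIMEOUT * HZ;
add_timer(&device->lb_timeout);
request->status = TAPE_REQUEST_LONG_BUSY;

/* 3a. The drive signals ready again: cancel the timer, drop the
 *     reference and restart the queued request (__tape_do_irq). */
if (del_timer(&device->lb_timeout)) {
	device->lb_timeout.data = (unsigned long) tape_put_device(device);
	__tape_start_next_request(device);
}

/* 3b. Or the 180 second timeout fires first, and tape_long_busy_timeout()
 *     does the same from timer context. */
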
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index 09844621edc0..bc33068b9ce2 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -36,7 +36,7 @@
36struct tty_driver *tty3270_driver; 36struct tty_driver *tty3270_driver;
37static int tty3270_max_index; 37static int tty3270_max_index;
38 38
39struct raw3270_fn tty3270_fn; 39static struct raw3270_fn tty3270_fn;
40 40
41struct tty3270_cell { 41struct tty3270_cell {
42 unsigned char character; 42 unsigned char character;
@@ -119,8 +119,7 @@ static void tty3270_update(struct tty3270 *);
119/* 119/*
120 * Setup timeout for a device. On timeout trigger an update. 120 * Setup timeout for a device. On timeout trigger an update.
121 */ 121 */
122void 122static void tty3270_set_timer(struct tty3270 *tp, int expires)
123tty3270_set_timer(struct tty3270 *tp, int expires)
124{ 123{
125 if (expires == 0) { 124 if (expires == 0) {
126 if (timer_pending(&tp->timer) && del_timer(&tp->timer)) 125 if (timer_pending(&tp->timer) && del_timer(&tp->timer))
@@ -841,7 +840,7 @@ tty3270_del_views(void)
841 } 840 }
842} 841}
843 842
844struct raw3270_fn tty3270_fn = { 843static struct raw3270_fn tty3270_fn = {
845 .activate = tty3270_activate, 844 .activate = tty3270_activate,
846 .deactivate = tty3270_deactivate, 845 .deactivate = tty3270_deactivate,
847 .intv = (void *) tty3270_irq, 846 .intv = (void *) tty3270_irq,
@@ -1754,8 +1753,7 @@ static const struct tty_operations tty3270_ops = {
1754 .set_termios = tty3270_set_termios 1753 .set_termios = tty3270_set_termios
1755}; 1754};
1756 1755
1757void 1756static void tty3270_notifier(int index, int active)
1758tty3270_notifier(int index, int active)
1759{ 1757{
1760 if (active) 1758 if (active)
1761 tty_register_device(tty3270_driver, index, NULL); 1759 tty_register_device(tty3270_driver, index, NULL);
@@ -1767,8 +1765,7 @@ tty3270_notifier(int index, int active)
1767 * 3270 tty registration code called from tty_init(). 1765 * 3270 tty registration code called from tty_init().
 1768 * Most kernel services (incl. kmalloc) are available at this point. 1766 * Most kernel services (incl. kmalloc) are available at this point.
1769 */ 1767 */
1770int __init 1768static int __init tty3270_init(void)
1771tty3270_init(void)
1772{ 1769{
1773 struct tty_driver *driver; 1770 struct tty_driver *driver;
1774 int ret; 1771 int ret;
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index 6cb23040954b..8432a76b961e 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -3,7 +3,7 @@
3 * character device driver for reading z/VM system service records 3 * character device driver for reading z/VM system service records
4 * 4 *
5 * 5 *
6 * Copyright (C) 2004 IBM Corporation 6 * Copyright 2004 IBM Corporation
7 * character device driver for reading z/VM system service records, 7 * character device driver for reading z/VM system service records,
8 * Version 1.0 8 * Version 1.0
9 * Author(s): Xenia Tkatschow <xenia@us.ibm.com> 9 * Author(s): Xenia Tkatschow <xenia@us.ibm.com>
@@ -21,7 +21,7 @@
21#include <asm/cpcmd.h> 21#include <asm/cpcmd.h>
22#include <asm/debug.h> 22#include <asm/debug.h>
23#include <asm/ebcdic.h> 23#include <asm/ebcdic.h>
24#include "../net/iucv.h" 24#include <net/iucv/iucv.h>
25#include <linux/kmod.h> 25#include <linux/kmod.h>
26#include <linux/cdev.h> 26#include <linux/cdev.h>
27#include <linux/device.h> 27#include <linux/device.h>
@@ -60,12 +60,11 @@ struct vmlogrdr_priv_t {
60 char system_service[8]; 60 char system_service[8];
61 char internal_name[8]; 61 char internal_name[8];
62 char recording_name[8]; 62 char recording_name[8];
63 u16 pathid; 63 struct iucv_path *path;
64 int connection_established; 64 int connection_established;
65 int iucv_path_severed; 65 int iucv_path_severed;
66 iucv_MessagePending local_interrupt_buffer; 66 struct iucv_message local_interrupt_buffer;
67 atomic_t receive_ready; 67 atomic_t receive_ready;
68 iucv_handle_t iucv_handle;
69 int minor_num; 68 int minor_num;
70 char * buffer; 69 char * buffer;
71 char * current_position; 70 char * current_position;
@@ -97,40 +96,21 @@ static struct file_operations vmlogrdr_fops = {
97}; 96};
98 97
99 98
100static u8 iucvMagic[16] = { 99static void vmlogrdr_iucv_path_complete(struct iucv_path *, u8 ipuser[16]);
101 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 100static void vmlogrdr_iucv_path_severed(struct iucv_path *, u8 ipuser[16]);
102 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40 101static void vmlogrdr_iucv_message_pending(struct iucv_path *,
103}; 102 struct iucv_message *);
104 103
105 104
106static u8 mask[] = { 105static struct iucv_handler vmlogrdr_iucv_handler = {
107 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 106 .path_complete = vmlogrdr_iucv_path_complete,
108 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 107 .path_severed = vmlogrdr_iucv_path_severed,
109 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 108 .message_pending = vmlogrdr_iucv_message_pending,
110 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
111}; 109};
112 110
113 111
114static u8 iucv_host[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; 112static DECLARE_WAIT_QUEUE_HEAD(conn_wait_queue);
115 113static DECLARE_WAIT_QUEUE_HEAD(read_wait_queue);
116
117static void
118vmlogrdr_iucv_ConnectionComplete(iucv_ConnectionComplete *eib, void *pgm_data);
119static void
120vmlogrdr_iucv_ConnectionSevered(iucv_ConnectionSevered *eib, void *pgm_data);
121static void
122vmlogrdr_iucv_MessagePending(iucv_MessagePending *eib, void *pgm_data);
123
124
125static iucv_interrupt_ops_t vmlogrdr_iucvops = {
126 .ConnectionComplete = vmlogrdr_iucv_ConnectionComplete,
127 .ConnectionSevered = vmlogrdr_iucv_ConnectionSevered,
128 .MessagePending = vmlogrdr_iucv_MessagePending,
129};
130
131
132DECLARE_WAIT_QUEUE_HEAD(conn_wait_queue);
133DECLARE_WAIT_QUEUE_HEAD(read_wait_queue);
134 114
135/* 115/*
136 * pointer to system service private structure 116 * pointer to system service private structure
@@ -177,28 +157,29 @@ static struct cdev *vmlogrdr_cdev = NULL;
177static int recording_class_AB; 157static int recording_class_AB;
178 158
179 159
180static void 160static void vmlogrdr_iucv_path_complete(struct iucv_path *path, u8 ipuser[16])
181vmlogrdr_iucv_ConnectionComplete (iucv_ConnectionComplete * eib,
182 void * pgm_data)
183{ 161{
184 struct vmlogrdr_priv_t * logptr = pgm_data; 162 struct vmlogrdr_priv_t * logptr = path->private;
163
185 spin_lock(&logptr->priv_lock); 164 spin_lock(&logptr->priv_lock);
186 logptr->connection_established = 1; 165 logptr->connection_established = 1;
187 spin_unlock(&logptr->priv_lock); 166 spin_unlock(&logptr->priv_lock);
188 wake_up(&conn_wait_queue); 167 wake_up(&conn_wait_queue);
189 return;
190} 168}
191 169
192 170
193static void 171static void vmlogrdr_iucv_path_severed(struct iucv_path *path, u8 ipuser[16])
194vmlogrdr_iucv_ConnectionSevered (iucv_ConnectionSevered * eib, void * pgm_data)
195{ 172{
196 u8 reason = (u8) eib->ipuser[8]; 173 struct vmlogrdr_priv_t * logptr = path->private;
197 struct vmlogrdr_priv_t * logptr = pgm_data; 174 u8 reason = (u8) ipuser[8];
198 175
199 printk (KERN_ERR "vmlogrdr: connection severed with" 176 printk (KERN_ERR "vmlogrdr: connection severed with"
200 " reason %i\n", reason); 177 " reason %i\n", reason);
201 178
179 iucv_path_sever(path, NULL);
180 kfree(path);
181 logptr->path = NULL;
182
202 spin_lock(&logptr->priv_lock); 183 spin_lock(&logptr->priv_lock);
203 logptr->connection_established = 0; 184 logptr->connection_established = 0;
204 logptr->iucv_path_severed = 1; 185 logptr->iucv_path_severed = 1;
@@ -210,10 +191,10 @@ vmlogrdr_iucv_ConnectionSevered (iucv_ConnectionSevered * eib, void * pgm_data)
210} 191}
211 192
212 193
213static void 194static void vmlogrdr_iucv_message_pending(struct iucv_path *path,
214vmlogrdr_iucv_MessagePending (iucv_MessagePending * eib, void * pgm_data) 195 struct iucv_message *msg)
215{ 196{
216 struct vmlogrdr_priv_t * logptr = pgm_data; 197 struct vmlogrdr_priv_t * logptr = path->private;
217 198
218 /* 199 /*
219 * This function is the bottom half so it should be quick. 200 * This function is the bottom half so it should be quick.
@@ -221,15 +202,15 @@ vmlogrdr_iucv_MessagePending (iucv_MessagePending * eib, void * pgm_data)
221 * the usage count 202 * the usage count
222 */ 203 */
223 spin_lock(&logptr->priv_lock); 204 spin_lock(&logptr->priv_lock);
224 memcpy(&(logptr->local_interrupt_buffer), eib, sizeof(*eib)); 205 memcpy(&logptr->local_interrupt_buffer, msg, sizeof(*msg));
225 atomic_inc(&logptr->receive_ready); 206 atomic_inc(&logptr->receive_ready);
226 spin_unlock(&logptr->priv_lock); 207 spin_unlock(&logptr->priv_lock);
227 wake_up_interruptible(&read_wait_queue); 208 wake_up_interruptible(&read_wait_queue);
228} 209}
229 210
230 211
231static int 212static int vmlogrdr_get_recording_class_AB(void)
232vmlogrdr_get_recording_class_AB(void) { 213{
233 char cp_command[]="QUERY COMMAND RECORDING "; 214 char cp_command[]="QUERY COMMAND RECORDING ";
234 char cp_response[80]; 215 char cp_response[80];
235 char *tail; 216 char *tail;
@@ -259,8 +240,9 @@ vmlogrdr_get_recording_class_AB(void) {
259} 240}
260 241
261 242
262static int 243static int vmlogrdr_recording(struct vmlogrdr_priv_t * logptr,
263vmlogrdr_recording(struct vmlogrdr_priv_t * logptr, int action, int purge) { 244 int action, int purge)
245{
264 246
265 char cp_command[80]; 247 char cp_command[80];
266 char cp_response[160]; 248 char cp_response[160];
@@ -318,8 +300,7 @@ vmlogrdr_recording(struct vmlogrdr_priv_t * logptr, int action, int purge) {
318} 300}
319 301
320 302
321static int 303static int vmlogrdr_open (struct inode *inode, struct file *filp)
322vmlogrdr_open (struct inode *inode, struct file *filp)
323{ 304{
324 int dev_num = 0; 305 int dev_num = 0;
325 struct vmlogrdr_priv_t * logptr = NULL; 306 struct vmlogrdr_priv_t * logptr = NULL;
@@ -329,10 +310,7 @@ vmlogrdr_open (struct inode *inode, struct file *filp)
329 dev_num = iminor(inode); 310 dev_num = iminor(inode);
 330 if (dev_num >= MAXMINOR) 311 if (dev_num >= MAXMINOR)
331 return -ENODEV; 312 return -ENODEV;
332
333 logptr = &sys_ser[dev_num]; 313 logptr = &sys_ser[dev_num];
334 if (logptr == NULL)
335 return -ENODEV;
336 314
337 /* 315 /*
338 * only allow for blocking reads to be open 316 * only allow for blocking reads to be open
@@ -345,52 +323,38 @@ vmlogrdr_open (struct inode *inode, struct file *filp)
345 if (logptr->dev_in_use) { 323 if (logptr->dev_in_use) {
346 spin_unlock_bh(&logptr->priv_lock); 324 spin_unlock_bh(&logptr->priv_lock);
347 return -EBUSY; 325 return -EBUSY;
348 } else {
349 logptr->dev_in_use = 1;
350 spin_unlock_bh(&logptr->priv_lock);
351 } 326 }
352 327 logptr->dev_in_use = 1;
328 logptr->connection_established = 0;
329 logptr->iucv_path_severed = 0;
353 atomic_set(&logptr->receive_ready, 0); 330 atomic_set(&logptr->receive_ready, 0);
354 logptr->buffer_free = 1; 331 logptr->buffer_free = 1;
332 spin_unlock_bh(&logptr->priv_lock);
355 333
356 /* set the file options */ 334 /* set the file options */
357 filp->private_data = logptr; 335 filp->private_data = logptr;
358 filp->f_op = &vmlogrdr_fops; 336 filp->f_op = &vmlogrdr_fops;
359 337
360 /* start recording for this service*/ 338 /* start recording for this service*/
361 ret=0; 339 if (logptr->autorecording) {
362 if (logptr->autorecording)
363 ret = vmlogrdr_recording(logptr,1,logptr->autopurge); 340 ret = vmlogrdr_recording(logptr,1,logptr->autopurge);
364 if (ret) 341 if (ret)
365 printk (KERN_WARNING "vmlogrdr: failed to start " 342 printk (KERN_WARNING "vmlogrdr: failed to start "
366 "recording automatically\n"); 343 "recording automatically\n");
367
368 /* Register with iucv driver */
369 logptr->iucv_handle = iucv_register_program(iucvMagic,
370 logptr->system_service, mask, &vmlogrdr_iucvops,
371 logptr);
372
373 if (logptr->iucv_handle == NULL) {
374 printk (KERN_ERR "vmlogrdr: failed to register with"
375 "iucv driver\n");
376 goto not_registered;
377 } 344 }
378 345
379 /* create connection to the system service */ 346 /* create connection to the system service */
380 spin_lock_bh(&logptr->priv_lock); 347 logptr->path = iucv_path_alloc(10, 0, GFP_KERNEL);
381 logptr->connection_established = 0; 348 if (!logptr->path)
382 logptr->iucv_path_severed = 0; 349 goto out_dev;
383 spin_unlock_bh(&logptr->priv_lock); 350 connect_rc = iucv_path_connect(logptr->path, &vmlogrdr_iucv_handler,
384 351 logptr->system_service, NULL, NULL,
385 connect_rc = iucv_connect (&(logptr->pathid), 10, iucvMagic, 352 logptr);
386 logptr->system_service, iucv_host, 0,
387 NULL, NULL,
388 logptr->iucv_handle, NULL);
389 if (connect_rc) { 353 if (connect_rc) {
390 printk (KERN_ERR "vmlogrdr: iucv connection to %s " 354 printk (KERN_ERR "vmlogrdr: iucv connection to %s "
391 "failed with rc %i \n", logptr->system_service, 355 "failed with rc %i \n", logptr->system_service,
392 connect_rc); 356 connect_rc);
393 goto not_connected; 357 goto out_path;
394 } 358 }
395 359
396 /* We've issued the connect and now we must wait for a 360 /* We've issued the connect and now we must wait for a
@@ -399,35 +363,28 @@ vmlogrdr_open (struct inode *inode, struct file *filp)
399 */ 363 */
400 wait_event(conn_wait_queue, (logptr->connection_established) 364 wait_event(conn_wait_queue, (logptr->connection_established)
401 || (logptr->iucv_path_severed)); 365 || (logptr->iucv_path_severed));
402 if (logptr->iucv_path_severed) { 366 if (logptr->iucv_path_severed)
403 goto not_connected; 367 goto out_record;
404 }
405
406 return nonseekable_open(inode, filp); 368 return nonseekable_open(inode, filp);
407 369
408not_connected: 370out_record:
409 iucv_unregister_program(logptr->iucv_handle);
410 logptr->iucv_handle = NULL;
411not_registered:
412 if (logptr->autorecording) 371 if (logptr->autorecording)
413 vmlogrdr_recording(logptr,0,logptr->autopurge); 372 vmlogrdr_recording(logptr,0,logptr->autopurge);
373out_path:
374 kfree(logptr->path); /* kfree(NULL) is ok. */
375 logptr->path = NULL;
376out_dev:
414 logptr->dev_in_use = 0; 377 logptr->dev_in_use = 0;
415 return -EIO; 378 return -EIO;
416
417
418} 379}
419 380
420 381
421static int 382static int vmlogrdr_release (struct inode *inode, struct file *filp)
422vmlogrdr_release (struct inode *inode, struct file *filp)
423{ 383{
424 int ret; 384 int ret;
425 385
426 struct vmlogrdr_priv_t * logptr = filp->private_data; 386 struct vmlogrdr_priv_t * logptr = filp->private_data;
427 387
428 iucv_unregister_program(logptr->iucv_handle);
429 logptr->iucv_handle = NULL;
430
431 if (logptr->autorecording) { 388 if (logptr->autorecording) {
432 ret = vmlogrdr_recording(logptr,0,logptr->autopurge); 389 ret = vmlogrdr_recording(logptr,0,logptr->autopurge);
433 if (ret) 390 if (ret)
@@ -440,8 +397,8 @@ vmlogrdr_release (struct inode *inode, struct file *filp)
440} 397}
441 398
442 399
443static int 400static int vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv)
444vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv) { 401{
445 int rc, *temp; 402 int rc, *temp;
446 /* we need to keep track of two data sizes here: 403 /* we need to keep track of two data sizes here:
447 * The number of bytes we need to receive from iucv and 404 * The number of bytes we need to receive from iucv and
@@ -462,8 +419,7 @@ vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv) {
462 * We need to return the total length of the record 419 * We need to return the total length of the record
463 * + size of FENCE in the first 4 bytes of the buffer. 420 * + size of FENCE in the first 4 bytes of the buffer.
464 */ 421 */
465 iucv_data_count = 422 iucv_data_count = priv->local_interrupt_buffer.length;
466 priv->local_interrupt_buffer.ln1msg2.ipbfln1f;
467 user_data_count = sizeof(int); 423 user_data_count = sizeof(int);
468 temp = (int*)priv->buffer; 424 temp = (int*)priv->buffer;
469 *temp= iucv_data_count + sizeof(FENCE); 425 *temp= iucv_data_count + sizeof(FENCE);
@@ -475,14 +431,10 @@ vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv) {
475 */ 431 */
476 if (iucv_data_count > NET_BUFFER_SIZE) 432 if (iucv_data_count > NET_BUFFER_SIZE)
477 iucv_data_count = NET_BUFFER_SIZE; 433 iucv_data_count = NET_BUFFER_SIZE;
478 rc = iucv_receive(priv->pathid, 434 rc = iucv_message_receive(priv->path,
479 priv->local_interrupt_buffer.ipmsgid, 435 &priv->local_interrupt_buffer,
480 priv->local_interrupt_buffer.iptrgcls, 436 0, buffer, iucv_data_count,
481 buffer, 437 &priv->residual_length);
482 iucv_data_count,
483 NULL,
484 NULL,
485 &priv->residual_length);
486 spin_unlock_bh(&priv->priv_lock); 438 spin_unlock_bh(&priv->priv_lock);
 487 /* An rc of 5 indicates that the record was bigger than 439 /* An rc of 5 indicates that the record was bigger than
488 * the buffer, which is OK for us. A 9 indicates that the 440 * the buffer, which is OK for us. A 9 indicates that the
@@ -514,8 +466,8 @@ vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv) {
514} 466}
515 467
516 468
517static ssize_t 469static ssize_t vmlogrdr_read(struct file *filp, char __user *data,
518vmlogrdr_read(struct file *filp, char __user *data, size_t count, loff_t * ppos) 470 size_t count, loff_t * ppos)
519{ 471{
520 int rc; 472 int rc;
521 struct vmlogrdr_priv_t * priv = filp->private_data; 473 struct vmlogrdr_priv_t * priv = filp->private_data;
@@ -547,8 +499,10 @@ vmlogrdr_read(struct file *filp, char __user *data, size_t count, loff_t * ppos)
547 return count; 499 return count;
548} 500}
549 501
550static ssize_t 502static ssize_t vmlogrdr_autopurge_store(struct device * dev,
551vmlogrdr_autopurge_store(struct device * dev, struct device_attribute *attr, const char * buf, size_t count) { 503 struct device_attribute *attr,
504 const char * buf, size_t count)
505{
552 struct vmlogrdr_priv_t *priv = dev->driver_data; 506 struct vmlogrdr_priv_t *priv = dev->driver_data;
553 ssize_t ret = count; 507 ssize_t ret = count;
554 508
@@ -566,8 +520,10 @@ vmlogrdr_autopurge_store(struct device * dev, struct device_attribute *attr, con
566} 520}
567 521
568 522
569static ssize_t 523static ssize_t vmlogrdr_autopurge_show(struct device *dev,
570vmlogrdr_autopurge_show(struct device *dev, struct device_attribute *attr, char *buf) { 524 struct device_attribute *attr,
525 char *buf)
526{
571 struct vmlogrdr_priv_t *priv = dev->driver_data; 527 struct vmlogrdr_priv_t *priv = dev->driver_data;
572 return sprintf(buf, "%u\n", priv->autopurge); 528 return sprintf(buf, "%u\n", priv->autopurge);
573} 529}
@@ -577,8 +533,10 @@ static DEVICE_ATTR(autopurge, 0644, vmlogrdr_autopurge_show,
577 vmlogrdr_autopurge_store); 533 vmlogrdr_autopurge_store);
578 534
579 535
580static ssize_t 536static ssize_t vmlogrdr_purge_store(struct device * dev,
581vmlogrdr_purge_store(struct device * dev, struct device_attribute *attr, const char * buf, size_t count) { 537 struct device_attribute *attr,
538 const char * buf, size_t count)
539{
582 540
583 char cp_command[80]; 541 char cp_command[80];
584 char cp_response[80]; 542 char cp_response[80];
@@ -618,9 +576,10 @@ vmlogrdr_purge_store(struct device * dev, struct device_attribute *attr, const c
618static DEVICE_ATTR(purge, 0200, NULL, vmlogrdr_purge_store); 576static DEVICE_ATTR(purge, 0200, NULL, vmlogrdr_purge_store);
619 577
620 578
621static ssize_t 579static ssize_t vmlogrdr_autorecording_store(struct device *dev,
622vmlogrdr_autorecording_store(struct device *dev, struct device_attribute *attr, const char *buf, 580 struct device_attribute *attr,
623 size_t count) { 581 const char *buf, size_t count)
582{
624 struct vmlogrdr_priv_t *priv = dev->driver_data; 583 struct vmlogrdr_priv_t *priv = dev->driver_data;
625 ssize_t ret = count; 584 ssize_t ret = count;
626 585
@@ -638,8 +597,10 @@ vmlogrdr_autorecording_store(struct device *dev, struct device_attribute *attr,
638} 597}
639 598
640 599
641static ssize_t 600static ssize_t vmlogrdr_autorecording_show(struct device *dev,
642vmlogrdr_autorecording_show(struct device *dev, struct device_attribute *attr, char *buf) { 601 struct device_attribute *attr,
602 char *buf)
603{
643 struct vmlogrdr_priv_t *priv = dev->driver_data; 604 struct vmlogrdr_priv_t *priv = dev->driver_data;
644 return sprintf(buf, "%u\n", priv->autorecording); 605 return sprintf(buf, "%u\n", priv->autorecording);
645} 606}
@@ -649,9 +610,10 @@ static DEVICE_ATTR(autorecording, 0644, vmlogrdr_autorecording_show,
649 vmlogrdr_autorecording_store); 610 vmlogrdr_autorecording_store);
650 611
651 612
652static ssize_t 613static ssize_t vmlogrdr_recording_store(struct device * dev,
653vmlogrdr_recording_store(struct device * dev, struct device_attribute *attr, const char * buf, size_t count) { 614 struct device_attribute *attr,
654 615 const char * buf, size_t count)
616{
655 struct vmlogrdr_priv_t *priv = dev->driver_data; 617 struct vmlogrdr_priv_t *priv = dev->driver_data;
656 ssize_t ret; 618 ssize_t ret;
657 619
@@ -676,8 +638,9 @@ vmlogrdr_recording_store(struct device * dev, struct device_attribute *attr, con
676static DEVICE_ATTR(recording, 0200, NULL, vmlogrdr_recording_store); 638static DEVICE_ATTR(recording, 0200, NULL, vmlogrdr_recording_store);
677 639
678 640
679static ssize_t 641static ssize_t vmlogrdr_recording_status_show(struct device_driver *driver,
680vmlogrdr_recording_status_show(struct device_driver *driver, char *buf) { 642 char *buf)
643{
681 644
682 char cp_command[] = "QUERY RECORDING "; 645 char cp_command[] = "QUERY RECORDING ";
683 int len; 646 int len;
@@ -710,52 +673,63 @@ static struct device_driver vmlogrdr_driver = {
710}; 673};
711 674
712 675
713static int 676static int vmlogrdr_register_driver(void)
714vmlogrdr_register_driver(void) { 677{
715 int ret; 678 int ret;
716 679
680 /* Register with iucv driver */
681 ret = iucv_register(&vmlogrdr_iucv_handler, 1);
682 if (ret) {
 683 printk(KERN_ERR "vmlogrdr: failed to register with"
 684 " iucv driver\n");
685 goto out;
686 }
687
717 ret = driver_register(&vmlogrdr_driver); 688 ret = driver_register(&vmlogrdr_driver);
718 if (ret) { 689 if (ret) {
719 printk(KERN_ERR "vmlogrdr: failed to register driver.\n"); 690 printk(KERN_ERR "vmlogrdr: failed to register driver.\n");
720 return ret; 691 goto out_iucv;
721 } 692 }
722 693
723 ret = driver_create_file(&vmlogrdr_driver, 694 ret = driver_create_file(&vmlogrdr_driver,
724 &driver_attr_recording_status); 695 &driver_attr_recording_status);
725 if (ret) { 696 if (ret) {
726 printk(KERN_ERR "vmlogrdr: failed to add driver attribute.\n"); 697 printk(KERN_ERR "vmlogrdr: failed to add driver attribute.\n");
727 goto unregdriver; 698 goto out_driver;
728 } 699 }
729 700
730 vmlogrdr_class = class_create(THIS_MODULE, "vmlogrdr"); 701 vmlogrdr_class = class_create(THIS_MODULE, "vmlogrdr");
731 if (IS_ERR(vmlogrdr_class)) { 702 if (IS_ERR(vmlogrdr_class)) {
732 printk(KERN_ERR "vmlogrdr: failed to create class.\n"); 703 printk(KERN_ERR "vmlogrdr: failed to create class.\n");
733 ret=PTR_ERR(vmlogrdr_class); 704 ret = PTR_ERR(vmlogrdr_class);
734 vmlogrdr_class=NULL; 705 vmlogrdr_class = NULL;
735 goto unregattr; 706 goto out_attr;
736 } 707 }
737 return 0; 708 return 0;
738 709
739unregattr: 710out_attr:
740 driver_remove_file(&vmlogrdr_driver, &driver_attr_recording_status); 711 driver_remove_file(&vmlogrdr_driver, &driver_attr_recording_status);
741unregdriver: 712out_driver:
742 driver_unregister(&vmlogrdr_driver); 713 driver_unregister(&vmlogrdr_driver);
714out_iucv:
715 iucv_unregister(&vmlogrdr_iucv_handler, 1);
716out:
743 return ret; 717 return ret;
744} 718}
745 719
746 720
747static void 721static void vmlogrdr_unregister_driver(void)
748vmlogrdr_unregister_driver(void) { 722{
749 class_destroy(vmlogrdr_class); 723 class_destroy(vmlogrdr_class);
750 vmlogrdr_class = NULL; 724 vmlogrdr_class = NULL;
751 driver_remove_file(&vmlogrdr_driver, &driver_attr_recording_status); 725 driver_remove_file(&vmlogrdr_driver, &driver_attr_recording_status);
752 driver_unregister(&vmlogrdr_driver); 726 driver_unregister(&vmlogrdr_driver);
753 return; 727 iucv_unregister(&vmlogrdr_iucv_handler, 1);
754} 728}
755 729
756 730
757static int 731static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
758vmlogrdr_register_device(struct vmlogrdr_priv_t *priv) { 732{
759 struct device *dev; 733 struct device *dev;
760 int ret; 734 int ret;
761 735
@@ -804,9 +778,10 @@ vmlogrdr_register_device(struct vmlogrdr_priv_t *priv) {
804} 778}
805 779
806 780
807static int 781static int vmlogrdr_unregister_device(struct vmlogrdr_priv_t *priv)
808vmlogrdr_unregister_device(struct vmlogrdr_priv_t *priv ) { 782{
809 class_device_destroy(vmlogrdr_class, MKDEV(vmlogrdr_major, priv->minor_num)); 783 class_device_destroy(vmlogrdr_class,
784 MKDEV(vmlogrdr_major, priv->minor_num));
810 if (priv->device != NULL) { 785 if (priv->device != NULL) {
811 sysfs_remove_group(&priv->device->kobj, &vmlogrdr_attr_group); 786 sysfs_remove_group(&priv->device->kobj, &vmlogrdr_attr_group);
812 device_unregister(priv->device); 787 device_unregister(priv->device);
@@ -816,8 +791,8 @@ vmlogrdr_unregister_device(struct vmlogrdr_priv_t *priv ) {
816} 791}
817 792
818 793
819static int 794static int vmlogrdr_register_cdev(dev_t dev)
820vmlogrdr_register_cdev(dev_t dev) { 795{
821 int rc = 0; 796 int rc = 0;
822 vmlogrdr_cdev = cdev_alloc(); 797 vmlogrdr_cdev = cdev_alloc();
823 if (!vmlogrdr_cdev) { 798 if (!vmlogrdr_cdev) {
@@ -837,9 +812,10 @@ vmlogrdr_register_cdev(dev_t dev) {
837} 812}
838 813
839 814
840static void 815static void vmlogrdr_cleanup(void)
841vmlogrdr_cleanup(void) { 816{
842 int i; 817 int i;
818
843 if (vmlogrdr_cdev) { 819 if (vmlogrdr_cdev) {
844 cdev_del(vmlogrdr_cdev); 820 cdev_del(vmlogrdr_cdev);
845 vmlogrdr_cdev=NULL; 821 vmlogrdr_cdev=NULL;
@@ -856,8 +832,7 @@ vmlogrdr_cleanup(void) {
856} 832}
857 833
858 834
859static int 835static int vmlogrdr_init(void)
860vmlogrdr_init(void)
861{ 836{
862 int rc; 837 int rc;
863 int i; 838 int i;
@@ -907,8 +882,7 @@ cleanup:
907} 882}
908 883
909 884
910static void 885static void vmlogrdr_exit(void)
911vmlogrdr_exit(void)
912{ 886{
913 vmlogrdr_cleanup(); 887 vmlogrdr_cleanup();
914 printk (KERN_INFO "vmlogrdr: driver unloaded\n"); 888 printk (KERN_INFO "vmlogrdr: driver unloaded\n");
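A note on the error handling above: the reworked vmlogrdr_register_driver() unwinds partially completed setup in reverse order through a chain of labels (out_attr, out_driver, out_iucv, out), so every failure path releases exactly what was acquired before it. A minimal user-space sketch of the same pattern, with invented step/undo names standing in for iucv_register(), driver_register() and friends:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical setup steps; each returns 0 on success. */
static int step_a(void) { return 0; }
static int step_b(void) { return 0; }
static int step_c(void) { return -1; /* simulate a failure */ }

static void undo_a(void) { puts("undo a"); }
static void undo_b(void) { puts("undo b"); }

static int register_everything(void)
{
	int ret;

	ret = step_a();
	if (ret)
		goto out;
	ret = step_b();
	if (ret)
		goto out_a;		/* only a succeeded so far */
	ret = step_c();
	if (ret)
		goto out_b;		/* a and b succeeded */
	return 0;

out_b:					/* fall through in reverse order */
	undo_b();
out_a:
	undo_a();
out:
	return ret;
}

int main(void)
{
	return register_everything() ? EXIT_FAILURE : EXIT_SUCCESS;
}

Each label undoes the steps that completed before the failing one, which is why the labels appear in the opposite order of the setup calls.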
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index 12c2d6b746e6..aa65df4dfced 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -43,7 +43,7 @@ typedef enum {add, free} range_action;
  * Function: blacklist_range
  * (Un-)blacklist the devices from-to
  */
-static inline void
+static void
 blacklist_range (range_action action, unsigned int from, unsigned int to,
 		 unsigned int ssid)
 {
@@ -69,7 +69,7 @@ blacklist_range (range_action action, unsigned int from, unsigned int to,
  * Get devno/busid from given string.
  * Shamelessly grabbed from dasd_devmap.c.
  */
-static inline int
+static int
 blacklist_busid(char **str, int *id0, int *ssid, int *devno)
 {
 	int val, old_style;
@@ -123,10 +123,10 @@ confused:
 	return 1;
 }
 
-static inline int
+static int
 blacklist_parse_parameters (char *str, range_action action)
 {
-	unsigned int from, to, from_id0, to_id0, from_ssid, to_ssid;
+	int from, to, from_id0, to_id0, from_ssid, to_ssid;
 
 	while (*str != 0 && *str != '\n') {
 		range_action ra = action;
@@ -227,7 +227,7 @@ is_blacklisted (int ssid, int devno)
  * Function: blacklist_parse_proc_parameters
  * parse the stuff which is piped to /proc/cio_ignore
  */
-static inline void
+static void
 blacklist_parse_proc_parameters (char *buf)
 {
 	if (strncmp (buf, "free ", 5) == 0) {
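The one non-mechanical change in blacklist.c is the switch from unsigned int to int for the from/to and id variables in blacklist_parse_parameters(). A plausible reading is that it matches blacklist_busid(), which returns its results through plain int pointers; taking the address of an unsigned int where an int * is expected is exactly the kind of signedness mismatch sparse flags. A small stand-alone illustration, where parse_busid() is a hypothetical stand-in for blacklist_busid():

#include <stdio.h>
#include <stdlib.h>

/* Mirrors the blacklist_busid() calling convention: results come
 * back through plain "int *" output parameters. */
static int parse_busid(const char *str, int *id0, int *ssid, int *devno)
{
	char *end;

	*id0 = (int) strtol(str, &end, 16);
	if (*end++ != '.')
		return -1;
	*ssid = (int) strtol(end, &end, 16);
	if (*end++ != '.')
		return -1;
	*devno = (int) strtol(end, &end, 16);
	return *end ? -1 : 0;
}

int main(void)
{
	int id0, ssid, devno;	/* "int", matching the interface; declaring
				 * these "unsigned int" would make &devno
				 * below a pointer of the wrong signedness */

	if (parse_busid("0.0.4711", &id0, &ssid, &devno))
		return 1;
	printf("%x.%x.%04x\n", id0, ssid, devno);
	return 0;
}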
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 38954f5cd14c..d48e3ca4752c 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -53,7 +53,7 @@ ccwgroup_uevent (struct device *dev, char **envp, int num_envp, char *buffer,
 
 static struct bus_type ccwgroup_bus_type;
 
-static inline void
+static void
 __ccwgroup_remove_symlinks(struct ccwgroup_device *gdev)
 {
 	int i;
@@ -104,7 +104,7 @@ ccwgroup_release (struct device *dev)
 	kfree(gdev);
 }
 
-static inline int
+static int
 __ccwgroup_create_symlinks(struct ccwgroup_device *gdev)
 {
 	char str[8];
@@ -424,7 +424,7 @@ ccwgroup_probe_ccwdev(struct ccw_device *cdev)
 	return 0;
 }
 
-static inline struct ccwgroup_device *
+static struct ccwgroup_device *
 __ccwgroup_get_gdev_by_cdev(struct ccw_device *cdev)
 {
 	struct ccwgroup_device *gdev;
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index cbab8d2ce5cf..6f05a44e3817 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -93,7 +93,7 @@ chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
 		u16 sch;	  /* subchannel */
 		u8 chpid[8];	  /* chpids 0-7 */
 		u16 fla[8];	  /* full link addresses 0-7 */
-	} *ssd_area;
+	} __attribute__ ((packed)) *ssd_area;
 
 	ssd_area = page;
 
@@ -277,7 +277,7 @@ out_unreg:
 	return 0;
 }
 
-static inline void
+static void
 s390_set_chpid_offline( __u8 chpid)
 {
 	char dbf_txt[15];
@@ -338,7 +338,7 @@ s390_process_res_acc_sch(struct res_acc_data *res_data, struct subchannel *sch)
 	return 0x80 >> chp;
 }
 
-static inline int
+static int
 s390_process_res_acc_new_sch(struct subchannel_id schid)
 {
 	struct schib schib;
@@ -444,7 +444,7 @@ __get_chpid_from_lir(void *data)
 		u32 andesc[28];
 		/* incident-specific information */
 		u32 isinfo[28];
-	} *lir;
+	} __attribute__ ((packed)) *lir;
 
 	lir = data;
 	if (!(lir->iq&0x80))
@@ -461,154 +461,146 @@ __get_chpid_from_lir(void *data)
 	return (u16) (lir->indesc[0]&0x000000ff);
 }
 
-int
-chsc_process_crw(void)
+struct chsc_sei_area {
+	struct chsc_header request;
+	u32 reserved1;
+	u32 reserved2;
+	u32 reserved3;
+	struct chsc_header response;
+	u32 reserved4;
+	u8  flags;
+	u8  vf;		/* validity flags */
+	u8  rs;		/* reporting source */
+	u8  cc;		/* content code */
+	u16 fla;	/* full link address */
+	u16 rsid;	/* reporting source id */
+	u32 reserved5;
+	u32 reserved6;
+	u8 ccdf[4096 - 16 - 24];	/* content-code dependent field */
+	/* ccdf has to be big enough for a link-incident record */
+} __attribute__ ((packed));
+
+static int chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
+{
+	int chpid;
+
+	CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
+		      sei_area->rs, sei_area->rsid);
+	if (sei_area->rs != 4)
+		return 0;
+	chpid = __get_chpid_from_lir(sei_area->ccdf);
+	if (chpid < 0)
+		CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
+	else
+		s390_set_chpid_offline(chpid);
+
+	return 0;
+}
+
+static int chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
 {
-	int chpid, ret;
 	struct res_acc_data res_data;
-	struct {
-		struct chsc_header request;
-		u32 reserved1;
-		u32 reserved2;
-		u32 reserved3;
-		struct chsc_header response;
-		u32 reserved4;
-		u8  flags;
-		u8  vf;		/* validity flags */
-		u8  rs;		/* reporting source */
-		u8  cc;		/* content code */
-		u16 fla;	/* full link address */
-		u16 rsid;	/* reporting source id */
-		u32 reserved5;
-		u32 reserved6;
-		u32 ccdf[96];	/* content-code dependent field */
-		/* ccdf has to be big enough for a link-incident record */
-	} *sei_area;
+	struct device *dev;
+	int status;
+	int rc;
+
+	CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
+		      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
+	if (sei_area->rs != 4)
+		return 0;
+	/* allocate a new channel path structure, if needed */
+	status = get_chp_status(sei_area->rsid);
+	if (status < 0)
+		new_channel_path(sei_area->rsid);
+	else if (!status)
+		return 0;
+	dev = get_device(&css[0]->chps[sei_area->rsid]->dev);
+	memset(&res_data, 0, sizeof(struct res_acc_data));
+	res_data.chp = to_channelpath(dev);
+	if ((sei_area->vf & 0xc0) != 0) {
+		res_data.fla = sei_area->fla;
+		if ((sei_area->vf & 0xc0) == 0xc0)
+			/* full link address */
+			res_data.fla_mask = 0xffff;
+		else
+			/* link address */
+			res_data.fla_mask = 0xff00;
+	}
+	rc = s390_process_res_acc(&res_data);
+	put_device(dev);
+
+	return rc;
+}
+
+static int chsc_process_sei(struct chsc_sei_area *sei_area)
+{
+	int rc;
+
+	/* Check if we might have lost some information. */
+	if (sei_area->flags & 0x40)
+		CIO_CRW_EVENT(2, "chsc: event overflow\n");
+	/* which kind of information was stored? */
+	rc = 0;
+	switch (sei_area->cc) {
+	case 1: /* link incident*/
+		rc = chsc_process_sei_link_incident(sei_area);
+		break;
+	case 2: /* i/o resource accessibiliy */
+		rc = chsc_process_sei_res_acc(sei_area);
+		break;
+	default: /* other stuff */
+		CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
+			      sei_area->cc);
+		break;
+	}
+
+	return rc;
+}
+
+int chsc_process_crw(void)
+{
+	struct chsc_sei_area *sei_area;
+	int ret;
+	int rc;
 
 	if (!sei_page)
 		return 0;
-	/*
-	 * build the chsc request block for store event information
-	 * and do the call
-	 * This function is only called by the machine check handler thread,
-	 * so we don't need locking for the sei_page.
-	 */
+	/* Access to sei_page is serialized through machine check handler
+	 * thread, so no need for locking. */
 	sei_area = sei_page;
 
 	CIO_TRACE_EVENT( 2, "prcss");
 	ret = 0;
 	do {
-		int ccode, status;
-		struct device *dev;
 		memset(sei_area, 0, sizeof(*sei_area));
-		memset(&res_data, 0, sizeof(struct res_acc_data));
 		sei_area->request.length = 0x0010;
 		sei_area->request.code = 0x000e;
+		if (chsc(sei_area))
+			break;
 
-		ccode = chsc(sei_area);
-		if (ccode > 0)
-			return 0;
-
-		switch (sei_area->response.code) {
-		/* for debug purposes, check for problems */
-		case 0x0001:
-			CIO_CRW_EVENT(4, "chsc_process_crw: event information "
-				      "successfully stored\n");
-			break; /* everything ok */
-		case 0x0002:
-			CIO_CRW_EVENT(2,
-				      "chsc_process_crw: invalid command!\n");
-			return 0;
-		case 0x0003:
-			CIO_CRW_EVENT(2, "chsc_process_crw: error in chsc "
-				      "request block!\n");
-			return 0;
-		case 0x0005:
-			CIO_CRW_EVENT(2, "chsc_process_crw: no event "
-				      "information stored\n");
-			return 0;
-		default:
-			CIO_CRW_EVENT(2, "chsc_process_crw: chsc response %d\n",
+		if (sei_area->response.code == 0x0001) {
+			CIO_CRW_EVENT(4, "chsc: sei successful\n");
+			rc = chsc_process_sei(sei_area);
+			if (rc)
+				ret = rc;
+		} else {
+			CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
 				      sei_area->response.code);
-			return 0;
-		}
-
-		/* Check if we might have lost some information. */
-		if (sei_area->flags & 0x40)
-			CIO_CRW_EVENT(2, "chsc_process_crw: Event information "
-				      "has been lost due to overflow!\n");
-
-		if (sei_area->rs != 4) {
-			CIO_CRW_EVENT(2, "chsc_process_crw: reporting source "
-				      "(%04X) isn't a chpid!\n",
-				      sei_area->rsid);
-			continue;
-		}
-
-		/* which kind of information was stored? */
-		switch (sei_area->cc) {
-		case 1: /* link incident*/
-			CIO_CRW_EVENT(4, "chsc_process_crw: "
-				      "channel subsystem reports link incident,"
-				      " reporting source is chpid %x\n",
-				      sei_area->rsid);
-			chpid = __get_chpid_from_lir(sei_area->ccdf);
-			if (chpid < 0)
-				CIO_CRW_EVENT(4, "%s: Invalid LIR, skipping\n",
-					      __FUNCTION__);
-			else
-				s390_set_chpid_offline(chpid);
-			break;
-
-		case 2: /* i/o resource accessibiliy */
-			CIO_CRW_EVENT(4, "chsc_process_crw: "
-				      "channel subsystem reports some I/O "
-				      "devices may have become accessible\n");
-			pr_debug("Data received after sei: \n");
-			pr_debug("Validity flags: %x\n", sei_area->vf);
-
-			/* allocate a new channel path structure, if needed */
-			status = get_chp_status(sei_area->rsid);
-			if (status < 0)
-				new_channel_path(sei_area->rsid);
-			else if (!status)
-				break;
-			dev = get_device(&css[0]->chps[sei_area->rsid]->dev);
-			res_data.chp = to_channelpath(dev);
-			pr_debug("chpid: %x", sei_area->rsid);
-			if ((sei_area->vf & 0xc0) != 0) {
-				res_data.fla = sei_area->fla;
-				if ((sei_area->vf & 0xc0) == 0xc0) {
-					pr_debug(" full link addr: %x",
-						 sei_area->fla);
-					res_data.fla_mask = 0xffff;
-				} else {
-					pr_debug(" link addr: %x",
-						 sei_area->fla);
-					res_data.fla_mask = 0xff00;
-				}
-			}
-			ret = s390_process_res_acc(&res_data);
-			pr_debug("\n\n");
-			put_device(dev);
-			break;
-
-		default: /* other stuff */
-			CIO_CRW_EVENT(4, "chsc_process_crw: event %d\n",
-				      sei_area->cc);
+			ret = 0;
 			break;
 		}
 	} while (sei_area->flags & 0x80);
+
 	return ret;
 }
 
-static inline int
+static int
 __chp_add_new_sch(struct subchannel_id schid)
 {
 	struct schib schib;
 	int ret;
 
-	if (stsch(schid, &schib))
+	if (stsch_err(schid, &schib))
 		/* We're through */
 		return need_rescan ? -EAGAIN : -ENXIO;
 
@@ -709,7 +701,7 @@ chp_process_crw(int chpid, int on)
 	return chp_add(chpid);
 }
 
-static inline int check_for_io_on_path(struct subchannel *sch, int index)
+static int check_for_io_on_path(struct subchannel *sch, int index)
 {
 	int cc;
 
@@ -741,7 +733,7 @@ static void terminate_internal_io(struct subchannel *sch)
 		sch->driver->termination(&sch->dev);
 }
 
-static inline void
+static void
 __s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
 {
 	int chp, old_lpm;
@@ -967,8 +959,8 @@ static struct bin_attribute chp_measurement_attr = {
 static void
 chsc_remove_chp_cmg_attr(struct channel_path *chp)
 {
-	sysfs_remove_bin_file(&chp->dev.kobj, &chp_measurement_chars_attr);
-	sysfs_remove_bin_file(&chp->dev.kobj, &chp_measurement_attr);
+	device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
+	device_remove_bin_file(&chp->dev, &chp_measurement_attr);
 }
 
 static int
@@ -976,14 +968,12 @@ chsc_add_chp_cmg_attr(struct channel_path *chp)
 {
 	int ret;
 
-	ret = sysfs_create_bin_file(&chp->dev.kobj,
-				    &chp_measurement_chars_attr);
+	ret = device_create_bin_file(&chp->dev, &chp_measurement_chars_attr);
 	if (ret)
 		return ret;
-	ret = sysfs_create_bin_file(&chp->dev.kobj, &chp_measurement_attr);
+	ret = device_create_bin_file(&chp->dev, &chp_measurement_attr);
 	if (ret)
-		sysfs_remove_bin_file(&chp->dev.kobj,
-				      &chp_measurement_chars_attr);
+		device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
 	return ret;
 }
 
@@ -1042,7 +1032,7 @@ __chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
 		u32 : 4;
 		u32 fmt : 4;
 		u32 : 16;
-	} *secm_area;
+	} __attribute__ ((packed)) *secm_area;
 	int ret, ccode;
 
 	secm_area = page;
@@ -1253,7 +1243,7 @@ chsc_determine_channel_path_description(int chpid,
 		struct chsc_header response;
 		u32 zeroes2;
 		struct channel_path_desc desc;
-	} *scpd_area;
+	} __attribute__ ((packed)) *scpd_area;
 
 	scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
 	if (!scpd_area)
@@ -1350,7 +1340,7 @@ chsc_get_channel_measurement_chars(struct channel_path *chp)
 		u32 cmg : 8;
 		u32 zeroes3;
 		u32 data[NR_MEASUREMENT_CHARS];
-	} *scmc_area;
+	} __attribute__ ((packed)) *scmc_area;
 
 	scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
 	if (!scmc_area)
@@ -1517,7 +1507,7 @@ chsc_enable_facility(int operation_code)
 		u32 reserved5:4;
 		u32 format2:4;
 		u32 reserved6:24;
-	} *sda_area;
+	} __attribute__ ((packed)) *sda_area;
 
 	sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
 	if (!sda_area)
@@ -1569,7 +1559,7 @@ chsc_determine_css_characteristics(void)
 		u32 reserved4;
 		u32 general_char[510];
 		u32 chsc_char[518];
-	} *scsc_area;
+	} __attribute__ ((packed)) *scsc_area;
 
 	scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
 	if (!scsc_area) {
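A remark on the recurring __attribute__ ((packed)) additions in chsc.c and chsc.h: these structures describe CHSC control blocks whose layout is fixed by the hardware, so any padding the compiler inserts would shift every following field off its architected byte offset. The attribute forbids that padding. A self-contained illustration of the effect, with struct names invented for the example:

#include <stdio.h>
#include <stdint.h>

struct header {			/* shaped like struct chsc_header */
	uint16_t length;
	uint16_t code;
} __attribute__ ((packed));

struct unpacked_area {
	struct header request;	/* 4 bytes */
	uint8_t flags;		/* 1 byte */
	uint16_t fla;		/* compiler may insert padding before this */
};

struct packed_area {
	struct header request;
	uint8_t flags;
	uint16_t fla;		/* guaranteed to sit at byte offset 5 */
} __attribute__ ((packed));

int main(void)
{
	/* On common ABIs the unpacked variant is 8 bytes, the packed one 7. */
	printf("unpacked: %zu bytes\n", sizeof(struct unpacked_area));
	printf("packed:   %zu bytes\n", sizeof(struct packed_area));
	return 0;
}

For a block handed directly to the channel subsystem, only the packed layout matches what the machine actually reads and writes.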
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index a259245780ae..0fb2b024208f 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -10,17 +10,17 @@
 struct chsc_header {
 	u16 length;
 	u16 code;
-};
+} __attribute__ ((packed));
 
 #define NR_MEASUREMENT_CHARS 5
 struct cmg_chars {
 	u32 values[NR_MEASUREMENT_CHARS];
-};
+} __attribute__ ((packed));
 
 #define NR_MEASUREMENT_ENTRIES 8
 struct cmg_entry {
 	u32 values[NR_MEASUREMENT_ENTRIES];
-};
+} __attribute__ ((packed));
 
 struct channel_path_desc {
 	u8 flags;
@@ -31,7 +31,7 @@ struct channel_path_desc {
 	u8 zeroes;
 	u8 chla;
 	u8 chpp;
-};
+} __attribute__ ((packed));
 
 struct channel_path {
 	int id;
@@ -47,6 +47,9 @@ struct channel_path {
 extern void s390_process_css( void );
 extern void chsc_validate_chpids(struct subchannel *);
 extern void chpid_is_actually_online(int);
+extern int css_get_ssd_info(struct subchannel *);
+extern int chsc_process_crw(void);
+extern int chp_process_crw(int, int);
 
 struct css_general_char {
 	u64 : 41;
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index ae1bf231d089..b3a56dc5f68a 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -122,7 +122,7 @@ cio_get_options (struct subchannel *sch)
  * Use tpi to get a pending interrupt, call the interrupt handler and
  * return a pointer to the subchannel structure.
  */
-static inline int
+static int
 cio_tpi(void)
 {
 	struct tpi_info *tpi_info;
@@ -152,7 +152,7 @@ cio_tpi(void)
 	return 1;
 }
 
-static inline int
+static int
 cio_start_handle_notoper(struct subchannel *sch, __u8 lpm)
 {
 	char dbf_text[15];
@@ -585,7 +585,7 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
 		 * This device must not be known to Linux. So we simply
 		 * say that there is no device and return ENODEV.
 		 */
-		CIO_MSG_EVENT(0, "Blacklisted device detected "
+		CIO_MSG_EVENT(4, "Blacklisted device detected "
 			      "at devno %04X, subchannel set %x\n",
 			      sch->schib.pmcw.dev, sch->schid.ssid);
 		err = -ENODEV;
@@ -646,7 +646,7 @@ do_IRQ (struct pt_regs *regs)
 	 * Make sure that the i/o interrupt did not "overtake"
 	 * the last HZ timer interrupt.
 	 */
-	account_ticks();
+	account_ticks(S390_lowcore.int_clock);
 	/*
 	 * Get interrupt information from lowcore
 	 */
@@ -832,7 +832,7 @@ cio_get_console_subchannel(void)
 }
 
 #endif
-static inline int
+static int
 __disable_subchannel_easy(struct subchannel_id schid, struct schib *schib)
 {
 	int retry, cc;
@@ -850,7 +850,20 @@ __disable_subchannel_easy(struct subchannel_id schid, struct schib *schib)
 	return -EBUSY; /* uhm... */
 }
 
-static inline int
+/* we can't use the normal udelay here, since it enables external interrupts */
+
+static void udelay_reset(unsigned long usecs)
+{
+	uint64_t start_cc, end_cc;
+
+	asm volatile ("STCK %0" : "=m" (start_cc));
+	do {
+		cpu_relax();
+		asm volatile ("STCK %0" : "=m" (end_cc));
+	} while (((end_cc - start_cc)/4096) < usecs);
+}
+
+static int
 __clear_subchannel_easy(struct subchannel_id schid)
 {
 	int retry;
@@ -865,7 +878,7 @@ __clear_subchannel_easy(struct subchannel_id schid)
 			if (schid_equal(&ti.schid, &schid))
 				return 0;
 		}
-		udelay(100);
+		udelay_reset(100);
 	}
 	return -EBUSY;
 }
@@ -882,11 +895,11 @@ static int stsch_reset(struct subchannel_id schid, volatile struct schib *addr)
 	int rc;
 
 	pgm_check_occured = 0;
-	s390_reset_pgm_handler = cio_reset_pgm_check_handler;
+	s390_base_pgm_handler_fn = cio_reset_pgm_check_handler;
 	rc = stsch(schid, addr);
-	s390_reset_pgm_handler = NULL;
+	s390_base_pgm_handler_fn = NULL;
 
-	/* The program check handler could have changed pgm_check_occured */
+	/* The program check handler could have changed pgm_check_occured. */
 	barrier();
 
 	if (pgm_check_occured)
@@ -944,7 +957,7 @@ static void css_reset(void)
 	/* Reset subchannels. */
 	for_each_subchannel(__shutdown_subchannel_easy,  NULL);
 	/* Reset channel paths. */
-	s390_reset_mcck_handler = s390_reset_chpids_mcck_handler;
+	s390_base_mcck_handler_fn = s390_reset_chpids_mcck_handler;
 	/* Enable channel report machine checks. */
 	__ctl_set_bit(14, 28);
 	/* Temporarily reenable machine checks. */
@@ -969,7 +982,7 @@ static void css_reset(void)
 	local_mcck_disable();
 	/* Disable channel report machine checks. */
 	__ctl_clear_bit(14, 28);
-	s390_reset_mcck_handler = NULL;
+	s390_base_mcck_handler_fn = NULL;
 }
 
 static struct reset_call css_reset_call = {
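The new udelay_reset() above deserves a note: the regular udelay() cannot be used on this reset path because it briefly enables external interrupts, so the function busy-waits on the TOD clock instead. STCK stores a 64-bit TOD value in which bit 51 ticks once per microsecond, which is why the delta is divided by 4096 (2^12) to convert raw clock units into microseconds. A plain-C model of just that conversion:

#include <stdio.h>
#include <stdint.h>

/* s390 TOD clock format: bit 51 of the 64-bit value (bit 0 being the
 * most significant bit) increments once per microsecond, so one
 * microsecond corresponds to 4096 units of the raw value. */
#define TOD_UNITS_PER_USEC 4096ULL

static uint64_t tod_delta_to_usecs(uint64_t start, uint64_t end)
{
	return (end - start) / TOD_UNITS_PER_USEC;
}

int main(void)
{
	uint64_t start = 0;
	uint64_t end = 100 * TOD_UNITS_PER_USEC;	/* 100us later */

	printf("%llu usecs elapsed\n",
	       (unsigned long long)tod_delta_to_usecs(start, end));
	return 0;
}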
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index 828b2d334f0a..90b22faabbf7 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -519,8 +519,8 @@ struct cmb {
 /* insert a single device into the cmb_area list
  * called with cmb_area.lock held from alloc_cmb
  */
-static inline int alloc_cmb_single (struct ccw_device *cdev,
-				    struct cmb_data *cmb_data)
+static int alloc_cmb_single(struct ccw_device *cdev,
+			    struct cmb_data *cmb_data)
 {
 	struct cmb *cmb;
 	struct ccw_device_private *node;
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 9d6c02446863..fe0ace7aece8 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -30,7 +30,7 @@ struct channel_subsystem *css[__MAX_CSSID + 1];
 
 int css_characteristics_avail = 0;
 
-inline int
+int
 for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
 {
 	struct subchannel_id schid;
@@ -108,9 +108,6 @@ css_subchannel_release(struct device *dev)
 	}
 }
 
-extern int css_get_ssd_info(struct subchannel *sch);
-
-
 int css_sch_device_register(struct subchannel *sch)
 {
 	int ret;
@@ -187,7 +184,7 @@ get_subchannel_by_schid(struct subchannel_id schid)
 	return dev ? to_subchannel(dev) : NULL;
 }
 
-static inline int css_get_subchannel_status(struct subchannel *sch)
+static int css_get_subchannel_status(struct subchannel *sch)
 {
 	struct schib schib;
 
@@ -299,7 +296,7 @@ static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
 		/* Will be done on the slow path. */
 		return -EAGAIN;
 	}
-	if (stsch(schid, &schib) || !schib.pmcw.dnv) {
+	if (stsch_err(schid, &schib) || !schib.pmcw.dnv) {
 		/* Unusable - ignore. */
 		return 0;
 	}
@@ -417,7 +414,7 @@ static void reprobe_all(struct work_struct *unused)
 		      need_reprobe);
 }
 
-DECLARE_WORK(css_reprobe_work, reprobe_all);
+static DECLARE_WORK(css_reprobe_work, reprobe_all);
 
 /* Schedule reprobing of all unregistered subchannels. */
 void css_schedule_reprobe(void)
@@ -578,7 +575,7 @@ css_cm_enable_store(struct device *dev, struct device_attribute *attr,
 
 static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store);
 
-static inline int __init setup_css(int nr)
+static int __init setup_css(int nr)
 {
 	u32 tod_high;
 	int ret;
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 3464c5b875c4..ca2bab932a8a 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -143,6 +143,8 @@ extern void css_sch_device_unregister(struct subchannel *);
 extern struct subchannel * get_subchannel_by_schid(struct subchannel_id);
 extern int css_init_done;
 extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *);
+extern int css_process_crw(int, int);
+extern void css_reiterate_subchannels(void);
 
 #define __MAX_SUBCHANNEL 65535
 #define __MAX_SSID 3
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 803579053c2f..e322111fb369 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -138,7 +138,6 @@ struct bus_type ccw_bus_type;
 
 static int io_subchannel_probe (struct subchannel *);
 static int io_subchannel_remove (struct subchannel *);
-void io_subchannel_irq (struct device *);
 static int io_subchannel_notify(struct device *, int);
 static void io_subchannel_verify(struct device *);
 static void io_subchannel_ioterm(struct device *);
@@ -235,11 +234,8 @@ chpids_show (struct device * dev, struct device_attribute *attr, char * buf)
 	ssize_t ret = 0;
 	int chp;
 
-	if (ssd)
-		for (chp = 0; chp < 8; chp++)
-			ret += sprintf (buf+ret, "%02x ", ssd->chpid[chp]);
-	else
-		ret += sprintf (buf, "n/a");
+	for (chp = 0; chp < 8; chp++)
+		ret += sprintf (buf+ret, "%02x ", ssd->chpid[chp]);
 	ret += sprintf (buf+ret, "\n");
 	return min((ssize_t)PAGE_SIZE, ret);
 }
@@ -552,13 +548,13 @@ static struct attribute_group ccwdev_attr_group = {
 	.attrs = ccwdev_attrs,
 };
 
-static inline int
+static int
 device_add_files (struct device *dev)
 {
 	return sysfs_create_group(&dev->kobj, &ccwdev_attr_group);
 }
 
-static inline void
+static void
 device_remove_files(struct device *dev)
 {
 	sysfs_remove_group(&dev->kobj, &ccwdev_attr_group);
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index 29db6341d632..b66338b76579 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -74,6 +74,7 @@ extern struct workqueue_struct *ccw_device_notify_work;
 extern wait_queue_head_t ccw_device_init_wq;
 extern atomic_t ccw_device_init_count;
 
+void io_subchannel_irq (struct device *pdev);
 void io_subchannel_recog_done(struct ccw_device *cdev);
 
 int ccw_device_cancel_halt_clear(struct ccw_device *);
@@ -118,6 +119,7 @@ int ccw_device_stlck(struct ccw_device *);
 /* qdio needs this. */
 void ccw_device_set_timeout(struct ccw_device *, int);
 extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *);
+extern struct bus_type ccw_bus_type;
 
 /* Channel measurement facility related */
 void retry_set_schib(struct ccw_device *cdev);
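Several hunks in this patch replace stray extern declarations inside .c files (css.c's local css_get_ssd_info() extern, qdio.c's ccw_bus_type extern) with a single declaration in the owning header, as the two device.h additions above show. Declaring once in a header means the definition and every user are checked against the same prototype. A minimal sketch of the convention, with hypothetical file names shown as comments:

/* dev.h -- one authoritative declaration */
#ifndef DEV_H
#define DEV_H
struct bus_type { const char *name; };	/* stand-in for the real type */
extern struct bus_type ccw_bus_type;	/* defined in exactly one .c file */
#endif

/* dev.c -- the single definition */
/* #include "dev.h" */
struct bus_type ccw_bus_type = { .name = "ccw" };

/* user.c -- consumers include the header instead of re-declaring it
 * locally, so a re-declaration that drifts out of sync becomes a
 * compile error instead of a silent type mismatch. */
/* #include "dev.h" */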
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index eed14572fc3b..51238e7555bb 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -206,7 +206,7 @@ ccw_device_handle_oper(struct ccw_device *cdev)
  * been varied online on the SE so we have to find out by magic (i. e. driving
  * the channel subsystem to device selection and updating our path masks).
  */
-static inline void
+static void
 __recover_lost_chpids(struct subchannel *sch, int old_lpm)
 {
 	int mask, i;
@@ -387,7 +387,7 @@ ccw_device_done(struct ccw_device *cdev, int state)
 	put_device (&cdev->dev);
 }
 
-static inline int cmp_pgid(struct pgid *p1, struct pgid *p2)
+static int cmp_pgid(struct pgid *p1, struct pgid *p2)
 {
 	char *c1;
 	char *c2;
@@ -842,6 +842,8 @@ ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
 call_handler_unsol:
 		if (cdev->handler)
 			cdev->handler (cdev, 0, irb);
+		if (cdev->private->flags.doverify)
+			ccw_device_online_verify(cdev, 0);
 		return;
 	}
 	/* Accumulate status and find out if a basic sense is needed. */
@@ -892,7 +894,7 @@ ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
 /*
  * Got an interrupt for a basic sense.
  */
-void
+static void
 ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
 {
 	struct irb *irb;
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index d269607336ec..d7b25b8f71d2 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -302,7 +302,7 @@ ccw_device_wake_up(struct ccw_device *cdev, unsigned long ip, struct irb *irb)
 	wake_up(&cdev->private->wait_q);
 }
 
-static inline int
+static int
 __ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic, __u8 lpm)
 {
 	int ret;
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c
index bdcf930f7beb..6b1caea622ea 100644
--- a/drivers/s390/cio/device_status.c
+++ b/drivers/s390/cio/device_status.c
@@ -25,7 +25,7 @@
  * Check for any kind of channel or interface control check but don't
  * issue the message for the console device
  */
-static inline void
+static void
 ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb)
 {
 	if (!(irb->scsw.cstat & (SCHN_STAT_CHN_DATA_CHK |
@@ -72,7 +72,7 @@ ccw_device_path_notoper(struct ccw_device *cdev)
 /*
  * Copy valid bits from the extended control word to device irb.
  */
-static inline void
+static void
 ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb)
 {
 	/*
@@ -94,7 +94,7 @@ ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb)
 /*
  * Check if extended status word is valid.
  */
-static inline int
+static int
 ccw_device_accumulate_esw_valid(struct irb *irb)
 {
 	if (!irb->scsw.eswf && irb->scsw.stctl == SCSW_STCTL_STATUS_PEND)
@@ -109,7 +109,7 @@ ccw_device_accumulate_esw_valid(struct irb *irb)
 /*
  * Copy valid bits from the extended status word to device irb.
  */
-static inline void
+static void
 ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb)
 {
 	struct irb *cdev_irb;
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c
index 6fd1940842eb..d726cd5777de 100644
--- a/drivers/s390/cio/qdio.c
+++ b/drivers/s390/cio/qdio.c
@@ -66,7 +66,6 @@ MODULE_LICENSE("GPL");
 /******************** HERE WE GO ***********************************/
 
 static const char version[] = "QDIO base support version 2";
-extern struct bus_type ccw_bus_type;
 
 static int qdio_performance_stats = 0;
 static int proc_perf_file_registration;
@@ -138,7 +137,7 @@ qdio_release_q(struct qdio_q *q)
 }
 
 /*check ccq  */
-static inline int
+static int
 qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
 {
 	char dbf_text[15];
@@ -153,7 +152,7 @@ qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
 	return -EIO;
 }
 /* EQBS: extract buffer states */
-static inline int
+static int
 qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
 	     unsigned int *start, unsigned int *cnt)
 {
@@ -188,7 +187,7 @@ again:
 }
 
 /* SQBS: set buffer states */
-static inline int
+static int
 qdio_do_sqbs(struct qdio_q *q, unsigned char state,
 	     unsigned int *start, unsigned int *cnt)
 {
@@ -315,7 +314,7 @@ __do_siga_output(struct qdio_q *q, unsigned int *busy_bit)
  * returns QDIO_SIGA_ERROR_ACCESS_EXCEPTION as cc, when SIGA returns
  * an access exception
  */
-static inline int
+static int
 qdio_siga_output(struct qdio_q *q)
 {
 	int cc;
@@ -349,7 +348,7 @@ qdio_siga_output(struct qdio_q *q)
 	return cc;
 }
 
-static inline int
+static int
 qdio_siga_input(struct qdio_q *q)
 {
 	int cc;
@@ -421,7 +420,7 @@ tiqdio_sched_tl(void)
 	tasklet_hi_schedule(&tiqdio_tasklet);
 }
 
-static inline void
+static void
 qdio_mark_tiq(struct qdio_q *q)
 {
 	unsigned long flags;
@@ -471,7 +470,7 @@ qdio_mark_q(struct qdio_q *q)
 	tasklet_schedule(&q->tasklet);
 }
 
-static inline int
+static int
 qdio_stop_polling(struct qdio_q *q)
 {
 #ifdef QDIO_USE_PROCESSING_STATE
@@ -525,7 +524,7 @@ qdio_stop_polling(struct qdio_q *q)
 * sophisticated locking outside of unmark_q, so that we don't need to
 * disable the interrupts :-)
 */
-static inline void
+static void
 qdio_unmark_q(struct qdio_q *q)
 {
 	unsigned long flags;
@@ -691,7 +690,7 @@ qdio_qebsm_get_inbound_buffer_frontier(struct qdio_q *q)
 	return q->first_to_check;
 }
 
-static inline int
+static int
 qdio_get_outbound_buffer_frontier(struct qdio_q *q)
 {
 	struct qdio_irq *irq;
@@ -774,7 +773,7 @@ out:
 }
 
 /* all buffers are processed */
-static inline int
+static int
 qdio_is_outbound_q_done(struct qdio_q *q)
 {
 	int no_used;
@@ -796,7 +795,7 @@ qdio_is_outbound_q_done(struct qdio_q *q)
 	return (no_used==0);
 }
 
-static inline int
+static int
 qdio_has_outbound_q_moved(struct qdio_q *q)
 {
 	int i;
@@ -816,7 +815,7 @@ qdio_has_outbound_q_moved(struct qdio_q *q)
 	}
 }
 
-static inline void
+static void
 qdio_kick_outbound_q(struct qdio_q *q)
 {
 	int result;
@@ -905,7 +904,7 @@ qdio_kick_outbound_q(struct qdio_q *q)
 	}
 }
 
-static inline void
+static void
 qdio_kick_outbound_handler(struct qdio_q *q)
 {
 	int start, end, real_end, count;
@@ -942,7 +941,7 @@ qdio_kick_outbound_handler(struct qdio_q *q)
 	q->error_status_flags=0;
 }
 
-static inline void
+static void
 __qdio_outbound_processing(struct qdio_q *q)
 {
 	int siga_attempts;
@@ -1002,7 +1001,7 @@ qdio_outbound_processing(struct qdio_q *q)
 /************************* INBOUND ROUTINES *******************************/
 
 
-static inline int
+static int
 qdio_get_inbound_buffer_frontier(struct qdio_q *q)
 {
 	struct qdio_irq *irq;
@@ -1133,7 +1132,7 @@ out:
 	return q->first_to_check;
 }
 
-static inline int
+static int
 qdio_has_inbound_q_moved(struct qdio_q *q)
 {
 	int i;
@@ -1167,7 +1166,7 @@ qdio_has_inbound_q_moved(struct qdio_q *q)
 }
 
 /* means, no more buffers to be filled */
-static inline int
+static int
 tiqdio_is_inbound_q_done(struct qdio_q *q)
 {
 	int no_used;
@@ -1228,7 +1227,7 @@ tiqdio_is_inbound_q_done(struct qdio_q *q)
 	return 0;
 }
 
-static inline int
+static int
 qdio_is_inbound_q_done(struct qdio_q *q)
 {
 	int no_used;
@@ -1296,7 +1295,7 @@ qdio_is_inbound_q_done(struct qdio_q *q)
 	}
 }
 
-static inline void
+static void
 qdio_kick_inbound_handler(struct qdio_q *q)
 {
 	int count, start, end, real_end, i;
@@ -1343,7 +1342,7 @@ qdio_kick_inbound_handler(struct qdio_q *q)
 	}
 }
 
-static inline void
+static void
 __tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set)
 {
 	struct qdio_irq *irq_ptr;
@@ -1442,7 +1441,7 @@ tiqdio_inbound_processing(struct qdio_q *q)
 	__tiqdio_inbound_processing(q, atomic_read(&spare_indicator_usecount));
 }
 
-static inline void
+static void
 __qdio_inbound_processing(struct qdio_q *q)
 {
 	int q_laps=0;
@@ -1493,7 +1492,7 @@ qdio_inbound_processing(struct qdio_q *q)
 /************************* MAIN ROUTINES *******************************/
 
 #ifdef QDIO_USE_PROCESSING_STATE
-static inline int
+static int
 tiqdio_reset_processing_state(struct qdio_q *q, int q_laps)
 {
 	if (!q) {
@@ -1545,7 +1544,7 @@ tiqdio_reset_processing_state(struct qdio_q *q, int q_laps)
 }
 #endif /* QDIO_USE_PROCESSING_STATE */
 
-static inline void
+static void
 tiqdio_inbound_checks(void)
 {
 	struct qdio_q *q;
@@ -1949,7 +1948,7 @@ qdio_set_state(struct qdio_irq *irq_ptr, enum qdio_irq_states state)
 	mb();
 }
 
-static inline void
+static void
 qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb)
 {
 	char dbf_text[15];
@@ -1966,7 +1965,7 @@ qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb)
 
 }
 
-static inline void
+static void
 qdio_handle_pci(struct qdio_irq *irq_ptr)
 {
 	int i;
@@ -2002,7 +2001,7 @@ qdio_handle_pci(struct qdio_irq *irq_ptr)
 
 static void qdio_establish_handle_irq(struct ccw_device*, int, int);
 
-static inline void
+static void
 qdio_handle_activate_check(struct ccw_device *cdev, unsigned long intparm,
 			   int cstat, int dstat)
 {
@@ -2229,7 +2228,7 @@ qdio_synchronize(struct ccw_device *cdev, unsigned int flags,
 	return cc;
 }
 
-static inline void
+static void
 qdio_check_subchannel_qebsm(struct qdio_irq *irq_ptr, unsigned char qdioac,
 			    unsigned long token)
 {
@@ -2740,7 +2739,7 @@ qdio_free(struct ccw_device *cdev)
 	return 0;
 }
 
-static inline void
+static void
 qdio_allocate_do_dbf(struct qdio_initialize *init_data)
 {
 	char dbf_text[20]; /* if a printf printed out more than 8 chars */
@@ -2773,7 +2772,7 @@ qdio_allocate_do_dbf(struct qdio_initialize *init_data)
 	QDIO_DBF_HEX0(0,setup,&init_data->output_sbal_addr_array,sizeof(void*));
 }
 
-static inline void
+static void
 qdio_allocate_fill_input_desc(struct qdio_irq *irq_ptr, int i, int iqfmt)
 {
 	irq_ptr->input_qs[i]->is_iqdio_q = iqfmt;
@@ -2792,7 +2791,7 @@ qdio_allocate_fill_input_desc(struct qdio_irq *irq_ptr, int i, int iqfmt)
 	irq_ptr->qdr->qdf0[i].dkey=QDIO_STORAGE_KEY;
 }
 
-static inline void
+static void
 qdio_allocate_fill_output_desc(struct qdio_irq *irq_ptr, int i,
 			       int j, int iqfmt)
 {
@@ -2813,7 +2812,7 @@ qdio_allocate_fill_output_desc(struct qdio_irq *irq_ptr, int i,
 }
 
 
-static inline void
+static void
 qdio_initialize_set_siga_flags_input(struct qdio_irq *irq_ptr)
 {
 	int i;
@@ -2839,7 +2838,7 @@ qdio_initialize_set_siga_flags_input(struct qdio_irq *irq_ptr)
 	}
 }
 
-static inline void
+static void
 qdio_initialize_set_siga_flags_output(struct qdio_irq *irq_ptr)
 {
 	int i;
@@ -2865,7 +2864,7 @@ qdio_initialize_set_siga_flags_output(struct qdio_irq *irq_ptr)
 	}
 }
 
-static inline int
+static int
 qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat,
 				    int dstat)
 {
@@ -3014,7 +3013,7 @@ qdio_allocate(struct qdio_initialize *init_data)
 	return 0;
 }
 
-int qdio_fill_irq(struct qdio_initialize *init_data)
+static int qdio_fill_irq(struct qdio_initialize *init_data)
 {
 	int i;
 	char dbf_text[15];
@@ -3367,7 +3366,7 @@ qdio_activate(struct ccw_device *cdev, int flags)
 }
 
 /* buffers filled forwards again to make Rick happy */
-static inline void
+static void
 qdio_do_qdio_fill_input(struct qdio_q *q, unsigned int qidx,
 			unsigned int count, struct qdio_buffer *buffers)
 {
@@ -3386,7 +3385,7 @@ qdio_do_qdio_fill_input(struct qdio_q *q, unsigned int qidx,
 	}
 }
 
-static inline void
+static void
 qdio_do_qdio_fill_output(struct qdio_q *q, unsigned int qidx,
 			 unsigned int count, struct qdio_buffer *buffers)
 {
@@ -3407,7 +3406,7 @@ qdio_do_qdio_fill_output(struct qdio_q *q, unsigned int qidx,
 	}
 }
 
-static inline void
+static void
 do_qdio_handle_inbound(struct qdio_q *q, unsigned int callflags,
 		       unsigned int qidx, unsigned int count,
 		       struct qdio_buffer *buffers)
@@ -3443,7 +3442,7 @@ do_qdio_handle_inbound(struct qdio_q *q, unsigned int callflags,
 	qdio_mark_q(q);
 }
 
-static inline void
+static void
 do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags,
 			unsigned int qidx, unsigned int count,
 			struct qdio_buffer *buffers)
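Nearly all of the qdio.c hunks above simply drop inline from file-local helpers. With a plain static, the compiler is still free to inline small functions where it helps, no longer force-expands large ones into every caller, and can warn about helpers that become unused (a warning that static inline suppresses). A compilable before/after sketch with an invented function body:

/* Before: the hint forces the compiler's hand and hides
 * unused-function warnings if the helper loses its last caller. */
static inline int add_one_old(int x)
{
	return x + 1;
}

/* After: gcc inlines this where profitable anyway, and warns if the
 * function is never called. */
static int add_one(int x)
{
	return x + 1;
}

int main(void)
{
	return add_one(add_one_old(40)) == 42 ? 0 : 1;
}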
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 81b5899f4010..c7d1355237b6 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -465,7 +465,7 @@ static int ap_device_probe(struct device *dev)
  * Flush all requests from the request/pending queue of an AP device.
  * @ap_dev: pointer to the AP device.
  */
-static inline void __ap_flush_queue(struct ap_device *ap_dev)
+static void __ap_flush_queue(struct ap_device *ap_dev)
 {
 	struct ap_message *ap_msg, *next;
 
@@ -587,7 +587,7 @@ static struct bus_attribute *const ap_bus_attrs[] = {
 /**
  * Pick one of the 16 ap domains.
  */
-static inline int ap_select_domain(void)
+static int ap_select_domain(void)
 {
 	int queue_depth, device_type, count, max_count, best_domain;
 	int rc, i, j;
@@ -825,7 +825,7 @@ static inline void ap_schedule_poll_timer(void)
  * required, bit 2^1 is set if the poll timer needs to get armed
  * Returns 0 if the device is still present, -ENODEV if not.
  */
-static inline int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags)
+static int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags)
 {
 	struct ap_queue_status status;
 	struct ap_message *ap_msg;
@@ -872,7 +872,7 @@ static inline int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags)
  * required, bit 2^1 is set if the poll timer needs to get armed
  * Returns 0 if the device is still present, -ENODEV if not.
  */
-static inline int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)
+static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)
 {
 	struct ap_queue_status status;
 	struct ap_message *ap_msg;
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 1edc10a7a6f2..b9e59bc9435a 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -791,7 +791,7 @@ static long trans_xcRB32(struct file *filp, unsigned int cmd,
 	return rc;
 }
 
-long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd,
+static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd,
 			 unsigned long arg)
 {
 	if (cmd == ICARSAMODEXPO)
@@ -833,8 +833,8 @@ static struct miscdevice zcrypt_misc_device = {
  */
 static struct proc_dir_entry *zcrypt_entry;
 
-static inline int sprintcl(unsigned char *outaddr, unsigned char *addr,
-			   unsigned int len)
+static int sprintcl(unsigned char *outaddr, unsigned char *addr,
+		    unsigned int len)
 {
 	int hl, i;
 
@@ -845,8 +845,8 @@ static inline int sprintcl(unsigned char *outaddr, unsigned char *addr,
 	return hl;
 }
 
-static inline int sprintrw(unsigned char *outaddr, unsigned char *addr,
-			   unsigned int len)
+static int sprintrw(unsigned char *outaddr, unsigned char *addr,
+		    unsigned int len)
 {
 	int hl, inl, c, cx;
 
@@ -865,8 +865,8 @@ static inline int sprintrw(unsigned char *outaddr, unsigned char *addr,
 	return hl;
 }
 
-static inline int sprinthx(unsigned char *title, unsigned char *outaddr,
-			   unsigned char *addr, unsigned int len)
+static int sprinthx(unsigned char *title, unsigned char *outaddr,
+		    unsigned char *addr, unsigned int len)
 {
 	int hl, inl, r, rx;
 
@@ -885,8 +885,8 @@ static inline int sprinthx(unsigned char *title, unsigned char *outaddr,
 	return hl;
 }
 
-static inline int sprinthx4(unsigned char *title, unsigned char *outaddr,
-			    unsigned int *array, unsigned int len)
+static int sprinthx4(unsigned char *title, unsigned char *outaddr,
+		     unsigned int *array, unsigned int len)
 {
 	int hl, r;
 
@@ -943,7 +943,7 @@ static int zcrypt_status_read(char *resp_buff, char **start, off_t offset,
 	zcrypt_qdepth_mask(workarea);
 	len += sprinthx("Waiting work element counts",
 			resp_buff+len, workarea, AP_DEVICES);
-	zcrypt_perdev_reqcnt((unsigned int *) workarea);
+	zcrypt_perdev_reqcnt((int *) workarea);
 	len += sprinthx4("Per-device successfully completed request counts",
 			 resp_buff+len,(unsigned int *) workarea, AP_DEVICES);
 	*eof = 1;
diff --git a/drivers/s390/crypto/zcrypt_pcica.c b/drivers/s390/crypto/zcrypt_pcica.c
index 32e37014345c..818ffe05ac00 100644
--- a/drivers/s390/crypto/zcrypt_pcica.c
+++ b/drivers/s390/crypto/zcrypt_pcica.c
@@ -191,10 +191,10 @@ static int ICACRT_msg_to_type4CRT_msg(struct zcrypt_device *zdev,
  *
  * Returns 0 on success or -EFAULT.
  */
-static inline int convert_type84(struct zcrypt_device *zdev,
-				 struct ap_message *reply,
-				 char __user *outputdata,
-				 unsigned int outputdatalength)
+static int convert_type84(struct zcrypt_device *zdev,
+			  struct ap_message *reply,
+			  char __user *outputdata,
+			  unsigned int outputdatalength)
 {
 	struct type84_hdr *t84h = reply->message;
 	char *data;
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
index b7153c1e15cd..252443b6bd1b 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.c
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -709,7 +709,8 @@ out_free:
  * PCIXCC/CEX2C device to the request distributor
  * @xcRB: pointer to the send_cprb request buffer
  */
-long zcrypt_pcixcc_send_cprb(struct zcrypt_device *zdev, struct ica_xcRB *xcRB)
+static long zcrypt_pcixcc_send_cprb(struct zcrypt_device *zdev,
+				    struct ica_xcRB *xcRB)
 {
 	struct ap_message ap_msg;
 	struct response_type resp_type = {
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index 52625153a4f0..f98fa465df0a 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -22,13 +22,6 @@ config CTC
 	  available. This option is also available as a module which will be
 	  called ctc.ko. If you do not know what it is, it's safe to say "Y".
 
-config IUCV
-	tristate "IUCV support (VM only)"
-	help
-	  Select this option if you want to use inter-user communication
-	  under VM or VIF. If unsure, say "Y" to enable a fast communication
-	  link between VM guests.
-
 config NETIUCV
 	tristate "IUCV network device support (VM only)"
 	depends on IUCV && NETDEVICES
diff --git a/drivers/s390/net/Makefile b/drivers/s390/net/Makefile
index 4777e36a922f..bbe3ab2e93d9 100644
--- a/drivers/s390/net/Makefile
+++ b/drivers/s390/net/Makefile
@@ -4,7 +4,6 @@
 
 ctc-objs := ctcmain.o ctcdbug.o
 
-obj-$(CONFIG_IUCV) += iucv.o
 obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o
 obj-$(CONFIG_SMSGIUCV) += smsgiucv.o
 obj-$(CONFIG_CTC) += ctc.o fsm.o cu3088.o
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index 95f4e105cb96..7809a79feec7 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -121,7 +121,7 @@ MODULE_LICENSE("GPL");
 #define DEBUG
 #endif
 
-char debug_buffer[255];
+static char debug_buffer[255];
 /**
  * Debug Facility Stuff
  */
@@ -223,16 +223,14 @@ static void claw_timer ( struct chbk * p_ch );
 /* Functions */
 static int add_claw_reads(struct net_device *dev,
 	struct ccwbk* p_first, struct ccwbk* p_last);
-static void inline ccw_check_return_code (struct ccw_device *cdev,
-	int return_code);
-static void inline ccw_check_unit_check (struct chbk * p_ch,
-	unsigned char sense );
+static void ccw_check_return_code (struct ccw_device *cdev, int return_code);
+static void ccw_check_unit_check (struct chbk * p_ch, unsigned char sense );
 static int find_link(struct net_device *dev, char *host_name, char *ws_name );
 static int claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid);
 static int init_ccw_bk(struct net_device *dev);
 static void probe_error( struct ccwgroup_device *cgdev);
 static struct net_device_stats *claw_stats(struct net_device *dev);
-static int inline pages_to_order_of_mag(int num_of_pages);
+static int pages_to_order_of_mag(int num_of_pages);
 static struct sk_buff *claw_pack_skb(struct claw_privbk *privptr);
 #ifdef DEBUG
 static void dumpit (char *buf, int len);
@@ -1310,7 +1308,7 @@ claw_timer ( struct chbk * p_ch )
 * of magnitude get_free_pages() has an upper order of 9           *
 *--------------------------------------------------------------------*/
 
-static int inline
+static int
 pages_to_order_of_mag(int num_of_pages)
 {
 	int order_of_mag=1;   /* assume 2 pages */
@@ -1482,7 +1480,7 @@ add_claw_reads(struct net_device *dev, struct ccwbk* p_first,
 *                                                                    *
 *-------------------------------------------------------------------*/
 
-static void inline
+static void
 ccw_check_return_code(struct ccw_device *cdev, int return_code)
 {
 #ifdef FUNCTRACE
@@ -1529,7 +1527,7 @@ ccw_check_return_code(struct ccw_device *cdev, int return_code)
 * ccw_check_unit_check                                               *
 *--------------------------------------------------------------------*/
 
-static void inline
+static void
 ccw_check_unit_check(struct chbk * p_ch, unsigned char sense )
 {
 	struct net_device *dev = p_ch->ndev;
diff --git a/drivers/s390/net/ctcmain.c b/drivers/s390/net/ctcmain.c
index 03cc263fe0da..5a84fbbc6611 100644
--- a/drivers/s390/net/ctcmain.c
+++ b/drivers/s390/net/ctcmain.c
@@ -369,7 +369,7 @@ ctc_dump_skb(struct sk_buff *skb, int offset)
  * @param ch The channel where this skb has been received.
  * @param pskb The received skb.
  */
-static __inline__ void
+static void
 ctc_unpack_skb(struct channel *ch, struct sk_buff *pskb)
 {
 	struct net_device *dev = ch->netdev;
@@ -512,7 +512,7 @@ ctc_unpack_skb(struct channel *ch, struct sk_buff *pskb)
  * @param ch The channel, the error belongs to.
  * @param return_code The error code to inspect.
  */
-static void inline
+static void
 ccw_check_return_code(struct channel *ch, int return_code, char *msg)
 {
 	DBF_TEXT(trace, 5, __FUNCTION__);
@@ -547,7 +547,7 @@ ccw_check_return_code(struct channel *ch, int return_code, char *msg)
  * @param ch The channel, the sense code belongs to.
  * @param sense The sense code to inspect.
  */
-static void inline
+static void
 ccw_unit_check(struct channel *ch, unsigned char sense)
 {
 	DBF_TEXT(trace, 5, __FUNCTION__);
@@ -603,7 +603,7 @@ ctc_purge_skb_queue(struct sk_buff_head *q)
 	}
 }
 
-static __inline__ int
+static int
 ctc_checkalloc_buffer(struct channel *ch, int warn)
 {
 	DBF_TEXT(trace, 5, __FUNCTION__);
diff --git a/drivers/s390/net/cu3088.c b/drivers/s390/net/cu3088.c
index e965f03a7291..76728ae4b843 100644
--- a/drivers/s390/net/cu3088.c
+++ b/drivers/s390/net/cu3088.c
@@ -57,7 +57,7 @@ static struct ccw_device_id cu3088_ids[] = {
 
 static struct ccw_driver cu3088_driver;
 
-struct device *cu3088_root_dev;
+static struct device *cu3088_root_dev;
 
 static ssize_t
 group_write(struct device_driver *drv, const char *buf, size_t count)
diff --git a/drivers/s390/net/iucv.c b/drivers/s390/net/iucv.c
deleted file mode 100644
index 229aeb5fc399..000000000000
--- a/drivers/s390/net/iucv.c
+++ /dev/null
@@ -1,2540 +0,0 @@
-/*
- * IUCV network driver
- *
- * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
- * Author(s):
- *    Original source:
- *      Alan Altmark (Alan_Altmark@us.ibm.com)  Sept. 2000
- *      Xenia Tkatschow (xenia@us.ibm.com)
- *    2Gb awareness and general cleanup:
- *      Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
- *
- * Documentation used:
- *    The original source
- *    CP Programming Service, IBM document # SC24-5760
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-
-/* #define DEBUG */
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-
-#include <linux/spinlock.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/list.h>
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/device.h>
-#include <asm/atomic.h>
-#include "iucv.h"
-#include <asm/io.h>
-#include <asm/s390_ext.h>
-#include <asm/ebcdic.h>
-#include <asm/smp.h>
-#include <asm/s390_rdev.h>
-
-/* FLAGS:
- * All flags are defined in the field IPFLAGS1 of each function
- * and can be found in CP Programming Services.
- * IPSRCCLS - Indicates you have specified a source class
- * IPFGMCL  - Indicates you have specified a target class
- * IPFGPID  - Indicates you have specified a pathid
- * IPFGMID  - Indicates you have specified a message ID
- * IPANSLST - Indicates that you are using an address list for
- *            reply data
- * IPBUFLST - Indicates that you are using an address list for
- *            message data
- */
-
-#define IPSRCCLS	0x01
-#define IPFGMCL		0x01
-#define IPFGPID		0x02
-#define IPFGMID		0x04
-#define IPANSLST	0x08
-#define IPBUFLST	0x40
-
-static int
-iucv_bus_match (struct device *dev, struct device_driver *drv)
-{
-	return 0;
-}
-
-struct bus_type iucv_bus = {
-	.name = "iucv",
-	.match = iucv_bus_match,
-};
-
-struct device *iucv_root;
-
-/* General IUCV interrupt structure */
-typedef struct {
-	__u16 ippathid;
-	__u8  res1;
-	__u8  iptype;
-	__u32 res2;
-	__u8  ipvmid[8];
-	__u8  res3[24];
-} iucv_GeneralInterrupt;
-
-static iucv_GeneralInterrupt *iucv_external_int_buffer = NULL;
-
-/* Spin Lock declaration */
-
-static DEFINE_SPINLOCK(iucv_lock);
-
-static int messagesDisabled = 0;
-
-/***************INTERRUPT HANDLING ***************/
-
-typedef struct {
-	struct list_head queue;
-	iucv_GeneralInterrupt data;
-} iucv_irqdata;
-
-static struct list_head iucv_irq_queue;
-static DEFINE_SPINLOCK(iucv_irq_queue_lock);
-
-/*
- * Internal function prototypes
- */
-static void iucv_tasklet_handler(unsigned long);
-static void iucv_irq_handler(__u16);
-
-static DECLARE_TASKLET(iucv_tasklet,iucv_tasklet_handler,0);
-
-/************ FUNCTION ID'S ****************************/
-
-#define ACCEPT		10
-#define CONNECT		11
-#define DECLARE_BUFFER	12
-#define PURGE		9
-#define QUERY		0
-#define QUIESCE		13
-#define RECEIVE		5
-#define REJECT		8
-#define REPLY		6
-#define RESUME		14
-#define RETRIEVE_BUFFER	2
-#define SEND		4
-#define SETMASK		16
-#define SEVER		15
-
-/**
- * Structure: handler
- * members: list - list management.
- *          structure: id
- *             userid - 8 char array of machine identification
- *             user_data - 16 char array for user identification
- *             mask - 24 char array used to compare the 2 previous
- *          interrupt_table - vector of interrupt functions.
- *          pgm_data - ulong, application data that is passed
- *                     to the interrupt handlers
- */
-typedef struct handler_t {
-	struct list_head list;
-	struct {
-		__u8 userid[8];
-		__u8 user_data[16];
-		__u8 mask[24];
-	} id;
-	iucv_interrupt_ops_t *interrupt_table;
-	void *pgm_data;
-} handler;
-
-/**
- * iucv_handler_table: List of registered handlers.
- */
-static struct list_head iucv_handler_table;
-
-/**
- * iucv_pathid_table: an array of *handler pointing into
- *                    iucv_handler_table for fast indexing by pathid;
- */
-static handler **iucv_pathid_table;
-
-static unsigned long max_connections;
-
-/**
- * iucv_cpuid: contains the logical cpu number of the cpu which
- * has declared the iucv buffer by issuing DECLARE_BUFFER.
- * If no cpu has done the initialization iucv_cpuid contains -1.
- */
-static int iucv_cpuid = -1;
-/**
- * register_flag: is 0 when external interrupt has not been registered
- */
-static int register_flag;
-
-/****************FIVE 40-BYTE PARAMETER STRUCTURES******************/
-/* Data struct 1: iparml_control
- * Used for iucv_accept
- *          iucv_connect
- *          iucv_quiesce
- *          iucv_resume
- *          iucv_sever
- *          iucv_retrieve_buffer
- * Data struct 2: iparml_dpl (data in parameter list)
- * Used for iucv_send_prmmsg
- *          iucv_send2way_prmmsg
- *          iucv_send2way_prmmsg_array
- *          iucv_reply_prmmsg
- * Data struct 3: iparml_db (data in a buffer)
- * Used for iucv_receive
- *          iucv_receive_array
- *          iucv_reject
- *          iucv_reply
- *          iucv_reply_array
- *          iucv_send
- *          iucv_send_array
- *          iucv_send2way
- *          iucv_send2way_array
- *          iucv_declare_buffer
- * Data struct 4: iparml_purge
- * Used for iucv_purge
- *          iucv_query
- * Data struct 5: iparml_set_mask
- * Used for iucv_set_mask
- */
-
-typedef struct {
-	__u16 ippathid;
-	__u8  ipflags1;
-	__u8  iprcode;
-	__u16 ipmsglim;
-	__u16 res1;
-	__u8  ipvmid[8];
-	__u8  ipuser[16];
-	__u8  iptarget[8];
-} iparml_control;
-
-typedef struct {
-	__u16 ippathid;
-	__u8  ipflags1;
-	__u8  iprcode;
-	__u32 ipmsgid;
-	__u32 iptrgcls;
-	__u8  iprmmsg[8];
-	__u32 ipsrccls;
-	__u32 ipmsgtag;
-	__u32 ipbfadr2;
-	__u32 ipbfln2f;
-	__u32 res;
-} iparml_dpl;
-
-typedef struct {
-	__u16 ippathid;
-	__u8  ipflags1;
-	__u8  iprcode;
-	__u32 ipmsgid;
-	__u32 iptrgcls;
-	__u32 ipbfadr1;
-	__u32 ipbfln1f;
-	__u32 ipsrccls;
-	__u32 ipmsgtag;
-	__u32 ipbfadr2;
-	__u32 ipbfln2f;
-	__u32 res;
-} iparml_db;
-
-typedef struct {
-	__u16 ippathid;
-	__u8  ipflags1;
-	__u8  iprcode;
-	__u32 ipmsgid;
-	__u8  ipaudit[3];
-	__u8  res1[5];
-	__u32 res2;
-	__u32 ipsrccls;
-	__u32 ipmsgtag;
-	__u32 res3[3];
-} iparml_purge;
-
-typedef struct {
-	__u8  ipmask;
-	__u8  res1[2];
-	__u8  iprcode;
-	__u32 res2[9];
-} iparml_set_mask;
-
-typedef struct {
-	union {
-		iparml_control  p_ctrl;
-		iparml_dpl      p_dpl;
-		iparml_db       p_db;
-		iparml_purge    p_purge;
-		iparml_set_mask p_set_mask;
-	} param;
-	atomic_t in_use;
-	__u32    res;
-} __attribute__ ((aligned(8))) iucv_param;
-#define PARAM_POOL_SIZE (PAGE_SIZE / sizeof(iucv_param))
-
-static iucv_param * iucv_param_pool;
-
-MODULE_AUTHOR("(C) 2001 IBM Corp. by Fritz Elfert (felfert@millenux.com)");
-MODULE_DESCRIPTION("Linux for S/390 IUCV lowlevel driver");
-MODULE_LICENSE("GPL");
-
-/*
- * Debugging stuff
- *******************************************************************************/
-
-
-#ifdef DEBUG
-static int debuglevel = 0;
-
-module_param(debuglevel, int, 0);
-MODULE_PARM_DESC(debuglevel,
-		 "Specifies the debug level (0=off ... 3=all)");
-
-static void
-iucv_dumpit(char *title, void *buf, int len)
-{
-	int i;
-	__u8 *p = (__u8 *)buf;
-
-	if (debuglevel < 3)
-		return;
-
-	printk(KERN_DEBUG "%s\n", title);
-	printk(" ");
-	for (i = 0; i < len; i++) {
-		if (!(i % 16) && i != 0)
-			printk ("\n ");
-		else if (!(i % 4) && i != 0)
-			printk(" ");
-		printk("%02X", *p++);
-	}
-	if (len % 16)
-		printk ("\n");
-	return;
-}
-#define iucv_debug(lvl, fmt, args...) \
-do { \
-	if (debuglevel >= lvl) \
-		printk(KERN_DEBUG "%s: " fmt "\n", __FUNCTION__ , ## args); \
-} while (0)
-
-#else
-
-#define iucv_debug(lvl, fmt, args...)	do { } while (0)
-#define iucv_dumpit(title, buf, len)	do { } while (0)
-
-#endif
-
-/*
- * Internal functions
- *******************************************************************************/
-
-/**
- * print start banner
- */
-static void
-iucv_banner(void)
-{
-	printk(KERN_INFO "IUCV lowlevel driver initialized\n");
-}
-
-/**
- * iucv_init - Initialization
- *
- * Allocates and initializes various data structures.
- */
-static int
-iucv_init(void)
-{
-	int ret;
-
-	if (iucv_external_int_buffer)
-		return 0;
-
-	if (!MACHINE_IS_VM) {
-		printk(KERN_ERR "IUCV: IUCV connection needs VM as base\n");
-		return -EPROTONOSUPPORT;
-	}
-
-	ret = bus_register(&iucv_bus);
-	if (ret) {
-		printk(KERN_ERR "IUCV: failed to register bus.\n");
-		return ret;
-	}
-
-	iucv_root = s390_root_dev_register("iucv");
-	if (IS_ERR(iucv_root)) {
-		printk(KERN_ERR "IUCV: failed to register iucv root.\n");
-		bus_unregister(&iucv_bus);
-		return PTR_ERR(iucv_root);
-	}
-
-	/* Note: GFP_DMA is used to get memory below 2G */
-	iucv_external_int_buffer = kzalloc(sizeof(iucv_GeneralInterrupt),
-					   GFP_KERNEL|GFP_DMA);
-	if (!iucv_external_int_buffer) {
-		printk(KERN_WARNING
-		       "%s: Could not allocate external interrupt buffer\n",
-		       __FUNCTION__);
-		s390_root_dev_unregister(iucv_root);
-		bus_unregister(&iucv_bus);
-		return -ENOMEM;
-	}
-
-	/* Initialize parameter pool */
-	iucv_param_pool = kzalloc(sizeof(iucv_param) * PARAM_POOL_SIZE,
-				  GFP_KERNEL|GFP_DMA);
-	if (!iucv_param_pool) {
-		printk(KERN_WARNING "%s: Could not allocate param pool\n",
-		       __FUNCTION__);
-		kfree(iucv_external_int_buffer);
-		iucv_external_int_buffer = NULL;
-		s390_root_dev_unregister(iucv_root);
-		bus_unregister(&iucv_bus);
-		return -ENOMEM;
-	}
-
-	/* Initialize irq queue */
-	INIT_LIST_HEAD(&iucv_irq_queue);
-
-	/* Initialize handler table */
-	INIT_LIST_HEAD(&iucv_handler_table);
-
-	iucv_banner();
-	return 0;
-}
-
-/**
- * iucv_exit - De-Initialization
- *
- * Frees everything allocated from iucv_init.
- */
-static int iucv_retrieve_buffer (void);
-
-static void
-iucv_exit(void)
-{
-	iucv_retrieve_buffer();
-	kfree(iucv_external_int_buffer);
-	iucv_external_int_buffer = NULL;
-	kfree(iucv_param_pool);
-	iucv_param_pool = NULL;
-	s390_root_dev_unregister(iucv_root);
-	bus_unregister(&iucv_bus);
-	printk(KERN_INFO "IUCV lowlevel driver unloaded\n");
-}
-
-/**
- * grab_param: - Get a parameter buffer from the pre-allocated pool.
- *
- * This function searches for an unused element in the pre-allocated pool
- * of parameter buffers. If one is found, it marks it "in use" and returns
- * a pointer to it. The calling function is responsible for releasing it
- * when it has finished its usage.
- *
- * Returns: A pointer to iucv_param.
- */
-static __inline__ iucv_param *
-grab_param(void)
-{
-	iucv_param *ptr;
-	static int hint = 0;
-
-	ptr = iucv_param_pool + hint;
-	do {
-		ptr++;
-		if (ptr >= iucv_param_pool + PARAM_POOL_SIZE)
-			ptr = iucv_param_pool;
-	} while (atomic_cmpxchg(&ptr->in_use, 0, 1) != 0);
-	hint = ptr - iucv_param_pool;
-
-	memset(&ptr->param, 0, sizeof(ptr->param));
-	return ptr;
-}
-
-/**
- * release_param - Release a parameter buffer.
- * @p: A pointer to a struct iucv_param, previously obtained by calling
- *     grab_param().
- *
- * This function marks the specified parameter buffer "unused".
- */
-static __inline__ void
-release_param(void *p)
-{
-	atomic_set(&((iucv_param *)p)->in_use, 0);
-}
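
grab_param() and release_param() form a small lock-free allocator: callers scan the pre-allocated page of parameter lists and claim a free slot with atomic_cmpxchg(), so no spinlock is taken on this path. A minimal caller sketch (hypothetical code, mirroring the pattern every IUCV entry point below follows):

	iparml_control *parm;

	parm = (iparml_control *)grab_param();	/* claim a zeroed slot */
	parm->ippathid = pathid;		/* fill in the request */
	b2f0(SEVER, parm);			/* any CP call goes here */
	release_param(parm);			/* mark the slot unused again */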
-
-/**
- * iucv_add_handler: - Add a new handler
- * @new_handler: handler that is being entered into the chain.
- *
- * Places the new handler on iucv_handler_table if an identical handler is
- * not already found there.
- *
- * Returns: 0 on success, !0 on failure (handler already in chain).
- */
-static int
-iucv_add_handler (handler *new)
-{
-	ulong flags;
-
-	iucv_debug(1, "entering");
-	iucv_dumpit("handler:", new, sizeof(handler));
-
-	spin_lock_irqsave (&iucv_lock, flags);
-	if (!list_empty(&iucv_handler_table)) {
-		struct list_head *lh;
-
-		/**
-		 * Search list for handler with identical id. If one
-		 * is found, the new handler is _not_ added.
-		 */
-		list_for_each(lh, &iucv_handler_table) {
-			handler *h = list_entry(lh, handler, list);
-			if (!memcmp(&new->id, &h->id, sizeof(h->id))) {
-				iucv_debug(1, "ret 1");
-				spin_unlock_irqrestore (&iucv_lock, flags);
-				return 1;
-			}
-		}
-	}
-	/**
-	 * If we get here, no handler was found.
-	 */
-	INIT_LIST_HEAD(&new->list);
-	list_add(&new->list, &iucv_handler_table);
-	spin_unlock_irqrestore (&iucv_lock, flags);
-
-	iucv_debug(1, "exiting");
-	return 0;
-}
-
-/**
- * b2f0:
- * @code: identifier of IUCV call to CP.
- * @parm: pointer to the 40-byte iparml area passed to CP
- *
- * Calls CP to execute IUCV commands.
- *
- * Returns: return code from CP's IUCV call
- */
-static inline ulong b2f0(__u32 code, void *parm)
-{
-	register unsigned long reg0 asm ("0");
-	register unsigned long reg1 asm ("1");
-	iucv_dumpit("iparml before b2f0 call:", parm, sizeof(iucv_param));
-
-	reg0 = code;
-	reg1 = virt_to_phys(parm);
-	asm volatile(".long 0xb2f01000" : : "d" (reg0), "a" (reg1));
-
-	iucv_dumpit("iparml after b2f0 call:", parm, sizeof(iucv_param));
-
-	return (unsigned long)*((__u8 *)(parm + 3));
-}
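
b2f0() fetches the CP return code generically as byte 3 of whichever parameter list it was handed. That works because every 40-byte iparml layout defined above keeps its one-byte iprcode field at offset 3. A compile-time restatement of that assumption (illustration only, not part of the driver):

	BUILD_BUG_ON(offsetof(iparml_control, iprcode) != 3);
	BUILD_BUG_ON(offsetof(iparml_db, iprcode) != 3);
	BUILD_BUG_ON(offsetof(iparml_purge, iprcode) != 3);
	BUILD_BUG_ON(offsetof(iparml_set_mask, iprcode) != 3);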
-
-/*
- * Name: iucv_add_pathid
- * Purpose: Adds a path id to the system.
- * Input: pathid - pathid that is going to be entered into system
- *        handle - address of handler that the pathid will be associated
- *                 with.
- *        pgm_data - token passed in by application.
- * Output: 0: successful addition of pathid
- *         - EINVAL - pathid entry is being used by another application
- *         - ENOMEM - storage allocation for a new pathid table failed
- */
-static int
-__iucv_add_pathid(__u16 pathid, handler *handler)
-{
-
-	iucv_debug(1, "entering");
-
-	iucv_debug(1, "handler is pointing to %p", handler);
-
-	if (pathid > (max_connections - 1))
-		return -EINVAL;
-
-	if (iucv_pathid_table[pathid]) {
-		iucv_debug(1, "pathid entry is %p", iucv_pathid_table[pathid]);
-		printk(KERN_WARNING
-		       "%s: Pathid being used, error.\n", __FUNCTION__);
-		return -EINVAL;
-	}
-	iucv_pathid_table[pathid] = handler;
-
-	iucv_debug(1, "exiting");
-	return 0;
-} /* end of add_pathid function */
-
-static int
-iucv_add_pathid(__u16 pathid, handler *handler)
-{
-	ulong flags;
-	int rc;
-
-	spin_lock_irqsave (&iucv_lock, flags);
-	rc = __iucv_add_pathid(pathid, handler);
-	spin_unlock_irqrestore (&iucv_lock, flags);
-	return rc;
-}
-
-static void
-iucv_remove_pathid(__u16 pathid)
-{
-	ulong flags;
-
-	if (pathid > (max_connections - 1))
-		return;
-
-	spin_lock_irqsave (&iucv_lock, flags);
-	iucv_pathid_table[pathid] = NULL;
-	spin_unlock_irqrestore (&iucv_lock, flags);
-}
-
-/**
- * iucv_declare_buffer_cpuid
- * Register at VM for subsequent IUCV operations. This is executed
- * on the reserved CPU iucv_cpuid. Called from iucv_declare_buffer().
- */
-static void
-iucv_declare_buffer_cpuid (void *result)
-{
-	iparml_db *parm;
-
-	parm = (iparml_db *)grab_param();
-	parm->ipbfadr1 = virt_to_phys(iucv_external_int_buffer);
-	if ((*((ulong *)result) = b2f0(DECLARE_BUFFER, parm)) == 1)
-		*((ulong *)result) = parm->iprcode;
-	release_param(parm);
-}
-
-/**
- * iucv_retrieve_buffer_cpuid:
- * Unregister IUCV usage at VM. This is always executed on the same
- * cpu that registered the buffer to VM.
- * Called from iucv_retrieve_buffer().
- */
-static void
-iucv_retrieve_buffer_cpuid (void *cpu)
-{
-	iparml_control *parm;
-
-	parm = (iparml_control *)grab_param();
-	b2f0(RETRIEVE_BUFFER, parm);
-	release_param(parm);
-}
-
-/**
- * Name: iucv_declare_buffer
- * Purpose: Specifies the guest's real address of an external
- *          interrupt.
- * Input: void
- * Output: iprcode - return code from b2f0 call
- */
-static int
-iucv_declare_buffer (void)
-{
-	unsigned long flags;
-	ulong b2f0_result;
-
-	iucv_debug(1, "entering");
-	b2f0_result = -ENODEV;
-	spin_lock_irqsave (&iucv_lock, flags);
-	if (iucv_cpuid == -1) {
-		/* Reserve any cpu for use by iucv. */
-		iucv_cpuid = smp_get_cpu(CPU_MASK_ALL);
-		spin_unlock_irqrestore (&iucv_lock, flags);
-		smp_call_function_on(iucv_declare_buffer_cpuid,
-				     &b2f0_result, 0, 1, iucv_cpuid);
-		if (b2f0_result) {
-			smp_put_cpu(iucv_cpuid);
-			iucv_cpuid = -1;
-		}
-		iucv_debug(1, "Address of EIB = %p", iucv_external_int_buffer);
-	} else {
-		spin_unlock_irqrestore (&iucv_lock, flags);
-		b2f0_result = 0;
-	}
-	iucv_debug(1, "exiting");
-	return b2f0_result;
-}
-
-/**
- * iucv_retrieve_buffer:
- *
- * Terminates all use of IUCV.
- * Returns: return code from CP
- */
-static int
-iucv_retrieve_buffer (void)
-{
-	iucv_debug(1, "entering");
-	if (iucv_cpuid != -1) {
-		smp_call_function_on(iucv_retrieve_buffer_cpuid,
-				     NULL, 0, 1, iucv_cpuid);
-		/* Release the cpu reserved by iucv_declare_buffer. */
-		smp_put_cpu(iucv_cpuid);
-		iucv_cpuid = -1;
-	}
-	iucv_debug(1, "exiting");
-	return 0;
-}
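
CP associates a DECLARE BUFFER with the processor that issued it, which is why iucv_declare_buffer() reserves one cpu (iucv_cpuid) and why the matching RETRIEVE BUFFER must run on that same cpu. The lifecycle, condensed into a sketch (assuming the smp_* helpers behave exactly as used above):

	cpu = smp_get_cpu(CPU_MASK_ALL);	/* reserve any online cpu */
	smp_call_function_on(iucv_declare_buffer_cpuid, &rc, 0, 1, cpu);
	/* IUCV traffic runs while the buffer stays declared */
	smp_call_function_on(iucv_retrieve_buffer_cpuid, NULL, 0, 1, cpu);
	smp_put_cpu(cpu);			/* drop the reservation */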
-
-/**
- * iucv_remove_handler:
- * @handler: handler to be removed
- *
- * Remove handler when application unregisters.
- */
-static void
-iucv_remove_handler(handler *handler)
-{
-	unsigned long flags;
-
-	if ((!iucv_pathid_table) || (!handler))
-		return;
-
-	iucv_debug(1, "entering");
-
-	spin_lock_irqsave (&iucv_lock, flags);
-	list_del(&handler->list);
-	if (list_empty(&iucv_handler_table)) {
-		if (register_flag) {
-			unregister_external_interrupt(0x4000, iucv_irq_handler);
-			register_flag = 0;
-		}
-	}
-	spin_unlock_irqrestore (&iucv_lock, flags);
-
-	iucv_debug(1, "exiting");
-	return;
-}
-
-/**
- * iucv_register_program:
- * @pgmname: user identification
- * @userid: machine identification
- * @pgmmask: Indicates which bits in the pgmname and userid combined will be
- *           used to determine who is given control.
- * @ops: Address of interrupt handler table.
- * @pgm_data: Application data to be passed to interrupt handlers.
- *
- * Registers an application with IUCV.
- * Returns:
- *   The address of handler, or NULL on failure.
- * NOTE on pgmmask:
- *   If pgmname, userid and pgmmask are provided, pgmmask is entered into the
- *   handler as is.
- *   If pgmmask is NULL, the internal mask is set to all 0xff's
- *   When userid is NULL, the first 8 bytes of the internal mask are forced
- *   to 0x00.
- *   If pgmmask and userid are NULL, the first 8 bytes of the internal mask
- *   are forced to 0x00 and the last 16 bytes to 0xff.
- */
-
-iucv_handle_t
-iucv_register_program (__u8 pgmname[16],
-		       __u8 userid[8],
-		       __u8 pgmmask[24],
-		       iucv_interrupt_ops_t * ops, void *pgm_data)
-{
-	ulong rc = 0;		/* return code from function calls */
-	handler *new_handler;
-
-	iucv_debug(1, "entering");
-
-	if (ops == NULL) {
-		/* interrupt table is not defined */
-		printk(KERN_WARNING "%s: Interrupt table is not defined, "
-		       "exiting\n", __FUNCTION__);
-		return NULL;
-	}
-	if (!pgmname) {
-		printk(KERN_WARNING "%s: pgmname not provided\n", __FUNCTION__);
-		return NULL;
-	}
-
-	/* Allocate handler entry */
-	new_handler = kmalloc(sizeof(handler), GFP_ATOMIC);
-	if (new_handler == NULL) {
-		printk(KERN_WARNING "%s: storage allocation for new handler "
-		       "failed.\n", __FUNCTION__);
-		return NULL;
-	}
-
-	if (!iucv_pathid_table) {
-		if (iucv_init()) {
-			kfree(new_handler);
-			return NULL;
-		}
-
-		max_connections = iucv_query_maxconn();
-		iucv_pathid_table = kcalloc(max_connections, sizeof(handler *),
-					    GFP_ATOMIC);
-		if (iucv_pathid_table == NULL) {
-			printk(KERN_WARNING "%s: iucv_pathid_table storage "
-			       "allocation failed\n", __FUNCTION__);
-			kfree(new_handler);
-			return NULL;
-		}
-	}
-	memset(new_handler, 0, sizeof (handler));
-	memcpy(new_handler->id.user_data, pgmname,
-	       sizeof (new_handler->id.user_data));
-	if (userid) {
-		memcpy (new_handler->id.userid, userid,
-			sizeof (new_handler->id.userid));
-		ASCEBC (new_handler->id.userid,
-			sizeof (new_handler->id.userid));
-		EBC_TOUPPER (new_handler->id.userid,
-			     sizeof (new_handler->id.userid));
-
-		if (pgmmask) {
-			memcpy (new_handler->id.mask, pgmmask,
-				sizeof (new_handler->id.mask));
-		} else {
-			memset (new_handler->id.mask, 0xFF,
-				sizeof (new_handler->id.mask));
-		}
-	} else {
-		if (pgmmask) {
-			memcpy (new_handler->id.mask, pgmmask,
-				sizeof (new_handler->id.mask));
-		} else {
-			memset (new_handler->id.mask, 0xFF,
-				sizeof (new_handler->id.mask));
-		}
-		memset (new_handler->id.userid, 0x00,
-			sizeof (new_handler->id.userid));
-	}
-	/* fill in the rest of handler */
-	new_handler->pgm_data = pgm_data;
-	new_handler->interrupt_table = ops;
-
-	/*
-	 * Check if someone else is registered with same pgmname, userid
-	 * and mask. If someone is already registered with same pgmname,
-	 * userid and mask, registration will fail and NULL will be returned
-	 * to the application.
-	 * If identical handler not found, then handler is added to list.
-	 */
-	rc = iucv_add_handler(new_handler);
-	if (rc) {
-		printk(KERN_WARNING "%s: Someone already registered with same "
-		       "pgmname, userid, pgmmask\n", __FUNCTION__);
-		kfree (new_handler);
-		return NULL;
-	}
-
-	rc = iucv_declare_buffer();
-	if (rc) {
-		char *err = "Unknown";
-		iucv_remove_handler(new_handler);
-		kfree(new_handler);
-		switch(rc) {
-		case 0x03:
-			err = "Directory error";
-			break;
-		case 0x0a:
-			err = "Invalid length";
-			break;
-		case 0x13:
-			err = "Buffer already exists";
-			break;
-		case 0x3e:
-			err = "Buffer overlap";
-			break;
-		case 0x5c:
-			err = "Paging or storage error";
-			break;
-		}
-		printk(KERN_WARNING "%s: iucv_declare_buffer "
-		       "returned error 0x%02lx (%s)\n", __FUNCTION__, rc, err);
-		return NULL;
-	}
-	if (!register_flag) {
-		/* request the 0x4000 external interrupt */
-		rc = register_external_interrupt (0x4000, iucv_irq_handler);
-		if (rc) {
-			iucv_remove_handler(new_handler);
-			kfree (new_handler);
-			printk(KERN_WARNING "%s: "
-			       "register_external_interrupt returned %ld\n",
-			       __FUNCTION__, rc);
-			return NULL;
-
-		}
-		register_flag = 1;
-	}
-	iucv_debug(1, "exiting");
-	return new_handler;
-} /* end of register function */
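
A registration sketch for reference (hypothetical caller; the callback table behind iucv_interrupt_ops_t is declared in iucv.h and elided here). Passing NULL for userid and pgmmask selects the default masking described in the NOTE above:

	static iucv_interrupt_ops_t sample_ops;	/* callbacks filled in elsewhere */
	static __u8 sample_name[16] = "SAMPLEPGM";
	iucv_handle_t h;

	h = iucv_register_program(sample_name, NULL, NULL, &sample_ops, NULL);
	if (!h)
		return -EIO;	/* the driver already printk'ed the reason */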
-
-/**
- * iucv_unregister_program:
- * @handle: address of handler
- *
- * Unregister application with IUCV.
- * Returns:
- *   0 on success, -EINVAL, if specified handle is invalid.
- */
-
-int
-iucv_unregister_program (iucv_handle_t handle)
-{
-	handler *h = NULL;
-	struct list_head *lh;
-	int i;
-	ulong flags;
-
-	iucv_debug(1, "entering");
-	iucv_debug(1, "address of handler is %p", h);
-
-	/* Checking if handle is valid */
-	spin_lock_irqsave (&iucv_lock, flags);
-	list_for_each(lh, &iucv_handler_table) {
-		if ((handler *)handle == list_entry(lh, handler, list)) {
-			h = (handler *)handle;
-			break;
-		}
-	}
-	if (!h) {
-		spin_unlock_irqrestore (&iucv_lock, flags);
-		if (handle)
-			printk(KERN_WARNING
-			       "%s: Handler not found in iucv_handler_table.\n",
-			       __FUNCTION__);
-		else
-			printk(KERN_WARNING
-			       "%s: NULL handle passed by application.\n",
-			       __FUNCTION__);
-		return -EINVAL;
-	}
-
-	/**
-	 * First, walk through iucv_pathid_table and sever any pathid which is
-	 * still pointing to the handler to be removed.
-	 */
-	for (i = 0; i < max_connections; i++)
-		if (iucv_pathid_table[i] == h) {
-			spin_unlock_irqrestore (&iucv_lock, flags);
-			iucv_sever(i, h->id.user_data);
-			spin_lock_irqsave(&iucv_lock, flags);
-		}
-	spin_unlock_irqrestore (&iucv_lock, flags);
-
-	iucv_remove_handler(h);
-	kfree(h);
-
-	iucv_debug(1, "exiting");
-	return 0;
-}
-
-/**
- * iucv_accept:
- * @pathid: Path identification number
- * @msglim_reqstd: The number of outstanding messages requested.
- * @user_data: Data specified by the iucv_connect function.
- * @flags1: Contains options for this path.
- *   - IPPRTY (0x20)   Specifies if you want to send priority message.
- *   - IPRMDATA (0x80) Specifies whether your program can handle a message
- *                     in the parameter list.
- *   - IPQUSCE (0x40)  Specifies whether you want to quiesce the path being
- *                     established.
- * @handle: Address of handler.
- * @pgm_data: Application data passed to interrupt handlers.
- * @flags1_out: Pointer to an int. If not NULL, on return the options for
- *              the path are stored at the given location:
- *   - IPPRTY (0x20)   Indicates you may send a priority message.
- * @msglim: Pointer to an __u16. If not NULL, on return the maximum
- *          number of outstanding messages is stored at the given
- *          location.
- *
- * This function is issued after the user receives a Connection Pending external
- * interrupt and now wishes to complete the IUCV communication path.
- * Returns:
- *   return code from CP
- */
-int
-iucv_accept(__u16 pathid, __u16 msglim_reqstd,
-	    __u8 user_data[16], int flags1,
-	    iucv_handle_t handle, void *pgm_data,
-	    int *flags1_out, __u16 * msglim)
-{
-	ulong b2f0_result = 0;
-	ulong flags;
-	struct list_head *lh;
-	handler *h = NULL;
-	iparml_control *parm;
-
-	iucv_debug(1, "entering");
-	iucv_debug(1, "pathid = %d", pathid);
-
-	/* Checking if handle is valid */
-	spin_lock_irqsave (&iucv_lock, flags);
-	list_for_each(lh, &iucv_handler_table) {
-		if ((handler *)handle == list_entry(lh, handler, list)) {
-			h = (handler *)handle;
-			break;
-		}
-	}
-	spin_unlock_irqrestore (&iucv_lock, flags);
-
-	if (!h) {
-		if (handle)
-			printk(KERN_WARNING
-			       "%s: Handler not found in iucv_handler_table.\n",
-			       __FUNCTION__);
-		else
-			printk(KERN_WARNING
-			       "%s: NULL handle passed by application.\n",
-			       __FUNCTION__);
-		return -EINVAL;
-	}
-
-	parm = (iparml_control *)grab_param();
-
-	parm->ippathid = pathid;
-	parm->ipmsglim = msglim_reqstd;
-	if (user_data)
-		memcpy(parm->ipuser, user_data, sizeof(parm->ipuser));
-
-	parm->ipflags1 = (__u8)flags1;
-	b2f0_result = b2f0(ACCEPT, parm);
-
-	if (!b2f0_result) {
-		if (msglim)
-			*msglim = parm->ipmsglim;
-		if (pgm_data)
-			h->pgm_data = pgm_data;
-		if (flags1_out)
-			*flags1_out = (parm->ipflags1 & IPPRTY) ? IPPRTY : 0;
-	}
-	release_param(parm);
-
-	iucv_debug(1, "exiting");
-	return b2f0_result;
-}
-
-/**
- * iucv_connect:
- * @pathid: Path identification number
- * @msglim_reqstd: Number of outstanding messages requested
- * @user_data: 16-byte user data
- * @userid: 8-byte of user identification
- * @system_name: 8-byte identifying the system name
- * @flags1: Specifies options for this path:
- *   - IPPRTY (0x20)   Specifies if you want to send priority message.
- *   - IPRMDATA (0x80) Specifies whether your program can handle a message
- *                     in the parameter list.
- *   - IPQUSCE (0x40)  Specifies whether you want to quiesce the path being
- *                     established.
- *   - IPLOCAL (0x01)  Allows an application to force the partner to be on the
- *                     local system. If local is specified then target class
- *                     cannot be specified.
- * @flags1_out: Pointer to an int. If not NULL, on return the options for
- *              the path are stored at the given location:
- *   - IPPRTY (0x20)   Indicates you may send a priority message.
- * @msglim: Pointer to an __u16. If not NULL, on return the maximum
- *          number of outstanding messages is stored at the given
- *          location.
- * @handle: Address of handler.
- * @pgm_data: Application data to be passed to interrupt handlers.
- *
- * This function establishes an IUCV path. Although the connect may complete
- * successfully, you are not able to use the path until you receive an IUCV
- * Connection Complete external interrupt.
- * Returns: return code from CP, or one of the following
- *   - ENOMEM
- *   - return code from iucv_declare_buffer
- *   - EINVAL - invalid handle passed by application
- *   - EINVAL - pathid address is NULL
- *   - ENOMEM - pathid table storage allocation failed
- *   - return code from internal function add_pathid
- */
-int
-iucv_connect (__u16 *pathid, __u16 msglim_reqstd,
-	      __u8 user_data[16], __u8 userid[8],
-	      __u8 system_name[8], int flags1,
-	      int *flags1_out, __u16 * msglim,
-	      iucv_handle_t handle, void *pgm_data)
-{
-	iparml_control *parm;
-	iparml_control local_parm;
-	struct list_head *lh;
-	ulong b2f0_result = 0;
-	ulong flags;
-	int add_pathid_result = 0;
-	handler *h = NULL;
-	__u8 no_memory[16] = "NO MEMORY";
-
-	iucv_debug(1, "entering");
-
-	/* Checking if handle is valid */
-	spin_lock_irqsave (&iucv_lock, flags);
-	list_for_each(lh, &iucv_handler_table) {
-		if ((handler *)handle == list_entry(lh, handler, list)) {
-			h = (handler *)handle;
-			break;
-		}
-	}
-	spin_unlock_irqrestore (&iucv_lock, flags);
-
-	if (!h) {
-		if (handle)
-			printk(KERN_WARNING
-			       "%s: Handler not found in iucv_handler_table.\n",
-			       __FUNCTION__);
-		else
-			printk(KERN_WARNING
-			       "%s: NULL handle passed by application.\n",
-			       __FUNCTION__);
-		return -EINVAL;
-	}
-
-	if (pathid == NULL) {
-		printk(KERN_WARNING "%s: NULL pathid pointer\n",
-		       __FUNCTION__);
-		return -EINVAL;
-	}
-
-	parm = (iparml_control *)grab_param();
-
-	parm->ipmsglim = msglim_reqstd;
-
-	if (user_data)
-		memcpy(parm->ipuser, user_data, sizeof(parm->ipuser));
-
-	if (userid) {
-		memcpy(parm->ipvmid, userid, sizeof(parm->ipvmid));
-		ASCEBC(parm->ipvmid, sizeof(parm->ipvmid));
-		EBC_TOUPPER(parm->ipvmid, sizeof(parm->ipvmid));
-	}
-
-	if (system_name) {
-		memcpy(parm->iptarget, system_name, sizeof(parm->iptarget));
-		ASCEBC(parm->iptarget, sizeof(parm->iptarget));
-		EBC_TOUPPER(parm->iptarget, sizeof(parm->iptarget));
-	}
-
-	/* In order to establish an IUCV connection, the procedure is:
-	 *
-	 * b2f0(CONNECT)
-	 * take the ippathid from the b2f0 call
-	 * register the handler to the ippathid
-	 *
-	 * Unfortunately, the ConnectionEstablished message gets sent after the
-	 * b2f0(CONNECT) call but before the register is handled.
-	 *
-	 * In order for this race condition to be eliminated, the IUCV Control
-	 * Interrupts must be disabled for the above procedure.
-	 *
-	 * David Kennedy <dkennedy@linuxcare.com>
-	 */
-
-	/* Enable everything but IUCV Control messages */
-	iucv_setmask(~(AllInterrupts));
-	messagesDisabled = 1;
-
-	spin_lock_irqsave (&iucv_lock, flags);
-	parm->ipflags1 = (__u8)flags1;
-	b2f0_result = b2f0(CONNECT, parm);
-	memcpy(&local_parm, parm, sizeof(local_parm));
-	release_param(parm);
-	parm = &local_parm;
-	if (!b2f0_result)
-		add_pathid_result = __iucv_add_pathid(parm->ippathid, h);
-	spin_unlock_irqrestore (&iucv_lock, flags);
-
-	if (b2f0_result) {
-		iucv_setmask(~0);
-		messagesDisabled = 0;
-		return b2f0_result;
-	}
-
-	*pathid = parm->ippathid;
-
-	/* Enable everything again */
-	iucv_setmask(IUCVControlInterruptsFlag);
-
-	if (msglim)
-		*msglim = parm->ipmsglim;
-	if (flags1_out)
-		*flags1_out = (parm->ipflags1 & IPPRTY) ? IPPRTY : 0;
-
-	if (add_pathid_result) {
-		iucv_sever(*pathid, no_memory);
-		printk(KERN_WARNING "%s: add_pathid failed with rc ="
-		       " %d\n", __FUNCTION__, add_pathid_result);
-		return(add_pathid_result);
-	}
-
-	iucv_debug(1, "exiting");
-	return b2f0_result;
-}
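
The masking dance above closes the window in which CP could deliver a Connection Complete interrupt for a pathid that is not yet bound to a handler. Restated as an ordering sketch (the comments only restate the code above, nothing new):

	iucv_setmask(~(AllInterrupts));		 /* 1. mute IUCV interrupts   */
	b2f0(CONNECT, parm);			 /* 2. CP assigns ippathid    */
	__iucv_add_pathid(parm->ippathid, h);	 /* 3. bind handler to pathid */
	iucv_setmask(IUCVControlInterruptsFlag); /* 4. re-enable delivery     */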
-
-/**
- * iucv_purge:
- * @pathid: Path identification number
- * @msgid: Message ID of message to purge.
- * @srccls: Message class of the message to purge.
- * @audit: Pointer to an __u32. If not NULL, on return, information about
- *         asynchronous errors that may have affected the normal completion
- *         of this message is stored at the given location.
- *
- * Cancels a message you have sent.
- * Returns: return code from CP
- */
-int
-iucv_purge (__u16 pathid, __u32 msgid, __u32 srccls, __u32 *audit)
-{
-	iparml_purge *parm;
-	ulong b2f0_result = 0;
-
-	iucv_debug(1, "entering");
-	iucv_debug(1, "pathid = %d", pathid);
-
-	parm = (iparml_purge *)grab_param();
-
-	parm->ipmsgid = msgid;
-	parm->ippathid = pathid;
-	parm->ipsrccls = srccls;
-	parm->ipflags1 |= (IPSRCCLS | IPFGMID | IPFGPID);
-	b2f0_result = b2f0(PURGE, parm);
-
-	if (!b2f0_result && audit) {
-		memcpy(audit, parm->ipaudit, sizeof(parm->ipaudit));
-		/* parm->ipaudit has only 3 bytes */
-		*audit >>= 8;
-	}
-
-	release_param(parm);
-
-	iucv_debug(1, "b2f0_result = %ld", b2f0_result);
-	iucv_debug(1, "exiting");
-	return b2f0_result;
-}
-
-/**
- * iucv_query_generic:
- * @want_maxconn: Flag, describing which value is to be returned.
- *
- * Helper function for iucv_query_maxconn() and iucv_query_bufsize().
- *
- * Returns: The buffersize, if want_maxconn is 0; the maximum number of
- *          connections, if want_maxconn is 1 or an error-code < 0 on failure.
- */
-static int
-iucv_query_generic(int want_maxconn)
-{
-	register unsigned long reg0 asm ("0");
-	register unsigned long reg1 asm ("1");
-	iparml_purge *parm = (iparml_purge *)grab_param();
-	int bufsize, maxconn;
-	int ccode;
-
-	/**
-	 * Call b2f0 and store R0 (max buffer size),
-	 * R1 (max connections) and CC.
-	 */
-	reg0 = QUERY;
-	reg1 = virt_to_phys(parm);
-	asm volatile(
-		"	.long	0xb2f01000\n"
-		"	ipm	%0\n"
-		"	srl	%0,28\n"
-		: "=d" (ccode), "+d" (reg0), "+d" (reg1) : : "cc");
-	bufsize = reg0;
-	maxconn = reg1;
-	release_param(parm);
-
-	if (ccode)
-		return -EPERM;
-	if (want_maxconn)
-		return maxconn;
-	return bufsize;
-}
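
The trailing ipm/srl pair is how the condition code comes back from the QUERY: IPM inserts the cc into bits 2-3 of the register's top byte (bits 0-1 are zeroed), so a logical right shift by 28 leaves cc as an integer 0 to 3 in ccode. Equivalent arithmetic, for illustration only:

	unsigned int ipm_word = 0x10000000;	/* example raw IPM result */
	unsigned int cc = ipm_word >> 28;	/* yields cc == 1 */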
-
-/**
- * iucv_query_maxconn:
- *
- * Determines the maximum number of connections that may be established.
- *
- * Returns: Maximum number of connections that can be established.
- */
-ulong
-iucv_query_maxconn(void)
-{
-	return iucv_query_generic(1);
-}
-
-/**
- * iucv_query_bufsize:
- *
- * Determines the size of the external interrupt buffer.
- *
- * Returns: Size of external interrupt buffer.
- */
-ulong
-iucv_query_bufsize (void)
-{
-	return iucv_query_generic(0);
-}
-
-/**
- * iucv_quiesce:
- * @pathid: Path identification number
- * @user_data: 16-byte user data
- *
- * Temporarily suspends incoming messages on an IUCV path.
- * You can later reactivate the path by invoking the iucv_resume function.
- * Returns: return code from CP
- */
-int
-iucv_quiesce (__u16 pathid, __u8 user_data[16])
-{
-	iparml_control *parm;
-	ulong b2f0_result = 0;
-
-	iucv_debug(1, "entering");
-	iucv_debug(1, "pathid = %d", pathid);
-
-	parm = (iparml_control *)grab_param();
-
-	memcpy(parm->ipuser, user_data, sizeof(parm->ipuser));
-	parm->ippathid = pathid;
-
-	b2f0_result = b2f0(QUIESCE, parm);
-	release_param(parm);
-
-	iucv_debug(1, "b2f0_result = %ld", b2f0_result);
-	iucv_debug(1, "exiting");
-
-	return b2f0_result;
-}
-
-/**
- * iucv_receive:
- * @pathid: Path identification number.
- * @buffer: Address of buffer to receive. Must be below 2G.
- * @buflen: Length of buffer to receive.
- * @msgid: Specifies the message ID.
- * @trgcls: Specifies target class.
- * @flags1_out: Receives options for path on return.
- *   - IPNORPY (0x10)  Specifies whether a reply is required
- *   - IPPRTY (0x20)   Specifies if you want to send priority message
- *   - IPRMDATA (0x80) Specifies the data is contained in the parameter list
- * @residual_buffer: Receives the address of buffer updated by the number
- *                   of bytes you have received on return.
- * @residual_length: On return, receives one of the following values:
- *   - 0                          If the receive buffer is the same length as
- *                                the message.
- *   - Remaining bytes in buffer  If the receive buffer is longer than the
- *                                message.
- *   - Remaining bytes in message If the receive buffer is shorter than the
- *                                message.
- *
- * This function receives messages that are being sent to you over established
- * paths.
- * Returns: return code from CP IUCV call; if the receive buffer is shorter
- *   than the message, always 5
- *   -EINVAL - buffer address is pointing to NULL
- */
-int
-iucv_receive (__u16 pathid, __u32 msgid, __u32 trgcls,
-	      void *buffer, ulong buflen,
-	      int *flags1_out, ulong * residual_buffer, ulong * residual_length)
-{
-	iparml_db *parm;
-	ulong b2f0_result;
-	int moved = 0;	/* number of bytes moved from parmlist to buffer */
-
-	iucv_debug(2, "entering");
-
-	if (!buffer)
-		return -EINVAL;
-
-	parm = (iparml_db *)grab_param();
-
-	parm->ipbfadr1 = (__u32) (addr_t) buffer;
-	parm->ipbfln1f = (__u32) ((ulong) buflen);
-	parm->ipmsgid = msgid;
-	parm->ippathid = pathid;
-	parm->iptrgcls = trgcls;
-	parm->ipflags1 = (IPFGPID | IPFGMID | IPFGMCL);
-
-	b2f0_result = b2f0(RECEIVE, parm);
-
-	if (!b2f0_result || b2f0_result == 5) {
-		if (flags1_out) {
-			iucv_debug(2, "*flags1_out = %d", *flags1_out);
-			*flags1_out = (parm->ipflags1 & (~0x07));
-			iucv_debug(2, "*flags1_out = %d", *flags1_out);
-		}
-
-		if (!(parm->ipflags1 & IPRMDATA)) {	/* msg not in parmlist */
-			if (residual_length)
-				*residual_length = parm->ipbfln1f;
-
-			if (residual_buffer)
-				*residual_buffer = parm->ipbfadr1;
-		} else {
-			moved = min_t (unsigned long, buflen, 8);
-
-			memcpy ((char *) buffer,
-				(char *) &parm->ipbfadr1, moved);
-
-			if (buflen < 8)
-				b2f0_result = 5;
-
-			if (residual_length)
-				*residual_length = abs (buflen - 8);
-
-			if (residual_buffer)
-				*residual_buffer = (ulong) (buffer + moved);
-		}
-	}
-	release_param(parm);
-
-	iucv_debug(2, "exiting");
-	return b2f0_result;
-}
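
When CP sets IPRMDATA in the reply, a message of at most eight bytes was delivered directly inside the parameter list, so the branch above copies from &parm->ipbfadr1 instead of treating ipbfadr1 as a buffer address. A condensed restatement of that branch (sketch, not driver code):

	if (parm->ipflags1 & IPRMDATA) {
		int moved = min_t(unsigned long, buflen, 8);
		memcpy(buffer, &parm->ipbfadr1, moved);	/* inline data */
		if (buflen < 8)
			b2f0_result = 5;	/* part of the message is left over */
	}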
1419
1420/*
1421 * Name: iucv_receive_array
1422 * Purpose: This function receives messages that are being sent to you
1423 * over established paths.
1424 * Input: pathid - path identification number
1425 * buffer - address of array of buffers
1426 * buflen - total length of buffers
1427 * msgid - specifies the message ID.
1428 * trgcls - specifies target class
1429 * Output:
1430 * flags1_out: Options for path.
1431 * IPNORPY - 0x10 specifies whether a reply is required
1432 * IPPRTY - 0x20 specifies if you want to send priority message
1433 * IPRMDATA - 0x80 specifies the data is contained in the parameter list
1434 * residual_buffer - address points to the current list entry IUCV
1435 * is working on.
1436 * residual_length -
1437 * Contains one of the following values, if the receive buffer is:
1438 * The same length as the message, this field is zero.
1439 * Longer than the message, this field contains the number of
1440 * bytes remaining in the buffer.
1441 * Shorter than the message, this field contains the residual
1442 * count (that is, the number of bytes remaining in the
1443 * message that does not fit into the buffer. In this case
1444 * b2f0_result = 5.
1445 * Return: b2f0_result - return code from CP
1446 * (-EINVAL) - buffer address is NULL
1447 */
1448int
1449iucv_receive_array (__u16 pathid,
1450 __u32 msgid, __u32 trgcls,
1451 iucv_array_t * buffer, ulong buflen,
1452 int *flags1_out,
1453 ulong * residual_buffer, ulong * residual_length)
1454{
1455 iparml_db *parm;
1456 ulong b2f0_result;
1457 int i = 0, moved = 0, need_to_move = 8, dyn_len;
1458
1459 iucv_debug(2, "entering");
1460
1461 if (!buffer)
1462 return -EINVAL;
1463
1464 parm = (iparml_db *)grab_param();
1465
1466 parm->ipbfadr1 = (__u32) ((ulong) buffer);
1467 parm->ipbfln1f = (__u32) buflen;
1468 parm->ipmsgid = msgid;
1469 parm->ippathid = pathid;
1470 parm->iptrgcls = trgcls;
1471 parm->ipflags1 = (IPBUFLST | IPFGPID | IPFGMID | IPFGMCL);
1472
1473 b2f0_result = b2f0(RECEIVE, parm);
1474
1475 if (!b2f0_result || b2f0_result == 5) {
1476
1477 if (flags1_out) {
1478 iucv_debug(2, "*flags1_out = %d", *flags1_out);
1479 *flags1_out = (parm->ipflags1 & (~0x07));
1480 iucv_debug(2, "*flags1_out = %d", *flags1_out);
1481 }
1482
1483 if (!(parm->ipflags1 & IPRMDATA)) { /*msg not in parmlist */
1484
1485 if (residual_length)
1486 *residual_length = parm->ipbfln1f;
1487
1488 if (residual_buffer)
1489 *residual_buffer = parm->ipbfadr1;
1490
1491 } else {
1492 /* copy msg from parmlist to users array. */
1493
1494 while ((moved < 8) && (moved < buflen)) {
1495 dyn_len =
1496 min_t (unsigned int,
1497 (buffer + i)->length, need_to_move);
1498
1499 memcpy ((char *)((ulong)((buffer + i)->address)),
1500 ((char *) &parm->ipbfadr1) + moved,
1501 dyn_len);
1502
1503 moved += dyn_len;
1504 need_to_move -= dyn_len;
1505
1506 (buffer + i)->address =
1507 (__u32)
1508 ((ulong)(__u8 *) ((ulong)(buffer + i)->address)
1509 + dyn_len);
1510
1511 (buffer + i)->length -= dyn_len;
1512 i++;
1513 }
1514
1515 if (need_to_move) /* buflen < 8 bytes */
1516 b2f0_result = 5;
1517
1518 if (residual_length)
1519 *residual_length = abs (buflen - 8);
1520
1521 if (residual_buffer) {
1522 if (!moved)
1523 *residual_buffer = (ulong) buffer;
1524 else
1525 *residual_buffer =
1526 (ulong) (buffer + (i - 1));
1527 }
1528
1529 }
1530 }
1531 release_param(parm);
1532
1533 iucv_debug(2, "exiting");
1534 return b2f0_result;
1535}
1536
1537/**
1538 * iucv_reject:
1539 * @pathid: Path identification number.
1540 * @msgid: Message ID of the message to reject.
1541 * @trgcls: Target class of the message to reject.
1542 * Returns: return code from CP
1543 *
1544 * Refuses a specified message. Between the time you are notified of a
1545 * message and the time that you complete the message, the message may
1546 * be rejected.
1547 */
1548int
1549iucv_reject (__u16 pathid, __u32 msgid, __u32 trgcls)
1550{
1551 iparml_db *parm;
1552 ulong b2f0_result = 0;
1553
1554 iucv_debug(1, "entering");
1555 iucv_debug(1, "pathid = %d", pathid);
1556
1557 parm = (iparml_db *)grab_param();
1558
1559 parm->ippathid = pathid;
1560 parm->ipmsgid = msgid;
1561 parm->iptrgcls = trgcls;
1562 parm->ipflags1 = (IPFGMCL | IPFGMID | IPFGPID);
1563
1564 b2f0_result = b2f0(REJECT, parm);
1565 release_param(parm);
1566
1567 iucv_debug(1, "b2f0_result = %ld", b2f0_result);
1568 iucv_debug(1, "exiting");
1569
1570 return b2f0_result;
1571}
1572
1573/*
1574 * Name: iucv_reply
1575 * Purpose: This function responds to the two-way messages that you
1576 * receive. You must identify completely the message to
1577 * which you wish to reply. ie, pathid, msgid, and trgcls.
1578 * Input: pathid - path identification number
1579 * msgid - specifies the message ID.
1580 * trgcls - specifies target class
1581 * flags1 - option for path
1582 * IPPRTY- 0x20 - specifies if you want to send priority message
1583 * buffer - address of reply buffer
1584 * buflen - length of reply buffer
1585 * Output: ipbfadr2 - Address of buffer updated by the number
1586 * of bytes you have moved.
1587 * ipbfln2f - Contains one of the following values:
1588 * If the answer buffer is the same length as the reply, this field
1589 * contains zero.
1590 * If the answer buffer is longer than the reply, this field contains
1591 * the number of bytes remaining in the buffer.
1592 * If the answer buffer is shorter than the reply, this field contains
1593 * a residual count (that is, the number of bytes remaining in the
1594 * reply that does not fit into the buffer). In this
1595 * case b2f0_result = 5.
1596 * Return: b2f0_result - return code from CP
1597 * (-EINVAL) - buffer address is NULL
1598 */
1599int
1600iucv_reply (__u16 pathid,
1601 __u32 msgid, __u32 trgcls,
1602 int flags1,
1603 void *buffer, ulong buflen, ulong * ipbfadr2, ulong * ipbfln2f)
1604{
1605 iparml_db *parm;
1606 ulong b2f0_result;
1607
1608 iucv_debug(2, "entering");
1609
1610 if (!buffer)
1611 return -EINVAL;
1612
1613 parm = (iparml_db *)grab_param();
1614
1615 parm->ipbfadr2 = (__u32) ((ulong) buffer);
1616 parm->ipbfln2f = (__u32) buflen; /* length of message */
1617 parm->ippathid = pathid;
1618 parm->ipmsgid = msgid;
1619 parm->iptrgcls = trgcls;
1620 parm->ipflags1 = (__u8) flags1; /* priority message */
1621
1622 b2f0_result = b2f0(REPLY, parm);
1623
1624 if ((!b2f0_result) || (b2f0_result == 5)) {
1625 if (ipbfadr2)
1626 *ipbfadr2 = parm->ipbfadr2;
1627 if (ipbfln2f)
1628 *ipbfln2f = parm->ipbfln2f;
1629 }
1630 release_param(parm);
1631
1632 iucv_debug(2, "exiting");
1633
1634 return b2f0_result;
1635}
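A minimal reply to a two-way message, as a sketch (names hypothetical; the
identifying triple pathid/msgid/trgcls must match the MessagePending data):

	static int example_reply(__u16 pathid, __u32 msgid, __u32 trgcls)
	{
		static char answer[] = "OK";
		ulong moved, left;

		/* flags1 = 0 sends a nonpriority reply; pass IPPRTY (0x20)
		 * for a priority reply */
		return iucv_reply(pathid, msgid, trgcls, 0,
				  answer, sizeof(answer), &moved, &left);
	}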
1636
1637/*
1638 * Name: iucv_reply_array
1639 * Purpose: This function responds to the two-way messages that you
1640 * receive. You must identify completely the message to
1641 * which you wish to reply. ie, pathid, msgid, and trgcls.
1642 * The array identifies a list of addresses and lengths of
1643 * discontiguous buffers that contain the reply data.
1644 * Input: pathid - path identification number
1645 * msgid - specifies the message ID.
1646 * trgcls - specifies target class
1647 * flags1 - option for path
1648 * IPPRTY- specifies if you want to send priority message
1649 * buffer - address of array of reply buffers
1650 * buflen - total length of reply buffers
1651 * Output: ipbfadr2 - Address of buffer which IUCV is currently working on.
1652 * ipbfln2f - Contains one of the following values:
1653 * If the answer buffer is the same length as the reply, this field
1654 * contains zero.
1655 * If the answer buffer is longer than the reply, this field contains
1656 * the number of bytes remaining in the buffer.
1657 * If the answer buffer is shorter than the reply, this field contains
1658 * a residual count (that is, the number of bytes remaining in the
1659 * reply that does not fit into the buffer). In this
1660 * case b2f0_result = 5.
1661 * Return: b2f0_result - return code from CP
1662 * (-EINVAL) - buffer address is NULL
1663*/
1664int
1665iucv_reply_array (__u16 pathid,
1666 __u32 msgid, __u32 trgcls,
1667 int flags1,
1668 iucv_array_t * buffer,
1669 ulong buflen, ulong * ipbfadr2, ulong * ipbfln2f)
1670{
1671 iparml_db *parm;
1672 ulong b2f0_result;
1673
1674 iucv_debug(2, "entering");
1675
1676 if (!buffer)
1677 return -EINVAL;
1678
1679 parm = (iparml_db *)grab_param();
1680
1681 parm->ipbfadr2 = (__u32) ((ulong) buffer);
1682 parm->ipbfln2f = buflen; /* length of message */
1683 parm->ippathid = pathid;
1684 parm->ipmsgid = msgid;
1685 parm->iptrgcls = trgcls;
1686 parm->ipflags1 = (IPANSLST | flags1);
1687
1688 b2f0_result = b2f0(REPLY, parm);
1689
1690 if ((!b2f0_result) || (b2f0_result == 5)) {
1691
1692 if (ipbfadr2)
1693 *ipbfadr2 = parm->ipbfadr2;
1694 if (ipbfln2f)
1695 *ipbfln2f = parm->ipbfln2f;
1696 }
1697 release_param(parm);
1698
1699 iucv_debug(2, "exiting");
1700
1701 return b2f0_result;
1702}
1703
1704/*
1705 * Name: iucv_reply_prmmsg
1706 * Purpose: This function responds to the two-way messages that you
1707 * receive. You must identify completely the message to
1708 * which you wish to reply. ie, pathid, msgid, and trgcls.
1709 * Prmmsg signifies the data is moved into the
1710 * parameter list.
1711 * Input: pathid - path identification number
1712 * msgid - specifies the message ID.
1713 * trgcls - specifies target class
1714 * flags1 - option for path
1715 * IPPRTY- specifies if you want to send priority message
1716 * prmmsg - 8-bytes of data to be placed into the parameter
1717 * list.
1718 * Output: NA
1719 * Return: b2f0_result - return code from CP
1720*/
1721int
1722iucv_reply_prmmsg (__u16 pathid,
1723 __u32 msgid, __u32 trgcls, int flags1, __u8 prmmsg[8])
1724{
1725 iparml_dpl *parm;
1726 ulong b2f0_result;
1727
1728 iucv_debug(2, "entering");
1729
1730 parm = (iparml_dpl *)grab_param();
1731
1732 parm->ippathid = pathid;
1733 parm->ipmsgid = msgid;
1734 parm->iptrgcls = trgcls;
1735 memcpy(parm->iprmmsg, prmmsg, sizeof (parm->iprmmsg));
1736 parm->ipflags1 = (IPRMDATA | flags1);
1737
1738 b2f0_result = b2f0(REPLY, parm);
1739 release_param(parm);
1740
1741 iucv_debug(2, "exiting");
1742
1743 return b2f0_result;
1744}
1745
1746/**
1747 * iucv_resume:
1748 * @pathid: Path identification number
1749 * @user_data: 16 bytes of user data
1750 *
1751 * This function restores communication over a quiesced path.
1752 * Returns: return code from CP
1753 */
1754int
1755iucv_resume (__u16 pathid, __u8 user_data[16])
1756{
1757 iparml_control *parm;
1758 ulong b2f0_result = 0;
1759
1760 iucv_debug(1, "entering");
1761 iucv_debug(1, "pathid = %d", pathid);
1762
1763 parm = (iparml_control *)grab_param();
1764
1765	memcpy (parm->ipuser, user_data, sizeof (parm->ipuser));	/* all 16 bytes; sizeof (*user_data) is only 1 */
1766 parm->ippathid = pathid;
1767
1768 b2f0_result = b2f0(RESUME, parm);
1769 release_param(parm);
1770
1771 iucv_debug(1, "exiting");
1772
1773 return b2f0_result;
1774}
1775
1776/*
1777 * Name: iucv_send
1778 * Purpose: sends messages
1779 * Input: pathid - ushort, pathid
1780 * msgid - ulong *, id of message returned to caller
1781 * trgcls - ulong, target message class
1782 * srccls - ulong, source message class
1783 * msgtag - ulong, message tag
1784 * flags1 - Contains options for this path.
1785 * IPPRTY - 0x20 - specifies if you want to send a priority message.
1786 * buffer - pointer to buffer
1787 * buflen - ulong, length of buffer
1788 * Output: b2f0_result - return code from b2f0 call
1789 * msgid - returns message id
1790 */
1791int
1792iucv_send (__u16 pathid, __u32 * msgid,
1793 __u32 trgcls, __u32 srccls,
1794 __u32 msgtag, int flags1, void *buffer, ulong buflen)
1795{
1796 iparml_db *parm;
1797 ulong b2f0_result;
1798
1799 iucv_debug(2, "entering");
1800
1801 if (!buffer)
1802 return -EINVAL;
1803
1804 parm = (iparml_db *)grab_param();
1805
1806 parm->ipbfadr1 = (__u32) ((ulong) buffer);
1807 parm->ippathid = pathid;
1808 parm->iptrgcls = trgcls;
1809 parm->ipbfln1f = (__u32) buflen; /* length of message */
1810 parm->ipsrccls = srccls;
1811 parm->ipmsgtag = msgtag;
1812 parm->ipflags1 = (IPNORPY | flags1); /* one way priority message */
1813
1814 b2f0_result = b2f0(SEND, parm);
1815
1816 if ((!b2f0_result) && (msgid))
1817 *msgid = parm->ipmsgid;
1818 release_param(parm);
1819
1820 iucv_debug(2, "exiting");
1821
1822 return b2f0_result;
1823}
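A one-way send, sketched under the assumption that the path is already
connected (all names hypothetical):

	static int example_send(__u16 pathid)
	{
		static char text[] = "hello";
		__u32 msgid;

		/* trgcls, srccls and msgtag are application-defined; 0 works
		 * when the partner does not filter on them */
		return iucv_send(pathid, &msgid, 0, 0, 0, 0,
				 text, sizeof(text));
	}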
1824
1825/*
1826 * Name: iucv_send_array
1827 * Purpose: This function transmits data to another application.
1828 * The buffer argument is the address of the array of
1829 * addresses and lengths of discontiguous buffers that hold
1830 * the message text. This is a one-way message and the
1831 * receiver will not reply to the message.
1832 * Input: pathid - path identification number
1833 * trgcls - specifies target class
1834 * srccls - specifies the source message class
1835 * msgtag - specifies a tag to be associated with the message
1836 * flags1 - option for path
1837 * IPPRTY- specifies if you want to send priority message
1838 * buffer - address of array of send buffers
1839 * buflen - total length of send buffers
1840 * Output: msgid - specifies the message ID.
1841 * Return: b2f0_result - return code from CP
1842 * (-EINVAL) - buffer address is NULL
1843 */
1844int
1845iucv_send_array (__u16 pathid,
1846 __u32 * msgid,
1847 __u32 trgcls,
1848 __u32 srccls,
1849 __u32 msgtag, int flags1, iucv_array_t * buffer, ulong buflen)
1850{
1851 iparml_db *parm;
1852 ulong b2f0_result;
1853
1854 iucv_debug(2, "entering");
1855
1856 if (!buffer)
1857 return -EINVAL;
1858
1859 parm = (iparml_db *)grab_param();
1860
1861 parm->ippathid = pathid;
1862 parm->iptrgcls = trgcls;
1863 parm->ipbfadr1 = (__u32) ((ulong) buffer);
1864 parm->ipbfln1f = (__u32) buflen; /* length of message */
1865 parm->ipsrccls = srccls;
1866 parm->ipmsgtag = msgtag;
1867 parm->ipflags1 = (IPNORPY | IPBUFLST | flags1);
1868 b2f0_result = b2f0(SEND, parm);
1869
1870 if ((!b2f0_result) && (msgid))
1871 *msgid = parm->ipmsgid;
1872 release_param(parm);
1873
1874 iucv_debug(2, "exiting");
1875 return b2f0_result;
1876}
1877
1878/*
1879 * Name: iucv_send_prmmsg
1880 * Purpose: This function transmits data to another application.
1881 * Prmmsg specifies that the 8-bytes of data are to be moved
1882 * into the parameter list. This is a one-way message and the
1883 * receiver will not reply to the message.
1884 * Input: pathid - path identification number
1885 * trgcls - specifies target class
1886 * srccls - specifies the source message class
1887 * msgtag - specifies a tag to be associated with the message
1888 * flags1 - option for path
1889 * IPPRTY- specifies if you want to send priority message
1890 * prmmsg - 8-bytes of data to be placed into parameter list
1891 * Output: msgid - specifies the message ID.
1892 * Return: b2f0_result - return code from CP
1893*/
1894int
1895iucv_send_prmmsg (__u16 pathid,
1896 __u32 * msgid,
1897 __u32 trgcls,
1898 __u32 srccls, __u32 msgtag, int flags1, __u8 prmmsg[8])
1899{
1900 iparml_dpl *parm;
1901 ulong b2f0_result;
1902
1903 iucv_debug(2, "entering");
1904
1905 parm = (iparml_dpl *)grab_param();
1906
1907 parm->ippathid = pathid;
1908 parm->iptrgcls = trgcls;
1909 parm->ipsrccls = srccls;
1910 parm->ipmsgtag = msgtag;
1911 parm->ipflags1 = (IPRMDATA | IPNORPY | flags1);
1912 memcpy(parm->iprmmsg, prmmsg, sizeof(parm->iprmmsg));
1913
1914 b2f0_result = b2f0(SEND, parm);
1915
1916 if ((!b2f0_result) && (msgid))
1917 *msgid = parm->ipmsgid;
1918 release_param(parm);
1919
1920 iucv_debug(2, "exiting");
1921
1922 return b2f0_result;
1923}
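Because the payload rides in the parameter list itself, a prmmsg send needs
no buffer at all; a sketch (hypothetical names, exactly 8 data bytes):

	static int example_send_prmmsg(__u16 pathid)
	{
		__u8 small_msg[8] = "PINGPONG";	/* 8 bytes, no NUL stored */
		__u32 msgid;

		/* the receiver sees IPRMDATA set and finds the data in its
		 * parameter list instead of a buffer */
		return iucv_send_prmmsg(pathid, &msgid, 0, 0, 0, 0, small_msg);
	}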
1924
1925/*
1926 * Name: iucv_send2way
1927 * Purpose: This function transmits data to another application.
1928 * Data to be transmitted is in a buffer. The receiver
1929 * of the send is expected to reply to the message and
1930 * a buffer is provided into which IUCV moves the reply
1931 * to this message.
1932 * Input: pathid - path identification number
1933 * trgcls - specifies target class
1934 * srccls - specifies the source message class
1935 * msgtag - specifies a tag associated with the message
1936 * flags1 - option for path
1937 * IPPRTY- specifies if you want to send priority message
1938 * buffer - address of send buffer
1939 * buflen - length of send buffer
1940 * ansbuf - address of buffer to reply with
1941 * anslen - length of buffer to reply with
1942 * Output: msgid - specifies the message ID.
1943 * Return: b2f0_result - return code from CP
1944 * (-EINVAL) - buffer or ansbuf address is NULL
1945 */
1946int
1947iucv_send2way (__u16 pathid,
1948 __u32 * msgid,
1949 __u32 trgcls,
1950 __u32 srccls,
1951 __u32 msgtag,
1952 int flags1,
1953 void *buffer, ulong buflen, void *ansbuf, ulong anslen)
1954{
1955 iparml_db *parm;
1956 ulong b2f0_result;
1957
1958 iucv_debug(2, "entering");
1959
1960 if (!buffer || !ansbuf)
1961 return -EINVAL;
1962
1963 parm = (iparml_db *)grab_param();
1964
1965 parm->ippathid = pathid;
1966 parm->iptrgcls = trgcls;
1967 parm->ipbfadr1 = (__u32) ((ulong) buffer);
1968 parm->ipbfln1f = (__u32) buflen; /* length of message */
1969 parm->ipbfadr2 = (__u32) ((ulong) ansbuf);
1970 parm->ipbfln2f = (__u32) anslen;
1971 parm->ipsrccls = srccls;
1972 parm->ipmsgtag = msgtag;
1973 parm->ipflags1 = flags1; /* priority message */
1974
1975 b2f0_result = b2f0(SEND, parm);
1976
1977 if ((!b2f0_result) && (msgid))
1978 *msgid = parm->ipmsgid;
1979 release_param(parm);
1980
1981 iucv_debug(2, "exiting");
1982
1983 return b2f0_result;
1984}
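A two-way send supplies the answer buffer up front; IUCV fills it when the
partner replies and then raises a MessageComplete interrupt. Sketch only
(hypothetical names):

	static int example_send2way(__u16 pathid)
	{
		static char question[] = "time?";
		static char answer[64];
		__u32 msgid;

		return iucv_send2way(pathid, &msgid, 0, 0, 0, 0,
				     question, sizeof(question),
				     answer, sizeof(answer));
	}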
1985
1986/*
1987 * Name: iucv_send2way_array
1988 * Purpose: This function transmits data to another application.
1989 * The buffer argument is the address of the array of
1990 * addresses and lengths of discontiguous buffers that hold
1991 * the message text. The receiver of the send is expected to
1992 * reply to the message and a buffer is provided into which
1993 * IUCV moves the reply to this message.
1994 * Input: pathid - path identification number
1995 * trgcls - specifies target class
1996 * srccls - specifies the source message class
1997 * msgtag - specifies a tag to be associated with the message
1998 * flags1 - option for path
1999 * IPPRTY- specifies if you want to send priority message
2000 * buffer - address of array of send buffers
2001 * buflen - total length of send buffers
2002 * ansbuf - address of buffer to reply with
2003 * anslen - length of buffer to reply with
2004 * Output: msgid - specifies the message ID.
2005 * Return: b2f0_result - return code from CP
2006 * (-EINVAL) - buffer address is NULL
2007 */
2008int
2009iucv_send2way_array (__u16 pathid,
2010 __u32 * msgid,
2011 __u32 trgcls,
2012 __u32 srccls,
2013 __u32 msgtag,
2014 int flags1,
2015 iucv_array_t * buffer,
2016 ulong buflen, iucv_array_t * ansbuf, ulong anslen)
2017{
2018 iparml_db *parm;
2019 ulong b2f0_result;
2020
2021 iucv_debug(2, "entering");
2022
2023 if (!buffer || !ansbuf)
2024 return -EINVAL;
2025
2026 parm = (iparml_db *)grab_param();
2027
2028 parm->ippathid = pathid;
2029 parm->iptrgcls = trgcls;
2030 parm->ipbfadr1 = (__u32) ((ulong) buffer);
2031 parm->ipbfln1f = (__u32) buflen; /* length of message */
2032 parm->ipbfadr2 = (__u32) ((ulong) ansbuf);
2033 parm->ipbfln2f = (__u32) anslen;
2034 parm->ipsrccls = srccls;
2035 parm->ipmsgtag = msgtag;
2036 parm->ipflags1 = (IPBUFLST | IPANSLST | flags1);
2037 b2f0_result = b2f0(SEND, parm);
2038 if ((!b2f0_result) && (msgid))
2039 *msgid = parm->ipmsgid;
2040 release_param(parm);
2041
2042 iucv_debug(2, "exiting");
2043 return b2f0_result;
2044}
2045
2046/*
2047 * Name: iucv_send2way_prmmsg
2048 * Purpose: This function transmits data to another application.
2049 * Prmmsg specifies that the 8-bytes of data are to be moved
2050 * into the parameter list. This is a two-way message and the
2051 * receiver of the message is expected to reply. A buffer
2052 * is provided into which IUCV moves the reply to this
2053 * message.
2054 * Input: pathid - path identification number
2055 * trgcls - specifies target class
2056 * srccls - specifies the source message class
2057 * msgtag - specifies a tag to be associated with the message
2058 * flags1 - option for path
2059 * IPPRTY- specifies if you want to send priority message
2060 * prmmsg - 8-bytes of data to be placed in parameter list
2061 * ansbuf - address of buffer to reply with
2062 * anslen - length of buffer to reply with
2063 * Output: msgid - specifies the message ID.
2064 * Return: b2f0_result - return code from CP
2065 * (-EINVAL) - buffer address is NULL
2066*/
2067int
2068iucv_send2way_prmmsg (__u16 pathid,
2069 __u32 * msgid,
2070 __u32 trgcls,
2071 __u32 srccls,
2072 __u32 msgtag,
2073 ulong flags1, __u8 prmmsg[8], void *ansbuf, ulong anslen)
2074{
2075 iparml_dpl *parm;
2076 ulong b2f0_result;
2077
2078 iucv_debug(2, "entering");
2079
2080 if (!ansbuf)
2081 return -EINVAL;
2082
2083 parm = (iparml_dpl *)grab_param();
2084
2085 parm->ippathid = pathid;
2086 parm->iptrgcls = trgcls;
2087 parm->ipsrccls = srccls;
2088 parm->ipmsgtag = msgtag;
2089 parm->ipbfadr2 = (__u32) ((ulong) ansbuf);
2090 parm->ipbfln2f = (__u32) anslen;
2091 parm->ipflags1 = (IPRMDATA | flags1); /* message in prmlist */
2092 memcpy(parm->iprmmsg, prmmsg, sizeof(parm->iprmmsg));
2093
2094 b2f0_result = b2f0(SEND, parm);
2095
2096 if ((!b2f0_result) && (msgid))
2097 *msgid = parm->ipmsgid;
2098 release_param(parm);
2099
2100 iucv_debug(2, "exiting");
2101
2102 return b2f0_result;
2103}
2104
2105/*
2106 * Name: iucv_send2way_prmmsg_array
2107 * Purpose: This function transmits data to another application.
2108 * Prmmsg specifies that the 8-bytes of data are to be moved
2109 * into the parameter list. This is a two-way message and the
2110 * receiver of the message is expected to reply. A buffer
2111 * is provided into which IUCV moves the reply to this
2112 * message. The ansbuf argument is the address of the
2113 * array of addresses and lengths of discontiguous buffers
2114 * that contain the reply.
2115 * Input: pathid - path identification number
2116 * trgcls - specifies target class
2117 * srccls - specifies the source message class
2118 * msgtag - specifies a tag to be associated with the message
2119 * flags1 - option for path
2120 * IPPRTY- specifies if you want to send priority message
2121 * prmmsg - 8-bytes of data to be placed into the parameter list
2122 * ansbuf - address of buffer to reply with
2123 * anslen - length of buffer to reply with
2124 * Output: msgid - specifies the message ID.
2125 * Return: b2f0_result - return code from CP
2126 * (-EINVAL) - ansbuf address is NULL
2127 */
2128int
2129iucv_send2way_prmmsg_array (__u16 pathid,
2130 __u32 * msgid,
2131 __u32 trgcls,
2132 __u32 srccls,
2133 __u32 msgtag,
2134 int flags1,
2135 __u8 prmmsg[8],
2136 iucv_array_t * ansbuf, ulong anslen)
2137{
2138 iparml_dpl *parm;
2139 ulong b2f0_result;
2140
2141 iucv_debug(2, "entering");
2142
2143 if (!ansbuf)
2144 return -EINVAL;
2145
2146 parm = (iparml_dpl *)grab_param();
2147
2148 parm->ippathid = pathid;
2149 parm->iptrgcls = trgcls;
2150 parm->ipsrccls = srccls;
2151 parm->ipmsgtag = msgtag;
2152 parm->ipbfadr2 = (__u32) ((ulong) ansbuf);
2153 parm->ipbfln2f = (__u32) anslen;
2154 parm->ipflags1 = (IPRMDATA | IPANSLST | flags1);
2155 memcpy(parm->iprmmsg, prmmsg, sizeof(parm->iprmmsg));
2156 b2f0_result = b2f0(SEND, parm);
2157 if ((!b2f0_result) && (msgid))
2158 *msgid = parm->ipmsgid;
2159 release_param(parm);
2160
2161 iucv_debug(2, "exiting");
2162 return b2f0_result;
2163}
2164
2165void
2166iucv_setmask_cpuid (void *result)
2167{
2168 iparml_set_mask *parm;
2169
2170 iucv_debug(1, "entering");
2171 parm = (iparml_set_mask *)grab_param();
2172 parm->ipmask = *((__u8*)result);
2173 *((ulong *)result) = b2f0(SETMASK, parm);
2174 release_param(parm);
2175
2176 iucv_debug(1, "b2f0_result = %ld", *((ulong *)result));
2177 iucv_debug(1, "exiting");
2178}
2179
2180/*
2181 * Name: iucv_setmask
2182 * Purpose: This function enables or disables the following IUCV
2183 * external interruptions: Nonpriority and priority message
2184 * interrupts, nonpriority and priority reply interrupts.
2185 * Input: SetMaskFlag - options for interrupts
2186 * 0x80 - Nonpriority_MessagePendingInterruptsFlag
2187 * 0x40 - Priority_MessagePendingInterruptsFlag
2188 * 0x20 - Nonpriority_MessageCompletionInterruptsFlag
2189 * 0x10 - Priority_MessageCompletionInterruptsFlag
2190 * 0x08 - IUCVControlInterruptsFlag
2191 * Output: NA
2192 * Return: b2f0_result - return code from CP
2193*/
2194int
2195iucv_setmask (int SetMaskFlag)
2196{
2197 union {
2198 ulong result;
2199 __u8 param;
2200 } u;
2201 int cpu;
2202
2203 u.param = SetMaskFlag;
2204 cpu = get_cpu();
2205 smp_call_function_on(iucv_setmask_cpuid, &u, 0, 1, iucv_cpuid);
2206 put_cpu();
2207
2208 return u.result;
2209}
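The mask is simply an OR of the flag constants from iucv.h; for example, to
throttle traffic while a backlog drains (sketch, hypothetical names):

	static int example_throttle(void)
	{
		int rc;

		/* keep only IUCV control interrupts enabled */
		rc = iucv_setmask(IUCVControlInterruptsFlag);
		if (rc)
			return rc;
		/* ... drain the backlog ... */

		/* re-enable all five interrupt classes (0xf8) */
		return iucv_setmask(AllInterrupts);
	}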
2210
2211/**
2212 * iucv_sever:
2213 * @pathid: Path identification number
2214 * @user_data: 16 bytes of user data
2215 *
2216 * This function terminates an iucv path.
2217 * Returns: return code from CP
2218 */
2219int
2220iucv_sever(__u16 pathid, __u8 user_data[16])
2221{
2222 iparml_control *parm;
2223 ulong b2f0_result = 0;
2224
2225 iucv_debug(1, "entering");
2226 parm = (iparml_control *)grab_param();
2227
2228 memcpy(parm->ipuser, user_data, sizeof(parm->ipuser));
2229 parm->ippathid = pathid;
2230
2231 b2f0_result = b2f0(SEVER, parm);
2232
2233 if (!b2f0_result)
2234 iucv_remove_pathid(pathid);
2235 release_param(parm);
2236
2237 iucv_debug(1, "exiting");
2238 return b2f0_result;
2239}
2240
2241/*
2242 * Interrupt Handlers
2243 *******************************************************************************/
2244
2245/**
2246 * iucv_irq_handler:
2247 * @regs: Current registers
2248 * @code: irq code
2249 *
2250 * Handles external interrupts coming in from CP.
2251 * Places the interrupt buffer on a queue and schedules iucv_tasklet_handler().
2252 */
2253static void
2254iucv_irq_handler(__u16 code)
2255{
2256 iucv_irqdata *irqdata;
2257
2258 irqdata = kmalloc(sizeof(iucv_irqdata), GFP_ATOMIC);
2259 if (!irqdata) {
2260 printk(KERN_WARNING "%s: out of memory\n", __FUNCTION__);
2261 return;
2262 }
2263
2264 memcpy(&irqdata->data, iucv_external_int_buffer,
2265 sizeof(iucv_GeneralInterrupt));
2266
2267 spin_lock(&iucv_irq_queue_lock);
2268 list_add_tail(&irqdata->queue, &iucv_irq_queue);
2269 spin_unlock(&iucv_irq_queue_lock);
2270
2271 tasklet_schedule(&iucv_tasklet);
2272}
2273
2274/**
2275 * iucv_do_int:
2276 * @int_buf: Pointer to copy of external interrupt buffer
2277 *
2278 * The workhorse for handling interrupts queued by iucv_irq_handler().
2279 * This function is called from the bottom half iucv_tasklet_handler().
2280 */
2281static void
2282iucv_do_int(iucv_GeneralInterrupt * int_buf)
2283{
2284 handler *h = NULL;
2285 struct list_head *lh;
2286 ulong flags;
2287 iucv_interrupt_ops_t *interrupt = NULL; /* interrupt addresses */
2288 __u8 temp_buff1[24], temp_buff2[24]; /* masked handler id. */
2289 int rc = 0, j = 0;
2290 __u8 no_listener[16] = "NO LISTENER";
2291
2292 iucv_debug(2, "entering, pathid %d, type %02X",
2293 int_buf->ippathid, int_buf->iptype);
2294 iucv_dumpit("External Interrupt Buffer:",
2295 int_buf, sizeof(iucv_GeneralInterrupt));
2296
2297 ASCEBC (no_listener, 16);
2298
2299	if (int_buf->iptype != 0x01) {	/* anything but connection pending */
2300 if ((int_buf->ippathid) > (max_connections - 1)) {
2301 printk(KERN_WARNING "%s: Got interrupt with pathid %d"
2302 " > max_connections (%ld)\n", __FUNCTION__,
2303 int_buf->ippathid, max_connections - 1);
2304 } else {
2305 h = iucv_pathid_table[int_buf->ippathid];
2306			interrupt = h ? h->interrupt_table : NULL;	/* h may be NULL if the path is already gone */
2307			if (h) iucv_dumpit("Handler:", h, sizeof(handler));
2308 }
2309 }
2310
2311 /* end of if statement */
2312 switch (int_buf->iptype) {
2313 case 0x01: /* connection pending */
2314 if (messagesDisabled) {
2315 iucv_setmask(~0);
2316 messagesDisabled = 0;
2317 }
2318 spin_lock_irqsave(&iucv_lock, flags);
2319 list_for_each(lh, &iucv_handler_table) {
2320 h = list_entry(lh, handler, list);
2321			memcpy(temp_buff1, &(int_buf->ipvmid), 24);	/* pending connection: vmid + user data */
2322			memcpy(temp_buff2, &(h->id.userid), 24);	/* registered handler's id */
2323 for (j = 0; j < 24; j++) {
2324 temp_buff1[j] &= (h->id.mask)[j];
2325 temp_buff2[j] &= (h->id.mask)[j];
2326 }
2327
2328 iucv_dumpit("temp_buff1:",
2329 temp_buff1, sizeof(temp_buff1));
2330 iucv_dumpit("temp_buff2",
2331 temp_buff2, sizeof(temp_buff2));
2332
2333 if (!memcmp (temp_buff1, temp_buff2, 24)) {
2334
2335 iucv_debug(2,
2336 "found a matching handler");
2337 break;
2338 } else
2339 h = NULL;
2340 }
2341 spin_unlock_irqrestore (&iucv_lock, flags);
2342 if (h) {
2343 /* ADD PATH TO PATHID TABLE */
2344 rc = iucv_add_pathid(int_buf->ippathid, h);
2345 if (rc) {
2346 iucv_sever (int_buf->ippathid,
2347 no_listener);
2348 iucv_debug(1,
2349 "add_pathid failed, rc = %d",
2350 rc);
2351 } else {
2352 interrupt = h->interrupt_table;
2353 if (interrupt->ConnectionPending) {
2354 EBCASC (int_buf->ipvmid, 8);
2355 interrupt->ConnectionPending(
2356 (iucv_ConnectionPending *)int_buf,
2357 h->pgm_data);
2358 } else
2359 iucv_sever(int_buf->ippathid,
2360 no_listener);
2361 }
2362 } else
2363 iucv_sever(int_buf->ippathid, no_listener);
2364 break;
2365
2366 case 0x02: /*connection complete */
2367 if (messagesDisabled) {
2368 iucv_setmask(~0);
2369 messagesDisabled = 0;
2370 }
2371 if (h) {
2372 if (interrupt->ConnectionComplete)
2373 {
2374 interrupt->ConnectionComplete(
2375 (iucv_ConnectionComplete *)int_buf,
2376 h->pgm_data);
2377 }
2378 else
2379 iucv_debug(1,
2380 "ConnectionComplete not called");
2381 } else
2382 iucv_sever(int_buf->ippathid, no_listener);
2383 break;
2384
2385 case 0x03: /* connection severed */
2386 if (messagesDisabled) {
2387 iucv_setmask(~0);
2388 messagesDisabled = 0;
2389 }
2390 if (h) {
2391 if (interrupt->ConnectionSevered)
2392 interrupt->ConnectionSevered(
2393 (iucv_ConnectionSevered *)int_buf,
2394 h->pgm_data);
2395
2396 else
2397 iucv_sever (int_buf->ippathid, no_listener);
2398 } else
2399 iucv_sever(int_buf->ippathid, no_listener);
2400 break;
2401
2402 case 0x04: /* connection quiesced */
2403 if (messagesDisabled) {
2404 iucv_setmask(~0);
2405 messagesDisabled = 0;
2406 }
2407 if (h) {
2408 if (interrupt->ConnectionQuiesced)
2409 interrupt->ConnectionQuiesced(
2410 (iucv_ConnectionQuiesced *)int_buf,
2411 h->pgm_data);
2412 else
2413 iucv_debug(1,
2414 "ConnectionQuiesced not called");
2415 }
2416 break;
2417
2418 case 0x05: /* connection resumed */
2419 if (messagesDisabled) {
2420 iucv_setmask(~0);
2421 messagesDisabled = 0;
2422 }
2423 if (h) {
2424 if (interrupt->ConnectionResumed)
2425 interrupt->ConnectionResumed(
2426 (iucv_ConnectionResumed *)int_buf,
2427 h->pgm_data);
2428 else
2429 iucv_debug(1,
2430 "ConnectionResumed not called");
2431 }
2432 break;
2433
2434 case 0x06: /* priority message complete */
2435 case 0x07: /* nonpriority message complete */
2436 if (h) {
2437 if (interrupt->MessageComplete)
2438 interrupt->MessageComplete(
2439 (iucv_MessageComplete *)int_buf,
2440 h->pgm_data);
2441 else
2442 iucv_debug(2,
2443 "MessageComplete not called");
2444 }
2445 break;
2446
2447 case 0x08: /* priority message pending */
2448 case 0x09: /* nonpriority message pending */
2449 if (h) {
2450 if (interrupt->MessagePending)
2451 interrupt->MessagePending(
2452 (iucv_MessagePending *) int_buf,
2453 h->pgm_data);
2454 else
2455 iucv_debug(2,
2456 "MessagePending not called");
2457 }
2458 break;
2459 default: /* unknown iucv type */
2460 printk(KERN_WARNING "%s: unknown iucv interrupt\n",
2461 __FUNCTION__);
2462 break;
2463 } /* end switch */
2464
2465 iucv_debug(2, "exiting pathid %d, type %02X",
2466 int_buf->ippathid, int_buf->iptype);
2467
2468 return;
2469}
2470
2471/**
2472 * iucv_tasklet_handler:
2473 *
2474 * This function loops over the queue of irq buffers and runs iucv_do_int()
2475 * on every queue element.
2476 */
2477static void
2478iucv_tasklet_handler(unsigned long ignored)
2479{
2480 struct list_head head;
2481 struct list_head *next;
2482 ulong flags;
2483
2484 spin_lock_irqsave(&iucv_irq_queue_lock, flags);
2485 list_add(&head, &iucv_irq_queue);
2486 list_del_init(&iucv_irq_queue);
2487 spin_unlock_irqrestore (&iucv_irq_queue_lock, flags);
2488
2489 next = head.next;
2490 while (next != &head) {
2491 iucv_irqdata *p = list_entry(next, iucv_irqdata, queue);
2492
2493 next = next->next;
2494 iucv_do_int(&p->data);
2495 kfree(p);
2496 }
2497
2498 return;
2499}
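The list_add()/list_del_init() pair above detaches the whole queue in O(1)
so the elements can be processed without holding the spinlock; the same idiom
can be written with list_splice_init() (equivalent sketch, reusing the
function's own lock, queue, and flags variables):

	LIST_HEAD(local);

	spin_lock_irqsave(&iucv_irq_queue_lock, flags);
	list_splice_init(&iucv_irq_queue, &local);
	spin_unlock_irqrestore(&iucv_irq_queue_lock, flags);
	/* walk `local` lock-free; new interrupts queue up undisturbed */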
2500
2501subsys_initcall(iucv_init);
2502module_exit(iucv_exit);
2503
2504/**
2505 * Export all public stuff
2506 */
2507EXPORT_SYMBOL (iucv_bus);
2508EXPORT_SYMBOL (iucv_root);
2509EXPORT_SYMBOL (iucv_accept);
2510EXPORT_SYMBOL (iucv_connect);
2511#if 0
2512EXPORT_SYMBOL (iucv_purge);
2513EXPORT_SYMBOL (iucv_query_maxconn);
2514EXPORT_SYMBOL (iucv_query_bufsize);
2515EXPORT_SYMBOL (iucv_quiesce);
2516#endif
2517EXPORT_SYMBOL (iucv_receive);
2518#if 0
2519EXPORT_SYMBOL (iucv_receive_array);
2520#endif
2521EXPORT_SYMBOL (iucv_reject);
2522#if 0
2523EXPORT_SYMBOL (iucv_reply);
2524EXPORT_SYMBOL (iucv_reply_array);
2525EXPORT_SYMBOL (iucv_resume);
2526#endif
2527EXPORT_SYMBOL (iucv_reply_prmmsg);
2528EXPORT_SYMBOL (iucv_send);
2529EXPORT_SYMBOL (iucv_send2way);
2530EXPORT_SYMBOL (iucv_send2way_array);
2531EXPORT_SYMBOL (iucv_send2way_prmmsg);
2532EXPORT_SYMBOL (iucv_send2way_prmmsg_array);
2533#if 0
2534EXPORT_SYMBOL (iucv_send_array);
2535EXPORT_SYMBOL (iucv_send_prmmsg);
2536EXPORT_SYMBOL (iucv_setmask);
2537#endif
2538EXPORT_SYMBOL (iucv_sever);
2539EXPORT_SYMBOL (iucv_register_program);
2540EXPORT_SYMBOL (iucv_unregister_program);
diff --git a/drivers/s390/net/iucv.h b/drivers/s390/net/iucv.h
deleted file mode 100644
index 5b6b1b7241c9..000000000000
--- a/drivers/s390/net/iucv.h
+++ /dev/null
@@ -1,849 +0,0 @@
1/*
2 * drivers/s390/net/iucv.h
3 * IUCV base support.
4 *
5 * S390 version
6 * Copyright (C) 2000 IBM Corporation
7 * Author(s):Alan Altmark (Alan_Altmark@us.ibm.com)
8 * Xenia Tkatschow (xenia@us.ibm.com)
9 *
10 *
11 * Functionality:
12 * To explore any of the IUCV functions, one must first register
13 * their program using iucv_register_program(). Once your program has
14 * successfully completed a register, it can exploit the other functions.
15 *	To explore any of the IUCV functions, refer to the
16 * CP Programming Services book, also available on the web
17 *	through www.ibm.com/s390/vm/pubs, manual # SC24-5760
18 *
19 * Definition of Return Codes
20 * -All positive return codes including zero are reflected back
21 * from CP except for iucv_register_program. The definition of each
22 * return code can be found in CP Programming Services book.
23 *	Also available on the web through www.ibm.com/s390/vm/pubs, manual # SC24-5760
24 * - Return Code of:
25 * (-EINVAL) Invalid value
26 * (-ENOMEM) storage allocation failed
27 *	pgmmask defined in iucv_register_program will be set depending on input
28 *	parameters.
29 *
30 */
31
32#include <linux/types.h>
33#include <asm/debug.h>
34
35/**
36 * Debug Facility stuff
37 */
38#define IUCV_DBF_SETUP_NAME "iucv_setup"
39#define IUCV_DBF_SETUP_LEN 32
40#define IUCV_DBF_SETUP_PAGES 2
41#define IUCV_DBF_SETUP_NR_AREAS 1
42#define IUCV_DBF_SETUP_LEVEL 3
43
44#define IUCV_DBF_DATA_NAME "iucv_data"
45#define IUCV_DBF_DATA_LEN 128
46#define IUCV_DBF_DATA_PAGES 2
47#define IUCV_DBF_DATA_NR_AREAS 1
48#define IUCV_DBF_DATA_LEVEL 2
49
50#define IUCV_DBF_TRACE_NAME "iucv_trace"
51#define IUCV_DBF_TRACE_LEN 16
52#define IUCV_DBF_TRACE_PAGES 4
53#define IUCV_DBF_TRACE_NR_AREAS 1
54#define IUCV_DBF_TRACE_LEVEL 3
55
56#define IUCV_DBF_TEXT(name,level,text) \
57 do { \
58 debug_text_event(iucv_dbf_##name,level,text); \
59 } while (0)
60
61#define IUCV_DBF_HEX(name,level,addr,len) \
62 do { \
63 debug_event(iucv_dbf_##name,level,(void*)(addr),len); \
64 } while (0)
65
66DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);
67
68#define IUCV_DBF_TEXT_(name,level,text...) \
69 do { \
70 char* iucv_dbf_txt_buf = get_cpu_var(iucv_dbf_txt_buf); \
71 sprintf(iucv_dbf_txt_buf, text); \
72 debug_text_event(iucv_dbf_##name,level,iucv_dbf_txt_buf); \
73 put_cpu_var(iucv_dbf_txt_buf); \
74 } while (0)
75
76#define IUCV_DBF_SPRINTF(name,level,text...) \
77 do { \
78		/* log each event exactly once via the trace area */ \
79		debug_sprintf_event(iucv_dbf_trace, level, ##text ); \
80	} while (0)
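Typical use of these macros, assuming the iucv_dbf_setup/data/trace areas
have been registered with the s390 debug facility elsewhere (sketch,
hypothetical names):

	static void example_dbf(int rc, void *buf, int len)
	{
		IUCV_DBF_TEXT(trace, 3, "connect");	/* fixed string */
		IUCV_DBF_TEXT_(setup, 2, "rc=%d", rc);	/* formatted, per-cpu buffer */
		IUCV_DBF_HEX(data, 2, buf, len);	/* raw hex dump */
	}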
81
82/**
83 * some more debug stuff
84 */
85#define IUCV_HEXDUMP16(importance,header,ptr) \
86PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
87 "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
88 *(((char*)ptr)),*(((char*)ptr)+1),*(((char*)ptr)+2), \
89 *(((char*)ptr)+3),*(((char*)ptr)+4),*(((char*)ptr)+5), \
90 *(((char*)ptr)+6),*(((char*)ptr)+7),*(((char*)ptr)+8), \
91 *(((char*)ptr)+9),*(((char*)ptr)+10),*(((char*)ptr)+11), \
92 *(((char*)ptr)+12),*(((char*)ptr)+13), \
93 *(((char*)ptr)+14),*(((char*)ptr)+15)); \
94PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
95 "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
96 *(((char*)ptr)+16),*(((char*)ptr)+17), \
97 *(((char*)ptr)+18),*(((char*)ptr)+19), \
98 *(((char*)ptr)+20),*(((char*)ptr)+21), \
99 *(((char*)ptr)+22),*(((char*)ptr)+23), \
100 *(((char*)ptr)+24),*(((char*)ptr)+25), \
101 *(((char*)ptr)+26),*(((char*)ptr)+27), \
102 *(((char*)ptr)+28),*(((char*)ptr)+29), \
103 *(((char*)ptr)+30),*(((char*)ptr)+31));
104
105static inline void
106iucv_hex_dump(unsigned char *buf, size_t len)
107{
108 size_t i;
109
110 for (i = 0; i < len; i++) {
111 if (i && !(i % 16))
112 printk("\n");
113 printk("%02x ", *(buf + i));
114 }
115 printk("\n");
116}
117/**
118 * end of debug stuff
119 */
120
121#define uchar unsigned char
122#define ushort unsigned short
123#define ulong unsigned long
124#define iucv_handle_t void *
125
126/* flags1:
127 * All flags are defined in the field IPFLAGS1 of each function
128 * and can be found in CP Programming Services.
129 * IPLOCAL - Indicates the connect can only be satisfied on the
130 * local system
131 * IPPRTY - Indicates a priority message
132 * IPQUSCE - Indicates you do not want to receive messages on a
133 * path until an iucv_resume is issued
134 * IPRMDATA - Indicates that the message is in the parameter list
135 */
136#define IPLOCAL 0x01
137#define IPPRTY 0x20
138#define IPQUSCE 0x40
139#define IPRMDATA 0x80
140
141/* flags1_out:
142 * All flags are defined in the output field of IPFLAGS1 for each function
143 * and can be found in CP Programming Services.
144 * IPNORPY - Specifies this is a one-way message and no reply is expected.
145 * IPPRTY - Indicates a priority message is permitted. Defined in flags1.
146 */
147#define IPNORPY 0x10
148
149#define Nonpriority_MessagePendingInterruptsFlag 0x80
150#define Priority_MessagePendingInterruptsFlag 0x40
151#define Nonpriority_MessageCompletionInterruptsFlag 0x20
152#define Priority_MessageCompletionInterruptsFlag 0x10
153#define IUCVControlInterruptsFlag 0x08
154#define AllInterrupts 0xf8
155/*
156 * Mapping of external interrupt buffers should be used with the corresponding
157 * interrupt types.
158 * Names: iucv_ConnectionPending -> connection pending
159 * iucv_ConnectionComplete -> connection complete
160 * iucv_ConnectionSevered -> connection severed
161 * iucv_ConnectionQuiesced -> connection quiesced
162 * iucv_ConnectionResumed -> connection resumed
163 * iucv_MessagePending -> message pending
164 * iucv_MessageComplete -> message complete
165 */
166typedef struct {
167 u16 ippathid;
168 uchar ipflags1;
169 uchar iptype;
170 u16 ipmsglim;
171 u16 res1;
172 uchar ipvmid[8];
173 uchar ipuser[16];
174 u32 res3;
175 uchar ippollfg;
176 uchar res4[3];
177} iucv_ConnectionPending;
178
179typedef struct {
180 u16 ippathid;
181 uchar ipflags1;
182 uchar iptype;
183 u16 ipmsglim;
184 u16 res1;
185 uchar res2[8];
186 uchar ipuser[16];
187 u32 res3;
188 uchar ippollfg;
189 uchar res4[3];
190} iucv_ConnectionComplete;
191
192typedef struct {
193 u16 ippathid;
194 uchar res1;
195 uchar iptype;
196 u32 res2;
197 uchar res3[8];
198 uchar ipuser[16];
199 u32 res4;
200 uchar ippollfg;
201 uchar res5[3];
202} iucv_ConnectionSevered;
203
204typedef struct {
205 u16 ippathid;
206 uchar res1;
207 uchar iptype;
208 u32 res2;
209 uchar res3[8];
210 uchar ipuser[16];
211 u32 res4;
212 uchar ippollfg;
213 uchar res5[3];
214} iucv_ConnectionQuiesced;
215
216typedef struct {
217 u16 ippathid;
218 uchar res1;
219 uchar iptype;
220 u32 res2;
221 uchar res3[8];
222 uchar ipuser[16];
223 u32 res4;
224 uchar ippollfg;
225 uchar res5[3];
226} iucv_ConnectionResumed;
227
228typedef struct {
229 u16 ippathid;
230 uchar ipflags1;
231 uchar iptype;
232 u32 ipmsgid;
233 u32 iptrgcls;
234 union u2 {
235 u32 iprmmsg1_u32;
236 uchar iprmmsg1[4];
237 } ln1msg1;
238 union u1 {
239 u32 ipbfln1f;
240 uchar iprmmsg2[4];
241 } ln1msg2;
242 u32 res1[3];
243 u32 ipbfln2f;
244 uchar ippollfg;
245 uchar res2[3];
246} iucv_MessagePending;
247
248typedef struct {
249 u16 ippathid;
250 uchar ipflags1;
251 uchar iptype;
252 u32 ipmsgid;
253 u32 ipaudit;
254 uchar iprmmsg[8];
255 u32 ipsrccls;
256 u32 ipmsgtag;
257 u32 res;
258 u32 ipbfln2f;
259 uchar ippollfg;
260 uchar res2[3];
261} iucv_MessageComplete;
262
263/*
264 * iucv_interrupt_ops_t: Is a vector of functions that handle
265 * IUCV interrupts.
266 * Parameter list:
267 * eib - is a pointer to a 40-byte area described
268 * with one of the structures above.
269 * pgm_data - this data is strictly for the
270 * interrupt handler that is passed by
271 * the application. This may be an address
272 * or token.
273*/
274typedef struct {
275 void (*ConnectionPending) (iucv_ConnectionPending * eib,
276 void *pgm_data);
277 void (*ConnectionComplete) (iucv_ConnectionComplete * eib,
278 void *pgm_data);
279 void (*ConnectionSevered) (iucv_ConnectionSevered * eib,
280 void *pgm_data);
281 void (*ConnectionQuiesced) (iucv_ConnectionQuiesced * eib,
282 void *pgm_data);
283 void (*ConnectionResumed) (iucv_ConnectionResumed * eib,
284 void *pgm_data);
285 void (*MessagePending) (iucv_MessagePending * eib, void *pgm_data);
286 void (*MessageComplete) (iucv_MessageComplete * eib, void *pgm_data);
287} iucv_interrupt_ops_t;
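A handler typically populates only the callbacks it needs; the dispatcher
checks each pointer before invoking it. A sketch (hypothetical names):

	static void my_msg_pending(iucv_MessagePending *eib, void *pgm_data)
	{
		/* note eib->ippathid/ipmsgid and schedule a receive */
	}

	static iucv_interrupt_ops_t my_ops = {
		.MessagePending = my_msg_pending,
		/* unused members stay NULL */
	};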
288
289/*
290 *iucv_array_t : Defines buffer array.
291 *	Entries in the array hold 31-bit addresses and 31-bit lengths.
292*/
293typedef struct {
294 u32 address;
295 u32 length;
296} iucv_array_t __attribute__ ((aligned (8)));
297
298extern struct bus_type iucv_bus;
299extern struct device *iucv_root;
300
301/* -prototypes- */
302/*
303 * Name: iucv_register_program
304 * Purpose: Registers an application with IUCV
305 * Input: prmname - user identification
306 * userid - machine identification
307 * pgmmask - indicates which bits in the pgmname and userid combined will be
308 * used to determine who is given control
309 * ops - address of vector of interrupt handlers
310 * pgm_data- application data passed to interrupt handlers
311 * Output: NA
312 * Return: address of handler
313 * (0) - Error occurred, registration not completed.
314 * NOTE: Exact cause of failure will be recorded in syslog.
315*/
316iucv_handle_t iucv_register_program (uchar pgmname[16],
317 uchar userid[8],
318 uchar pgmmask[24],
319 iucv_interrupt_ops_t * ops,
320 void *pgm_data);
321
322/*
323 * Name: iucv_unregister_program
324 * Purpose: Unregister application with IUCV
325 * Input: address of handler
326 * Output: NA
327 * Return: (0) - Normal return
328 * (-EINVAL) - Internal error, wild pointer
329*/
330int iucv_unregister_program (iucv_handle_t handle);
331
332/*
333 * Name: iucv_accept
334 * Purpose: This function is issued after the user receives a Connection Pending external
335 * interrupt and now wishes to complete the IUCV communication path.
336 * Input: pathid - u16 , Path identification number
337 * msglim_reqstd - u16, The number of outstanding messages requested.
338 * user_data - uchar[16], Data specified by the iucv_connect function.
339 * flags1 - int, Contains options for this path.
340 * -IPPRTY - 0x20- Specifies if you want to send priority message.
341 * -IPRMDATA - 0x80, Specifies whether your program can handle a message
342 * in the parameter list.
343 * -IPQUSCE - 0x40, Specifies whether you want to quiesce the path being
344 * established.
345 * handle - iucv_handle_t, Address of handler.
346 * pgm_data - void *, Application data passed to interrupt handlers.
347 * flags1_out - int * Contains information about the path
348 * - IPPRTY - 0x20, Indicates you may send priority messages.
349 * msglim - *u16, Number of outstanding messages.
350 * Output: return code from CP IUCV call.
351*/
352
353int iucv_accept (u16 pathid,
354 u16 msglim_reqstd,
355 uchar user_data[16],
356 int flags1,
357 iucv_handle_t handle,
358 void *pgm_data, int *flags1_out, u16 * msglim);
359
360/*
361 * Name: iucv_connect
362 * Purpose: This function establishes an IUCV path. Although the connect may complete
363 * successfully, you are not able to use the path until you receive an IUCV
364 * Connection Complete external interrupt.
365 * Input: pathid - u16 *, Path identification number
366 * msglim_reqstd - u16, Number of outstanding messages requested
367 * user_data - uchar[16], 16-byte user data
368 * userid - uchar[8], User identification
369 * system_name - uchar[8], 8-byte identifying the system name
370 * flags1 - int, Contains options for this path.
371 * -IPPRTY - 0x20, Specifies if you want to send priority message.
372 * -IPRMDATA - 0x80, Specifies whether your program can handle a message
373 * in the parameter list.
374 * -IPQUSCE - 0x40, Specifies whether you want to quiesce the path being
375 * established.
376 * -IPLOCAL - 0X01, Allows an application to force the partner to be on
377 * the local system. If local is specified then target class cannot be
378 * specified.
379 * flags1_out - int * Contains information about the path
380 * - IPPRTY - 0x20, Indicates you may send priority messages.
381 * msglim - * u16, Number of outstanding messages
382 * handle - iucv_handle_t, Address of handler
383 * pgm_data - void *, Application data passed to interrupt handlers
384 * Output: return code from CP IUCV call
385 * rc - return code from iucv_declare_buffer
386 * -EINVAL - Invalid handle passed by application
387 * -EINVAL - Pathid address is NULL
388 * add_pathid_result - Return code from internal function add_pathid
389*/
390int
391 iucv_connect (u16 * pathid,
392 u16 msglim_reqstd,
393 uchar user_data[16],
394 uchar userid[8],
395 uchar system_name[8],
396 int flags1,
397 int *flags1_out,
398 u16 * msglim, iucv_handle_t handle, void *pgm_data);
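Putting registration and connection together, a sketch reusing the
hypothetical my_ops vector from the sketch above (identifiers are blank-padded
8/16-byte fields; the concrete values, and the blank system_name meaning the
local system, are assumptions for illustration only):

	static int example_setup(void)
	{
		uchar pgmname[16]   = "MYSERVICE       ";
		uchar partner[8]    = "PARTNER ";
		uchar sysname[8]    = "        ";
		uchar user_data[16] = { 0 };
		uchar mask[24];
		u16 pathid, msglim;
		int flags1_out, rc;
		iucv_handle_t handle;

		memset(mask, 0xff, sizeof(mask));	/* match every id byte */
		handle = iucv_register_program(pgmname, partner, mask,
					       &my_ops, NULL);
		if (!handle)
			return -EIO;

		rc = iucv_connect(&pathid, 10, user_data, partner, sysname,
				  0, &flags1_out, &msglim, handle, NULL);
		/* the path becomes usable only after ConnectionComplete */
		return rc;
	}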
399
400/*
401 * Name: iucv_purge
402 * Purpose: This function cancels a message that you have sent.
403 * Input: pathid - Path identification number.
404 * msgid - Specifies the message ID of the message to be purged.
405 * srccls - Specifies the source message class.
406 * Output: audit - Contains information about asynchronous error
407 * that may have affected the normal completion
408 * of this message.
409 * Return: Return code from CP IUCV call.
410*/
411int iucv_purge (u16 pathid, u32 msgid, u32 srccls, __u32 *audit);
412/*
413 * Name: iucv_query_maxconn
414 * Purpose: This function determines the maximum number of communication paths you
415 * may establish.
416 * Return: maxconn - ulong, Maximum number of connections the virtual machine may
417 * establish.
418*/
419ulong iucv_query_maxconn (void);
420
421/*
422 * Name: iucv_query_bufsize
423 * Purpose: This function determines how large an external interrupt
424 * buffer IUCV requires to store information.
425 * Return: bufsize - ulong, Size of external interrupt buffer.
426 */
427ulong iucv_query_bufsize (void);
428
429/*
430 * Name: iucv_quiesce
431 * Purpose: This function temporarily suspends incoming messages on an
432 * IUCV path. You can later reactivate the path by invoking
433 * the iucv_resume function.
434 * Input: pathid - Path identification number
435 * user_data - 16-bytes of user data
436 * Output: NA
437 * Return: Return code from CP IUCV call.
438*/
439int iucv_quiesce (u16 pathid, uchar user_data[16]);
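Quiesce and resume pair naturally around maintenance windows; a sketch
(hypothetical names, 16-byte blank-padded user data):

	static void example_pause(u16 pathid)
	{
		uchar note[16] = "MAINTENANCE     ";

		if (!iucv_quiesce(pathid, note)) {
			/* no new messages arrive on this path now */
			iucv_resume(pathid, note);
		}
	}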
440
441/*
442 * Name: iucv_receive
443 * Purpose: This function receives messages that are being sent to you
444 * over established paths. Data is returned in the buffer, up to
445 * buflen bytes.
446 * Input:
447 * pathid - Path identification number.
448 * buffer - Address of buffer to receive.
449 * buflen - Length of buffer to receive.
450 * msgid - Specifies the message ID.
451 * trgcls - Specifies target class.
452 * Output:
453 * flags1_out: int *, Contains information about this path.
454 * IPNORPY - 0x10 Specifies this is a one-way message and no reply is
455 * expected.
456 * IPPRTY - 0x20 Specifies if you want to send priority message.
457 * IPRMDATA - 0x80 specifies the data is contained in the parameter list
458 * residual_buffer - address of buffer updated by the number
459 * of bytes you have received.
460 * residual_length -
461 * Contains one of the following values, if the receive buffer is:
462 * The same length as the message, this field is zero.
463 * Longer than the message, this field contains the number of
464 * bytes remaining in the buffer.
465 * Shorter than the message, this field contains the residual
466 * count (that is, the number of bytes remaining in the
467 * message that does not fit into the buffer). In this
468 * case b2f0_result = 5.
469 * Return: Return code from CP IUCV call.
470 * (-EINVAL) - buffer address is pointing to NULL
471*/
472int iucv_receive (u16 pathid,
473 u32 msgid,
474 u32 trgcls,
475 void *buffer,
476 ulong buflen,
477 int *flags1_out,
478 ulong * residual_buffer, ulong * residual_length);
479
480 /*
481 * Name: iucv_receive_array
482 * Purpose: This function receives messages that are being sent to you
483 * over established paths. Data is returned in the listed buffers,
484 * in order, each filled up to its length.
485 * Input: pathid - Path identification number.
486 * msgid - specifies the message ID.
487 * trgcls - Specifies target class.
488 * buffer - Address of array of buffers.
489 * buflen - Total length of buffers.
490 * Output:
491 * flags1_out: int *, Contains information about this path.
492 * IPNORPY - 0x10 Specifies this is a one-way message and no reply is
493 * expected.
494 * IPPRTY - 0x20 Specifies if you want to send priority message.
495 * IPRMDATA - 0x80 specifies the data is contained in the parameter list
496 * residual_buffer - address points to the current list entry IUCV
497 * is working on.
498 * residual_length -
499 * Contains one of the following values, if the receive buffer is:
500 * The same length as the message, this field is zero.
501 * Longer than the message, this field contains the number of
502 * bytes remaining in the buffer.
503 * Shorter than the message, this field contains the residual
504 * count (that is, the number of bytes remaining in the
505 * message that does not fit into the buffer). In this
506 * case b2f0_result = 5.
507 * Return: Return code from CP IUCV call.
508 * (-EINVAL) - Buffer address is NULL.
509 */
510int iucv_receive_array (u16 pathid,
511 u32 msgid,
512 u32 trgcls,
513 iucv_array_t * buffer,
514 ulong buflen,
515 int *flags1_out,
516 ulong * residual_buffer, ulong * residual_length);
517
518/*
519 * Name: iucv_reject
520 * Purpose: The reject function refuses a specified message. Between the
521 * time you are notified of a message and the time that you
522 * complete the message, the message may be rejected.
523 * Input: pathid - Path identification number.
524 * msgid - Specifies the message ID.
525 * trgcls - Specifies target class.
526 * Output: NA
527 * Return: Return code from CP IUCV call.
528*/
529int iucv_reject (u16 pathid, u32 msgid, u32 trgcls);
530
531/*
532 * Name: iucv_reply
533 * Purpose: This function responds to the two-way messages that you
534 * receive. You must identify completely the message to
535 * which you wish to reply. ie, pathid, msgid, and trgcls.
536 * Input: pathid - Path identification number.
537 * msgid - Specifies the message ID.
538 * trgcls - Specifies target class.
539 * flags1 - Option for path.
540 * IPPRTY- 0x20, Specifies if you want to send priority message.
541 * buffer - Address of reply buffer.
542 * buflen - Length of reply buffer.
543 * Output: residual_buffer - Address of buffer updated by the number
544 * of bytes you have moved.
545 * residual_length - Contains one of the following values:
546 * If the answer buffer is the same length as the reply, this field
547 * contains zero.
548 * If the answer buffer is longer than the reply, this field contains
549 * the number of bytes remaining in the buffer.
550 * If the answer buffer is shorter than the reply, this field contains
551 * a residual count (that is, the number of bytes remaining in the
552 * reply that does not fit into the buffer). In this
553 * case b2f0_result = 5.
554 * Return: Return code from CP IUCV call.
555 * (-EINVAL) - Buffer address is NULL.
556*/
557int iucv_reply (u16 pathid,
558 u32 msgid,
559 u32 trgcls,
560 int flags1,
561 void *buffer, ulong buflen, ulong * residual_buffer,
562 ulong * residual_length);
563
564/*
565 * Name: iucv_reply_array
566 * Purpose: This function responds to the two-way messages that you
567 * receive. You must identify completely the message to
568 * which you wish to reply. ie, pathid, msgid, and trgcls.
569 * The array identifies a list of addresses and lengths of
570 * discontiguous buffers that contain the reply data.
571 * Input: pathid - Path identification number
572 * msgid - Specifies the message ID.
573 * trgcls - Specifies target class.
574 * flags1 - Option for path.
575 * IPPRTY- 0x20, Specifies if you want to send priority message.
576 * buffer - Address of array of reply buffers.
577 * buflen - Total length of reply buffers.
578 * Output: residual_buffer - Address of buffer which IUCV is currently working on.
579 * residual_length - Contains one of the following values:
580 * If the answer buffer is the same length as the reply, this field
581 * contains zero.
582 * If the answer buffer is longer than the reply, this field contains
583 * the number of bytes remaining in the buffer.
584 * If the answer buffer is shorter than the reply, this field contains
585 * a residual count (that is, the number of bytes remaining in the
586 * reply that does not fit into the buffer). In this
587 * case b2f0_result = 5.
588 * Return: Return code from CP IUCV call.
589 * (-EINVAL) - Buffer address is NULL.
590*/
591int iucv_reply_array (u16 pathid,
592 u32 msgid,
593 u32 trgcls,
594 int flags1,
595 iucv_array_t * buffer,
596 ulong buflen, ulong * residual_address,
597 ulong * residual_length);
598
599/*
600 * Name: iucv_reply_prmmsg
601 * Purpose: This function responds to the two-way messages that you
602 * receive. You must identify completely the message to
603 * which you wish to reply. ie, pathid, msgid, and trgcls.
604 * Prmmsg signifies the data is moved into the
605 * parameter list.
606 * Input: pathid - Path identification number.
607 * msgid - Specifies the message ID.
608 * trgcls - Specifies target class.
609 * flags1 - Option for path.
610 * IPPRTY- 0x20 Specifies if you want to send priority message.
611 * prmmsg - 8-bytes of data to be placed into the parameter.
612 * list.
613 * Output: NA
614 * Return: Return code from CP IUCV call.
615*/
616int iucv_reply_prmmsg (u16 pathid,
617 u32 msgid, u32 trgcls, int flags1, uchar prmmsg[8]);
618
619/*
620 * Name: iucv_resume
621 * Purpose: This function restores communications over a quiesced path
622 * Input: pathid - Path identification number.
623 * user_data - 16-bytes of user data.
624 * Output: NA
625 * Return: Return code from CP IUCV call.
626*/
627int iucv_resume (u16 pathid, uchar user_data[16]);
628
629/*
630 * Name: iucv_send
631 * Purpose: This function transmits data to another application.
632 * Data to be transmitted is in a buffer and this is a
633 * one-way message and the receiver will not reply to the
634 * message.
635 * Input: pathid - Path identification number.
636 * trgcls - Specifies target class.
637 * srccls - Specifies the source message class.
638 * msgtag - Specifies a tag to be associated with the message.
639 * flags1 - Option for path.
640 * IPPRTY- 0x20 Specifies if you want to send priority message.
641 * buffer - Address of send buffer.
642 * buflen - Length of send buffer.
643 * Output: msgid - Specifies the message ID.
644 * Return: Return code from CP IUCV call.
645 * (-EINVAL) - Buffer address is NULL.
646*/
647int iucv_send (u16 pathid,
648 u32 * msgid,
649 u32 trgcls,
650 u32 srccls, u32 msgtag, int flags1, void *buffer, ulong buflen);
651
652/*
653 * Name: iucv_send_array
654 * Purpose: This function transmits data to another application.
655 * The buffer argument is the address of the array of
656 * addresses and lengths of discontiguous buffers that hold
657 * the message text. This is a one-way message and the
658 * receiver will not reply to the message.
659 * Input: pathid - Path identification number.
660 * trgcls - Specifies target class.
661 * srccls - Specifies the source message class.
662 * msgtag - Specifies a tag to be associated with the message.
663 * flags1 - Option for path.
664 * IPPRTY- specifies if you want to send priority message.
665 * buffer - Address of array of send buffers.
666 * buflen - Total length of send buffers.
667 * Output: msgid - Specifies the message ID.
668 * Return: Return code from CP IUCV call.
669 * (-EINVAL) - Buffer address is NULL.
670*/
671int iucv_send_array (u16 pathid,
672 u32 * msgid,
673 u32 trgcls,
674 u32 srccls,
675 u32 msgtag,
676 int flags1, iucv_array_t * buffer, ulong buflen);
677
678/*
679 * Name: iucv_send_prmmsg
680 * Purpose: This function transmits data to another application.
681 * Prmmsg specifies that the 8-bytes of data are to be moved
682 * into the parameter list. This is a one-way message and the
683 * receiver will not reply to the message.
684 * Input: pathid - Path identification number.
685 * trgcls - Specifies target class.
686 * srccls - Specifies the source message class.
687 * msgtag - Specifies a tag to be associated with the message.
688 * flags1 - Option for path.
689 * IPPRTY- 0x20 specifies if you want to send priority message.
690 * prmmsg - 8-bytes of data to be placed into parameter list.
691 * Output: msgid - Specifies the message ID.
692 * Return: Return code from CP IUCV call.
693*/
694int iucv_send_prmmsg (u16 pathid,
695 u32 * msgid,
696 u32 trgcls,
697 u32 srccls, u32 msgtag, int flags1, uchar prmmsg[8]);
698
699/*
700 * Name: iucv_send2way
701 * Purpose: This function transmits data to another application.
702 * Data to be transmitted is in a buffer. The receiver
703 * of the send is expected to reply to the message and
704 * a buffer is provided into which IUCV moves the reply
705 * to this message.
706 * Input: pathid - Path identification number.
707 * trgcls - Specifies target class.
708 * srccls - Specifies the source message class.
709 * msgtag - Specifies a tag associated with the message.
710 * flags1 - Option for path.
711 * IPPRTY- 0x20 Specifies if you want to send priority message.
712 * buffer - Address of send buffer.
713 * buflen - Length of send buffer.
714 * ansbuf - Address of buffer into which IUCV moves the reply of
715 * this message.
716 * anslen - Length of the answer buffer.
717 * Output: msgid - Specifies the message ID.
718 * Return: Return code from CP IUCV call.
719 * (-EINVAL) - Buffer or ansbuf address is NULL.
720*/
721int iucv_send2way (u16 pathid,
722 u32 * msgid,
723 u32 trgcls,
724 u32 srccls,
725 u32 msgtag,
726 int flags1,
727 void *buffer, ulong buflen, void *ansbuf, ulong anslen);
728
729/*
730 * Name: iucv_send2way_array
731 * Purpose: This function transmits data to another application.
732 * The buffer argument is the address of the array of
733 * addresses and lengths of discontiguous buffers that hold
734 * the message text. The receiver of the send is expected to
735 * reply to the message and a buffer is provided into which
736 * IUCV moves the reply to this message.
737 * Input: pathid - Path identification number.
738 * trgcls - Specifies target class.
739 * srccls - Specifies the source message class.
740 * msgtag - Specifies a tag to be associated with the message.
741 * flags1 - Option for path.
742 * IPPRTY- 0x20 Specifies if you want to send priority message.
743 * buffer - Address of array of send buffers.
744 * buflen - Total length of send buffers.
745 * ansbuf - Address of array of buffers into which IUCV moves the reply
746 * of this message.
747 * anslen - Total length of the reply buffers.
748 * Output: msgid - Specifies the message ID.
749 * Return: Return code from CP IUCV call.
750 * (-EINVAL) - Buffer address is NULL.
751*/
752int iucv_send2way_array (u16 pathid,
753 u32 * msgid,
754 u32 trgcls,
755 u32 srccls,
756 u32 msgtag,
757 int flags1,
758 iucv_array_t * buffer,
759 ulong buflen, iucv_array_t * ansbuf, ulong anslen);
760
761/*
762 * Name: iucv_send2way_prmmsg
763 * Purpose: This function transmits data to another application.
764 * Prmmsg specifies that the 8-bytes of data are to be moved
765 * into the parameter list. This is a two-way message and the
766 * receiver of the message is expected to reply. A buffer
767 * is provided into which IUCV moves the reply to this
768 * message.
769 * Input: pathid - Path identification number.
770 * trgcls - Specifies target class.
771 * srccls - Specifies the source message class.
772 * msgtag - Specifies a tag to be associated with the message.
773 * flags1 - Option for path.
774 * IPPRTY- 0x20 Specifies if you want to send priority message.
775 * prmmsg - 8-bytes of data to be placed in parameter list.
776 * ansbuf - Address of buffer into which IUCV moves the reply of
777 * this message.
778 * anslen - Length of the answer buffer.
779 * Output: msgid - Specifies the message ID.
780 * Return: Return code from CP IUCV call.
781 * (-EINVAL) - Buffer address is NULL.
782*/
783int iucv_send2way_prmmsg (u16 pathid,
784 u32 * msgid,
785 u32 trgcls,
786 u32 srccls,
787 u32 msgtag,
788 ulong flags1,
789 uchar prmmsg[8], void *ansbuf, ulong anslen);
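
A sketch of the parameter-list variant; the whole request must fit into
the 8 bytes of prmmsg, so no send buffer is involved (names illustrative):

static int example_send2way_prmmsg(u16 pathid, void *ans, ulong anslen)
{
	uchar prm[8] = { 'S', 'T', 'A', 'T', 'U', 'S', ' ', ' ' };
	u32 msgid;

	/* The 8 request bytes travel in the parameter list itself;
	 * only the reply needs a buffer. */
	return iucv_send2way_prmmsg(pathid, &msgid, 0, 0, 0, 0UL,
				    prm, ans, anslen);
}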
790
791/*
792 * Name: iucv_send2way_prmmsg_array
793 * Purpose: This function transmits data to another application.
794 *    Prmmsg specifies that the 8 bytes of data are to be moved
795 * into the parameter list. This is a two-way message and the
796 * receiver of the message is expected to reply. A buffer
797 * is provided into which IUCV moves the reply to this
798 *    message. Ansbuf contains the address of the
799 * array of addresses and lengths of discontiguous buffers
800 * that contain the reply.
801 * Input: pathid - Path identification number.
802 * trgcls - Specifies target class.
803 * srccls - Specifies the source message class.
804 * msgtag - Specifies a tag to be associated with the message.
805 * flags1 - Option for path.
806 *               IPPRTY - 0x20: send as a priority message.
807 *    prmmsg - 8 bytes of data to be placed into the parameter list.
808 *    ansbuf - Address of the array of buffers into which IUCV
809 *             moves the reply to this message.
810 *    anslen - Total length of the reply buffers.
811 * Output: msgid - Specifies the message ID.
812 * Return: Return code from CP IUCV call.
813 * (-EINVAL) - Ansbuf address is NULL.
814*/
815int iucv_send2way_prmmsg_array (u16 pathid,
816 u32 * msgid,
817 u32 trgcls,
818 u32 srccls,
819 u32 msgtag,
820 int flags1,
821 uchar prmmsg[8],
822 iucv_array_t * ansbuf, ulong anslen);
823
824/*
825 * Name: iucv_setmask
826 * Purpose: This function enables or disables the following IUCV
827 * external interruptions: Nonpriority and priority message
828 * interrupts, nonpriority and priority reply interrupts.
829 * Input: SetMaskFlag - options for interrupts
830 * 0x80 - Nonpriority_MessagePendingInterruptsFlag
831 * 0x40 - Priority_MessagePendingInterruptsFlag
832 * 0x20 - Nonpriority_MessageCompletionInterruptsFlag
833 * 0x10 - Priority_MessageCompletionInterruptsFlag
834 * 0x08 - IUCVControlInterruptsFlag
835 * Output: NA
836 * Return: Return code from CP IUCV call.
837*/
838int iucv_setmask (int SetMaskFlag);
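
The flag bits are ORed together; a sketch enabling only the nonpriority
interrupt classes:

static int example_setmask(void)
{
	/* 0x80: nonpriority message pending, 0x20: nonpriority message
	 * completion; priority and control interrupts stay disabled. */
	return iucv_setmask(0x80 | 0x20);
}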
839
840/*
841 * Name: iucv_sever
842 * Purpose: This function terminates an IUCV path.
843 * Input: pathid - Path identification number.
844 *    user_data - 16 bytes of user data.
845 * Output: NA
846 * Return: Return code from CP IUCV call.
847 *    (-EINVAL) - Internal error, wild pointer.
848*/
849int iucv_sever (u16 pathid, uchar user_data[16]);
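
A sketch of tearing a path down; the 16 bytes of user data are handed to
the peer, and a zeroed buffer is a plausible choice when nothing needs to
be communicated:

static int example_sever(u16 pathid)
{
	uchar udata[16] = { 0 };

	/* Returns the CP return code; -EINVAL indicates an internal
	 * error (wild pointer). */
	return iucv_sever(pathid, udata);
}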
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index e5665b6743a1..b97dd15bdb9a 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -828,7 +828,7 @@ lcs_notify_lancmd_waiters(struct lcs_card *card, struct lcs_cmd *cmd)
828/** 828/**
829 * Emit buffer of a lan command. 829 * Emit buffer of a lan command.
830 */ 830 */
831void 831static void
832lcs_lancmd_timeout(unsigned long data) 832lcs_lancmd_timeout(unsigned long data)
833{ 833{
834 struct lcs_reply *reply, *list_reply, *r; 834 struct lcs_reply *reply, *list_reply, *r;
@@ -1360,7 +1360,7 @@ lcs_get_problem(struct ccw_device *cdev, struct irb *irb)
1360 return 0; 1360 return 0;
1361} 1361}
1362 1362
1363void 1363static void
1364lcs_schedule_recovery(struct lcs_card *card) 1364lcs_schedule_recovery(struct lcs_card *card)
1365{ 1365{
1366 LCS_DBF_TEXT(2, trace, "startrec"); 1366 LCS_DBF_TEXT(2, trace, "startrec");
@@ -1990,7 +1990,7 @@ lcs_timeout_store (struct device *dev, struct device_attribute *attr, const char
1990 1990
1991} 1991}
1992 1992
1993DEVICE_ATTR(lancmd_timeout, 0644, lcs_timeout_show, lcs_timeout_store); 1993static DEVICE_ATTR(lancmd_timeout, 0644, lcs_timeout_show, lcs_timeout_store);
1994 1994
1995static ssize_t 1995static ssize_t
1996lcs_dev_recover_store(struct device *dev, struct device_attribute *attr, 1996lcs_dev_recover_store(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index d7d1cc0a5c8e..6387b483f2bf 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * IUCV network driver 2 * IUCV network driver
3 * 3 *
4 * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation 4 * Copyright 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
5 * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com) 5 * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
6 * 6 *
7 * Sysfs integration and all bugs therein by Cornelia Huck 7 * Sysfs integration and all bugs therein by Cornelia Huck
@@ -58,13 +58,94 @@
58#include <asm/io.h> 58#include <asm/io.h>
59#include <asm/uaccess.h> 59#include <asm/uaccess.h>
60 60
61#include "iucv.h" 61#include <net/iucv/iucv.h>
62#include "fsm.h" 62#include "fsm.h"
63 63
64MODULE_AUTHOR 64MODULE_AUTHOR
65 ("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)"); 65 ("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)");
66MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver"); 66MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
67 67
68/**
69 * Debug Facility stuff
70 */
71#define IUCV_DBF_SETUP_NAME "iucv_setup"
72#define IUCV_DBF_SETUP_LEN 32
73#define IUCV_DBF_SETUP_PAGES 2
74#define IUCV_DBF_SETUP_NR_AREAS 1
75#define IUCV_DBF_SETUP_LEVEL 3
76
77#define IUCV_DBF_DATA_NAME "iucv_data"
78#define IUCV_DBF_DATA_LEN 128
79#define IUCV_DBF_DATA_PAGES 2
80#define IUCV_DBF_DATA_NR_AREAS 1
81#define IUCV_DBF_DATA_LEVEL 2
82
83#define IUCV_DBF_TRACE_NAME "iucv_trace"
84#define IUCV_DBF_TRACE_LEN 16
85#define IUCV_DBF_TRACE_PAGES 4
86#define IUCV_DBF_TRACE_NR_AREAS 1
87#define IUCV_DBF_TRACE_LEVEL 3
88
89#define IUCV_DBF_TEXT(name,level,text) \
90 do { \
91 debug_text_event(iucv_dbf_##name,level,text); \
92 } while (0)
93
94#define IUCV_DBF_HEX(name,level,addr,len) \
95 do { \
96 debug_event(iucv_dbf_##name,level,(void*)(addr),len); \
97 } while (0)
98
99DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);
100
101#define IUCV_DBF_TEXT_(name,level,text...) \
102 do { \
103 char* iucv_dbf_txt_buf = get_cpu_var(iucv_dbf_txt_buf); \
104 sprintf(iucv_dbf_txt_buf, text); \
105 debug_text_event(iucv_dbf_##name,level,iucv_dbf_txt_buf); \
106 put_cpu_var(iucv_dbf_txt_buf); \
107 } while (0)
108
109#define IUCV_DBF_SPRINTF(name,level,text...) \
110 do { \
111 debug_sprintf_event(iucv_dbf_trace, level, ##text ); \
112 debug_sprintf_event(iucv_dbf_trace, level, text ); \
113 } while (0)
114
115/**
116 * some more debug stuff
117 */
118#define IUCV_HEXDUMP16(importance,header,ptr) \
119PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
120 "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
121 *(((char*)ptr)),*(((char*)ptr)+1),*(((char*)ptr)+2), \
122 *(((char*)ptr)+3),*(((char*)ptr)+4),*(((char*)ptr)+5), \
123 *(((char*)ptr)+6),*(((char*)ptr)+7),*(((char*)ptr)+8), \
124 *(((char*)ptr)+9),*(((char*)ptr)+10),*(((char*)ptr)+11), \
125 *(((char*)ptr)+12),*(((char*)ptr)+13), \
126 *(((char*)ptr)+14),*(((char*)ptr)+15)); \
127PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
128 "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
129 *(((char*)ptr)+16),*(((char*)ptr)+17), \
130 *(((char*)ptr)+18),*(((char*)ptr)+19), \
131 *(((char*)ptr)+20),*(((char*)ptr)+21), \
132 *(((char*)ptr)+22),*(((char*)ptr)+23), \
133 *(((char*)ptr)+24),*(((char*)ptr)+25), \
134 *(((char*)ptr)+26),*(((char*)ptr)+27), \
135 *(((char*)ptr)+28),*(((char*)ptr)+29), \
136 *(((char*)ptr)+30),*(((char*)ptr)+31));
137
138static inline void iucv_hex_dump(unsigned char *buf, size_t len)
139{
140 size_t i;
141
142 for (i = 0; i < len; i++) {
143 if (i && !(i % 16))
144 printk("\n");
145 printk("%02x ", *(buf + i));
146 }
147 printk("\n");
148}
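
For orientation, a sketch of how the DBF macros above are used; both
calls mirror invocations that appear in later hunks of this patch:

static void example_dbf(int rc)
{
	/* Fixed-text event into the trace area. */
	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
	/* Formatted event; the per-CPU text buffer is handled inside
	 * the macro. */
	IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc);
}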
68 149
69#define PRINTK_HEADER " iucv: " /* for debugging */ 150#define PRINTK_HEADER " iucv: " /* for debugging */
70 151
@@ -73,6 +154,25 @@ static struct device_driver netiucv_driver = {
73 .bus = &iucv_bus, 154 .bus = &iucv_bus,
74}; 155};
75 156
157static int netiucv_callback_connreq(struct iucv_path *,
158 u8 ipvmid[8], u8 ipuser[16]);
159static void netiucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
160static void netiucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
161static void netiucv_callback_connsusp(struct iucv_path *, u8 ipuser[16]);
162static void netiucv_callback_connres(struct iucv_path *, u8 ipuser[16]);
163static void netiucv_callback_rx(struct iucv_path *, struct iucv_message *);
164static void netiucv_callback_txdone(struct iucv_path *, struct iucv_message *);
165
166static struct iucv_handler netiucv_handler = {
167 .path_pending = netiucv_callback_connreq,
168 .path_complete = netiucv_callback_connack,
169 .path_severed = netiucv_callback_connrej,
170 .path_quiesced = netiucv_callback_connsusp,
171 .path_resumed = netiucv_callback_connres,
172 .message_pending = netiucv_callback_rx,
173 .message_complete = netiucv_callback_txdone
174};
175
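
Taken together, the new API routes all events through a struct
iucv_handler bound to a per-connection iucv_path. A condensed sketch of
the connect side as this patch uses it (handler, userid, usrdata and
private are illustrative parameters; the calls match the hunks below):

static int example_path_connect(struct iucv_handler *handler,
				u8 userid[8], u8 usrdata[16], void *private)
{
	struct iucv_path *path;
	int rc;

	path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL);
	if (!path)
		return -ENOMEM;	/* defensive; the patch itself does not check */
	rc = iucv_path_connect(path, handler, userid, NULL, usrdata, private);
	if (rc) {
		/* mirrors the failure path in conn_action_start */
		kfree(path);
	}
	return rc;
}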
76/** 176/**
77 * Per connection profiling data 177 * Per connection profiling data
78 */ 178 */
@@ -92,9 +192,8 @@ struct connection_profile {
92 * Representation of one iucv connection 192 * Representation of one iucv connection
93 */ 193 */
94struct iucv_connection { 194struct iucv_connection {
95 struct iucv_connection *next; 195 struct list_head list;
96 iucv_handle_t handle; 196 struct iucv_path *path;
97 __u16 pathid;
98 struct sk_buff *rx_buff; 197 struct sk_buff *rx_buff;
99 struct sk_buff *tx_buff; 198 struct sk_buff *tx_buff;
100 struct sk_buff_head collect_queue; 199 struct sk_buff_head collect_queue;
@@ -112,12 +211,9 @@ struct iucv_connection {
112/** 211/**
113 * Linked list of all connection structs. 212 * Linked list of all connection structs.
114 */ 213 */
115struct iucv_connection_struct { 214static struct list_head iucv_connection_list =
116 struct iucv_connection *iucv_connections; 215 LIST_HEAD_INIT(iucv_connection_list);
117 rwlock_t iucv_rwlock; 216static rwlock_t iucv_connection_rwlock = RW_LOCK_UNLOCKED;
118};
119
120static struct iucv_connection_struct iucv_conns;
121 217
122/** 218/**
123 * Representation of event-data for the 219 * Representation of event-data for the
@@ -142,11 +238,11 @@ struct netiucv_priv {
142/** 238/**
143 * Link level header for a packet. 239 * Link level header for a packet.
144 */ 240 */
145typedef struct ll_header_t { 241struct ll_header {
146 __u16 next; 242 u16 next;
147} ll_header; 243};
148 244
149#define NETIUCV_HDRLEN (sizeof(ll_header)) 245#define NETIUCV_HDRLEN (sizeof(struct ll_header))
150#define NETIUCV_BUFSIZE_MAX 32768 246#define NETIUCV_BUFSIZE_MAX 32768
151#define NETIUCV_BUFSIZE_DEFAULT NETIUCV_BUFSIZE_MAX 247#define NETIUCV_BUFSIZE_DEFAULT NETIUCV_BUFSIZE_MAX
152#define NETIUCV_MTU_MAX (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN) 248#define NETIUCV_MTU_MAX (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN)
@@ -158,36 +254,26 @@ typedef struct ll_header_t {
158 * Compatibility macros for busy handling 254 * Compatibility macros for busy handling
159 * of network devices. 255 * of network devices.
160 */ 256 */
161static __inline__ void netiucv_clear_busy(struct net_device *dev) 257static inline void netiucv_clear_busy(struct net_device *dev)
162{ 258{
163 clear_bit(0, &(((struct netiucv_priv *)dev->priv)->tbusy)); 259 struct netiucv_priv *priv = netdev_priv(dev);
260 clear_bit(0, &priv->tbusy);
164 netif_wake_queue(dev); 261 netif_wake_queue(dev);
165} 262}
166 263
167static __inline__ int netiucv_test_and_set_busy(struct net_device *dev) 264static inline int netiucv_test_and_set_busy(struct net_device *dev)
168{ 265{
266 struct netiucv_priv *priv = netdev_priv(dev);
169 netif_stop_queue(dev); 267 netif_stop_queue(dev);
170 return test_and_set_bit(0, &((struct netiucv_priv *)dev->priv)->tbusy); 268 return test_and_set_bit(0, &priv->tbusy);
171} 269}
172 270
173static __u8 iucv_host[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; 271static u8 iucvMagic[16] = {
174static __u8 iucvMagic[16] = {
175 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 272 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
176 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40 273 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
177}; 274};
178 275
179/** 276/**
180 * This mask means the 16-byte IUCV "magic" and the origin userid must
181 * match exactly as specified in order to give connection_pending()
182 * control.
183 */
184static __u8 netiucv_mask[] = {
185 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
186 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
187 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
188};
189
190/**
191 * Convert an iucv userId to its printable 277 * Convert an iucv userId to its printable
192 * form (strip whitespace at end). 278 * form (strip whitespace at end).
193 * 279 *
@@ -195,8 +281,7 @@ static __u8 netiucv_mask[] = {
195 * 281 *
196 * @returns The printable string (static data!!) 282 * @returns The printable string (static data!!)
197 */ 283 */
198static __inline__ char * 284static inline char *netiucv_printname(char *name)
199netiucv_printname(char *name)
200{ 285{
201 static char tmp[9]; 286 static char tmp[9];
202 char *p = tmp; 287 char *p = tmp;
@@ -379,8 +464,7 @@ static debug_info_t *iucv_dbf_trace = NULL;
379 464
380DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf); 465DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf);
381 466
382static void 467static void iucv_unregister_dbf_views(void)
383iucv_unregister_dbf_views(void)
384{ 468{
385 if (iucv_dbf_setup) 469 if (iucv_dbf_setup)
386 debug_unregister(iucv_dbf_setup); 470 debug_unregister(iucv_dbf_setup);
@@ -389,8 +473,7 @@ iucv_unregister_dbf_views(void)
389 if (iucv_dbf_trace) 473 if (iucv_dbf_trace)
390 debug_unregister(iucv_dbf_trace); 474 debug_unregister(iucv_dbf_trace);
391} 475}
392static int 476static int iucv_register_dbf_views(void)
393iucv_register_dbf_views(void)
394{ 477{
395 iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME, 478 iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME,
396 IUCV_DBF_SETUP_PAGES, 479 IUCV_DBF_SETUP_PAGES,
@@ -422,125 +505,111 @@ iucv_register_dbf_views(void)
422 return 0; 505 return 0;
423} 506}
424 507
425/** 508/*
426 * Callback-wrappers, called from lowlevel iucv layer. 509 * Callback-wrappers, called from lowlevel iucv layer.
427 *****************************************************************************/ 510 */
428 511
429static void 512static void netiucv_callback_rx(struct iucv_path *path,
430netiucv_callback_rx(iucv_MessagePending *eib, void *pgm_data) 513 struct iucv_message *msg)
431{ 514{
432 struct iucv_connection *conn = (struct iucv_connection *)pgm_data; 515 struct iucv_connection *conn = path->private;
433 struct iucv_event ev; 516 struct iucv_event ev;
434 517
435 ev.conn = conn; 518 ev.conn = conn;
436 ev.data = (void *)eib; 519 ev.data = msg;
437
438 fsm_event(conn->fsm, CONN_EVENT_RX, &ev); 520 fsm_event(conn->fsm, CONN_EVENT_RX, &ev);
439} 521}
440 522
441static void 523static void netiucv_callback_txdone(struct iucv_path *path,
442netiucv_callback_txdone(iucv_MessageComplete *eib, void *pgm_data) 524 struct iucv_message *msg)
443{ 525{
444 struct iucv_connection *conn = (struct iucv_connection *)pgm_data; 526 struct iucv_connection *conn = path->private;
445 struct iucv_event ev; 527 struct iucv_event ev;
446 528
447 ev.conn = conn; 529 ev.conn = conn;
448 ev.data = (void *)eib; 530 ev.data = msg;
449 fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev); 531 fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev);
450} 532}
451 533
452static void 534static void netiucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
453netiucv_callback_connack(iucv_ConnectionComplete *eib, void *pgm_data)
454{ 535{
455 struct iucv_connection *conn = (struct iucv_connection *)pgm_data; 536 struct iucv_connection *conn = path->private;
456 struct iucv_event ev;
457 537
458 ev.conn = conn; 538 fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, conn);
459 ev.data = (void *)eib;
460 fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, &ev);
461} 539}
462 540
463static void 541static int netiucv_callback_connreq(struct iucv_path *path,
464netiucv_callback_connreq(iucv_ConnectionPending *eib, void *pgm_data) 542 u8 ipvmid[8], u8 ipuser[16])
465{ 543{
466 struct iucv_connection *conn = (struct iucv_connection *)pgm_data; 544 struct iucv_connection *conn = path->private;
467 struct iucv_event ev; 545 struct iucv_event ev;
546 int rc;
468 547
469 ev.conn = conn; 548 if (memcmp(iucvMagic, ipuser, sizeof(ipuser)))
470 ev.data = (void *)eib; 549 /* ipuser must match iucvMagic. */
471 fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev); 550 return -EINVAL;
551 rc = -EINVAL;
552 read_lock_bh(&iucv_connection_rwlock);
553 list_for_each_entry(conn, &iucv_connection_list, list) {
554 if (strncmp(ipvmid, conn->userid, 8))
555 continue;
556 /* Found a matching connection for this path. */
557 conn->path = path;
558 ev.conn = conn;
559 ev.data = path;
560 fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev);
561 rc = 0;
562 }
563 read_unlock_bh(&iucv_connection_rwlock);
564 return rc;
472} 565}
473 566
474static void 567static void netiucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
475netiucv_callback_connrej(iucv_ConnectionSevered *eib, void *pgm_data)
476{ 568{
477 struct iucv_connection *conn = (struct iucv_connection *)pgm_data; 569 struct iucv_connection *conn = path->private;
478 struct iucv_event ev;
479 570
480 ev.conn = conn; 571 fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, conn);
481 ev.data = (void *)eib;
482 fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, &ev);
483} 572}
484 573
485static void 574static void netiucv_callback_connsusp(struct iucv_path *path, u8 ipuser[16])
486netiucv_callback_connsusp(iucv_ConnectionQuiesced *eib, void *pgm_data)
487{ 575{
488 struct iucv_connection *conn = (struct iucv_connection *)pgm_data; 576 struct iucv_connection *conn = path->private;
489 struct iucv_event ev;
490 577
491 ev.conn = conn; 578 fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, conn);
492 ev.data = (void *)eib;
493 fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, &ev);
494} 579}
495 580
496static void 581static void netiucv_callback_connres(struct iucv_path *path, u8 ipuser[16])
497netiucv_callback_connres(iucv_ConnectionResumed *eib, void *pgm_data)
498{ 582{
499 struct iucv_connection *conn = (struct iucv_connection *)pgm_data; 583 struct iucv_connection *conn = path->private;
500 struct iucv_event ev;
501 584
502 ev.conn = conn; 585 fsm_event(conn->fsm, CONN_EVENT_CONN_RES, conn);
503 ev.data = (void *)eib; 586}
504 fsm_event(conn->fsm, CONN_EVENT_CONN_RES, &ev);
505}
506
507static iucv_interrupt_ops_t netiucv_ops = {
508 .ConnectionPending = netiucv_callback_connreq,
509 .ConnectionComplete = netiucv_callback_connack,
510 .ConnectionSevered = netiucv_callback_connrej,
511 .ConnectionQuiesced = netiucv_callback_connsusp,
512 .ConnectionResumed = netiucv_callback_connres,
513 .MessagePending = netiucv_callback_rx,
514 .MessageComplete = netiucv_callback_txdone
515};
516 587
517/** 588/**
518 * Dummy NOP action for all statemachines 589 * Dummy NOP action for all statemachines
519 */ 590 */
520static void 591static void fsm_action_nop(fsm_instance *fi, int event, void *arg)
521fsm_action_nop(fsm_instance *fi, int event, void *arg)
522{ 592{
523} 593}
524 594
525/** 595/*
526 * Actions of the connection statemachine 596 * Actions of the connection statemachine
527 *****************************************************************************/ 597 */
528 598
529/** 599/**
530 * Helper function for conn_action_rx() 600 * netiucv_unpack_skb
531 * Unpack a just received skb and hand it over to 601 * @conn: The connection where this skb has been received.
532 * upper layers. 602 * @pskb: The received skb.
533 * 603 *
534 * @param conn The connection where this skb has been received. 604 * Unpack a just received skb and hand it over to upper layers.
535 * @param pskb The received skb. 605 * Helper function for conn_action_rx.
536 */ 606 */
537//static __inline__ void 607static void netiucv_unpack_skb(struct iucv_connection *conn,
538static void 608 struct sk_buff *pskb)
539netiucv_unpack_skb(struct iucv_connection *conn, struct sk_buff *pskb)
540{ 609{
541 struct net_device *dev = conn->netdev; 610 struct net_device *dev = conn->netdev;
542 struct netiucv_priv *privptr = dev->priv; 611 struct netiucv_priv *privptr = netdev_priv(dev);
543 __u16 offset = 0; 612 u16 offset = 0;
544 613
545 skb_put(pskb, NETIUCV_HDRLEN); 614 skb_put(pskb, NETIUCV_HDRLEN);
546 pskb->dev = dev; 615 pskb->dev = dev;
@@ -549,7 +618,7 @@ netiucv_unpack_skb(struct iucv_connection *conn, struct sk_buff *pskb)
549 618
550 while (1) { 619 while (1) {
551 struct sk_buff *skb; 620 struct sk_buff *skb;
552 ll_header *header = (ll_header *)pskb->data; 621 struct ll_header *header = (struct ll_header *) pskb->data;
553 622
554 if (!header->next) 623 if (!header->next)
555 break; 624 break;
@@ -595,40 +664,37 @@ netiucv_unpack_skb(struct iucv_connection *conn, struct sk_buff *pskb)
595 } 664 }
596} 665}
597 666
598static void 667static void conn_action_rx(fsm_instance *fi, int event, void *arg)
599conn_action_rx(fsm_instance *fi, int event, void *arg)
600{ 668{
601 struct iucv_event *ev = (struct iucv_event *)arg; 669 struct iucv_event *ev = arg;
602 struct iucv_connection *conn = ev->conn; 670 struct iucv_connection *conn = ev->conn;
603 iucv_MessagePending *eib = (iucv_MessagePending *)ev->data; 671 struct iucv_message *msg = ev->data;
604 struct netiucv_priv *privptr =(struct netiucv_priv *)conn->netdev->priv; 672 struct netiucv_priv *privptr = netdev_priv(conn->netdev);
605
606 __u32 msglen = eib->ln1msg2.ipbfln1f;
607 int rc; 673 int rc;
608 674
609 IUCV_DBF_TEXT(trace, 4, __FUNCTION__); 675 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
610 676
611 if (!conn->netdev) { 677 if (!conn->netdev) {
612 /* FRITZ: How to tell iucv LL to drop the msg? */ 678 iucv_message_reject(conn->path, msg);
613 PRINT_WARN("Received data for unlinked connection\n"); 679 PRINT_WARN("Received data for unlinked connection\n");
614 IUCV_DBF_TEXT(data, 2, 680 IUCV_DBF_TEXT(data, 2,
615 "Received data for unlinked connection\n"); 681 "Received data for unlinked connection\n");
616 return; 682 return;
617 } 683 }
618 if (msglen > conn->max_buffsize) { 684 if (msg->length > conn->max_buffsize) {
619 /* FRITZ: How to tell iucv LL to drop the msg? */ 685 iucv_message_reject(conn->path, msg);
620 privptr->stats.rx_dropped++; 686 privptr->stats.rx_dropped++;
621 PRINT_WARN("msglen %d > max_buffsize %d\n", 687 PRINT_WARN("msglen %d > max_buffsize %d\n",
622 msglen, conn->max_buffsize); 688 msg->length, conn->max_buffsize);
623 IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n", 689 IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n",
624 msglen, conn->max_buffsize); 690 msg->length, conn->max_buffsize);
625 return; 691 return;
626 } 692 }
627 conn->rx_buff->data = conn->rx_buff->tail = conn->rx_buff->head; 693 conn->rx_buff->data = conn->rx_buff->tail = conn->rx_buff->head;
628 conn->rx_buff->len = 0; 694 conn->rx_buff->len = 0;
629 rc = iucv_receive(conn->pathid, eib->ipmsgid, eib->iptrgcls, 695 rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data,
630 conn->rx_buff->data, msglen, NULL, NULL, NULL); 696 msg->length, NULL);
631 if (rc || msglen < 5) { 697 if (rc || msg->length < 5) {
632 privptr->stats.rx_errors++; 698 privptr->stats.rx_errors++;
633 PRINT_WARN("iucv_receive returned %08x\n", rc); 699 PRINT_WARN("iucv_receive returned %08x\n", rc);
634 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc); 700 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc);
@@ -637,26 +703,26 @@ conn_action_rx(fsm_instance *fi, int event, void *arg)
637 netiucv_unpack_skb(conn, conn->rx_buff); 703 netiucv_unpack_skb(conn, conn->rx_buff);
638} 704}
639 705
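
The receive pattern above in condensed form: a message_pending callback
either rejects an oversized message or pulls it in with
iucv_message_receive (a sketch with illustrative names, calls as in
conn_action_rx):

static void example_message_pending(struct iucv_path *path,
				    struct iucv_message *msg)
{
	char buf[256];	/* illustrative receive buffer */
	int rc;

	if (msg->length > sizeof(buf)) {
		iucv_message_reject(path, msg);
		return;
	}
	rc = iucv_message_receive(path, msg, 0, buf, msg->length, NULL);
	if (rc)
		return;	/* rc is the IUCV return code */
	/* hand buf off to upper layers here */
}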
640static void 706static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
641conn_action_txdone(fsm_instance *fi, int event, void *arg)
642{ 707{
643 struct iucv_event *ev = (struct iucv_event *)arg; 708 struct iucv_event *ev = arg;
644 struct iucv_connection *conn = ev->conn; 709 struct iucv_connection *conn = ev->conn;
645 iucv_MessageComplete *eib = (iucv_MessageComplete *)ev->data; 710 struct iucv_message *msg = ev->data;
711 struct iucv_message txmsg;
646 struct netiucv_priv *privptr = NULL; 712 struct netiucv_priv *privptr = NULL;
647 /* Shut up, gcc! skb is always below 2G. */ 713 u32 single_flag = msg->tag;
648 __u32 single_flag = eib->ipmsgtag; 714 u32 txbytes = 0;
649 __u32 txbytes = 0; 715 u32 txpackets = 0;
650 __u32 txpackets = 0; 716 u32 stat_maxcq = 0;
651 __u32 stat_maxcq = 0;
652 struct sk_buff *skb; 717 struct sk_buff *skb;
653 unsigned long saveflags; 718 unsigned long saveflags;
654 ll_header header; 719 struct ll_header header;
720 int rc;
655 721
656 IUCV_DBF_TEXT(trace, 4, __FUNCTION__); 722 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
657 723
658 if (conn && conn->netdev && conn->netdev->priv) 724 if (conn && conn->netdev)
659 privptr = (struct netiucv_priv *)conn->netdev->priv; 725 privptr = netdev_priv(conn->netdev);
660 conn->prof.tx_pending--; 726 conn->prof.tx_pending--;
661 if (single_flag) { 727 if (single_flag) {
662 if ((skb = skb_dequeue(&conn->commit_queue))) { 728 if ((skb = skb_dequeue(&conn->commit_queue))) {
@@ -688,56 +754,55 @@ conn_action_txdone(fsm_instance *fi, int event, void *arg)
688 conn->prof.maxmulti = conn->collect_len; 754 conn->prof.maxmulti = conn->collect_len;
689 conn->collect_len = 0; 755 conn->collect_len = 0;
690 spin_unlock_irqrestore(&conn->collect_lock, saveflags); 756 spin_unlock_irqrestore(&conn->collect_lock, saveflags);
691 if (conn->tx_buff->len) { 757 if (conn->tx_buff->len == 0) {
692 int rc; 758 fsm_newstate(fi, CONN_STATE_IDLE);
693 759 return;
694 header.next = 0; 760 }
695 memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header,
696 NETIUCV_HDRLEN);
697 761
698 conn->prof.send_stamp = xtime; 762 header.next = 0;
699 rc = iucv_send(conn->pathid, NULL, 0, 0, 0, 0, 763 memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
764 conn->prof.send_stamp = xtime;
765 txmsg.class = 0;
766 txmsg.tag = 0;
767 rc = iucv_message_send(conn->path, &txmsg, 0, 0,
700 conn->tx_buff->data, conn->tx_buff->len); 768 conn->tx_buff->data, conn->tx_buff->len);
701 conn->prof.doios_multi++; 769 conn->prof.doios_multi++;
702 conn->prof.txlen += conn->tx_buff->len; 770 conn->prof.txlen += conn->tx_buff->len;
703 conn->prof.tx_pending++; 771 conn->prof.tx_pending++;
704 if (conn->prof.tx_pending > conn->prof.tx_max_pending) 772 if (conn->prof.tx_pending > conn->prof.tx_max_pending)
705 conn->prof.tx_max_pending = conn->prof.tx_pending; 773 conn->prof.tx_max_pending = conn->prof.tx_pending;
706 if (rc) { 774 if (rc) {
707 conn->prof.tx_pending--; 775 conn->prof.tx_pending--;
708 fsm_newstate(fi, CONN_STATE_IDLE);
709 if (privptr)
710 privptr->stats.tx_errors += txpackets;
711 PRINT_WARN("iucv_send returned %08x\n", rc);
712 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
713 } else {
714 if (privptr) {
715 privptr->stats.tx_packets += txpackets;
716 privptr->stats.tx_bytes += txbytes;
717 }
718 if (stat_maxcq > conn->prof.maxcqueue)
719 conn->prof.maxcqueue = stat_maxcq;
720 }
721 } else
722 fsm_newstate(fi, CONN_STATE_IDLE); 776 fsm_newstate(fi, CONN_STATE_IDLE);
777 if (privptr)
778 privptr->stats.tx_errors += txpackets;
779 PRINT_WARN("iucv_send returned %08x\n", rc);
780 IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
781 } else {
782 if (privptr) {
783 privptr->stats.tx_packets += txpackets;
784 privptr->stats.tx_bytes += txbytes;
785 }
786 if (stat_maxcq > conn->prof.maxcqueue)
787 conn->prof.maxcqueue = stat_maxcq;
788 }
723} 789}
724 790
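
And the matching send side: fill a struct iucv_message with tag and
class, then pass the flat buffer to iucv_message_send, as
conn_action_txdone does above (a sketch, names illustrative):

static int example_message_send(struct iucv_path *path, void *data,
				size_t len)
{
	struct iucv_message msg;

	msg.class = 0;
	msg.tag = 0;	/* netiucv uses a nonzero tag to mark single skbs */
	return iucv_message_send(path, &msg, 0 /* flags */, 0 /* srccls */,
				 data, len);
}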
725static void 791static void conn_action_connaccept(fsm_instance *fi, int event, void *arg)
726conn_action_connaccept(fsm_instance *fi, int event, void *arg)
727{ 792{
728 struct iucv_event *ev = (struct iucv_event *)arg; 793 struct iucv_event *ev = arg;
729 struct iucv_connection *conn = ev->conn; 794 struct iucv_connection *conn = ev->conn;
730 iucv_ConnectionPending *eib = (iucv_ConnectionPending *)ev->data; 795 struct iucv_path *path = ev->data;
731 struct net_device *netdev = conn->netdev; 796 struct net_device *netdev = conn->netdev;
732 struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv; 797 struct netiucv_priv *privptr = netdev_priv(netdev);
733 int rc; 798 int rc;
734 __u16 msglimit;
735 __u8 udata[16];
736 799
737 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 800 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
738 801
739 rc = iucv_accept(eib->ippathid, NETIUCV_QUEUELEN_DEFAULT, udata, 0, 802 conn->path = path;
740 conn->handle, conn, NULL, &msglimit); 803 path->msglim = NETIUCV_QUEUELEN_DEFAULT;
804 path->flags = 0;
805 rc = iucv_path_accept(path, &netiucv_handler, NULL, conn);
741 if (rc) { 806 if (rc) {
742 PRINT_WARN("%s: IUCV accept failed with error %d\n", 807 PRINT_WARN("%s: IUCV accept failed with error %d\n",
743 netdev->name, rc); 808 netdev->name, rc);
@@ -745,183 +810,126 @@ conn_action_connaccept(fsm_instance *fi, int event, void *arg)
745 return; 810 return;
746 } 811 }
747 fsm_newstate(fi, CONN_STATE_IDLE); 812 fsm_newstate(fi, CONN_STATE_IDLE);
748 conn->pathid = eib->ippathid; 813 netdev->tx_queue_len = conn->path->msglim;
749 netdev->tx_queue_len = msglimit;
750 fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev); 814 fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
751} 815}
752 816
753static void 817static void conn_action_connreject(fsm_instance *fi, int event, void *arg)
754conn_action_connreject(fsm_instance *fi, int event, void *arg)
755{ 818{
756 struct iucv_event *ev = (struct iucv_event *)arg; 819 struct iucv_event *ev = arg;
757 struct iucv_connection *conn = ev->conn; 820 struct iucv_path *path = ev->data;
758 struct net_device *netdev = conn->netdev;
759 iucv_ConnectionPending *eib = (iucv_ConnectionPending *)ev->data;
760 __u8 udata[16];
761 821
762 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 822 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
763 823 iucv_path_sever(path, NULL);
764 iucv_sever(eib->ippathid, udata);
765 if (eib->ippathid != conn->pathid) {
766 PRINT_INFO("%s: IR Connection Pending; "
767 "pathid %d does not match original pathid %d\n",
768 netdev->name, eib->ippathid, conn->pathid);
769 IUCV_DBF_TEXT_(data, 2,
770 "connreject: IR pathid %d, conn. pathid %d\n",
771 eib->ippathid, conn->pathid);
772 iucv_sever(conn->pathid, udata);
773 }
774} 824}
775 825
776static void 826static void conn_action_connack(fsm_instance *fi, int event, void *arg)
777conn_action_connack(fsm_instance *fi, int event, void *arg)
778{ 827{
779 struct iucv_event *ev = (struct iucv_event *)arg; 828 struct iucv_connection *conn = arg;
780 struct iucv_connection *conn = ev->conn;
781 iucv_ConnectionComplete *eib = (iucv_ConnectionComplete *)ev->data;
782 struct net_device *netdev = conn->netdev; 829 struct net_device *netdev = conn->netdev;
783 struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv; 830 struct netiucv_priv *privptr = netdev_priv(netdev);
784 831
785 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 832 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
786
787 fsm_deltimer(&conn->timer); 833 fsm_deltimer(&conn->timer);
788 fsm_newstate(fi, CONN_STATE_IDLE); 834 fsm_newstate(fi, CONN_STATE_IDLE);
789 if (eib->ippathid != conn->pathid) { 835 netdev->tx_queue_len = conn->path->msglim;
790 PRINT_INFO("%s: IR Connection Complete; "
791 "pathid %d does not match original pathid %d\n",
792 netdev->name, eib->ippathid, conn->pathid);
793 IUCV_DBF_TEXT_(data, 2,
794 "connack: IR pathid %d, conn. pathid %d\n",
795 eib->ippathid, conn->pathid);
796 conn->pathid = eib->ippathid;
797 }
798 netdev->tx_queue_len = eib->ipmsglim;
799 fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev); 836 fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
800} 837}
801 838
802static void 839static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
803conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
804{ 840{
805 struct iucv_connection *conn = (struct iucv_connection *)arg; 841 struct iucv_connection *conn = arg;
806 __u8 udata[16];
807 842
808 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 843 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
809
810 fsm_deltimer(&conn->timer); 844 fsm_deltimer(&conn->timer);
811 iucv_sever(conn->pathid, udata); 845 iucv_path_sever(conn->path, NULL);
812 fsm_newstate(fi, CONN_STATE_STARTWAIT); 846 fsm_newstate(fi, CONN_STATE_STARTWAIT);
813} 847}
814 848
815static void 849static void conn_action_connsever(fsm_instance *fi, int event, void *arg)
816conn_action_connsever(fsm_instance *fi, int event, void *arg)
817{ 850{
818 struct iucv_event *ev = (struct iucv_event *)arg; 851 struct iucv_connection *conn = arg;
819 struct iucv_connection *conn = ev->conn;
820 struct net_device *netdev = conn->netdev; 852 struct net_device *netdev = conn->netdev;
821 struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv; 853 struct netiucv_priv *privptr = netdev_priv(netdev);
822 __u8 udata[16];
823 854
824 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 855 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
825 856
826 fsm_deltimer(&conn->timer); 857 fsm_deltimer(&conn->timer);
827 iucv_sever(conn->pathid, udata); 858 iucv_path_sever(conn->path, NULL);
828 PRINT_INFO("%s: Remote dropped connection\n", netdev->name); 859 PRINT_INFO("%s: Remote dropped connection\n", netdev->name);
829 IUCV_DBF_TEXT(data, 2, 860 IUCV_DBF_TEXT(data, 2,
830 "conn_action_connsever: Remote dropped connection\n"); 861 "conn_action_connsever: Remote dropped connection\n");
831 fsm_newstate(fi, CONN_STATE_STARTWAIT); 862 fsm_newstate(fi, CONN_STATE_STARTWAIT);
832 fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev); 863 fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
833} 864}
834 865
835static void 866static void conn_action_start(fsm_instance *fi, int event, void *arg)
836conn_action_start(fsm_instance *fi, int event, void *arg)
837{ 867{
838 struct iucv_event *ev = (struct iucv_event *)arg; 868 struct iucv_connection *conn = arg;
839 struct iucv_connection *conn = ev->conn;
840 __u16 msglimit;
841 int rc; 869 int rc;
842 870
843 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 871 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
844 872
845 if (!conn->handle) { 873 fsm_newstate(fi, CONN_STATE_STARTWAIT);
846 IUCV_DBF_TEXT(trace, 5, "calling iucv_register_program\n");
847 conn->handle =
848 iucv_register_program(iucvMagic, conn->userid,
849 netiucv_mask,
850 &netiucv_ops, conn);
851 fsm_newstate(fi, CONN_STATE_STARTWAIT);
852 if (!conn->handle) {
853 fsm_newstate(fi, CONN_STATE_REGERR);
854 conn->handle = NULL;
855 IUCV_DBF_TEXT(setup, 2,
856 "NULL from iucv_register_program\n");
857 return;
858 }
859
860 PRINT_DEBUG("%s('%s'): registered successfully\n",
861 conn->netdev->name, conn->userid);
862 }
863
864 PRINT_DEBUG("%s('%s'): connecting ...\n", 874 PRINT_DEBUG("%s('%s'): connecting ...\n",
865 conn->netdev->name, conn->userid); 875 conn->netdev->name, conn->userid);
866 876
867 /* We must set the state before calling iucv_connect because the callback 877 /*
868 * handler could be called at any point after the connection request is 878 * We must set the state before calling iucv_connect because the
869 * sent */ 879 * callback handler could be called at any point after the connection
880 * request is sent
881 */
870 882
871 fsm_newstate(fi, CONN_STATE_SETUPWAIT); 883 fsm_newstate(fi, CONN_STATE_SETUPWAIT);
872 rc = iucv_connect(&(conn->pathid), NETIUCV_QUEUELEN_DEFAULT, iucvMagic, 884 conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL);
873 conn->userid, iucv_host, 0, NULL, &msglimit, 885 rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid,
874 conn->handle, conn); 886 NULL, iucvMagic, conn);
875 switch (rc) { 887 switch (rc) {
876 case 0: 888 case 0:
877 conn->netdev->tx_queue_len = msglimit; 889 conn->netdev->tx_queue_len = conn->path->msglim;
878 fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC, 890 fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC,
879 CONN_EVENT_TIMER, conn); 891 CONN_EVENT_TIMER, conn);
880 return; 892 return;
881 case 11: 893 case 11:
882 PRINT_INFO("%s: User %s is currently not available.\n", 894 PRINT_INFO("%s: User %s is currently not available.\n",
883 conn->netdev->name, 895 conn->netdev->name,
884 netiucv_printname(conn->userid)); 896 netiucv_printname(conn->userid));
885 fsm_newstate(fi, CONN_STATE_STARTWAIT); 897 fsm_newstate(fi, CONN_STATE_STARTWAIT);
886 return; 898 break;
887 case 12: 899 case 12:
888 PRINT_INFO("%s: User %s is currently not ready.\n", 900 PRINT_INFO("%s: User %s is currently not ready.\n",
889 conn->netdev->name, 901 conn->netdev->name,
890 netiucv_printname(conn->userid)); 902 netiucv_printname(conn->userid));
891 fsm_newstate(fi, CONN_STATE_STARTWAIT); 903 fsm_newstate(fi, CONN_STATE_STARTWAIT);
892 return; 904 break;
893 case 13: 905 case 13:
894 PRINT_WARN("%s: Too many IUCV connections.\n", 906 PRINT_WARN("%s: Too many IUCV connections.\n",
895 conn->netdev->name); 907 conn->netdev->name);
896 fsm_newstate(fi, CONN_STATE_CONNERR); 908 fsm_newstate(fi, CONN_STATE_CONNERR);
897 break; 909 break;
898 case 14: 910 case 14:
899 PRINT_WARN( 911 PRINT_WARN("%s: User %s has too many IUCV connections.\n",
900 "%s: User %s has too many IUCV connections.\n", 912 conn->netdev->name,
901 conn->netdev->name, 913 netiucv_printname(conn->userid));
902 netiucv_printname(conn->userid)); 914 fsm_newstate(fi, CONN_STATE_CONNERR);
903 fsm_newstate(fi, CONN_STATE_CONNERR); 915 break;
904 break; 916 case 15:
905 case 15: 917 PRINT_WARN("%s: No IUCV authorization in CP directory.\n",
906 PRINT_WARN( 918 conn->netdev->name);
907 "%s: No IUCV authorization in CP directory.\n", 919 fsm_newstate(fi, CONN_STATE_CONNERR);
908 conn->netdev->name); 920 break;
909 fsm_newstate(fi, CONN_STATE_CONNERR); 921 default:
910 break; 922 PRINT_WARN("%s: iucv_connect returned error %d\n",
911 default: 923 conn->netdev->name, rc);
912 PRINT_WARN("%s: iucv_connect returned error %d\n", 924 fsm_newstate(fi, CONN_STATE_CONNERR);
913 conn->netdev->name, rc); 925 break;
914 fsm_newstate(fi, CONN_STATE_CONNERR);
915 break;
916 } 926 }
917 IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc); 927 IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc);
918 IUCV_DBF_TEXT(trace, 5, "calling iucv_unregister_program\n"); 928 kfree(conn->path);
919 iucv_unregister_program(conn->handle); 929 conn->path = NULL;
920 conn->handle = NULL;
921} 930}
922 931
923static void 932static void netiucv_purge_skb_queue(struct sk_buff_head *q)
924netiucv_purge_skb_queue(struct sk_buff_head *q)
925{ 933{
926 struct sk_buff *skb; 934 struct sk_buff *skb;
927 935
@@ -931,36 +939,34 @@ netiucv_purge_skb_queue(struct sk_buff_head *q)
931 } 939 }
932} 940}
933 941
934static void 942static void conn_action_stop(fsm_instance *fi, int event, void *arg)
935conn_action_stop(fsm_instance *fi, int event, void *arg)
936{ 943{
937 struct iucv_event *ev = (struct iucv_event *)arg; 944 struct iucv_event *ev = arg;
938 struct iucv_connection *conn = ev->conn; 945 struct iucv_connection *conn = ev->conn;
939 struct net_device *netdev = conn->netdev; 946 struct net_device *netdev = conn->netdev;
940 struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv; 947 struct netiucv_priv *privptr = netdev_priv(netdev);
941 948
942 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 949 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
943 950
944 fsm_deltimer(&conn->timer); 951 fsm_deltimer(&conn->timer);
945 fsm_newstate(fi, CONN_STATE_STOPPED); 952 fsm_newstate(fi, CONN_STATE_STOPPED);
946 netiucv_purge_skb_queue(&conn->collect_queue); 953 netiucv_purge_skb_queue(&conn->collect_queue);
947 if (conn->handle) 954 if (conn->path) {
948 IUCV_DBF_TEXT(trace, 5, "calling iucv_unregister_program\n"); 955 IUCV_DBF_TEXT(trace, 5, "calling iucv_path_sever\n");
949 iucv_unregister_program(conn->handle); 956 iucv_path_sever(conn->path, iucvMagic);
950 conn->handle = NULL; 957 kfree(conn->path);
958 conn->path = NULL;
959 }
951 netiucv_purge_skb_queue(&conn->commit_queue); 960 netiucv_purge_skb_queue(&conn->commit_queue);
952 fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev); 961 fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
953} 962}
954 963
955static void 964static void conn_action_inval(fsm_instance *fi, int event, void *arg)
956conn_action_inval(fsm_instance *fi, int event, void *arg)
957{ 965{
958 struct iucv_event *ev = (struct iucv_event *)arg; 966 struct iucv_connection *conn = arg;
959 struct iucv_connection *conn = ev->conn;
960 struct net_device *netdev = conn->netdev; 967 struct net_device *netdev = conn->netdev;
961 968
962 PRINT_WARN("%s: Cannot connect without username\n", 969 PRINT_WARN("%s: Cannot connect without username\n", netdev->name);
963 netdev->name);
964 IUCV_DBF_TEXT(data, 2, "conn_action_inval called\n"); 970 IUCV_DBF_TEXT(data, 2, "conn_action_inval called\n");
965} 971}
966 972
@@ -999,29 +1005,27 @@ static const fsm_node conn_fsm[] = {
999static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node); 1005static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node);
1000 1006
1001 1007
1002/** 1008/*
1003 * Actions for interface - statemachine. 1009 * Actions for interface - statemachine.
1004 *****************************************************************************/ 1010 */
1005 1011
1006/** 1012/**
1007 * Startup connection by sending CONN_EVENT_START to it. 1013 * dev_action_start
1014 * @fi: An instance of an interface statemachine.
1015 * @event: The event, just happened.
1016 * @arg: Generic pointer, casted from struct net_device * upon call.
1008 * 1017 *
1009 * @param fi An instance of an interface statemachine. 1018 * Startup connection by sending CONN_EVENT_START to it.
1010 * @param event The event, just happened.
1011 * @param arg Generic pointer, casted from struct net_device * upon call.
1012 */ 1019 */
1013static void 1020static void dev_action_start(fsm_instance *fi, int event, void *arg)
1014dev_action_start(fsm_instance *fi, int event, void *arg)
1015{ 1021{
1016 struct net_device *dev = (struct net_device *)arg; 1022 struct net_device *dev = arg;
1017 struct netiucv_priv *privptr = dev->priv; 1023 struct netiucv_priv *privptr = netdev_priv(dev);
1018 struct iucv_event ev;
1019 1024
1020 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 1025 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1021 1026
1022 ev.conn = privptr->conn;
1023 fsm_newstate(fi, DEV_STATE_STARTWAIT); 1027 fsm_newstate(fi, DEV_STATE_STARTWAIT);
1024 fsm_event(privptr->conn->fsm, CONN_EVENT_START, &ev); 1028 fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn);
1025} 1029}
1026 1030
1027/** 1031/**
@@ -1034,8 +1038,8 @@ dev_action_start(fsm_instance *fi, int event, void *arg)
1034static void 1038static void
1035dev_action_stop(fsm_instance *fi, int event, void *arg) 1039dev_action_stop(fsm_instance *fi, int event, void *arg)
1036{ 1040{
1037 struct net_device *dev = (struct net_device *)arg; 1041 struct net_device *dev = arg;
1038 struct netiucv_priv *privptr = dev->priv; 1042 struct netiucv_priv *privptr = netdev_priv(dev);
1039 struct iucv_event ev; 1043 struct iucv_event ev;
1040 1044
1041 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 1045 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
@@ -1057,8 +1061,8 @@ dev_action_stop(fsm_instance *fi, int event, void *arg)
1057static void 1061static void
1058dev_action_connup(fsm_instance *fi, int event, void *arg) 1062dev_action_connup(fsm_instance *fi, int event, void *arg)
1059{ 1063{
1060 struct net_device *dev = (struct net_device *)arg; 1064 struct net_device *dev = arg;
1061 struct netiucv_priv *privptr = dev->priv; 1065 struct netiucv_priv *privptr = netdev_priv(dev);
1062 1066
1063 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 1067 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1064 1068
@@ -1131,11 +1135,13 @@ static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node);
1131 * 1135 *
1132 * @return 0 on success, -ERRNO on failure. (Never fails.) 1136 * @return 0 on success, -ERRNO on failure. (Never fails.)
1133 */ 1137 */
1134static int 1138static int netiucv_transmit_skb(struct iucv_connection *conn,
1135netiucv_transmit_skb(struct iucv_connection *conn, struct sk_buff *skb) { 1139 struct sk_buff *skb)
1140{
1141 struct iucv_message msg;
1136 unsigned long saveflags; 1142 unsigned long saveflags;
1137 ll_header header; 1143 struct ll_header header;
1138 int rc = 0; 1144 int rc;
1139 1145
1140 if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) { 1146 if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) {
1141 int l = skb->len + NETIUCV_HDRLEN; 1147 int l = skb->len + NETIUCV_HDRLEN;
@@ -1145,11 +1151,12 @@ netiucv_transmit_skb(struct iucv_connection *conn, struct sk_buff *skb) {
1145 (conn->max_buffsize - NETIUCV_HDRLEN)) { 1151 (conn->max_buffsize - NETIUCV_HDRLEN)) {
1146 rc = -EBUSY; 1152 rc = -EBUSY;
1147 IUCV_DBF_TEXT(data, 2, 1153 IUCV_DBF_TEXT(data, 2,
1148 "EBUSY from netiucv_transmit_skb\n"); 1154 "EBUSY from netiucv_transmit_skb\n");
1149 } else { 1155 } else {
1150 atomic_inc(&skb->users); 1156 atomic_inc(&skb->users);
1151 skb_queue_tail(&conn->collect_queue, skb); 1157 skb_queue_tail(&conn->collect_queue, skb);
1152 conn->collect_len += l; 1158 conn->collect_len += l;
1159 rc = 0;
1153 } 1160 }
1154 spin_unlock_irqrestore(&conn->collect_lock, saveflags); 1161 spin_unlock_irqrestore(&conn->collect_lock, saveflags);
1155 } else { 1162 } else {
@@ -1188,9 +1195,10 @@ netiucv_transmit_skb(struct iucv_connection *conn, struct sk_buff *skb) {
1188 fsm_newstate(conn->fsm, CONN_STATE_TX); 1195 fsm_newstate(conn->fsm, CONN_STATE_TX);
1189 conn->prof.send_stamp = xtime; 1196 conn->prof.send_stamp = xtime;
1190 1197
1191 rc = iucv_send(conn->pathid, NULL, 0, 0, 1 /* single_flag */, 1198 msg.tag = 1;
1192 0, nskb->data, nskb->len); 1199 msg.class = 0;
1193 /* Shut up, gcc! nskb is always below 2G. */ 1200 rc = iucv_message_send(conn->path, &msg, 0, 0,
1201 nskb->data, nskb->len);
1194 conn->prof.doios_single++; 1202 conn->prof.doios_single++;
1195 conn->prof.txlen += skb->len; 1203 conn->prof.txlen += skb->len;
1196 conn->prof.tx_pending++; 1204 conn->prof.tx_pending++;
@@ -1200,7 +1208,7 @@ netiucv_transmit_skb(struct iucv_connection *conn, struct sk_buff *skb) {
1200 struct netiucv_priv *privptr; 1208 struct netiucv_priv *privptr;
1201 fsm_newstate(conn->fsm, CONN_STATE_IDLE); 1209 fsm_newstate(conn->fsm, CONN_STATE_IDLE);
1202 conn->prof.tx_pending--; 1210 conn->prof.tx_pending--;
1203 privptr = (struct netiucv_priv *)conn->netdev->priv; 1211 privptr = netdev_priv(conn->netdev);
1204 if (privptr) 1212 if (privptr)
1205 privptr->stats.tx_errors++; 1213 privptr->stats.tx_errors++;
1206 if (copied) 1214 if (copied)
@@ -1226,9 +1234,9 @@ netiucv_transmit_skb(struct iucv_connection *conn, struct sk_buff *skb) {
1226 return rc; 1234 return rc;
1227} 1235}
1228 1236
1229/** 1237/*
1230 * Interface API for upper network layers 1238 * Interface API for upper network layers
1231 *****************************************************************************/ 1239 */
1232 1240
1233/** 1241/**
1234 * Open an interface. 1242 * Open an interface.
@@ -1238,9 +1246,11 @@ netiucv_transmit_skb(struct iucv_connection *conn, struct sk_buff *skb) {
1238 * 1246 *
1239 * @return 0 on success, -ERRNO on failure. (Never fails.) 1247 * @return 0 on success, -ERRNO on failure. (Never fails.)
1240 */ 1248 */
1241static int 1249static int netiucv_open(struct net_device *dev)
1242netiucv_open(struct net_device *dev) { 1250{
1243 fsm_event(((struct netiucv_priv *)dev->priv)->fsm, DEV_EVENT_START,dev); 1251 struct netiucv_priv *priv = netdev_priv(dev);
1252
1253 fsm_event(priv->fsm, DEV_EVENT_START, dev);
1244 return 0; 1254 return 0;
1245} 1255}
1246 1256
@@ -1252,9 +1262,11 @@ netiucv_open(struct net_device *dev) {
1252 * 1262 *
1253 * @return 0 on success, -ERRNO on failure. (Never fails.) 1263 * @return 0 on success, -ERRNO on failure. (Never fails.)
1254 */ 1264 */
1255static int 1265static int netiucv_close(struct net_device *dev)
1256netiucv_close(struct net_device *dev) { 1266{
1257 fsm_event(((struct netiucv_priv *)dev->priv)->fsm, DEV_EVENT_STOP, dev); 1267 struct netiucv_priv *priv = netdev_priv(dev);
1268
1269 fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
1258 return 0; 1270 return 0;
1259} 1271}
1260 1272
@@ -1271,8 +1283,8 @@ netiucv_close(struct net_device *dev) {
1271 */ 1283 */
1272static int netiucv_tx(struct sk_buff *skb, struct net_device *dev) 1284static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
1273{ 1285{
1274 int rc = 0; 1286 struct netiucv_priv *privptr = netdev_priv(dev);
1275 struct netiucv_priv *privptr = dev->priv; 1287 int rc;
1276 1288
1277 IUCV_DBF_TEXT(trace, 4, __FUNCTION__); 1289 IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1278 /** 1290 /**
@@ -1312,40 +1324,41 @@ static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
1312 return -EBUSY; 1324 return -EBUSY;
1313 } 1325 }
1314 dev->trans_start = jiffies; 1326 dev->trans_start = jiffies;
1315 if (netiucv_transmit_skb(privptr->conn, skb)) 1327 rc = netiucv_transmit_skb(privptr->conn, skb) != 0;
1316 rc = 1;
1317 netiucv_clear_busy(dev); 1328 netiucv_clear_busy(dev);
1318 return rc; 1329 return rc;
1319} 1330}
1320 1331
1321/** 1332/**
1322 * Returns interface statistics of a device. 1333 * netiucv_stats
1334 * @dev: Pointer to interface struct.
1323 * 1335 *
1324 * @param dev Pointer to interface struct. 1336 * Returns interface statistics of a device.
1325 * 1337 *
1326 * @return Pointer to stats struct of this interface. 1338 * Returns pointer to stats struct of this interface.
1327 */ 1339 */
1328static struct net_device_stats * 1340static struct net_device_stats *netiucv_stats (struct net_device * dev)
1329netiucv_stats (struct net_device * dev)
1330{ 1341{
1342 struct netiucv_priv *priv = netdev_priv(dev);
1343
1331 IUCV_DBF_TEXT(trace, 5, __FUNCTION__); 1344 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1332 return &((struct netiucv_priv *)dev->priv)->stats; 1345 return &priv->stats;
1333} 1346}
1334 1347
1335/** 1348/**
1336 * Sets MTU of an interface. 1349 * netiucv_change_mtu
1350 * @dev: Pointer to interface struct.
1351 * @new_mtu: The new MTU to use for this interface.
1337 * 1352 *
1338 * @param dev Pointer to interface struct. 1353 * Sets MTU of an interface.
1339 * @param new_mtu The new MTU to use for this interface.
1340 * 1354 *
1341 * @return 0 on success, -EINVAL if MTU is out of valid range. 1355 * Returns 0 on success, -EINVAL if MTU is out of valid range.
1342 * (valid range is 576 .. NETIUCV_MTU_MAX). 1356 * (valid range is 576 .. NETIUCV_MTU_MAX).
1343 */ 1357 */
1344static int 1358static int netiucv_change_mtu(struct net_device * dev, int new_mtu)
1345netiucv_change_mtu (struct net_device * dev, int new_mtu)
1346{ 1359{
1347 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 1360 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1348 if ((new_mtu < 576) || (new_mtu > NETIUCV_MTU_MAX)) { 1361 if (new_mtu < 576 || new_mtu > NETIUCV_MTU_MAX) {
1349 IUCV_DBF_TEXT(setup, 2, "given MTU out of valid range\n"); 1362 IUCV_DBF_TEXT(setup, 2, "given MTU out of valid range\n");
1350 return -EINVAL; 1363 return -EINVAL;
1351 } 1364 }
@@ -1353,12 +1366,12 @@ netiucv_change_mtu (struct net_device * dev, int new_mtu)
1353 return 0; 1366 return 0;
1354} 1367}
1355 1368
1356/** 1369/*
1357 * attributes in sysfs 1370 * attributes in sysfs
1358 *****************************************************************************/ 1371 */
1359 1372
1360static ssize_t 1373static ssize_t user_show(struct device *dev, struct device_attribute *attr,
1361user_show (struct device *dev, struct device_attribute *attr, char *buf) 1374 char *buf)
1362{ 1375{
1363 struct netiucv_priv *priv = dev->driver_data; 1376 struct netiucv_priv *priv = dev->driver_data;
1364 1377
@@ -1366,8 +1379,8 @@ user_show (struct device *dev, struct device_attribute *attr, char *buf)
1366 return sprintf(buf, "%s\n", netiucv_printname(priv->conn->userid)); 1379 return sprintf(buf, "%s\n", netiucv_printname(priv->conn->userid));
1367} 1380}
1368 1381
1369static ssize_t 1382static ssize_t user_write(struct device *dev, struct device_attribute *attr,
1370user_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 1383 const char *buf, size_t count)
1371{ 1384{
1372 struct netiucv_priv *priv = dev->driver_data; 1385 struct netiucv_priv *priv = dev->driver_data;
1373 struct net_device *ndev = priv->conn->netdev; 1386 struct net_device *ndev = priv->conn->netdev;
@@ -1375,80 +1388,70 @@ user_write (struct device *dev, struct device_attribute *attr, const char *buf,
1375 char *tmp; 1388 char *tmp;
1376 char username[9]; 1389 char username[9];
1377 int i; 1390 int i;
1378 struct iucv_connection **clist = &iucv_conns.iucv_connections; 1391 struct iucv_connection *cp;
1379 unsigned long flags;
1380 1392
1381 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 1393 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1382 if (count>9) { 1394 if (count > 9) {
1383 PRINT_WARN("netiucv: username too long (%d)!\n", (int)count); 1395 PRINT_WARN("netiucv: username too long (%d)!\n", (int) count);
1384 IUCV_DBF_TEXT_(setup, 2, 1396 IUCV_DBF_TEXT_(setup, 2,
1385 "%d is length of username\n", (int)count); 1397 "%d is length of username\n", (int) count);
1386 return -EINVAL; 1398 return -EINVAL;
1387 } 1399 }
1388 1400
1389 tmp = strsep((char **) &buf, "\n"); 1401 tmp = strsep((char **) &buf, "\n");
1390 for (i=0, p=tmp; i<8 && *p; i++, p++) { 1402 for (i = 0, p = tmp; i < 8 && *p; i++, p++) {
1391 if (isalnum(*p) || (*p == '$')) 1403 if (isalnum(*p) || (*p == '$')) {
1392 username[i]= toupper(*p); 1404 username[i]= toupper(*p);
1393 else if (*p == '\n') { 1405 continue;
1406 }
1407 if (*p == '\n') {
1394 /* trailing lf, grr */ 1408 /* trailing lf, grr */
1395 break; 1409 break;
1396 } else {
1397 PRINT_WARN("netiucv: Invalid char %c in username!\n",
1398 *p);
1399 IUCV_DBF_TEXT_(setup, 2,
1400 "username: invalid character %c\n",
1401 *p);
1402 return -EINVAL;
1403 } 1410 }
1411 PRINT_WARN("netiucv: Invalid char %c in username!\n", *p);
1412 IUCV_DBF_TEXT_(setup, 2,
1413 "username: invalid character %c\n", *p);
1414 return -EINVAL;
1404 } 1415 }
1405 while (i<8) 1416 while (i < 8)
1406 username[i++] = ' '; 1417 username[i++] = ' ';
1407 username[8] = '\0'; 1418 username[8] = '\0';
1408 1419
1409 if (memcmp(username, priv->conn->userid, 9)) { 1420 if (memcmp(username, priv->conn->userid, 9) &&
1410 /* username changed */ 1421 (ndev->flags & (IFF_UP | IFF_RUNNING))) {
1411 if (ndev->flags & (IFF_UP | IFF_RUNNING)) { 1422 /* username changed while the interface is active. */
1412 PRINT_WARN( 1423 PRINT_WARN("netiucv: device %s active, connected to %s\n",
1413 "netiucv: device %s active, connected to %s\n", 1424 dev->bus_id, priv->conn->userid);
1414 dev->bus_id, priv->conn->userid); 1425 PRINT_WARN("netiucv: user cannot be updated\n");
1415 PRINT_WARN("netiucv: user cannot be updated\n"); 1426 IUCV_DBF_TEXT(setup, 2, "user_write: device active\n");
1416 IUCV_DBF_TEXT(setup, 2, "user_write: device active\n"); 1427 return -EBUSY;
1417 return -EBUSY; 1428 }
1429 read_lock_bh(&iucv_connection_rwlock);
1430 list_for_each_entry(cp, &iucv_connection_list, list) {
1431 if (!strncmp(username, cp->userid, 9) && cp->netdev != ndev) {
1432 read_unlock_bh(&iucv_connection_rwlock);
1433 PRINT_WARN("netiucv: Connection to %s already "
1434 "exists\n", username);
1435 return -EEXIST;
1418 } 1436 }
1419 } 1437 }
1420 read_lock_irqsave(&iucv_conns.iucv_rwlock, flags); 1438 read_unlock_bh(&iucv_connection_rwlock);
1421 while (*clist) {
1422 if (!strncmp(username, (*clist)->userid, 9) ||
1423 ((*clist)->netdev != ndev))
1424 break;
1425 clist = &((*clist)->next);
1426 }
1427 read_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags);
1428 if (*clist) {
1429 PRINT_WARN("netiucv: Connection to %s already exists\n",
1430 username);
1431 return -EEXIST;
1432 }
1433 memcpy(priv->conn->userid, username, 9); 1439 memcpy(priv->conn->userid, username, 9);
1434
1435 return count; 1440 return count;
1436
1437} 1441}
1438 1442
1439static DEVICE_ATTR(user, 0644, user_show, user_write); 1443static DEVICE_ATTR(user, 0644, user_show, user_write);
1440 1444
1441static ssize_t 1445static ssize_t buffer_show (struct device *dev, struct device_attribute *attr,
1442buffer_show (struct device *dev, struct device_attribute *attr, char *buf) 1446 char *buf)
1443{ 1447{ struct netiucv_priv *priv = dev->driver_data;
1444 struct netiucv_priv *priv = dev->driver_data;
1445 1448
1446 IUCV_DBF_TEXT(trace, 5, __FUNCTION__); 1449 IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1447 return sprintf(buf, "%d\n", priv->conn->max_buffsize); 1450 return sprintf(buf, "%d\n", priv->conn->max_buffsize);
1448} 1451}
1449 1452
1450static ssize_t 1453static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
1451buffer_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 1454 const char *buf, size_t count)
1452{ 1455{
1453 struct netiucv_priv *priv = dev->driver_data; 1456 struct netiucv_priv *priv = dev->driver_data;
1454 struct net_device *ndev = priv->conn->netdev; 1457 struct net_device *ndev = priv->conn->netdev;
@@ -1502,8 +1505,8 @@ buffer_write (struct device *dev, struct device_attribute *attr, const char *buf
1502 1505
1503static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write); 1506static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
1504 1507
1505static ssize_t 1508static ssize_t dev_fsm_show (struct device *dev, struct device_attribute *attr,
1506dev_fsm_show (struct device *dev, struct device_attribute *attr, char *buf) 1509 char *buf)
1507{ 1510{
1508 struct netiucv_priv *priv = dev->driver_data; 1511 struct netiucv_priv *priv = dev->driver_data;
1509 1512
@@ -1513,8 +1516,8 @@ dev_fsm_show (struct device *dev, struct device_attribute *attr, char *buf)
1513 1516
1514static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL); 1517static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL);
1515 1518
1516static ssize_t 1519static ssize_t conn_fsm_show (struct device *dev,
1517conn_fsm_show (struct device *dev, struct device_attribute *attr, char *buf) 1520 struct device_attribute *attr, char *buf)
1518{ 1521{
1519 struct netiucv_priv *priv = dev->driver_data; 1522 struct netiucv_priv *priv = dev->driver_data;
1520 1523
@@ -1524,8 +1527,8 @@ conn_fsm_show (struct device *dev, struct device_attribute *attr, char *buf)
1524 1527
1525static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL); 1528static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL);
1526 1529
1527static ssize_t 1530static ssize_t maxmulti_show (struct device *dev,
1528maxmulti_show (struct device *dev, struct device_attribute *attr, char *buf) 1531 struct device_attribute *attr, char *buf)
1529{ 1532{
1530 struct netiucv_priv *priv = dev->driver_data; 1533 struct netiucv_priv *priv = dev->driver_data;
1531 1534
@@ -1533,8 +1536,9 @@ maxmulti_show (struct device *dev, struct device_attribute *attr, char *buf)
1533 return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti); 1536 return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
1534} 1537}
1535 1538
1536static ssize_t 1539static ssize_t maxmulti_write (struct device *dev,
1537maxmulti_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 1540 struct device_attribute *attr,
1541 const char *buf, size_t count)
1538{ 1542{
1539 struct netiucv_priv *priv = dev->driver_data; 1543 struct netiucv_priv *priv = dev->driver_data;
1540 1544
@@ -1545,8 +1549,8 @@ maxmulti_write (struct device *dev, struct device_attribute *attr, const char *b
1545 1549
1546static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write); 1550static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write);
1547 1551
1548static ssize_t 1552static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr,
1549maxcq_show (struct device *dev, struct device_attribute *attr, char *buf) 1553 char *buf)
1550{ 1554{
1551 struct netiucv_priv *priv = dev->driver_data; 1555 struct netiucv_priv *priv = dev->driver_data;
1552 1556
@@ -1554,8 +1558,8 @@ maxcq_show (struct device *dev, struct device_attribute *attr, char *buf)
1554 return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue); 1558 return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
1555} 1559}
1556 1560
1557static ssize_t 1561static ssize_t maxcq_write (struct device *dev, struct device_attribute *attr,
1558maxcq_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 1562 const char *buf, size_t count)
1559{ 1563{
1560 struct netiucv_priv *priv = dev->driver_data; 1564 struct netiucv_priv *priv = dev->driver_data;
1561 1565
@@ -1566,8 +1570,8 @@ maxcq_write (struct device *dev, struct device_attribute *attr, const char *buf,
1566 1570
1567static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write); 1571static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write);
1568 1572
1569static ssize_t 1573static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr,
1570sdoio_show (struct device *dev, struct device_attribute *attr, char *buf) 1574 char *buf)
1571{ 1575{
1572 struct netiucv_priv *priv = dev->driver_data; 1576 struct netiucv_priv *priv = dev->driver_data;
1573 1577
@@ -1575,8 +1579,8 @@ sdoio_show (struct device *dev, struct device_attribute *attr, char *buf)
1575 return sprintf(buf, "%ld\n", priv->conn->prof.doios_single); 1579 return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
1576} 1580}
1577 1581
1578static ssize_t 1582static ssize_t sdoio_write (struct device *dev, struct device_attribute *attr,
1579sdoio_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 1583 const char *buf, size_t count)
1580{ 1584{
1581 struct netiucv_priv *priv = dev->driver_data; 1585 struct netiucv_priv *priv = dev->driver_data;
1582 1586
@@ -1587,8 +1591,8 @@ sdoio_write (struct device *dev, struct device_attribute *attr, const char *buf,
1587 1591
1588static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write); 1592static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write);
1589 1593
1590static ssize_t 1594static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr,
1591mdoio_show (struct device *dev, struct device_attribute *attr, char *buf) 1595 char *buf)
1592{ 1596{
1593 struct netiucv_priv *priv = dev->driver_data; 1597 struct netiucv_priv *priv = dev->driver_data;
1594 1598
@@ -1596,8 +1600,8 @@ mdoio_show (struct device *dev, struct device_attribute *attr, char *buf)
1596 return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi); 1600 return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
1597} 1601}
1598 1602
1599static ssize_t 1603static ssize_t mdoio_write (struct device *dev, struct device_attribute *attr,
1600mdoio_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 1604 const char *buf, size_t count)
1601{ 1605{
1602 struct netiucv_priv *priv = dev->driver_data; 1606 struct netiucv_priv *priv = dev->driver_data;
1603 1607
@@ -1608,8 +1612,8 @@ mdoio_write (struct device *dev, struct device_attribute *attr, const char *buf,
1608 1612
1609static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write); 1613static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write);
1610 1614
1611static ssize_t 1615static ssize_t txlen_show (struct device *dev, struct device_attribute *attr,
1612txlen_show (struct device *dev, struct device_attribute *attr, char *buf) 1616 char *buf)
1613{ 1617{
1614 struct netiucv_priv *priv = dev->driver_data; 1618 struct netiucv_priv *priv = dev->driver_data;
1615 1619
@@ -1617,8 +1621,8 @@ txlen_show (struct device *dev, struct device_attribute *attr, char *buf)
1617 return sprintf(buf, "%ld\n", priv->conn->prof.txlen); 1621 return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
1618} 1622}
1619 1623
1620static ssize_t 1624static ssize_t txlen_write (struct device *dev, struct device_attribute *attr,
1621txlen_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 1625 const char *buf, size_t count)
1622{ 1626{
1623 struct netiucv_priv *priv = dev->driver_data; 1627 struct netiucv_priv *priv = dev->driver_data;
1624 1628
@@ -1629,8 +1633,8 @@ txlen_write (struct device *dev, struct device_attribute *attr, const char *buf,
1629 1633
1630static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write); 1634static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write);
1631 1635
1632static ssize_t 1636static ssize_t txtime_show (struct device *dev, struct device_attribute *attr,
1633txtime_show (struct device *dev, struct device_attribute *attr, char *buf) 1637 char *buf)
1634{ 1638{
1635 struct netiucv_priv *priv = dev->driver_data; 1639 struct netiucv_priv *priv = dev->driver_data;
1636 1640
@@ -1638,8 +1642,8 @@ txtime_show (struct device *dev, struct device_attribute *attr, char *buf)
1638 return sprintf(buf, "%ld\n", priv->conn->prof.tx_time); 1642 return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
1639} 1643}
1640 1644
1641static ssize_t 1645static ssize_t txtime_write (struct device *dev, struct device_attribute *attr,
1642txtime_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 1646 const char *buf, size_t count)
1643{ 1647{
1644 struct netiucv_priv *priv = dev->driver_data; 1648 struct netiucv_priv *priv = dev->driver_data;
1645 1649
@@ -1650,8 +1654,8 @@ txtime_write (struct device *dev, struct device_attribute *attr, const char *buf
1650 1654
1651static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write); 1655static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write);
1652 1656
1653static ssize_t 1657static ssize_t txpend_show (struct device *dev, struct device_attribute *attr,
1654txpend_show (struct device *dev, struct device_attribute *attr, char *buf) 1658 char *buf)
1655{ 1659{
1656 struct netiucv_priv *priv = dev->driver_data; 1660 struct netiucv_priv *priv = dev->driver_data;
1657 1661
@@ -1659,8 +1663,8 @@ txpend_show (struct device *dev, struct device_attribute *attr, char *buf)
1659 return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending); 1663 return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
1660} 1664}
1661 1665
1662static ssize_t 1666static ssize_t txpend_write (struct device *dev, struct device_attribute *attr,
1663txpend_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 1667 const char *buf, size_t count)
1664{ 1668{
1665 struct netiucv_priv *priv = dev->driver_data; 1669 struct netiucv_priv *priv = dev->driver_data;
1666 1670
@@ -1671,8 +1675,8 @@ txpend_write (struct device *dev, struct device_attribute *attr, const char *buf
1671 1675
1672static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write); 1676static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write);
1673 1677
1674static ssize_t 1678static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr,
1675txmpnd_show (struct device *dev, struct device_attribute *attr, char *buf) 1679 char *buf)
1676{ 1680{
1677 struct netiucv_priv *priv = dev->driver_data; 1681 struct netiucv_priv *priv = dev->driver_data;
1678 1682
@@ -1680,8 +1684,8 @@ txmpnd_show (struct device *dev, struct device_attribute *attr, char *buf)
1680 return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending); 1684 return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
1681} 1685}
1682 1686
1683static ssize_t 1687static ssize_t txmpnd_write (struct device *dev, struct device_attribute *attr,
1684txmpnd_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 1688 const char *buf, size_t count)
1685{ 1689{
1686 struct netiucv_priv *priv = dev->driver_data; 1690 struct netiucv_priv *priv = dev->driver_data;
1687 1691
@@ -1721,8 +1725,7 @@ static struct attribute_group netiucv_stat_attr_group = {
1721 .attrs = netiucv_stat_attrs, 1725 .attrs = netiucv_stat_attrs,
1722}; 1726};
1723 1727
1724static inline int 1728static inline int netiucv_add_files(struct device *dev)
1725netiucv_add_files(struct device *dev)
1726{ 1729{
1727 int ret; 1730 int ret;
1728 1731
@@ -1736,18 +1739,16 @@ netiucv_add_files(struct device *dev)
1736 return ret; 1739 return ret;
1737} 1740}
1738 1741
1739static inline void 1742static inline void netiucv_remove_files(struct device *dev)
1740netiucv_remove_files(struct device *dev)
1741{ 1743{
1742 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 1744 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1743 sysfs_remove_group(&dev->kobj, &netiucv_stat_attr_group); 1745 sysfs_remove_group(&dev->kobj, &netiucv_stat_attr_group);
1744 sysfs_remove_group(&dev->kobj, &netiucv_attr_group); 1746 sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
1745} 1747}
1746 1748
1747static int 1749static int netiucv_register_device(struct net_device *ndev)
1748netiucv_register_device(struct net_device *ndev)
1749{ 1750{
1750 struct netiucv_priv *priv = ndev->priv; 1751 struct netiucv_priv *priv = netdev_priv(ndev);
1751 struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL); 1752 struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL);
1752 int ret; 1753 int ret;
1753 1754
@@ -1786,8 +1787,7 @@ out_unreg:
1786 return ret; 1787 return ret;
1787} 1788}
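
A note on the dev->priv to netdev_priv(ndev) conversion above: alloc_netdev() reserves the driver's private area in the same allocation, directly behind struct net_device, and netdev_priv() simply computes that address instead of chasing a stored pointer. Below is a minimal userspace sketch of the layout trick; demo_netdev, demo_alloc_netdev and demo_netdev_priv are invented stand-ins, not the kernel API.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for struct net_device; the real one is much larger. */
struct demo_netdev {
        char name[16];
};

/* Round the struct size up so the private area is suitably aligned. */
#define DEMO_ALIGN 32
#define DEMO_NETDEV_SZ \
        ((sizeof(struct demo_netdev) + DEMO_ALIGN - 1) & ~(size_t)(DEMO_ALIGN - 1))

/* One allocation holds the device followed by its private data. */
static struct demo_netdev *demo_alloc_netdev(size_t sizeof_priv)
{
        return calloc(1, DEMO_NETDEV_SZ + sizeof_priv);
}

/* The private area starts right behind the (aligned) device struct. */
static void *demo_netdev_priv(struct demo_netdev *dev)
{
        return (char *)dev + DEMO_NETDEV_SZ;
}

struct demo_priv {
        int fsm_state;
};

int main(void)
{
        struct demo_netdev *dev = demo_alloc_netdev(sizeof(struct demo_priv));
        struct demo_priv *priv;

        if (!dev)
                return 1;
        priv = demo_netdev_priv(dev);
        priv->fsm_state = 1;
        printf("priv sits %zu bytes after dev\n",
               (size_t)((char *)priv - (char *)dev));
        free(dev);
        return 0;
}

One allocation instead of two also means the private area can never be NULL once the device exists, which is presumably why the cast-based dev->priv accessor is on its way out here.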
1788 1789
1789static void 1790static void netiucv_unregister_device(struct device *dev)
1790netiucv_unregister_device(struct device *dev)
1791{ 1791{
1792 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 1792 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1793 netiucv_remove_files(dev); 1793 netiucv_remove_files(dev);
@@ -1798,107 +1798,89 @@ netiucv_unregister_device(struct device *dev)
1798 * Allocate and initialize a new connection structure. 1798 * Allocate and initialize a new connection structure.
1799 * Add it to the list of netiucv connections. 1799 * Add it to the list of netiucv connections.
1800 */ 1800 */
1801static struct iucv_connection * 1801static struct iucv_connection *netiucv_new_connection(struct net_device *dev,
1802netiucv_new_connection(struct net_device *dev, char *username) 1802 char *username)
1803{ 1803{
1804 unsigned long flags; 1804 struct iucv_connection *conn;
1805 struct iucv_connection **clist = &iucv_conns.iucv_connections;
1806 struct iucv_connection *conn =
1807 kzalloc(sizeof(struct iucv_connection), GFP_KERNEL);
1808
1809 if (conn) {
1810 skb_queue_head_init(&conn->collect_queue);
1811 skb_queue_head_init(&conn->commit_queue);
1812 spin_lock_init(&conn->collect_lock);
1813 conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT;
1814 conn->netdev = dev;
1815
1816 conn->rx_buff = alloc_skb(NETIUCV_BUFSIZE_DEFAULT,
1817 GFP_KERNEL | GFP_DMA);
1818 if (!conn->rx_buff) {
1819 kfree(conn);
1820 return NULL;
1821 }
1822 conn->tx_buff = alloc_skb(NETIUCV_BUFSIZE_DEFAULT,
1823 GFP_KERNEL | GFP_DMA);
1824 if (!conn->tx_buff) {
1825 kfree_skb(conn->rx_buff);
1826 kfree(conn);
1827 return NULL;
1828 }
1829 conn->fsm = init_fsm("netiucvconn", conn_state_names,
1830 conn_event_names, NR_CONN_STATES,
1831 NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN,
1832 GFP_KERNEL);
1833 if (!conn->fsm) {
1834 kfree_skb(conn->tx_buff);
1835 kfree_skb(conn->rx_buff);
1836 kfree(conn);
1837 return NULL;
1838 }
1839 fsm_settimer(conn->fsm, &conn->timer);
1840 fsm_newstate(conn->fsm, CONN_STATE_INVALID);
1841
1842 if (username) {
1843 memcpy(conn->userid, username, 9);
1844 fsm_newstate(conn->fsm, CONN_STATE_STOPPED);
1845 }
1846 1805
1847 write_lock_irqsave(&iucv_conns.iucv_rwlock, flags); 1806 conn = kzalloc(sizeof(*conn), GFP_KERNEL);
1848 conn->next = *clist; 1807 if (!conn)
1849 *clist = conn; 1808 goto out;
1850 write_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags); 1809 skb_queue_head_init(&conn->collect_queue);
1810 skb_queue_head_init(&conn->commit_queue);
1811 spin_lock_init(&conn->collect_lock);
1812 conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT;
1813 conn->netdev = dev;
1814
1815 conn->rx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
1816 if (!conn->rx_buff)
1817 goto out_conn;
1818 conn->tx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
1819 if (!conn->tx_buff)
1820 goto out_rx;
1821 conn->fsm = init_fsm("netiucvconn", conn_state_names,
1822 conn_event_names, NR_CONN_STATES,
1823 NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN,
1824 GFP_KERNEL);
1825 if (!conn->fsm)
1826 goto out_tx;
1827
1828 fsm_settimer(conn->fsm, &conn->timer);
1829 fsm_newstate(conn->fsm, CONN_STATE_INVALID);
1830
1831 if (username) {
1832 memcpy(conn->userid, username, 9);
1833 fsm_newstate(conn->fsm, CONN_STATE_STOPPED);
1851 } 1834 }
1835
1836 write_lock_bh(&iucv_connection_rwlock);
1837 list_add_tail(&conn->list, &iucv_connection_list);
1838 write_unlock_bh(&iucv_connection_rwlock);
1852 return conn; 1839 return conn;
1840
1841out_tx:
1842 kfree_skb(conn->tx_buff);
1843out_rx:
1844 kfree_skb(conn->rx_buff);
1845out_conn:
1846 kfree(conn);
1847out:
1848 return NULL;
1853} 1849}
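
The rewritten netiucv_new_connection() above drops the old nested-if cleanup in favour of the kernel's goto-unwind idiom: each allocation failure jumps to a label that releases exactly what was acquired so far, in reverse order, keeping the error path linear. A self-contained userspace sketch of the same shape (the demo_* names are hypothetical):

#include <stdio.h>
#include <stdlib.h>

struct demo_conn {
        void *rx_buff;
        void *tx_buff;
};

static struct demo_conn *demo_new_connection(size_t bufsize)
{
        struct demo_conn *conn;

        conn = calloc(1, sizeof(*conn));
        if (!conn)
                goto out;
        conn->rx_buff = malloc(bufsize);
        if (!conn->rx_buff)
                goto out_conn;
        conn->tx_buff = malloc(bufsize);
        if (!conn->tx_buff)
                goto out_rx;
        return conn;

        /* Unwind in reverse order of acquisition. */
out_rx:
        free(conn->rx_buff);
out_conn:
        free(conn);
out:
        return NULL;
}

int main(void)
{
        struct demo_conn *conn = demo_new_connection(65536);

        if (conn) {
                puts("connection set up");
                free(conn->tx_buff);
                free(conn->rx_buff);
                free(conn);
        }
        return 0;
}

The pattern scales well: a fourth resource costs one new label rather than another level of nesting.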
1854 1850
1855/** 1851/**
1856 * Release a connection structure and remove it from the 1852 * Release a connection structure and remove it from the
1857 * list of netiucv connections. 1853 * list of netiucv connections.
1858 */ 1854 */
1859static void 1855static void netiucv_remove_connection(struct iucv_connection *conn)
1860netiucv_remove_connection(struct iucv_connection *conn)
1861{ 1856{
1862 struct iucv_connection **clist = &iucv_conns.iucv_connections;
1863 unsigned long flags;
1864
1865 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 1857 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1866 if (conn == NULL) 1858 write_lock_bh(&iucv_connection_rwlock);
1867 return; 1859 list_del_init(&conn->list);
1868 write_lock_irqsave(&iucv_conns.iucv_rwlock, flags); 1860 write_unlock_bh(&iucv_connection_rwlock);
1869 while (*clist) { 1861 if (conn->path) {
1870 if (*clist == conn) { 1862 iucv_path_sever(conn->path, iucvMagic);
1871 *clist = conn->next; 1863 kfree(conn->path);
1872 write_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags); 1864 conn->path = NULL;
1873 if (conn->handle) {
1874 iucv_unregister_program(conn->handle);
1875 conn->handle = NULL;
1876 }
1877 fsm_deltimer(&conn->timer);
1878 kfree_fsm(conn->fsm);
1879 kfree_skb(conn->rx_buff);
1880 kfree_skb(conn->tx_buff);
1881 return;
1882 }
1883 clist = &((*clist)->next);
1884 } 1865 }
1885 write_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags); 1866 fsm_deltimer(&conn->timer);
1867 kfree_fsm(conn->fsm);
1868 kfree_skb(conn->rx_buff);
1869 kfree_skb(conn->tx_buff);
1886} 1870}
1887 1871
1888/** 1872/**
1889 * Release everything of a net device. 1873 * Release everything of a net device.
1890 */ 1874 */
1891static void 1875static void netiucv_free_netdevice(struct net_device *dev)
1892netiucv_free_netdevice(struct net_device *dev)
1893{ 1876{
1894 struct netiucv_priv *privptr; 1877 struct netiucv_priv *privptr = netdev_priv(dev);
1895 1878
1896 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 1879 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1897 1880
1898 if (!dev) 1881 if (!dev)
1899 return; 1882 return;
1900 1883
1901 privptr = (struct netiucv_priv *)dev->priv;
1902 if (privptr) { 1884 if (privptr) {
1903 if (privptr->conn) 1885 if (privptr->conn)
1904 netiucv_remove_connection(privptr->conn); 1886 netiucv_remove_connection(privptr->conn);
@@ -1913,11 +1895,8 @@ netiucv_free_netdevice(struct net_device *dev)
1913/** 1895/**
1914 * Initialize a net device. (Called from kernel in alloc_netdev()) 1896 * Initialize a net device. (Called from kernel in alloc_netdev())
1915 */ 1897 */
1916static void 1898static void netiucv_setup_netdevice(struct net_device *dev)
1917netiucv_setup_netdevice(struct net_device *dev)
1918{ 1899{
1919 memset(dev->priv, 0, sizeof(struct netiucv_priv));
1920
1921 dev->mtu = NETIUCV_MTU_DEFAULT; 1900 dev->mtu = NETIUCV_MTU_DEFAULT;
1922 dev->hard_start_xmit = netiucv_tx; 1901 dev->hard_start_xmit = netiucv_tx;
1923 dev->open = netiucv_open; 1902 dev->open = netiucv_open;
@@ -1936,8 +1915,7 @@ netiucv_setup_netdevice(struct net_device *dev)
1936/** 1915/**
1937 * Allocate and initialize everything of a net device. 1916 * Allocate and initialize everything of a net device.
1938 */ 1917 */
1939static struct net_device * 1918static struct net_device *netiucv_init_netdevice(char *username)
1940netiucv_init_netdevice(char *username)
1941{ 1919{
1942 struct netiucv_priv *privptr; 1920 struct netiucv_priv *privptr;
1943 struct net_device *dev; 1921 struct net_device *dev;
@@ -1946,40 +1924,40 @@ netiucv_init_netdevice(char *username)
1946 netiucv_setup_netdevice); 1924 netiucv_setup_netdevice);
1947 if (!dev) 1925 if (!dev)
1948 return NULL; 1926 return NULL;
1949 if (dev_alloc_name(dev, dev->name) < 0) { 1927 if (dev_alloc_name(dev, dev->name) < 0)
1950 free_netdev(dev); 1928 goto out_netdev;
1951 return NULL;
1952 }
1953 1929
1954 privptr = (struct netiucv_priv *)dev->priv; 1930 privptr = netdev_priv(dev);
1955 privptr->fsm = init_fsm("netiucvdev", dev_state_names, 1931 privptr->fsm = init_fsm("netiucvdev", dev_state_names,
1956 dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS, 1932 dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS,
1957 dev_fsm, DEV_FSM_LEN, GFP_KERNEL); 1933 dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
1958 if (!privptr->fsm) { 1934 if (!privptr->fsm)
1959 free_netdev(dev); 1935 goto out_netdev;
1960 return NULL; 1936
1961 }
1962 privptr->conn = netiucv_new_connection(dev, username); 1937 privptr->conn = netiucv_new_connection(dev, username);
1963 if (!privptr->conn) { 1938 if (!privptr->conn) {
1964 kfree_fsm(privptr->fsm);
1965 free_netdev(dev);
1966 IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n"); 1939 IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n");
1967 return NULL; 1940 goto out_fsm;
1968 } 1941 }
1969 fsm_newstate(privptr->fsm, DEV_STATE_STOPPED); 1942 fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
1970
1971 return dev; 1943 return dev;
1944
1945out_fsm:
1946 kfree_fsm(privptr->fsm);
1947out_netdev:
1948 free_netdev(dev);
1949 return NULL;
1972} 1950}
1973 1951
1974static ssize_t 1952static ssize_t conn_write(struct device_driver *drv,
1975conn_write(struct device_driver *drv, const char *buf, size_t count) 1953 const char *buf, size_t count)
1976{ 1954{
1977 char *p; 1955 const char *p;
1978 char username[9]; 1956 char username[9];
1979 int i, ret; 1957 int i, rc;
1980 struct net_device *dev; 1958 struct net_device *dev;
1981 struct iucv_connection **clist = &iucv_conns.iucv_connections; 1959 struct netiucv_priv *priv;
1982 unsigned long flags; 1960 struct iucv_connection *cp;
1983 1961
1984 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 1962 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1985 if (count>9) { 1963 if (count>9) {
@@ -1988,83 +1966,82 @@ conn_write(struct device_driver *drv, const char *buf, size_t count)
1988 return -EINVAL; 1966 return -EINVAL;
1989 } 1967 }
1990 1968
1991 for (i=0, p=(char *)buf; i<8 && *p; i++, p++) { 1969 for (i = 0, p = buf; i < 8 && *p; i++, p++) {
1992 if (isalnum(*p) || (*p == '$')) 1970 if (isalnum(*p) || *p == '$') {
1993 username[i]= toupper(*p); 1971 username[i] = toupper(*p);
1994 else if (*p == '\n') { 1972 continue;
1973 }
1974 if (*p == '\n')
1995 /* trailing lf, grr */ 1975 /* trailing lf, grr */
1996 break; 1976 break;
1997 } else { 1977 PRINT_WARN("netiucv: Invalid character in username!\n");
1998 PRINT_WARN("netiucv: Invalid character in username!\n"); 1978 IUCV_DBF_TEXT_(setup, 2,
1999 IUCV_DBF_TEXT_(setup, 2, 1979 "conn_write: invalid character %c\n", *p);
2000 "conn_write: invalid character %c\n", *p); 1980 return -EINVAL;
2001 return -EINVAL;
2002 }
2003 } 1981 }
2004 while (i<8) 1982 while (i < 8)
2005 username[i++] = ' '; 1983 username[i++] = ' ';
2006 username[8] = '\0'; 1984 username[8] = '\0';
2007 1985
2008 read_lock_irqsave(&iucv_conns.iucv_rwlock, flags); 1986 read_lock_bh(&iucv_connection_rwlock);
2009 while (*clist) { 1987 list_for_each_entry(cp, &iucv_connection_list, list) {
2010 if (!strncmp(username, (*clist)->userid, 9)) 1988 if (!strncmp(username, cp->userid, 9)) {
2011 break; 1989 read_unlock_bh(&iucv_connection_rwlock);
2012 clist = &((*clist)->next); 1990 PRINT_WARN("netiucv: Connection to %s already "
2013 } 1991 "exists\n", username);
2014 read_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags); 1992 return -EEXIST;
2015 if (*clist) { 1993 }
2016 PRINT_WARN("netiucv: Connection to %s already exists\n",
2017 username);
2018 return -EEXIST;
2019 } 1994 }
1995 read_unlock_bh(&iucv_connection_rwlock);
1996
2020 dev = netiucv_init_netdevice(username); 1997 dev = netiucv_init_netdevice(username);
2021 if (!dev) { 1998 if (!dev) {
2022 PRINT_WARN( 1999 PRINT_WARN("netiucv: Could not allocate network device "
2023 "netiucv: Could not allocate network device structure " 2000 "structure for user '%s'\n",
2024 "for user '%s'\n", netiucv_printname(username)); 2001 netiucv_printname(username));
2025 IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n"); 2002 IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n");
2026 return -ENODEV; 2003 return -ENODEV;
2027 } 2004 }
2028 2005
2029 if ((ret = netiucv_register_device(dev))) { 2006 rc = netiucv_register_device(dev);
2007 if (rc) {
2030 IUCV_DBF_TEXT_(setup, 2, 2008 IUCV_DBF_TEXT_(setup, 2,
2031 "ret %d from netiucv_register_device\n", ret); 2009 "ret %d from netiucv_register_device\n", rc);
2032 goto out_free_ndev; 2010 goto out_free_ndev;
2033 } 2011 }
2034 2012
2035 /* sysfs magic */ 2013 /* sysfs magic */
2036 SET_NETDEV_DEV(dev, 2014 priv = netdev_priv(dev);
2037 (struct device*)((struct netiucv_priv*)dev->priv)->dev); 2015 SET_NETDEV_DEV(dev, priv->dev);
2038 2016
2039 if ((ret = register_netdev(dev))) { 2017 rc = register_netdev(dev);
2040 netiucv_unregister_device((struct device*) 2018 if (rc)
2041 ((struct netiucv_priv*)dev->priv)->dev); 2019 goto out_unreg;
2042 goto out_free_ndev;
2043 }
2044 2020
2045 PRINT_INFO("%s: '%s'\n", dev->name, netiucv_printname(username)); 2021 PRINT_INFO("%s: '%s'\n", dev->name, netiucv_printname(username));
2046 2022
2047 return count; 2023 return count;
2048 2024
2025out_unreg:
2026 netiucv_unregister_device(priv->dev);
2049out_free_ndev: 2027out_free_ndev:
2050 PRINT_WARN("netiucv: Could not register '%s'\n", dev->name); 2028 PRINT_WARN("netiucv: Could not register '%s'\n", dev->name);
2051 IUCV_DBF_TEXT(setup, 2, "conn_write: could not register\n"); 2029 IUCV_DBF_TEXT(setup, 2, "conn_write: could not register\n");
2052 netiucv_free_netdevice(dev); 2030 netiucv_free_netdevice(dev);
2053 return ret; 2031 return rc;
2054} 2032}
2055 2033
2056DRIVER_ATTR(connection, 0200, NULL, conn_write); 2034static DRIVER_ATTR(connection, 0200, NULL, conn_write);
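
user_write() and conn_write() above normalize a z/VM user ID the same way: at most eight characters, alphanumerics and '$' upper-cased, a trailing newline tolerated, anything else rejected, and the result blank-padded to the fixed 8-byte-plus-NUL form that the strncmp(..., 9) duplicate checks rely on. A standalone sketch of that normalization; demo_normalize_userid is an invented name:

#include <ctype.h>
#include <stdio.h>

/* Returns 0 and fills out[9] on success, -1 on an invalid character. */
static int demo_normalize_userid(const char *in, char out[9])
{
        const char *p;
        int i;

        for (i = 0, p = in; i < 8 && *p; i++, p++) {
                if (isalnum((unsigned char)*p) || *p == '$') {
                        out[i] = toupper((unsigned char)*p);
                        continue;
                }
                if (*p == '\n')
                        break;  /* trailing linefeed from "echo", as above */
                return -1;
        }
        while (i < 8)
                out[i++] = ' '; /* blank-pad to fixed width */
        out[8] = '\0';
        return 0;
}

int main(void)
{
        char id[9];

        if (demo_normalize_userid("lnxuser\n", id) == 0)
                printf("'%s'\n", id);   /* prints 'LNXUSER ' */
        return 0;
}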
2057 2035
2058static ssize_t 2036static ssize_t remove_write (struct device_driver *drv,
2059remove_write (struct device_driver *drv, const char *buf, size_t count) 2037 const char *buf, size_t count)
2060{ 2038{
2061 struct iucv_connection **clist = &iucv_conns.iucv_connections; 2039 struct iucv_connection *cp;
2062 unsigned long flags;
2063 struct net_device *ndev; 2040 struct net_device *ndev;
2064 struct netiucv_priv *priv; 2041 struct netiucv_priv *priv;
2065 struct device *dev; 2042 struct device *dev;
2066 char name[IFNAMSIZ]; 2043 char name[IFNAMSIZ];
2067 char *p; 2044 const char *p;
2068 int i; 2045 int i;
2069 2046
2070 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 2047 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
@@ -2072,33 +2049,27 @@ remove_write (struct device_driver *drv, const char *buf, size_t count)
2072 if (count >= IFNAMSIZ) 2049 if (count >= IFNAMSIZ)
2073 count = IFNAMSIZ - 1; 2050 count = IFNAMSIZ - 1;
2074 2051
2075 for (i=0, p=(char *)buf; i<count && *p; i++, p++) { 2052 for (i = 0, p = buf; i < count && *p; i++, p++) {
2076 if ((*p == '\n') || (*p == ' ')) { 2053 if (*p == '\n' || *p == ' ')
2077 /* trailing lf, grr */ 2054 /* trailing lf, grr */
2078 break; 2055 break;
2079 } else { 2056 name[i] = *p;
2080 name[i]=*p;
2081 }
2082 } 2057 }
2083 name[i] = '\0'; 2058 name[i] = '\0';
2084 2059
2085 read_lock_irqsave(&iucv_conns.iucv_rwlock, flags); 2060 read_lock_bh(&iucv_connection_rwlock);
2086 while (*clist) { 2061 list_for_each_entry(cp, &iucv_connection_list, list) {
2087 ndev = (*clist)->netdev; 2062 ndev = cp->netdev;
2088 priv = (struct netiucv_priv*)ndev->priv; 2063 priv = netdev_priv(ndev);
2089 dev = priv->dev; 2064 dev = priv->dev;
2090 2065 if (strncmp(name, ndev->name, count))
2091 if (strncmp(name, ndev->name, count)) { 2066 continue;
2092 clist = &((*clist)->next); 2067 read_unlock_bh(&iucv_connection_rwlock);
2093 continue;
2094 }
2095 read_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags);
2096 if (ndev->flags & (IFF_UP | IFF_RUNNING)) { 2068 if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
2097 PRINT_WARN( 2069 PRINT_WARN("netiucv: net device %s active with peer "
2098 "netiucv: net device %s active with peer %s\n", 2070 "%s\n", ndev->name, priv->conn->userid);
2099 ndev->name, priv->conn->userid);
2100 PRINT_WARN("netiucv: %s cannot be removed\n", 2071 PRINT_WARN("netiucv: %s cannot be removed\n",
2101 ndev->name); 2072 ndev->name);
2102 IUCV_DBF_TEXT(data, 2, "remove_write: still active\n"); 2073 IUCV_DBF_TEXT(data, 2, "remove_write: still active\n");
2103 return -EBUSY; 2074 return -EBUSY;
2104 } 2075 }
@@ -2106,75 +2077,94 @@ remove_write (struct device_driver *drv, const char *buf, size_t count)
2106 netiucv_unregister_device(dev); 2077 netiucv_unregister_device(dev);
2107 return count; 2078 return count;
2108 } 2079 }
2109 read_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags); 2080 read_unlock_bh(&iucv_connection_rwlock);
2110 PRINT_WARN("netiucv: net device %s unknown\n", name); 2081 PRINT_WARN("netiucv: net device %s unknown\n", name);
2111 IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n"); 2082 IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n");
2112 return -EINVAL; 2083 return -EINVAL;
2113} 2084}
2114 2085
2115DRIVER_ATTR(remove, 0200, NULL, remove_write); 2086static DRIVER_ATTR(remove, 0200, NULL, remove_write);
2116 2087
2117static void 2088static struct attribute * netiucv_drv_attrs[] = {
2118netiucv_banner(void) 2089 &driver_attr_connection.attr,
2090 &driver_attr_remove.attr,
2091 NULL,
2092};
2093
2094static struct attribute_group netiucv_drv_attr_group = {
2095 .attrs = netiucv_drv_attrs,
2096};
2097
2098static void netiucv_banner(void)
2119{ 2099{
2120 PRINT_INFO("NETIUCV driver initialized\n"); 2100 PRINT_INFO("NETIUCV driver initialized\n");
2121} 2101}
2122 2102
2123static void __exit 2103static void __exit netiucv_exit(void)
2124netiucv_exit(void)
2125{ 2104{
2105 struct iucv_connection *cp;
2106 struct net_device *ndev;
2107 struct netiucv_priv *priv;
2108 struct device *dev;
2109
2126 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 2110 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
2127 while (iucv_conns.iucv_connections) { 2111 while (!list_empty(&iucv_connection_list)) {
2128 struct net_device *ndev = iucv_conns.iucv_connections->netdev; 2112 cp = list_entry(iucv_connection_list.next,
2129 struct netiucv_priv *priv = (struct netiucv_priv*)ndev->priv; 2113 struct iucv_connection, list);
2130 struct device *dev = priv->dev; 2114 list_del(&cp->list);
2115 ndev = cp->netdev;
2116 priv = netdev_priv(ndev);
2117 dev = priv->dev;
2131 2118
2132 unregister_netdev(ndev); 2119 unregister_netdev(ndev);
2133 netiucv_unregister_device(dev); 2120 netiucv_unregister_device(dev);
2134 } 2121 }
2135 2122
2136 driver_remove_file(&netiucv_driver, &driver_attr_connection); 2123 sysfs_remove_group(&netiucv_driver.kobj, &netiucv_drv_attr_group);
2137 driver_remove_file(&netiucv_driver, &driver_attr_remove);
2138 driver_unregister(&netiucv_driver); 2124 driver_unregister(&netiucv_driver);
2125 iucv_unregister(&netiucv_handler, 1);
2139 iucv_unregister_dbf_views(); 2126 iucv_unregister_dbf_views();
2140 2127
2141 PRINT_INFO("NETIUCV driver unloaded\n"); 2128 PRINT_INFO("NETIUCV driver unloaded\n");
2142 return; 2129 return;
2143} 2130}
2144 2131
2145static int __init 2132static int __init netiucv_init(void)
2146netiucv_init(void)
2147{ 2133{
2148 int ret; 2134 int rc;
2149 2135
2150 ret = iucv_register_dbf_views(); 2136 rc = iucv_register_dbf_views();
2151 if (ret) { 2137 if (rc)
2152 PRINT_WARN("netiucv_init failed, " 2138 goto out;
2153 "iucv_register_dbf_views rc = %d\n", ret); 2139 rc = iucv_register(&netiucv_handler, 1);
2154 return ret; 2140 if (rc)
2155 } 2141 goto out_dbf;
2156 IUCV_DBF_TEXT(trace, 3, __FUNCTION__); 2142 IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
2157 ret = driver_register(&netiucv_driver); 2143 rc = driver_register(&netiucv_driver);
2158 if (ret) { 2144 if (rc) {
2159 PRINT_ERR("NETIUCV: failed to register driver.\n"); 2145 PRINT_ERR("NETIUCV: failed to register driver.\n");
2160 IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", ret); 2146 IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc);
2161 iucv_unregister_dbf_views(); 2147 goto out_iucv;
2162 return ret;
2163 } 2148 }
2164 2149
2165 /* Add entry for specifying connections. */ 2150 rc = sysfs_create_group(&netiucv_driver.kobj, &netiucv_drv_attr_group);
2166 ret = driver_create_file(&netiucv_driver, &driver_attr_connection); 2151 if (rc) {
2167 if (!ret) { 2152 PRINT_ERR("NETIUCV: failed to add driver attributes.\n");
2168 ret = driver_create_file(&netiucv_driver, &driver_attr_remove); 2153 IUCV_DBF_TEXT_(setup, 2,
2169 netiucv_banner(); 2154 "ret %d - netiucv_drv_attr_group\n", rc);
2170 rwlock_init(&iucv_conns.iucv_rwlock); 2155 goto out_driver;
2171 } else {
2172 PRINT_ERR("NETIUCV: failed to add driver attribute.\n");
2173 IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_create_file\n", ret);
2174 driver_unregister(&netiucv_driver);
2175 iucv_unregister_dbf_views();
2176 } 2156 }
2177 return ret; 2157 netiucv_banner();
2158 return rc;
2159
2160out_driver:
2161 driver_unregister(&netiucv_driver);
2162out_iucv:
2163 iucv_unregister(&netiucv_handler, 1);
2164out_dbf:
2165 iucv_unregister_dbf_views();
2166out:
2167 return rc;
2178} 2168}
2179 2169
2180module_init(netiucv_init); 2170module_init(netiucv_init);
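
The structural change running through the netiucv diff above is the list handling: a hand-rolled singly linked chain, walked via clist = &((*clist)->next), becomes a struct list_head guarded by iucv_connection_rwlock and traversed with list_for_each_entry(). The sketch below re-creates just enough of list_head, container_of and the iteration macro in userspace GNU C to show how the traversal works; it is an illustration, not the kernel headers.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct list_head {
        struct list_head *next, *prev;
};

#define LIST_HEAD(name) struct list_head name = { &(name), &(name) }

static void list_add_tail(struct list_head *new, struct list_head *head)
{
        new->prev = head->prev;
        new->next = head;
        head->prev->next = new;
        head->prev = new;
}

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

#define list_for_each_entry(pos, head, member)                              \
        for (pos = container_of((head)->next, __typeof__(*pos), member);    \
             &pos->member != (head);                                        \
             pos = container_of(pos->member.next, __typeof__(*pos), member))

struct demo_connection {
        char userid[9];
        struct list_head list;
};

static LIST_HEAD(demo_connection_list);

int main(void)
{
        struct demo_connection a = { "ALICE   " }, b = { "BOB     " };
        struct demo_connection *cp;

        list_add_tail(&a.list, &demo_connection_list);
        list_add_tail(&b.list, &demo_connection_list);

        /* Duplicate check, as in conn_write() above. */
        list_for_each_entry(cp, &demo_connection_list, list)
                if (!strncmp("BOB     ", cp->userid, 9))
                        printf("connection to %s already exists\n",
                               cp->userid);
        return 0;
}

Since every node embeds its list_head, removal is O(1) via list_del() with no **prev bookkeeping, which is what lets netiucv_remove_connection() shrink the way it does above.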
diff --git a/drivers/s390/net/qeth_eddp.c b/drivers/s390/net/qeth_eddp.c
index 6bb558a9a032..7c735e1fe063 100644
--- a/drivers/s390/net/qeth_eddp.c
+++ b/drivers/s390/net/qeth_eddp.c
@@ -49,7 +49,7 @@ qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *queue,
49 return buffers_needed; 49 return buffers_needed;
50} 50}
51 51
52static inline void 52static void
53qeth_eddp_free_context(struct qeth_eddp_context *ctx) 53qeth_eddp_free_context(struct qeth_eddp_context *ctx)
54{ 54{
55 int i; 55 int i;
@@ -91,7 +91,7 @@ qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *buf)
91 } 91 }
92} 92}
93 93
94static inline int 94static int
95qeth_eddp_buf_ref_context(struct qeth_qdio_out_buffer *buf, 95qeth_eddp_buf_ref_context(struct qeth_qdio_out_buffer *buf,
96 struct qeth_eddp_context *ctx) 96 struct qeth_eddp_context *ctx)
97{ 97{
@@ -196,7 +196,7 @@ out:
196 return flush_cnt; 196 return flush_cnt;
197} 197}
198 198
199static inline void 199static void
200qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx, 200qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx,
201 struct qeth_eddp_data *eddp, int data_len) 201 struct qeth_eddp_data *eddp, int data_len)
202{ 202{
@@ -256,7 +256,7 @@ qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx,
256 ctx->offset += eddp->thl; 256 ctx->offset += eddp->thl;
257} 257}
258 258
259static inline void 259static void
260qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len, 260qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len,
261 __wsum *hcsum) 261 __wsum *hcsum)
262{ 262{
@@ -302,7 +302,7 @@ qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len,
302 } 302 }
303} 303}
304 304
305static inline void 305static void
306qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx, 306qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx,
307 struct qeth_eddp_data *eddp, int data_len, 307 struct qeth_eddp_data *eddp, int data_len,
308 __wsum hcsum) 308 __wsum hcsum)
@@ -349,7 +349,7 @@ qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx,
349 ((struct tcphdr *)eddp->th_in_ctx)->check = csum_fold(hcsum); 349 ((struct tcphdr *)eddp->th_in_ctx)->check = csum_fold(hcsum);
350} 350}
351 351
352static inline __wsum 352static __wsum
353qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp, int data_len) 353qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp, int data_len)
354{ 354{
355 __wsum phcsum; /* pseudo header checksum */ 355 __wsum phcsum; /* pseudo header checksum */
@@ -363,7 +363,7 @@ qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp, int data_len)
363 return csum_partial((u8 *)&eddp->th, eddp->thl, phcsum); 363 return csum_partial((u8 *)&eddp->th, eddp->thl, phcsum);
364} 364}
365 365
366static inline __wsum 366static __wsum
367qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp, int data_len) 367qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp, int data_len)
368{ 368{
369 __be32 proto; 369 __be32 proto;
@@ -381,7 +381,7 @@ qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp, int data_len)
381 return phcsum; 381 return phcsum;
382} 382}
383 383
384static inline struct qeth_eddp_data * 384static struct qeth_eddp_data *
385qeth_eddp_create_eddp_data(struct qeth_hdr *qh, u8 *nh, u8 nhl, u8 *th, u8 thl) 385qeth_eddp_create_eddp_data(struct qeth_hdr *qh, u8 *nh, u8 nhl, u8 *th, u8 thl)
386{ 386{
387 struct qeth_eddp_data *eddp; 387 struct qeth_eddp_data *eddp;
@@ -399,7 +399,7 @@ qeth_eddp_create_eddp_data(struct qeth_hdr *qh, u8 *nh, u8 nhl, u8 *th, u8 thl)
399 return eddp; 399 return eddp;
400} 400}
401 401
402static inline void 402static void
403__qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, 403__qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
404 struct qeth_eddp_data *eddp) 404 struct qeth_eddp_data *eddp)
405{ 405{
@@ -464,7 +464,7 @@ __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
464 } 464 }
465} 465}
466 466
467static inline int 467static int
468qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, 468qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
469 struct sk_buff *skb, struct qeth_hdr *qhdr) 469 struct sk_buff *skb, struct qeth_hdr *qhdr)
470{ 470{
@@ -505,7 +505,7 @@ qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
505 return 0; 505 return 0;
506} 506}
507 507
508static inline void 508static void
509qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx, struct sk_buff *skb, 509qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx, struct sk_buff *skb,
510 int hdr_len) 510 int hdr_len)
511{ 511{
@@ -529,7 +529,7 @@ qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx, struct sk_buff *skb,
529 (skb_shinfo(skb)->gso_segs + 1); 529 (skb_shinfo(skb)->gso_segs + 1);
530} 530}
531 531
532static inline struct qeth_eddp_context * 532static struct qeth_eddp_context *
533qeth_eddp_create_context_generic(struct qeth_card *card, struct sk_buff *skb, 533qeth_eddp_create_context_generic(struct qeth_card *card, struct sk_buff *skb,
534 int hdr_len) 534 int hdr_len)
535{ 535{
@@ -581,7 +581,7 @@ qeth_eddp_create_context_generic(struct qeth_card *card, struct sk_buff *skb,
581 return ctx; 581 return ctx;
582} 582}
583 583
584static inline struct qeth_eddp_context * 584static struct qeth_eddp_context *
585qeth_eddp_create_context_tcp(struct qeth_card *card, struct sk_buff *skb, 585qeth_eddp_create_context_tcp(struct qeth_card *card, struct sk_buff *skb,
586 struct qeth_hdr *qhdr) 586 struct qeth_hdr *qhdr)
587{ 587{
@@ -625,5 +625,3 @@ qeth_eddp_create_context(struct qeth_card *card, struct sk_buff *skb,
625 } 625 }
626 return NULL; 626 return NULL;
627} 627}
628
629
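
The qeth_eddp.c hunks above, and most of the qeth_main.c and qeth_sys.c hunks below, are one mechanical change: static inline becomes plain static. For functions of this size, inline is only a hint that the compiler is free to ignore, and at -O2 gcc already inlines small static functions on its own, so dropping the keyword trims the source without changing the generated code in practice. A trivial illustration (hypothetical function):

#include <stdio.h>

/* Plain "static" suffices: at -O2 a compiler will typically inline this
 * on its own, and for large functions it may decline regardless of any
 * "inline" hint. */
static int demo_frame_len(int payload, int header)
{
        return payload + header;
}

int main(void)
{
        printf("%d\n", demo_frame_len(1400, 14));
        return 0;
}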
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
index d2efa5ff125d..2257e45594b3 100644
--- a/drivers/s390/net/qeth_main.c
+++ b/drivers/s390/net/qeth_main.c
@@ -651,7 +651,7 @@ __qeth_ref_ip_on_card(struct qeth_card *card, struct qeth_ipaddr *todo,
651 return 0; 651 return 0;
652} 652}
653 653
654static inline int 654static int
655__qeth_address_exists_in_list(struct list_head *list, struct qeth_ipaddr *addr, 655__qeth_address_exists_in_list(struct list_head *list, struct qeth_ipaddr *addr,
656 int same_type) 656 int same_type)
657{ 657{
@@ -795,7 +795,7 @@ qeth_add_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
795 return rc; 795 return rc;
796} 796}
797 797
798static inline void 798static void
799__qeth_delete_all_mc(struct qeth_card *card, unsigned long *flags) 799__qeth_delete_all_mc(struct qeth_card *card, unsigned long *flags)
800{ 800{
801 struct qeth_ipaddr *addr, *tmp; 801 struct qeth_ipaddr *addr, *tmp;
@@ -882,7 +882,7 @@ static void qeth_layer2_add_multicast(struct qeth_card *);
882static void qeth_add_multicast_ipv6(struct qeth_card *); 882static void qeth_add_multicast_ipv6(struct qeth_card *);
883#endif 883#endif
884 884
885static inline int 885static int
886qeth_set_thread_start_bit(struct qeth_card *card, unsigned long thread) 886qeth_set_thread_start_bit(struct qeth_card *card, unsigned long thread)
887{ 887{
888 unsigned long flags; 888 unsigned long flags;
@@ -920,7 +920,7 @@ qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread)
920 wake_up(&card->wait_q); 920 wake_up(&card->wait_q);
921} 921}
922 922
923static inline int 923static int
924__qeth_do_run_thread(struct qeth_card *card, unsigned long thread) 924__qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
925{ 925{
926 unsigned long flags; 926 unsigned long flags;
@@ -1764,9 +1764,9 @@ out:
1764 qeth_release_buffer(channel,iob); 1764 qeth_release_buffer(channel,iob);
1765} 1765}
1766 1766
1767static inline void 1767static void
1768qeth_prepare_control_data(struct qeth_card *card, int len, 1768qeth_prepare_control_data(struct qeth_card *card, int len,
1769struct qeth_cmd_buffer *iob) 1769 struct qeth_cmd_buffer *iob)
1770{ 1770{
1771 qeth_setup_ccw(&card->write,iob->data,len); 1771 qeth_setup_ccw(&card->write,iob->data,len);
1772 iob->callback = qeth_release_buffer; 1772 iob->callback = qeth_release_buffer;
@@ -2160,7 +2160,7 @@ qeth_check_qdio_errors(struct qdio_buffer *buf, unsigned int qdio_error,
2160 return 0; 2160 return 0;
2161} 2161}
2162 2162
2163static inline struct sk_buff * 2163static struct sk_buff *
2164qeth_get_skb(unsigned int length, struct qeth_hdr *hdr) 2164qeth_get_skb(unsigned int length, struct qeth_hdr *hdr)
2165{ 2165{
2166 struct sk_buff* skb; 2166 struct sk_buff* skb;
@@ -2179,7 +2179,7 @@ qeth_get_skb(unsigned int length, struct qeth_hdr *hdr)
2179 return skb; 2179 return skb;
2180} 2180}
2181 2181
2182static inline struct sk_buff * 2182static struct sk_buff *
2183qeth_get_next_skb(struct qeth_card *card, struct qdio_buffer *buffer, 2183qeth_get_next_skb(struct qeth_card *card, struct qdio_buffer *buffer,
2184 struct qdio_buffer_element **__element, int *__offset, 2184 struct qdio_buffer_element **__element, int *__offset,
2185 struct qeth_hdr **hdr) 2185 struct qeth_hdr **hdr)
@@ -2264,7 +2264,7 @@ no_mem:
2264 return NULL; 2264 return NULL;
2265} 2265}
2266 2266
2267static inline __be16 2267static __be16
2268qeth_type_trans(struct sk_buff *skb, struct net_device *dev) 2268qeth_type_trans(struct sk_buff *skb, struct net_device *dev)
2269{ 2269{
2270 struct qeth_card *card; 2270 struct qeth_card *card;
@@ -2297,7 +2297,7 @@ qeth_type_trans(struct sk_buff *skb, struct net_device *dev)
2297 return htons(ETH_P_802_2); 2297 return htons(ETH_P_802_2);
2298} 2298}
2299 2299
2300static inline void 2300static void
2301qeth_rebuild_skb_fake_ll_tr(struct qeth_card *card, struct sk_buff *skb, 2301qeth_rebuild_skb_fake_ll_tr(struct qeth_card *card, struct sk_buff *skb,
2302 struct qeth_hdr *hdr) 2302 struct qeth_hdr *hdr)
2303{ 2303{
@@ -2351,7 +2351,7 @@ qeth_rebuild_skb_fake_ll_tr(struct qeth_card *card, struct sk_buff *skb,
2351 fake_llc->ethertype = ETH_P_IP; 2351 fake_llc->ethertype = ETH_P_IP;
2352} 2352}
2353 2353
2354static inline void 2354static void
2355qeth_rebuild_skb_fake_ll_eth(struct qeth_card *card, struct sk_buff *skb, 2355qeth_rebuild_skb_fake_ll_eth(struct qeth_card *card, struct sk_buff *skb,
2356 struct qeth_hdr *hdr) 2356 struct qeth_hdr *hdr)
2357{ 2357{
@@ -2420,7 +2420,7 @@ qeth_layer2_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
2420 *((__u32 *)skb->cb) = ++card->seqno.pkt_seqno; 2420 *((__u32 *)skb->cb) = ++card->seqno.pkt_seqno;
2421} 2421}
2422 2422
2423static inline __u16 2423static __u16
2424qeth_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, 2424qeth_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
2425 struct qeth_hdr *hdr) 2425 struct qeth_hdr *hdr)
2426{ 2426{
@@ -2476,7 +2476,7 @@ qeth_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
2476 return vlan_id; 2476 return vlan_id;
2477} 2477}
2478 2478
2479static inline void 2479static void
2480qeth_process_inbound_buffer(struct qeth_card *card, 2480qeth_process_inbound_buffer(struct qeth_card *card,
2481 struct qeth_qdio_buffer *buf, int index) 2481 struct qeth_qdio_buffer *buf, int index)
2482{ 2482{
@@ -2528,7 +2528,7 @@ qeth_process_inbound_buffer(struct qeth_card *card,
2528 } 2528 }
2529} 2529}
2530 2530
2531static inline struct qeth_buffer_pool_entry * 2531static struct qeth_buffer_pool_entry *
2532qeth_get_buffer_pool_entry(struct qeth_card *card) 2532qeth_get_buffer_pool_entry(struct qeth_card *card)
2533{ 2533{
2534 struct qeth_buffer_pool_entry *entry; 2534 struct qeth_buffer_pool_entry *entry;
@@ -2543,7 +2543,7 @@ qeth_get_buffer_pool_entry(struct qeth_card *card)
2543 return NULL; 2543 return NULL;
2544} 2544}
2545 2545
2546static inline void 2546static void
2547qeth_init_input_buffer(struct qeth_card *card, struct qeth_qdio_buffer *buf) 2547qeth_init_input_buffer(struct qeth_card *card, struct qeth_qdio_buffer *buf)
2548{ 2548{
2549 struct qeth_buffer_pool_entry *pool_entry; 2549 struct qeth_buffer_pool_entry *pool_entry;
@@ -2570,7 +2570,7 @@ qeth_init_input_buffer(struct qeth_card *card, struct qeth_qdio_buffer *buf)
2570 buf->state = QETH_QDIO_BUF_EMPTY; 2570 buf->state = QETH_QDIO_BUF_EMPTY;
2571} 2571}
2572 2572
2573static inline void 2573static void
2574qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, 2574qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
2575 struct qeth_qdio_out_buffer *buf) 2575 struct qeth_qdio_out_buffer *buf)
2576{ 2576{
@@ -2595,7 +2595,7 @@ qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
2595 atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY); 2595 atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
2596} 2596}
2597 2597
2598static inline void 2598static void
2599qeth_queue_input_buffer(struct qeth_card *card, int index) 2599qeth_queue_input_buffer(struct qeth_card *card, int index)
2600{ 2600{
2601 struct qeth_qdio_q *queue = card->qdio.in_q; 2601 struct qeth_qdio_q *queue = card->qdio.in_q;
@@ -2699,7 +2699,7 @@ qeth_qdio_input_handler(struct ccw_device * ccwdev, unsigned int status,
2699 card->perf_stats.inbound_start_time; 2699 card->perf_stats.inbound_start_time;
2700} 2700}
2701 2701
2702static inline int 2702static int
2703qeth_handle_send_error(struct qeth_card *card, 2703qeth_handle_send_error(struct qeth_card *card,
2704 struct qeth_qdio_out_buffer *buffer, 2704 struct qeth_qdio_out_buffer *buffer,
2705 unsigned int qdio_err, unsigned int siga_err) 2705 unsigned int qdio_err, unsigned int siga_err)
@@ -2821,7 +2821,7 @@ qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int,
2821 * Switch to packing state if the number of used buffers on a queue 2821 * Switch to packing state if the number of used buffers on a queue
2822 * reaches a certain limit. 2822 * reaches a certain limit.
2823 */ 2823 */
2824static inline void 2824static void
2825qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue) 2825qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
2826{ 2826{
2827 if (!queue->do_pack) { 2827 if (!queue->do_pack) {
@@ -2842,7 +2842,7 @@ qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
2842 * In that case 1 is returned to inform the caller. If no buffer 2842 * In that case 1 is returned to inform the caller. If no buffer
2843 * has to be flushed, zero is returned. 2843 * has to be flushed, zero is returned.
2844 */ 2844 */
2845static inline int 2845static int
2846qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue) 2846qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
2847{ 2847{
2848 struct qeth_qdio_out_buffer *buffer; 2848 struct qeth_qdio_out_buffer *buffer;
@@ -2877,7 +2877,7 @@ qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
2877 * Checks if there is a packing buffer and prepares it to be flushed. 2877 * Checks if there is a packing buffer and prepares it to be flushed.
2878 * In that case returns 1, otherwise zero. 2878 * In that case returns 1, otherwise zero.
2879 */ 2879 */
2880static inline int 2880static int
2881qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue) 2881qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue)
2882{ 2882{
2883 struct qeth_qdio_out_buffer *buffer; 2883 struct qeth_qdio_out_buffer *buffer;
@@ -2894,7 +2894,7 @@ qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue)
2894 return 0; 2894 return 0;
2895} 2895}
2896 2896
2897static inline void 2897static void
2898qeth_check_outbound_queue(struct qeth_qdio_out_q *queue) 2898qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
2899{ 2899{
2900 int index; 2900 int index;
@@ -3594,7 +3594,7 @@ qeth_fake_header(struct sk_buff *skb, struct net_device *dev,
3594 } 3594 }
3595} 3595}
3596 3596
3597static inline int 3597static int
3598qeth_send_packet(struct qeth_card *, struct sk_buff *); 3598qeth_send_packet(struct qeth_card *, struct sk_buff *);
3599 3599
3600static int 3600static int
@@ -3759,7 +3759,7 @@ qeth_stop(struct net_device *dev)
3759 return 0; 3759 return 0;
3760} 3760}
3761 3761
3762static inline int 3762static int
3763qeth_get_cast_type(struct qeth_card *card, struct sk_buff *skb) 3763qeth_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
3764{ 3764{
3765 int cast_type = RTN_UNSPEC; 3765 int cast_type = RTN_UNSPEC;
@@ -3806,7 +3806,7 @@ qeth_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
3806 return cast_type; 3806 return cast_type;
3807} 3807}
3808 3808
3809static inline int 3809static int
3810qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb, 3810qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
3811 int ipv, int cast_type) 3811 int ipv, int cast_type)
3812{ 3812{
@@ -3853,7 +3853,7 @@ qeth_get_ip_version(struct sk_buff *skb)
3853 } 3853 }
3854} 3854}
3855 3855
3856static inline struct qeth_hdr * 3856static struct qeth_hdr *
3857__qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb, int ipv) 3857__qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb, int ipv)
3858{ 3858{
3859#ifdef CONFIG_QETH_VLAN 3859#ifdef CONFIG_QETH_VLAN
@@ -3882,14 +3882,14 @@ __qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb, int ipv)
3882 qeth_push_skb(card, skb, sizeof(struct qeth_hdr))); 3882 qeth_push_skb(card, skb, sizeof(struct qeth_hdr)));
3883} 3883}
3884 3884
3885static inline void 3885static void
3886__qeth_free_new_skb(struct sk_buff *orig_skb, struct sk_buff *new_skb) 3886__qeth_free_new_skb(struct sk_buff *orig_skb, struct sk_buff *new_skb)
3887{ 3887{
3888 if (orig_skb != new_skb) 3888 if (orig_skb != new_skb)
3889 dev_kfree_skb_any(new_skb); 3889 dev_kfree_skb_any(new_skb);
3890} 3890}
3891 3891
3892static inline struct sk_buff * 3892static struct sk_buff *
3893qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb, 3893qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb,
3894 struct qeth_hdr **hdr, int ipv) 3894 struct qeth_hdr **hdr, int ipv)
3895{ 3895{
@@ -3940,7 +3940,7 @@ qeth_get_qeth_hdr_flags6(int cast_type)
3940 return ct | QETH_CAST_UNICAST; 3940 return ct | QETH_CAST_UNICAST;
3941} 3941}
3942 3942
3943static inline void 3943static void
3944qeth_layer2_get_packet_type(struct qeth_card *card, struct qeth_hdr *hdr, 3944qeth_layer2_get_packet_type(struct qeth_card *card, struct qeth_hdr *hdr,
3945 struct sk_buff *skb) 3945 struct sk_buff *skb)
3946{ 3946{
@@ -3977,7 +3977,7 @@ qeth_layer2_get_packet_type(struct qeth_card *card, struct qeth_hdr *hdr,
3977 } 3977 }
3978} 3978}
3979 3979
3980static inline void 3980static void
3981qeth_layer2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, 3981qeth_layer2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
3982 struct sk_buff *skb, int cast_type) 3982 struct sk_buff *skb, int cast_type)
3983{ 3983{
@@ -4068,7 +4068,7 @@ qeth_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
4068 } 4068 }
4069} 4069}
4070 4070
4071static inline void 4071static void
4072__qeth_fill_buffer(struct sk_buff *skb, struct qdio_buffer *buffer, 4072__qeth_fill_buffer(struct sk_buff *skb, struct qdio_buffer *buffer,
4073 int is_tso, int *next_element_to_fill) 4073 int is_tso, int *next_element_to_fill)
4074{ 4074{
@@ -4112,7 +4112,7 @@ __qeth_fill_buffer(struct sk_buff *skb, struct qdio_buffer *buffer,
4112 *next_element_to_fill = element; 4112 *next_element_to_fill = element;
4113} 4113}
4114 4114
4115static inline int 4115static int
4116qeth_fill_buffer(struct qeth_qdio_out_q *queue, 4116qeth_fill_buffer(struct qeth_qdio_out_q *queue,
4117 struct qeth_qdio_out_buffer *buf, 4117 struct qeth_qdio_out_buffer *buf,
4118 struct sk_buff *skb) 4118 struct sk_buff *skb)
@@ -4171,7 +4171,7 @@ qeth_fill_buffer(struct qeth_qdio_out_q *queue,
4171 return flush_cnt; 4171 return flush_cnt;
4172} 4172}
4173 4173
4174static inline int 4174static int
4175qeth_do_send_packet_fast(struct qeth_card *card, struct qeth_qdio_out_q *queue, 4175qeth_do_send_packet_fast(struct qeth_card *card, struct qeth_qdio_out_q *queue,
4176 struct sk_buff *skb, struct qeth_hdr *hdr, 4176 struct sk_buff *skb, struct qeth_hdr *hdr,
4177 int elements_needed, 4177 int elements_needed,
@@ -4222,7 +4222,7 @@ out:
4222 return -EBUSY; 4222 return -EBUSY;
4223} 4223}
4224 4224
4225static inline int 4225static int
4226qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, 4226qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
4227 struct sk_buff *skb, struct qeth_hdr *hdr, 4227 struct sk_buff *skb, struct qeth_hdr *hdr,
4228 int elements_needed, struct qeth_eddp_context *ctx) 4228 int elements_needed, struct qeth_eddp_context *ctx)
@@ -4328,7 +4328,7 @@ out:
4328 return rc; 4328 return rc;
4329} 4329}
4330 4330
4331static inline int 4331static int
4332qeth_get_elements_no(struct qeth_card *card, void *hdr, 4332qeth_get_elements_no(struct qeth_card *card, void *hdr,
4333 struct sk_buff *skb, int elems) 4333 struct sk_buff *skb, int elems)
4334{ 4334{
@@ -4349,7 +4349,7 @@ qeth_get_elements_no(struct qeth_card *card, void *hdr,
4349} 4349}
4350 4350
4351 4351
4352static inline int 4352static int
4353qeth_send_packet(struct qeth_card *card, struct sk_buff *skb) 4353qeth_send_packet(struct qeth_card *card, struct sk_buff *skb)
4354{ 4354{
4355 int ipv = 0; 4355 int ipv = 0;
@@ -4536,7 +4536,7 @@ qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
4536} 4536}
4537 4537
4538 4538
4539static inline const char * 4539static const char *
4540qeth_arp_get_error_cause(int *rc) 4540qeth_arp_get_error_cause(int *rc)
4541{ 4541{
4542 switch (*rc) { 4542 switch (*rc) {
@@ -4597,7 +4597,7 @@ qeth_arp_set_no_entries(struct qeth_card *card, int no_entries)
4597 return rc; 4597 return rc;
4598} 4598}
4599 4599
4600static inline void 4600static void
4601qeth_copy_arp_entries_stripped(struct qeth_arp_query_info *qinfo, 4601qeth_copy_arp_entries_stripped(struct qeth_arp_query_info *qinfo,
4602 struct qeth_arp_query_data *qdata, 4602 struct qeth_arp_query_data *qdata,
4603 int entry_size, int uentry_size) 4603 int entry_size, int uentry_size)
@@ -5214,7 +5214,7 @@ qeth_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
5214 spin_unlock_irqrestore(&card->vlanlock, flags); 5214 spin_unlock_irqrestore(&card->vlanlock, flags);
5215} 5215}
5216 5216
5217static inline void 5217static void
5218qeth_free_vlan_buffer(struct qeth_card *card, struct qeth_qdio_out_buffer *buf, 5218qeth_free_vlan_buffer(struct qeth_card *card, struct qeth_qdio_out_buffer *buf,
5219 unsigned short vid) 5219 unsigned short vid)
5220{ 5220{
@@ -5625,7 +5625,7 @@ qeth_delete_mc_addresses(struct qeth_card *card)
5625 spin_unlock_irqrestore(&card->ip_lock, flags); 5625 spin_unlock_irqrestore(&card->ip_lock, flags);
5626} 5626}
5627 5627
5628static inline void 5628static void
5629qeth_add_mc(struct qeth_card *card, struct in_device *in4_dev) 5629qeth_add_mc(struct qeth_card *card, struct in_device *in4_dev)
5630{ 5630{
5631 struct qeth_ipaddr *ipm; 5631 struct qeth_ipaddr *ipm;
@@ -5711,7 +5711,7 @@ qeth_layer2_add_multicast(struct qeth_card *card)
5711} 5711}
5712 5712
5713#ifdef CONFIG_QETH_IPV6 5713#ifdef CONFIG_QETH_IPV6
5714static inline void 5714static void
5715qeth_add_mc6(struct qeth_card *card, struct inet6_dev *in6_dev) 5715qeth_add_mc6(struct qeth_card *card, struct inet6_dev *in6_dev)
5716{ 5716{
5717 struct qeth_ipaddr *ipm; 5717 struct qeth_ipaddr *ipm;
@@ -6022,7 +6022,7 @@ qeth_send_setdelmc(struct qeth_card *card, struct qeth_ipaddr *addr, int ipacmd)
6022 6022
6023 return rc; 6023 return rc;
6024} 6024}
6025static inline void 6025static void
6026qeth_fill_netmask(u8 *netmask, unsigned int len) 6026qeth_fill_netmask(u8 *netmask, unsigned int len)
6027{ 6027{
6028 int i,j; 6028 int i,j;
@@ -6626,7 +6626,7 @@ qeth_send_setadp_mode(struct qeth_card *card, __u32 command, __u32 mode)
6626 return rc; 6626 return rc;
6627} 6627}
6628 6628
6629static inline int 6629static int
6630qeth_setadapter_hstr(struct qeth_card *card) 6630qeth_setadapter_hstr(struct qeth_card *card)
6631{ 6631{
6632 int rc; 6632 int rc;
@@ -6889,7 +6889,7 @@ qeth_send_simple_setassparms(struct qeth_card *card,
6889 return rc; 6889 return rc;
6890} 6890}
6891 6891
6892static inline int 6892static int
6893qeth_start_ipa_arp_processing(struct qeth_card *card) 6893qeth_start_ipa_arp_processing(struct qeth_card *card)
6894{ 6894{
6895 int rc; 6895 int rc;
@@ -7529,7 +7529,7 @@ qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
7529 wake_up(&card->wait_q); 7529 wake_up(&card->wait_q);
7530} 7530}
7531 7531
7532static inline int 7532static int
7533qeth_threads_running(struct qeth_card *card, unsigned long threads) 7533qeth_threads_running(struct qeth_card *card, unsigned long threads)
7534{ 7534{
7535 unsigned long flags; 7535 unsigned long flags;
@@ -8118,7 +8118,7 @@ qeth_del_ipato_entry(struct qeth_card *card, enum qeth_prot_versions proto,
8118 spin_unlock_irqrestore(&card->ip_lock, flags); 8118 spin_unlock_irqrestore(&card->ip_lock, flags);
8119} 8119}
8120 8120
8121static inline void 8121static void
8122qeth_convert_addr_to_bits(u8 *addr, u8 *bits, int len) 8122qeth_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
8123{ 8123{
8124 int i, j; 8124 int i, j;
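
The qeth hunks above drop "inline" from a long list of static helpers. With plain "static", gcc still inlines small functions on its own at -O2, but it is no longer forced to duplicate large bodies such as qeth_send_packet() into every caller, which shrinks the object code. A minimal sketch of the effect, with illustrative names that are not from the driver:

/* tiny helper: gcc inlines this with or without the keyword */
static int demo_small(int x)
{
	return 2 * x;
}

/* large helper: plain "static" lets the compiler keep a single
 * out-of-line copy instead of one per call site */
static int demo_sum(const int *buf, int len)
{
	int i, sum = 0;

	for (i = 0; i < len; i++)
		sum += demo_small(buf[i]);
	return sum;
}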
diff --git a/drivers/s390/net/qeth_sys.c b/drivers/s390/net/qeth_sys.c
index 5836737ac58f..d518419cd0c6 100644
--- a/drivers/s390/net/qeth_sys.c
+++ b/drivers/s390/net/qeth_sys.c
@@ -328,7 +328,7 @@ qeth_dev_bufcnt_store(struct device *dev, struct device_attribute *attr, const c
328static DEVICE_ATTR(buffer_count, 0644, qeth_dev_bufcnt_show, 328static DEVICE_ATTR(buffer_count, 0644, qeth_dev_bufcnt_show,
329 qeth_dev_bufcnt_store); 329 qeth_dev_bufcnt_store);
330 330
331static inline ssize_t 331static ssize_t
332qeth_dev_route_show(struct qeth_card *card, struct qeth_routing_info *route, 332qeth_dev_route_show(struct qeth_card *card, struct qeth_routing_info *route,
333 char *buf) 333 char *buf)
334{ 334{
@@ -368,7 +368,7 @@ qeth_dev_route4_show(struct device *dev, struct device_attribute *attr, char *bu
368 return qeth_dev_route_show(card, &card->options.route4, buf); 368 return qeth_dev_route_show(card, &card->options.route4, buf);
369} 369}
370 370
371static inline ssize_t 371static ssize_t
372qeth_dev_route_store(struct qeth_card *card, struct qeth_routing_info *route, 372qeth_dev_route_store(struct qeth_card *card, struct qeth_routing_info *route,
373 enum qeth_prot_versions prot, const char *buf, size_t count) 373 enum qeth_prot_versions prot, const char *buf, size_t count)
374{ 374{
@@ -998,7 +998,7 @@ struct device_attribute dev_attr_##_id = { \
998 .store = _store, \ 998 .store = _store, \
999}; 999};
1000 1000
1001int 1001static int
1002qeth_check_layer2(struct qeth_card *card) 1002qeth_check_layer2(struct qeth_card *card)
1003{ 1003{
1004 if (card->options.layer2) 1004 if (card->options.layer2)
@@ -1100,7 +1100,7 @@ static QETH_DEVICE_ATTR(ipato_invert4, invert4, 0644,
1100 qeth_dev_ipato_invert4_show, 1100 qeth_dev_ipato_invert4_show,
1101 qeth_dev_ipato_invert4_store); 1101 qeth_dev_ipato_invert4_store);
1102 1102
1103static inline ssize_t 1103static ssize_t
1104qeth_dev_ipato_add_show(char *buf, struct qeth_card *card, 1104qeth_dev_ipato_add_show(char *buf, struct qeth_card *card,
1105 enum qeth_prot_versions proto) 1105 enum qeth_prot_versions proto)
1106{ 1106{
@@ -1146,7 +1146,7 @@ qeth_dev_ipato_add4_show(struct device *dev, struct device_attribute *attr, char
1146 return qeth_dev_ipato_add_show(buf, card, QETH_PROT_IPV4); 1146 return qeth_dev_ipato_add_show(buf, card, QETH_PROT_IPV4);
1147} 1147}
1148 1148
1149static inline int 1149static int
1150qeth_parse_ipatoe(const char* buf, enum qeth_prot_versions proto, 1150qeth_parse_ipatoe(const char* buf, enum qeth_prot_versions proto,
1151 u8 *addr, int *mask_bits) 1151 u8 *addr, int *mask_bits)
1152{ 1152{
@@ -1178,7 +1178,7 @@ qeth_parse_ipatoe(const char* buf, enum qeth_prot_versions proto,
1178 return 0; 1178 return 0;
1179} 1179}
1180 1180
1181static inline ssize_t 1181static ssize_t
1182qeth_dev_ipato_add_store(const char *buf, size_t count, 1182qeth_dev_ipato_add_store(const char *buf, size_t count,
1183 struct qeth_card *card, enum qeth_prot_versions proto) 1183 struct qeth_card *card, enum qeth_prot_versions proto)
1184{ 1184{
@@ -1223,7 +1223,7 @@ static QETH_DEVICE_ATTR(ipato_add4, add4, 0644,
1223 qeth_dev_ipato_add4_show, 1223 qeth_dev_ipato_add4_show,
1224 qeth_dev_ipato_add4_store); 1224 qeth_dev_ipato_add4_store);
1225 1225
1226static inline ssize_t 1226static ssize_t
1227qeth_dev_ipato_del_store(const char *buf, size_t count, 1227qeth_dev_ipato_del_store(const char *buf, size_t count,
1228 struct qeth_card *card, enum qeth_prot_versions proto) 1228 struct qeth_card *card, enum qeth_prot_versions proto)
1229{ 1229{
@@ -1361,7 +1361,7 @@ static struct attribute_group qeth_device_ipato_group = {
1361 .attrs = (struct attribute **)qeth_ipato_device_attrs, 1361 .attrs = (struct attribute **)qeth_ipato_device_attrs,
1362}; 1362};
1363 1363
1364static inline ssize_t 1364static ssize_t
1365qeth_dev_vipa_add_show(char *buf, struct qeth_card *card, 1365qeth_dev_vipa_add_show(char *buf, struct qeth_card *card,
1366 enum qeth_prot_versions proto) 1366 enum qeth_prot_versions proto)
1367{ 1367{
@@ -1407,7 +1407,7 @@ qeth_dev_vipa_add4_show(struct device *dev, struct device_attribute *attr, char
1407 return qeth_dev_vipa_add_show(buf, card, QETH_PROT_IPV4); 1407 return qeth_dev_vipa_add_show(buf, card, QETH_PROT_IPV4);
1408} 1408}
1409 1409
1410static inline int 1410static int
1411qeth_parse_vipae(const char* buf, enum qeth_prot_versions proto, 1411qeth_parse_vipae(const char* buf, enum qeth_prot_versions proto,
1412 u8 *addr) 1412 u8 *addr)
1413{ 1413{
@@ -1418,7 +1418,7 @@ qeth_parse_vipae(const char* buf, enum qeth_prot_versions proto,
1418 return 0; 1418 return 0;
1419} 1419}
1420 1420
1421static inline ssize_t 1421static ssize_t
1422qeth_dev_vipa_add_store(const char *buf, size_t count, 1422qeth_dev_vipa_add_store(const char *buf, size_t count,
1423 struct qeth_card *card, enum qeth_prot_versions proto) 1423 struct qeth_card *card, enum qeth_prot_versions proto)
1424{ 1424{
@@ -1451,7 +1451,7 @@ static QETH_DEVICE_ATTR(vipa_add4, add4, 0644,
1451 qeth_dev_vipa_add4_show, 1451 qeth_dev_vipa_add4_show,
1452 qeth_dev_vipa_add4_store); 1452 qeth_dev_vipa_add4_store);
1453 1453
1454static inline ssize_t 1454static ssize_t
1455qeth_dev_vipa_del_store(const char *buf, size_t count, 1455qeth_dev_vipa_del_store(const char *buf, size_t count,
1456 struct qeth_card *card, enum qeth_prot_versions proto) 1456 struct qeth_card *card, enum qeth_prot_versions proto)
1457{ 1457{
@@ -1542,7 +1542,7 @@ static struct attribute_group qeth_device_vipa_group = {
1542 .attrs = (struct attribute **)qeth_vipa_device_attrs, 1542 .attrs = (struct attribute **)qeth_vipa_device_attrs,
1543}; 1543};
1544 1544
1545static inline ssize_t 1545static ssize_t
1546qeth_dev_rxip_add_show(char *buf, struct qeth_card *card, 1546qeth_dev_rxip_add_show(char *buf, struct qeth_card *card,
1547 enum qeth_prot_versions proto) 1547 enum qeth_prot_versions proto)
1548{ 1548{
@@ -1588,7 +1588,7 @@ qeth_dev_rxip_add4_show(struct device *dev, struct device_attribute *attr, char
1588 return qeth_dev_rxip_add_show(buf, card, QETH_PROT_IPV4); 1588 return qeth_dev_rxip_add_show(buf, card, QETH_PROT_IPV4);
1589} 1589}
1590 1590
1591static inline int 1591static int
1592qeth_parse_rxipe(const char* buf, enum qeth_prot_versions proto, 1592qeth_parse_rxipe(const char* buf, enum qeth_prot_versions proto,
1593 u8 *addr) 1593 u8 *addr)
1594{ 1594{
@@ -1599,7 +1599,7 @@ qeth_parse_rxipe(const char* buf, enum qeth_prot_versions proto,
1599 return 0; 1599 return 0;
1600} 1600}
1601 1601
1602static inline ssize_t 1602static ssize_t
1603qeth_dev_rxip_add_store(const char *buf, size_t count, 1603qeth_dev_rxip_add_store(const char *buf, size_t count,
1604 struct qeth_card *card, enum qeth_prot_versions proto) 1604 struct qeth_card *card, enum qeth_prot_versions proto)
1605{ 1605{
@@ -1632,7 +1632,7 @@ static QETH_DEVICE_ATTR(rxip_add4, add4, 0644,
1632 qeth_dev_rxip_add4_show, 1632 qeth_dev_rxip_add4_show,
1633 qeth_dev_rxip_add4_store); 1633 qeth_dev_rxip_add4_store);
1634 1634
1635static inline ssize_t 1635static ssize_t
1636qeth_dev_rxip_del_store(const char *buf, size_t count, 1636qeth_dev_rxip_del_store(const char *buf, size_t count,
1637 struct qeth_card *card, enum qeth_prot_versions proto) 1637 struct qeth_card *card, enum qeth_prot_versions proto)
1638{ 1638{
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c
index b8179c27ceb6..3ccca5871fdf 100644
--- a/drivers/s390/net/smsgiucv.c
+++ b/drivers/s390/net/smsgiucv.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * IUCV special message driver 2 * IUCV special message driver
3 * 3 *
4 * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation 4 * Copyright 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
5 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) 5 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
@@ -23,10 +23,10 @@
23#include <linux/init.h> 23#include <linux/init.h>
24#include <linux/errno.h> 24#include <linux/errno.h>
25#include <linux/device.h> 25#include <linux/device.h>
26#include <net/iucv/iucv.h>
26#include <asm/cpcmd.h> 27#include <asm/cpcmd.h>
27#include <asm/ebcdic.h> 28#include <asm/ebcdic.h>
28 29#include "smsgiucv.h"
29#include "iucv.h"
30 30
31struct smsg_callback { 31struct smsg_callback {
32 struct list_head list; 32 struct list_head list;
@@ -39,38 +39,46 @@ MODULE_AUTHOR
39 ("(C) 2003 IBM Corporation by Martin Schwidefsky (schwidefsky@de.ibm.com)"); 39 ("(C) 2003 IBM Corporation by Martin Schwidefsky (schwidefsky@de.ibm.com)");
40MODULE_DESCRIPTION ("Linux for S/390 IUCV special message driver"); 40MODULE_DESCRIPTION ("Linux for S/390 IUCV special message driver");
41 41
42static iucv_handle_t smsg_handle; 42static struct iucv_path *smsg_path;
43static unsigned short smsg_pathid; 43
44static DEFINE_SPINLOCK(smsg_list_lock); 44static DEFINE_SPINLOCK(smsg_list_lock);
45static struct list_head smsg_list = LIST_HEAD_INIT(smsg_list); 45static struct list_head smsg_list = LIST_HEAD_INIT(smsg_list);
46 46
47static void 47static int smsg_path_pending(struct iucv_path *, u8 ipvmid[8], u8 ipuser[16]);
48smsg_connection_complete(iucv_ConnectionComplete *eib, void *pgm_data) 48static void smsg_message_pending(struct iucv_path *, struct iucv_message *);
49
50static struct iucv_handler smsg_handler = {
51 .path_pending = smsg_path_pending,
52 .message_pending = smsg_message_pending,
53};
54
55static int smsg_path_pending(struct iucv_path *path, u8 ipvmid[8],
56 u8 ipuser[16])
49{ 57{
58 if (strncmp(ipvmid, "*MSG ", sizeof(ipvmid)) != 0)
59 return -EINVAL;
60 /* Path pending from *MSG. */
61 return iucv_path_accept(path, &smsg_handler, "SMSGIUCV ", NULL);
50} 62}
51 63
52 64static void smsg_message_pending(struct iucv_path *path,
53static void 65 struct iucv_message *msg)
54smsg_message_pending(iucv_MessagePending *eib, void *pgm_data)
55{ 66{
56 struct smsg_callback *cb; 67 struct smsg_callback *cb;
57 unsigned char *msg; 68 unsigned char *buffer;
58 unsigned char sender[9]; 69 unsigned char sender[9];
59 unsigned short len;
60 int rc, i; 70 int rc, i;
61 71
62 len = eib->ln1msg2.ipbfln1f; 72 buffer = kmalloc(msg->length + 1, GFP_ATOMIC | GFP_DMA);
63 msg = kmalloc(len + 1, GFP_ATOMIC|GFP_DMA); 73 if (!buffer) {
64 if (!msg) { 74 iucv_message_reject(path, msg);
65 iucv_reject(eib->ippathid, eib->ipmsgid, eib->iptrgcls);
66 return; 75 return;
67 } 76 }
68 rc = iucv_receive(eib->ippathid, eib->ipmsgid, eib->iptrgcls, 77 rc = iucv_message_receive(path, msg, 0, buffer, msg->length, NULL);
69 msg, len, NULL, NULL, NULL);
70 if (rc == 0) { 78 if (rc == 0) {
71 msg[len] = 0; 79 buffer[msg->length] = 0;
72 EBCASC(msg, len); 80 EBCASC(buffer, msg->length);
73 memcpy(sender, msg, 8); 81 memcpy(sender, buffer, 8);
74 sender[8] = 0; 82 sender[8] = 0;
75 /* Remove trailing whitespace from the sender name. */ 83 /* Remove trailing whitespace from the sender name. */
76 for (i = 7; i >= 0; i--) { 84 for (i = 7; i >= 0; i--) {
@@ -80,27 +88,17 @@ smsg_message_pending(iucv_MessagePending *eib, void *pgm_data)
80 } 88 }
81 spin_lock(&smsg_list_lock); 89 spin_lock(&smsg_list_lock);
82 list_for_each_entry(cb, &smsg_list, list) 90 list_for_each_entry(cb, &smsg_list, list)
83 if (strncmp(msg + 8, cb->prefix, cb->len) == 0) { 91 if (strncmp(buffer + 8, cb->prefix, cb->len) == 0) {
84 cb->callback(sender, msg + 8); 92 cb->callback(sender, buffer + 8);
85 break; 93 break;
86 } 94 }
87 spin_unlock(&smsg_list_lock); 95 spin_unlock(&smsg_list_lock);
88 } 96 }
89 kfree(msg); 97 kfree(buffer);
90} 98}
91 99
92static iucv_interrupt_ops_t smsg_ops = { 100int smsg_register_callback(char *prefix,
93 .ConnectionComplete = smsg_connection_complete, 101 void (*callback)(char *from, char *str))
94 .MessagePending = smsg_message_pending,
95};
96
97static struct device_driver smsg_driver = {
98 .name = "SMSGIUCV",
99 .bus = &iucv_bus,
100};
101
102int
103smsg_register_callback(char *prefix, void (*callback)(char *from, char *str))
104{ 102{
105 struct smsg_callback *cb; 103 struct smsg_callback *cb;
106 104
@@ -110,18 +108,18 @@ smsg_register_callback(char *prefix, void (*callback)(char *from, char *str))
110 cb->prefix = prefix; 108 cb->prefix = prefix;
111 cb->len = strlen(prefix); 109 cb->len = strlen(prefix);
112 cb->callback = callback; 110 cb->callback = callback;
113 spin_lock(&smsg_list_lock); 111 spin_lock_bh(&smsg_list_lock);
114 list_add_tail(&cb->list, &smsg_list); 112 list_add_tail(&cb->list, &smsg_list);
115 spin_unlock(&smsg_list_lock); 113 spin_unlock_bh(&smsg_list_lock);
116 return 0; 114 return 0;
117} 115}
118 116
119void 117void smsg_unregister_callback(char *prefix,
120smsg_unregister_callback(char *prefix, void (*callback)(char *from, char *str)) 118 void (*callback)(char *from, char *str))
121{ 119{
122 struct smsg_callback *cb, *tmp; 120 struct smsg_callback *cb, *tmp;
123 121
124 spin_lock(&smsg_list_lock); 122 spin_lock_bh(&smsg_list_lock);
125 cb = NULL; 123 cb = NULL;
126 list_for_each_entry(tmp, &smsg_list, list) 124 list_for_each_entry(tmp, &smsg_list, list)
127 if (tmp->callback == callback && 125 if (tmp->callback == callback &&
@@ -130,55 +128,58 @@ smsg_unregister_callback(char *prefix, void (*callback)(char *from, char *str))
130 list_del(&cb->list); 128 list_del(&cb->list);
131 break; 129 break;
132 } 130 }
133 spin_unlock(&smsg_list_lock); 131 spin_unlock_bh(&smsg_list_lock);
134 kfree(cb); 132 kfree(cb);
135} 133}
136 134
137static void __exit 135static struct device_driver smsg_driver = {
138smsg_exit(void) 136 .name = "SMSGIUCV",
137 .bus = &iucv_bus,
138};
139
140static void __exit smsg_exit(void)
139{ 141{
140 if (smsg_handle > 0) { 142 cpcmd("SET SMSG IUCV", NULL, 0, NULL);
141 cpcmd("SET SMSG OFF", NULL, 0, NULL); 143 iucv_unregister(&smsg_handler, 1);
142 iucv_sever(smsg_pathid, NULL); 144 driver_unregister(&smsg_driver);
143 iucv_unregister_program(smsg_handle);
144 driver_unregister(&smsg_driver);
145 }
146 return;
147} 145}
148 146
149static int __init 147static int __init smsg_init(void)
150smsg_init(void)
151{ 148{
152 static unsigned char pgmmask[24] = {
153 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
154 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
155 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
156 };
157 int rc; 149 int rc;
158 150
159 rc = driver_register(&smsg_driver); 151 rc = driver_register(&smsg_driver);
160 if (rc != 0) { 152 if (rc != 0)
161 printk(KERN_ERR "SMSGIUCV: failed to register driver.\n"); 153 goto out;
162 return rc; 154 rc = iucv_register(&smsg_handler, 1);
163 } 155 if (rc) {
164 smsg_handle = iucv_register_program("SMSGIUCV ", "*MSG ",
165 pgmmask, &smsg_ops, NULL);
166 if (!smsg_handle) {
167 printk(KERN_ERR "SMSGIUCV: failed to register to iucv"); 156 printk(KERN_ERR "SMSGIUCV: failed to register to iucv");
168 driver_unregister(&smsg_driver); 157 rc = -EIO; /* better errno ? */
169 return -EIO; /* better errno ? */ 158 goto out_driver;
159 }
160 smsg_path = iucv_path_alloc(255, 0, GFP_KERNEL);
161 if (!smsg_path) {
162 rc = -ENOMEM;
163 goto out_register;
170 } 164 }
171 rc = iucv_connect (&smsg_pathid, 255, NULL, "*MSG ", NULL, 0, 165 rc = iucv_path_connect(smsg_path, &smsg_handler, "*MSG ",
172 NULL, NULL, smsg_handle, NULL); 166 NULL, NULL, NULL);
173 if (rc) { 167 if (rc) {
174 printk(KERN_ERR "SMSGIUCV: failed to connect to *MSG"); 168 printk(KERN_ERR "SMSGIUCV: failed to connect to *MSG");
175 iucv_unregister_program(smsg_handle); 169 rc = -EIO; /* better errno ? */
176 driver_unregister(&smsg_driver); 170 goto out_free;
177 smsg_handle = NULL;
178 return -EIO;
179 } 171 }
180 cpcmd("SET SMSG IUCV", NULL, 0, NULL); 172 cpcmd("SET SMSG IUCV", NULL, 0, NULL);
181 return 0; 173 return 0;
174
175out_free:
176 iucv_path_free(smsg_path);
177out_register:
178 iucv_unregister(&smsg_handler, 1);
179out_driver:
180 driver_unregister(&smsg_driver);
181out:
182 return rc;
182} 183}
183 184
184module_init(smsg_init); 185module_init(smsg_init);
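
The smsgiucv conversion above replaces the old iucv_register_program()/iucv_connect() interface with the rewritten in-kernel IUCV API: a struct iucv_handler holds the callbacks, paths become explicit iucv_path objects, and data is fetched with iucv_message_receive(). A minimal sketch of the same pattern, using only the iucv_* calls that appear in the patch; the demo_* names are hypothetical:

#include <linux/init.h>
#include <linux/errno.h>
#include <net/iucv/iucv.h>

static int demo_path_pending(struct iucv_path *, u8 [8], u8 [16]);

static struct iucv_handler demo_handler = {
	.path_pending	= demo_path_pending,
};

static struct iucv_path *demo_path;

/* accept every path offered to this handler */
static int demo_path_pending(struct iucv_path *path, u8 ipvmid[8],
			     u8 ipuser[16])
{
	return iucv_path_accept(path, &demo_handler, NULL, NULL);
}

static int __init demo_init(void)
{
	int rc;

	rc = iucv_register(&demo_handler, 1);	/* second arg as in the patch */
	if (rc)
		return rc;
	demo_path = iucv_path_alloc(255, 0, GFP_KERNEL);  /* msglim 255 */
	if (!demo_path) {
		iucv_unregister(&demo_handler, 1);
		return -ENOMEM;
	}
	rc = iucv_path_connect(demo_path, &demo_handler, "*MSG    ",
			       NULL, NULL, NULL);
	if (rc) {
		iucv_path_free(demo_path);
		iucv_unregister(&demo_handler, 1);
	}
	return rc;
}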
diff --git a/drivers/s390/s390mach.c b/drivers/s390/s390mach.c
index e088b5e28711..806bb1a921eb 100644
--- a/drivers/s390/s390mach.c
+++ b/drivers/s390/s390mach.c
@@ -13,22 +13,18 @@
13#include <linux/errno.h> 13#include <linux/errno.h>
14#include <linux/workqueue.h> 14#include <linux/workqueue.h>
15#include <linux/time.h> 15#include <linux/time.h>
16#include <linux/device.h>
16#include <linux/kthread.h> 17#include <linux/kthread.h>
17 18#include <asm/etr.h>
18#include <asm/lowcore.h> 19#include <asm/lowcore.h>
19 20#include <asm/cio.h>
21#include "cio/cio.h"
22#include "cio/chsc.h"
23#include "cio/css.h"
20#include "s390mach.h" 24#include "s390mach.h"
21 25
22static struct semaphore m_sem; 26static struct semaphore m_sem;
23 27
24extern int css_process_crw(int, int);
25extern int chsc_process_crw(void);
26extern int chp_process_crw(int, int);
27extern void css_reiterate_subchannels(void);
28
29extern struct workqueue_struct *slow_path_wq;
30extern struct work_struct slow_path_work;
31
32static NORET_TYPE void 28static NORET_TYPE void
33s390_handle_damage(char *msg) 29s390_handle_damage(char *msg)
34{ 30{
@@ -470,6 +466,19 @@ s390_do_machine_check(struct pt_regs *regs)
470 s390_handle_damage("unable to revalidate registers."); 466 s390_handle_damage("unable to revalidate registers.");
471 } 467 }
472 468
469 if (mci->cd) {
470 /* Timing facility damage */
471 s390_handle_damage("TOD clock damaged");
472 }
473
474 if (mci->ed && mci->ec) {
475 /* External damage */
476 if (S390_lowcore.external_damage_code & (1U << ED_ETR_SYNC))
477 etr_sync_check();
478 if (S390_lowcore.external_damage_code & (1U << ED_ETR_SWITCH))
479 etr_switch_to_local();
480 }
481
473 if (mci->se) 482 if (mci->se)
474 /* Storage error uncorrected */ 483 /* Storage error uncorrected */
475 s390_handle_damage("received storage error uncorrected " 484 s390_handle_damage("received storage error uncorrected "
@@ -508,7 +517,7 @@ static int
508machine_check_init(void) 517machine_check_init(void)
509{ 518{
510 init_MUTEX_LOCKED(&m_sem); 519 init_MUTEX_LOCKED(&m_sem);
511 ctl_clear_bit(14, 25); /* disable external damage MCH */ 520 ctl_set_bit(14, 25); /* enable external damage MCH */
512 ctl_set_bit(14, 27); /* enable system recovery MCH */ 521 ctl_set_bit(14, 27); /* enable system recovery MCH */
513#ifdef CONFIG_MACHCHK_WARNING 522#ifdef CONFIG_MACHCHK_WARNING
514 ctl_set_bit(14, 24); /* enable warning MCH */ 523 ctl_set_bit(14, 24); /* enable warning MCH */
@@ -529,7 +538,11 @@ arch_initcall(machine_check_init);
529static int __init 538static int __init
530machine_check_crw_init (void) 539machine_check_crw_init (void)
531{ 540{
532 kthread_run(s390_collect_crw_info, &m_sem, "kmcheck"); 541 struct task_struct *task;
542
543 task = kthread_run(s390_collect_crw_info, &m_sem, "kmcheck");
544 if (IS_ERR(task))
545 return PTR_ERR(task);
533 ctl_set_bit(14, 28); /* enable channel report MCH */ 546 ctl_set_bit(14, 28); /* enable channel report MCH */
534 return 0; 547 return 0;
535} 548}
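
machine_check_crw_init() now checks the result of kthread_run() instead of assuming the kmcheck thread always starts. kthread_run() reports failure as an ERR_PTR()-encoded pointer, never NULL, so the test must use IS_ERR()/PTR_ERR(). The same pattern in isolation (thread function and name are hypothetical):

#include <linux/kthread.h>
#include <linux/err.h>

static int demo_start(int (*fn)(void *), void *data)
{
	struct task_struct *task;

	task = kthread_run(fn, data, "demo_worker");
	if (IS_ERR(task))		/* ERR_PTR(-errno), not NULL */
		return PTR_ERR(task);
	return 0;			/* thread is running */
}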
diff --git a/drivers/s390/s390mach.h b/drivers/s390/s390mach.h
index 7abb42a09ae2..d3ca4281a494 100644
--- a/drivers/s390/s390mach.h
+++ b/drivers/s390/s390mach.h
@@ -102,4 +102,7 @@ static inline int stcrw(struct crw *pcrw )
102 return ccode; 102 return ccode;
103} 103}
104 104
105#define ED_ETR_SYNC 12 /* External damage ETR sync check */
106#define ED_ETR_SWITCH 13 /* External damage ETR switch to local */
107
105#endif /* __s390mach */ 108#endif /* __s390mach */
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 85093b71f9fa..39a885266790 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -47,13 +47,12 @@ static int __init zfcp_module_init(void);
47static void zfcp_ns_gid_pn_handler(unsigned long); 47static void zfcp_ns_gid_pn_handler(unsigned long);
48 48
49/* miscellaneous */ 49/* miscellaneous */
50static inline int zfcp_sg_list_alloc(struct zfcp_sg_list *, size_t); 50static int zfcp_sg_list_alloc(struct zfcp_sg_list *, size_t);
51static inline void zfcp_sg_list_free(struct zfcp_sg_list *); 51static void zfcp_sg_list_free(struct zfcp_sg_list *);
52static inline int zfcp_sg_list_copy_from_user(struct zfcp_sg_list *, 52static int zfcp_sg_list_copy_from_user(struct zfcp_sg_list *,
53 void __user *, size_t); 53 void __user *, size_t);
54static inline int zfcp_sg_list_copy_to_user(void __user *, 54static int zfcp_sg_list_copy_to_user(void __user *,
55 struct zfcp_sg_list *, size_t); 55 struct zfcp_sg_list *, size_t);
56
57static long zfcp_cfdc_dev_ioctl(struct file *, unsigned int, unsigned long); 56static long zfcp_cfdc_dev_ioctl(struct file *, unsigned int, unsigned long);
58 57
59#define ZFCP_CFDC_IOC_MAGIC 0xDD 58#define ZFCP_CFDC_IOC_MAGIC 0xDD
@@ -605,7 +604,7 @@ zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command,
605 * elements of the scatter-gather list. The maximum size of a single element 604 * elements of the scatter-gather list. The maximum size of a single element
606 * in the scatter-gather list is PAGE_SIZE. 605 * in the scatter-gather list is PAGE_SIZE.
607 */ 606 */
608static inline int 607static int
609zfcp_sg_list_alloc(struct zfcp_sg_list *sg_list, size_t size) 608zfcp_sg_list_alloc(struct zfcp_sg_list *sg_list, size_t size)
610{ 609{
611 struct scatterlist *sg; 610 struct scatterlist *sg;
@@ -652,7 +651,7 @@ zfcp_sg_list_alloc(struct zfcp_sg_list *sg_list, size_t size)
652 * Memory for each element in the scatter-gather list is freed. 651 * Memory for each element in the scatter-gather list is freed.
653 * Finally sg_list->sg is freed itself and sg_list->count is reset. 652 * Finally sg_list->sg is freed itself and sg_list->count is reset.
654 */ 653 */
655static inline void 654static void
656zfcp_sg_list_free(struct zfcp_sg_list *sg_list) 655zfcp_sg_list_free(struct zfcp_sg_list *sg_list)
657{ 656{
658 struct scatterlist *sg; 657 struct scatterlist *sg;
@@ -697,7 +696,7 @@ zfcp_sg_size(struct scatterlist *sg, unsigned int sg_count)
697 * @size: number of bytes to be copied 696 * @size: number of bytes to be copied
698 * Return: 0 on success, -EFAULT if copy_from_user fails. 697 * Return: 0 on success, -EFAULT if copy_from_user fails.
699 */ 698 */
700static inline int 699static int
701zfcp_sg_list_copy_from_user(struct zfcp_sg_list *sg_list, 700zfcp_sg_list_copy_from_user(struct zfcp_sg_list *sg_list,
702 void __user *user_buffer, 701 void __user *user_buffer,
703 size_t size) 702 size_t size)
@@ -735,7 +734,7 @@ zfcp_sg_list_copy_from_user(struct zfcp_sg_list *sg_list,
735 * @size: number of bytes to be copied 734 * @size: number of bytes to be copied
736 * Return: 0 on success, -EFAULT if copy_to_user fails 735 * Return: 0 on success, -EFAULT if copy_to_user fails
737 */ 736 */
738static inline int 737static int
739zfcp_sg_list_copy_to_user(void __user *user_buffer, 738zfcp_sg_list_copy_to_user(void __user *user_buffer,
740 struct zfcp_sg_list *sg_list, 739 struct zfcp_sg_list *sg_list,
741 size_t size) 740 size_t size)
@@ -1799,7 +1798,7 @@ static const struct zfcp_rc_entry zfcp_p_rjt_rc[] = {
1799 * @code: reason code 1798 * @code: reason code
1800 * @rc_table: table of reason codes and descriptions 1799 * @rc_table: table of reason codes and descriptions
1801 */ 1800 */
1802static inline const char * 1801static const char *
1803zfcp_rc_description(u8 code, const struct zfcp_rc_entry *rc_table) 1802zfcp_rc_description(u8 code, const struct zfcp_rc_entry *rc_table)
1804{ 1803{
1805 const char *descr = "unknown reason code"; 1804 const char *descr = "unknown reason code";
@@ -1847,7 +1846,7 @@ zfcp_check_ct_response(struct ct_hdr *rjt)
1847 * @rjt_par: reject parameter acc. to FC-PH/FC-FS 1846 * @rjt_par: reject parameter acc. to FC-PH/FC-FS
1848 * @rc_table: table of reason codes and descriptions 1847 * @rc_table: table of reason codes and descriptions
1849 */ 1848 */
1850static inline void 1849static void
1851zfcp_print_els_rjt(struct zfcp_ls_rjt_par *rjt_par, 1850zfcp_print_els_rjt(struct zfcp_ls_rjt_par *rjt_par,
1852 const struct zfcp_rc_entry *rc_table) 1851 const struct zfcp_rc_entry *rc_table)
1853{ 1852{
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 0aa3b1ac76af..d8191d115c14 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -31,7 +31,7 @@ MODULE_PARM_DESC(dbfsize,
31 31
32#define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER 32#define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER
33 33
34static inline int 34static int
35zfcp_dbf_stck(char *out_buf, const char *label, unsigned long long stck) 35zfcp_dbf_stck(char *out_buf, const char *label, unsigned long long stck)
36{ 36{
37 unsigned long long sec; 37 unsigned long long sec;
@@ -106,7 +106,7 @@ zfcp_dbf_view_dump(char *out_buf, const char *label,
106 return len; 106 return len;
107} 107}
108 108
109static inline int 109static int
110zfcp_dbf_view_header(debug_info_t * id, struct debug_view *view, int area, 110zfcp_dbf_view_header(debug_info_t * id, struct debug_view *view, int area,
111 debug_entry_t * entry, char *out_buf) 111 debug_entry_t * entry, char *out_buf)
112{ 112{
@@ -130,7 +130,7 @@ zfcp_dbf_view_header(debug_info_t * id, struct debug_view *view, int area,
130 return len; 130 return len;
131} 131}
132 132
133inline void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req) 133void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req)
134{ 134{
135 struct zfcp_adapter *adapter = fsf_req->adapter; 135 struct zfcp_adapter *adapter = fsf_req->adapter;
136 struct fsf_qtcb *qtcb = fsf_req->qtcb; 136 struct fsf_qtcb *qtcb = fsf_req->qtcb;
@@ -241,7 +241,7 @@ inline void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req)
241 spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags); 241 spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags);
242} 242}
243 243
244inline void 244void
245zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter, 245zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter,
246 struct fsf_status_read_buffer *status_buffer) 246 struct fsf_status_read_buffer *status_buffer)
247{ 247{
@@ -295,7 +295,7 @@ zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter,
295 spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags); 295 spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags);
296} 296}
297 297
298inline void 298void
299zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, unsigned int status, 299zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, unsigned int status,
300 unsigned int qdio_error, unsigned int siga_error, 300 unsigned int qdio_error, unsigned int siga_error,
301 int sbal_index, int sbal_count) 301 int sbal_index, int sbal_count)
@@ -316,7 +316,7 @@ zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, unsigned int status,
316 spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags); 316 spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags);
317} 317}
318 318
319static inline int 319static int
320zfcp_hba_dbf_view_response(char *out_buf, 320zfcp_hba_dbf_view_response(char *out_buf,
321 struct zfcp_hba_dbf_record_response *rec) 321 struct zfcp_hba_dbf_record_response *rec)
322{ 322{
@@ -403,7 +403,7 @@ zfcp_hba_dbf_view_response(char *out_buf,
403 return len; 403 return len;
404} 404}
405 405
406static inline int 406static int
407zfcp_hba_dbf_view_status(char *out_buf, struct zfcp_hba_dbf_record_status *rec) 407zfcp_hba_dbf_view_status(char *out_buf, struct zfcp_hba_dbf_record_status *rec)
408{ 408{
409 int len = 0; 409 int len = 0;
@@ -424,7 +424,7 @@ zfcp_hba_dbf_view_status(char *out_buf, struct zfcp_hba_dbf_record_status *rec)
424 return len; 424 return len;
425} 425}
426 426
427static inline int 427static int
428zfcp_hba_dbf_view_qdio(char *out_buf, struct zfcp_hba_dbf_record_qdio *rec) 428zfcp_hba_dbf_view_qdio(char *out_buf, struct zfcp_hba_dbf_record_qdio *rec)
429{ 429{
430 int len = 0; 430 int len = 0;
@@ -469,7 +469,7 @@ zfcp_hba_dbf_view_format(debug_info_t * id, struct debug_view *view,
469 return len; 469 return len;
470} 470}
471 471
472struct debug_view zfcp_hba_dbf_view = { 472static struct debug_view zfcp_hba_dbf_view = {
473 "structured", 473 "structured",
474 NULL, 474 NULL,
475 &zfcp_dbf_view_header, 475 &zfcp_dbf_view_header,
@@ -478,7 +478,7 @@ struct debug_view zfcp_hba_dbf_view = {
478 NULL 478 NULL
479}; 479};
480 480
481inline void 481void
482_zfcp_san_dbf_event_common_ct(const char *tag, struct zfcp_fsf_req *fsf_req, 482_zfcp_san_dbf_event_common_ct(const char *tag, struct zfcp_fsf_req *fsf_req,
483 u32 s_id, u32 d_id, void *buffer, int buflen) 483 u32 s_id, u32 d_id, void *buffer, int buflen)
484{ 484{
@@ -519,7 +519,7 @@ _zfcp_san_dbf_event_common_ct(const char *tag, struct zfcp_fsf_req *fsf_req,
519 spin_unlock_irqrestore(&adapter->san_dbf_lock, flags); 519 spin_unlock_irqrestore(&adapter->san_dbf_lock, flags);
520} 520}
521 521
522inline void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req) 522void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req)
523{ 523{
524 struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data; 524 struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data;
525 struct zfcp_port *port = ct->port; 525 struct zfcp_port *port = ct->port;
@@ -531,7 +531,7 @@ inline void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req)
531 ct->req->length); 531 ct->req->length);
532} 532}
533 533
534inline void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req) 534void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req)
535{ 535{
536 struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data; 536 struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data;
537 struct zfcp_port *port = ct->port; 537 struct zfcp_port *port = ct->port;
@@ -543,7 +543,7 @@ inline void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req)
543 ct->resp->length); 543 ct->resp->length);
544} 544}
545 545
546static inline void 546static void
547_zfcp_san_dbf_event_common_els(const char *tag, int level, 547_zfcp_san_dbf_event_common_els(const char *tag, int level,
548 struct zfcp_fsf_req *fsf_req, u32 s_id, 548 struct zfcp_fsf_req *fsf_req, u32 s_id,
549 u32 d_id, u8 ls_code, void *buffer, int buflen) 549 u32 d_id, u8 ls_code, void *buffer, int buflen)
@@ -585,7 +585,7 @@ _zfcp_san_dbf_event_common_els(const char *tag, int level,
585 spin_unlock_irqrestore(&adapter->san_dbf_lock, flags); 585 spin_unlock_irqrestore(&adapter->san_dbf_lock, flags);
586} 586}
587 587
588inline void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *fsf_req) 588void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *fsf_req)
589{ 589{
590 struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data; 590 struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data;
591 591
@@ -597,7 +597,7 @@ inline void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *fsf_req)
597 els->req->length); 597 els->req->length);
598} 598}
599 599
600inline void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *fsf_req) 600void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *fsf_req)
601{ 601{
602 struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data; 602 struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data;
603 603
@@ -608,7 +608,7 @@ inline void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *fsf_req)
608 els->resp->length); 608 els->resp->length);
609} 609}
610 610
611inline void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *fsf_req) 611void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *fsf_req)
612{ 612{
613 struct zfcp_adapter *adapter = fsf_req->adapter; 613 struct zfcp_adapter *adapter = fsf_req->adapter;
614 struct fsf_status_read_buffer *status_buffer = 614 struct fsf_status_read_buffer *status_buffer =
@@ -693,7 +693,7 @@ zfcp_san_dbf_view_format(debug_info_t * id, struct debug_view *view,
693 return len; 693 return len;
694} 694}
695 695
696struct debug_view zfcp_san_dbf_view = { 696static struct debug_view zfcp_san_dbf_view = {
697 "structured", 697 "structured",
698 NULL, 698 NULL,
699 &zfcp_dbf_view_header, 699 &zfcp_dbf_view_header,
@@ -702,7 +702,7 @@ struct debug_view zfcp_san_dbf_view = {
702 NULL 702 NULL
703}; 703};
704 704
705static inline void 705static void
706_zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level, 706_zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level,
707 struct zfcp_adapter *adapter, 707 struct zfcp_adapter *adapter,
708 struct scsi_cmnd *scsi_cmnd, 708 struct scsi_cmnd *scsi_cmnd,
@@ -786,7 +786,7 @@ _zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level,
786 spin_unlock_irqrestore(&adapter->scsi_dbf_lock, flags); 786 spin_unlock_irqrestore(&adapter->scsi_dbf_lock, flags);
787} 787}
788 788
789inline void 789void
790zfcp_scsi_dbf_event_result(const char *tag, int level, 790zfcp_scsi_dbf_event_result(const char *tag, int level,
791 struct zfcp_adapter *adapter, 791 struct zfcp_adapter *adapter,
792 struct scsi_cmnd *scsi_cmnd, 792 struct scsi_cmnd *scsi_cmnd,
@@ -796,7 +796,7 @@ zfcp_scsi_dbf_event_result(const char *tag, int level,
796 adapter, scsi_cmnd, fsf_req, 0); 796 adapter, scsi_cmnd, fsf_req, 0);
797} 797}
798 798
799inline void 799void
800zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter, 800zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter,
801 struct scsi_cmnd *scsi_cmnd, 801 struct scsi_cmnd *scsi_cmnd,
802 struct zfcp_fsf_req *new_fsf_req, 802 struct zfcp_fsf_req *new_fsf_req,
@@ -806,7 +806,7 @@ zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter,
806 adapter, scsi_cmnd, new_fsf_req, old_req_id); 806 adapter, scsi_cmnd, new_fsf_req, old_req_id);
807} 807}
808 808
809inline void 809void
810zfcp_scsi_dbf_event_devreset(const char *tag, u8 flag, struct zfcp_unit *unit, 810zfcp_scsi_dbf_event_devreset(const char *tag, u8 flag, struct zfcp_unit *unit,
811 struct scsi_cmnd *scsi_cmnd) 811 struct scsi_cmnd *scsi_cmnd)
812{ 812{
@@ -884,7 +884,7 @@ zfcp_scsi_dbf_view_format(debug_info_t * id, struct debug_view *view,
884 return len; 884 return len;
885} 885}
886 886
887struct debug_view zfcp_scsi_dbf_view = { 887static struct debug_view zfcp_scsi_dbf_view = {
888 "structured", 888 "structured",
889 NULL, 889 NULL,
890 &zfcp_dbf_view_header, 890 &zfcp_dbf_view_header,
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 755b754dec60..421da1e7c0ea 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -200,7 +200,7 @@ void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req, unsigned long timeout)
200 * returns: 0 - initiated action successfully 200 * returns: 0 - initiated action successfully
201 * <0 - failed to initiate action 201 * <0 - failed to initiate action
202 */ 202 */
203int 203static int
204zfcp_erp_adapter_reopen_internal(struct zfcp_adapter *adapter, int clear_mask) 204zfcp_erp_adapter_reopen_internal(struct zfcp_adapter *adapter, int clear_mask)
205{ 205{
206 int retval; 206 int retval;
@@ -295,7 +295,7 @@ zfcp_erp_unit_shutdown(struct zfcp_unit *unit, int clear_mask)
295 * zfcp_erp_adisc - send ADISC ELS command 295 * zfcp_erp_adisc - send ADISC ELS command
296 * @port: port structure 296 * @port: port structure
297 */ 297 */
298int 298static int
299zfcp_erp_adisc(struct zfcp_port *port) 299zfcp_erp_adisc(struct zfcp_port *port)
300{ 300{
301 struct zfcp_adapter *adapter = port->adapter; 301 struct zfcp_adapter *adapter = port->adapter;
@@ -380,7 +380,7 @@ zfcp_erp_adisc(struct zfcp_port *port)
380 * 380 *
381 * If ADISC failed (LS_RJT or timed out) forced reopen of the port is triggered. 381 * If ADISC failed (LS_RJT or timed out) forced reopen of the port is triggered.
382 */ 382 */
383void 383static void
384zfcp_erp_adisc_handler(unsigned long data) 384zfcp_erp_adisc_handler(unsigned long data)
385{ 385{
386 struct zfcp_send_els *send_els; 386 struct zfcp_send_els *send_els;
@@ -3135,7 +3135,6 @@ zfcp_erp_action_cleanup(int action, struct zfcp_adapter *adapter,
3135 break; 3135 break;
3136 case ZFCP_ERP_ACTION_REOPEN_ADAPTER: 3136 case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
3137 if (result != ZFCP_ERP_SUCCEEDED) { 3137 if (result != ZFCP_ERP_SUCCEEDED) {
3138 struct zfcp_port *port;
3139 list_for_each_entry(port, &adapter->port_list_head, list) 3138 list_for_each_entry(port, &adapter->port_list_head, list)
3140 if (port->rport && 3139 if (port->rport &&
3141 !atomic_test_mask(ZFCP_STATUS_PORT_WKA, 3140 !atomic_test_mask(ZFCP_STATUS_PORT_WKA,
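
The zfcp_erp hunk above deletes a block-local "struct zfcp_port *port" declaration; zfcp_erp_action_cleanup() evidently already has a port in scope (presumably its port argument), which the inner declaration shadowed. A generic sketch of the hazard the cleanup avoids, with hypothetical names:

#include <linux/kernel.h>
#include <linux/list.h>

struct demo_port {
	struct list_head list;
};

static void demo_cleanup(struct list_head *head, struct demo_port *port)
{
	{
		struct demo_port *port;	/* shadows the parameter... */

		list_for_each_entry(port, head, list)
			pr_debug("visiting %p\n", port);
	}
	/* ...while the caller's "port" is untouched by the block above */
}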
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 7ec8e352b1fe..01386ac688a2 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -119,8 +119,8 @@ extern int zfcp_adapter_scsi_register(struct zfcp_adapter *);
119extern void zfcp_adapter_scsi_unregister(struct zfcp_adapter *); 119extern void zfcp_adapter_scsi_unregister(struct zfcp_adapter *);
120extern void zfcp_set_fcp_dl(struct fcp_cmnd_iu *, fcp_dl_t); 120extern void zfcp_set_fcp_dl(struct fcp_cmnd_iu *, fcp_dl_t);
121extern char *zfcp_get_fcp_rsp_info_ptr(struct fcp_rsp_iu *); 121extern char *zfcp_get_fcp_rsp_info_ptr(struct fcp_rsp_iu *);
122extern void set_host_byte(u32 *, char); 122extern void set_host_byte(int *, char);
123extern void set_driver_byte(u32 *, char); 123extern void set_driver_byte(int *, char);
124extern char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *); 124extern char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *);
125extern fcp_dl_t zfcp_get_fcp_dl(struct fcp_cmnd_iu *); 125extern fcp_dl_t zfcp_get_fcp_dl(struct fcp_cmnd_iu *);
126 126
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index eabf86bb13f5..ef16f7ca4bb1 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -4560,7 +4560,7 @@ zfcp_fsf_req_sbal_check(unsigned long *flags,
4560/* 4560/*
4561 * set qtcb pointer in fsf_req and initialize QTCB 4561 * set qtcb pointer in fsf_req and initialize QTCB
4562 */ 4562 */
4563static inline void 4563static void
4564zfcp_fsf_req_qtcb_init(struct zfcp_fsf_req *fsf_req) 4564zfcp_fsf_req_qtcb_init(struct zfcp_fsf_req *fsf_req)
4565{ 4565{
4566 if (likely(fsf_req->qtcb != NULL)) { 4566 if (likely(fsf_req->qtcb != NULL)) {
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index dbd9f48e863e..1e12a78e8edd 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -21,22 +21,22 @@
21 21
22#include "zfcp_ext.h" 22#include "zfcp_ext.h"
23 23
24static inline void zfcp_qdio_sbal_limit(struct zfcp_fsf_req *, int); 24static void zfcp_qdio_sbal_limit(struct zfcp_fsf_req *, int);
25static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_get 25static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_get
26 (struct zfcp_qdio_queue *, int, int); 26 (struct zfcp_qdio_queue *, int, int);
27static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_resp 27static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_resp
28 (struct zfcp_fsf_req *, int, int); 28 (struct zfcp_fsf_req *, int, int);
29static inline volatile struct qdio_buffer_element *zfcp_qdio_sbal_chain 29static volatile struct qdio_buffer_element *zfcp_qdio_sbal_chain
30 (struct zfcp_fsf_req *, unsigned long); 30 (struct zfcp_fsf_req *, unsigned long);
31static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_next 31static volatile struct qdio_buffer_element *zfcp_qdio_sbale_next
32 (struct zfcp_fsf_req *, unsigned long); 32 (struct zfcp_fsf_req *, unsigned long);
33static inline int zfcp_qdio_sbals_zero(struct zfcp_qdio_queue *, int, int); 33static int zfcp_qdio_sbals_zero(struct zfcp_qdio_queue *, int, int);
34static inline int zfcp_qdio_sbals_wipe(struct zfcp_fsf_req *); 34static inline int zfcp_qdio_sbals_wipe(struct zfcp_fsf_req *);
35static inline void zfcp_qdio_sbale_fill 35static void zfcp_qdio_sbale_fill
36 (struct zfcp_fsf_req *, unsigned long, void *, int); 36 (struct zfcp_fsf_req *, unsigned long, void *, int);
37static inline int zfcp_qdio_sbals_from_segment 37static int zfcp_qdio_sbals_from_segment
38 (struct zfcp_fsf_req *, unsigned long, void *, unsigned long); 38 (struct zfcp_fsf_req *, unsigned long, void *, unsigned long);
39static inline int zfcp_qdio_sbals_from_buffer 39static int zfcp_qdio_sbals_from_buffer
40 (struct zfcp_fsf_req *, unsigned long, void *, unsigned long, int); 40 (struct zfcp_fsf_req *, unsigned long, void *, unsigned long, int);
41 41
42static qdio_handler_t zfcp_qdio_request_handler; 42static qdio_handler_t zfcp_qdio_request_handler;
@@ -201,7 +201,7 @@ zfcp_qdio_allocate(struct zfcp_adapter *adapter)
201 * returns: error flag 201 * returns: error flag
202 * 202 *
203 */ 203 */
204static inline int 204static int
205zfcp_qdio_handler_error_check(struct zfcp_adapter *adapter, unsigned int status, 205zfcp_qdio_handler_error_check(struct zfcp_adapter *adapter, unsigned int status,
206 unsigned int qdio_error, unsigned int siga_error, 206 unsigned int qdio_error, unsigned int siga_error,
207 int first_element, int elements_processed) 207 int first_element, int elements_processed)
@@ -462,7 +462,7 @@ zfcp_qdio_sbale_get(struct zfcp_qdio_queue *queue, int sbal, int sbale)
462 * zfcp_qdio_sbale_req - return pointer to SBALE of request_queue for 462 * zfcp_qdio_sbale_req - return pointer to SBALE of request_queue for
463 * a struct zfcp_fsf_req 463 * a struct zfcp_fsf_req
464 */ 464 */
465inline volatile struct qdio_buffer_element * 465volatile struct qdio_buffer_element *
466zfcp_qdio_sbale_req(struct zfcp_fsf_req *fsf_req, int sbal, int sbale) 466zfcp_qdio_sbale_req(struct zfcp_fsf_req *fsf_req, int sbal, int sbale)
467{ 467{
468 return zfcp_qdio_sbale_get(&fsf_req->adapter->request_queue, 468 return zfcp_qdio_sbale_get(&fsf_req->adapter->request_queue,
@@ -484,7 +484,7 @@ zfcp_qdio_sbale_resp(struct zfcp_fsf_req *fsf_req, int sbal, int sbale)
484 * zfcp_qdio_sbale_curr - return current SBALE on request_queue for 484 * zfcp_qdio_sbale_curr - return current SBALE on request_queue for
485 * a struct zfcp_fsf_req 485 * a struct zfcp_fsf_req
486 */ 486 */
487inline volatile struct qdio_buffer_element * 487volatile struct qdio_buffer_element *
488zfcp_qdio_sbale_curr(struct zfcp_fsf_req *fsf_req) 488zfcp_qdio_sbale_curr(struct zfcp_fsf_req *fsf_req)
489{ 489{
490 return zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 490 return zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr,
@@ -499,7 +499,7 @@ zfcp_qdio_sbale_curr(struct zfcp_fsf_req *fsf_req)
499 * 499 *
500 * Note: We can assume at least one free SBAL in the request_queue when called. 500 * Note: We can assume at least one free SBAL in the request_queue when called.
501 */ 501 */
502static inline void 502static void
503zfcp_qdio_sbal_limit(struct zfcp_fsf_req *fsf_req, int max_sbals) 503zfcp_qdio_sbal_limit(struct zfcp_fsf_req *fsf_req, int max_sbals)
504{ 504{
505 int count = atomic_read(&fsf_req->adapter->request_queue.free_count); 505 int count = atomic_read(&fsf_req->adapter->request_queue.free_count);
@@ -517,7 +517,7 @@ zfcp_qdio_sbal_limit(struct zfcp_fsf_req *fsf_req, int max_sbals)
517 * 517 *
518 * This function changes sbal_curr, sbale_curr, sbal_number of fsf_req. 518 * This function changes sbal_curr, sbale_curr, sbal_number of fsf_req.
519 */ 519 */
520static inline volatile struct qdio_buffer_element * 520static volatile struct qdio_buffer_element *
521zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) 521zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
522{ 522{
523 volatile struct qdio_buffer_element *sbale; 523 volatile struct qdio_buffer_element *sbale;
@@ -554,7 +554,7 @@ zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
554/** 554/**
555 * zfcp_qdio_sbale_next - switch to next SBALE, chain SBALs if needed 555 * zfcp_qdio_sbale_next - switch to next SBALE, chain SBALs if needed
556 */ 556 */
557static inline volatile struct qdio_buffer_element * 557static volatile struct qdio_buffer_element *
558zfcp_qdio_sbale_next(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) 558zfcp_qdio_sbale_next(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
559{ 559{
560 if (fsf_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL) 560 if (fsf_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL)
@@ -569,7 +569,7 @@ zfcp_qdio_sbale_next(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
569 * zfcp_qdio_sbals_zero - initialize SBALs between first and last in queue 569 * zfcp_qdio_sbals_zero - initialize SBALs between first and last in queue
570 * with zero from 570 * with zero from
571 */ 571 */
572static inline int 572static int
573zfcp_qdio_sbals_zero(struct zfcp_qdio_queue *queue, int first, int last) 573zfcp_qdio_sbals_zero(struct zfcp_qdio_queue *queue, int first, int last)
574{ 574{
575 struct qdio_buffer **buf = queue->buffer; 575 struct qdio_buffer **buf = queue->buffer;
@@ -603,7 +603,7 @@ zfcp_qdio_sbals_wipe(struct zfcp_fsf_req *fsf_req)
603 * zfcp_qdio_sbale_fill - set address and length in current SBALE 603 * zfcp_qdio_sbale_fill - set address and length in current SBALE
604 * on request_queue 604 * on request_queue
605 */ 605 */
606static inline void 606static void
607zfcp_qdio_sbale_fill(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, 607zfcp_qdio_sbale_fill(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
608 void *addr, int length) 608 void *addr, int length)
609{ 609{
@@ -624,7 +624,7 @@ zfcp_qdio_sbale_fill(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
624 * Alignment and length of the segment determine how many SBALEs are needed 624 * Alignment and length of the segment determine how many SBALEs are needed
625 * for the memory segment. 625 * for the memory segment.
626 */ 626 */
627static inline int 627static int
628zfcp_qdio_sbals_from_segment(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, 628zfcp_qdio_sbals_from_segment(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
629 void *start_addr, unsigned long total_length) 629 void *start_addr, unsigned long total_length)
630{ 630{
@@ -659,7 +659,7 @@ zfcp_qdio_sbals_from_segment(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
659 * @sg_count: number of elements in scatter-gather list 659 * @sg_count: number of elements in scatter-gather list
660 * @max_sbals: upper bound for number of SBALs to be used 660 * @max_sbals: upper bound for number of SBALs to be used
661 */ 661 */
662inline int 662int
663zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, 663zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
664 struct scatterlist *sg, int sg_count, int max_sbals) 664 struct scatterlist *sg, int sg_count, int max_sbals)
665{ 665{
@@ -707,7 +707,7 @@ out:
707 * @length: length of buffer 707 * @length: length of buffer
708 * @max_sbals: upper bound for number of SBALs to be used 708 * @max_sbals: upper bound for number of SBALs to be used
709 */ 709 */
710static inline int 710static int
711zfcp_qdio_sbals_from_buffer(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, 711zfcp_qdio_sbals_from_buffer(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
712 void *buffer, unsigned long length, int max_sbals) 712 void *buffer, unsigned long length, int max_sbals)
713{ 713{
@@ -728,7 +728,7 @@ zfcp_qdio_sbals_from_buffer(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
728 * @scsi_cmnd: either scatter-gather list or buffer contained herein is used 728 * @scsi_cmnd: either scatter-gather list or buffer contained herein is used
729 * to fill SBALs 729 * to fill SBALs
730 */ 730 */
731inline int 731int
732zfcp_qdio_sbals_from_scsicmnd(struct zfcp_fsf_req *fsf_req, 732zfcp_qdio_sbals_from_scsicmnd(struct zfcp_fsf_req *fsf_req,
733 unsigned long sbtype, struct scsi_cmnd *scsi_cmnd) 733 unsigned long sbtype, struct scsi_cmnd *scsi_cmnd)
734{ 734{
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 452d96f92a14..99db02062c3b 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -90,7 +90,7 @@ zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu)
90 return fcp_sns_info_ptr; 90 return fcp_sns_info_ptr;
91} 91}
92 92
93fcp_dl_t * 93static fcp_dl_t *
94zfcp_get_fcp_dl_ptr(struct fcp_cmnd_iu * fcp_cmd) 94zfcp_get_fcp_dl_ptr(struct fcp_cmnd_iu * fcp_cmd)
95{ 95{
96 int additional_length = fcp_cmd->add_fcp_cdb_length << 2; 96 int additional_length = fcp_cmd->add_fcp_cdb_length << 2;
@@ -124,19 +124,19 @@ zfcp_set_fcp_dl(struct fcp_cmnd_iu *fcp_cmd, fcp_dl_t fcp_dl)
124 * regarding the specified byte 124 * regarding the specified byte
125 */ 125 */
126static inline void 126static inline void
127set_byte(u32 * result, char status, char pos) 127set_byte(int *result, char status, char pos)
128{ 128{
129 *result |= status << (pos * 8); 129 *result |= status << (pos * 8);
130} 130}
131 131
132void 132void
133set_host_byte(u32 * result, char status) 133set_host_byte(int *result, char status)
134{ 134{
135 set_byte(result, status, 2); 135 set_byte(result, status, 2);
136} 136}
137 137
138void 138void
139set_driver_byte(u32 * result, char status) 139set_driver_byte(int *result, char status)
140{ 140{
141 set_byte(result, status, 3); 141 set_byte(result, status, 3);
142} 142}
@@ -280,7 +280,7 @@ out:
280 return retval; 280 return retval;
281} 281}
282 282
283void 283static void
284zfcp_scsi_command_sync_handler(struct scsi_cmnd *scpnt) 284zfcp_scsi_command_sync_handler(struct scsi_cmnd *scpnt)
285{ 285{
286 struct completion *wait = (struct completion *) scpnt->SCp.ptr; 286 struct completion *wait = (struct completion *) scpnt->SCp.ptr;
@@ -324,7 +324,7 @@ zfcp_scsi_command_sync(struct zfcp_unit *unit, struct scsi_cmnd *scpnt,
324 * returns: 0 - success, SCSI command enqueued 324 * returns: 0 - success, SCSI command enqueued
325 * !0 - failure 325 * !0 - failure
326 */ 326 */
327int 327static int
328zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt, 328zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
329 void (*done) (struct scsi_cmnd *)) 329 void (*done) (struct scsi_cmnd *))
330{ 330{
@@ -380,7 +380,7 @@ zfcp_unit_lookup(struct zfcp_adapter *adapter, int channel, unsigned int id,
380 * will handle late commands. (Usually, the normal completion of late 380 * will handle late commands. (Usually, the normal completion of late
381 * commands is ignored with respect to the running abort operation.) 381 * commands is ignored with respect to the running abort operation.)
382 */ 382 */
383int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) 383static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
384{ 384{
385 struct Scsi_Host *scsi_host; 385 struct Scsi_Host *scsi_host;
386 struct zfcp_adapter *adapter; 386 struct zfcp_adapter *adapter;
@@ -445,7 +445,7 @@ int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
445 return retval; 445 return retval;
446} 446}
447 447
448int 448static int
449zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt) 449zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt)
450{ 450{
451 int retval; 451 int retval;
@@ -541,7 +541,7 @@ zfcp_task_management_function(struct zfcp_unit *unit, u8 tm_flags,
541/** 541/**
542 * zfcp_scsi_eh_host_reset_handler - handler for host and bus reset 542 * zfcp_scsi_eh_host_reset_handler - handler for host and bus reset
543 */ 543 */
544int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt) 544static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
545{ 545{
546 struct zfcp_unit *unit; 546 struct zfcp_unit *unit;
547 struct zfcp_adapter *adapter; 547 struct zfcp_adapter *adapter;
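
Two related type fixes run through the zfcp hunks above: the zfcp-local helpers set_host_byte() and set_driver_byte() now take int * instead of u32 *, matching scsi_cmnd->result, which is declared int. A usage sketch under that assumption (these are the driver's own helpers from zfcp_ext.h, not a midlayer API):

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

/* host byte is byte 2 of ->result, driver byte is byte 3, exactly
 * as set_byte() shifts them above */
static void demo_fail(struct scsi_cmnd *scpnt)
{
	set_host_byte(&scpnt->result, DID_ERROR);
	set_driver_byte(&scpnt->result, DRIVER_TIMEOUT);
	scpnt->scsi_done(scpnt);
}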
diff --git a/drivers/s390/sysinfo.c b/drivers/s390/sysinfo.c
index 1e788e815ce7..090743d2f914 100644
--- a/drivers/s390/sysinfo.c
+++ b/drivers/s390/sysinfo.c
@@ -9,8 +9,14 @@
9#include <linux/mm.h> 9#include <linux/mm.h>
10#include <linux/proc_fs.h> 10#include <linux/proc_fs.h>
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/delay.h>
12#include <asm/ebcdic.h> 13#include <asm/ebcdic.h>
13 14
15/* Sigh, math-emu. Don't ask. */
16#include <asm/sfp-util.h>
17#include <math-emu/soft-fp.h>
18#include <math-emu/single.h>
19
14struct sysinfo_1_1_1 { 20struct sysinfo_1_1_1 {
15 char reserved_0[32]; 21 char reserved_0[32];
16 char manufacturer[16]; 22 char manufacturer[16];
@@ -198,7 +204,7 @@ static int stsi_1_2_2(struct sysinfo_1_2_2 *info, char *page, int len)
198 * if the higher order 8 bits are not zero. Printing 204 * if the higher order 8 bits are not zero. Printing
199 * a floating point number in the kernel is a no-no, 205 * a floating point number in the kernel is a no-no,
200 * always print the number as 32 bit unsigned integer. 206 * always print the number as 32 bit unsigned integer.
201 * The user-space needs to know about the stange 207 * The user-space needs to know about the strange
202 * encoding of the alternate cpu capability. 208 * encoding of the alternate cpu capability.
203 */ 209 */
204 len += sprintf(page + len, "Capability: %u %u\n", 210 len += sprintf(page + len, "Capability: %u %u\n",
@@ -351,3 +357,58 @@ static __init int create_proc_sysinfo(void)
351 357
352__initcall(create_proc_sysinfo); 358__initcall(create_proc_sysinfo);
353 359
360/*
361 * CPU capability might have changed. Therefore recalculate loops_per_jiffy.
362 */
363void s390_adjust_jiffies(void)
364{
365 struct sysinfo_1_2_2 *info;
366 const unsigned int fmil = 0x4b189680; /* 1e7 as 32-bit float. */
367 FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
368 FP_DECL_EX;
369 unsigned int capability;
370
371 info = (void *) get_zeroed_page(GFP_KERNEL);
372 if (!info)
373 return;
374
375 if (stsi(info, 1, 2, 2) != -ENOSYS) {
376 /*
377 * Major sigh. The cpu capability encoding is "special".
378 * If the first 9 bits of info->capability are 0 then it
379 * is a 32 bit unsigned integer in the range 0 .. 2^23.
380 * If the first 9 bits are != 0 then it is a 32 bit float.
381 * In addition a lower value indicates a proportionally
382 * higher cpu capacity. Bogomips are the other way round.
383 * To get to a halfway suitable number we divide 1e7
384 * by the cpu capability number. Yes, that means a floating
385 * point division .. math-emu here we come :-)
386 */
387 FP_UNPACK_SP(SA, &fmil);
388 if ((info->capability >> 23) == 0)
389 FP_FROM_INT_S(SB, info->capability, 32, int);
390 else
391 FP_UNPACK_SP(SB, &info->capability);
392 FP_DIV_S(SR, SA, SB);
393 FP_TO_INT_S(capability, SR, 32, 0);
394 } else
395 /*
396 * Really old machine without stsi block for basic
397 * cpu information. Report 42.0 bogomips.
398 */
399 capability = 42;
400 loops_per_jiffy = capability * (500000/HZ);
401 free_page((unsigned long) info);
402}
403
404/*
405 * calibrate the delay loop
406 */
407void __init calibrate_delay(void)
408{
409 s390_adjust_jiffies();
410 /* Print the good old Bogomips line .. */
411 printk(KERN_DEBUG "Calibrating delay loop (skipped)... "
412 "%lu.%02lu BogoMIPS preset\n", loops_per_jiffy/(500000/HZ),
413 (loops_per_jiffy/(5000/HZ)) % 100);
414}
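
The capability encoding is inverse: a lower stsi value means a faster CPU, so s390_adjust_jiffies() computes 1e7 / capability before scaling by 500000/HZ. Because calibrate_delay() divides by the same factor when printing, the integer part of the reported BogoMIPS is exactly 1e7 divided by the stsi capability value. An integer-only sketch for the plain encoding (top 9 bits zero); the real code must go through math-emu because the value may be an IEEE single:

/* hypothetical helper, valid only for the integer encoding */
static unsigned long demo_lpj(unsigned int capability, unsigned int hz)
{
	unsigned int inverse = 10000000u / capability;	/* lower cap = faster */

	return (unsigned long)inverse * (500000 / hz);
}

/* demo_lpj(2000, 100) == 5000 * 5000 == 25000000, which the banner
 * prints as "5000.00 BogoMIPS preset" */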
diff --git a/drivers/scsi/NCR53C9x.c b/drivers/scsi/NCR53C9x.c
index 3c912ee29da0..8b5334c56f0a 100644
--- a/drivers/scsi/NCR53C9x.c
+++ b/drivers/scsi/NCR53C9x.c
@@ -528,12 +528,16 @@ void esp_bootup_reset(struct NCR_ESP *esp, struct ESP_regs *eregs)
528/* Allocate structure and insert basic data such as SCSI chip frequency 528/* Allocate structure and insert basic data such as SCSI chip frequency
529 * data and a pointer to the device 529 * data and a pointer to the device
530 */ 530 */
531struct NCR_ESP* esp_allocate(struct scsi_host_template *tpnt, void *esp_dev) 531struct NCR_ESP* esp_allocate(struct scsi_host_template *tpnt, void *esp_dev,
532 int hotplug)
532{ 533{
533 struct NCR_ESP *esp, *elink; 534 struct NCR_ESP *esp, *elink;
534 struct Scsi_Host *esp_host; 535 struct Scsi_Host *esp_host;
535 536
536 esp_host = scsi_register(tpnt, sizeof(struct NCR_ESP)); 537 if (hotplug)
538 esp_host = scsi_host_alloc(tpnt, sizeof(struct NCR_ESP));
539 else
540 esp_host = scsi_register(tpnt, sizeof(struct NCR_ESP));
537 if(!esp_host) 541 if(!esp_host)
538 panic("Cannot register ESP SCSI host"); 542 panic("Cannot register ESP SCSI host");
539 esp = (struct NCR_ESP *) esp_host->hostdata; 543 esp = (struct NCR_ESP *) esp_host->hostdata;
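
The new third argument to esp_allocate() selects between the legacy and the driver-model registration paths; a sketch of the two call styles now seen in the callers below (BOARD_ESP_ADDR is a placeholder name):

	/* legacy ->detect() probing: scsi_register(), host auto-registered */
	esp = esp_allocate(tpnt, (void *)board + BOARD_ESP_ADDR, 0);

	/* hotplug/driver-model probing (dec_esp below): scsi_host_alloc(),
	 * the caller must do scsi_add_host()/scsi_scan_host() itself */
	esp = esp_allocate(&dec_esp_template, NULL, 1);
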
diff --git a/drivers/scsi/NCR53C9x.h b/drivers/scsi/NCR53C9x.h
index 521e3f842cfd..d85cb73a9f69 100644
--- a/drivers/scsi/NCR53C9x.h
+++ b/drivers/scsi/NCR53C9x.h
@@ -652,7 +652,7 @@ extern int nesps, esps_in_use, esps_running;
652 652
653/* External functions */ 653/* External functions */
654extern void esp_bootup_reset(struct NCR_ESP *esp, struct ESP_regs *eregs); 654extern void esp_bootup_reset(struct NCR_ESP *esp, struct ESP_regs *eregs);
655extern struct NCR_ESP *esp_allocate(struct scsi_host_template *, void *); 655extern struct NCR_ESP *esp_allocate(struct scsi_host_template *, void *, int);
656extern void esp_deallocate(struct NCR_ESP *); 656extern void esp_deallocate(struct NCR_ESP *);
657extern void esp_release(void); 657extern void esp_release(void);
658extern void esp_initialize(struct NCR_ESP *); 658extern void esp_initialize(struct NCR_ESP *);
diff --git a/drivers/scsi/blz1230.c b/drivers/scsi/blz1230.c
index 329a8f297b31..23f7c24ab809 100644
--- a/drivers/scsi/blz1230.c
+++ b/drivers/scsi/blz1230.c
@@ -121,7 +121,8 @@ int __init blz1230_esp_detect(struct scsi_host_template *tpnt)
121 */ 121 */
122 address = ZTWO_VADDR(board); 122 address = ZTWO_VADDR(board);
123 eregs = (struct ESP_regs *)(address + REAL_BLZ1230_ESP_ADDR); 123 eregs = (struct ESP_regs *)(address + REAL_BLZ1230_ESP_ADDR);
124 esp = esp_allocate(tpnt, (void *)board+REAL_BLZ1230_ESP_ADDR); 124 esp = esp_allocate(tpnt, (void *)board + REAL_BLZ1230_ESP_ADDR,
125 0);
125 126
126 esp_write(eregs->esp_cfg1, (ESP_CONFIG1_PENABLE | 7)); 127 esp_write(eregs->esp_cfg1, (ESP_CONFIG1_PENABLE | 7));
127 udelay(5); 128 udelay(5);
diff --git a/drivers/scsi/blz2060.c b/drivers/scsi/blz2060.c
index b6c137b97350..b6203ec00961 100644
--- a/drivers/scsi/blz2060.c
+++ b/drivers/scsi/blz2060.c
@@ -100,7 +100,7 @@ int __init blz2060_esp_detect(struct scsi_host_template *tpnt)
100 unsigned long board = z->resource.start; 100 unsigned long board = z->resource.start;
101 if (request_mem_region(board+BLZ2060_ESP_ADDR, 101 if (request_mem_region(board+BLZ2060_ESP_ADDR,
102 sizeof(struct ESP_regs), "NCR53C9x")) { 102 sizeof(struct ESP_regs), "NCR53C9x")) {
103 esp = esp_allocate(tpnt, (void *)board+BLZ2060_ESP_ADDR); 103 esp = esp_allocate(tpnt, (void *)board + BLZ2060_ESP_ADDR, 0);
104 104
105 /* Do command transfer with programmed I/O */ 105 /* Do command transfer with programmed I/O */
106 esp->do_pio_cmds = 1; 106 esp->do_pio_cmds = 1;
diff --git a/drivers/scsi/cyberstorm.c b/drivers/scsi/cyberstorm.c
index 7c7cfb54e897..c6b98a42e89d 100644
--- a/drivers/scsi/cyberstorm.c
+++ b/drivers/scsi/cyberstorm.c
@@ -126,7 +126,7 @@ int __init cyber_esp_detect(struct scsi_host_template *tpnt)
126 sizeof(struct ESP_regs)); 126 sizeof(struct ESP_regs));
127 return 0; 127 return 0;
128 } 128 }
129 esp = esp_allocate(tpnt, (void *)board+CYBER_ESP_ADDR); 129 esp = esp_allocate(tpnt, (void *)board + CYBER_ESP_ADDR, 0);
130 130
131 /* Do command transfer with programmed I/O */ 131 /* Do command transfer with programmed I/O */
132 esp->do_pio_cmds = 1; 132 esp->do_pio_cmds = 1;
diff --git a/drivers/scsi/cyberstormII.c b/drivers/scsi/cyberstormII.c
index d88cb9cf091e..e336e853e66f 100644
--- a/drivers/scsi/cyberstormII.c
+++ b/drivers/scsi/cyberstormII.c
@@ -98,7 +98,7 @@ int __init cyberII_esp_detect(struct scsi_host_template *tpnt)
98 address = (unsigned long)ZTWO_VADDR(board); 98 address = (unsigned long)ZTWO_VADDR(board);
99 eregs = (struct ESP_regs *)(address + CYBERII_ESP_ADDR); 99 eregs = (struct ESP_regs *)(address + CYBERII_ESP_ADDR);
100 100
101 esp = esp_allocate(tpnt, (void *)board+CYBERII_ESP_ADDR); 101 esp = esp_allocate(tpnt, (void *)board + CYBERII_ESP_ADDR, 0);
102 102
103 esp_write(eregs->esp_cfg1, (ESP_CONFIG1_PENABLE | 7)); 103 esp_write(eregs->esp_cfg1, (ESP_CONFIG1_PENABLE | 7));
104 udelay(5); 104 udelay(5);
diff --git a/drivers/scsi/dec_esp.c b/drivers/scsi/dec_esp.c
index c29ccbc44693..d42ad663ffee 100644
--- a/drivers/scsi/dec_esp.c
+++ b/drivers/scsi/dec_esp.c
@@ -18,7 +18,7 @@
18 * 20001005 - Initialization fixes for 2.4.0-test9 18 * 20001005 - Initialization fixes for 2.4.0-test9
19 * Florian Lohoff <flo@rfc822.org> 19 * Florian Lohoff <flo@rfc822.org>
20 * 20 *
21 * Copyright (C) 2002, 2003, 2005 Maciej W. Rozycki 21 * Copyright (C) 2002, 2003, 2005, 2006 Maciej W. Rozycki
22 */ 22 */
23 23
24#include <linux/kernel.h> 24#include <linux/kernel.h>
@@ -30,6 +30,7 @@
30#include <linux/proc_fs.h> 30#include <linux/proc_fs.h>
31#include <linux/spinlock.h> 31#include <linux/spinlock.h>
32#include <linux/stat.h> 32#include <linux/stat.h>
33#include <linux/tc.h>
33 34
34#include <asm/dma.h> 35#include <asm/dma.h>
35#include <asm/irq.h> 36#include <asm/irq.h>
@@ -42,7 +43,6 @@
42#include <asm/dec/ioasic_ints.h> 43#include <asm/dec/ioasic_ints.h>
43#include <asm/dec/machtype.h> 44#include <asm/dec/machtype.h>
44#include <asm/dec/system.h> 45#include <asm/dec/system.h>
45#include <asm/dec/tc.h>
46 46
47#define DEC_SCSI_SREG 0 47#define DEC_SCSI_SREG 0
48#define DEC_SCSI_DMAREG 0x40000 48#define DEC_SCSI_DMAREG 0x40000
@@ -98,51 +98,33 @@ static irqreturn_t scsi_dma_merr_int(int, void *);
98static irqreturn_t scsi_dma_err_int(int, void *); 98static irqreturn_t scsi_dma_err_int(int, void *);
99static irqreturn_t scsi_dma_int(int, void *); 99static irqreturn_t scsi_dma_int(int, void *);
100 100
101static int dec_esp_detect(struct scsi_host_template * tpnt); 101static struct scsi_host_template dec_esp_template = {
102 102 .module = THIS_MODULE,
103static int dec_esp_release(struct Scsi_Host *shost)
104{
105 if (shost->irq)
106 free_irq(shost->irq, NULL);
107 if (shost->io_port && shost->n_io_port)
108 release_region(shost->io_port, shost->n_io_port);
109 scsi_unregister(shost);
110 return 0;
111}
112
113static struct scsi_host_template driver_template = {
114 .proc_name = "dec_esp",
115 .proc_info = esp_proc_info,
116 .name = "NCR53C94", 103 .name = "NCR53C94",
117 .detect = dec_esp_detect,
118 .slave_alloc = esp_slave_alloc,
119 .slave_destroy = esp_slave_destroy,
120 .release = dec_esp_release,
121 .info = esp_info, 104 .info = esp_info,
122 .queuecommand = esp_queue, 105 .queuecommand = esp_queue,
123 .eh_abort_handler = esp_abort, 106 .eh_abort_handler = esp_abort,
124 .eh_bus_reset_handler = esp_reset, 107 .eh_bus_reset_handler = esp_reset,
108 .slave_alloc = esp_slave_alloc,
109 .slave_destroy = esp_slave_destroy,
110 .proc_info = esp_proc_info,
111 .proc_name = "dec_esp",
125 .can_queue = 7, 112 .can_queue = 7,
126 .this_id = 7,
127 .sg_tablesize = SG_ALL, 113 .sg_tablesize = SG_ALL,
128 .cmd_per_lun = 1, 114 .cmd_per_lun = 1,
129 .use_clustering = DISABLE_CLUSTERING, 115 .use_clustering = DISABLE_CLUSTERING,
130}; 116};
131 117
132 118static struct NCR_ESP *dec_esp_platform;
133#include "scsi_module.c"
134 119
135/***************************************************************** Detection */ 120/***************************************************************** Detection */
136static int dec_esp_detect(struct scsi_host_template * tpnt) 121static int dec_esp_platform_probe(void)
137{ 122{
138 struct NCR_ESP *esp; 123 struct NCR_ESP *esp;
139 struct ConfigDev *esp_dev; 124 int err = 0;
140 int slot;
141 unsigned long mem_start;
142 125
143 if (IOASIC) { 126 if (IOASIC) {
144 esp_dev = 0; 127 esp = esp_allocate(&dec_esp_template, NULL, 1);
145 esp = esp_allocate(tpnt, (void *) esp_dev);
146 128
147 /* Do command transfer with programmed I/O */ 129 /* Do command transfer with programmed I/O */
148 esp->do_pio_cmds = 1; 130 esp->do_pio_cmds = 1;
@@ -200,112 +182,175 @@ static int dec_esp_detect(struct scsi_host_template * tpnt)
200 /* Check for differential SCSI-bus */ 182 /* Check for differential SCSI-bus */
201 esp->diff = 0; 183 esp->diff = 0;
202 184
185 err = request_irq(esp->irq, esp_intr, IRQF_DISABLED,
186 "ncr53c94", esp->ehost);
187 if (err)
188 goto err_alloc;
189 err = request_irq(dec_interrupt[DEC_IRQ_ASC_MERR],
190 scsi_dma_merr_int, IRQF_DISABLED,
191 "ncr53c94 error", esp->ehost);
192 if (err)
193 goto err_irq;
194 err = request_irq(dec_interrupt[DEC_IRQ_ASC_ERR],
195 scsi_dma_err_int, IRQF_DISABLED,
196 "ncr53c94 overrun", esp->ehost);
197 if (err)
198 goto err_irq_merr;
199 err = request_irq(dec_interrupt[DEC_IRQ_ASC_DMA], scsi_dma_int,
200 IRQF_DISABLED, "ncr53c94 dma", esp->ehost);
201 if (err)
202 goto err_irq_err;
203
203 esp_initialize(esp); 204 esp_initialize(esp);
204 205
205 if (request_irq(esp->irq, esp_intr, IRQF_DISABLED, 206 err = scsi_add_host(esp->ehost, NULL);
206 "ncr53c94", esp->ehost)) 207 if (err) {
207 goto err_dealloc; 208 printk(KERN_ERR "ESP: Unable to register adapter\n");
208 if (request_irq(dec_interrupt[DEC_IRQ_ASC_MERR], 209 goto err_irq_dma;
209 scsi_dma_merr_int, IRQF_DISABLED, 210 }
210 "ncr53c94 error", esp->ehost)) 211
211 goto err_free_irq; 212 scsi_scan_host(esp->ehost);
212 if (request_irq(dec_interrupt[DEC_IRQ_ASC_ERR],
213 scsi_dma_err_int, IRQF_DISABLED,
214 "ncr53c94 overrun", esp->ehost))
215 goto err_free_irq_merr;
216 if (request_irq(dec_interrupt[DEC_IRQ_ASC_DMA],
217 scsi_dma_int, IRQF_DISABLED,
218 "ncr53c94 dma", esp->ehost))
219 goto err_free_irq_err;
220 213
214 dec_esp_platform = esp;
221 } 215 }
222 216
223 if (TURBOCHANNEL) { 217 return 0;
224 while ((slot = search_tc_card("PMAZ-AA")) >= 0) { 218
225 claim_tc_card(slot); 219err_irq_dma:
226 220 free_irq(dec_interrupt[DEC_IRQ_ASC_DMA], esp->ehost);
227 esp_dev = 0; 221err_irq_err:
228 esp = esp_allocate(tpnt, (void *) esp_dev); 222 free_irq(dec_interrupt[DEC_IRQ_ASC_ERR], esp->ehost);
229 223err_irq_merr:
230 mem_start = get_tc_base_addr(slot); 224 free_irq(dec_interrupt[DEC_IRQ_ASC_MERR], esp->ehost);
231 225err_irq:
232 /* Store base addr into esp struct */ 226 free_irq(esp->irq, esp->ehost);
233 esp->slot = CPHYSADDR(mem_start); 227err_alloc:
234 228 esp_deallocate(esp);
235 esp->dregs = 0; 229 scsi_host_put(esp->ehost);
236 esp->eregs = (void *)CKSEG1ADDR(mem_start + 230 return err;
237 DEC_SCSI_SREG); 231}
238 esp->do_pio_cmds = 1; 232
239 233static int __init dec_esp_probe(struct device *dev)
240 /* Set the command buffer */ 234{
241 esp->esp_command = (volatile unsigned char *) pmaz_cmd_buffer; 235 struct NCR_ESP *esp;
242 236 resource_size_t start, len;
243 /* get virtual dma address for command buffer */ 237 int err;
244 esp->esp_command_dvma = virt_to_phys(pmaz_cmd_buffer); 238
245 239 esp = esp_allocate(&dec_esp_template, NULL, 1);
246 esp->cfreq = get_tc_speed(); 240
247 241 dev_set_drvdata(dev, esp);
248 esp->irq = get_tc_irq_nr(slot); 242
249 243 start = to_tc_dev(dev)->resource.start;
250 /* Required functions */ 244 len = to_tc_dev(dev)->resource.end - start + 1;
251 esp->dma_bytes_sent = &dma_bytes_sent; 245
252 esp->dma_can_transfer = &dma_can_transfer; 246 if (!request_mem_region(start, len, dev->bus_id)) {
253 esp->dma_dump_state = &dma_dump_state; 247 printk(KERN_ERR "%s: Unable to reserve MMIO resource\n",
254 esp->dma_init_read = &pmaz_dma_init_read; 248 dev->bus_id);
255 esp->dma_init_write = &pmaz_dma_init_write; 249 err = -EBUSY;
256 esp->dma_ints_off = &pmaz_dma_ints_off; 250 goto err_alloc;
257 esp->dma_ints_on = &pmaz_dma_ints_on;
258 esp->dma_irq_p = &dma_irq_p;
259 esp->dma_ports_p = &dma_ports_p;
260 esp->dma_setup = &pmaz_dma_setup;
261
262 /* Optional functions */
263 esp->dma_barrier = 0;
264 esp->dma_drain = &pmaz_dma_drain;
265 esp->dma_invalidate = 0;
266 esp->dma_irq_entry = 0;
267 esp->dma_irq_exit = 0;
268 esp->dma_poll = 0;
269 esp->dma_reset = 0;
270 esp->dma_led_off = 0;
271 esp->dma_led_on = 0;
272
273 esp->dma_mmu_get_scsi_one = pmaz_dma_mmu_get_scsi_one;
274 esp->dma_mmu_get_scsi_sgl = 0;
275 esp->dma_mmu_release_scsi_one = 0;
276 esp->dma_mmu_release_scsi_sgl = 0;
277 esp->dma_advance_sg = 0;
278
279 if (request_irq(esp->irq, esp_intr, IRQF_DISABLED,
280 "PMAZ_AA", esp->ehost)) {
281 esp_deallocate(esp);
282 release_tc_card(slot);
283 continue;
284 }
285 esp->scsi_id = 7;
286 esp->diff = 0;
287 esp_initialize(esp);
288 }
289 } 251 }
290 252
291 if(nesps) { 253 /* Store base addr into esp struct. */
292 printk("ESP: Total of %d ESP hosts found, %d actually in use.\n", nesps, esps_in_use); 254 esp->slot = start;
293 esps_running = esps_in_use; 255
294 return esps_in_use; 256 esp->dregs = 0;
257 esp->eregs = (void *)CKSEG1ADDR(start + DEC_SCSI_SREG);
258 esp->do_pio_cmds = 1;
259
260 /* Set the command buffer. */
261 esp->esp_command = (volatile unsigned char *)pmaz_cmd_buffer;
262
263 /* Get virtual dma address for command buffer. */
264 esp->esp_command_dvma = virt_to_phys(pmaz_cmd_buffer);
265
266 esp->cfreq = tc_get_speed(to_tc_dev(dev)->bus);
267
268 esp->irq = to_tc_dev(dev)->interrupt;
269
270 /* Required functions. */
271 esp->dma_bytes_sent = &dma_bytes_sent;
272 esp->dma_can_transfer = &dma_can_transfer;
273 esp->dma_dump_state = &dma_dump_state;
274 esp->dma_init_read = &pmaz_dma_init_read;
275 esp->dma_init_write = &pmaz_dma_init_write;
276 esp->dma_ints_off = &pmaz_dma_ints_off;
277 esp->dma_ints_on = &pmaz_dma_ints_on;
278 esp->dma_irq_p = &dma_irq_p;
279 esp->dma_ports_p = &dma_ports_p;
280 esp->dma_setup = &pmaz_dma_setup;
281
282 /* Optional functions. */
283 esp->dma_barrier = 0;
284 esp->dma_drain = &pmaz_dma_drain;
285 esp->dma_invalidate = 0;
286 esp->dma_irq_entry = 0;
287 esp->dma_irq_exit = 0;
288 esp->dma_poll = 0;
289 esp->dma_reset = 0;
290 esp->dma_led_off = 0;
291 esp->dma_led_on = 0;
292
293 esp->dma_mmu_get_scsi_one = pmaz_dma_mmu_get_scsi_one;
294 esp->dma_mmu_get_scsi_sgl = 0;
295 esp->dma_mmu_release_scsi_one = 0;
296 esp->dma_mmu_release_scsi_sgl = 0;
297 esp->dma_advance_sg = 0;
298
299 err = request_irq(esp->irq, esp_intr, IRQF_DISABLED, "PMAZ_AA",
300 esp->ehost);
301 if (err) {
302 printk(KERN_ERR "%s: Unable to get IRQ %d\n",
303 dev->bus_id, esp->irq);
304 goto err_resource;
305 }
306
307 esp->scsi_id = 7;
308 esp->diff = 0;
309 esp_initialize(esp);
310
311 err = scsi_add_host(esp->ehost, dev);
312 if (err) {
313 printk(KERN_ERR "%s: Unable to register adapter\n",
314 dev->bus_id);
315 goto err_irq;
295 } 316 }
317
318 scsi_scan_host(esp->ehost);
319
296 return 0; 320 return 0;
297 321
298err_free_irq_err: 322err_irq:
299 free_irq(dec_interrupt[DEC_IRQ_ASC_ERR], scsi_dma_err_int); 323 free_irq(esp->irq, esp->ehost);
300err_free_irq_merr: 324
301 free_irq(dec_interrupt[DEC_IRQ_ASC_MERR], scsi_dma_merr_int); 325err_resource:
302err_free_irq: 326 release_mem_region(start, len);
303 free_irq(esp->irq, esp_intr); 327
304err_dealloc: 328err_alloc:
305 esp_deallocate(esp); 329 esp_deallocate(esp);
306 return 0; 330 scsi_host_put(esp->ehost);
331 return err;
332}
333
334static void __exit dec_esp_platform_remove(void)
335{
336 struct NCR_ESP *esp = dec_esp_platform;
337
338 free_irq(esp->irq, esp->ehost);
339 esp_deallocate(esp);
340 scsi_host_put(esp->ehost);
341 dec_esp_platform = NULL;
307} 342}
308 343
344static void __exit dec_esp_remove(struct device *dev)
345{
346 struct NCR_ESP *esp = dev_get_drvdata(dev);
347
348 free_irq(esp->irq, esp->ehost);
349 esp_deallocate(esp);
350 scsi_host_put(esp->ehost);
351}
352
353
309/************************************************************* DMA Functions */ 354/************************************************************* DMA Functions */
310static irqreturn_t scsi_dma_merr_int(int irq, void *dev_id) 355static irqreturn_t scsi_dma_merr_int(int irq, void *dev_id)
311{ 356{
@@ -576,3 +621,67 @@ static void pmaz_dma_mmu_get_scsi_one(struct NCR_ESP *esp, struct scsi_cmnd * sp
576{ 621{
577 sp->SCp.ptr = (char *)virt_to_phys(sp->request_buffer); 622 sp->SCp.ptr = (char *)virt_to_phys(sp->request_buffer);
578} 623}
624
625
626#ifdef CONFIG_TC
627static int __init dec_esp_tc_probe(struct device *dev);
628static int __exit dec_esp_tc_remove(struct device *dev);
629
630static const struct tc_device_id dec_esp_tc_table[] = {
631 { "DEC ", "PMAZ-AA " },
632 { }
633};
634MODULE_DEVICE_TABLE(tc, dec_esp_tc_table);
635
636static struct tc_driver dec_esp_tc_driver = {
637 .id_table = dec_esp_tc_table,
638 .driver = {
639 .name = "dec_esp",
640 .bus = &tc_bus_type,
641 .probe = dec_esp_tc_probe,
642 .remove = __exit_p(dec_esp_tc_remove),
643 },
644};
645
646static int __init dec_esp_tc_probe(struct device *dev)
647{
648 int status = dec_esp_probe(dev);
649 if (!status)
650 get_device(dev);
651 return status;
652}
653
654static int __exit dec_esp_tc_remove(struct device *dev)
655{
656 put_device(dev);
657 dec_esp_remove(dev);
658 return 0;
659}
660#endif
661
662static int __init dec_esp_init(void)
663{
664 int status;
665
666 status = tc_register_driver(&dec_esp_tc_driver);
667 if (!status)
668 dec_esp_platform_probe();
669
670 if (nesps) {
671 pr_info("ESP: Total of %d ESP hosts found, "
672 "%d actually in use.\n", nesps, esps_in_use);
673 esps_running = esps_in_use;
674 }
675
676 return status;
677}
678
679static void __exit dec_esp_exit(void)
680{
681 dec_esp_platform_remove();
682 tc_unregister_driver(&dec_esp_tc_driver);
683}
684
685
686module_init(dec_esp_init);
687module_exit(dec_esp_exit);
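
The rewritten probe functions above use the kernel's usual "goto ladder" unwind: each acquired resource gets a label, and a failure jumps to the label that releases everything obtained so far, in reverse order. A minimal sketch with illustrative names:

	err = request_irq(irq_a, handler_a, IRQF_DISABLED, "a", host);
	if (err)
		goto err_alloc;
	err = request_irq(irq_b, handler_b, IRQF_DISABLED, "b", host);
	if (err)
		goto err_irq_a;
	return 0;

	err_irq_a:
		free_irq(irq_a, host);		/* undo in reverse order */
	err_alloc:
		esp_deallocate(esp);
		scsi_host_put(host);
		return err;
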
diff --git a/drivers/scsi/fastlane.c b/drivers/scsi/fastlane.c
index 2a1c5c22b9e0..4266a2139b5f 100644
--- a/drivers/scsi/fastlane.c
+++ b/drivers/scsi/fastlane.c
@@ -142,7 +142,7 @@ int __init fastlane_esp_detect(struct scsi_host_template *tpnt)
142 if (board < 0x1000000) { 142 if (board < 0x1000000) {
143 goto err_release; 143 goto err_release;
144 } 144 }
145 esp = esp_allocate(tpnt, (void *)board+FASTLANE_ESP_ADDR); 145 esp = esp_allocate(tpnt, (void *)board + FASTLANE_ESP_ADDR, 0);
146 146
147 /* Do command transfer with programmed I/O */ 147 /* Do command transfer with programmed I/O */
148 esp->do_pio_cmds = 1; 148 esp->do_pio_cmds = 1;
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 437684084377..8f55e1431433 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -1375,7 +1375,7 @@ iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
1375 } 1375 }
1376 1376
1377 BUG_ON(tcp_mtask->xmstate != XMSTATE_IDLE); 1377 BUG_ON(tcp_mtask->xmstate != XMSTATE_IDLE);
1378 if (mtask->hdr->itt == cpu_to_be32(ISCSI_RESERVED_TAG)) { 1378 if (mtask->hdr->itt == RESERVED_ITT) {
1379 struct iscsi_session *session = conn->session; 1379 struct iscsi_session *session = conn->session;
1380 1380
1381 spin_lock_bh(&session->lock); 1381 spin_lock_bh(&session->lock);
diff --git a/drivers/scsi/jazz_esp.c b/drivers/scsi/jazz_esp.c
index bfac4441d89f..19dd4b962e18 100644
--- a/drivers/scsi/jazz_esp.c
+++ b/drivers/scsi/jazz_esp.c
@@ -75,7 +75,7 @@ static int jazz_esp_detect(struct scsi_host_template *tpnt)
75 */ 75 */
76 if (1) { 76 if (1) {
77 esp_dev = NULL; 77 esp_dev = NULL;
78 esp = esp_allocate(tpnt, (void *) esp_dev); 78 esp = esp_allocate(tpnt, esp_dev, 0);
79 79
80 /* Do command transfer with programmed I/O */ 80 /* Do command transfer with programmed I/O */
81 esp->do_pio_cmds = 1; 81 esp->do_pio_cmds = 1;
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index d37048c96eab..7c75771c77ff 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -113,8 +113,7 @@ static void iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
113 hdr->opcode = ISCSI_OP_SCSI_CMD; 113 hdr->opcode = ISCSI_OP_SCSI_CMD;
114 hdr->flags = ISCSI_ATTR_SIMPLE; 114 hdr->flags = ISCSI_ATTR_SIMPLE;
115 int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun); 115 int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun);
116 hdr->itt = ctask->itt | (conn->id << ISCSI_CID_SHIFT) | 116 hdr->itt = build_itt(ctask->itt, conn->id, session->age);
117 (session->age << ISCSI_AGE_SHIFT);
118 hdr->data_length = cpu_to_be32(sc->request_bufflen); 117 hdr->data_length = cpu_to_be32(sc->request_bufflen);
119 hdr->cmdsn = cpu_to_be32(session->cmdsn); 118 hdr->cmdsn = cpu_to_be32(session->cmdsn);
120 session->cmdsn++; 119 session->cmdsn++;
@@ -270,7 +269,7 @@ invalid_datalen:
270 goto out; 269 goto out;
271 } 270 }
272 271
273 senselen = be16_to_cpu(*(uint16_t *)data); 272 senselen = be16_to_cpu(*(__be16 *)data);
274 if (datalen < senselen) 273 if (datalen < senselen)
275 goto invalid_datalen; 274 goto invalid_datalen;
276 275
@@ -338,7 +337,7 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
338 337
339 if (ntoh24(reject->dlength) >= sizeof(struct iscsi_hdr)) { 338 if (ntoh24(reject->dlength) >= sizeof(struct iscsi_hdr)) {
340 memcpy(&rejected_pdu, data, sizeof(struct iscsi_hdr)); 339 memcpy(&rejected_pdu, data, sizeof(struct iscsi_hdr));
341 itt = rejected_pdu.itt & ISCSI_ITT_MASK; 340 itt = get_itt(rejected_pdu.itt);
342 printk(KERN_ERR "itt 0x%x had pdu (op 0x%x) rejected " 341 printk(KERN_ERR "itt 0x%x had pdu (op 0x%x) rejected "
343 "due to DataDigest error.\n", itt, 342 "due to DataDigest error.\n", itt,
344 rejected_pdu.opcode); 343 rejected_pdu.opcode);
@@ -367,10 +366,10 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
367 struct iscsi_mgmt_task *mtask; 366 struct iscsi_mgmt_task *mtask;
368 uint32_t itt; 367 uint32_t itt;
369 368
370 if (hdr->itt != cpu_to_be32(ISCSI_RESERVED_TAG)) 369 if (hdr->itt != RESERVED_ITT)
371 itt = hdr->itt & ISCSI_ITT_MASK; 370 itt = get_itt(hdr->itt);
372 else 371 else
373 itt = hdr->itt; 372 itt = ~0U;
374 373
375 if (itt < session->cmds_max) { 374 if (itt < session->cmds_max) {
376 ctask = session->cmds[itt]; 375 ctask = session->cmds[itt];
@@ -440,7 +439,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
440 iscsi_tmf_rsp(conn, hdr); 439 iscsi_tmf_rsp(conn, hdr);
441 break; 440 break;
442 case ISCSI_OP_NOOP_IN: 441 case ISCSI_OP_NOOP_IN:
443 if (hdr->ttt != ISCSI_RESERVED_TAG || datalen) { 442 if (hdr->ttt != cpu_to_be32(ISCSI_RESERVED_TAG) || datalen) {
444 rc = ISCSI_ERR_PROTO; 443 rc = ISCSI_ERR_PROTO;
445 break; 444 break;
446 } 445 }
@@ -457,7 +456,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
457 rc = ISCSI_ERR_BAD_OPCODE; 456 rc = ISCSI_ERR_BAD_OPCODE;
458 break; 457 break;
459 } 458 }
460 } else if (itt == ISCSI_RESERVED_TAG) { 459 } else if (itt == ~0U) {
461 rc = iscsi_check_assign_cmdsn(session, 460 rc = iscsi_check_assign_cmdsn(session,
462 (struct iscsi_nopin*)hdr); 461 (struct iscsi_nopin*)hdr);
463 if (rc) 462 if (rc)
@@ -470,7 +469,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
470 break; 469 break;
471 } 470 }
472 471
473 if (hdr->ttt == ISCSI_RESERVED_TAG) 472 if (hdr->ttt == cpu_to_be32(ISCSI_RESERVED_TAG))
474 break; 473 break;
475 474
476 if (iscsi_recv_pdu(conn->cls_conn, hdr, NULL, 0)) 475 if (iscsi_recv_pdu(conn->cls_conn, hdr, NULL, 0))
@@ -516,24 +515,24 @@ int iscsi_verify_itt(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
516 struct iscsi_cmd_task *ctask; 515 struct iscsi_cmd_task *ctask;
517 uint32_t itt; 516 uint32_t itt;
518 517
519 if (hdr->itt != cpu_to_be32(ISCSI_RESERVED_TAG)) { 518 if (hdr->itt != RESERVED_ITT) {
520 if ((hdr->itt & ISCSI_AGE_MASK) != 519 if (((__force u32)hdr->itt & ISCSI_AGE_MASK) !=
521 (session->age << ISCSI_AGE_SHIFT)) { 520 (session->age << ISCSI_AGE_SHIFT)) {
522 printk(KERN_ERR "iscsi: received itt %x expected " 521 printk(KERN_ERR "iscsi: received itt %x expected "
523 "session age (%x)\n", hdr->itt, 522 "session age (%x)\n", (__force u32)hdr->itt,
524 session->age & ISCSI_AGE_MASK); 523 session->age & ISCSI_AGE_MASK);
525 return ISCSI_ERR_BAD_ITT; 524 return ISCSI_ERR_BAD_ITT;
526 } 525 }
527 526
528 if ((hdr->itt & ISCSI_CID_MASK) != 527 if (((__force u32)hdr->itt & ISCSI_CID_MASK) !=
529 (conn->id << ISCSI_CID_SHIFT)) { 528 (conn->id << ISCSI_CID_SHIFT)) {
530 printk(KERN_ERR "iscsi: received itt %x, expected " 529 printk(KERN_ERR "iscsi: received itt %x, expected "
531 "CID (%x)\n", hdr->itt, conn->id); 530 "CID (%x)\n", (__force u32)hdr->itt, conn->id);
532 return ISCSI_ERR_BAD_ITT; 531 return ISCSI_ERR_BAD_ITT;
533 } 532 }
534 itt = hdr->itt & ISCSI_ITT_MASK; 533 itt = get_itt(hdr->itt);
535 } else 534 } else
536 itt = hdr->itt; 535 itt = ~0U;
537 536
538 if (itt < session->cmds_max) { 537 if (itt < session->cmds_max) {
539 ctask = session->cmds[itt]; 538 ctask = session->cmds[itt];
@@ -896,9 +895,8 @@ iscsi_conn_send_generic(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
896 /* 895 /*
897 * pre-format CmdSN for outgoing PDU. 896 * pre-format CmdSN for outgoing PDU.
898 */ 897 */
899 if (hdr->itt != cpu_to_be32(ISCSI_RESERVED_TAG)) { 898 if (hdr->itt != RESERVED_ITT) {
900 hdr->itt = mtask->itt | (conn->id << ISCSI_CID_SHIFT) | 899 hdr->itt = build_itt(mtask->itt, conn->id, session->age);
901 (session->age << ISCSI_AGE_SHIFT);
902 nop->cmdsn = cpu_to_be32(session->cmdsn); 900 nop->cmdsn = cpu_to_be32(session->cmdsn);
903 if (conn->c_stage == ISCSI_CONN_STARTED && 901 if (conn->c_stage == ISCSI_CONN_STARTED &&
904 !(hdr->opcode & ISCSI_OP_IMMEDIATE)) 902 !(hdr->opcode & ISCSI_OP_IMMEDIATE))
@@ -1064,7 +1062,7 @@ static int iscsi_exec_abort_task(struct scsi_cmnd *sc,
1064 1062
1065 spin_lock_bh(&session->lock); 1063 spin_lock_bh(&session->lock);
1066 ctask->mtask = (struct iscsi_mgmt_task *) 1064 ctask->mtask = (struct iscsi_mgmt_task *)
1067 session->mgmt_cmds[(hdr->itt & ISCSI_ITT_MASK) - 1065 session->mgmt_cmds[get_itt(hdr->itt) -
1068 ISCSI_MGMT_ITT_OFFSET]; 1066 ISCSI_MGMT_ITT_OFFSET];
1069 1067
1070 if (conn->tmabort_state == TMABORT_INITIAL) { 1068 if (conn->tmabort_state == TMABORT_INITIAL) {
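
The build_itt()/get_itt() helpers and RESERVED_ITT replace the open-coded shift-and-mask expressions; presumably they are defined in the iSCSI headers roughly along these lines (a sketch, not the verbatim definitions), with the __force casts telling sparse that the initiator task tag is deliberately an opaque, non-byte-swapped cookie the target echoes back:

	#define build_itt(itt, id, age)				\
		((__force itt_t)((itt) |			\
				 ((id)  << ISCSI_CID_SHIFT) |	\
				 ((age) << ISCSI_AGE_SHIFT)))
	#define get_itt(itt)	\
		((__force uint32_t)(itt_t)(itt) & ISCSI_ITT_MASK)
	#define RESERVED_ITT	((__force itt_t)0xffffffff)
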
diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c
index 3586fac9be9a..bcb49021b7e2 100644
--- a/drivers/scsi/mac_esp.c
+++ b/drivers/scsi/mac_esp.c
@@ -351,7 +351,7 @@ int mac_esp_detect(struct scsi_host_template * tpnt)
351 for (chipnum = 0; chipnum < chipspresent; chipnum ++) { 351 for (chipnum = 0; chipnum < chipspresent; chipnum ++) {
352 struct NCR_ESP * esp; 352 struct NCR_ESP * esp;
353 353
354 esp = esp_allocate(tpnt, (void *) NULL); 354 esp = esp_allocate(tpnt, NULL, 0);
355 esp->eregs = (struct ESP_regs *) get_base(chipnum); 355 esp->eregs = (struct ESP_regs *) get_base(chipnum);
356 356
357 esp->dma_irq_p = &esp_dafb_dma_irq_p; 357 esp->dma_irq_p = &esp_dafb_dma_irq_p;
diff --git a/drivers/scsi/mca_53c9x.c b/drivers/scsi/mca_53c9x.c
index 998a8bbc1a4b..d693d0f21395 100644
--- a/drivers/scsi/mca_53c9x.c
+++ b/drivers/scsi/mca_53c9x.c
@@ -122,7 +122,7 @@ static int mca_esp_detect(struct scsi_host_template *tpnt)
122 if ((slot = mca_find_adapter(*id_to_check, 0)) != 122 if ((slot = mca_find_adapter(*id_to_check, 0)) !=
123 MCA_NOTFOUND) 123 MCA_NOTFOUND)
124 { 124 {
125 esp = esp_allocate(tpnt, (void *) NULL); 125 esp = esp_allocate(tpnt, NULL, 0);
126 126
127 pos[0] = mca_read_stored_pos(slot, 2); 127 pos[0] = mca_read_stored_pos(slot, 2);
128 pos[1] = mca_read_stored_pos(slot, 3); 128 pos[1] = mca_read_stored_pos(slot, 3);
diff --git a/drivers/scsi/oktagon_esp.c b/drivers/scsi/oktagon_esp.c
index c116a6ae3c54..26a6d55faf3e 100644
--- a/drivers/scsi/oktagon_esp.c
+++ b/drivers/scsi/oktagon_esp.c
@@ -133,7 +133,7 @@ int oktagon_esp_detect(struct scsi_host_template *tpnt)
133 eregs = (struct ESP_regs *)(address + OKTAGON_ESP_ADDR); 133 eregs = (struct ESP_regs *)(address + OKTAGON_ESP_ADDR);
134 134
135 /* This line was 5 lines lower */ 135 /* This line was 5 lines lower */
136 esp = esp_allocate(tpnt, (void *)board+OKTAGON_ESP_ADDR); 136 esp = esp_allocate(tpnt, (void *)board + OKTAGON_ESP_ADDR, 0);
137 137
138 /* we have to shift the registers only one bit for oktagon */ 138 /* we have to shift the registers only one bit for oktagon */
139 esp->shift = 1; 139 esp->shift = 1;
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index 7d2311067903..bd6bbf61adb8 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -521,10 +521,10 @@ static void osst_init_aux(struct osst_tape * STp, int frame_type, int frame_seq_
521 break; 521 break;
522 default: ; /* probably FILL */ 522 default: ; /* probably FILL */
523 } 523 }
524 aux->filemark_cnt = ntohl(STp->filemark_cnt); 524 aux->filemark_cnt = htonl(STp->filemark_cnt);
525 aux->phys_fm = ntohl(0xffffffff); 525 aux->phys_fm = htonl(0xffffffff);
526 aux->last_mark_ppos = ntohl(STp->last_mark_ppos); 526 aux->last_mark_ppos = htonl(STp->last_mark_ppos);
527 aux->last_mark_lbn = ntohl(STp->last_mark_lbn); 527 aux->last_mark_lbn = htonl(STp->last_mark_lbn);
528} 528}
529 529
530/* 530/*
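
The ntohl() -> htonl() change above is a fix of intent rather than behaviour: on any one machine the two perform the same byte swap, but the aux structure is being written in big-endian on-tape format, so cpu-to-big is the correct direction. With the __be32/__be16 annotations added in osst.h below, sparse can now verify the direction, e.g.:

	aux->filemark_cnt = htonl(STp->filemark_cnt);	/* cpu -> tape */
	cnt = ntohl(aux->filemark_cnt);			/* tape -> cpu */
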
diff --git a/drivers/scsi/osst.h b/drivers/scsi/osst.h
index 1e426f5d0ed8..2cc7b5a1606a 100644
--- a/drivers/scsi/osst.h
+++ b/drivers/scsi/osst.h
@@ -288,11 +288,11 @@ typedef struct {
288#else 288#else
289#error "Please fix <asm/byteorder.h>" 289#error "Please fix <asm/byteorder.h>"
290#endif 290#endif
291 u16 max_speed; /* Maximum speed supported in KBps */ 291 __be16 max_speed; /* Maximum speed supported in KBps */
292 u8 reserved10, reserved11; 292 u8 reserved10, reserved11;
293 u16 ctl; /* Continuous Transfer Limit in blocks */ 293 __be16 ctl; /* Continuous Transfer Limit in blocks */
294 u16 speed; /* Current Speed, in KBps */ 294 __be16 speed; /* Current Speed, in KBps */
295 u16 buffer_size; /* Buffer Size, in 512 bytes */ 295 __be16 buffer_size; /* Buffer Size, in 512 bytes */
296 u8 reserved18, reserved19; 296 u8 reserved18, reserved19;
297} osst_capabilities_page_t; 297} osst_capabilities_page_t;
298 298
@@ -352,8 +352,8 @@ typedef struct {
352 u8 reserved2; 352 u8 reserved2;
353 u8 density; 353 u8 density;
354 u8 reserved3,reserved4; 354 u8 reserved3,reserved4;
355 u16 segtrk; 355 __be16 segtrk;
356 u16 trks; 356 __be16 trks;
357 u8 reserved5,reserved6,reserved7,reserved8,reserved9,reserved10; 357 u8 reserved5,reserved6,reserved7,reserved8,reserved9,reserved10;
358} osst_tape_paramtr_page_t; 358} osst_tape_paramtr_page_t;
359 359
@@ -369,18 +369,18 @@ typedef struct {
369typedef struct os_partition_s { 369typedef struct os_partition_s {
370 __u8 partition_num; 370 __u8 partition_num;
371 __u8 par_desc_ver; 371 __u8 par_desc_ver;
372 __u16 wrt_pass_cntr; 372 __be16 wrt_pass_cntr;
373 __u32 first_frame_ppos; 373 __be32 first_frame_ppos;
374 __u32 last_frame_ppos; 374 __be32 last_frame_ppos;
375 __u32 eod_frame_ppos; 375 __be32 eod_frame_ppos;
376} os_partition_t; 376} os_partition_t;
377 377
378/* 378/*
379 * DAT entry 379 * DAT entry
380 */ 380 */
381typedef struct os_dat_entry_s { 381typedef struct os_dat_entry_s {
382 __u32 blk_sz; 382 __be32 blk_sz;
383 __u16 blk_cnt; 383 __be16 blk_cnt;
384 __u8 flags; 384 __u8 flags;
385 __u8 reserved; 385 __u8 reserved;
386} os_dat_entry_t; 386} os_dat_entry_t;
@@ -412,23 +412,23 @@ typedef struct os_dat_s {
412 * AUX 412 * AUX
413 */ 413 */
414typedef struct os_aux_s { 414typedef struct os_aux_s {
415 __u32 format_id; /* hardware compability AUX is based on */ 415 __be32 format_id; /* hardware compability AUX is based on */
416 char application_sig[4]; /* driver used to write this media */ 416 char application_sig[4]; /* driver used to write this media */
417 __u32 hdwr; /* reserved */ 417 __be32 hdwr; /* reserved */
418 __u32 update_frame_cntr; /* for configuration frame */ 418 __be32 update_frame_cntr; /* for configuration frame */
419 __u8 frame_type; 419 __u8 frame_type;
420 __u8 frame_type_reserved; 420 __u8 frame_type_reserved;
421 __u8 reserved_18_19[2]; 421 __u8 reserved_18_19[2];
422 os_partition_t partition; 422 os_partition_t partition;
423 __u8 reserved_36_43[8]; 423 __u8 reserved_36_43[8];
424 __u32 frame_seq_num; 424 __be32 frame_seq_num;
425 __u32 logical_blk_num_high; 425 __be32 logical_blk_num_high;
426 __u32 logical_blk_num; 426 __be32 logical_blk_num;
427 os_dat_t dat; 427 os_dat_t dat;
428 __u8 reserved188_191[4]; 428 __u8 reserved188_191[4];
429 __u32 filemark_cnt; 429 __be32 filemark_cnt;
430 __u32 phys_fm; 430 __be32 phys_fm;
431 __u32 last_mark_ppos; 431 __be32 last_mark_ppos;
432 __u8 reserved204_223[20]; 432 __u8 reserved204_223[20];
433 433
434 /* 434 /*
@@ -436,8 +436,8 @@ typedef struct os_aux_s {
436 * 436 *
437 * Linux specific fields: 437 * Linux specific fields:
438 */ 438 */
439 __u32 next_mark_ppos; /* when known, points to next marker */ 439 __be32 next_mark_ppos; /* when known, points to next marker */
440 __u32 last_mark_lbn; /* storing log_blk_num of last mark is extends ADR spec */ 440 __be32 last_mark_lbn; /* storing log_blk_num of last mark is extends ADR spec */
441 __u8 linux_specific[24]; 441 __u8 linux_specific[24];
442 442
443 __u8 reserved_256_511[256]; 443 __u8 reserved_256_511[256];
@@ -450,19 +450,19 @@ typedef struct os_fm_tab_s {
450 __u8 reserved_1; 450 __u8 reserved_1;
451 __u8 fm_tab_ent_sz; 451 __u8 fm_tab_ent_sz;
452 __u8 reserved_3; 452 __u8 reserved_3;
453 __u16 fm_tab_ent_cnt; 453 __be16 fm_tab_ent_cnt;
454 __u8 reserved6_15[10]; 454 __u8 reserved6_15[10];
455 __u32 fm_tab_ent[OS_FM_TAB_MAX]; 455 __be32 fm_tab_ent[OS_FM_TAB_MAX];
456} os_fm_tab_t; 456} os_fm_tab_t;
457 457
458typedef struct os_ext_trk_ey_s { 458typedef struct os_ext_trk_ey_s {
459 __u8 et_part_num; 459 __u8 et_part_num;
460 __u8 fmt; 460 __u8 fmt;
461 __u16 fm_tab_off; 461 __be16 fm_tab_off;
462 __u8 reserved4_7[4]; 462 __u8 reserved4_7[4];
463 __u32 last_hlb_hi; 463 __be32 last_hlb_hi;
464 __u32 last_hlb; 464 __be32 last_hlb;
465 __u32 last_pp; 465 __be32 last_pp;
466 __u8 reserved20_31[12]; 466 __u8 reserved20_31[12];
467} os_ext_trk_ey_t; 467} os_ext_trk_ey_t;
468 468
@@ -479,17 +479,17 @@ typedef struct os_header_s {
479 char ident_str[8]; 479 char ident_str[8];
480 __u8 major_rev; 480 __u8 major_rev;
481 __u8 minor_rev; 481 __u8 minor_rev;
482 __u16 ext_trk_tb_off; 482 __be16 ext_trk_tb_off;
483 __u8 reserved12_15[4]; 483 __u8 reserved12_15[4];
484 __u8 pt_par_num; 484 __u8 pt_par_num;
485 __u8 pt_reserved1_3[3]; 485 __u8 pt_reserved1_3[3];
486 os_partition_t partition[16]; 486 os_partition_t partition[16];
487 __u32 cfg_col_width; 487 __be32 cfg_col_width;
488 __u32 dat_col_width; 488 __be32 dat_col_width;
489 __u32 qfa_col_width; 489 __be32 qfa_col_width;
490 __u8 cartridge[16]; 490 __u8 cartridge[16];
491 __u8 reserved304_511[208]; 491 __u8 reserved304_511[208];
492 __u32 old_filemark_list[16680/4]; /* in ADR 1.4 __u8 track_table[16680] */ 492 __be32 old_filemark_list[16680/4]; /* in ADR 1.4 __u8 track_table[16680] */
493 os_ext_trk_tb_t ext_track_tb; 493 os_ext_trk_tb_t ext_track_tb;
494 __u8 reserved17272_17735[464]; 494 __u8 reserved17272_17735[464];
495 os_fm_tab_t dat_fm_tab; 495 os_fm_tab_t dat_fm_tab;
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 4249e52a5592..6f4cf2dd2f4a 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -418,7 +418,6 @@ struct scsi_qla_host {
418 * concurrently. 418 * concurrently.
419 */ 419 */
420 struct mutex mbox_sem; 420 struct mutex mbox_sem;
421 wait_queue_head_t mailbox_wait_queue;
422 421
423 /* temporary mailbox status registers */ 422 /* temporary mailbox status registers */
424 volatile uint8_t mbox_status_count; 423 volatile uint8_t mbox_status_count;
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index 2122967bbf0b..e021eb5db2b2 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -76,4 +76,5 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host * ha,
76extern int ql4xextended_error_logging; 76extern int ql4xextended_error_logging;
77extern int ql4xdiscoverywait; 77extern int ql4xdiscoverywait;
78extern int ql4xdontresethba; 78extern int ql4xdontresethba;
79extern int ql4_mod_unload;
79#endif /* _QLA4x_GBL_H */ 80#endif /* _QLA4x_GBL_H */
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index cc210f297a78..b907b06d72ab 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -958,25 +958,25 @@ static int qla4xxx_start_firmware_from_flash(struct scsi_qla_host *ha)
958 return status; 958 return status;
959} 959}
960 960
961int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a) 961int ql4xxx_lock_drvr_wait(struct scsi_qla_host *ha)
962{ 962{
963#define QL4_LOCK_DRVR_WAIT 300 963#define QL4_LOCK_DRVR_WAIT 30
964#define QL4_LOCK_DRVR_SLEEP 100 964#define QL4_LOCK_DRVR_SLEEP 1
965 965
966 int drvr_wait = QL4_LOCK_DRVR_WAIT; 966 int drvr_wait = QL4_LOCK_DRVR_WAIT;
967 while (drvr_wait) { 967 while (drvr_wait) {
968 if (ql4xxx_lock_drvr(a) == 0) { 968 if (ql4xxx_lock_drvr(ha) == 0) {
969 msleep(QL4_LOCK_DRVR_SLEEP); 969 ssleep(QL4_LOCK_DRVR_SLEEP);
970 if (drvr_wait) { 970 if (drvr_wait) {
971 DEBUG2(printk("scsi%ld: %s: Waiting for " 971 DEBUG2(printk("scsi%ld: %s: Waiting for "
972 "Global Init Semaphore...n", 972 "Global Init Semaphore(%d)...n",
973 a->host_no, 973 ha->host_no,
974 __func__)); 974 __func__, drvr_wait));
975 } 975 }
976 drvr_wait -= QL4_LOCK_DRVR_SLEEP; 976 drvr_wait -= QL4_LOCK_DRVR_SLEEP;
977 } else { 977 } else {
978 DEBUG2(printk("scsi%ld: %s: Global Init Semaphore " 978 DEBUG2(printk("scsi%ld: %s: Global Init Semaphore "
979 "acquired.n", a->host_no, __func__)); 979 "acquired.n", ha->host_no, __func__));
980 return QLA_SUCCESS; 980 return QLA_SUCCESS;
981 } 981 }
982 } 982 }
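
The constant change above also fixes the loop's arithmetic: the old code decremented drvr_wait (300) by QL4_LOCK_DRVR_SLEEP (100) per msleep(100), i.e. 3 iterations x 100 ms, roughly 0.3 s of total waiting; the new constants give 30 iterations of ssleep(1), i.e. up to the 30 seconds the timeout apparently intended.
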
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index ef975e0dc87f..35b9e36a0e8d 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -433,7 +433,6 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
433 readl(&ha->reg->mailbox[i]); 433 readl(&ha->reg->mailbox[i]);
434 434
435 set_bit(AF_MBOX_COMMAND_DONE, &ha->flags); 435 set_bit(AF_MBOX_COMMAND_DONE, &ha->flags);
436 wake_up(&ha->mailbox_wait_queue);
437 } 436 }
438 } else if (mbox_status >> 12 == MBOX_ASYNC_EVENT_STATUS) { 437 } else if (mbox_status >> 12 == MBOX_ASYNC_EVENT_STATUS) {
439 /* Immediately process the AENs that don't require much work. 438 /* Immediately process the AENs that don't require much work.
@@ -686,7 +685,8 @@ irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id)
686 &ha->reg->ctrl_status); 685 &ha->reg->ctrl_status);
687 readl(&ha->reg->ctrl_status); 686 readl(&ha->reg->ctrl_status);
688 687
689 set_bit(DPC_RESET_HA_INTR, &ha->dpc_flags); 688 if (!ql4_mod_unload)
689 set_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
690 690
691 break; 691 break;
692 } else if (intr_status & INTR_PENDING) { 692 } else if (intr_status & INTR_PENDING) {
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index b721dc5dd711..7f28657eef3f 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -29,18 +29,30 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
29 u_long wait_count; 29 u_long wait_count;
30 uint32_t intr_status; 30 uint32_t intr_status;
31 unsigned long flags = 0; 31 unsigned long flags = 0;
32 DECLARE_WAITQUEUE(wait, current);
33
34 mutex_lock(&ha->mbox_sem);
35
36 /* Mailbox code active */
37 set_bit(AF_MBOX_COMMAND, &ha->flags);
38 32
39 /* Make sure that pointers are valid */ 33 /* Make sure that pointers are valid */
40 if (!mbx_cmd || !mbx_sts) { 34 if (!mbx_cmd || !mbx_sts) {
41 DEBUG2(printk("scsi%ld: %s: Invalid mbx_cmd or mbx_sts " 35 DEBUG2(printk("scsi%ld: %s: Invalid mbx_cmd or mbx_sts "
42 "pointer\n", ha->host_no, __func__)); 36 "pointer\n", ha->host_no, __func__));
43 goto mbox_exit; 37 return status;
38 }
39 /* Mailbox code active */
40 wait_count = MBOX_TOV * 100;
41
42 while (wait_count--) {
43 mutex_lock(&ha->mbox_sem);
44 if (!test_bit(AF_MBOX_COMMAND, &ha->flags)) {
45 set_bit(AF_MBOX_COMMAND, &ha->flags);
46 mutex_unlock(&ha->mbox_sem);
47 break;
48 }
49 mutex_unlock(&ha->mbox_sem);
50 if (!wait_count) {
51 DEBUG2(printk("scsi%ld: %s: mbox_sem failed\n",
52 ha->host_no, __func__));
53 return status;
54 }
55 msleep(10);
44 } 56 }
45 57
46 /* To prevent overwriting mailbox registers for a command that has 58 /* To prevent overwriting mailbox registers for a command that has
@@ -73,8 +85,6 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
73 spin_unlock_irqrestore(&ha->hardware_lock, flags); 85 spin_unlock_irqrestore(&ha->hardware_lock, flags);
74 86
75 /* Wait for completion */ 87 /* Wait for completion */
76 set_current_state(TASK_UNINTERRUPTIBLE);
77 add_wait_queue(&ha->mailbox_wait_queue, &wait);
78 88
79 /* 89 /*
80 * If we don't want status, don't wait for the mailbox command to 90 * If we don't want status, don't wait for the mailbox command to
@@ -83,8 +93,6 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
83 */ 93 */
84 if (outCount == 0) { 94 if (outCount == 0) {
85 status = QLA_SUCCESS; 95 status = QLA_SUCCESS;
86 set_current_state(TASK_RUNNING);
87 remove_wait_queue(&ha->mailbox_wait_queue, &wait);
88 goto mbox_exit; 96 goto mbox_exit;
89 } 97 }
90 /* Wait for command to complete */ 98 /* Wait for command to complete */
@@ -108,8 +116,6 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
108 spin_unlock_irqrestore(&ha->hardware_lock, flags); 116 spin_unlock_irqrestore(&ha->hardware_lock, flags);
109 msleep(10); 117 msleep(10);
110 } 118 }
111 set_current_state(TASK_RUNNING);
112 remove_wait_queue(&ha->mailbox_wait_queue, &wait);
113 119
114 /* Check for mailbox timeout. */ 120 /* Check for mailbox timeout. */
115 if (!test_bit(AF_MBOX_COMMAND_DONE, &ha->flags)) { 121 if (!test_bit(AF_MBOX_COMMAND_DONE, &ha->flags)) {
@@ -155,9 +161,10 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
155 spin_unlock_irqrestore(&ha->hardware_lock, flags); 161 spin_unlock_irqrestore(&ha->hardware_lock, flags);
156 162
157mbox_exit: 163mbox_exit:
164 mutex_lock(&ha->mbox_sem);
158 clear_bit(AF_MBOX_COMMAND, &ha->flags); 165 clear_bit(AF_MBOX_COMMAND, &ha->flags);
159 clear_bit(AF_MBOX_COMMAND_DONE, &ha->flags);
160 mutex_unlock(&ha->mbox_sem); 166 mutex_unlock(&ha->mbox_sem);
167 clear_bit(AF_MBOX_COMMAND_DONE, &ha->flags);
161 168
162 return status; 169 return status;
163} 170}
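
A condensed sketch of the new serialization above: the AF_MBOX_COMMAND flag is the real lock, and mbox_sem only makes testing and setting it atomic; the driver bounds the poll with wait_count (MBOX_TOV * 100 polls of 10 ms):

	for (;;) {
		mutex_lock(&ha->mbox_sem);
		if (!test_bit(AF_MBOX_COMMAND, &ha->flags)) {
			set_bit(AF_MBOX_COMMAND, &ha->flags);
			mutex_unlock(&ha->mbox_sem);
			break;			/* we own the mailbox now */
		}
		mutex_unlock(&ha->mbox_sem);
		msleep(10);			/* owner active; poll again */
	}
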
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 9ef693c8809a..81fb7bd44f01 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -40,6 +40,8 @@ MODULE_PARM_DESC(ql4xextended_error_logging,
40 "Option to enable extended error logging, " 40 "Option to enable extended error logging, "
41 "Default is 0 - no logging, 1 - debug logging"); 41 "Default is 0 - no logging, 1 - debug logging");
42 42
43int ql4_mod_unload = 0;
44
43/* 45/*
44 * SCSI host template entry points 46 * SCSI host template entry points
45 */ 47 */
@@ -422,6 +424,9 @@ static int qla4xxx_queuecommand(struct scsi_cmnd *cmd,
422 goto qc_host_busy; 424 goto qc_host_busy;
423 } 425 }
424 426
427 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags))
428 goto qc_host_busy;
429
425 spin_unlock_irq(ha->host->host_lock); 430 spin_unlock_irq(ha->host->host_lock);
426 431
427 srb = qla4xxx_get_new_srb(ha, ddb_entry, cmd, done); 432 srb = qla4xxx_get_new_srb(ha, ddb_entry, cmd, done);
@@ -707,16 +712,12 @@ static int qla4xxx_cmd_wait(struct scsi_qla_host *ha)
707 return stat; 712 return stat;
708} 713}
709 714
710/** 715static void qla4xxx_hw_reset(struct scsi_qla_host *ha)
711 * qla4xxx_soft_reset - performs soft reset.
712 * @ha: Pointer to host adapter structure.
713 **/
714int qla4xxx_soft_reset(struct scsi_qla_host *ha)
715{ 716{
716 uint32_t max_wait_time;
717 unsigned long flags = 0;
718 int status = QLA_ERROR;
719 uint32_t ctrl_status; 717 uint32_t ctrl_status;
718 unsigned long flags = 0;
719
720 DEBUG2(printk(KERN_ERR "scsi%ld: %s\n", ha->host_no, __func__));
720 721
721 spin_lock_irqsave(&ha->hardware_lock, flags); 722 spin_lock_irqsave(&ha->hardware_lock, flags);
722 723
@@ -733,6 +734,20 @@ int qla4xxx_soft_reset(struct scsi_qla_host *ha)
733 readl(&ha->reg->ctrl_status); 734 readl(&ha->reg->ctrl_status);
734 735
735 spin_unlock_irqrestore(&ha->hardware_lock, flags); 736 spin_unlock_irqrestore(&ha->hardware_lock, flags);
737}
738
739/**
740 * qla4xxx_soft_reset - performs soft reset.
741 * @ha: Pointer to host adapter structure.
742 **/
743int qla4xxx_soft_reset(struct scsi_qla_host *ha)
744{
745 uint32_t max_wait_time;
746 unsigned long flags = 0;
747 int status = QLA_ERROR;
748 uint32_t ctrl_status;
749
750 qla4xxx_hw_reset(ha);
736 751
737 /* Wait until the Network Reset Intr bit is cleared */ 752 /* Wait until the Network Reset Intr bit is cleared */
738 max_wait_time = RESET_INTR_TOV; 753 max_wait_time = RESET_INTR_TOV;
@@ -966,10 +981,12 @@ static void qla4xxx_do_dpc(struct work_struct *work)
966 struct scsi_qla_host *ha = 981 struct scsi_qla_host *ha =
967 container_of(work, struct scsi_qla_host, dpc_work); 982 container_of(work, struct scsi_qla_host, dpc_work);
968 struct ddb_entry *ddb_entry, *dtemp; 983 struct ddb_entry *ddb_entry, *dtemp;
984 int status = QLA_ERROR;
969 985
970 DEBUG2(printk("scsi%ld: %s: DPC handler waking up." 986 DEBUG2(printk("scsi%ld: %s: DPC handler waking up."
971 "flags = 0x%08lx, dpc_flags = 0x%08lx\n", 987 "flags = 0x%08lx, dpc_flags = 0x%08lx ctrl_stat = 0x%08x\n",
972 ha->host_no, __func__, ha->flags, ha->dpc_flags)); 988 ha->host_no, __func__, ha->flags, ha->dpc_flags,
989 readw(&ha->reg->ctrl_status)));
973 990
974 /* Initialization not yet finished. Don't do anything yet. */ 991 /* Initialization not yet finished. Don't do anything yet. */
975 if (!test_bit(AF_INIT_DONE, &ha->flags)) 992 if (!test_bit(AF_INIT_DONE, &ha->flags))
@@ -983,31 +1000,28 @@ static void qla4xxx_do_dpc(struct work_struct *work)
983 test_bit(DPC_RESET_HA, &ha->dpc_flags)) 1000 test_bit(DPC_RESET_HA, &ha->dpc_flags))
984 qla4xxx_recover_adapter(ha, PRESERVE_DDB_LIST); 1001 qla4xxx_recover_adapter(ha, PRESERVE_DDB_LIST);
985 1002
986 if (test_and_clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) { 1003 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
987 uint8_t wait_time = RESET_INTR_TOV; 1004 uint8_t wait_time = RESET_INTR_TOV;
988 unsigned long flags = 0;
989
990 qla4xxx_flush_active_srbs(ha);
991 1005
992 spin_lock_irqsave(&ha->hardware_lock, flags);
993 while ((readw(&ha->reg->ctrl_status) & 1006 while ((readw(&ha->reg->ctrl_status) &
994 (CSR_SOFT_RESET | CSR_FORCE_SOFT_RESET)) != 0) { 1007 (CSR_SOFT_RESET | CSR_FORCE_SOFT_RESET)) != 0) {
995 if (--wait_time == 0) 1008 if (--wait_time == 0)
996 break; 1009 break;
997
998 spin_unlock_irqrestore(&ha->hardware_lock,
999 flags);
1000
1001 msleep(1000); 1010 msleep(1000);
1002
1003 spin_lock_irqsave(&ha->hardware_lock, flags);
1004 } 1011 }
1005 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1006
1007 if (wait_time == 0) 1012 if (wait_time == 0)
1008 DEBUG2(printk("scsi%ld: %s: SR|FSR " 1013 DEBUG2(printk("scsi%ld: %s: SR|FSR "
1009 "bit not cleared-- resetting\n", 1014 "bit not cleared-- resetting\n",
1010 ha->host_no, __func__)); 1015 ha->host_no, __func__));
1016 qla4xxx_flush_active_srbs(ha);
1017 if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS) {
1018 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
1019 status = qla4xxx_initialize_adapter(ha,
1020 PRESERVE_DDB_LIST);
1021 }
1022 clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
1023 if (status == QLA_SUCCESS)
1024 qla4xxx_enable_intrs(ha);
1011 } 1025 }
1012 } 1026 }
1013 1027
@@ -1062,7 +1076,7 @@ static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
1062 1076
1063 /* Issue Soft Reset to put firmware in unknown state */ 1077 /* Issue Soft Reset to put firmware in unknown state */
1064 if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS) 1078 if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS)
1065 qla4xxx_soft_reset(ha); 1079 qla4xxx_hw_reset(ha);
1066 1080
1067 /* Remove timer thread, if present */ 1081 /* Remove timer thread, if present */
1068 if (ha->timer_active) 1082 if (ha->timer_active)
@@ -1198,7 +1212,6 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
1198 INIT_LIST_HEAD(&ha->free_srb_q); 1212 INIT_LIST_HEAD(&ha->free_srb_q);
1199 1213
1200 mutex_init(&ha->mbox_sem); 1214 mutex_init(&ha->mbox_sem);
1201 init_waitqueue_head(&ha->mailbox_wait_queue);
1202 1215
1203 spin_lock_init(&ha->hardware_lock); 1216 spin_lock_init(&ha->hardware_lock);
1204 1217
@@ -1665,6 +1678,7 @@ no_srp_cache:
1665 1678
1666static void __exit qla4xxx_module_exit(void) 1679static void __exit qla4xxx_module_exit(void)
1667{ 1680{
1681 ql4_mod_unload = 1;
1668 pci_unregister_driver(&qla4xxx_pci_driver); 1682 pci_unregister_driver(&qla4xxx_pci_driver);
1669 iscsi_unregister_transport(&qla4xxx_iscsi_transport); 1683 iscsi_unregister_transport(&qla4xxx_iscsi_transport);
1670 kmem_cache_destroy(srb_cachep); 1684 kmem_cache_destroy(srb_cachep);
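
The ql4_mod_unload flag introduced here closes a shutdown race: qla4xxx_module_exit() sets it before pci_unregister_driver(), so an interrupt that reports a soft reset while the module is unloading (see the ql4_isr.c hunk above) no longer sets DPC_RESET_HA_INTR and queues recovery work against an adapter that is being torn down.
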
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index 454e19c8ad68..e5183a697d1f 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
7 7
8#define QLA4XXX_DRIVER_VERSION "5.00.07-k" 8#define QLA4XXX_DRIVER_VERSION "5.00.07-k1"
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 8160c00d1092..a43b9ec3aefd 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -1453,6 +1453,12 @@ struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel,
1453 struct device *parent = &shost->shost_gendev; 1453 struct device *parent = &shost->shost_gendev;
1454 struct scsi_target *starget; 1454 struct scsi_target *starget;
1455 1455
1456 if (strncmp(scsi_scan_type, "none", 4) == 0)
1457 return ERR_PTR(-ENODEV);
1458
1459 if (!shost->async_scan)
1460 scsi_complete_async_scans();
1461
1456 starget = scsi_alloc_target(parent, channel, id); 1462 starget = scsi_alloc_target(parent, channel, id);
1457 if (!starget) 1463 if (!starget)
1458 return ERR_PTR(-ENOMEM); 1464 return ERR_PTR(-ENOMEM);
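
With the two checks added above, __scsi_add_device() now honours the global scan policy and serializes against async scanning. Sketch of the caller-side effect (illustrative, not from this diff):

	sdev = __scsi_add_device(shost, 0, id, lun, NULL);
	if (IS_ERR(sdev))
		return PTR_ERR(sdev);	/* -ENODEV when scanning is off */
	scsi_device_put(sdev);
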
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 978bfc1e0c6a..b781a90d6699 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1647,16 +1647,6 @@ static int sd_probe(struct device *dev)
1647 if (error) 1647 if (error)
1648 goto out_put; 1648 goto out_put;
1649 1649
1650 class_device_initialize(&sdkp->cdev);
1651 sdkp->cdev.dev = &sdp->sdev_gendev;
1652 sdkp->cdev.class = &sd_disk_class;
1653 strncpy(sdkp->cdev.class_id, sdp->sdev_gendev.bus_id, BUS_ID_SIZE);
1654
1655 if (class_device_add(&sdkp->cdev))
1656 goto out_put;
1657
1658 get_device(&sdp->sdev_gendev);
1659
1660 sdkp->device = sdp; 1650 sdkp->device = sdp;
1661 sdkp->driver = &sd_template; 1651 sdkp->driver = &sd_template;
1662 sdkp->disk = gd; 1652 sdkp->disk = gd;
@@ -1670,6 +1660,16 @@ static int sd_probe(struct device *dev)
1670 sdp->timeout = SD_MOD_TIMEOUT; 1660 sdp->timeout = SD_MOD_TIMEOUT;
1671 } 1661 }
1672 1662
1663 class_device_initialize(&sdkp->cdev);
1664 sdkp->cdev.dev = &sdp->sdev_gendev;
1665 sdkp->cdev.class = &sd_disk_class;
1666 strncpy(sdkp->cdev.class_id, sdp->sdev_gendev.bus_id, BUS_ID_SIZE);
1667
1668 if (class_device_add(&sdkp->cdev))
1669 goto out_put;
1670
1671 get_device(&sdp->sdev_gendev);
1672
1673 gd->major = sd_major((index & 0xf0) >> 4); 1673 gd->major = sd_major((index & 0xf0) >> 4);
1674 gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00); 1674 gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
1675 gd->minors = 16; 1675 gd->minors = 16;
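
The sd_probe() reorder above is an initialization-order fix: class_device_add() makes the sd_disk_class sysfs attributes visible, and those attributes dereference sdkp fields, so registering the class device before sdkp->device and friends were filled in left a window where a sysfs read could observe a half-initialized scsi_disk. Registering after the field setup closes that window.
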
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index fba8b204e310..16e279be4a3e 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -2819,15 +2819,18 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
2819 2819
2820 if (cmd_in == MTWEOF && 2820 if (cmd_in == MTWEOF &&
2821 cmdstatp->have_sense && 2821 cmdstatp->have_sense &&
2822 (cmdstatp->flags & SENSE_EOM) && 2822 (cmdstatp->flags & SENSE_EOM)) {
2823 (cmdstatp->sense_hdr.sense_key == NO_SENSE || 2823 if (cmdstatp->sense_hdr.sense_key == NO_SENSE ||
2824 cmdstatp->sense_hdr.sense_key == RECOVERED_ERROR) && 2824 cmdstatp->sense_hdr.sense_key == RECOVERED_ERROR) {
2825 undone == 0) { 2825 ioctl_result = 0; /* EOF(s) written successfully at EOM */
2826 ioctl_result = 0; /* EOF written successfully at EOM */ 2826 STps->eof = ST_NOEOF;
2827 if (fileno >= 0) 2827 } else { /* Writing EOF(s) failed */
2828 fileno++; 2828 if (fileno >= 0)
2829 fileno -= undone;
2830 if (undone < arg)
2831 STps->eof = ST_NOEOF;
2832 }
2829 STps->drv_file = fileno; 2833 STps->drv_file = fileno;
2830 STps->eof = ST_NOEOF;
2831 } else if ((cmd_in == MTFSF) || (cmd_in == MTFSFM)) { 2834 } else if ((cmd_in == MTFSF) || (cmd_in == MTFSFM)) {
2832 if (fileno >= 0) 2835 if (fileno >= 0)
2833 STps->drv_file = fileno - undone; 2836 STps->drv_file = fileno - undone;
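
A worked example of the MTWEOF arithmetic above, assuming fileno was advanced by arg when the command was built: a request for arg = 5 filemarks that hits end-of-medium after writing 3 leaves undone = 2; the failure branch computes drv_file = fileno - undone, i.e. the 3 marks actually on tape, and since undone < arg at least one EOF was written, so eof stays ST_NOEOF. The old code only handled the clean undone == 0 case and left drv_file stale on a partial write.
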
diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c
index 6b60536ac92b..80fb3f88af2e 100644
--- a/drivers/scsi/sun3x_esp.c
+++ b/drivers/scsi/sun3x_esp.c
@@ -53,7 +53,7 @@ int sun3x_esp_detect(struct scsi_host_template *tpnt)
53 struct ConfigDev *esp_dev; 53 struct ConfigDev *esp_dev;
54 54
55 esp_dev = 0; 55 esp_dev = 0;
56 esp = esp_allocate(tpnt, (void *) esp_dev); 56 esp = esp_allocate(tpnt, esp_dev, 0);
57 57
58 /* Do command transfer with DMA */ 58 /* Do command transfer with DMA */
59 esp->do_pio_cmds = 0; 59 esp->do_pio_cmds = 0;
diff --git a/drivers/serial/cpm_uart/cpm_uart_cpm1.c b/drivers/serial/cpm_uart/cpm_uart_cpm1.c
index 08e55fdc882a..925fb607d8c4 100644
--- a/drivers/serial/cpm_uart/cpm_uart_cpm1.c
+++ b/drivers/serial/cpm_uart/cpm_uart_cpm1.c
@@ -40,6 +40,7 @@
40 40
41#include <asm/io.h> 41#include <asm/io.h>
42#include <asm/irq.h> 42#include <asm/irq.h>
43#include <asm/fs_pd.h>
43 44
44#include <linux/serial_core.h> 45#include <linux/serial_core.h>
45#include <linux/kernel.h> 46#include <linux/kernel.h>
@@ -145,7 +146,7 @@ int cpm_uart_allocbuf(struct uart_cpm_port *pinfo, unsigned int is_con)
145 /* was hostalloc but changed cause it blows away the */ 146 /* was hostalloc but changed cause it blows away the */
146 /* large tlb mapping when pinning the kernel area */ 147 /* large tlb mapping when pinning the kernel area */
147 mem_addr = (u8 *) cpm_dpram_addr(cpm_dpalloc(memsz, 8)); 148 mem_addr = (u8 *) cpm_dpram_addr(cpm_dpalloc(memsz, 8));
148 dma_addr = (u32)mem_addr; 149 dma_addr = (u32)cpm_dpram_phys(mem_addr);
149 } else 150 } else
150 mem_addr = dma_alloc_coherent(NULL, memsz, &dma_addr, 151 mem_addr = dma_alloc_coherent(NULL, memsz, &dma_addr,
151 GFP_KERNEL); 152 GFP_KERNEL);
@@ -205,7 +206,7 @@ int __init cpm_uart_init_portdesc(void)
205 (unsigned long)&cpmp->cp_smc[0]; 206 (unsigned long)&cpmp->cp_smc[0];
206 cpm_uart_ports[UART_SMC1].smcp->smc_smcm |= (SMCM_RX | SMCM_TX); 207 cpm_uart_ports[UART_SMC1].smcp->smc_smcm |= (SMCM_RX | SMCM_TX);
207 cpm_uart_ports[UART_SMC1].smcp->smc_smcmr &= ~(SMCMR_REN | SMCMR_TEN); 208 cpm_uart_ports[UART_SMC1].smcp->smc_smcmr &= ~(SMCMR_REN | SMCMR_TEN);
208 cpm_uart_ports[UART_SMC1].port.uartclk = (((bd_t *) __res)->bi_intfreq); 209 cpm_uart_ports[UART_SMC1].port.uartclk = uart_clock();
209 cpm_uart_port_map[cpm_uart_nr++] = UART_SMC1; 210 cpm_uart_port_map[cpm_uart_nr++] = UART_SMC1;
210#endif 211#endif
211 212
@@ -217,7 +218,7 @@ int __init cpm_uart_init_portdesc(void)
217 (unsigned long)&cpmp->cp_smc[1]; 218 (unsigned long)&cpmp->cp_smc[1];
218 cpm_uart_ports[UART_SMC2].smcp->smc_smcm |= (SMCM_RX | SMCM_TX); 219 cpm_uart_ports[UART_SMC2].smcp->smc_smcm |= (SMCM_RX | SMCM_TX);
219 cpm_uart_ports[UART_SMC2].smcp->smc_smcmr &= ~(SMCMR_REN | SMCMR_TEN); 220 cpm_uart_ports[UART_SMC2].smcp->smc_smcmr &= ~(SMCMR_REN | SMCMR_TEN);
220 cpm_uart_ports[UART_SMC2].port.uartclk = (((bd_t *) __res)->bi_intfreq); 221 cpm_uart_ports[UART_SMC2].port.uartclk = uart_clock();
221 cpm_uart_port_map[cpm_uart_nr++] = UART_SMC2; 222 cpm_uart_port_map[cpm_uart_nr++] = UART_SMC2;
222#endif 223#endif
223 224
@@ -231,7 +232,7 @@ int __init cpm_uart_init_portdesc(void)
231 ~(UART_SCCM_TX | UART_SCCM_RX); 232 ~(UART_SCCM_TX | UART_SCCM_RX);
232 cpm_uart_ports[UART_SCC1].sccp->scc_gsmrl &= 233 cpm_uart_ports[UART_SCC1].sccp->scc_gsmrl &=
233 ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT); 234 ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT);
234 cpm_uart_ports[UART_SCC1].port.uartclk = (((bd_t *) __res)->bi_intfreq); 235 cpm_uart_ports[UART_SCC1].port.uartclk = uart_clock();
235 cpm_uart_port_map[cpm_uart_nr++] = UART_SCC1; 236 cpm_uart_port_map[cpm_uart_nr++] = UART_SCC1;
236#endif 237#endif
237 238
@@ -245,7 +246,7 @@ int __init cpm_uart_init_portdesc(void)
245 ~(UART_SCCM_TX | UART_SCCM_RX); 246 ~(UART_SCCM_TX | UART_SCCM_RX);
246 cpm_uart_ports[UART_SCC2].sccp->scc_gsmrl &= 247 cpm_uart_ports[UART_SCC2].sccp->scc_gsmrl &=
247 ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT); 248 ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT);
248 cpm_uart_ports[UART_SCC2].port.uartclk = (((bd_t *) __res)->bi_intfreq); 249 cpm_uart_ports[UART_SCC2].port.uartclk = uart_clock();
249 cpm_uart_port_map[cpm_uart_nr++] = UART_SCC2; 250 cpm_uart_port_map[cpm_uart_nr++] = UART_SCC2;
250#endif 251#endif
251 252
@@ -259,7 +260,7 @@ int __init cpm_uart_init_portdesc(void)
259 ~(UART_SCCM_TX | UART_SCCM_RX); 260 ~(UART_SCCM_TX | UART_SCCM_RX);
260 cpm_uart_ports[UART_SCC3].sccp->scc_gsmrl &= 261 cpm_uart_ports[UART_SCC3].sccp->scc_gsmrl &=
261 ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT); 262 ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT);
262 cpm_uart_ports[UART_SCC3].port.uartclk = (((bd_t *) __res)->bi_intfreq); 263 cpm_uart_ports[UART_SCC3].port.uartclk = uart_clock();
263 cpm_uart_port_map[cpm_uart_nr++] = UART_SCC3; 264 cpm_uart_port_map[cpm_uart_nr++] = UART_SCC3;
264#endif 265#endif
265 266
@@ -273,7 +274,7 @@ int __init cpm_uart_init_portdesc(void)
273 ~(UART_SCCM_TX | UART_SCCM_RX); 274 ~(UART_SCCM_TX | UART_SCCM_RX);
274 cpm_uart_ports[UART_SCC4].sccp->scc_gsmrl &= 275 cpm_uart_ports[UART_SCC4].sccp->scc_gsmrl &=
275 ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT); 276 ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT);
276 cpm_uart_ports[UART_SCC4].port.uartclk = (((bd_t *) __res)->bi_intfreq); 277 cpm_uart_ports[UART_SCC4].port.uartclk = uart_clock();
277 cpm_uart_port_map[cpm_uart_nr++] = UART_SCC4; 278 cpm_uart_port_map[cpm_uart_nr++] = UART_SCC4;
278#endif 279#endif
279 return 0; 280 return 0;
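The dma_addr assignment is the functional fix in this file: cpm_dpram_addr() hands back a kernel virtual address, and the old code passed that straight to the hardware as if it were a bus address. Condensed to the two lines that matter (a sketch; cpm_dpram_phys() is supplied by the 8xx platform code, not defined in this patch):

	mem_addr = (u8 *)cpm_dpram_addr(cpm_dpalloc(memsz, 8));
	dma_addr = (u32)mem_addr;                  /* old: virtual address leaked to hardware */
	dma_addr = (u32)cpm_dpram_phys(mem_addr);  /* new: translated to a real bus address */

The uartclk hunks make the same move at the API level: uart_clock() hides the board-information access that (((bd_t *) __res)->bi_intfreq) open-coded at every port.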
diff --git a/drivers/serial/cpm_uart/cpm_uart_cpm1.h b/drivers/serial/cpm_uart/cpm_uart_cpm1.h
index 5eb49ea63bfe..a99e45e2b6d8 100644
--- a/drivers/serial/cpm_uart/cpm_uart_cpm1.h
+++ b/drivers/serial/cpm_uart/cpm_uart_cpm1.h
@@ -20,9 +20,6 @@
20#define SCC3_IRQ (CPM_IRQ_OFFSET + CPMVEC_SCC3) 20#define SCC3_IRQ (CPM_IRQ_OFFSET + CPMVEC_SCC3)
21#define SCC4_IRQ (CPM_IRQ_OFFSET + CPMVEC_SCC4) 21#define SCC4_IRQ (CPM_IRQ_OFFSET + CPMVEC_SCC4)
22 22
23/* the CPM address */
24#define CPM_ADDR IMAP_ADDR
25
26static inline void cpm_set_brg(int brg, int baud) 23static inline void cpm_set_brg(int brg, int baud)
27{ 24{
28 cpm_setbrg(brg, baud); 25 cpm_setbrg(brg, baud);
diff --git a/drivers/serial/cpm_uart/cpm_uart_cpm2.h b/drivers/serial/cpm_uart/cpm_uart_cpm2.h
index 4b779111eaf9..1b3219f56c81 100644
--- a/drivers/serial/cpm_uart/cpm_uart_cpm2.h
+++ b/drivers/serial/cpm_uart/cpm_uart_cpm2.h
@@ -20,9 +20,6 @@
20#define SCC3_IRQ SIU_INT_SCC3 20#define SCC3_IRQ SIU_INT_SCC3
21#define SCC4_IRQ SIU_INT_SCC4 21#define SCC4_IRQ SIU_INT_SCC4
22 22
23/* the CPM address */
24#define CPM_ADDR CPM_MAP_ADDR
25
26static inline void cpm_set_brg(int brg, int baud) 23static inline void cpm_set_brg(int brg, int baud)
27{ 24{
28 cpm_setbrg(brg, baud); 25 cpm_setbrg(brg, baud);
diff --git a/drivers/serial/uartlite.c b/drivers/serial/uartlite.c
index db8607e3d531..f5051cf1a0c8 100644
--- a/drivers/serial/uartlite.c
+++ b/drivers/serial/uartlite.c
@@ -256,7 +256,7 @@ static void ulite_release_port(struct uart_port *port)
256{ 256{
257 release_mem_region(port->mapbase, ULITE_REGION); 257 release_mem_region(port->mapbase, ULITE_REGION);
258 iounmap(port->membase); 258 iounmap(port->membase);
259 port->membase = 0; 259 port->membase = NULL;
260} 260}
261 261
262static int ulite_request_port(struct uart_port *port) 262static int ulite_request_port(struct uart_port *port)
@@ -438,7 +438,7 @@ static int __devinit ulite_probe(struct platform_device *pdev)
438 port->iotype = UPIO_MEM; 438 port->iotype = UPIO_MEM;
439 port->iobase = 1; /* mark port in use */ 439 port->iobase = 1; /* mark port in use */
440 port->mapbase = res->start; 440 port->mapbase = res->start;
441 port->membase = 0; 441 port->membase = NULL;
442 port->ops = &ulite_ops; 442 port->ops = &ulite_ops;
443 port->irq = res2->start; 443 port->irq = res2->start;
444 port->flags = UPF_BOOT_AUTOCONF; 444 port->flags = UPF_BOOT_AUTOCONF;
@@ -462,7 +462,7 @@ static int ulite_remove(struct platform_device *pdev)
462 uart_remove_one_port(&ulite_uart_driver, port); 462 uart_remove_one_port(&ulite_uart_driver, port);
463 463
464 /* mark port as free */ 464 /* mark port as free */
465 port->membase = 0; 465 port->membase = NULL;
466 466
467 return 0; 467 return 0;
468} 468}
diff --git a/drivers/tc/Makefile b/drivers/tc/Makefile
index 83b5bd75ce26..967342692211 100644
--- a/drivers/tc/Makefile
+++ b/drivers/tc/Makefile
@@ -4,7 +4,7 @@
4 4
5# Object file lists. 5# Object file lists.
6 6
7obj-$(CONFIG_TC) += tc.o 7obj-$(CONFIG_TC) += tc.o tc-driver.o
8obj-$(CONFIG_ZS) += zs.o 8obj-$(CONFIG_ZS) += zs.o
9obj-$(CONFIG_VT) += lk201.o lk201-map.o lk201-remap.o 9obj-$(CONFIG_VT) += lk201.o lk201-map.o lk201-remap.o
10 10
diff --git a/drivers/tc/tc-driver.c b/drivers/tc/tc-driver.c
new file mode 100644
index 000000000000..16b5bae63c74
--- /dev/null
+++ b/drivers/tc/tc-driver.c
@@ -0,0 +1,110 @@
1/*
2 * TURBOchannel driver services.
3 *
4 * Copyright (c) 2005 James Simmons
5 * Copyright (c) 2006 Maciej W. Rozycki
6 *
7 * Loosely based on drivers/dio/dio-driver.c and
8 * drivers/pci/pci-driver.c.
9 *
10 * This file is subject to the terms and conditions of the GNU
11 * General Public License. See the file "COPYING" in the main
12 * directory of this archive for more details.
13 */
14
15#include <linux/init.h>
16#include <linux/module.h>
17#include <linux/tc.h>
18
19/**
20 * tc_register_driver - register a new TC driver
 21 * @tdrv: the driver structure to register
 22 *
 23 * Adds the driver structure to the list of registered drivers.
 24 * Returns a negative value on error, otherwise 0.
25 * If no error occurred, the driver remains registered even if
26 * no device was claimed during registration.
27 */
28int tc_register_driver(struct tc_driver *tdrv)
29{
30 return driver_register(&tdrv->driver);
31}
32EXPORT_SYMBOL(tc_register_driver);
33
34/**
35 * tc_unregister_driver - unregister a TC driver
 36 * @tdrv: the driver structure to unregister
37 *
38 * Deletes the driver structure from the list of registered TC drivers,
39 * gives it a chance to clean up by calling its remove() function for
40 * each device it was responsible for, and marks those devices as
41 * driverless.
42 */
43void tc_unregister_driver(struct tc_driver *tdrv)
44{
45 driver_unregister(&tdrv->driver);
46}
47EXPORT_SYMBOL(tc_unregister_driver);
48
49/**
50 * tc_match_device - tell if a TC device structure has a matching
51 * TC device ID structure
 52 * @tdrv: the TC driver to search for matching TC device ID strings
53 * @tdev: the TC device structure to match against
54 *
55 * Used by a driver to check whether a TC device present in the
56 * system is in its list of supported devices. Returns the matching
57 * tc_device_id structure or %NULL if there is no match.
58 */
59const struct tc_device_id *tc_match_device(struct tc_driver *tdrv,
60 struct tc_dev *tdev)
61{
62 const struct tc_device_id *id = tdrv->id_table;
63
64 if (id) {
65 while (id->name[0] || id->vendor[0]) {
66 if (strcmp(tdev->name, id->name) == 0 &&
67 strcmp(tdev->vendor, id->vendor) == 0)
68 return id;
69 id++;
70 }
71 }
72 return NULL;
73}
74EXPORT_SYMBOL(tc_match_device);
75
76/**
77 * tc_bus_match - Tell if a device structure has a matching
78 * TC device ID structure
79 * @dev: the device structure to match against
80 * @drv: the device driver to search for matching TC device ID strings
81 *
 82 * Used by the bus's match method to check whether a TC device
 83 * present in the system is supported by the given device driver.
 84 * Returns 1 if there is a match or 0 otherwise.
85 */
86static int tc_bus_match(struct device *dev, struct device_driver *drv)
87{
88 struct tc_dev *tdev = to_tc_dev(dev);
89 struct tc_driver *tdrv = to_tc_driver(drv);
90 const struct tc_device_id *id;
91
92 id = tc_match_device(tdrv, tdev);
93 if (id)
94 return 1;
95
96 return 0;
97}
98
99struct bus_type tc_bus_type = {
100 .name = "tc",
101 .match = tc_bus_match,
102};
103EXPORT_SYMBOL(tc_bus_type);
104
105static int __init tc_driver_init(void)
106{
107 return bus_register(&tc_bus_type);
108}
109
110postcore_initcall(tc_driver_init);
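For context, a minimal sketch of a TURBOchannel driver sitting on top of these services; every example_* name is hypothetical, while tc_register_driver(), tc_bus_type and the tc_device_id matching come from the code above:

	static const struct tc_device_id example_tc_table[] = {
		{ .vendor = "DEC     ", .name = "PMAD-AA " },  /* 8-char, space-padded strings */
		{ }                                            /* empty entry terminates the table */
	};

	static int example_probe(struct device *dev)
	{
		struct tc_dev *tdev = to_tc_dev(dev);

		/* tdev->resource and tdev->interrupt describe the slot */
		return 0;
	}

	static struct tc_driver example_tc_driver = {
		.id_table = example_tc_table,
		.driver = {
			.name  = "example",
			.bus   = &tc_bus_type,
			.probe = example_probe,
		},
	};

	static int __init example_init(void)
	{
		return tc_register_driver(&example_tc_driver);
	}

tc_bus_match() then compares a probed board's vendor/name strings against the table via tc_match_device().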
diff --git a/drivers/tc/tc.c b/drivers/tc/tc.c
index 4a51e56f85b6..f77f62a4b325 100644
--- a/drivers/tc/tc.c
+++ b/drivers/tc/tc.c
@@ -1,254 +1,193 @@
1/* 1/*
2 * tc-init: We assume the TURBOchannel to be up and running so 2 * TURBOchannel bus services.
3 * just probe for Modules and fill in the global data structure
4 * tc_bus.
5 * 3 *
6 * This file is subject to the terms and conditions of the GNU General Public 4 * Copyright (c) Harald Koerfgen, 1998
7 * License. See the file "COPYING" in the main directory of this archive 5 * Copyright (c) 2001, 2003, 2005, 2006 Maciej W. Rozycki
8 * for more details. 6 * Copyright (c) 2005 James Simmons
9 * 7 *
10 * Copyright (c) Harald Koerfgen, 1998 8 * This file is subject to the terms and conditions of the GNU
11 * Copyright (c) 2001, 2003, 2005 Maciej W. Rozycki 9 * General Public License. See the file "COPYING" in the main
10 * directory of this archive for more details.
12 */ 11 */
12#include <linux/compiler.h>
13#include <linux/errno.h>
13#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/ioport.h>
14#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/list.h>
15#include <linux/module.h> 18#include <linux/module.h>
16#include <linux/string.h> 19#include <linux/string.h>
20#include <linux/tc.h>
17#include <linux/types.h> 21#include <linux/types.h>
18 22
19#include <asm/addrspace.h>
20#include <asm/errno.h>
21#include <asm/io.h> 23#include <asm/io.h>
22#include <asm/paccess.h>
23 24
24#include <asm/dec/machtype.h> 25static struct tc_bus tc_bus = {
25#include <asm/dec/prom.h> 26 .name = "TURBOchannel",
26#include <asm/dec/tcinfo.h> 27};
27#include <asm/dec/tcmodule.h>
28#include <asm/dec/interrupts.h>
29
30MODULE_LICENSE("GPL");
31slot_info tc_bus[MAX_SLOT];
32static int num_tcslots;
33static tcinfo *info;
34 28
35/* 29/*
36 * Interface to the world. Read comment in include/asm-mips/tc.h. 30 * Probing for TURBOchannel modules.
37 */ 31 */
38 32static void __init tc_bus_add_devices(struct tc_bus *tbus)
39int search_tc_card(const char *name)
40{
41 int slot;
42 slot_info *sip;
43
44 for (slot = 0; slot < num_tcslots; slot++) {
45 sip = &tc_bus[slot];
46 if ((sip->flags & FREE) &&
47 (strncmp(sip->name, name, strlen(name)) == 0)) {
48 return slot;
49 }
50 }
51
52 return -ENODEV;
53}
54
55void claim_tc_card(int slot)
56{
57 if (tc_bus[slot].flags & IN_USE) {
58 printk("claim_tc_card: attempting to claim a card already in use\n");
59 return;
60 }
61 tc_bus[slot].flags &= ~FREE;
62 tc_bus[slot].flags |= IN_USE;
63}
64
65void release_tc_card(int slot)
66{ 33{
67 if (tc_bus[slot].flags & FREE) { 34 resource_size_t slotsize = tbus->info.slot_size << 20;
68 printk("release_tc_card: " 35 resource_size_t extslotsize = tbus->ext_slot_size;
69 "attempting to release a card already free\n"); 36 resource_size_t slotaddr;
70 return; 37 resource_size_t extslotaddr;
71 } 38 resource_size_t devsize;
72 tc_bus[slot].flags &= ~IN_USE; 39 void __iomem *module;
73 tc_bus[slot].flags |= FREE; 40 struct tc_dev *tdev;
74}
75
76unsigned long get_tc_base_addr(int slot)
77{
78 return tc_bus[slot].base_addr;
79}
80
81unsigned long get_tc_irq_nr(int slot)
82{
83 return tc_bus[slot].interrupt;
84}
85
86unsigned long get_tc_speed(void)
87{
88 return 100000 * (10000 / (unsigned long)info->clk_period);
89}
90
91/*
92 * Probing for TURBOchannel modules
93 */
94static void __init tc_probe(unsigned long startaddr, unsigned long size,
95 int slots)
96{
97 unsigned long slotaddr;
98 int i, slot, err; 41 int i, slot, err;
99 long offset;
100 u8 pattern[4]; 42 u8 pattern[4];
101 volatile u8 *module; 43 long offset;
102 44
103 for (slot = 0; slot < slots; slot++) { 45 for (slot = 0; slot < tbus->num_tcslots; slot++) {
104 slotaddr = startaddr + slot * size; 46 slotaddr = tbus->slot_base + slot * slotsize;
105 module = ioremap_nocache(slotaddr, size); 47 extslotaddr = tbus->ext_slot_base + slot * extslotsize;
48 module = ioremap_nocache(slotaddr, slotsize);
106 BUG_ON(!module); 49 BUG_ON(!module);
107 50
108 offset = OLDCARD; 51 offset = TC_OLDCARD;
109 52
110 err = 0; 53 err = 0;
111 err |= get_dbe(pattern[0], module + OLDCARD + TC_PATTERN0); 54 err |= tc_preadb(pattern + 0, module + offset + TC_PATTERN0);
112 err |= get_dbe(pattern[1], module + OLDCARD + TC_PATTERN1); 55 err |= tc_preadb(pattern + 1, module + offset + TC_PATTERN1);
113 err |= get_dbe(pattern[2], module + OLDCARD + TC_PATTERN2); 56 err |= tc_preadb(pattern + 2, module + offset + TC_PATTERN2);
114 err |= get_dbe(pattern[3], module + OLDCARD + TC_PATTERN3); 57 err |= tc_preadb(pattern + 3, module + offset + TC_PATTERN3);
115 if (err) { 58 if (err)
116 iounmap(module); 59 goto out_err;
117 continue;
118 }
119 60
120 if (pattern[0] != 0x55 || pattern[1] != 0x00 || 61 if (pattern[0] != 0x55 || pattern[1] != 0x00 ||
121 pattern[2] != 0xaa || pattern[3] != 0xff) { 62 pattern[2] != 0xaa || pattern[3] != 0xff) {
122 offset = NEWCARD; 63 offset = TC_NEWCARD;
123 64
124 err = 0; 65 err = 0;
125 err |= get_dbe(pattern[0], module + TC_PATTERN0); 66 err |= tc_preadb(pattern + 0,
126 err |= get_dbe(pattern[1], module + TC_PATTERN1); 67 module + offset + TC_PATTERN0);
127 err |= get_dbe(pattern[2], module + TC_PATTERN2); 68 err |= tc_preadb(pattern + 1,
128 err |= get_dbe(pattern[3], module + TC_PATTERN3); 69 module + offset + TC_PATTERN1);
129 if (err) { 70 err |= tc_preadb(pattern + 2,
130 iounmap(module); 71 module + offset + TC_PATTERN2);
131 continue; 72 err |= tc_preadb(pattern + 3,
132 } 73 module + offset + TC_PATTERN3);
74 if (err)
75 goto out_err;
133 } 76 }
134 77
135 if (pattern[0] != 0x55 || pattern[1] != 0x00 || 78 if (pattern[0] != 0x55 || pattern[1] != 0x00 ||
136 pattern[2] != 0xaa || pattern[3] != 0xff) { 79 pattern[2] != 0xaa || pattern[3] != 0xff)
137 iounmap(module); 80 goto out_err;
138 continue; 81
82 /* Found a board, allocate it an entry in the list */
83 tdev = kzalloc(sizeof(*tdev), GFP_KERNEL);
84 if (!tdev) {
85 printk(KERN_ERR "tc%x: unable to allocate tc_dev\n",
86 slot);
87 goto out_err;
139 } 88 }
89 sprintf(tdev->dev.bus_id, "tc%x", slot);
90 tdev->bus = tbus;
91 tdev->dev.parent = &tbus->dev;
92 tdev->dev.bus = &tc_bus_type;
93 tdev->slot = slot;
140 94
141 tc_bus[slot].base_addr = slotaddr;
142 for (i = 0; i < 8; i++) { 95 for (i = 0; i < 8; i++) {
143 tc_bus[slot].firmware[i] = 96 tdev->firmware[i] =
144 module[TC_FIRM_VER + offset + 4 * i]; 97 readb(module + offset + TC_FIRM_VER + 4 * i);
145 tc_bus[slot].vendor[i] = 98 tdev->vendor[i] =
146 module[TC_VENDOR + offset + 4 * i]; 99 readb(module + offset + TC_VENDOR + 4 * i);
147 tc_bus[slot].name[i] = 100 tdev->name[i] =
148 module[TC_MODULE + offset + 4 * i]; 101 readb(module + offset + TC_MODULE + 4 * i);
149 } 102 }
150 tc_bus[slot].firmware[8] = 0; 103 tdev->firmware[8] = 0;
151 tc_bus[slot].vendor[8] = 0; 104 tdev->vendor[8] = 0;
152 tc_bus[slot].name[8] = 0; 105 tdev->name[8] = 0;
153 /* 106
154 * Looks unneccesary, but we may change 107 pr_info("%s: %s %s %s\n", tdev->dev.bus_id, tdev->vendor,
155 * TC? in the future 108 tdev->name, tdev->firmware);
156 */ 109
157 switch (slot) { 110 devsize = readb(module + offset + TC_SLOT_SIZE);
158 case 0: 111 devsize <<= 22;
159 tc_bus[slot].interrupt = dec_interrupt[DEC_IRQ_TC0]; 112 if (devsize <= slotsize) {
160 break; 113 tdev->resource.start = slotaddr;
161 case 1: 114 tdev->resource.end = slotaddr + devsize - 1;
162 tc_bus[slot].interrupt = dec_interrupt[DEC_IRQ_TC1]; 115 } else if (devsize <= extslotsize) {
163 break; 116 tdev->resource.start = extslotaddr;
164 case 2: 117 tdev->resource.end = extslotaddr + devsize - 1;
165 tc_bus[slot].interrupt = dec_interrupt[DEC_IRQ_TC2]; 118 } else {
166 break; 119 printk(KERN_ERR "%s: Cannot provide slot space "
167 /* 120 "(%dMiB required, up to %dMiB supported)\n",
168 * Yuck! DS5000/200 onboard devices 121 tdev->dev.bus_id, devsize >> 20,
169 */ 122 max(slotsize, extslotsize) >> 20);
170 case 5: 123 kfree(tdev);
171 tc_bus[slot].interrupt = dec_interrupt[DEC_IRQ_TC5]; 124 goto out_err;
172 break;
173 case 6:
174 tc_bus[slot].interrupt = dec_interrupt[DEC_IRQ_TC6];
175 break;
176 default:
177 tc_bus[slot].interrupt = -1;
178 break;
179 } 125 }
126 tdev->resource.name = tdev->name;
127 tdev->resource.flags = IORESOURCE_MEM;
128
129 tc_device_get_irq(tdev);
180 130
131 device_register(&tdev->dev);
132 list_add_tail(&tdev->node, &tbus->devices);
133
134out_err:
181 iounmap(module); 135 iounmap(module);
182 } 136 }
183} 137}
184 138
185/* 139/*
186 * the main entry 140 * The main entry.
187 */ 141 */
188static int __init tc_init(void) 142static int __init tc_init(void)
189{ 143{
190 int tc_clock; 144 /* Initialize the TURBOchannel bus */
191 int i; 145 if (tc_bus_get_info(&tc_bus))
192 unsigned long slot0addr;
193 unsigned long slot_size;
194
195 if (!TURBOCHANNEL)
196 return 0; 146 return 0;
197 147
198 for (i = 0; i < MAX_SLOT; i++) { 148 INIT_LIST_HEAD(&tc_bus.devices);
199 tc_bus[i].base_addr = 0; 149 strcpy(tc_bus.dev.bus_id, "tc");
200 tc_bus[i].name[0] = 0; 150 device_register(&tc_bus.dev);
201 tc_bus[i].vendor[0] = 0; 151
202 tc_bus[i].firmware[0] = 0; 152 if (tc_bus.info.slot_size) {
203 tc_bus[i].interrupt = -1; 153 unsigned int tc_clock = tc_get_speed(&tc_bus) / 100000;
204 tc_bus[i].flags = FREE; 154
205 } 155 pr_info("tc: TURBOchannel rev. %d at %d.%d MHz "
206 156 "(with%s parity)\n", tc_bus.info.revision,
207 info = rex_gettcinfo(); 157 tc_clock / 10, tc_clock % 10,
208 slot0addr = CPHYSADDR((long)rex_slot_address(0)); 158 tc_bus.info.parity ? "" : "out");
209 159
210 switch (mips_machtype) { 160 tc_bus.resource[0].start = tc_bus.slot_base;
211 case MACH_DS5000_200: 161 tc_bus.resource[0].end = tc_bus.slot_base +
212 num_tcslots = 7; 162 (tc_bus.info.slot_size << 20) *
213 break; 163 tc_bus.num_tcslots - 1;
214 case MACH_DS5000_1XX: 164 tc_bus.resource[0].name = tc_bus.name;
215 case MACH_DS5000_2X0: 165 tc_bus.resource[0].flags = IORESOURCE_MEM;
216 case MACH_DS5900: 166 if (request_resource(&iomem_resource,
217 num_tcslots = 3; 167 &tc_bus.resource[0]) < 0) {
218 break; 168 printk(KERN_ERR "tc: Cannot reserve resource\n");
219 case MACH_DS5000_XX: 169 return 0;
220 default: 170 }
221 num_tcslots = 2; 171 if (tc_bus.ext_slot_size) {
222 break; 172 tc_bus.resource[1].start = tc_bus.ext_slot_base;
223 } 173 tc_bus.resource[1].end = tc_bus.ext_slot_base +
224 174 tc_bus.ext_slot_size *
225 tc_clock = 10000 / info->clk_period; 175 tc_bus.num_tcslots - 1;
226 176 tc_bus.resource[1].name = tc_bus.name;
227 if (info->slot_size && slot0addr) { 177 tc_bus.resource[1].flags = IORESOURCE_MEM;
228 pr_info("TURBOchannel rev. %d at %d.%d MHz (with%s parity)\n", 178 if (request_resource(&iomem_resource,
229 info->revision, tc_clock / 10, tc_clock % 10, 179 &tc_bus.resource[1]) < 0) {
230 info->parity ? "" : "out"); 180 printk(KERN_ERR
231 181 "tc: Cannot reserve resource\n");
232 slot_size = info->slot_size << 20; 182 release_resource(&tc_bus.resource[0]);
233 183 return 0;
234 tc_probe(slot0addr, slot_size, num_tcslots); 184 }
235
236 for (i = 0; i < num_tcslots; i++) {
237 if (!tc_bus[i].base_addr)
238 continue;
239 pr_info(" slot %d: %s %s %s\n", i, tc_bus[i].vendor,
240 tc_bus[i].name, tc_bus[i].firmware);
241 } 185 }
186
187 tc_bus_add_devices(&tc_bus);
242 } 188 }
243 189
244 return 0; 190 return 0;
245} 191}
246 192
247subsys_initcall(tc_init); 193subsys_initcall(tc_init);
248
249EXPORT_SYMBOL(search_tc_card);
250EXPORT_SYMBOL(claim_tc_card);
251EXPORT_SYMBOL(release_tc_card);
252EXPORT_SYMBOL(get_tc_base_addr);
253EXPORT_SYMBOL(get_tc_irq_nr);
254EXPORT_SYMBOL(get_tc_speed);
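The exports removed above were the whole of the old slot-array interface, so any remaining users need the shape of the conversion. Roughly, with hypothetical driver code on both sides:

	/* Before: poll the global slot table by board name. */
	int slot = search_tc_card("PMAD-AA ");
	if (slot >= 0) {
		claim_tc_card(slot);
		base = get_tc_base_addr(slot);
		irq  = get_tc_irq_nr(slot);
	}

	/* After: the bus core matches the board and calls probe(). */
	static int example_probe(struct device *dev)
	{
		struct tc_dev *tdev = to_tc_dev(dev);
		resource_size_t base = tdev->resource.start;
		int irq = tdev->interrupt;
		/* ... */
		return 0;
	}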
diff --git a/drivers/usb/atm/speedtch.c b/drivers/usb/atm/speedtch.c
index 8ed6c75adf0f..638b8009b3bc 100644
--- a/drivers/usb/atm/speedtch.c
+++ b/drivers/usb/atm/speedtch.c
@@ -36,7 +36,7 @@
36#include <linux/stat.h> 36#include <linux/stat.h>
37#include <linux/timer.h> 37#include <linux/timer.h>
38#include <linux/types.h> 38#include <linux/types.h>
39#include <linux/usb_ch9.h> 39#include <linux/usb/ch9.h>
40#include <linux/workqueue.h> 40#include <linux/workqueue.h>
41 41
42#include "usbatm.h" 42#include "usbatm.h"
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index 6377db1b446d..63e50a1f1396 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -398,6 +398,9 @@ static int usblp_open(struct inode *inode, struct file *file)
398 retval = 0; 398 retval = 0;
399#endif 399#endif
400 400
401 retval = usb_autopm_get_interface(intf);
402 if (retval < 0)
403 goto out;
401 usblp->used = 1; 404 usblp->used = 1;
402 file->private_data = usblp; 405 file->private_data = usblp;
403 406
@@ -442,6 +445,7 @@ static int usblp_release(struct inode *inode, struct file *file)
442 usblp->used = 0; 445 usblp->used = 0;
443 if (usblp->present) { 446 if (usblp->present) {
444 usblp_unlink_urbs(usblp); 447 usblp_unlink_urbs(usblp);
448 usb_autopm_put_interface(usblp->intf);
445 } else /* finish cleanup from disconnect */ 449 } else /* finish cleanup from disconnect */
446 usblp_cleanup (usblp); 450 usblp_cleanup (usblp);
447 mutex_unlock (&usblp_mutex); 451 mutex_unlock (&usblp_mutex);
@@ -1203,14 +1207,9 @@ static int usblp_suspend (struct usb_interface *intf, pm_message_t message)
1203{ 1207{
1204 struct usblp *usblp = usb_get_intfdata (intf); 1208 struct usblp *usblp = usb_get_intfdata (intf);
1205 1209
1206 /* this races against normal access and open */
1207 mutex_lock (&usblp_mutex);
1208 mutex_lock (&usblp->mut);
1209 /* we take no more IO */ 1210 /* we take no more IO */
1210 usblp->sleeping = 1; 1211 usblp->sleeping = 1;
1211 usblp_unlink_urbs(usblp); 1212 usblp_unlink_urbs(usblp);
1212 mutex_unlock (&usblp->mut);
1213 mutex_unlock (&usblp_mutex);
1214 1213
1215 return 0; 1214 return 0;
1216} 1215}
@@ -1220,15 +1219,9 @@ static int usblp_resume (struct usb_interface *intf)
1220 struct usblp *usblp = usb_get_intfdata (intf); 1219 struct usblp *usblp = usb_get_intfdata (intf);
1221 int r; 1220 int r;
1222 1221
1223 mutex_lock (&usblp_mutex);
1224 mutex_lock (&usblp->mut);
1225
1226 usblp->sleeping = 0; 1222 usblp->sleeping = 0;
1227 r = handle_bidir (usblp); 1223 r = handle_bidir (usblp);
1228 1224
1229 mutex_unlock (&usblp->mut);
1230 mutex_unlock (&usblp_mutex);
1231
1232 return r; 1225 return r;
1233} 1226}
1234 1227
@@ -1251,6 +1244,7 @@ static struct usb_driver usblp_driver = {
1251 .suspend = usblp_suspend, 1244 .suspend = usblp_suspend,
1252 .resume = usblp_resume, 1245 .resume = usblp_resume,
1253 .id_table = usblp_ids, 1246 .id_table = usblp_ids,
1247 .supports_autosuspend = 1,
1254}; 1248};
1255 1249
1256static int __init usblp_init(void) 1250static int __init usblp_init(void)
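The three usblp hunks together form the standard autosuspend conversion for a USB class driver: take a power-management reference while the character device is open, drop it on release, and advertise .supports_autosuspend so usbcore may suspend the idle interface. Reduced to its skeleton (a sketch, not the usblp code itself):

	static int example_open(struct usb_interface *intf)
	{
		int retval = usb_autopm_get_interface(intf);  /* resume + pin awake */

		if (retval < 0)
			return retval;
		/* ... normal open work ... */
		return 0;
	}

	static void example_release(struct usb_interface *intf)
	{
		/* ... stop I/O ... */
		usb_autopm_put_interface(intf);  /* idle again; may autosuspend */
	}

The suspend()/resume() paths drop their private usblp_mutex/usblp->mut locking, relying instead on usbcore's own serialization of PM callbacks against other driver entry points.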
diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig
index 3e66b2a9974a..2fc0f88a3d86 100644
--- a/drivers/usb/core/Kconfig
+++ b/drivers/usb/core/Kconfig
@@ -33,19 +33,6 @@ config USB_DEVICEFS
33 33
34 Most users want to say Y here. 34 Most users want to say Y here.
35 35
36config USB_BANDWIDTH
37 bool "Enforce USB bandwidth allocation (EXPERIMENTAL)"
38 depends on USB && EXPERIMENTAL
39 help
40 If you say Y here, the USB subsystem enforces USB bandwidth
41 allocation and will prevent some device opens from succeeding
42 if they would cause USB bandwidth usage to go above 90% of
43 the bus bandwidth.
44
45 If you say N here, these conditions will cause warning messages
46 about USB bandwidth usage to be logged and some devices or
47 drivers may not work correctly.
48
49config USB_DYNAMIC_MINORS 36config USB_DYNAMIC_MINORS
50 bool "Dynamic USB minor allocation (EXPERIMENTAL)" 37 bool "Dynamic USB minor allocation (EXPERIMENTAL)"
51 depends on USB && EXPERIMENTAL 38 depends on USB && EXPERIMENTAL
diff --git a/drivers/usb/core/buffer.c b/drivers/usb/core/buffer.c
index c3915dc28608..ead2475406b8 100644
--- a/drivers/usb/core/buffer.c
+++ b/drivers/usb/core/buffer.c
@@ -49,9 +49,9 @@ static const size_t pool_max [HCD_BUFFER_POOLS] = {
49 * 49 *
50 * Call hcd_buffer_destroy() to clean up after using those pools. 50 * Call hcd_buffer_destroy() to clean up after using those pools.
51 */ 51 */
52int hcd_buffer_create (struct usb_hcd *hcd) 52int hcd_buffer_create(struct usb_hcd *hcd)
53{ 53{
54 char name [16]; 54 char name[16];
55 int i, size; 55 int i, size;
56 56
57 if (!hcd->self.controller->dma_mask) 57 if (!hcd->self.controller->dma_mask)
@@ -60,11 +60,11 @@ int hcd_buffer_create (struct usb_hcd *hcd)
60 for (i = 0; i < HCD_BUFFER_POOLS; i++) { 60 for (i = 0; i < HCD_BUFFER_POOLS; i++) {
61 if (!(size = pool_max [i])) 61 if (!(size = pool_max [i]))
62 continue; 62 continue;
63 snprintf (name, sizeof name, "buffer-%d", size); 63 snprintf(name, sizeof name, "buffer-%d", size);
64 hcd->pool [i] = dma_pool_create (name, hcd->self.controller, 64 hcd->pool[i] = dma_pool_create(name, hcd->self.controller,
65 size, size, 0); 65 size, size, 0);
66 if (!hcd->pool [i]) { 66 if (!hcd->pool [i]) {
67 hcd_buffer_destroy (hcd); 67 hcd_buffer_destroy(hcd);
68 return -ENOMEM; 68 return -ENOMEM;
69 } 69 }
70 } 70 }
@@ -79,14 +79,14 @@ int hcd_buffer_create (struct usb_hcd *hcd)
79 * 79 *
80 * This frees the buffer pools created by hcd_buffer_create(). 80 * This frees the buffer pools created by hcd_buffer_create().
81 */ 81 */
82void hcd_buffer_destroy (struct usb_hcd *hcd) 82void hcd_buffer_destroy(struct usb_hcd *hcd)
83{ 83{
84 int i; 84 int i;
85 85
86 for (i = 0; i < HCD_BUFFER_POOLS; i++) { 86 for (i = 0; i < HCD_BUFFER_POOLS; i++) {
87 struct dma_pool *pool = hcd->pool [i]; 87 struct dma_pool *pool = hcd->pool[i];
88 if (pool) { 88 if (pool) {
89 dma_pool_destroy (pool); 89 dma_pool_destroy(pool);
90 hcd->pool[i] = NULL; 90 hcd->pool[i] = NULL;
91 } 91 }
92 } 92 }
@@ -97,8 +97,8 @@ void hcd_buffer_destroy (struct usb_hcd *hcd)
97 * better sharing and to leverage mm/slab.c intelligence. 97 * better sharing and to leverage mm/slab.c intelligence.
98 */ 98 */
99 99
100void *hcd_buffer_alloc ( 100void *hcd_buffer_alloc(
101 struct usb_bus *bus, 101 struct usb_bus *bus,
102 size_t size, 102 size_t size,
103 gfp_t mem_flags, 103 gfp_t mem_flags,
104 dma_addr_t *dma 104 dma_addr_t *dma
@@ -110,18 +110,18 @@ void *hcd_buffer_alloc (
110 /* some USB hosts just use PIO */ 110 /* some USB hosts just use PIO */
111 if (!bus->controller->dma_mask) { 111 if (!bus->controller->dma_mask) {
112 *dma = ~(dma_addr_t) 0; 112 *dma = ~(dma_addr_t) 0;
113 return kmalloc (size, mem_flags); 113 return kmalloc(size, mem_flags);
114 } 114 }
115 115
116 for (i = 0; i < HCD_BUFFER_POOLS; i++) { 116 for (i = 0; i < HCD_BUFFER_POOLS; i++) {
117 if (size <= pool_max [i]) 117 if (size <= pool_max [i])
118 return dma_pool_alloc (hcd->pool [i], mem_flags, dma); 118 return dma_pool_alloc(hcd->pool [i], mem_flags, dma);
119 } 119 }
120 return dma_alloc_coherent (hcd->self.controller, size, dma, 0); 120 return dma_alloc_coherent(hcd->self.controller, size, dma, 0);
121} 121}
122 122
123void hcd_buffer_free ( 123void hcd_buffer_free(
124 struct usb_bus *bus, 124 struct usb_bus *bus,
125 size_t size, 125 size_t size,
126 void *addr, 126 void *addr,
127 dma_addr_t dma 127 dma_addr_t dma
@@ -134,15 +134,15 @@ void hcd_buffer_free (
134 return; 134 return;
135 135
136 if (!bus->controller->dma_mask) { 136 if (!bus->controller->dma_mask) {
137 kfree (addr); 137 kfree(addr);
138 return; 138 return;
139 } 139 }
140 140
141 for (i = 0; i < HCD_BUFFER_POOLS; i++) { 141 for (i = 0; i < HCD_BUFFER_POOLS; i++) {
142 if (size <= pool_max [i]) { 142 if (size <= pool_max [i]) {
143 dma_pool_free (hcd->pool [i], addr, dma); 143 dma_pool_free(hcd->pool [i], addr, dma);
144 return; 144 return;
145 } 145 }
146 } 146 }
147 dma_free_coherent (hcd->self.controller, size, addr, dma); 147 dma_free_coherent(hcd->self.controller, size, addr, dma);
148} 148}
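These hunks are whitespace-only (kernel style drops the space before the parenthesis), but the allocator they tidy deserves a caller's-eye view: hcd_buffer_alloc() picks the smallest per-HCD DMA pool that fits and falls back to dma_alloc_coherent() for anything larger. A typical caller, as a sketch (udev stands for any struct usb_device):

	dma_addr_t dma;
	void *buf = hcd_buffer_alloc(udev->bus, 64, GFP_KERNEL, &dma);

	if (!buf)
		return -ENOMEM;
	/* ... hand buf/dma to an urb ... */
	hcd_buffer_free(udev->bus, 64, buf, dma);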
diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
index ea398e5d50af..a47c30b2d764 100644
--- a/drivers/usb/core/devices.c
+++ b/drivers/usb/core/devices.c
@@ -104,7 +104,7 @@ static const char *format_config =
104 104
105static const char *format_iface = 105static const char *format_iface =
106/* I: If#=dd Alt=dd #EPs=dd Cls=xx(sssss) Sub=xx Prot=xx Driver=xxxx*/ 106/* I: If#=dd Alt=dd #EPs=dd Cls=xx(sssss) Sub=xx Prot=xx Driver=xxxx*/
107 "I: If#=%2d Alt=%2d #EPs=%2d Cls=%02x(%-5s) Sub=%02x Prot=%02x Driver=%s\n"; 107 "I:%c If#=%2d Alt=%2d #EPs=%2d Cls=%02x(%-5s) Sub=%02x Prot=%02x Driver=%s\n";
108 108
109static const char *format_endpt = 109static const char *format_endpt =
110/* E: Ad=xx(s) Atr=xx(ssss) MxPS=dddd Ivl=D?s */ 110/* E: Ad=xx(s) Atr=xx(ssss) MxPS=dddd Ivl=D?s */
@@ -164,10 +164,10 @@ static const char *class_decode(const int class)
164 for (ix = 0; clas_info[ix].class != -1; ix++) 164 for (ix = 0; clas_info[ix].class != -1; ix++)
165 if (clas_info[ix].class == class) 165 if (clas_info[ix].class == class)
166 break; 166 break;
167 return (clas_info[ix].class_name); 167 return clas_info[ix].class_name;
168} 168}
169 169
170static char *usb_dump_endpoint_descriptor ( 170static char *usb_dump_endpoint_descriptor(
171 int speed, 171 int speed,
172 char *start, 172 char *start,
173 char *end, 173 char *end,
@@ -212,9 +212,9 @@ static char *usb_dump_endpoint_descriptor (
212 break; 212 break;
213 case USB_ENDPOINT_XFER_INT: 213 case USB_ENDPOINT_XFER_INT:
214 type = "Int."; 214 type = "Int.";
215 if (speed == USB_SPEED_HIGH) { 215 if (speed == USB_SPEED_HIGH)
216 interval = 1 << (desc->bInterval - 1); 216 interval = 1 << (desc->bInterval - 1);
217 } else 217 else
218 interval = desc->bInterval; 218 interval = desc->bInterval;
219 break; 219 break;
220 default: /* "can't happen" */ 220 default: /* "can't happen" */
@@ -242,15 +242,19 @@ static char *usb_dump_interface_descriptor(char *start, char *end,
242{ 242{
243 const struct usb_interface_descriptor *desc = &intfc->altsetting[setno].desc; 243 const struct usb_interface_descriptor *desc = &intfc->altsetting[setno].desc;
244 const char *driver_name = ""; 244 const char *driver_name = "";
245 int active = 0;
245 246
246 if (start > end) 247 if (start > end)
247 return start; 248 return start;
248 down_read(&usb_bus_type.subsys.rwsem); 249 down_read(&usb_bus_type.subsys.rwsem);
249 if (iface) 250 if (iface) {
250 driver_name = (iface->dev.driver 251 driver_name = (iface->dev.driver
251 ? iface->dev.driver->name 252 ? iface->dev.driver->name
252 : "(none)"); 253 : "(none)");
254 active = (desc == &iface->cur_altsetting->desc);
255 }
253 start += sprintf(start, format_iface, 256 start += sprintf(start, format_iface,
257 active ? '*' : ' ', /* mark active altsetting */
254 desc->bInterfaceNumber, 258 desc->bInterfaceNumber,
255 desc->bAlternateSetting, 259 desc->bAlternateSetting,
256 desc->bNumEndpoints, 260 desc->bNumEndpoints,
@@ -343,7 +347,7 @@ static char *usb_dump_device_descriptor(char *start, char *end, const struct usb
343 347
344 if (start > end) 348 if (start > end)
345 return start; 349 return start;
346 start += sprintf (start, format_device1, 350 start += sprintf(start, format_device1,
347 bcdUSB >> 8, bcdUSB & 0xff, 351 bcdUSB >> 8, bcdUSB & 0xff,
348 desc->bDeviceClass, 352 desc->bDeviceClass,
349 class_decode (desc->bDeviceClass), 353 class_decode (desc->bDeviceClass),
@@ -363,7 +367,7 @@ static char *usb_dump_device_descriptor(char *start, char *end, const struct usb
363/* 367/*
364 * Dump the different strings that this device holds. 368 * Dump the different strings that this device holds.
365 */ 369 */
366static char *usb_dump_device_strings (char *start, char *end, struct usb_device *dev) 370static char *usb_dump_device_strings(char *start, char *end, struct usb_device *dev)
367{ 371{
368 if (start > end) 372 if (start > end)
369 return start; 373 return start;
@@ -395,7 +399,7 @@ static char *usb_dump_desc(char *start, char *end, struct usb_device *dev)
395 if (start > end) 399 if (start > end)
396 return start; 400 return start;
397 401
398 start = usb_dump_device_strings (start, end, dev); 402 start = usb_dump_device_strings(start, end, dev);
399 403
400 for (i = 0; i < dev->descriptor.bNumConfigurations; i++) { 404 for (i = 0; i < dev->descriptor.bNumConfigurations; i++) {
401 if (start > end) 405 if (start > end)
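With this change the I: lines in /proc/bus/usb/devices carry a '*' on whichever altsetting is currently active; illustrative output for a two-altsetting interface (values invented):

	I:* If#= 0 Alt= 0 #EPs= 2 Cls=08(stor.) Sub=06 Prot=50 Driver=usb-storage
	I:  If#= 0 Alt= 1 #EPs= 3 Cls=08(stor.) Sub=06 Prot=50 Driver=usb-storage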
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 4b3a6ab29bd3..2087766f9e88 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -522,19 +522,19 @@ static int check_ctrlrecip(struct dev_state *ps, unsigned int requesttype, unsig
522 522
523static struct usb_device *usbdev_lookup_minor(int minor) 523static struct usb_device *usbdev_lookup_minor(int minor)
524{ 524{
525 struct class_device *class_dev; 525 struct device *device;
526 struct usb_device *dev = NULL; 526 struct usb_device *udev = NULL;
527 527
528 down(&usb_device_class->sem); 528 down(&usb_device_class->sem);
529 list_for_each_entry(class_dev, &usb_device_class->children, node) { 529 list_for_each_entry(device, &usb_device_class->devices, node) {
530 if (class_dev->devt == MKDEV(USB_DEVICE_MAJOR, minor)) { 530 if (device->devt == MKDEV(USB_DEVICE_MAJOR, minor)) {
531 dev = class_dev->class_data; 531 udev = device->platform_data;
532 break; 532 break;
533 } 533 }
534 } 534 }
535 up(&usb_device_class->sem); 535 up(&usb_device_class->sem);
536 536
537 return dev; 537 return udev;
538}; 538};
539 539
540/* 540/*
@@ -570,6 +570,7 @@ static int usbdev_open(struct inode *inode, struct file *file)
570 ps->dev = dev; 570 ps->dev = dev;
571 ps->file = file; 571 ps->file = file;
572 spin_lock_init(&ps->lock); 572 spin_lock_init(&ps->lock);
573 INIT_LIST_HEAD(&ps->list);
573 INIT_LIST_HEAD(&ps->async_pending); 574 INIT_LIST_HEAD(&ps->async_pending);
574 INIT_LIST_HEAD(&ps->async_completed); 575 INIT_LIST_HEAD(&ps->async_completed);
575 init_waitqueue_head(&ps->wait); 576 init_waitqueue_head(&ps->wait);
@@ -1596,19 +1597,19 @@ static int usbdev_add(struct usb_device *dev)
1596{ 1597{
1597 int minor = ((dev->bus->busnum-1) * 128) + (dev->devnum-1); 1598 int minor = ((dev->bus->busnum-1) * 128) + (dev->devnum-1);
1598 1599
1599 dev->class_dev = class_device_create(usb_device_class, NULL, 1600 dev->usbfs_dev = device_create(usb_device_class, &dev->dev,
1600 MKDEV(USB_DEVICE_MAJOR, minor), &dev->dev, 1601 MKDEV(USB_DEVICE_MAJOR, minor),
1601 "usbdev%d.%d", dev->bus->busnum, dev->devnum); 1602 "usbdev%d.%d", dev->bus->busnum, dev->devnum);
1602 if (IS_ERR(dev->class_dev)) 1603 if (IS_ERR(dev->usbfs_dev))
1603 return PTR_ERR(dev->class_dev); 1604 return PTR_ERR(dev->usbfs_dev);
1604 1605
1605 dev->class_dev->class_data = dev; 1606 dev->usbfs_dev->platform_data = dev;
1606 return 0; 1607 return 0;
1607} 1608}
1608 1609
1609static void usbdev_remove(struct usb_device *dev) 1610static void usbdev_remove(struct usb_device *dev)
1610{ 1611{
1611 class_device_unregister(dev->class_dev); 1612 device_unregister(dev->usbfs_dev);
1612} 1613}
1613 1614
1614static int usbdev_notify(struct notifier_block *self, unsigned long action, 1615static int usbdev_notify(struct notifier_block *self, unsigned long action,
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index d6eb5ce1dd1d..600d1bc8272a 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -28,24 +28,16 @@
28#include "hcd.h" 28#include "hcd.h"
29#include "usb.h" 29#include "usb.h"
30 30
31static int usb_match_one_id(struct usb_interface *interface,
32 const struct usb_device_id *id);
33
34struct usb_dynid {
35 struct list_head node;
36 struct usb_device_id id;
37};
38
39#ifdef CONFIG_HOTPLUG 31#ifdef CONFIG_HOTPLUG
40 32
41/* 33/*
 42 * Adds a new dynamic USB device ID to this driver, 34 * Adds a new dynamic USB device ID to this driver,
 43 * and causes the driver to probe for all devices again. 35 * and causes the driver to probe for all devices again.
44 */ 36 */
45static ssize_t store_new_id(struct device_driver *driver, 37ssize_t usb_store_new_id(struct usb_dynids *dynids,
46 const char *buf, size_t count) 38 struct device_driver *driver,
39 const char *buf, size_t count)
47{ 40{
48 struct usb_driver *usb_drv = to_usb_driver(driver);
49 struct usb_dynid *dynid; 41 struct usb_dynid *dynid;
50 u32 idVendor = 0; 42 u32 idVendor = 0;
51 u32 idProduct = 0; 43 u32 idProduct = 0;
@@ -65,9 +57,9 @@ static ssize_t store_new_id(struct device_driver *driver,
65 dynid->id.idProduct = idProduct; 57 dynid->id.idProduct = idProduct;
66 dynid->id.match_flags = USB_DEVICE_ID_MATCH_DEVICE; 58 dynid->id.match_flags = USB_DEVICE_ID_MATCH_DEVICE;
67 59
68 spin_lock(&usb_drv->dynids.lock); 60 spin_lock(&dynids->lock);
69 list_add_tail(&usb_drv->dynids.list, &dynid->node); 61 list_add_tail(&dynids->list, &dynid->node);
70 spin_unlock(&usb_drv->dynids.lock); 62 spin_unlock(&dynids->lock);
71 63
72 if (get_driver(driver)) { 64 if (get_driver(driver)) {
73 retval = driver_attach(driver); 65 retval = driver_attach(driver);
@@ -78,6 +70,15 @@ static ssize_t store_new_id(struct device_driver *driver,
78 return retval; 70 return retval;
79 return count; 71 return count;
80} 72}
73EXPORT_SYMBOL_GPL(usb_store_new_id);
74
75static ssize_t store_new_id(struct device_driver *driver,
76 const char *buf, size_t count)
77{
78 struct usb_driver *usb_drv = to_usb_driver(driver);
79
80 return usb_store_new_id(&usb_drv->dynids, driver, buf, count);
81}
81static DRIVER_ATTR(new_id, S_IWUSR, NULL, store_new_id); 82static DRIVER_ATTR(new_id, S_IWUSR, NULL, store_new_id);
82 83
83static int usb_create_newid_file(struct usb_driver *usb_drv) 84static int usb_create_newid_file(struct usb_driver *usb_drv)
@@ -365,8 +366,8 @@ void usb_driver_release_interface(struct usb_driver *driver,
365EXPORT_SYMBOL(usb_driver_release_interface); 366EXPORT_SYMBOL(usb_driver_release_interface);
366 367
367/* returns 0 if no match, 1 if match */ 368/* returns 0 if no match, 1 if match */
368static int usb_match_one_id(struct usb_interface *interface, 369int usb_match_one_id(struct usb_interface *interface,
369 const struct usb_device_id *id) 370 const struct usb_device_id *id)
370{ 371{
371 struct usb_host_interface *intf; 372 struct usb_host_interface *intf;
372 struct usb_device *dev; 373 struct usb_device *dev;
@@ -432,6 +433,8 @@ static int usb_match_one_id(struct usb_interface *interface,
432 433
433 return 1; 434 return 1;
434} 435}
436EXPORT_SYMBOL_GPL(usb_match_one_id);
437
435/** 438/**
436 * usb_match_id - find first usb_device_id matching device or interface 439 * usb_match_id - find first usb_device_id matching device or interface
437 * @interface: the interface of interest 440 * @interface: the interface of interest
@@ -750,7 +753,8 @@ EXPORT_SYMBOL_GPL(usb_deregister_device_driver);
750 * usb_register_dev() to enable that functionality. This function no longer 753 * usb_register_dev() to enable that functionality. This function no longer
751 * takes care of that. 754 * takes care of that.
752 */ 755 */
753int usb_register_driver(struct usb_driver *new_driver, struct module *owner) 756int usb_register_driver(struct usb_driver *new_driver, struct module *owner,
757 const char *mod_name)
754{ 758{
755 int retval = 0; 759 int retval = 0;
756 760
@@ -763,6 +767,7 @@ int usb_register_driver(struct usb_driver *new_driver, struct module *owner)
763 new_driver->drvwrap.driver.probe = usb_probe_interface; 767 new_driver->drvwrap.driver.probe = usb_probe_interface;
764 new_driver->drvwrap.driver.remove = usb_unbind_interface; 768 new_driver->drvwrap.driver.remove = usb_unbind_interface;
765 new_driver->drvwrap.driver.owner = owner; 769 new_driver->drvwrap.driver.owner = owner;
770 new_driver->drvwrap.driver.mod_name = mod_name;
766 spin_lock_init(&new_driver->dynids.lock); 771 spin_lock_init(&new_driver->dynids.lock);
767 INIT_LIST_HEAD(&new_driver->dynids.list); 772 INIT_LIST_HEAD(&new_driver->dynids.list);
768 773
diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
index f794f07cfb33..01c857ac27af 100644
--- a/drivers/usb/core/file.c
+++ b/drivers/usb/core/file.c
@@ -194,14 +194,13 @@ int usb_register_dev(struct usb_interface *intf,
194 ++temp; 194 ++temp;
195 else 195 else
196 temp = name; 196 temp = name;
197 intf->class_dev = class_device_create(usb_class->class, NULL, 197 intf->usb_dev = device_create(usb_class->class, &intf->dev,
198 MKDEV(USB_MAJOR, minor), 198 MKDEV(USB_MAJOR, minor), "%s", temp);
199 &intf->dev, "%s", temp); 199 if (IS_ERR(intf->usb_dev)) {
200 if (IS_ERR(intf->class_dev)) {
201 spin_lock (&minor_lock); 200 spin_lock (&minor_lock);
202 usb_minors[intf->minor] = NULL; 201 usb_minors[intf->minor] = NULL;
203 spin_unlock (&minor_lock); 202 spin_unlock (&minor_lock);
204 retval = PTR_ERR(intf->class_dev); 203 retval = PTR_ERR(intf->usb_dev);
205 } 204 }
206exit: 205exit:
207 return retval; 206 return retval;
@@ -242,8 +241,8 @@ void usb_deregister_dev(struct usb_interface *intf,
242 spin_unlock (&minor_lock); 241 spin_unlock (&minor_lock);
243 242
244 snprintf(name, BUS_ID_SIZE, class_driver->name, intf->minor - minor_base); 243 snprintf(name, BUS_ID_SIZE, class_driver->name, intf->minor - minor_base);
245 class_device_destroy(usb_class->class, MKDEV(USB_MAJOR, intf->minor)); 244 device_destroy(usb_class->class, MKDEV(USB_MAJOR, intf->minor));
246 intf->class_dev = NULL; 245 intf->usb_dev = NULL;
247 intf->minor = -1; 246 intf->minor = -1;
248 destroy_usb_class(); 247 destroy_usb_class();
249} 248}
diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
index ebb20ff7ac58..b531a4fd30c2 100644
--- a/drivers/usb/core/generic.c
+++ b/drivers/usb/core/generic.c
@@ -25,6 +25,20 @@ static inline const char *plural(int n)
25 return (n == 1 ? "" : "s"); 25 return (n == 1 ? "" : "s");
26} 26}
27 27
28static int is_rndis(struct usb_interface_descriptor *desc)
29{
30 return desc->bInterfaceClass == USB_CLASS_COMM
31 && desc->bInterfaceSubClass == 2
32 && desc->bInterfaceProtocol == 0xff;
33}
34
35static int is_activesync(struct usb_interface_descriptor *desc)
36{
37 return desc->bInterfaceClass == USB_CLASS_MISC
38 && desc->bInterfaceSubClass == 1
39 && desc->bInterfaceProtocol == 1;
40}
41
28static int choose_configuration(struct usb_device *udev) 42static int choose_configuration(struct usb_device *udev)
29{ 43{
30 int i; 44 int i;
@@ -87,14 +101,12 @@ static int choose_configuration(struct usb_device *udev)
87 continue; 101 continue;
88 } 102 }
89 103
90 /* If the first config's first interface is COMM/2/0xff 104 /* When the first config's first interface is one of Microsoft's
91 * (MSFT RNDIS), rule it out unless Linux has host-side 105 * pet nonstandard Ethernet-over-USB protocols, ignore it unless
92 * RNDIS support. */ 106 * this kernel has enabled the necessary host side driver.
93 if (i == 0 && desc 107 */
94 && desc->bInterfaceClass == USB_CLASS_COMM 108 if (i == 0 && desc && (is_rndis(desc) || is_activesync(desc))) {
95 && desc->bInterfaceSubClass == 2 109#if !defined(CONFIG_USB_NET_RNDIS_HOST) && !defined(CONFIG_USB_NET_RNDIS_HOST_MODULE)
96 && desc->bInterfaceProtocol == 0xff) {
97#ifndef CONFIG_USB_NET_RNDIS_HOST
98 continue; 110 continue;
99#else 111#else
100 best = c; 112 best = c;
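Two details carry the logic of this hunk. First, the new helpers name the magic class/subclass/protocol triples: COMM/2/0xff is Microsoft RNDIS and MISC/1/1 is ActiveSync. Second, the doubled preprocessor test is the usual kbuild idiom for "no such driver anywhere": a built-in driver defines CONFIG_USB_NET_RNDIS_HOST, a modular build defines CONFIG_USB_NET_RNDIS_HOST_MODULE, and only when neither exists should the config be skipped. The old #ifndef wrongly rejected RNDIS configurations whenever rndis_host was built as a module:

	#if !defined(CONFIG_USB_NET_RNDIS_HOST) && \
	    !defined(CONFIG_USB_NET_RNDIS_HOST_MODULE)
		continue;	/* no host-side driver at all: skip this config */
	#else
		best = c;	/* driver available (built-in or module): usable */
	#endif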
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 10064af65d17..b26c19e8d19f 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -45,8 +45,6 @@
45#include "hub.h" 45#include "hub.h"
46 46
47 47
48// #define USB_BANDWIDTH_MESSAGES
49
50/*-------------------------------------------------------------------------*/ 48/*-------------------------------------------------------------------------*/
51 49
52/* 50/*
@@ -891,136 +889,6 @@ long usb_calc_bus_time (int speed, int is_input, int isoc, int bytecount)
891} 889}
892EXPORT_SYMBOL (usb_calc_bus_time); 890EXPORT_SYMBOL (usb_calc_bus_time);
893 891
894/*
895 * usb_check_bandwidth():
896 *
897 * old_alloc is from host_controller->bandwidth_allocated in microseconds;
898 * bustime is from calc_bus_time(), but converted to microseconds.
899 *
900 * returns <bustime in us> if successful,
901 * or -ENOSPC if bandwidth request fails.
902 *
903 * FIXME:
904 * This initial implementation does not use Endpoint.bInterval
905 * in managing bandwidth allocation.
906 * It probably needs to be expanded to use Endpoint.bInterval.
907 * This can be done as a later enhancement (correction).
908 *
909 * This will also probably require some kind of
910 * frame allocation tracking...meaning, for example,
911 * that if multiple drivers request interrupts every 10 USB frames,
912 * they don't all have to be allocated at
913 * frame numbers N, N+10, N+20, etc. Some of them could be at
914 * N+11, N+21, N+31, etc., and others at
915 * N+12, N+22, N+32, etc.
916 *
917 * Similarly for isochronous transfers...
918 *
919 * Individual HCDs can schedule more directly ... this logic
920 * is not correct for high speed transfers.
921 */
922int usb_check_bandwidth (struct usb_device *dev, struct urb *urb)
923{
924 unsigned int pipe = urb->pipe;
925 long bustime;
926 int is_in = usb_pipein (pipe);
927 int is_iso = usb_pipeisoc (pipe);
928 int old_alloc = dev->bus->bandwidth_allocated;
929 int new_alloc;
930
931
932 bustime = NS_TO_US (usb_calc_bus_time (dev->speed, is_in, is_iso,
933 usb_maxpacket (dev, pipe, !is_in)));
934 if (is_iso)
935 bustime /= urb->number_of_packets;
936
937 new_alloc = old_alloc + (int) bustime;
938 if (new_alloc > FRAME_TIME_MAX_USECS_ALLOC) {
939#ifdef DEBUG
940 char *mode =
941#ifdef CONFIG_USB_BANDWIDTH
942 "";
943#else
944 "would have ";
945#endif
946 dev_dbg (&dev->dev, "usb_check_bandwidth %sFAILED: %d + %ld = %d usec\n",
947 mode, old_alloc, bustime, new_alloc);
948#endif
949#ifdef CONFIG_USB_BANDWIDTH
950 bustime = -ENOSPC; /* report error */
951#endif
952 }
953
954 return bustime;
955}
956EXPORT_SYMBOL (usb_check_bandwidth);
957
958
959/**
960 * usb_claim_bandwidth - records bandwidth for a periodic transfer
961 * @dev: source/target of request
962 * @urb: request (urb->dev == dev)
963 * @bustime: bandwidth consumed, in (average) microseconds per frame
964 * @isoc: true iff the request is isochronous
965 *
966 * Bus bandwidth reservations are recorded purely for diagnostic purposes.
967 * HCDs are expected not to overcommit periodic bandwidth, and to record such
968 * reservations whenever endpoints are added to the periodic schedule.
969 *
970 * FIXME averaging per-frame is suboptimal. Better to sum over the HCD's
971 * entire periodic schedule ... 32 frames for OHCI, 1024 for UHCI, settable
972 * for EHCI (256/512/1024 frames, default 1024) and have the bus expose how
973 * large its periodic schedule is.
974 */
975void usb_claim_bandwidth (struct usb_device *dev, struct urb *urb, int bustime, int isoc)
976{
977 dev->bus->bandwidth_allocated += bustime;
978 if (isoc)
979 dev->bus->bandwidth_isoc_reqs++;
980 else
981 dev->bus->bandwidth_int_reqs++;
982 urb->bandwidth = bustime;
983
984#ifdef USB_BANDWIDTH_MESSAGES
985 dev_dbg (&dev->dev, "bandwidth alloc increased by %d (%s) to %d for %d requesters\n",
986 bustime,
987 isoc ? "ISOC" : "INTR",
988 dev->bus->bandwidth_allocated,
989 dev->bus->bandwidth_int_reqs + dev->bus->bandwidth_isoc_reqs);
990#endif
991}
992EXPORT_SYMBOL (usb_claim_bandwidth);
993
994
995/**
996 * usb_release_bandwidth - reverses effect of usb_claim_bandwidth()
997 * @dev: source/target of request
998 * @urb: request (urb->dev == dev)
999 * @isoc: true iff the request is isochronous
1000 *
1001 * This records that previously allocated bandwidth has been released.
1002 * Bandwidth is released when endpoints are removed from the host controller's
1003 * periodic schedule.
1004 */
1005void usb_release_bandwidth (struct usb_device *dev, struct urb *urb, int isoc)
1006{
1007 dev->bus->bandwidth_allocated -= urb->bandwidth;
1008 if (isoc)
1009 dev->bus->bandwidth_isoc_reqs--;
1010 else
1011 dev->bus->bandwidth_int_reqs--;
1012
1013#ifdef USB_BANDWIDTH_MESSAGES
1014 dev_dbg (&dev->dev, "bandwidth alloc reduced by %d (%s) to %d for %d requesters\n",
1015 urb->bandwidth,
1016 isoc ? "ISOC" : "INTR",
1017 dev->bus->bandwidth_allocated,
1018 dev->bus->bandwidth_int_reqs + dev->bus->bandwidth_isoc_reqs);
1019#endif
1020 urb->bandwidth = 0;
1021}
1022EXPORT_SYMBOL (usb_release_bandwidth);
1023
1024 892
1025/*-------------------------------------------------------------------------*/ 893/*-------------------------------------------------------------------------*/
1026 894
@@ -1034,11 +902,6 @@ static void urb_unlink (struct urb *urb)
1034{ 902{
1035 unsigned long flags; 903 unsigned long flags;
1036 904
1037 /* Release any periodic transfer bandwidth */
1038 if (urb->bandwidth)
1039 usb_release_bandwidth (urb->dev, urb,
1040 usb_pipeisoc (urb->pipe));
1041
1042 /* clear all state linking urb to this dev (and hcd) */ 905 /* clear all state linking urb to this dev (and hcd) */
1043 906
1044 spin_lock_irqsave (&hcd_data_lock, flags); 907 spin_lock_irqsave (&hcd_data_lock, flags);
diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h
index 8f8df0d4382e..2a269ca20517 100644
--- a/drivers/usb/core/hcd.h
+++ b/drivers/usb/core/hcd.h
@@ -308,10 +308,6 @@ extern void usb_destroy_configuration(struct usb_device *dev);
308#define NS_TO_US(ns) ((ns + 500L) / 1000L) 308#define NS_TO_US(ns) ((ns + 500L) / 1000L)
309 /* convert & round nanoseconds to microseconds */ 309 /* convert & round nanoseconds to microseconds */
310 310
311extern void usb_claim_bandwidth (struct usb_device *dev, struct urb *urb,
312 int bustime, int isoc);
313extern void usb_release_bandwidth (struct usb_device *dev, struct urb *urb,
314 int isoc);
315 311
316/* 312/*
317 * Full/low speed bandwidth allocation constants/support. 313 * Full/low speed bandwidth allocation constants/support.
@@ -324,8 +320,6 @@ extern void usb_release_bandwidth (struct usb_device *dev, struct urb *urb,
324#define FRAME_TIME_MAX_BITS_ALLOC (90L * FRAME_TIME_BITS / 100L) 320#define FRAME_TIME_MAX_BITS_ALLOC (90L * FRAME_TIME_BITS / 100L)
325#define FRAME_TIME_MAX_USECS_ALLOC (90L * FRAME_TIME_USECS / 100L) 321#define FRAME_TIME_MAX_USECS_ALLOC (90L * FRAME_TIME_USECS / 100L)
326 322
327extern int usb_check_bandwidth (struct usb_device *dev, struct urb *urb);
328
329/* 323/*
330 * Ceiling [nano/micro]seconds (typical) for that many bytes at high speed 324 * Ceiling [nano/micro]seconds (typical) for that many bytes at high speed
331 * ISO is a bit less, no ACK ... from USB 2.0 spec, 5.11.3 (and needed 325 * ISO is a bit less, no ACK ... from USB 2.0 spec, 5.11.3 (and needed
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 1988224b362b..590ec82d0515 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -87,9 +87,6 @@ static DECLARE_WAIT_QUEUE_HEAD(khubd_wait);
87 87
88static struct task_struct *khubd_task; 88static struct task_struct *khubd_task;
89 89
90/* multithreaded probe logic */
91static int multithread_probe = 0;
92
93/* cycle leds on hubs that aren't blinking for attention */ 90/* cycle leds on hubs that aren't blinking for attention */
94static int blinkenlights = 0; 91static int blinkenlights = 0;
95module_param (blinkenlights, bool, S_IRUGO); 92module_param (blinkenlights, bool, S_IRUGO);
@@ -1256,9 +1253,28 @@ static inline void show_string(struct usb_device *udev, char *id, char *string)
1256static int __usb_port_suspend(struct usb_device *, int port1); 1253static int __usb_port_suspend(struct usb_device *, int port1);
1257#endif 1254#endif
1258 1255
1259static int __usb_new_device(void *void_data) 1256/**
1257 * usb_new_device - perform initial device setup (usbcore-internal)
1258 * @udev: newly addressed device (in ADDRESS state)
1259 *
1260 * This is called with devices which have been enumerated, but not yet
1261 * configured. The device descriptor is available, but not descriptors
1262 * for any device configuration. The caller must have locked either
1263 * the parent hub (if udev is a normal device) or else the
1264 * usb_bus_list_lock (if udev is a root hub). The parent's pointer to
1265 * udev has already been installed, but udev is not yet visible through
1266 * sysfs or other filesystem code.
1267 *
 1268 * Returns 0 if the device was configured properly, i.e. its
 1269 * interfaces were registered with the driver core; else a negative
 1270 * errno value.
1271 *
1272 * This call is synchronous, and may not be used in an interrupt context.
1273 *
1274 * Only the hub driver or root-hub registrar should ever call this.
1275 */
1276int usb_new_device(struct usb_device *udev)
1260{ 1277{
1261 struct usb_device *udev = void_data;
1262 int err; 1278 int err;
1263 1279
1264 /* Lock ourself into memory in order to keep a probe sequence 1280 /* Lock ourself into memory in order to keep a probe sequence
@@ -1375,44 +1391,6 @@ fail:
1375 goto exit; 1391 goto exit;
1376} 1392}
1377 1393
1378/**
1379 * usb_new_device - perform initial device setup (usbcore-internal)
1380 * @udev: newly addressed device (in ADDRESS state)
1381 *
1382 * This is called with devices which have been enumerated, but not yet
1383 * configured. The device descriptor is available, but not descriptors
1384 * for any device configuration. The caller must have locked either
1385 * the parent hub (if udev is a normal device) or else the
1386 * usb_bus_list_lock (if udev is a root hub). The parent's pointer to
1387 * udev has already been installed, but udev is not yet visible through
1388 * sysfs or other filesystem code.
1389 *
1390 * The return value for this function depends on if the
1391 * multithread_probe variable is set or not. If it's set, it will
1392 * return a if the probe thread was successfully created or not. If the
1393 * variable is not set, it will return if the device is configured
1394 * properly or not. interfaces, in sysfs); else a negative errno value.
1395 *
1396 * This call is synchronous, and may not be used in an interrupt context.
1397 *
1398 * Only the hub driver or root-hub registrar should ever call this.
1399 */
1400int usb_new_device(struct usb_device *udev)
1401{
1402 struct task_struct *probe_task;
1403 int ret = 0;
1404
1405 if (multithread_probe) {
1406 probe_task = kthread_run(__usb_new_device, udev,
1407 "usb-probe-%s", udev->devnum);
1408 if (IS_ERR(probe_task))
1409 ret = PTR_ERR(probe_task);
1410 } else
1411 ret = __usb_new_device(udev);
1412
1413 return ret;
1414}
1415
1416static int hub_port_status(struct usb_hub *hub, int port1, 1394static int hub_port_status(struct usb_hub *hub, int port1,
1417 u16 *status, u16 *change) 1395 u16 *status, u16 *change)
1418{ 1396{
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 149aa8bfb1fe..8aca3574c2b5 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1545,11 +1545,7 @@ int usb_driver_set_configuration(struct usb_device *udev, int config)
1545 INIT_WORK(&req->work, driver_set_config_work); 1545 INIT_WORK(&req->work, driver_set_config_work);
1546 1546
1547 usb_get_dev(udev); 1547 usb_get_dev(udev);
1548 if (!schedule_work(&req->work)) { 1548 schedule_work(&req->work);
1549 usb_put_dev(udev);
1550 kfree(req);
1551 return -EINVAL;
1552 }
1553 return 0; 1549 return 0;
1554} 1550}
1555EXPORT_SYMBOL_GPL(usb_driver_set_configuration); 1551EXPORT_SYMBOL_GPL(usb_driver_set_configuration);
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index 55d8f575206d..4eaa0ee8e72f 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -16,16 +16,16 @@
16 16
17/* Active configuration fields */ 17/* Active configuration fields */
18#define usb_actconfig_show(field, multiplier, format_string) \ 18#define usb_actconfig_show(field, multiplier, format_string) \
19static ssize_t show_##field (struct device *dev, \ 19static ssize_t show_##field(struct device *dev, \
20 struct device_attribute *attr, char *buf) \ 20 struct device_attribute *attr, char *buf) \
21{ \ 21{ \
22 struct usb_device *udev; \ 22 struct usb_device *udev; \
23 struct usb_host_config *actconfig; \ 23 struct usb_host_config *actconfig; \
24 \ 24 \
25 udev = to_usb_device (dev); \ 25 udev = to_usb_device(dev); \
26 actconfig = udev->actconfig; \ 26 actconfig = udev->actconfig; \
27 if (actconfig) \ 27 if (actconfig) \
28 return sprintf (buf, format_string, \ 28 return sprintf(buf, format_string, \
29 actconfig->desc.field * multiplier); \ 29 actconfig->desc.field * multiplier); \
30 else \ 30 else \
31 return 0; \ 31 return 0; \
@@ -35,9 +35,9 @@ static ssize_t show_##field (struct device *dev, \
35usb_actconfig_show(field, multiplier, format_string) \ 35usb_actconfig_show(field, multiplier, format_string) \
36static DEVICE_ATTR(field, S_IRUGO, show_##field, NULL); 36static DEVICE_ATTR(field, S_IRUGO, show_##field, NULL);
37 37
38usb_actconfig_attr (bNumInterfaces, 1, "%2d\n") 38usb_actconfig_attr(bNumInterfaces, 1, "%2d\n")
39usb_actconfig_attr (bmAttributes, 1, "%2x\n") 39usb_actconfig_attr(bmAttributes, 1, "%2x\n")
40usb_actconfig_attr (bMaxPower, 2, "%3dmA\n") 40usb_actconfig_attr(bMaxPower, 2, "%3dmA\n")
41 41
42static ssize_t show_configuration_string(struct device *dev, 42static ssize_t show_configuration_string(struct device *dev,
43 struct device_attribute *attr, char *buf) 43 struct device_attribute *attr, char *buf)
@@ -45,7 +45,7 @@ static ssize_t show_configuration_string(struct device *dev,
45 struct usb_device *udev; 45 struct usb_device *udev;
46 struct usb_host_config *actconfig; 46 struct usb_host_config *actconfig;
47 47
48 udev = to_usb_device (dev); 48 udev = to_usb_device(dev);
49 actconfig = udev->actconfig; 49 actconfig = udev->actconfig;
50 if ((!actconfig) || (!actconfig->string)) 50 if ((!actconfig) || (!actconfig->string))
51 return 0; 51 return 0;
@@ -57,16 +57,16 @@ static DEVICE_ATTR(configuration, S_IRUGO, show_configuration_string, NULL);
57usb_actconfig_show(bConfigurationValue, 1, "%u\n"); 57usb_actconfig_show(bConfigurationValue, 1, "%u\n");
58 58
59static ssize_t 59static ssize_t
60set_bConfigurationValue (struct device *dev, struct device_attribute *attr, 60set_bConfigurationValue(struct device *dev, struct device_attribute *attr,
61 const char *buf, size_t count) 61 const char *buf, size_t count)
62{ 62{
63 struct usb_device *udev = to_usb_device (dev); 63 struct usb_device *udev = to_usb_device(dev);
64 int config, value; 64 int config, value;
65 65
66 if (sscanf (buf, "%u", &config) != 1 || config > 255) 66 if (sscanf(buf, "%u", &config) != 1 || config > 255)
67 return -EINVAL; 67 return -EINVAL;
68 usb_lock_device(udev); 68 usb_lock_device(udev);
69 value = usb_set_configuration (udev, config); 69 value = usb_set_configuration(udev, config);
70 usb_unlock_device(udev); 70 usb_unlock_device(udev);
71 return (value < 0) ? value : count; 71 return (value < 0) ? value : count;
72} 72}
@@ -81,7 +81,7 @@ static ssize_t show_##name(struct device *dev, \
81{ \ 81{ \
82 struct usb_device *udev; \ 82 struct usb_device *udev; \
83 \ 83 \
84 udev = to_usb_device (dev); \ 84 udev = to_usb_device(dev); \
85 return sprintf(buf, "%s\n", udev->name); \ 85 return sprintf(buf, "%s\n", udev->name); \
86} \ 86} \
87static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL); 87static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL);
@@ -91,12 +91,12 @@ usb_string_attr(manufacturer);
91usb_string_attr(serial); 91usb_string_attr(serial);
92 92
93static ssize_t 93static ssize_t
94show_speed (struct device *dev, struct device_attribute *attr, char *buf) 94show_speed(struct device *dev, struct device_attribute *attr, char *buf)
95{ 95{
96 struct usb_device *udev; 96 struct usb_device *udev;
97 char *speed; 97 char *speed;
98 98
99 udev = to_usb_device (dev); 99 udev = to_usb_device(dev);
100 100
101 switch (udev->speed) { 101 switch (udev->speed) {
102 case USB_SPEED_LOW: 102 case USB_SPEED_LOW:
@@ -112,22 +112,22 @@ show_speed (struct device *dev, struct device_attribute *attr, char *buf)
112 default: 112 default:
113 speed = "unknown"; 113 speed = "unknown";
114 } 114 }
115 return sprintf (buf, "%s\n", speed); 115 return sprintf(buf, "%s\n", speed);
116} 116}
117static DEVICE_ATTR(speed, S_IRUGO, show_speed, NULL); 117static DEVICE_ATTR(speed, S_IRUGO, show_speed, NULL);
118 118
119static ssize_t 119static ssize_t
120show_devnum (struct device *dev, struct device_attribute *attr, char *buf) 120show_devnum(struct device *dev, struct device_attribute *attr, char *buf)
121{ 121{
122 struct usb_device *udev; 122 struct usb_device *udev;
123 123
124 udev = to_usb_device (dev); 124 udev = to_usb_device(dev);
125 return sprintf (buf, "%d\n", udev->devnum); 125 return sprintf(buf, "%d\n", udev->devnum);
126} 126}
127static DEVICE_ATTR(devnum, S_IRUGO, show_devnum, NULL); 127static DEVICE_ATTR(devnum, S_IRUGO, show_devnum, NULL);
128 128
129static ssize_t 129static ssize_t
130show_version (struct device *dev, struct device_attribute *attr, char *buf) 130show_version(struct device *dev, struct device_attribute *attr, char *buf)
131{ 131{
132 struct usb_device *udev; 132 struct usb_device *udev;
133 u16 bcdUSB; 133 u16 bcdUSB;
@@ -139,25 +139,25 @@ show_version (struct device *dev, struct device_attribute *attr, char *buf)
139static DEVICE_ATTR(version, S_IRUGO, show_version, NULL); 139static DEVICE_ATTR(version, S_IRUGO, show_version, NULL);
140 140
141static ssize_t 141static ssize_t
142show_maxchild (struct device *dev, struct device_attribute *attr, char *buf) 142show_maxchild(struct device *dev, struct device_attribute *attr, char *buf)
143{ 143{
144 struct usb_device *udev; 144 struct usb_device *udev;
145 145
146 udev = to_usb_device (dev); 146 udev = to_usb_device(dev);
147 return sprintf (buf, "%d\n", udev->maxchild); 147 return sprintf(buf, "%d\n", udev->maxchild);
148} 148}
149static DEVICE_ATTR(maxchild, S_IRUGO, show_maxchild, NULL); 149static DEVICE_ATTR(maxchild, S_IRUGO, show_maxchild, NULL);
150 150
151/* Descriptor fields */ 151/* Descriptor fields */
152#define usb_descriptor_attr_le16(field, format_string) \ 152#define usb_descriptor_attr_le16(field, format_string) \
153static ssize_t \ 153static ssize_t \
154show_##field (struct device *dev, struct device_attribute *attr, \ 154show_##field(struct device *dev, struct device_attribute *attr, \
155 char *buf) \ 155 char *buf) \
156{ \ 156{ \
157 struct usb_device *udev; \ 157 struct usb_device *udev; \
158 \ 158 \
159 udev = to_usb_device (dev); \ 159 udev = to_usb_device(dev); \
160 return sprintf (buf, format_string, \ 160 return sprintf(buf, format_string, \
161 le16_to_cpu(udev->descriptor.field)); \ 161 le16_to_cpu(udev->descriptor.field)); \
162} \ 162} \
163static DEVICE_ATTR(field, S_IRUGO, show_##field, NULL); 163static DEVICE_ATTR(field, S_IRUGO, show_##field, NULL);
@@ -168,21 +168,21 @@ usb_descriptor_attr_le16(bcdDevice, "%04x\n")
168 168
169#define usb_descriptor_attr(field, format_string) \ 169#define usb_descriptor_attr(field, format_string) \
170static ssize_t \ 170static ssize_t \
171show_##field (struct device *dev, struct device_attribute *attr, \ 171show_##field(struct device *dev, struct device_attribute *attr, \
172 char *buf) \ 172 char *buf) \
173{ \ 173{ \
174 struct usb_device *udev; \ 174 struct usb_device *udev; \
175 \ 175 \
176 udev = to_usb_device (dev); \ 176 udev = to_usb_device(dev); \
177 return sprintf (buf, format_string, udev->descriptor.field); \ 177 return sprintf(buf, format_string, udev->descriptor.field); \
178} \ 178} \
179static DEVICE_ATTR(field, S_IRUGO, show_##field, NULL); 179static DEVICE_ATTR(field, S_IRUGO, show_##field, NULL);
180 180
181usb_descriptor_attr (bDeviceClass, "%02x\n") 181usb_descriptor_attr(bDeviceClass, "%02x\n")
182usb_descriptor_attr (bDeviceSubClass, "%02x\n") 182usb_descriptor_attr(bDeviceSubClass, "%02x\n")
183usb_descriptor_attr (bDeviceProtocol, "%02x\n") 183usb_descriptor_attr(bDeviceProtocol, "%02x\n")
184usb_descriptor_attr (bNumConfigurations, "%d\n") 184usb_descriptor_attr(bNumConfigurations, "%d\n")
185usb_descriptor_attr (bMaxPacketSize0, "%d\n") 185usb_descriptor_attr(bMaxPacketSize0, "%d\n")
186 186
187static struct attribute *dev_attrs[] = { 187static struct attribute *dev_attrs[] = {
188 /* current configuration's attributes */ 188 /* current configuration's attributes */
@@ -220,17 +220,17 @@ int usb_create_sysfs_dev_files(struct usb_device *udev)
220 return retval; 220 return retval;
221 221
222 if (udev->manufacturer) { 222 if (udev->manufacturer) {
223 retval = device_create_file (dev, &dev_attr_manufacturer); 223 retval = device_create_file(dev, &dev_attr_manufacturer);
224 if (retval) 224 if (retval)
225 goto error; 225 goto error;
226 } 226 }
227 if (udev->product) { 227 if (udev->product) {
228 retval = device_create_file (dev, &dev_attr_product); 228 retval = device_create_file(dev, &dev_attr_product);
229 if (retval) 229 if (retval)
230 goto error; 230 goto error;
231 } 231 }
232 if (udev->serial) { 232 if (udev->serial) {
233 retval = device_create_file (dev, &dev_attr_serial); 233 retval = device_create_file(dev, &dev_attr_serial);
234 if (retval) 234 if (retval)
235 goto error; 235 goto error;
236 } 236 }
@@ -246,7 +246,7 @@ error:
246 return retval; 246 return retval;
247} 247}
248 248
249void usb_remove_sysfs_dev_files (struct usb_device *udev) 249void usb_remove_sysfs_dev_files(struct usb_device *udev)
250{ 250{
251 struct device *dev = &udev->dev; 251 struct device *dev = &udev->dev;
252 252
@@ -264,22 +264,22 @@ void usb_remove_sysfs_dev_files (struct usb_device *udev)
264/* Interface fields */ 264/* Interface fields */
265#define usb_intf_attr(field, format_string) \ 265#define usb_intf_attr(field, format_string) \
266static ssize_t \ 266static ssize_t \
267show_##field (struct device *dev, struct device_attribute *attr, \ 267show_##field(struct device *dev, struct device_attribute *attr, \
268 char *buf) \ 268 char *buf) \
269{ \ 269{ \
270 struct usb_interface *intf = to_usb_interface (dev); \ 270 struct usb_interface *intf = to_usb_interface(dev); \
271 \ 271 \
272 return sprintf (buf, format_string, \ 272 return sprintf(buf, format_string, \
273 intf->cur_altsetting->desc.field); \ 273 intf->cur_altsetting->desc.field); \
274} \ 274} \
275static DEVICE_ATTR(field, S_IRUGO, show_##field, NULL); 275static DEVICE_ATTR(field, S_IRUGO, show_##field, NULL);
276 276
277usb_intf_attr (bInterfaceNumber, "%02x\n") 277usb_intf_attr(bInterfaceNumber, "%02x\n")
278usb_intf_attr (bAlternateSetting, "%2d\n") 278usb_intf_attr(bAlternateSetting, "%2d\n")
279usb_intf_attr (bNumEndpoints, "%02x\n") 279usb_intf_attr(bNumEndpoints, "%02x\n")
280usb_intf_attr (bInterfaceClass, "%02x\n") 280usb_intf_attr(bInterfaceClass, "%02x\n")
281usb_intf_attr (bInterfaceSubClass, "%02x\n") 281usb_intf_attr(bInterfaceSubClass, "%02x\n")
282usb_intf_attr (bInterfaceProtocol, "%02x\n") 282usb_intf_attr(bInterfaceProtocol, "%02x\n")
283 283
284static ssize_t show_interface_string(struct device *dev, 284static ssize_t show_interface_string(struct device *dev,
285 struct device_attribute *attr, char *buf) 285 struct device_attribute *attr, char *buf)
@@ -288,8 +288,8 @@ static ssize_t show_interface_string(struct device *dev,
288 struct usb_device *udev; 288 struct usb_device *udev;
289 int len; 289 int len;
290 290
291 intf = to_usb_interface (dev); 291 intf = to_usb_interface(dev);
292 udev = interface_to_usbdev (intf); 292 udev = interface_to_usbdev(intf);
293 len = snprintf(buf, 256, "%s", intf->cur_altsetting->string); 293 len = snprintf(buf, 256, "%s", intf->cur_altsetting->string);
294 if (len < 0) 294 if (len < 0)
295 return 0; 295 return 0;
@@ -384,7 +384,7 @@ error:
384 return retval; 384 return retval;
385} 385}
386 386
387void usb_remove_sysfs_intf_files (struct usb_interface *intf) 387void usb_remove_sysfs_intf_files(struct usb_interface *intf)
388{ 388{
389 usb_remove_intf_ep_files(intf); 389 usb_remove_intf_ep_files(intf);
390 sysfs_remove_group(&intf->dev.kobj, &intf_attr_grp); 390 sysfs_remove_group(&intf->dev.kobj, &intf_attr_grp);
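usb_remove_sysfs_intf_files() above mirrors the create path: attributes registered through sysfs_create_group() must be removed with sysfs_remove_group() on the same kobject before the device disappears. A minimal sketch of the pairing (demo_grp and both helpers are hypothetical; dev_attr_bInterfaceNumber comes from the usb_intf_attr() expansion above):

#include <linux/sysfs.h>
#include <linux/usb.h>

static struct attribute *demo_attrs[] = {
	&dev_attr_bInterfaceNumber.attr,
	NULL,			/* attribute lists are NULL-terminated */
};

static struct attribute_group demo_grp = {
	.attrs = demo_attrs,
};

static int demo_bind(struct usb_interface *intf)
{
	return sysfs_create_group(&intf->dev.kobj, &demo_grp);
}

static void demo_unbind(struct usb_interface *intf)
{
	/* same kobject, same group -- otherwise the attributes leak */
	sysfs_remove_group(&intf->dev.kobj, &demo_grp);
}
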
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index 9801d08edacf..94ea9727ff55 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -235,16 +235,15 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
235 235
236 urb->status = -EINPROGRESS; 236 urb->status = -EINPROGRESS;
237 urb->actual_length = 0; 237 urb->actual_length = 0;
238 urb->bandwidth = 0;
239 238
240 /* Lots of sanity checks, so HCDs can rely on clean data 239 /* Lots of sanity checks, so HCDs can rely on clean data
241 * and don't need to duplicate tests 240 * and don't need to duplicate tests
242 */ 241 */
243 pipe = urb->pipe; 242 pipe = urb->pipe;
244 temp = usb_pipetype (pipe); 243 temp = usb_pipetype(pipe);
245 is_out = usb_pipeout (pipe); 244 is_out = usb_pipeout(pipe);
246 245
247 if (!usb_pipecontrol (pipe) && dev->state < USB_STATE_CONFIGURED) 246 if (!usb_pipecontrol(pipe) && dev->state < USB_STATE_CONFIGURED)
248 return -ENODEV; 247 return -ENODEV;
249 248
250 /* FIXME there should be a sharable lock protecting us against 249 /* FIXME there should be a sharable lock protecting us against
@@ -253,11 +252,11 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
253 * checks get made.) 252 * checks get made.)
254 */ 253 */
255 254
256 max = usb_maxpacket (dev, pipe, is_out); 255 max = usb_maxpacket(dev, pipe, is_out);
257 if (max <= 0) { 256 if (max <= 0) {
258 dev_dbg(&dev->dev, 257 dev_dbg(&dev->dev,
259 "bogus endpoint ep%d%s in %s (bad maxpacket %d)\n", 258 "bogus endpoint ep%d%s in %s (bad maxpacket %d)\n",
260 usb_pipeendpoint (pipe), is_out ? "out" : "in", 259 usb_pipeendpoint(pipe), is_out ? "out" : "in",
261 __FUNCTION__, max); 260 __FUNCTION__, max);
262 return -EMSGSIZE; 261 return -EMSGSIZE;
263 } 262 }
@@ -279,11 +278,11 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
279 if (urb->number_of_packets <= 0) 278 if (urb->number_of_packets <= 0)
280 return -EINVAL; 279 return -EINVAL;
281 for (n = 0; n < urb->number_of_packets; n++) { 280 for (n = 0; n < urb->number_of_packets; n++) {
282 len = urb->iso_frame_desc [n].length; 281 len = urb->iso_frame_desc[n].length;
283 if (len < 0 || len > max) 282 if (len < 0 || len > max)
284 return -EMSGSIZE; 283 return -EMSGSIZE;
285 urb->iso_frame_desc [n].status = -EXDEV; 284 urb->iso_frame_desc[n].status = -EXDEV;
286 urb->iso_frame_desc [n].actual_length = 0; 285 urb->iso_frame_desc[n].actual_length = 0;
287 } 286 }
288 } 287 }
289 288
@@ -322,7 +321,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
322 321
323 /* fail if submitter gave bogus flags */ 322 /* fail if submitter gave bogus flags */
324 if (urb->transfer_flags != orig_flags) { 323 if (urb->transfer_flags != orig_flags) {
325 err ("BOGUS urb flags, %x --> %x", 324 err("BOGUS urb flags, %x --> %x",
326 orig_flags, urb->transfer_flags); 325 orig_flags, urb->transfer_flags);
327 return -EINVAL; 326 return -EINVAL;
328 } 327 }
@@ -373,7 +372,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
373 urb->interval = temp; 372 urb->interval = temp;
374 } 373 }
375 374
376 return usb_hcd_submit_urb (urb, mem_flags); 375 return usb_hcd_submit_urb(urb, mem_flags);
377} 376}
378 377
379/*-------------------------------------------------------------------*/ 378/*-------------------------------------------------------------------*/
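The sanity checks trimmed and retained above run on every usb_submit_urb() call, which is what lets host controller drivers assume a valid pipe, maxpacket, and flag set. Seen from a device driver, the contract looks like this sketch (demo_send and demo_complete are hypothetical, and endpoint 1 is an assumed bulk-OUT endpoint):

#include <linux/usb.h>

static void demo_complete(struct urb *urb)
{
	if (urb->status)
		dev_dbg(&urb->dev->dev, "urb failed: %d\n", urb->status);
}

static int demo_send(struct usb_device *udev, void *buf, int len)
{
	struct urb *urb = usb_alloc_urb(0, GFP_KERNEL);
	int ret;

	if (!urb)
		return -ENOMEM;
	usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, 1),
			buf, len, demo_complete, NULL);
	ret = usb_submit_urb(urb, GFP_KERNEL);	/* the checks above run here */
	if (ret)
		usb_free_urb(urb);
	return ret;
}
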
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 02426d0b9a34..3db721cd557a 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -233,7 +233,7 @@ static void usb_autosuspend_work(struct work_struct *work)
233 * @parent: hub to which device is connected; null to allocate a root hub 233 * @parent: hub to which device is connected; null to allocate a root hub
234 * @bus: bus used to access the device 234 * @bus: bus used to access the device
235 * @port1: one-based index of port; ignored for root hubs 235 * @port1: one-based index of port; ignored for root hubs
236 * Context: !in_interrupt () 236 * Context: !in_interrupt()
237 * 237 *
238 * Only hub drivers (including virtual root hub drivers for host 238 * Only hub drivers (including virtual root hub drivers for host
239 * controllers) should ever call this. 239 * controllers) should ever call this.
@@ -277,22 +277,22 @@ usb_alloc_dev(struct usb_device *parent, struct usb_bus *bus, unsigned port1)
277 * as stable: bus->busnum changes easily from modprobe order, 277 * as stable: bus->busnum changes easily from modprobe order,
278 * cardbus or pci hotplugging, and so on. 278 * cardbus or pci hotplugging, and so on.
279 */ 279 */
280 if (unlikely (!parent)) { 280 if (unlikely(!parent)) {
281 dev->devpath [0] = '0'; 281 dev->devpath[0] = '0';
282 282
283 dev->dev.parent = bus->controller; 283 dev->dev.parent = bus->controller;
284 sprintf (&dev->dev.bus_id[0], "usb%d", bus->busnum); 284 sprintf(&dev->dev.bus_id[0], "usb%d", bus->busnum);
285 } else { 285 } else {
286 /* match any labeling on the hubs; it's one-based */ 286 /* match any labeling on the hubs; it's one-based */
287 if (parent->devpath [0] == '0') 287 if (parent->devpath[0] == '0')
288 snprintf (dev->devpath, sizeof dev->devpath, 288 snprintf(dev->devpath, sizeof dev->devpath,
289 "%d", port1); 289 "%d", port1);
290 else 290 else
291 snprintf (dev->devpath, sizeof dev->devpath, 291 snprintf(dev->devpath, sizeof dev->devpath,
292 "%s.%d", parent->devpath, port1); 292 "%s.%d", parent->devpath, port1);
293 293
294 dev->dev.parent = &parent->dev; 294 dev->dev.parent = &parent->dev;
295 sprintf (&dev->dev.bus_id[0], "%d-%s", 295 sprintf(&dev->dev.bus_id[0], "%d-%s",
296 bus->busnum, dev->devpath); 296 bus->busnum, dev->devpath);
297 297
298 /* hub driver sets up TT records */ 298 /* hub driver sets up TT records */
@@ -463,7 +463,7 @@ static struct usb_device *match_device(struct usb_device *dev,
463 /* see if this device matches */ 463 /* see if this device matches */
464 if ((vendor_id == le16_to_cpu(dev->descriptor.idVendor)) && 464 if ((vendor_id == le16_to_cpu(dev->descriptor.idVendor)) &&
465 (product_id == le16_to_cpu(dev->descriptor.idProduct))) { 465 (product_id == le16_to_cpu(dev->descriptor.idProduct))) {
466 dev_dbg (&dev->dev, "matched this device!\n"); 466 dev_dbg(&dev->dev, "matched this device!\n");
467 ret_dev = usb_get_dev(dev); 467 ret_dev = usb_get_dev(dev);
468 goto exit; 468 goto exit;
469 } 469 }
@@ -535,7 +535,7 @@ exit:
535 */ 535 */
536int usb_get_current_frame_number(struct usb_device *dev) 536int usb_get_current_frame_number(struct usb_device *dev)
537{ 537{
538 return usb_hcd_get_frame_number (dev); 538 return usb_hcd_get_frame_number(dev);
539} 539}
540 540
541/*-------------------------------------------------------------------*/ 541/*-------------------------------------------------------------------*/
@@ -593,7 +593,7 @@ int __usb_get_extra_descriptor(char *buffer, unsigned size,
593 * 593 *
594 * When the buffer is no longer used, free it with usb_buffer_free(). 594 * When the buffer is no longer used, free it with usb_buffer_free().
595 */ 595 */
596void *usb_buffer_alloc ( 596void *usb_buffer_alloc(
597 struct usb_device *dev, 597 struct usb_device *dev,
598 size_t size, 598 size_t size,
599 gfp_t mem_flags, 599 gfp_t mem_flags,
@@ -602,7 +602,7 @@ void *usb_buffer_alloc (
602{ 602{
603 if (!dev || !dev->bus) 603 if (!dev || !dev->bus)
604 return NULL; 604 return NULL;
605 return hcd_buffer_alloc (dev->bus, size, mem_flags, dma); 605 return hcd_buffer_alloc(dev->bus, size, mem_flags, dma);
606} 606}
607 607
608/** 608/**
@@ -616,7 +616,7 @@ void *usb_buffer_alloc (
616 * been allocated using usb_buffer_alloc(), and the parameters must match 616 * been allocated using usb_buffer_alloc(), and the parameters must match
617 * those provided in that allocation request. 617 * those provided in that allocation request.
618 */ 618 */
619void usb_buffer_free ( 619void usb_buffer_free(
620 struct usb_device *dev, 620 struct usb_device *dev,
621 size_t size, 621 size_t size,
622 void *addr, 622 void *addr,
@@ -627,7 +627,7 @@ void usb_buffer_free (
627 return; 627 return;
628 if (!addr) 628 if (!addr)
629 return; 629 return;
630 hcd_buffer_free (dev->bus, size, addr, dma); 630 hcd_buffer_free(dev->bus, size, addr, dma);
631} 631}
632 632
633/** 633/**
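usb_buffer_alloc() and usb_buffer_free() hand out DMA-consistent memory tied to the host controller, letting an URB skip the per-submission mapping step when URB_NO_TRANSFER_DMA_MAP is set. A sketch of the usual pairing (demo_setup is hypothetical; the matching free must pass the same size and dma handle):

#include <linux/usb.h>

static int demo_setup(struct usb_device *udev, struct urb *urb,
		unsigned pipe, usb_complete_t done, size_t len)
{
	dma_addr_t dma;
	void *buf = usb_buffer_alloc(udev, len, GFP_KERNEL, &dma);

	if (!buf)
		return -ENOMEM;
	usb_fill_bulk_urb(urb, udev, pipe, buf, len, done, NULL);
	urb->transfer_dma = dma;
	urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
	/* on teardown: usb_buffer_free(udev, len, buf, dma); */
	return 0;
}
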
@@ -647,7 +647,7 @@ void usb_buffer_free (
647 * Reverse the effect of this call with usb_buffer_unmap(). 647 * Reverse the effect of this call with usb_buffer_unmap().
648 */ 648 */
649#if 0 649#if 0
650struct urb *usb_buffer_map (struct urb *urb) 650struct urb *usb_buffer_map(struct urb *urb)
651{ 651{
652 struct usb_bus *bus; 652 struct usb_bus *bus;
653 struct device *controller; 653 struct device *controller;
@@ -659,14 +659,14 @@ struct urb *usb_buffer_map (struct urb *urb)
659 return NULL; 659 return NULL;
660 660
661 if (controller->dma_mask) { 661 if (controller->dma_mask) {
662 urb->transfer_dma = dma_map_single (controller, 662 urb->transfer_dma = dma_map_single(controller,
663 urb->transfer_buffer, urb->transfer_buffer_length, 663 urb->transfer_buffer, urb->transfer_buffer_length,
664 usb_pipein (urb->pipe) 664 usb_pipein(urb->pipe)
665 ? DMA_FROM_DEVICE : DMA_TO_DEVICE); 665 ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
666 if (usb_pipecontrol (urb->pipe)) 666 if (usb_pipecontrol(urb->pipe))
667 urb->setup_dma = dma_map_single (controller, 667 urb->setup_dma = dma_map_single(controller,
668 urb->setup_packet, 668 urb->setup_packet,
669 sizeof (struct usb_ctrlrequest), 669 sizeof(struct usb_ctrlrequest),
670 DMA_TO_DEVICE); 670 DMA_TO_DEVICE);
671 // FIXME generic api broken like pci, can't report errors 671 // FIXME generic api broken like pci, can't report errors
672 // if (urb->transfer_dma == DMA_ADDR_INVALID) return 0; 672 // if (urb->transfer_dma == DMA_ADDR_INVALID) return 0;
@@ -689,7 +689,7 @@ struct urb *usb_buffer_map (struct urb *urb)
689 * usb_buffer_dmasync - synchronize DMA and CPU view of buffer(s) 689 * usb_buffer_dmasync - synchronize DMA and CPU view of buffer(s)
690 * @urb: urb whose transfer_buffer/setup_packet will be synchronized 690 * @urb: urb whose transfer_buffer/setup_packet will be synchronized
691 */ 691 */
692void usb_buffer_dmasync (struct urb *urb) 692void usb_buffer_dmasync(struct urb *urb)
693{ 693{
694 struct usb_bus *bus; 694 struct usb_bus *bus;
695 struct device *controller; 695 struct device *controller;
@@ -702,14 +702,14 @@ void usb_buffer_dmasync (struct urb *urb)
702 return; 702 return;
703 703
704 if (controller->dma_mask) { 704 if (controller->dma_mask) {
705 dma_sync_single (controller, 705 dma_sync_single(controller,
706 urb->transfer_dma, urb->transfer_buffer_length, 706 urb->transfer_dma, urb->transfer_buffer_length,
707 usb_pipein (urb->pipe) 707 usb_pipein(urb->pipe)
708 ? DMA_FROM_DEVICE : DMA_TO_DEVICE); 708 ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
709 if (usb_pipecontrol (urb->pipe)) 709 if (usb_pipecontrol(urb->pipe))
710 dma_sync_single (controller, 710 dma_sync_single(controller,
711 urb->setup_dma, 711 urb->setup_dma,
712 sizeof (struct usb_ctrlrequest), 712 sizeof(struct usb_ctrlrequest),
713 DMA_TO_DEVICE); 713 DMA_TO_DEVICE);
714 } 714 }
715} 715}
@@ -722,7 +722,7 @@ void usb_buffer_dmasync (struct urb *urb)
722 * Reverses the effect of usb_buffer_map(). 722 * Reverses the effect of usb_buffer_map().
723 */ 723 */
724#if 0 724#if 0
725void usb_buffer_unmap (struct urb *urb) 725void usb_buffer_unmap(struct urb *urb)
726{ 726{
727 struct usb_bus *bus; 727 struct usb_bus *bus;
728 struct device *controller; 728 struct device *controller;
@@ -735,14 +735,14 @@ void usb_buffer_unmap (struct urb *urb)
735 return; 735 return;
736 736
737 if (controller->dma_mask) { 737 if (controller->dma_mask) {
738 dma_unmap_single (controller, 738 dma_unmap_single(controller,
739 urb->transfer_dma, urb->transfer_buffer_length, 739 urb->transfer_dma, urb->transfer_buffer_length,
740 usb_pipein (urb->pipe) 740 usb_pipein(urb->pipe)
741 ? DMA_FROM_DEVICE : DMA_TO_DEVICE); 741 ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
742 if (usb_pipecontrol (urb->pipe)) 742 if (usb_pipecontrol(urb->pipe))
743 dma_unmap_single (controller, 743 dma_unmap_single(controller,
744 urb->setup_dma, 744 urb->setup_dma,
745 sizeof (struct usb_ctrlrequest), 745 sizeof(struct usb_ctrlrequest),
746 DMA_TO_DEVICE); 746 DMA_TO_DEVICE);
747 } 747 }
748 urb->transfer_flags &= ~(URB_NO_TRANSFER_DMA_MAP 748 urb->transfer_flags &= ~(URB_NO_TRANSFER_DMA_MAP
@@ -783,15 +783,15 @@ int usb_buffer_map_sg(const struct usb_device *dev, unsigned pipe,
783 struct device *controller; 783 struct device *controller;
784 784
785 if (!dev 785 if (!dev
786 || usb_pipecontrol (pipe) 786 || usb_pipecontrol(pipe)
787 || !(bus = dev->bus) 787 || !(bus = dev->bus)
788 || !(controller = bus->controller) 788 || !(controller = bus->controller)
789 || !controller->dma_mask) 789 || !controller->dma_mask)
790 return -1; 790 return -1;
791 791
792 // FIXME generic api broken like pci, can't report errors 792 // FIXME generic api broken like pci, can't report errors
793 return dma_map_sg (controller, sg, nents, 793 return dma_map_sg(controller, sg, nents,
794 usb_pipein (pipe) ? DMA_FROM_DEVICE : DMA_TO_DEVICE); 794 usb_pipein(pipe) ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
795} 795}
796 796
797/* XXX DISABLED, no users currently. If you wish to re-enable this 797/* XXX DISABLED, no users currently. If you wish to re-enable this
@@ -823,8 +823,8 @@ void usb_buffer_dmasync_sg(const struct usb_device *dev, unsigned pipe,
823 || !controller->dma_mask) 823 || !controller->dma_mask)
824 return; 824 return;
825 825
826 dma_sync_sg (controller, sg, n_hw_ents, 826 dma_sync_sg(controller, sg, n_hw_ents,
827 usb_pipein (pipe) ? DMA_FROM_DEVICE : DMA_TO_DEVICE); 827 usb_pipein(pipe) ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
828} 828}
829#endif 829#endif
830 830
@@ -849,8 +849,8 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, unsigned pipe,
849 || !controller->dma_mask) 849 || !controller->dma_mask)
850 return; 850 return;
851 851
852 dma_unmap_sg (controller, sg, n_hw_ents, 852 dma_unmap_sg(controller, sg, n_hw_ents,
853 usb_pipein (pipe) ? DMA_FROM_DEVICE : DMA_TO_DEVICE); 853 usb_pipein(pipe) ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
854} 854}
855 855
856/* format to disable USB on kernel command line is: nousb */ 856/* format to disable USB on kernel command line is: nousb */
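As the FIXME above notes, usb_buffer_map_sg() cannot report a cause of failure because dma_map_sg() itself cannot, so it returns -1 and callers treat any negative value as failure; the later unmap must then cover the entry count the map call returned. A sketch of the pairing (demo_sg_io is hypothetical):

#include <linux/usb.h>
#include <linux/scatterlist.h>

static int demo_sg_io(struct usb_device *udev, unsigned pipe,
		struct scatterlist *sg, int nents)
{
	int hw_ents = usb_buffer_map_sg(udev, pipe, sg, nents);

	if (hw_ents < 0)		/* cause is not reported */
		return -ENOMEM;
	/* ... submit transfers covering hw_ents mapped entries ... */
	usb_buffer_unmap_sg(udev, pipe, sg, hw_ents);
	return 0;
}
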
@@ -871,7 +871,7 @@ static int __init usb_init(void)
871{ 871{
872 int retval; 872 int retval;
873 if (nousb) { 873 if (nousb) {
874 pr_info ("%s: USB support disabled\n", usbcore_name); 874 pr_info("%s: USB support disabled\n", usbcore_name);
875 return 0; 875 return 0;
876 } 876 }
877 877
@@ -971,19 +971,19 @@ EXPORT_SYMBOL(__usb_get_extra_descriptor);
971EXPORT_SYMBOL(usb_find_device); 971EXPORT_SYMBOL(usb_find_device);
972EXPORT_SYMBOL(usb_get_current_frame_number); 972EXPORT_SYMBOL(usb_get_current_frame_number);
973 973
974EXPORT_SYMBOL (usb_buffer_alloc); 974EXPORT_SYMBOL(usb_buffer_alloc);
975EXPORT_SYMBOL (usb_buffer_free); 975EXPORT_SYMBOL(usb_buffer_free);
976 976
977#if 0 977#if 0
978EXPORT_SYMBOL (usb_buffer_map); 978EXPORT_SYMBOL(usb_buffer_map);
979EXPORT_SYMBOL (usb_buffer_dmasync); 979EXPORT_SYMBOL(usb_buffer_dmasync);
980EXPORT_SYMBOL (usb_buffer_unmap); 980EXPORT_SYMBOL(usb_buffer_unmap);
981#endif 981#endif
982 982
983EXPORT_SYMBOL (usb_buffer_map_sg); 983EXPORT_SYMBOL(usb_buffer_map_sg);
984#if 0 984#if 0
985EXPORT_SYMBOL (usb_buffer_dmasync_sg); 985EXPORT_SYMBOL(usb_buffer_dmasync_sg);
986#endif 986#endif
987EXPORT_SYMBOL (usb_buffer_unmap_sg); 987EXPORT_SYMBOL(usb_buffer_unmap_sg);
988 988
989MODULE_LICENSE("GPL"); 989MODULE_LICENSE("GPL");
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
index 812c733ba8ce..f39050145f1f 100644
--- a/drivers/usb/gadget/at91_udc.c
+++ b/drivers/usb/gadget/at91_udc.c
@@ -39,7 +39,7 @@
39#include <linux/interrupt.h> 39#include <linux/interrupt.h>
40#include <linux/proc_fs.h> 40#include <linux/proc_fs.h>
41#include <linux/clk.h> 41#include <linux/clk.h>
42#include <linux/usb_ch9.h> 42#include <linux/usb/ch9.h>
43#include <linux/usb_gadget.h> 43#include <linux/usb_gadget.h>
44 44
45#include <asm/byteorder.h> 45#include <asm/byteorder.h>
@@ -1807,16 +1807,13 @@ static int at91udc_suspend(struct platform_device *pdev, pm_message_t mesg)
1807 || !wake 1807 || !wake
1808 || at91_suspend_entering_slow_clock()) { 1808 || at91_suspend_entering_slow_clock()) {
1809 pullup(udc, 0); 1809 pullup(udc, 0);
1810 disable_irq_wake(udc->udp_irq); 1810 wake = 0;
1811 } else 1811 } else
1812 enable_irq_wake(udc->udp_irq); 1812 enable_irq_wake(udc->udp_irq);
1813 1813
1814 if (udc->board.vbus_pin > 0) { 1814 udc->active_suspend = wake;
1815 if (wake) 1815 if (udc->board.vbus_pin > 0 && wake)
1816 enable_irq_wake(udc->board.vbus_pin); 1816 enable_irq_wake(udc->board.vbus_pin);
1817 else
1818 disable_irq_wake(udc->board.vbus_pin);
1819 }
1820 return 0; 1817 return 0;
1821} 1818}
1822 1819
@@ -1824,8 +1821,14 @@ static int at91udc_resume(struct platform_device *pdev)
1824{ 1821{
1825 struct at91_udc *udc = platform_get_drvdata(pdev); 1822 struct at91_udc *udc = platform_get_drvdata(pdev);
1826 1823
1824 if (udc->board.vbus_pin > 0 && udc->active_suspend)
1825 disable_irq_wake(udc->board.vbus_pin);
1826
1827 /* maybe reconnect to host; if so, clocks on */ 1827 /* maybe reconnect to host; if so, clocks on */
1828 pullup(udc, 1); 1828 if (udc->active_suspend)
1829 disable_irq_wake(udc->udp_irq);
1830 else
1831 pullup(udc, 1);
1829 return 0; 1832 return 0;
1830} 1833}
1831#else 1834#else
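The reworked suspend path above fixes an unbalanced wakeup-IRQ sequence: disable_irq_wake() may only be called after a matching enable_irq_wake(), otherwise the IRQ wake-depth accounting underflows and the kernel complains. The new active_suspend flag records at suspend time whether the enable happened, so resume undoes exactly that and nothing more. The invariant in isolation (struct my_dev and both helpers are hypothetical):

#include <linux/interrupt.h>

struct my_dev {
	int irq;
	unsigned may_wakeup:1;
	unsigned armed:1;
};

static int demo_suspend(struct my_dev *d)
{
	d->armed = 0;
	if (d->may_wakeup) {
		enable_irq_wake(d->irq);
		d->armed = 1;		/* remember what must be undone */
	}
	return 0;
}

static int demo_resume(struct my_dev *d)
{
	if (d->armed)			/* balanced with the enable above */
		disable_irq_wake(d->irq);
	return 0;
}
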
diff --git a/drivers/usb/gadget/at91_udc.h b/drivers/usb/gadget/at91_udc.h
index 677089baa59d..7e34e2f864f9 100644
--- a/drivers/usb/gadget/at91_udc.h
+++ b/drivers/usb/gadget/at91_udc.h
@@ -136,6 +136,7 @@ struct at91_udc {
136 unsigned wait_for_addr_ack:1; 136 unsigned wait_for_addr_ack:1;
137 unsigned wait_for_config_ack:1; 137 unsigned wait_for_config_ack:1;
138 unsigned selfpowered:1; 138 unsigned selfpowered:1;
139 unsigned active_suspend:1;
139 u8 addr; 140 u8 addr;
140 struct at91_udc_data board; 141 struct at91_udc_data board;
141 struct clk *iclk, *fclk; 142 struct clk *iclk, *fclk;
diff --git a/drivers/usb/gadget/config.c b/drivers/usb/gadget/config.c
index 83b4866df9af..d18901b92cda 100644
--- a/drivers/usb/gadget/config.c
+++ b/drivers/usb/gadget/config.c
@@ -24,7 +24,7 @@
24#include <linux/string.h> 24#include <linux/string.h>
25#include <linux/device.h> 25#include <linux/device.h>
26 26
27#include <linux/usb_ch9.h> 27#include <linux/usb/ch9.h>
28#include <linux/usb_gadget.h> 28#include <linux/usb_gadget.h>
29 29
30 30
diff --git a/drivers/usb/gadget/epautoconf.c b/drivers/usb/gadget/epautoconf.c
index 53d584589c26..f28af06905a5 100644
--- a/drivers/usb/gadget/epautoconf.c
+++ b/drivers/usb/gadget/epautoconf.c
@@ -27,7 +27,7 @@
27#include <linux/ctype.h> 27#include <linux/ctype.h>
28#include <linux/string.h> 28#include <linux/string.h>
29 29
30#include <linux/usb_ch9.h> 30#include <linux/usb/ch9.h>
31#include <linux/usb_gadget.h> 31#include <linux/usb_gadget.h>
32 32
33#include "gadget_chips.h" 33#include "gadget_chips.h"
diff --git a/drivers/usb/gadget/ether.c b/drivers/usb/gadget/ether.c
index d15bf22b9a03..22e3c9443641 100644
--- a/drivers/usb/gadget/ether.c
+++ b/drivers/usb/gadget/ether.c
@@ -47,7 +47,7 @@
47#include <asm/uaccess.h> 47#include <asm/uaccess.h>
48#include <asm/unaligned.h> 48#include <asm/unaligned.h>
49 49
50#include <linux/usb_ch9.h> 50#include <linux/usb/ch9.h>
51#include <linux/usb/cdc.h> 51#include <linux/usb/cdc.h>
52#include <linux/usb_gadget.h> 52#include <linux/usb_gadget.h>
53 53
@@ -72,9 +72,18 @@
72 * 72 *
73 * There's some hardware that can't talk CDC. We make that hardware 73 * There's some hardware that can't talk CDC. We make that hardware
74 * implement a "minimalist" vendor-agnostic CDC core: same framing, but 74 * implement a "minimalist" vendor-agnostic CDC core: same framing, but
75 * link-level setup only requires activating the configuration. 75 * link-level setup only requires activating the configuration. Only the
76 * Linux supports it, but other host operating systems may not. 76 * endpoint descriptors, and product/vendor IDs, are relevant; no control
77 * (This is a subset of CDC Ethernet.) 77 * operations are available. Linux supports it, but other host operating
78 * systems may not. (This is a subset of CDC Ethernet.)
79 *
80 * It turns out that if you add a few descriptors to that "CDC Subset",
81 * (Windows) host side drivers from MCCI can treat it as one submode of
82 * a proprietary scheme called "SAFE" ... without needing to know about
83 * specific product/vendor IDs. So we do that, making it easier to use
84 * those MS-Windows drivers. Those added descriptors make it resemble a
85 * CDC MDLM device, but they don't change device behavior at all. (See
86 * MCCI Engineering report 950198 "SAFE Networking Functions".)
78 * 87 *
79 * A third option is also in use. Rather than CDC Ethernet, or something 88 * A third option is also in use. Rather than CDC Ethernet, or something
80 * simpler, Microsoft pushes their own approach: RNDIS. The published 89 * simpler, Microsoft pushes their own approach: RNDIS. The published
@@ -254,6 +263,10 @@ MODULE_PARM_DESC(host_addr, "Host Ethernet Address");
254#define DEV_CONFIG_CDC 263#define DEV_CONFIG_CDC
255#endif 264#endif
256 265
266#ifdef CONFIG_USB_GADGET_S3C2410
267#define DEV_CONFIG_CDC
268#endif
269
257#ifdef CONFIG_USB_GADGET_AT91 270#ifdef CONFIG_USB_GADGET_AT91
258#define DEV_CONFIG_CDC 271#define DEV_CONFIG_CDC
259#endif 272#endif
@@ -266,6 +279,10 @@ MODULE_PARM_DESC(host_addr, "Host Ethernet Address");
266#define DEV_CONFIG_CDC 279#define DEV_CONFIG_CDC
267#endif 280#endif
268 281
282#ifdef CONFIG_USB_GADGET_HUSB2DEV
283#define DEV_CONFIG_CDC
284#endif
285
269 286
270/* For CDC-incapable hardware, choose the simple cdc subset. 287/* For CDC-incapable hardware, choose the simple cdc subset.
271 * Anything that talks bulk (without notable bugs) can do this. 288 * Anything that talks bulk (without notable bugs) can do this.
@@ -283,9 +300,6 @@ MODULE_PARM_DESC(host_addr, "Host Ethernet Address");
283#define DEV_CONFIG_SUBSET 300#define DEV_CONFIG_SUBSET
284#endif 301#endif
285 302
286#ifdef CONFIG_USB_GADGET_S3C2410
287#define DEV_CONFIG_CDC
288#endif
289 303
290/*-------------------------------------------------------------------------*/ 304/*-------------------------------------------------------------------------*/
291 305
@@ -487,8 +501,17 @@ rndis_config = {
487 * endpoint. Both have a "data" interface and two bulk endpoints. 501 * endpoint. Both have a "data" interface and two bulk endpoints.
488 * There are also differences in how control requests are handled. 502 * There are also differences in how control requests are handled.
489 * 503 *
490 * RNDIS shares a lot with CDC-Ethernet, since it's a variant of 504 * RNDIS shares a lot with CDC-Ethernet, since it's a variant of the
491 * the CDC-ACM (modem) spec. 505 * CDC-ACM (modem) spec. Unfortunately MSFT's RNDIS driver is buggy; it
506 * may hang or oops. Since bugfixes (or accurate specs, letting Linux
507 * work around those bugs) are unlikely to ever come from MSFT, you may
508 * wish to avoid using RNDIS.
509 *
510 * MCCI offers an alternative to RNDIS if you need to connect to Windows
511 * but have hardware that can't support CDC Ethernet. We add descriptors
512 * to present the CDC Subset as a (nonconformant) CDC MDLM variant called
513 * "SAFE". That borrows from both CDC Ethernet and CDC MDLM. You can
514 * get those drivers from MCCI, or bundled with various products.
492 */ 515 */
493 516
494#ifdef DEV_CONFIG_CDC 517#ifdef DEV_CONFIG_CDC
@@ -522,8 +545,6 @@ rndis_control_intf = {
522}; 545};
523#endif 546#endif
524 547
525#if defined(DEV_CONFIG_CDC) || defined(CONFIG_USB_ETH_RNDIS)
526
527static const struct usb_cdc_header_desc header_desc = { 548static const struct usb_cdc_header_desc header_desc = {
528 .bLength = sizeof header_desc, 549 .bLength = sizeof header_desc,
529 .bDescriptorType = USB_DT_CS_INTERFACE, 550 .bDescriptorType = USB_DT_CS_INTERFACE,
@@ -532,6 +553,8 @@ static const struct usb_cdc_header_desc header_desc = {
532 .bcdCDC = __constant_cpu_to_le16 (0x0110), 553 .bcdCDC = __constant_cpu_to_le16 (0x0110),
533}; 554};
534 555
556#if defined(DEV_CONFIG_CDC) || defined(CONFIG_USB_ETH_RNDIS)
557
535static const struct usb_cdc_union_desc union_desc = { 558static const struct usb_cdc_union_desc union_desc = {
536 .bLength = sizeof union_desc, 559 .bLength = sizeof union_desc,
537 .bDescriptorType = USB_DT_CS_INTERFACE, 560 .bDescriptorType = USB_DT_CS_INTERFACE,
@@ -564,7 +587,40 @@ static const struct usb_cdc_acm_descriptor acm_descriptor = {
564 587
565#endif 588#endif
566 589
567#ifdef DEV_CONFIG_CDC 590#ifndef DEV_CONFIG_CDC
591
592/* "SAFE" loosely follows CDC WMC MDLM, violating the spec in various
593 * ways: data endpoints live in the control interface, there's no data
594 * interface, and it's not used to talk to a cell phone radio.
595 */
596
597static const struct usb_cdc_mdlm_desc mdlm_desc = {
598 .bLength = sizeof mdlm_desc,
599 .bDescriptorType = USB_DT_CS_INTERFACE,
600 .bDescriptorSubType = USB_CDC_MDLM_TYPE,
601
602 .bcdVersion = __constant_cpu_to_le16(0x0100),
603 .bGUID = {
604 0x5d, 0x34, 0xcf, 0x66, 0x11, 0x18, 0x11, 0xd6,
605 0xa2, 0x1a, 0x00, 0x01, 0x02, 0xca, 0x9a, 0x7f,
606 },
607};
608
609/* since "usb_cdc_mdlm_detail_desc" is a variable length structure, we
610 * can't really use its struct. All we do here is say that we're using
611 * the submode of "SAFE" which directly matches the CDC Subset.
612 */
613static const u8 mdlm_detail_desc[] = {
614 6,
615 USB_DT_CS_INTERFACE,
616 USB_CDC_MDLM_DETAIL_TYPE,
617
618 0, /* "SAFE" */
619 0, /* network control capabilities (none) */
620 0, /* network data capabilities ("raw" encapsulation) */
621};
622
623#endif
568 624
569static const struct usb_cdc_ether_desc ether_desc = { 625static const struct usb_cdc_ether_desc ether_desc = {
570 .bLength = sizeof ether_desc, 626 .bLength = sizeof ether_desc,
@@ -579,7 +635,6 @@ static const struct usb_cdc_ether_desc ether_desc = {
579 .bNumberPowerFilters = 0, 635 .bNumberPowerFilters = 0,
580}; 636};
581 637
582#endif
583 638
584#if defined(DEV_CONFIG_CDC) || defined(CONFIG_USB_ETH_RNDIS) 639#if defined(DEV_CONFIG_CDC) || defined(CONFIG_USB_ETH_RNDIS)
585 640
@@ -672,6 +727,9 @@ rndis_data_intf = {
672/* 727/*
673 * "Simple" CDC-subset option is a simple vendor-neutral model that most 728 * "Simple" CDC-subset option is a simple vendor-neutral model that most
674 * full speed controllers can handle: one interface, two bulk endpoints. 729 * full speed controllers can handle: one interface, two bulk endpoints.
730 *
731 * To assist host side drivers, we fancy it up a bit, and add descriptors
732 * so some host side drivers will understand it as a "SAFE" variant.
675 */ 733 */
676 734
677static const struct usb_interface_descriptor 735static const struct usb_interface_descriptor
@@ -682,8 +740,8 @@ subset_data_intf = {
682 .bInterfaceNumber = 0, 740 .bInterfaceNumber = 0,
683 .bAlternateSetting = 0, 741 .bAlternateSetting = 0,
684 .bNumEndpoints = 2, 742 .bNumEndpoints = 2,
685 .bInterfaceClass = USB_CLASS_VENDOR_SPEC, 743 .bInterfaceClass = USB_CLASS_COMM,
686 .bInterfaceSubClass = 0, 744 .bInterfaceSubClass = USB_CDC_SUBCLASS_MDLM,
687 .bInterfaceProtocol = 0, 745 .bInterfaceProtocol = 0,
688 .iInterface = STRING_DATA, 746 .iInterface = STRING_DATA,
689}; 747};
@@ -731,10 +789,15 @@ static const struct usb_descriptor_header *fs_eth_function [11] = {
731static inline void __init fs_subset_descriptors(void) 789static inline void __init fs_subset_descriptors(void)
732{ 790{
733#ifdef DEV_CONFIG_SUBSET 791#ifdef DEV_CONFIG_SUBSET
792 /* behavior is "CDC Subset"; extra descriptors say "SAFE" */
734 fs_eth_function[1] = (struct usb_descriptor_header *) &subset_data_intf; 793 fs_eth_function[1] = (struct usb_descriptor_header *) &subset_data_intf;
735 fs_eth_function[2] = (struct usb_descriptor_header *) &fs_source_desc; 794 fs_eth_function[2] = (struct usb_descriptor_header *) &header_desc;
736 fs_eth_function[3] = (struct usb_descriptor_header *) &fs_sink_desc; 795 fs_eth_function[3] = (struct usb_descriptor_header *) &mdlm_desc;
737 fs_eth_function[4] = NULL; 796 fs_eth_function[4] = (struct usb_descriptor_header *) &mdlm_detail_desc;
797 fs_eth_function[5] = (struct usb_descriptor_header *) &ether_desc;
798 fs_eth_function[6] = (struct usb_descriptor_header *) &fs_source_desc;
799 fs_eth_function[7] = (struct usb_descriptor_header *) &fs_sink_desc;
800 fs_eth_function[8] = NULL;
738#else 801#else
739 fs_eth_function[1] = NULL; 802 fs_eth_function[1] = NULL;
740#endif 803#endif
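fs_eth_function[] (and its high-speed twin below) is a NULL-terminated array of usb_descriptor_header pointers, which is why the SAFE variant can grow from four entries to eight simply by filling more slots and moving the terminator. Configuration-building code walks such lists generically; a sketch of the idiom (demo_total_length is hypothetical):

#include <linux/usb/ch9.h>

static unsigned demo_total_length(struct usb_descriptor_header **list)
{
	unsigned total = 0;

	for (; *list; list++)		/* stop at the NULL terminator */
		total += (*list)->bLength;
	return total;
}
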
@@ -828,10 +891,15 @@ static const struct usb_descriptor_header *hs_eth_function [11] = {
828static inline void __init hs_subset_descriptors(void) 891static inline void __init hs_subset_descriptors(void)
829{ 892{
830#ifdef DEV_CONFIG_SUBSET 893#ifdef DEV_CONFIG_SUBSET
894 /* behavior is "CDC Subset"; extra descriptors say "SAFE" */
831 hs_eth_function[1] = (struct usb_descriptor_header *) &subset_data_intf; 895 hs_eth_function[1] = (struct usb_descriptor_header *) &subset_data_intf;
832 hs_eth_function[2] = (struct usb_descriptor_header *) &fs_source_desc; 896 hs_eth_function[2] = (struct usb_descriptor_header *) &header_desc;
833 hs_eth_function[3] = (struct usb_descriptor_header *) &fs_sink_desc; 897 hs_eth_function[3] = (struct usb_descriptor_header *) &mdlm_desc;
834 hs_eth_function[4] = NULL; 898 hs_eth_function[4] = (struct usb_descriptor_header *) &mdlm_detail_desc;
899 hs_eth_function[5] = (struct usb_descriptor_header *) &ether_desc;
900 hs_eth_function[6] = (struct usb_descriptor_header *) &hs_source_desc;
901 hs_eth_function[7] = (struct usb_descriptor_header *) &hs_sink_desc;
902 hs_eth_function[8] = NULL;
835#else 903#else
836 hs_eth_function[1] = NULL; 904 hs_eth_function[1] = NULL;
837#endif 905#endif
@@ -878,10 +946,8 @@ static char manufacturer [50];
878static char product_desc [40] = DRIVER_DESC; 946static char product_desc [40] = DRIVER_DESC;
879static char serial_number [20]; 947static char serial_number [20];
880 948
881#ifdef DEV_CONFIG_CDC
882/* address that the host will use ... usually assigned at random */ 949/* address that the host will use ... usually assigned at random */
883static char ethaddr [2 * ETH_ALEN + 1]; 950static char ethaddr [2 * ETH_ALEN + 1];
884#endif
885 951
886/* static strings, in UTF-8 */ 952/* static strings, in UTF-8 */
887static struct usb_string strings [] = { 953static struct usb_string strings [] = {
@@ -889,9 +955,9 @@ static struct usb_string strings [] = {
889 { STRING_PRODUCT, product_desc, }, 955 { STRING_PRODUCT, product_desc, },
890 { STRING_SERIALNUMBER, serial_number, }, 956 { STRING_SERIALNUMBER, serial_number, },
891 { STRING_DATA, "Ethernet Data", }, 957 { STRING_DATA, "Ethernet Data", },
958 { STRING_ETHADDR, ethaddr, },
892#ifdef DEV_CONFIG_CDC 959#ifdef DEV_CONFIG_CDC
893 { STRING_CDC, "CDC Ethernet", }, 960 { STRING_CDC, "CDC Ethernet", },
894 { STRING_ETHADDR, ethaddr, },
895 { STRING_CONTROL, "CDC Communications Control", }, 961 { STRING_CONTROL, "CDC Communications Control", },
896#endif 962#endif
897#ifdef DEV_CONFIG_SUBSET 963#ifdef DEV_CONFIG_SUBSET
@@ -986,10 +1052,10 @@ set_ether_config (struct eth_dev *dev, gfp_t gfp_flags)
986 } 1052 }
987#endif 1053#endif
988 1054
989 dev->in = ep_desc (dev->gadget, &hs_source_desc, &fs_source_desc); 1055 dev->in = ep_desc(gadget, &hs_source_desc, &fs_source_desc);
990 dev->in_ep->driver_data = dev; 1056 dev->in_ep->driver_data = dev;
991 1057
992 dev->out = ep_desc (dev->gadget, &hs_sink_desc, &fs_sink_desc); 1058 dev->out = ep_desc(gadget, &hs_sink_desc, &fs_sink_desc);
993 dev->out_ep->driver_data = dev; 1059 dev->out_ep->driver_data = dev;
994 1060
995 /* With CDC, the host isn't allowed to use these two data 1061 /* With CDC, the host isn't allowed to use these two data
@@ -2278,10 +2344,10 @@ eth_bind (struct usb_gadget *gadget)
2278 "RNDIS/%s", driver_desc); 2344 "RNDIS/%s", driver_desc);
2279 2345
2280 /* CDC subset ... recognized by Linux since 2.4.10, but Windows 2346 /* CDC subset ... recognized by Linux since 2.4.10, but Windows
2281 * drivers aren't widely available. 2347 * drivers aren't widely available. (That may be improved by
2348 * supporting one submode of the "SAFE" variant of MDLM.)
2282 */ 2349 */
2283 } else if (!cdc) { 2350 } else if (!cdc) {
2284 device_desc.bDeviceClass = USB_CLASS_VENDOR_SPEC;
2285 device_desc.idVendor = 2351 device_desc.idVendor =
2286 __constant_cpu_to_le16(SIMPLE_VENDOR_NUM); 2352 __constant_cpu_to_le16(SIMPLE_VENDOR_NUM);
2287 device_desc.idProduct = 2353 device_desc.idProduct =
@@ -2352,6 +2418,10 @@ autoconf_fail:
2352 if (!cdc) { 2418 if (!cdc) {
2353 eth_config.bNumInterfaces = 1; 2419 eth_config.bNumInterfaces = 1;
2354 eth_config.iConfiguration = STRING_SUBSET; 2420 eth_config.iConfiguration = STRING_SUBSET;
2421
2422 /* use functions to set these up, in case we're built to work
2423 * with multiple controllers and must override CDC Ethernet.
2424 */
2355 fs_subset_descriptors(); 2425 fs_subset_descriptors();
2356 hs_subset_descriptors(); 2426 hs_subset_descriptors();
2357 } 2427 }
@@ -2415,22 +2485,20 @@ autoconf_fail:
2415 2485
2416 /* Module params for these addresses should come from ID proms. 2486 /* Module params for these addresses should come from ID proms.
2417 * The host side address is used with CDC and RNDIS, and commonly 2487 * The host side address is used with CDC and RNDIS, and commonly
2418 * ends up in a persistent config database. 2488 * ends up in a persistent config database. It's not clear if
2489 * host side code for the SAFE thing cares -- its original BLAN
2490 * thing didn't, Sharp never assigned those addresses on Zaurii.
2419 */ 2491 */
2420 if (get_ether_addr(dev_addr, net->dev_addr)) 2492 if (get_ether_addr(dev_addr, net->dev_addr))
2421 dev_warn(&gadget->dev, 2493 dev_warn(&gadget->dev,
2422 "using random %s ethernet address\n", "self"); 2494 "using random %s ethernet address\n", "self");
2423 if (cdc || rndis) { 2495 if (get_ether_addr(host_addr, dev->host_mac))
2424 if (get_ether_addr(host_addr, dev->host_mac)) 2496 dev_warn(&gadget->dev,
2425 dev_warn(&gadget->dev, 2497 "using random %s ethernet address\n", "host");
2426 "using random %s ethernet address\n", "host"); 2498 snprintf (ethaddr, sizeof ethaddr, "%02X%02X%02X%02X%02X%02X",
2427#ifdef DEV_CONFIG_CDC 2499 dev->host_mac [0], dev->host_mac [1],
2428 snprintf (ethaddr, sizeof ethaddr, "%02X%02X%02X%02X%02X%02X", 2500 dev->host_mac [2], dev->host_mac [3],
2429 dev->host_mac [0], dev->host_mac [1], 2501 dev->host_mac [4], dev->host_mac [5]);
2430 dev->host_mac [2], dev->host_mac [3],
2431 dev->host_mac [4], dev->host_mac [5]);
2432#endif
2433 }
2434 2502
2435 if (rndis) { 2503 if (rndis) {
2436 status = rndis_init(); 2504 status = rndis_init();
diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
index 72f2ae96fbf3..f04a29a46646 100644
--- a/drivers/usb/gadget/file_storage.c
+++ b/drivers/usb/gadget/file_storage.c
@@ -253,7 +253,7 @@
253#include <linux/freezer.h> 253#include <linux/freezer.h>
254#include <linux/utsname.h> 254#include <linux/utsname.h>
255 255
256#include <linux/usb_ch9.h> 256#include <linux/usb/ch9.h>
257#include <linux/usb_gadget.h> 257#include <linux/usb_gadget.h>
258 258
259#include "gadget_chips.h" 259#include "gadget_chips.h"
@@ -1148,7 +1148,7 @@ static int ep0_queue(struct fsg_dev *fsg)
1148 1148
1149static void ep0_complete(struct usb_ep *ep, struct usb_request *req) 1149static void ep0_complete(struct usb_ep *ep, struct usb_request *req)
1150{ 1150{
1151 struct fsg_dev *fsg = (struct fsg_dev *) ep->driver_data; 1151 struct fsg_dev *fsg = ep->driver_data;
1152 1152
1153 if (req->actual > 0) 1153 if (req->actual > 0)
1154 dump_msg(fsg, fsg->ep0req_name, req->buf, req->actual); 1154 dump_msg(fsg, fsg->ep0req_name, req->buf, req->actual);
@@ -1170,8 +1170,8 @@ static void ep0_complete(struct usb_ep *ep, struct usb_request *req)
1170 1170
1171static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req) 1171static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req)
1172{ 1172{
1173 struct fsg_dev *fsg = (struct fsg_dev *) ep->driver_data; 1173 struct fsg_dev *fsg = ep->driver_data;
1174 struct fsg_buffhd *bh = (struct fsg_buffhd *) req->context; 1174 struct fsg_buffhd *bh = req->context;
1175 1175
1176 if (req->status || req->actual != req->length) 1176 if (req->status || req->actual != req->length)
1177 DBG(fsg, "%s --> %d, %u/%u\n", __FUNCTION__, 1177 DBG(fsg, "%s --> %d, %u/%u\n", __FUNCTION__,
@@ -1190,8 +1190,8 @@ static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req)
1190 1190
1191static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req) 1191static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req)
1192{ 1192{
1193 struct fsg_dev *fsg = (struct fsg_dev *) ep->driver_data; 1193 struct fsg_dev *fsg = ep->driver_data;
1194 struct fsg_buffhd *bh = (struct fsg_buffhd *) req->context; 1194 struct fsg_buffhd *bh = req->context;
1195 1195
1196 dump_msg(fsg, "bulk-out", req->buf, req->actual); 1196 dump_msg(fsg, "bulk-out", req->buf, req->actual);
1197 if (req->status || req->actual != bh->bulk_out_intended_length) 1197 if (req->status || req->actual != bh->bulk_out_intended_length)
@@ -1214,8 +1214,8 @@ static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req)
1214#ifdef CONFIG_USB_FILE_STORAGE_TEST 1214#ifdef CONFIG_USB_FILE_STORAGE_TEST
1215static void intr_in_complete(struct usb_ep *ep, struct usb_request *req) 1215static void intr_in_complete(struct usb_ep *ep, struct usb_request *req)
1216{ 1216{
1217 struct fsg_dev *fsg = (struct fsg_dev *) ep->driver_data; 1217 struct fsg_dev *fsg = ep->driver_data;
1218 struct fsg_buffhd *bh = (struct fsg_buffhd *) req->context; 1218 struct fsg_buffhd *bh = req->context;
1219 1219
1220 if (req->status || req->actual != req->length) 1220 if (req->status || req->actual != req->length)
1221 DBG(fsg, "%s --> %d, %u/%u\n", __FUNCTION__, 1221 DBG(fsg, "%s --> %d, %u/%u\n", __FUNCTION__,
@@ -2577,7 +2577,7 @@ static int send_status(struct fsg_dev *fsg)
2577 } 2577 }
2578 2578
2579 if (transport_is_bbb()) { 2579 if (transport_is_bbb()) {
2580 struct bulk_cs_wrap *csw = (struct bulk_cs_wrap *) bh->buf; 2580 struct bulk_cs_wrap *csw = bh->buf;
2581 2581
2582 /* Store and send the Bulk-only CSW */ 2582 /* Store and send the Bulk-only CSW */
2583 csw->Signature = __constant_cpu_to_le32(USB_BULK_CS_SIG); 2583 csw->Signature = __constant_cpu_to_le32(USB_BULK_CS_SIG);
@@ -2596,8 +2596,7 @@ static int send_status(struct fsg_dev *fsg)
2596 return 0; 2596 return 0;
2597 2597
2598 } else { // USB_PR_CBI 2598 } else { // USB_PR_CBI
2599 struct interrupt_data *buf = (struct interrupt_data *) 2599 struct interrupt_data *buf = bh->buf;
2600 bh->buf;
2601 2600
2602 /* Store and send the Interrupt data. UFI sends the ASC 2601 /* Store and send the Interrupt data. UFI sends the ASC
2603 * and ASCQ bytes. Everything else sends a Type (which 2602 * and ASCQ bytes. Everything else sends a Type (which
@@ -2982,7 +2981,7 @@ static int do_scsi_command(struct fsg_dev *fsg)
2982static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh) 2981static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
2983{ 2982{
2984 struct usb_request *req = bh->outreq; 2983 struct usb_request *req = bh->outreq;
2985 struct bulk_cb_wrap *cbw = (struct bulk_cb_wrap *) req->buf; 2984 struct bulk_cb_wrap *cbw = req->buf;
2986 2985
2987 /* Was this a real packet? */ 2986 /* Was this a real packet? */
2988 if (req->status) 2987 if (req->status)
@@ -3428,7 +3427,7 @@ static void handle_exception(struct fsg_dev *fsg)
3428 3427
3429static int fsg_main_thread(void *fsg_) 3428static int fsg_main_thread(void *fsg_)
3430{ 3429{
3431 struct fsg_dev *fsg = (struct fsg_dev *) fsg_; 3430 struct fsg_dev *fsg = fsg_;
3432 3431
3433 /* Allow the thread to be killed by a signal, but set the signal mask 3432 /* Allow the thread to be killed by a signal, but set the signal mask
3434 * to block everything but INT, TERM, KILL, and USR1. */ 3433 * to block everything but INT, TERM, KILL, and USR1. */
@@ -3600,7 +3599,7 @@ static ssize_t show_ro(struct device *dev, struct device_attribute *attr, char *
3600static ssize_t show_file(struct device *dev, struct device_attribute *attr, char *buf) 3599static ssize_t show_file(struct device *dev, struct device_attribute *attr, char *buf)
3601{ 3600{
3602 struct lun *curlun = dev_to_lun(dev); 3601 struct lun *curlun = dev_to_lun(dev);
3603 struct fsg_dev *fsg = (struct fsg_dev *) dev_get_drvdata(dev); 3602 struct fsg_dev *fsg = dev_get_drvdata(dev);
3604 char *p; 3603 char *p;
3605 ssize_t rc; 3604 ssize_t rc;
3606 3605
@@ -3629,7 +3628,7 @@ static ssize_t store_ro(struct device *dev, struct device_attribute *attr, const
3629{ 3628{
3630 ssize_t rc = count; 3629 ssize_t rc = count;
3631 struct lun *curlun = dev_to_lun(dev); 3630 struct lun *curlun = dev_to_lun(dev);
3632 struct fsg_dev *fsg = (struct fsg_dev *) dev_get_drvdata(dev); 3631 struct fsg_dev *fsg = dev_get_drvdata(dev);
3633 int i; 3632 int i;
3634 3633
3635 if (sscanf(buf, "%d", &i) != 1) 3634 if (sscanf(buf, "%d", &i) != 1)
@@ -3652,7 +3651,7 @@ static ssize_t store_ro(struct device *dev, struct device_attribute *attr, const
3652static ssize_t store_file(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 3651static ssize_t store_file(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
3653{ 3652{
3654 struct lun *curlun = dev_to_lun(dev); 3653 struct lun *curlun = dev_to_lun(dev);
3655 struct fsg_dev *fsg = (struct fsg_dev *) dev_get_drvdata(dev); 3654 struct fsg_dev *fsg = dev_get_drvdata(dev);
3656 int rc = 0; 3655 int rc = 0;
3657 3656
3658 if (curlun->prevent_medium_removal && backing_file_is_open(curlun)) { 3657 if (curlun->prevent_medium_removal && backing_file_is_open(curlun)) {
@@ -3700,7 +3699,7 @@ static void fsg_release(struct kref *ref)
3700 3699
3701static void lun_release(struct device *dev) 3700static void lun_release(struct device *dev)
3702{ 3701{
3703 struct fsg_dev *fsg = (struct fsg_dev *) dev_get_drvdata(dev); 3702 struct fsg_dev *fsg = dev_get_drvdata(dev);
3704 3703
3705 kref_put(&fsg->ref, fsg_release); 3704 kref_put(&fsg->ref, fsg_release);
3706} 3705}
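The file_storage.c hunks above are a pure cleanup: in C (unlike C++), void * converts implicitly to any object-pointer type, so casting the results of dev_get_drvdata(), ep->driver_data, or req->context added nothing and could even mask type mistakes. In miniature (demo_drvdata is hypothetical, reusing this file's struct):

static struct fsg_dev *demo_drvdata(struct device *dev)
{
	return dev_get_drvdata(dev);	/* void * converts implicitly */
}
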
diff --git a/drivers/usb/gadget/gadget_chips.h b/drivers/usb/gadget/gadget_chips.h
index aa80f0910720..2e3d6620d216 100644
--- a/drivers/usb/gadget/gadget_chips.h
+++ b/drivers/usb/gadget/gadget_chips.h
@@ -75,6 +75,12 @@
75#define gadget_is_pxa27x(g) 0 75#define gadget_is_pxa27x(g) 0
76#endif 76#endif
77 77
78#ifdef CONFIG_USB_GADGET_HUSB2DEV
79#define gadget_is_husb2dev(g) !strcmp("husb2_udc", (g)->name)
80#else
81#define gadget_is_husb2dev(g) 0
82#endif
83
78#ifdef CONFIG_USB_GADGET_S3C2410 84#ifdef CONFIG_USB_GADGET_S3C2410
79#define gadget_is_s3c2410(g) !strcmp("s3c2410_udc", (g)->name) 85#define gadget_is_s3c2410(g) !strcmp("s3c2410_udc", (g)->name)
80#else 86#else
@@ -169,5 +175,7 @@ static inline int usb_gadget_controller_number(struct usb_gadget *gadget)
169 return 0x16; 175 return 0x16;
170 else if (gadget_is_mpc8272(gadget)) 176 else if (gadget_is_mpc8272(gadget))
171 return 0x17; 177 return 0x17;
178 else if (gadget_is_husb2dev(gadget))
179 return 0x18;
172 return -ENOENT; 180 return -ENOENT;
173} 181}
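usb_gadget_controller_number() assigns each supported UDC a small unique code. Gadget drivers typically fold it into bcdDevice so host-side drivers (for instance .inf matching on Windows) can distinguish controllers that otherwise present identical IDs, along the lines of this sketch (demo_set_bcd is hypothetical; ether.c uses the same arithmetic):

#include <linux/usb/ch9.h>
#include <linux/usb_gadget.h>
#include "gadget_chips.h"

static void demo_set_bcd(struct usb_gadget *gadget,
		struct usb_device_descriptor *desc)
{
	int gcnum = usb_gadget_controller_number(gadget);

	if (gcnum >= 0)			/* -ENOENT for unrecognized UDCs */
		desc->bcdDevice = cpu_to_le16(0x0200 + gcnum);
}
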
diff --git a/drivers/usb/gadget/gmidi.c b/drivers/usb/gadget/gmidi.c
index f1a679656c96..d08a8d0e6427 100644
--- a/drivers/usb/gadget/gmidi.c
+++ b/drivers/usb/gadget/gmidi.c
@@ -35,7 +35,7 @@
35#include <sound/initval.h> 35#include <sound/initval.h>
36#include <sound/rawmidi.h> 36#include <sound/rawmidi.h>
37 37
38#include <linux/usb_ch9.h> 38#include <linux/usb/ch9.h>
39#include <linux/usb_gadget.h> 39#include <linux/usb_gadget.h>
40#include <linux/usb/audio.h> 40#include <linux/usb/audio.h>
41#include <linux/usb/midi.h> 41#include <linux/usb/midi.h>
diff --git a/drivers/usb/gadget/goku_udc.c b/drivers/usb/gadget/goku_udc.c
index d0ef1d6b3fac..e873cf488246 100644
--- a/drivers/usb/gadget/goku_udc.c
+++ b/drivers/usb/gadget/goku_udc.c
@@ -39,7 +39,7 @@
39#include <linux/interrupt.h> 39#include <linux/interrupt.h>
40#include <linux/proc_fs.h> 40#include <linux/proc_fs.h>
41#include <linux/device.h> 41#include <linux/device.h>
42#include <linux/usb_ch9.h> 42#include <linux/usb/ch9.h>
43#include <linux/usb_gadget.h> 43#include <linux/usb_gadget.h>
44 44
45#include <asm/byteorder.h> 45#include <asm/byteorder.h>
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index 3fb1044a4db0..34296e79edcf 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -20,7 +20,7 @@
20 */ 20 */
21 21
22 22
23// #define DEBUG /* data to help fault diagnosis */ 23// #define DEBUG /* data to help fault diagnosis */
24// #define VERBOSE /* extra debug messages (success too) */ 24// #define VERBOSE /* extra debug messages (success too) */
25 25
26#include <linux/init.h> 26#include <linux/init.h>
@@ -59,11 +59,11 @@
  * may serve as a source of device events, used to handle all control
  * requests other than basic enumeration.
  *
- * - Then either immediately, or after a SET_CONFIGURATION control request,
- *   ep_config() is called when each /dev/gadget/ep* file is configured
- *   (by writing endpoint descriptors).  Afterwards these files are used
- *   to write() IN data or to read() OUT data.  To halt the endpoint, a
- *   "wrong direction" request is issued (like reading an IN endpoint).
+ * - Then, after a SET_CONFIGURATION control request, ep_config() is
+ *   called when each /dev/gadget/ep* file is configured (by writing
+ *   endpoint descriptors).  Afterwards these files are used to write()
+ *   IN data or to read() OUT data.  To halt the endpoint, a "wrong
+ *   direction" request is issued (like reading an IN endpoint).
  *
  * Unlike "usbfs" the only ioctl()s are for things that are rare, and maybe
  * not possible on all hardware.  For example, precise fault handling with
@@ -98,16 +98,16 @@ enum ep0_state {
 	 * must always write descriptors to initialize the device, then
 	 * the device becomes UNCONNECTED until enumeration.
 	 */
-	STATE_OPENED,
+	STATE_DEV_OPENED,
 
 	/* From then on, ep0 fd is in either of two basic modes:
 	 * - (UN)CONNECTED: read usb_gadgetfs_event(s) from it
 	 * - SETUP: read/write will transfer control data and succeed;
 	 *   or if "wrong direction", performs protocol stall
 	 */
-	STATE_UNCONNECTED,
-	STATE_CONNECTED,
-	STATE_SETUP,
+	STATE_DEV_UNCONNECTED,
+	STATE_DEV_CONNECTED,
+	STATE_DEV_SETUP,
 
 	/* UNBOUND means the driver closed ep0, so the device won't be
 	 * accessible again (DEV_DISABLED) until all fds are closed.
@@ -121,7 +121,7 @@ enum ep0_state {
 struct dev_data {
 	spinlock_t			lock;
 	atomic_t			count;
-	enum ep0_state			state;
+	enum ep0_state			state;		/* P: lock */
 	struct usb_gadgetfs_event	event [N_EVENT];
 	unsigned			ev_next;
 	struct fasync_struct		*fasync;
@@ -188,7 +188,6 @@ static struct dev_data *dev_new (void)
 enum ep_state {
 	STATE_EP_DISABLED = 0,
 	STATE_EP_READY,
-	STATE_EP_DEFER_ENABLE,
 	STATE_EP_ENABLED,
 	STATE_EP_UNBOUND,
 };
@@ -313,18 +312,10 @@ nonblock:
 
 	if ((val = down_interruptible (&epdata->lock)) < 0)
 		return val;
-newstate:
+
 	switch (epdata->state) {
 	case STATE_EP_ENABLED:
 		break;
-	case STATE_EP_DEFER_ENABLE:
-		DBG (epdata->dev, "%s wait for host\n", epdata->name);
-		if ((val = wait_event_interruptible (epdata->wait,
-				epdata->state != STATE_EP_DEFER_ENABLE
-				|| epdata->dev->state == STATE_DEV_UNBOUND
-				)) < 0)
-			goto fail;
-		goto newstate;
 	// case STATE_EP_DISABLED:		/* "can't happen" */
 	// case STATE_EP_READY:			/* "can't happen" */
 	default:				/* error! */
@@ -333,7 +324,6 @@ newstate:
 		// FALLTHROUGH
 	case STATE_EP_UNBOUND:			/* clean disconnect */
 		val = -ENODEV;
-fail:
 		up (&epdata->lock);
 	}
 	return val;
@@ -565,29 +555,28 @@ static ssize_t ep_aio_read_retry(struct kiocb *iocb)
 	ssize_t			len, total;
 	int			i;
 
 	/* we "retry" to get the right mm context for this: */
 
 	/* copy stuff into user buffers */
 	total = priv->actual;
 	len = 0;
 	for (i=0; i < priv->nr_segs; i++) {
 		ssize_t this = min((ssize_t)(priv->iv[i].iov_len), total);
 
 		if (copy_to_user(priv->iv[i].iov_base, priv->buf, this)) {
 			if (len == 0)
 				len = -EFAULT;
 			break;
 		}
 
 		total -= this;
 		len += this;
 		if (total == 0)
 			break;
 	}
 	kfree(priv->buf);
 	kfree(priv);
-	aio_put_req(iocb);
 	return len;
 }
 
 static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
@@ -600,18 +589,17 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
 	spin_lock(&epdata->dev->lock);
 	priv->req = NULL;
 	priv->epdata = NULL;
-	if (priv->iv == NULL
-			|| unlikely(req->actual == 0)
-			|| unlikely(kiocbIsCancelled(iocb))) {
+
+	/* if this was a write or a read returning no data then we
+	 * don't need to copy anything to userspace, so we can
+	 * complete the aio request immediately.
+	 */
+	if (priv->iv == NULL || unlikely(req->actual == 0)) {
 		kfree(req->buf);
 		kfree(priv);
 		iocb->private = NULL;
 		/* aio_complete() reports bytes-transferred _and_ faults */
-		if (unlikely(kiocbIsCancelled(iocb)))
-			aio_put_req(iocb);
-		else
-			aio_complete(iocb,
-				req->actual ? req->actual : req->status,
-				req->status);
+		aio_complete(iocb, req->actual ? req->actual : req->status,
+				req->status);
 	} else {
 		/* retry() won't report both; so we hide some faults */
@@ -636,7 +624,7 @@ ep_aio_rwtail(
 	size_t		len,
 	struct ep_data	*epdata,
 	const struct iovec *iv,
-	unsigned long	nr_segs
+	unsigned long nr_segs
 )
 {
 	struct kiocb_priv	*priv;
@@ -852,9 +840,9 @@ ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
 		break;
 #endif
 	default:
-		DBG (data->dev, "unconnected, %s init deferred\n",
-			data->name);
-		data->state = STATE_EP_DEFER_ENABLE;
+		DBG(data->dev, "unconnected, %s init abandoned\n",
+				data->name);
+		value = -EINVAL;
 	}
 	if (value == 0) {
 		fd->f_op = &ep_io_operations;
@@ -943,22 +931,24 @@ static void clean_req (struct usb_ep *ep, struct usb_request *req)
 static void ep0_complete (struct usb_ep *ep, struct usb_request *req)
 {
 	struct dev_data		*dev = ep->driver_data;
+	unsigned long		flags;
 	int			free = 1;
 
 	/* for control OUT, data must still get to userspace */
+	spin_lock_irqsave(&dev->lock, flags);
 	if (!dev->setup_in) {
 		dev->setup_out_error = (req->status != 0);
 		if (!dev->setup_out_error)
 			free = 0;
 		dev->setup_out_ready = 1;
 		ep0_readable (dev);
-	} else if (dev->state == STATE_SETUP)
-		dev->state = STATE_CONNECTED;
+	}
 
 	/* clean up as appropriate */
 	if (free && req->buf != &dev->rbuf)
 		clean_req (ep, req);
 	req->complete = epio_complete;
+	spin_unlock_irqrestore(&dev->lock, flags);
 }
 
 static int setup_req (struct usb_ep *ep, struct usb_request *req, u16 len)
@@ -998,13 +988,13 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
 	}
 
 	/* control DATA stage */
-	if ((state = dev->state) == STATE_SETUP) {
+	if ((state = dev->state) == STATE_DEV_SETUP) {
 
 		if (dev->setup_in) {			/* stall IN */
 			VDEBUG(dev, "ep0in stall\n");
 			(void) usb_ep_set_halt (dev->gadget->ep0);
 			retval = -EL2HLT;
-			dev->state = STATE_CONNECTED;
+			dev->state = STATE_DEV_CONNECTED;
 
 		} else if (len == 0) {		/* ack SET_CONFIGURATION etc */
 			struct usb_ep		*ep = dev->gadget->ep0;
@@ -1012,7 +1002,7 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
 
 			if ((retval = setup_req (ep, req, 0)) == 0)
 				retval = usb_ep_queue (ep, req, GFP_ATOMIC);
-			dev->state = STATE_CONNECTED;
+			dev->state = STATE_DEV_CONNECTED;
 
 			/* assume that was SET_CONFIGURATION */
 			if (dev->current_config) {
@@ -1040,6 +1030,13 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
 			spin_lock_irq (&dev->lock);
 			if (retval)
 				goto done;
+
+			if (dev->state != STATE_DEV_SETUP) {
+				retval = -ECANCELED;
+				goto done;
+			}
+			dev->state = STATE_DEV_CONNECTED;
+
 			if (dev->setup_out_error)
 				retval = -EIO;
 			else {
@@ -1066,39 +1063,36 @@ scan:
 	/* return queued events right away */
 	if (dev->ev_next != 0) {
 		unsigned		i, n;
-		int			tmp = dev->ev_next;
 
-		len = min (len, tmp * sizeof (struct usb_gadgetfs_event));
 		n = len / sizeof (struct usb_gadgetfs_event);
+		if (dev->ev_next < n)
+			n = dev->ev_next;
 
-		/* ep0 can't deliver events when STATE_SETUP */
+		/* ep0 i/o has special semantics during STATE_DEV_SETUP */
 		for (i = 0; i < n; i++) {
 			if (dev->event [i].type == GADGETFS_SETUP) {
-				len = i + 1;
-				len *= sizeof (struct usb_gadgetfs_event);
-				n = 0;
+				dev->state = STATE_DEV_SETUP;
+				n = i + 1;
 				break;
 			}
 		}
 		spin_unlock_irq (&dev->lock);
+		len = n * sizeof (struct usb_gadgetfs_event);
 		if (copy_to_user (buf, &dev->event, len))
 			retval = -EFAULT;
 		else
 			retval = len;
 		if (len > 0) {
-			len /= sizeof (struct usb_gadgetfs_event);
-
 			/* NOTE this doesn't guard against broken drivers;
 			 * concurrent ep0 readers may lose events.
 			 */
 			spin_lock_irq (&dev->lock);
-			dev->ev_next -= len;
-			if (dev->ev_next != 0)
-				memmove (&dev->event, &dev->event [len],
+			if (dev->ev_next > n) {
+				memmove(&dev->event[0], &dev->event[n],
 					sizeof (struct usb_gadgetfs_event)
-					* (tmp - len));
-			if (n == 0)
-				dev->state = STATE_SETUP;
+					* (dev->ev_next - n));
+			}
+			dev->ev_next -= n;
 			spin_unlock_irq (&dev->lock);
 		}
 		return retval;
@@ -1113,8 +1107,8 @@ scan:
 		DBG (dev, "fail %s, state %d\n", __FUNCTION__, state);
 		retval = -ESRCH;
 		break;
-	case STATE_UNCONNECTED:
-	case STATE_CONNECTED:
+	case STATE_DEV_UNCONNECTED:
+	case STATE_DEV_CONNECTED:
 		spin_unlock_irq (&dev->lock);
 		DBG (dev, "%s wait\n", __FUNCTION__);
 
@@ -1141,7 +1135,7 @@ next_event (struct dev_data *dev, enum usb_gadgetfs_event_type type)
 	switch (type) {
 	/* these events purge the queue */
 	case GADGETFS_DISCONNECT:
-		if (dev->state == STATE_SETUP)
+		if (dev->state == STATE_DEV_SETUP)
 			dev->setup_abort = 1;
 		// FALL THROUGH
 	case GADGETFS_CONNECT:
@@ -1153,7 +1147,7 @@ next_event (struct dev_data *dev, enum usb_gadgetfs_event_type type)
 		for (i = 0; i != dev->ev_next; i++) {
 			if (dev->event [i].type != type)
 				continue;
-			DBG (dev, "discard old event %d\n", type);
+			DBG(dev, "discard old event[%d] %d\n", i, type);
 			dev->ev_next--;
 			if (i == dev->ev_next)
 				break;
@@ -1166,9 +1160,9 @@ next_event (struct dev_data *dev, enum usb_gadgetfs_event_type type)
 	default:
 		BUG ();
 	}
+	VDEBUG(dev, "event[%d] = %d\n", dev->ev_next, type);
 	event = &dev->event [dev->ev_next++];
 	BUG_ON (dev->ev_next > N_EVENT);
-	VDEBUG (dev, "ev %d, next %d\n", type, dev->ev_next);
 	memset (event, 0, sizeof *event);
 	event->type = type;
 	return event;
@@ -1188,12 +1182,13 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
 		retval = -EIDRM;
 
 	/* data and/or status stage for control request */
-	} else if (dev->state == STATE_SETUP) {
+	} else if (dev->state == STATE_DEV_SETUP) {
 
 		/* IN DATA+STATUS caller makes len <= wLength */
 		if (dev->setup_in) {
 			retval = setup_req (dev->gadget->ep0, dev->req, len);
 			if (retval == 0) {
+				dev->state = STATE_DEV_CONNECTED;
 				spin_unlock_irq (&dev->lock);
 				if (copy_from_user (dev->req->buf, buf, len))
 					retval = -EFAULT;
@@ -1219,7 +1214,7 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
 				VDEBUG(dev, "ep0out stall\n");
 				(void) usb_ep_set_halt (dev->gadget->ep0);
 				retval = -EL2HLT;
-				dev->state = STATE_CONNECTED;
+				dev->state = STATE_DEV_CONNECTED;
 			} else {
 				DBG(dev, "bogus ep0out stall!\n");
 			}
@@ -1261,7 +1256,9 @@ dev_release (struct inode *inode, struct file *fd)
 	put_dev (dev);
 
 	/* other endpoints were all decoupled from this device */
+	spin_lock_irq(&dev->lock);
 	dev->state = STATE_DEV_DISABLED;
+	spin_unlock_irq(&dev->lock);
 	return 0;
 }
 
@@ -1282,7 +1279,7 @@ ep0_poll (struct file *fd, poll_table *wait)
 		goto out;
 	}
 
-	if (dev->state == STATE_SETUP) {
+	if (dev->state == STATE_DEV_SETUP) {
 		if (dev->setup_in || dev->setup_can_stall)
 			mask = POLLOUT;
 	} else {
@@ -1392,52 +1389,29 @@ gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
 
 	spin_lock (&dev->lock);
 	dev->setup_abort = 0;
-	if (dev->state == STATE_UNCONNECTED) {
-		struct usb_ep	*ep;
-		struct ep_data	*data;
-
-		dev->state = STATE_CONNECTED;
-		dev->dev->bMaxPacketSize0 = gadget->ep0->maxpacket;
-
+	if (dev->state == STATE_DEV_UNCONNECTED) {
 #ifdef	CONFIG_USB_GADGET_DUALSPEED
 		if (gadget->speed == USB_SPEED_HIGH && dev->hs_config == 0) {
+			spin_unlock(&dev->lock);
 			ERROR (dev, "no high speed config??\n");
 			return -EINVAL;
 		}
 #endif	/* CONFIG_USB_GADGET_DUALSPEED */
 
+		dev->state = STATE_DEV_CONNECTED;
+		dev->dev->bMaxPacketSize0 = gadget->ep0->maxpacket;
+
 		INFO (dev, "connected\n");
 		event = next_event (dev, GADGETFS_CONNECT);
 		event->u.speed = gadget->speed;
 		ep0_readable (dev);
 
-		list_for_each_entry (ep, &gadget->ep_list, ep_list) {
-			data = ep->driver_data;
-			/* ... down_trylock (&data->lock) ... */
-			if (data->state != STATE_EP_DEFER_ENABLE)
-				continue;
-#ifdef CONFIG_USB_GADGET_DUALSPEED
-			if (gadget->speed == USB_SPEED_HIGH)
-				value = usb_ep_enable (ep, &data->hs_desc);
-			else
-#endif	/* CONFIG_USB_GADGET_DUALSPEED */
-				value = usb_ep_enable (ep, &data->desc);
-			if (value) {
-				ERROR (dev, "deferred %s enable --> %d\n",
-					data->name, value);
-				continue;
-			}
-			data->state = STATE_EP_ENABLED;
-			wake_up (&data->wait);
-			DBG (dev, "woke up %s waiters\n", data->name);
-		}
-
 	/* host may have given up waiting for response.  we can miss control
 	 * requests handled lower down (device/endpoint status and features);
 	 * then ep0_{read,write} will report the wrong status. controller
 	 * driver will have aborted pending i/o.
 	 */
-	} else if (dev->state == STATE_SETUP)
+	} else if (dev->state == STATE_DEV_SETUP)
 		dev->setup_abort = 1;
 
 	req->buf = dev->rbuf;
@@ -1583,7 +1557,7 @@ delegate:
 	}
 
 	/* proceed with data transfer and status phases? */
-	if (value >= 0 && dev->state != STATE_SETUP) {
+	if (value >= 0 && dev->state != STATE_DEV_SETUP) {
 		req->length = value;
 		req->zero = value < w_length;
 		value = usb_ep_queue (gadget->ep0, req, GFP_ATOMIC);
@@ -1747,7 +1721,9 @@ gadgetfs_bind (struct usb_gadget *gadget)
 		goto enomem;
 
 	INFO (dev, "bound to %s driver\n", gadget->name);
-	dev->state = STATE_UNCONNECTED;
+	spin_lock_irq(&dev->lock);
+	dev->state = STATE_DEV_UNCONNECTED;
+	spin_unlock_irq(&dev->lock);
 	get_dev (dev);
 	return 0;
 
@@ -1762,11 +1738,9 @@ gadgetfs_disconnect (struct usb_gadget *gadget)
 	struct dev_data		*dev = get_gadget_data (gadget);
 
 	spin_lock (&dev->lock);
-	if (dev->state == STATE_UNCONNECTED) {
-		DBG (dev, "already unconnected\n");
+	if (dev->state == STATE_DEV_UNCONNECTED)
 		goto exit;
-	}
-	dev->state = STATE_UNCONNECTED;
+	dev->state = STATE_DEV_UNCONNECTED;
 
 	INFO (dev, "disconnected\n");
 	next_event (dev, GADGETFS_DISCONNECT);
@@ -1783,9 +1757,9 @@ gadgetfs_suspend (struct usb_gadget *gadget)
 	INFO (dev, "suspended from state %d\n", dev->state);
 	spin_lock (&dev->lock);
 	switch (dev->state) {
-	case STATE_SETUP:		// VERY odd... host died??
-	case STATE_CONNECTED:
-	case STATE_UNCONNECTED:
+	case STATE_DEV_SETUP:		// VERY odd... host died??
+	case STATE_DEV_CONNECTED:
+	case STATE_DEV_UNCONNECTED:
 		next_event (dev, GADGETFS_SUSPEND);
 		ep0_readable (dev);
 		/* FALLTHROUGH */
@@ -1808,7 +1782,7 @@ static struct usb_gadget_driver gadgetfs_driver = {
 	.disconnect	= gadgetfs_disconnect,
 	.suspend	= gadgetfs_suspend,
 
-	.driver	= {
+	.driver		= {
 		.name		= (char *) shortname,
 	},
 };
@@ -1829,7 +1803,7 @@ static struct usb_gadget_driver probe_driver = {
 	.unbind		= gadgetfs_nop,
 	.setup		= (void *)gadgetfs_nop,
 	.disconnect	= gadgetfs_nop,
-	.driver	= {
+	.driver		= {
 		.name		= "nop",
 	},
 };
@@ -1849,19 +1823,16 @@ static struct usb_gadget_driver probe_driver = {
 * . full/low speed config ... all wTotalLength bytes (with interface,
 *	class, altsetting, endpoint, and other descriptors)
 * . high speed config ... all descriptors, for high speed operation;
 *	this one's optional except for high-speed hardware
 * . device descriptor
 *
- * Endpoints are not yet enabled.  Drivers may want to immediately
- * initialize them, using the /dev/gadget/ep* files that are available
- * as soon as the kernel sees the configuration, or they can wait
- * until device configuration and interface altsetting changes create
+ * Endpoints are not yet enabled.  Drivers must wait until device
+ * configuration and interface altsetting changes create
 * the need to configure (or unconfigure) them.
 *
 * After initialization, the device stays active for as long as that
- * $CHIP file is open.  Events may then be read from that descriptor,
- * such as configuration notifications.  More complex drivers will handle
- * some control requests in user space.
+ * $CHIP file is open.  Events must then be read from that descriptor,
+ * such as configuration notifications.
 */
 
 static int is_valid_config (struct usb_config_descriptor *config)
@@ -1884,9 +1855,6 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
 	u32			tag;
 	char			*kbuf;
 
-	if (dev->state != STATE_OPENED)
-		return -EEXIST;
-
 	if (len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4))
 		return -EINVAL;
 
@@ -1978,13 +1946,15 @@ dev_open (struct inode *inode, struct file *fd)
 	struct dev_data		*dev = inode->i_private;
 	int			value = -EBUSY;
 
+	spin_lock_irq(&dev->lock);
 	if (dev->state == STATE_DEV_DISABLED) {
 		dev->ev_next = 0;
-		dev->state = STATE_OPENED;
+		dev->state = STATE_DEV_OPENED;
 		fd->private_data = dev;
 		get_dev (dev);
 		value = 0;
 	}
+	spin_unlock_irq(&dev->lock);
 	return value;
 }
 
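
With the rename, the ep0 read path above only enters STATE_DEV_SETUP once a GADGETFS_SETUP event has actually been handed to userspace, and i/o that races a newer setup packet now fails with -ECANCELED instead of acting on stale state. For orientation, a minimal userspace sketch of the event loop this protocol expects; handle_setup() and the error handling are illustrative, and the header is <linux/usb_gadgetfs.h> before the usb header reshuffle, <linux/usb/gadgetfs.h> after:

	#include <stdio.h>
	#include <unistd.h>
	#include <linux/usb_gadgetfs.h>

	/* handle_setup() stands in for the driver's own control handling */
	extern void handle_setup(int fd, struct usb_ctrlrequest *setup);

	static void ep0_event_loop(int fd)
	{
		struct usb_gadgetfs_event event;

		/* each read() returns whole events; per ep0_read() above,
		 * a batch of queued events ends at the first SETUP event
		 */
		while (read(fd, &event, sizeof event) == sizeof event) {
			switch (event.type) {
			case GADGETFS_CONNECT:
				printf("connect, speed %d\n", event.u.speed);
				break;
			case GADGETFS_SETUP:
				/* device is now in STATE_DEV_SETUP: answer
				 * with read()/write() on this fd, or stall
				 * by doing i/o in the "wrong" direction
				 */
				handle_setup(fd, &event.u.setup);
				break;
			case GADGETFS_DISCONNECT:
			case GADGETFS_SUSPEND:
				break;
			}
		}
	}
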
diff --git a/drivers/usb/gadget/lh7a40x_udc.h b/drivers/usb/gadget/lh7a40x_udc.h
index e3bb78524c88..b3fe197e1eeb 100644
--- a/drivers/usb/gadget/lh7a40x_udc.h
+++ b/drivers/usb/gadget/lh7a40x_udc.h
@@ -49,7 +49,7 @@
 #include <asm/unaligned.h>
 #include <asm/hardware.h>
 
-#include <linux/usb_ch9.h>
+#include <linux/usb/ch9.h>
 #include <linux/usb_gadget.h>
 
 /*
diff --git a/drivers/usb/gadget/net2280.c b/drivers/usb/gadget/net2280.c
index 569eb8ccf232..7617ff7bd5ac 100644
--- a/drivers/usb/gadget/net2280.c
+++ b/drivers/usb/gadget/net2280.c
@@ -63,7 +63,7 @@
 #include <linux/interrupt.h>
 #include <linux/moduleparam.h>
 #include <linux/device.h>
-#include <linux/usb_ch9.h>
+#include <linux/usb/ch9.h>
 #include <linux/usb_gadget.h>
 
 #include <asm/byteorder.h>
diff --git a/drivers/usb/gadget/omap_udc.c b/drivers/usb/gadget/omap_udc.c
index cdcfd42843d4..140104341db4 100644
--- a/drivers/usb/gadget/omap_udc.c
+++ b/drivers/usb/gadget/omap_udc.c
@@ -38,7 +38,7 @@
 #include <linux/mm.h>
 #include <linux/moduleparam.h>
 #include <linux/platform_device.h>
-#include <linux/usb_ch9.h>
+#include <linux/usb/ch9.h>
 #include <linux/usb_gadget.h>
 #include <linux/usb/otg.h>
 #include <linux/dma-mapping.h>
diff --git a/drivers/usb/gadget/pxa2xx_udc.c b/drivers/usb/gadget/pxa2xx_udc.c
index b78de9694665..0d225369847d 100644
--- a/drivers/usb/gadget/pxa2xx_udc.c
+++ b/drivers/usb/gadget/pxa2xx_udc.c
@@ -56,7 +56,7 @@
 #include <asm/arch/pxa-regs.h>
 #endif
 
-#include <linux/usb_ch9.h>
+#include <linux/usb/ch9.h>
 #include <linux/usb_gadget.h>
 
 #include <asm/arch/udc.h>
diff --git a/drivers/usb/gadget/serial.c b/drivers/usb/gadget/serial.c
index f8a3ec64635d..6c742a909225 100644
--- a/drivers/usb/gadget/serial.c
+++ b/drivers/usb/gadget/serial.c
@@ -43,7 +43,7 @@
 #include <asm/unaligned.h>
 #include <asm/uaccess.h>
 
-#include <linux/usb_ch9.h>
+#include <linux/usb/ch9.h>
 #include <linux/usb/cdc.h>
 #include <linux/usb_gadget.h>
 
diff --git a/drivers/usb/gadget/usbstring.c b/drivers/usb/gadget/usbstring.c
index b1735767660b..3459ea6c6c0b 100644
--- a/drivers/usb/gadget/usbstring.c
+++ b/drivers/usb/gadget/usbstring.c
@@ -14,7 +14,7 @@
 #include <linux/device.h>
 #include <linux/init.h>
 
-#include <linux/usb_ch9.h>
+#include <linux/usb/ch9.h>
 #include <linux/usb_gadget.h>
 
 #include <asm/unaligned.h>
diff --git a/drivers/usb/gadget/zero.c b/drivers/usb/gadget/zero.c
index 40710ea1b490..ebe04e0d2879 100644
--- a/drivers/usb/gadget/zero.c
+++ b/drivers/usb/gadget/zero.c
@@ -84,7 +84,7 @@
 #include <asm/system.h>
 #include <asm/unaligned.h>
 
-#include <linux/usb_ch9.h>
+#include <linux/usb/ch9.h>
 #include <linux/usb_gadget.h>
 
 #include "gadget_chips.h"
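
The eight usb_ch9.h hunks above are the mechanical half of a header move: the ch9 definitions now live under include/linux/usb/, while usb_gadget.h has not moved yet. Out-of-tree gadget drivers track it with the same one-line change:

	-#include <linux/usb_ch9.h>
	+#include <linux/usb/ch9.h>	/* ch9 defines: usb_ctrlrequest etc. */
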
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index cc60759083bf..62711870f8ee 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -67,6 +67,11 @@ config USB_EHCI_TT_NEWSCHED
 
 	  If unsure, say N.
 
+config USB_EHCI_BIG_ENDIAN_MMIO
+	bool
+	depends on USB_EHCI_HCD
+	default n
+
 config USB_ISP116X_HCD
 	tristate "ISP116X HCD support"
 	depends on USB
@@ -101,21 +106,48 @@ config USB_OHCI_HCD_PPC_SOC
 	bool "OHCI support for on-chip PPC USB controller"
 	depends on USB_OHCI_HCD && (STB03xxx || PPC_MPC52xx)
 	default y
-	select USB_OHCI_BIG_ENDIAN
+	select USB_OHCI_BIG_ENDIAN_DESC
+	select USB_OHCI_BIG_ENDIAN_MMIO
 	---help---
 	  Enables support for the USB controller on the MPC52xx or
 	  STB03xxx processor chip.  If unsure, say Y.
 
+config USB_OHCI_HCD_PPC_OF
+	bool "OHCI support for PPC USB controller on OF platform bus"
+	depends on USB_OHCI_HCD && PPC_OF
+	default y
+	---help---
+	  Enables support for the USB controller present on the
+	  PowerPC OpenFirmware platform bus.
+
+config USB_OHCI_HCD_PPC_OF_BE
+	bool "Support big endian HC"
+	depends on USB_OHCI_HCD_PPC_OF
+	default y
+	select USB_OHCI_BIG_ENDIAN_DESC
+	select USB_OHCI_BIG_ENDIAN_MMIO
+
+config USB_OHCI_HCD_PPC_OF_LE
+	bool "Support little endian HC"
+	depends on USB_OHCI_HCD_PPC_OF
+	default n
+	select USB_OHCI_LITTLE_ENDIAN
+
 config USB_OHCI_HCD_PCI
 	bool "OHCI support for PCI-bus USB controllers"
-	depends on USB_OHCI_HCD && PCI && (STB03xxx || PPC_MPC52xx)
+	depends on USB_OHCI_HCD && PCI && (STB03xxx || PPC_MPC52xx || USB_OHCI_HCD_PPC_OF)
 	default y
 	select USB_OHCI_LITTLE_ENDIAN
 	---help---
 	  Enables support for PCI-bus plug-in USB controller cards.
 	  If unsure, say Y.
 
-config USB_OHCI_BIG_ENDIAN
+config USB_OHCI_BIG_ENDIAN_DESC
+	bool
+	depends on USB_OHCI_HCD
+	default n
+
+config USB_OHCI_BIG_ENDIAN_MMIO
 	bool
 	depends on USB_OHCI_HCD
 	default n
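
The split into _DESC and _MMIO symbols separates the two independent ways a controller can be big-endian: in-memory descriptors versus register access. The register half is consumed by accessor wrappers in the HCD private headers, which the ehci_readl()/ehci_writel() conversions below rely on. A sketch of their shape, assuming a per-controller big_endian_mmio flag in struct ehci_hcd (the real definitions live in ehci.h; readl_be()/writel_be() are the byte-swapped MMIO primitives):

	/* sketch: collapse to plain readl()/writel() unless the MMIO
	 * option is configured AND this particular controller set the
	 * (assumed) big_endian_mmio flag at probe time
	 */
	static inline unsigned int ehci_readl(const struct ehci_hcd *ehci,
					      __u32 __iomem *regs)
	{
	#ifdef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO
		return ehci->big_endian_mmio ?
			readl_be(regs) : readl(regs);
	#else
		return readl(regs);
	#endif
	}

	static inline void ehci_writel(const struct ehci_hcd *ehci,
				       const unsigned int val,
				       __u32 __iomem *regs)
	{
	#ifdef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO
		if (ehci->big_endian_mmio)
			writel_be(val, regs);
		else
			writel(val, regs);
	#else
		writel(val, regs);
	#endif
	}
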
diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c
index 56349d21e6ea..246afea9e83b 100644
--- a/drivers/usb/host/ehci-dbg.c
+++ b/drivers/usb/host/ehci-dbg.c
@@ -43,7 +43,7 @@
  */
 static void dbg_hcs_params (struct ehci_hcd *ehci, char *label)
 {
-	u32	params = readl (&ehci->caps->hcs_params);
+	u32	params = ehci_readl(ehci, &ehci->caps->hcs_params);
 
 	ehci_dbg (ehci,
 		"%s hcs_params 0x%x dbg=%d%s cc=%d pcc=%d%s%s ports=%d\n",
@@ -87,7 +87,7 @@ static inline void dbg_hcs_params (struct ehci_hcd *ehci, char *label) {}
  * */
 static void dbg_hcc_params (struct ehci_hcd *ehci, char *label)
 {
-	u32	params = readl (&ehci->caps->hcc_params);
+	u32	params = ehci_readl(ehci, &ehci->caps->hcc_params);
 
 	if (HCC_ISOC_CACHE (params)) {
 		ehci_dbg (ehci,
@@ -653,7 +653,7 @@ show_registers (struct class_device *class_dev, char *buf)
 	}
 
 	/* Capability Registers */
-	i = HC_VERSION(readl (&ehci->caps->hc_capbase));
+	i = HC_VERSION(ehci_readl(ehci, &ehci->caps->hc_capbase));
 	temp = scnprintf (next, size,
 		"bus %s, device %s (driver " DRIVER_VERSION ")\n"
 		"%s\n"
@@ -673,7 +673,7 @@ show_registers (struct class_device *class_dev, char *buf)
 		unsigned	count = 256/4;
 
 		pdev = to_pci_dev(ehci_to_hcd(ehci)->self.controller);
-		offset = HCC_EXT_CAPS (readl (&ehci->caps->hcc_params));
+		offset = HCC_EXT_CAPS (ehci_readl(ehci, &ehci->caps->hcc_params));
 		while (offset && count--) {
 			pci_read_config_dword (pdev, offset, &cap);
 			switch (cap & 0xff) {
@@ -704,50 +704,50 @@ show_registers (struct class_device *class_dev, char *buf)
 #endif
 
 	// FIXME interpret both types of params
-	i = readl (&ehci->caps->hcs_params);
+	i = ehci_readl(ehci, &ehci->caps->hcs_params);
 	temp = scnprintf (next, size, "structural params 0x%08x\n", i);
 	size -= temp;
 	next += temp;
 
-	i = readl (&ehci->caps->hcc_params);
+	i = ehci_readl(ehci, &ehci->caps->hcc_params);
 	temp = scnprintf (next, size, "capability params 0x%08x\n", i);
 	size -= temp;
 	next += temp;
 
 	/* Operational Registers */
 	temp = dbg_status_buf (scratch, sizeof scratch, label,
-			readl (&ehci->regs->status));
+			ehci_readl(ehci, &ehci->regs->status));
 	temp = scnprintf (next, size, fmt, temp, scratch);
 	size -= temp;
 	next += temp;
 
 	temp = dbg_command_buf (scratch, sizeof scratch, label,
-			readl (&ehci->regs->command));
+			ehci_readl(ehci, &ehci->regs->command));
 	temp = scnprintf (next, size, fmt, temp, scratch);
 	size -= temp;
 	next += temp;
 
 	temp = dbg_intr_buf (scratch, sizeof scratch, label,
-			readl (&ehci->regs->intr_enable));
+			ehci_readl(ehci, &ehci->regs->intr_enable));
 	temp = scnprintf (next, size, fmt, temp, scratch);
 	size -= temp;
 	next += temp;
 
 	temp = scnprintf (next, size, "uframe %04x\n",
-			readl (&ehci->regs->frame_index));
+			ehci_readl(ehci, &ehci->regs->frame_index));
 	size -= temp;
 	next += temp;
 
 	for (i = 1; i <= HCS_N_PORTS (ehci->hcs_params); i++) {
 		temp = dbg_port_buf (scratch, sizeof scratch, label, i,
-				readl (&ehci->regs->port_status [i - 1]));
+				ehci_readl(ehci, &ehci->regs->port_status [i - 1]));
 		temp = scnprintf (next, size, fmt, temp, scratch);
 		size -= temp;
 		next += temp;
 		if (i == HCS_DEBUG_PORT(ehci->hcs_params) && ehci->debug) {
 			temp = scnprintf (next, size,
 					"  debug control %08x\n",
-					readl (&ehci->debug->control));
+					ehci_readl(ehci, &ehci->debug->control));
 			size -= temp;
 			next += temp;
 		}
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 1a915e982c1c..a52480505f78 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -177,7 +177,7 @@ static void mpc83xx_setup_phy(struct ehci_hcd *ehci,
 	case FSL_USB2_PHY_NONE:
 		break;
 	}
-	writel(portsc, &ehci->regs->port_status[port_offset]);
+	ehci_writel(ehci, portsc, &ehci->regs->port_status[port_offset]);
 }
 
 static void mpc83xx_usb_setup(struct usb_hcd *hcd)
@@ -214,7 +214,7 @@ static void mpc83xx_usb_setup(struct usb_hcd *hcd)
 	}
 
 	/* put controller in host mode. */
-	writel(0x00000003, non_ehci + FSL_SOC_USB_USBMODE);
+	ehci_writel(ehci, 0x00000003, non_ehci + FSL_SOC_USB_USBMODE);
 	out_be32(non_ehci + FSL_SOC_USB_PRICTRL, 0x0000000c);
 	out_be32(non_ehci + FSL_SOC_USB_AGECNTTHRSH, 0x00000040);
 	out_be32(non_ehci + FSL_SOC_USB_SICTRL, 0x00000001);
@@ -238,12 +238,12 @@ static int ehci_fsl_setup(struct usb_hcd *hcd)
 	/* EHCI registers start at offset 0x100 */
 	ehci->caps = hcd->regs + 0x100;
 	ehci->regs = hcd->regs + 0x100 +
-	    HC_LENGTH(readl(&ehci->caps->hc_capbase));
+	    HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase));
 	dbg_hcs_params(ehci, "reset");
 	dbg_hcc_params(ehci, "reset");
 
 	/* cache this readonly data; minimize chip reads */
-	ehci->hcs_params = readl(&ehci->caps->hcs_params);
+	ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
 
 	retval = ehci_halt(ehci);
 	if (retval)
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 025d33313681..185721dba42b 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -157,12 +157,13 @@ MODULE_PARM_DESC (ignore_oc, "ignore bogus hardware overcurrent indications");
157 * before driver shutdown. But it also seems to be caused by bugs in cardbus 157 * before driver shutdown. But it also seems to be caused by bugs in cardbus
158 * bridge shutdown: shutting down the bridge before the devices using it. 158 * bridge shutdown: shutting down the bridge before the devices using it.
159 */ 159 */
160static int handshake (void __iomem *ptr, u32 mask, u32 done, int usec) 160static int handshake (struct ehci_hcd *ehci, void __iomem *ptr,
161 u32 mask, u32 done, int usec)
161{ 162{
162 u32 result; 163 u32 result;
163 164
164 do { 165 do {
165 result = readl (ptr); 166 result = ehci_readl(ehci, ptr);
166 if (result == ~(u32)0) /* card removed */ 167 if (result == ~(u32)0) /* card removed */
167 return -ENODEV; 168 return -ENODEV;
168 result &= mask; 169 result &= mask;
@@ -177,18 +178,19 @@ static int handshake (void __iomem *ptr, u32 mask, u32 done, int usec)
177/* force HC to halt state from unknown (EHCI spec section 2.3) */ 178/* force HC to halt state from unknown (EHCI spec section 2.3) */
178static int ehci_halt (struct ehci_hcd *ehci) 179static int ehci_halt (struct ehci_hcd *ehci)
179{ 180{
180 u32 temp = readl (&ehci->regs->status); 181 u32 temp = ehci_readl(ehci, &ehci->regs->status);
181 182
182 /* disable any irqs left enabled by previous code */ 183 /* disable any irqs left enabled by previous code */
183 writel (0, &ehci->regs->intr_enable); 184 ehci_writel(ehci, 0, &ehci->regs->intr_enable);
184 185
185 if ((temp & STS_HALT) != 0) 186 if ((temp & STS_HALT) != 0)
186 return 0; 187 return 0;
187 188
188 temp = readl (&ehci->regs->command); 189 temp = ehci_readl(ehci, &ehci->regs->command);
189 temp &= ~CMD_RUN; 190 temp &= ~CMD_RUN;
190 writel (temp, &ehci->regs->command); 191 ehci_writel(ehci, temp, &ehci->regs->command);
191 return handshake (&ehci->regs->status, STS_HALT, STS_HALT, 16 * 125); 192 return handshake (ehci, &ehci->regs->status,
193 STS_HALT, STS_HALT, 16 * 125);
192} 194}
193 195
194/* put TDI/ARC silicon into EHCI mode */ 196/* put TDI/ARC silicon into EHCI mode */
@@ -198,23 +200,24 @@ static void tdi_reset (struct ehci_hcd *ehci)
198 u32 tmp; 200 u32 tmp;
199 201
200 reg_ptr = (u32 __iomem *)(((u8 __iomem *)ehci->regs) + 0x68); 202 reg_ptr = (u32 __iomem *)(((u8 __iomem *)ehci->regs) + 0x68);
201 tmp = readl (reg_ptr); 203 tmp = ehci_readl(ehci, reg_ptr);
202 tmp |= 0x3; 204 tmp |= 0x3;
203 writel (tmp, reg_ptr); 205 ehci_writel(ehci, tmp, reg_ptr);
204} 206}
205 207
206/* reset a non-running (STS_HALT == 1) controller */ 208/* reset a non-running (STS_HALT == 1) controller */
207static int ehci_reset (struct ehci_hcd *ehci) 209static int ehci_reset (struct ehci_hcd *ehci)
208{ 210{
209 int retval; 211 int retval;
210 u32 command = readl (&ehci->regs->command); 212 u32 command = ehci_readl(ehci, &ehci->regs->command);
211 213
212 command |= CMD_RESET; 214 command |= CMD_RESET;
213 dbg_cmd (ehci, "reset", command); 215 dbg_cmd (ehci, "reset", command);
214 writel (command, &ehci->regs->command); 216 ehci_writel(ehci, command, &ehci->regs->command);
215 ehci_to_hcd(ehci)->state = HC_STATE_HALT; 217 ehci_to_hcd(ehci)->state = HC_STATE_HALT;
216 ehci->next_statechange = jiffies; 218 ehci->next_statechange = jiffies;
217 retval = handshake (&ehci->regs->command, CMD_RESET, 0, 250 * 1000); 219 retval = handshake (ehci, &ehci->regs->command,
220 CMD_RESET, 0, 250 * 1000);
218 221
219 if (retval) 222 if (retval)
220 return retval; 223 return retval;
@@ -236,21 +239,21 @@ static void ehci_quiesce (struct ehci_hcd *ehci)
236#endif 239#endif
237 240
238 /* wait for any schedule enables/disables to take effect */ 241 /* wait for any schedule enables/disables to take effect */
239 temp = readl (&ehci->regs->command) << 10; 242 temp = ehci_readl(ehci, &ehci->regs->command) << 10;
240 temp &= STS_ASS | STS_PSS; 243 temp &= STS_ASS | STS_PSS;
241 if (handshake (&ehci->regs->status, STS_ASS | STS_PSS, 244 if (handshake (ehci, &ehci->regs->status, STS_ASS | STS_PSS,
242 temp, 16 * 125) != 0) { 245 temp, 16 * 125) != 0) {
243 ehci_to_hcd(ehci)->state = HC_STATE_HALT; 246 ehci_to_hcd(ehci)->state = HC_STATE_HALT;
244 return; 247 return;
245 } 248 }
246 249
247 /* then disable anything that's still active */ 250 /* then disable anything that's still active */
248 temp = readl (&ehci->regs->command); 251 temp = ehci_readl(ehci, &ehci->regs->command);
249 temp &= ~(CMD_ASE | CMD_IAAD | CMD_PSE); 252 temp &= ~(CMD_ASE | CMD_IAAD | CMD_PSE);
250 writel (temp, &ehci->regs->command); 253 ehci_writel(ehci, temp, &ehci->regs->command);
251 254
252 /* hardware can take 16 microframes to turn off ... */ 255 /* hardware can take 16 microframes to turn off ... */
253 if (handshake (&ehci->regs->status, STS_ASS | STS_PSS, 256 if (handshake (ehci, &ehci->regs->status, STS_ASS | STS_PSS,
254 0, 16 * 125) != 0) { 257 0, 16 * 125) != 0) {
255 ehci_to_hcd(ehci)->state = HC_STATE_HALT; 258 ehci_to_hcd(ehci)->state = HC_STATE_HALT;
256 return; 259 return;
@@ -277,11 +280,11 @@ static void ehci_watchdog (unsigned long param)
277 280
278 /* lost IAA irqs wedge things badly; seen with a vt8235 */ 281 /* lost IAA irqs wedge things badly; seen with a vt8235 */
279 if (ehci->reclaim) { 282 if (ehci->reclaim) {
280 u32 status = readl (&ehci->regs->status); 283 u32 status = ehci_readl(ehci, &ehci->regs->status);
281 if (status & STS_IAA) { 284 if (status & STS_IAA) {
282 ehci_vdbg (ehci, "lost IAA\n"); 285 ehci_vdbg (ehci, "lost IAA\n");
283 COUNT (ehci->stats.lost_iaa); 286 COUNT (ehci->stats.lost_iaa);
284 writel (STS_IAA, &ehci->regs->status); 287 ehci_writel(ehci, STS_IAA, &ehci->regs->status);
285 ehci->reclaim_ready = 1; 288 ehci->reclaim_ready = 1;
286 } 289 }
287 } 290 }
@@ -309,7 +312,7 @@ ehci_shutdown (struct usb_hcd *hcd)
309 (void) ehci_halt (ehci); 312 (void) ehci_halt (ehci);
310 313
311 /* make BIOS/etc use companion controller during reboot */ 314 /* make BIOS/etc use companion controller during reboot */
312 writel (0, &ehci->regs->configured_flag); 315 ehci_writel(ehci, 0, &ehci->regs->configured_flag);
313} 316}
314 317
315static void ehci_port_power (struct ehci_hcd *ehci, int is_on) 318static void ehci_port_power (struct ehci_hcd *ehci, int is_on)
@@ -379,12 +382,13 @@ static void ehci_stop (struct usb_hcd *hcd)
379 ehci_quiesce (ehci); 382 ehci_quiesce (ehci);
380 383
381 ehci_reset (ehci); 384 ehci_reset (ehci);
382 writel (0, &ehci->regs->intr_enable); 385 ehci_writel(ehci, 0, &ehci->regs->intr_enable);
383 spin_unlock_irq(&ehci->lock); 386 spin_unlock_irq(&ehci->lock);
384 387
385 /* let companion controllers work when we aren't */ 388 /* let companion controllers work when we aren't */
386 writel (0, &ehci->regs->configured_flag); 389 ehci_writel(ehci, 0, &ehci->regs->configured_flag);
387 390
391 remove_companion_file(ehci);
388 remove_debug_files (ehci); 392 remove_debug_files (ehci);
389 393
390 /* root hub is shut down separately (first, when possible) */ 394 /* root hub is shut down separately (first, when possible) */
@@ -402,7 +406,8 @@ static void ehci_stop (struct usb_hcd *hcd)
402 ehci->stats.complete, ehci->stats.unlink); 406 ehci->stats.complete, ehci->stats.unlink);
403#endif 407#endif
404 408
405 dbg_status (ehci, "ehci_stop completed", readl (&ehci->regs->status)); 409 dbg_status (ehci, "ehci_stop completed",
410 ehci_readl(ehci, &ehci->regs->status));
406} 411}
407 412
408/* one-time init, only for memory state */ 413/* one-time init, only for memory state */
@@ -428,7 +433,7 @@ static int ehci_init(struct usb_hcd *hcd)
428 return retval; 433 return retval;
429 434
430 /* controllers may cache some of the periodic schedule ... */ 435 /* controllers may cache some of the periodic schedule ... */
431 hcc_params = readl(&ehci->caps->hcc_params); 436 hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
432 if (HCC_ISOC_CACHE(hcc_params)) // full frame cache 437 if (HCC_ISOC_CACHE(hcc_params)) // full frame cache
433 ehci->i_thresh = 8; 438 ehci->i_thresh = 8;
434 else // N microframes cached 439 else // N microframes cached
@@ -496,13 +501,16 @@ static int ehci_run (struct usb_hcd *hcd)
496 u32 temp; 501 u32 temp;
497 u32 hcc_params; 502 u32 hcc_params;
498 503
504 hcd->uses_new_polling = 1;
505 hcd->poll_rh = 0;
506
499 /* EHCI spec section 4.1 */ 507 /* EHCI spec section 4.1 */
500 if ((retval = ehci_reset(ehci)) != 0) { 508 if ((retval = ehci_reset(ehci)) != 0) {
501 ehci_mem_cleanup(ehci); 509 ehci_mem_cleanup(ehci);
502 return retval; 510 return retval;
503 } 511 }
504 writel(ehci->periodic_dma, &ehci->regs->frame_list); 512 ehci_writel(ehci, ehci->periodic_dma, &ehci->regs->frame_list);
505 writel((u32)ehci->async->qh_dma, &ehci->regs->async_next); 513 ehci_writel(ehci, (u32)ehci->async->qh_dma, &ehci->regs->async_next);
506 514
507 /* 515 /*
508 * hcc_params controls whether ehci->regs->segment must (!!!) 516 * hcc_params controls whether ehci->regs->segment must (!!!)
@@ -516,9 +524,9 @@ static int ehci_run (struct usb_hcd *hcd)
516 * Scsi_Host.highmem_io, and so forth. It's readonly to all 524 * Scsi_Host.highmem_io, and so forth. It's readonly to all
517 * host side drivers though. 525 * host side drivers though.
518 */ 526 */
519 hcc_params = readl(&ehci->caps->hcc_params); 527 hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
520 if (HCC_64BIT_ADDR(hcc_params)) { 528 if (HCC_64BIT_ADDR(hcc_params)) {
521 writel(0, &ehci->regs->segment); 529 ehci_writel(ehci, 0, &ehci->regs->segment);
522#if 0 530#if 0
523// this is deeply broken on almost all architectures 531// this is deeply broken on almost all architectures
524 if (!dma_set_mask(hcd->self.controller, DMA_64BIT_MASK)) 532 if (!dma_set_mask(hcd->self.controller, DMA_64BIT_MASK))
@@ -531,7 +539,7 @@ static int ehci_run (struct usb_hcd *hcd)
531 // root hub will detect new devices (why?); NEC doesn't 539 // root hub will detect new devices (why?); NEC doesn't
532 ehci->command &= ~(CMD_LRESET|CMD_IAAD|CMD_PSE|CMD_ASE|CMD_RESET); 540 ehci->command &= ~(CMD_LRESET|CMD_IAAD|CMD_PSE|CMD_ASE|CMD_RESET);
533 ehci->command |= CMD_RUN; 541 ehci->command |= CMD_RUN;
534 writel (ehci->command, &ehci->regs->command); 542 ehci_writel(ehci, ehci->command, &ehci->regs->command);
535 dbg_cmd (ehci, "init", ehci->command); 543 dbg_cmd (ehci, "init", ehci->command);
536 544
537 /* 545 /*
@@ -541,23 +549,25 @@ static int ehci_run (struct usb_hcd *hcd)
541 * and there's no companion controller unless maybe for USB OTG.) 549 * and there's no companion controller unless maybe for USB OTG.)
542 */ 550 */
543 hcd->state = HC_STATE_RUNNING; 551 hcd->state = HC_STATE_RUNNING;
544 writel (FLAG_CF, &ehci->regs->configured_flag); 552 ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
545 readl (&ehci->regs->command); /* unblock posted writes */ 553 ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */
546 554
547 temp = HC_VERSION(readl (&ehci->caps->hc_capbase)); 555 temp = HC_VERSION(ehci_readl(ehci, &ehci->caps->hc_capbase));
548 ehci_info (ehci, 556 ehci_info (ehci,
549 "USB %x.%x started, EHCI %x.%02x, driver %s%s\n", 557 "USB %x.%x started, EHCI %x.%02x, driver %s%s\n",
550 ((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f), 558 ((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f),
551 temp >> 8, temp & 0xff, DRIVER_VERSION, 559 temp >> 8, temp & 0xff, DRIVER_VERSION,
552 ignore_oc ? ", overcurrent ignored" : ""); 560 ignore_oc ? ", overcurrent ignored" : "");
553 561
554 writel (INTR_MASK, &ehci->regs->intr_enable); /* Turn On Interrupts */ 562 ehci_writel(ehci, INTR_MASK,
563 &ehci->regs->intr_enable); /* Turn On Interrupts */
555 564
556 /* GRR this is run-once init(), being done every time the HC starts. 565 /* GRR this is run-once init(), being done every time the HC starts.
557 * So long as they're part of class devices, we can't do it init() 566 * So long as they're part of class devices, we can't do it init()
558 * since the class device isn't created that early. 567 * since the class device isn't created that early.
559 */ 568 */
560 create_debug_files(ehci); 569 create_debug_files(ehci);
570 create_companion_file(ehci);
561 571
562 return 0; 572 return 0;
563} 573}
@@ -567,12 +577,12 @@ static int ehci_run (struct usb_hcd *hcd)
567static irqreturn_t ehci_irq (struct usb_hcd *hcd) 577static irqreturn_t ehci_irq (struct usb_hcd *hcd)
568{ 578{
569 struct ehci_hcd *ehci = hcd_to_ehci (hcd); 579 struct ehci_hcd *ehci = hcd_to_ehci (hcd);
570 u32 status; 580 u32 status, pcd_status = 0;
571 int bh; 581 int bh;
572 582
573 spin_lock (&ehci->lock); 583 spin_lock (&ehci->lock);
574 584
575 status = readl (&ehci->regs->status); 585 status = ehci_readl(ehci, &ehci->regs->status);
576 586
577 /* e.g. cardbus physical eject */ 587 /* e.g. cardbus physical eject */
578 if (status == ~(u32) 0) { 588 if (status == ~(u32) 0) {
@@ -587,8 +597,8 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
587 } 597 }
588 598
589 /* clear (just) interrupts */ 599 /* clear (just) interrupts */
590 writel (status, &ehci->regs->status); 600 ehci_writel(ehci, status, &ehci->regs->status);
591 readl (&ehci->regs->command); /* unblock posted write */ 601 ehci_readl(ehci, &ehci->regs->command); /* unblock posted write */
592 bh = 0; 602 bh = 0;
593 603
594#ifdef EHCI_VERBOSE_DEBUG 604#ifdef EHCI_VERBOSE_DEBUG
@@ -617,13 +627,15 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
617 /* remote wakeup [4.3.1] */ 627 /* remote wakeup [4.3.1] */
618 if (status & STS_PCD) { 628 if (status & STS_PCD) {
619 unsigned i = HCS_N_PORTS (ehci->hcs_params); 629 unsigned i = HCS_N_PORTS (ehci->hcs_params);
630 pcd_status = status;
620 631
621 /* resume root hub? */ 632 /* resume root hub? */
622 if (!(readl(&ehci->regs->command) & CMD_RUN)) 633 if (!(ehci_readl(ehci, &ehci->regs->command) & CMD_RUN))
623 usb_hcd_resume_root_hub(hcd); 634 usb_hcd_resume_root_hub(hcd);
624 635
625 while (i--) { 636 while (i--) {
626 int pstatus = readl (&ehci->regs->port_status [i]); 637 int pstatus = ehci_readl(ehci,
638 &ehci->regs->port_status [i]);
627 639
628 if (pstatus & PORT_OWNER) 640 if (pstatus & PORT_OWNER)
629 continue; 641 continue;
@@ -643,14 +655,15 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
643 /* PCI errors [4.15.2.4] */ 655 /* PCI errors [4.15.2.4] */
644 if (unlikely ((status & STS_FATAL) != 0)) { 656 if (unlikely ((status & STS_FATAL) != 0)) {
645 /* bogus "fatal" IRQs appear on some chips... why? */ 657 /* bogus "fatal" IRQs appear on some chips... why? */
646 status = readl (&ehci->regs->status); 658 status = ehci_readl(ehci, &ehci->regs->status);
647 dbg_cmd (ehci, "fatal", readl (&ehci->regs->command)); 659 dbg_cmd (ehci, "fatal", ehci_readl(ehci,
660 &ehci->regs->command));
648 dbg_status (ehci, "fatal", status); 661 dbg_status (ehci, "fatal", status);
649 if (status & STS_HALT) { 662 if (status & STS_HALT) {
650 ehci_err (ehci, "fatal error\n"); 663 ehci_err (ehci, "fatal error\n");
651dead: 664dead:
652 ehci_reset (ehci); 665 ehci_reset (ehci);
653 writel (0, &ehci->regs->configured_flag); 666 ehci_writel(ehci, 0, &ehci->regs->configured_flag);
654 /* generic layer kills/unlinks all urbs, then 667 /* generic layer kills/unlinks all urbs, then
655 * uses ehci_stop to clean up the rest 668 * uses ehci_stop to clean up the rest
656 */ 669 */
@@ -661,6 +674,8 @@ dead:
661 if (bh) 674 if (bh)
662 ehci_work (ehci); 675 ehci_work (ehci);
663 spin_unlock (&ehci->lock); 676 spin_unlock (&ehci->lock);
677 if (pcd_status & STS_PCD)
678 usb_hcd_poll_rh_status(hcd);
664 return IRQ_HANDLED; 679 return IRQ_HANDLED;
665} 680}
666 681
@@ -873,7 +888,8 @@ done:
873static int ehci_get_frame (struct usb_hcd *hcd) 888static int ehci_get_frame (struct usb_hcd *hcd)
874{ 889{
875 struct ehci_hcd *ehci = hcd_to_ehci (hcd); 890 struct ehci_hcd *ehci = hcd_to_ehci (hcd);
876 return (readl (&ehci->regs->frame_index) >> 3) % ehci->periodic_size; 891 return (ehci_readl(ehci, &ehci->regs->frame_index) >> 3) %
892 ehci->periodic_size;
877} 893}
878 894
879/*-------------------------------------------------------------------------*/ 895/*-------------------------------------------------------------------------*/
@@ -899,7 +915,13 @@ MODULE_LICENSE ("GPL");
899#define PLATFORM_DRIVER ehci_hcd_au1xxx_driver 915#define PLATFORM_DRIVER ehci_hcd_au1xxx_driver
900#endif 916#endif
901 917
902#if !defined(PCI_DRIVER) && !defined(PLATFORM_DRIVER) 918#ifdef CONFIG_PPC_PS3
919#include "ehci-ps3.c"
920#define PS3_SYSTEM_BUS_DRIVER ps3_ehci_sb_driver
921#endif
922
923#if !defined(PCI_DRIVER) && !defined(PLATFORM_DRIVER) && \
924 !defined(PS3_SYSTEM_BUS_DRIVER)
903#error "missing bus glue for ehci-hcd" 925#error "missing bus glue for ehci-hcd"
904#endif 926#endif
905 927
@@ -924,6 +946,20 @@ static int __init ehci_hcd_init(void)
924#ifdef PLATFORM_DRIVER 946#ifdef PLATFORM_DRIVER
925 platform_driver_unregister(&PLATFORM_DRIVER); 947 platform_driver_unregister(&PLATFORM_DRIVER);
926#endif 948#endif
949 return retval;
950 }
951#endif
952
953#ifdef PS3_SYSTEM_BUS_DRIVER
954 retval = ps3_system_bus_driver_register(&PS3_SYSTEM_BUS_DRIVER);
955 if (retval < 0) {
956#ifdef PLATFORM_DRIVER
957 platform_driver_unregister(&PLATFORM_DRIVER);
958#endif
959#ifdef PCI_DRIVER
960 pci_unregister_driver(&PCI_DRIVER);
961#endif
962 return retval;
927 } 963 }
928#endif 964#endif
929 965
@@ -939,6 +975,9 @@ static void __exit ehci_hcd_cleanup(void)
939#ifdef PCI_DRIVER 975#ifdef PCI_DRIVER
940 pci_unregister_driver(&PCI_DRIVER); 976 pci_unregister_driver(&PCI_DRIVER);
941#endif 977#endif
978#ifdef PS3_SYSTEM_BUS_DRIVER
979 ps3_system_bus_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
980#endif
942} 981}
943module_exit(ehci_hcd_cleanup); 982module_exit(ehci_hcd_cleanup);
944 983
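The init/cleanup hunks above extend ehci_hcd_init() so that a failure registering the PS3 system-bus driver unwinds whichever platform and PCI drivers were already registered, and ehci_hcd_cleanup() unregisters all three. A minimal userspace sketch of that register-then-roll-back idiom (function names illustrative, not kernel API):

#include <stdio.h>

/* Stand-ins for the three bus-glue registrations; 0 means success. */
static int reg_platform(void) { puts("platform registered"); return 0; }
static int reg_pci(void)      { puts("pci registered");      return 0; }
static int reg_ps3(void)      { puts("ps3 failed");          return -1; }
static void unreg_platform(void) { puts("platform unregistered"); }
static void unreg_pci(void)      { puts("pci unregistered"); }

static int ehci_like_init(void)
{
        int retval;

        if ((retval = reg_platform()) < 0)
                return retval;
        if ((retval = reg_pci()) < 0) {
                unreg_platform();       /* undo the earlier success */
                return retval;
        }
        if ((retval = reg_ps3()) < 0) {
                /* unwind in reverse order of registration */
                unreg_pci();
                unreg_platform();
                return retval;
        }
        return 0;
}

int main(void) { return ehci_like_init() ? 1 : 0; }
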
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index bfe5f307cba6..0d83c6df1a3b 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -47,7 +47,7 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
47 ehci_quiesce (ehci); 47 ehci_quiesce (ehci);
48 hcd->state = HC_STATE_QUIESCING; 48 hcd->state = HC_STATE_QUIESCING;
49 } 49 }
50 ehci->command = readl (&ehci->regs->command); 50 ehci->command = ehci_readl(ehci, &ehci->regs->command);
51 if (ehci->reclaim) 51 if (ehci->reclaim)
52 ehci->reclaim_ready = 1; 52 ehci->reclaim_ready = 1;
53 ehci_work(ehci); 53 ehci_work(ehci);
@@ -60,7 +60,7 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
60 ehci->bus_suspended = 0; 60 ehci->bus_suspended = 0;
61 while (port--) { 61 while (port--) {
62 u32 __iomem *reg = &ehci->regs->port_status [port]; 62 u32 __iomem *reg = &ehci->regs->port_status [port];
63 u32 t1 = readl (reg) & ~PORT_RWC_BITS; 63 u32 t1 = ehci_readl(ehci, reg) & ~PORT_RWC_BITS;
64 u32 t2 = t1; 64 u32 t2 = t1;
65 65
66 /* keep track of which ports we suspend */ 66 /* keep track of which ports we suspend */
@@ -79,7 +79,7 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
79 if (t1 != t2) { 79 if (t1 != t2) {
80 ehci_vdbg (ehci, "port %d, %08x -> %08x\n", 80 ehci_vdbg (ehci, "port %d, %08x -> %08x\n",
81 port + 1, t1, t2); 81 port + 1, t1, t2);
82 writel (t2, reg); 82 ehci_writel(ehci, t2, reg);
83 } 83 }
84 } 84 }
85 85
@@ -92,8 +92,8 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
92 mask = INTR_MASK; 92 mask = INTR_MASK;
93 if (!device_may_wakeup(&hcd->self.root_hub->dev)) 93 if (!device_may_wakeup(&hcd->self.root_hub->dev))
94 mask &= ~STS_PCD; 94 mask &= ~STS_PCD;
95 writel(mask, &ehci->regs->intr_enable); 95 ehci_writel(ehci, mask, &ehci->regs->intr_enable);
96 readl(&ehci->regs->intr_enable); 96 ehci_readl(ehci, &ehci->regs->intr_enable);
97 97
98 ehci->next_statechange = jiffies + msecs_to_jiffies(10); 98 ehci->next_statechange = jiffies + msecs_to_jiffies(10);
99 spin_unlock_irq (&ehci->lock); 99 spin_unlock_irq (&ehci->lock);
@@ -118,26 +118,26 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
118 * the last user of the controller, not reset/pm hardware keeping 118 * the last user of the controller, not reset/pm hardware keeping
119 * state we gave to it. 119 * state we gave to it.
120 */ 120 */
121 temp = readl(&ehci->regs->intr_enable); 121 temp = ehci_readl(ehci, &ehci->regs->intr_enable);
122 ehci_dbg(ehci, "resume root hub%s\n", temp ? "" : " after power loss"); 122 ehci_dbg(ehci, "resume root hub%s\n", temp ? "" : " after power loss");
123 123
124 /* at least some APM implementations will try to deliver 124 /* at least some APM implementations will try to deliver
125 * IRQs right away, so delay them until we're ready. 125 * IRQs right away, so delay them until we're ready.
126 */ 126 */
127 writel(0, &ehci->regs->intr_enable); 127 ehci_writel(ehci, 0, &ehci->regs->intr_enable);
128 128
129 /* re-init operational registers */ 129 /* re-init operational registers */
130 writel(0, &ehci->regs->segment); 130 ehci_writel(ehci, 0, &ehci->regs->segment);
131 writel(ehci->periodic_dma, &ehci->regs->frame_list); 131 ehci_writel(ehci, ehci->periodic_dma, &ehci->regs->frame_list);
132 writel((u32) ehci->async->qh_dma, &ehci->regs->async_next); 132 ehci_writel(ehci, (u32) ehci->async->qh_dma, &ehci->regs->async_next);
133 133
134 /* restore CMD_RUN, framelist size, and irq threshold */ 134 /* restore CMD_RUN, framelist size, and irq threshold */
135 writel (ehci->command, &ehci->regs->command); 135 ehci_writel(ehci, ehci->command, &ehci->regs->command);
136 136
137 /* manually resume the ports we suspended during bus_suspend() */ 137 /* manually resume the ports we suspended during bus_suspend() */
138 i = HCS_N_PORTS (ehci->hcs_params); 138 i = HCS_N_PORTS (ehci->hcs_params);
139 while (i--) { 139 while (i--) {
140 temp = readl (&ehci->regs->port_status [i]); 140 temp = ehci_readl(ehci, &ehci->regs->port_status [i]);
141 temp &= ~(PORT_RWC_BITS 141 temp &= ~(PORT_RWC_BITS
142 | PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E); 142 | PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E);
143 if (test_bit(i, &ehci->bus_suspended) && 143 if (test_bit(i, &ehci->bus_suspended) &&
@@ -145,20 +145,20 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
145 ehci->reset_done [i] = jiffies + msecs_to_jiffies (20); 145 ehci->reset_done [i] = jiffies + msecs_to_jiffies (20);
146 temp |= PORT_RESUME; 146 temp |= PORT_RESUME;
147 } 147 }
148 writel (temp, &ehci->regs->port_status [i]); 148 ehci_writel(ehci, temp, &ehci->regs->port_status [i]);
149 } 149 }
150 i = HCS_N_PORTS (ehci->hcs_params); 150 i = HCS_N_PORTS (ehci->hcs_params);
151 mdelay (20); 151 mdelay (20);
152 while (i--) { 152 while (i--) {
153 temp = readl (&ehci->regs->port_status [i]); 153 temp = ehci_readl(ehci, &ehci->regs->port_status [i]);
154 if (test_bit(i, &ehci->bus_suspended) && 154 if (test_bit(i, &ehci->bus_suspended) &&
155 (temp & PORT_SUSPEND)) { 155 (temp & PORT_SUSPEND)) {
156 temp &= ~(PORT_RWC_BITS | PORT_RESUME); 156 temp &= ~(PORT_RWC_BITS | PORT_RESUME);
157 writel (temp, &ehci->regs->port_status [i]); 157 ehci_writel(ehci, temp, &ehci->regs->port_status [i]);
158 ehci_vdbg (ehci, "resumed port %d\n", i + 1); 158 ehci_vdbg (ehci, "resumed port %d\n", i + 1);
159 } 159 }
160 } 160 }
161 (void) readl (&ehci->regs->command); 161 (void) ehci_readl(ehci, &ehci->regs->command);
162 162
163 /* maybe re-activate the schedule(s) */ 163 /* maybe re-activate the schedule(s) */
164 temp = 0; 164 temp = 0;
@@ -168,14 +168,14 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
168 temp |= CMD_PSE; 168 temp |= CMD_PSE;
169 if (temp) { 169 if (temp) {
170 ehci->command |= temp; 170 ehci->command |= temp;
171 writel (ehci->command, &ehci->regs->command); 171 ehci_writel(ehci, ehci->command, &ehci->regs->command);
172 } 172 }
173 173
174 ehci->next_statechange = jiffies + msecs_to_jiffies(5); 174 ehci->next_statechange = jiffies + msecs_to_jiffies(5);
175 hcd->state = HC_STATE_RUNNING; 175 hcd->state = HC_STATE_RUNNING;
176 176
177 /* Now we can safely re-enable irqs */ 177 /* Now we can safely re-enable irqs */
178 writel(INTR_MASK, &ehci->regs->intr_enable); 178 ehci_writel(ehci, INTR_MASK, &ehci->regs->intr_enable);
179 179
180 spin_unlock_irq (&ehci->lock); 180 spin_unlock_irq (&ehci->lock);
181 return 0; 181 return 0;
@@ -190,9 +190,107 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
190 190
191/*-------------------------------------------------------------------------*/ 191/*-------------------------------------------------------------------------*/
192 192
193/* Display the ports dedicated to the companion controller */
194static ssize_t show_companion(struct class_device *class_dev, char *buf)
195{
196 struct ehci_hcd *ehci;
197 int nports, index, n;
198 int count = PAGE_SIZE;
199 char *ptr = buf;
200
201 ehci = hcd_to_ehci(bus_to_hcd(class_get_devdata(class_dev)));
202 nports = HCS_N_PORTS(ehci->hcs_params);
203
204 for (index = 0; index < nports; ++index) {
205 if (test_bit(index, &ehci->companion_ports)) {
206 n = scnprintf(ptr, count, "%d\n", index + 1);
207 ptr += n;
208 count -= n;
209 }
210 }
211 return ptr - buf;
212}
213
214/*
215 * Dedicate or undedicate a port to the companion controller.
216 * Syntax is "[-]portnum", where a leading '-' sign means
217 * return control of the port to the EHCI controller.
218 */
219static ssize_t store_companion(struct class_device *class_dev,
220 const char *buf, size_t count)
221{
222 struct ehci_hcd *ehci;
223 int portnum, new_owner, try;
224 u32 __iomem *status_reg;
225 u32 port_status;
226
227 ehci = hcd_to_ehci(bus_to_hcd(class_get_devdata(class_dev)));
228 new_owner = PORT_OWNER; /* Owned by companion */
229 if (sscanf(buf, "%d", &portnum) != 1)
230 return -EINVAL;
231 if (portnum < 0) {
232 portnum = - portnum;
233 new_owner = 0; /* Owned by EHCI */
234 }
235 if (portnum <= 0 || portnum > HCS_N_PORTS(ehci->hcs_params))
236 return -ENOENT;
237 status_reg = &ehci->regs->port_status[--portnum];
238 if (new_owner)
239 set_bit(portnum, &ehci->companion_ports);
240 else
241 clear_bit(portnum, &ehci->companion_ports);
242
243 /*
244 * The controller won't set the OWNER bit if the port is
245 * enabled, so this loop will sometimes require at least two
246 * iterations: one to disable the port and one to set OWNER.
247 */
248
249 for (try = 4; try > 0; --try) {
250 spin_lock_irq(&ehci->lock);
251 port_status = ehci_readl(ehci, status_reg);
252 if ((port_status & PORT_OWNER) == new_owner
253 || (port_status & (PORT_OWNER | PORT_CONNECT))
254 == 0)
255 try = 0;
256 else {
257 port_status ^= PORT_OWNER;
258 port_status &= ~(PORT_PE | PORT_RWC_BITS);
259 ehci_writel(ehci, port_status, status_reg);
260 }
261 spin_unlock_irq(&ehci->lock);
262 if (try > 1)
263 msleep(5);
264 }
265 return count;
266}
267static CLASS_DEVICE_ATTR(companion, 0644, show_companion, store_companion);
268
269static inline void create_companion_file(struct ehci_hcd *ehci)
270{
271 int i;
272
273 /* with integrated TT there is no companion! */
274 if (!ehci_is_TDI(ehci))
275 i = class_device_create_file(ehci_to_hcd(ehci)->self.class_dev,
276 &class_device_attr_companion);
277}
278
279static inline void remove_companion_file(struct ehci_hcd *ehci)
280{
281 /* with integrated TT there is no companion! */
282 if (!ehci_is_TDI(ehci))
283 class_device_remove_file(ehci_to_hcd(ehci)->self.class_dev,
284 &class_device_attr_companion);
285}
286
287
288/*-------------------------------------------------------------------------*/
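The "companion" class-device attribute added above accepts "[-]portnum": a bare port number dedicates that port to the companion controller, a leading '-' returns it to EHCI. A hedged userspace example of driving it; the sysfs path below is an assumption (it depends on how the usb_host class device is named on a given system), so verify it before use:

#include <stdio.h>

int main(void)
{
        /* Assumed path -- check /sys/class/usb_host/ on the target. */
        const char *path = "/sys/class/usb_host/usb_host1/companion";
        FILE *f = fopen(path, "w");

        if (!f) {
                perror(path);
                return 1;
        }
        fprintf(f, "3\n");      /* hand port 3 to the companion; "-3" undoes it */
        return fclose(f) ? 1 : 0;
}
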
289
193static int check_reset_complete ( 290static int check_reset_complete (
194 struct ehci_hcd *ehci, 291 struct ehci_hcd *ehci,
195 int index, 292 int index,
293 u32 __iomem *status_reg,
196 int port_status 294 int port_status
197) { 295) {
198 if (!(port_status & PORT_CONNECT)) { 296 if (!(port_status & PORT_CONNECT)) {
@@ -217,7 +315,7 @@ static int check_reset_complete (
217 // what happens if HCS_N_CC(params) == 0 ? 315 // what happens if HCS_N_CC(params) == 0 ?
218 port_status |= PORT_OWNER; 316 port_status |= PORT_OWNER;
219 port_status &= ~PORT_RWC_BITS; 317 port_status &= ~PORT_RWC_BITS;
220 writel (port_status, &ehci->regs->port_status [index]); 318 ehci_writel(ehci, port_status, status_reg);
221 319
222 } else 320 } else
223 ehci_dbg (ehci, "port %d high speed\n", index + 1); 321 ehci_dbg (ehci, "port %d high speed\n", index + 1);
@@ -268,22 +366,21 @@ ehci_hub_status_data (struct usb_hcd *hcd, char *buf)
268 /* port N changes (bit N)? */ 366 /* port N changes (bit N)? */
269 spin_lock_irqsave (&ehci->lock, flags); 367 spin_lock_irqsave (&ehci->lock, flags);
270 for (i = 0; i < ports; i++) { 368 for (i = 0; i < ports; i++) {
271 temp = readl (&ehci->regs->port_status [i]); 369 temp = ehci_readl(ehci, &ehci->regs->port_status [i]);
272 if (temp & PORT_OWNER) { 370
273 /* don't report this in GetPortStatus */ 371 /*
274 if (temp & PORT_CSC) { 372 * Return status information even for ports with OWNER set.
275 temp &= ~PORT_RWC_BITS; 373 * Otherwise khubd wouldn't see the disconnect event when a
276 temp |= PORT_CSC; 374 * high-speed device is switched over to the companion
277 writel (temp, &ehci->regs->port_status [i]); 375 * controller by the user.
278 } 376 */
279 continue; 377
280 }
281 if (!(temp & PORT_CONNECT)) 378 if (!(temp & PORT_CONNECT))
282 ehci->reset_done [i] = 0; 379 ehci->reset_done [i] = 0;
283 if ((temp & mask) != 0 380 if ((temp & mask) != 0
284 || ((temp & PORT_RESUME) != 0 381 || ((temp & PORT_RESUME) != 0
285 && time_after (jiffies, 382 && time_after_eq(jiffies,
286 ehci->reset_done [i]))) { 383 ehci->reset_done[i]))) {
287 if (i < 7) 384 if (i < 7)
288 buf [0] |= 1 << (i + 1); 385 buf [0] |= 1 << (i + 1);
289 else 386 else
@@ -345,6 +442,7 @@ static int ehci_hub_control (
345) { 442) {
346 struct ehci_hcd *ehci = hcd_to_ehci (hcd); 443 struct ehci_hcd *ehci = hcd_to_ehci (hcd);
347 int ports = HCS_N_PORTS (ehci->hcs_params); 444 int ports = HCS_N_PORTS (ehci->hcs_params);
445 u32 __iomem *status_reg = &ehci->regs->port_status[wIndex - 1];
348 u32 temp, status; 446 u32 temp, status;
349 unsigned long flags; 447 unsigned long flags;
350 int retval = 0; 448 int retval = 0;
@@ -373,18 +471,22 @@ static int ehci_hub_control (
373 if (!wIndex || wIndex > ports) 471 if (!wIndex || wIndex > ports)
374 goto error; 472 goto error;
375 wIndex--; 473 wIndex--;
376 temp = readl (&ehci->regs->port_status [wIndex]); 474 temp = ehci_readl(ehci, status_reg);
377 if (temp & PORT_OWNER) 475
378 break; 476 /*
477 * Even if OWNER is set, so the port is owned by the
478 * companion controller, khubd needs to be able to clear
479 * the port-change status bits (especially
480 * USB_PORT_FEAT_C_CONNECTION).
481 */
379 482
380 switch (wValue) { 483 switch (wValue) {
381 case USB_PORT_FEAT_ENABLE: 484 case USB_PORT_FEAT_ENABLE:
382 writel (temp & ~PORT_PE, 485 ehci_writel(ehci, temp & ~PORT_PE, status_reg);
383 &ehci->regs->port_status [wIndex]);
384 break; 486 break;
385 case USB_PORT_FEAT_C_ENABLE: 487 case USB_PORT_FEAT_C_ENABLE:
386 writel((temp & ~PORT_RWC_BITS) | PORT_PEC, 488 ehci_writel(ehci, (temp & ~PORT_RWC_BITS) | PORT_PEC,
387 &ehci->regs->port_status [wIndex]); 489 status_reg);
388 break; 490 break;
389 case USB_PORT_FEAT_SUSPEND: 491 case USB_PORT_FEAT_SUSPEND:
390 if (temp & PORT_RESET) 492 if (temp & PORT_RESET)
@@ -396,8 +498,8 @@ static int ehci_hub_control (
396 goto error; 498 goto error;
397 /* resume signaling for 20 msec */ 499 /* resume signaling for 20 msec */
398 temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS); 500 temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
399 writel (temp | PORT_RESUME, 501 ehci_writel(ehci, temp | PORT_RESUME,
400 &ehci->regs->port_status [wIndex]); 502 status_reg);
401 ehci->reset_done [wIndex] = jiffies 503 ehci->reset_done [wIndex] = jiffies
402 + msecs_to_jiffies (20); 504 + msecs_to_jiffies (20);
403 } 505 }
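A pattern worth noting in the hunks above: every port-status write first masks out PORT_RWC_BITS. Those change bits are write-one-to-clear, so a plain read-modify-write that left them set would acknowledge pending events as a side effect. A small C model of the hazard (bit values are an illustrative subset of the real PORTSC layout):

#include <stdint.h>
#include <stdio.h>

#define PORT_CSC        (1u << 1)               /* connect status change */
#define PORT_PEC        (1u << 3)               /* port enable change */
#define PORT_RWC_BITS   (PORT_CSC | PORT_PEC)   /* subset for the demo */

/* Model of a write-one-to-clear register: non-RWC bits take the written
 * value; an RWC bit is cleared by writing 1, preserved by writing 0. */
static uint32_t rwc_write(uint32_t reg, uint32_t val)
{
        uint32_t rwc = reg & PORT_RWC_BITS;

        rwc &= ~(val & PORT_RWC_BITS);
        return (val & ~PORT_RWC_BITS) | rwc;
}

int main(void)
{
        uint32_t portsc = PORT_CSC | 0x100;     /* change pending, powered */

        /* naive read-modify-write acknowledges the CSC event by accident */
        uint32_t lost = rwc_write(portsc, portsc | 0x4);
        /* what the driver does: mask RWC bits out before writing back */
        uint32_t kept = rwc_write(portsc, (portsc & ~PORT_RWC_BITS) | 0x4);

        printf("naive %#x, masked %#x\n", lost, kept);  /* 0x104, 0x106 */
        return 0;
}
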
@@ -407,16 +509,17 @@ static int ehci_hub_control (
407 break; 509 break;
408 case USB_PORT_FEAT_POWER: 510 case USB_PORT_FEAT_POWER:
409 if (HCS_PPC (ehci->hcs_params)) 511 if (HCS_PPC (ehci->hcs_params))
410 writel (temp & ~(PORT_RWC_BITS | PORT_POWER), 512 ehci_writel(ehci,
411 &ehci->regs->port_status [wIndex]); 513 temp & ~(PORT_RWC_BITS | PORT_POWER),
514 status_reg);
412 break; 515 break;
413 case USB_PORT_FEAT_C_CONNECTION: 516 case USB_PORT_FEAT_C_CONNECTION:
414 writel((temp & ~PORT_RWC_BITS) | PORT_CSC, 517 ehci_writel(ehci, (temp & ~PORT_RWC_BITS) | PORT_CSC,
415 &ehci->regs->port_status [wIndex]); 518 status_reg);
416 break; 519 break;
417 case USB_PORT_FEAT_C_OVER_CURRENT: 520 case USB_PORT_FEAT_C_OVER_CURRENT:
418 writel((temp & ~PORT_RWC_BITS) | PORT_OCC, 521 ehci_writel(ehci, (temp & ~PORT_RWC_BITS) | PORT_OCC,
419 &ehci->regs->port_status [wIndex]); 522 status_reg);
420 break; 523 break;
421 case USB_PORT_FEAT_C_RESET: 524 case USB_PORT_FEAT_C_RESET:
422 /* GetPortStatus clears reset */ 525 /* GetPortStatus clears reset */
@@ -424,7 +527,7 @@ static int ehci_hub_control (
424 default: 527 default:
425 goto error; 528 goto error;
426 } 529 }
427 readl (&ehci->regs->command); /* unblock posted write */ 530 ehci_readl(ehci, &ehci->regs->command); /* unblock posted write */
428 break; 531 break;
429 case GetHubDescriptor: 532 case GetHubDescriptor:
430 ehci_hub_descriptor (ehci, (struct usb_hub_descriptor *) 533 ehci_hub_descriptor (ehci, (struct usb_hub_descriptor *)
@@ -440,7 +543,7 @@ static int ehci_hub_control (
440 goto error; 543 goto error;
441 wIndex--; 544 wIndex--;
442 status = 0; 545 status = 0;
443 temp = readl (&ehci->regs->port_status [wIndex]); 546 temp = ehci_readl(ehci, status_reg);
444 547
445 // wPortChange bits 548 // wPortChange bits
446 if (temp & PORT_CSC) 549 if (temp & PORT_CSC)
@@ -451,42 +554,55 @@ static int ehci_hub_control (
451 status |= 1 << USB_PORT_FEAT_C_OVER_CURRENT; 554 status |= 1 << USB_PORT_FEAT_C_OVER_CURRENT;
452 555
453 /* whoever resumes must GetPortStatus to complete it!! */ 556 /* whoever resumes must GetPortStatus to complete it!! */
454 if ((temp & PORT_RESUME) 557 if (temp & PORT_RESUME) {
455 && time_after (jiffies,
456 ehci->reset_done [wIndex])) {
457 status |= 1 << USB_PORT_FEAT_C_SUSPEND;
458 ehci->reset_done [wIndex] = 0;
459 558
460 /* stop resume signaling */ 559 /* Remote Wakeup received? */
461 temp = readl (&ehci->regs->port_status [wIndex]); 560 if (!ehci->reset_done[wIndex]) {
462 writel (temp & ~(PORT_RWC_BITS | PORT_RESUME), 561 /* resume signaling for 20 msec */
463 &ehci->regs->port_status [wIndex]); 562 ehci->reset_done[wIndex] = jiffies
464 retval = handshake ( 563 + msecs_to_jiffies(20);
465 &ehci->regs->port_status [wIndex], 564 /* check the port again */
466 PORT_RESUME, 0, 2000 /* 2msec */); 565 mod_timer(&ehci_to_hcd(ehci)->rh_timer,
467 if (retval != 0) { 566 ehci->reset_done[wIndex]);
468 ehci_err (ehci, "port %d resume error %d\n", 567 }
469 wIndex + 1, retval); 568
470 goto error; 569 /* resume completed? */
570 else if (time_after_eq(jiffies,
571 ehci->reset_done[wIndex])) {
572 status |= 1 << USB_PORT_FEAT_C_SUSPEND;
573 ehci->reset_done[wIndex] = 0;
574
575 /* stop resume signaling */
576 temp = ehci_readl(ehci, status_reg);
577 ehci_writel(ehci,
578 temp & ~(PORT_RWC_BITS | PORT_RESUME),
579 status_reg);
580 retval = handshake(ehci, status_reg,
581 PORT_RESUME, 0, 2000 /* 2msec */);
582 if (retval != 0) {
583 ehci_err(ehci,
584 "port %d resume error %d\n",
585 wIndex + 1, retval);
586 goto error;
587 }
588 temp &= ~(PORT_SUSPEND|PORT_RESUME|(3<<10));
471 } 589 }
472 temp &= ~(PORT_SUSPEND|PORT_RESUME|(3<<10));
473 } 590 }
474 591
475 /* whoever resets must GetPortStatus to complete it!! */ 592 /* whoever resets must GetPortStatus to complete it!! */
476 if ((temp & PORT_RESET) 593 if ((temp & PORT_RESET)
477 && time_after (jiffies, 594 && time_after_eq(jiffies,
478 ehci->reset_done [wIndex])) { 595 ehci->reset_done[wIndex])) {
479 status |= 1 << USB_PORT_FEAT_C_RESET; 596 status |= 1 << USB_PORT_FEAT_C_RESET;
480 ehci->reset_done [wIndex] = 0; 597 ehci->reset_done [wIndex] = 0;
481 598
482 /* force reset to complete */ 599 /* force reset to complete */
483 writel (temp & ~(PORT_RWC_BITS | PORT_RESET), 600 ehci_writel(ehci, temp & ~(PORT_RWC_BITS | PORT_RESET),
484 &ehci->regs->port_status [wIndex]); 601 status_reg);
485 /* REVISIT: some hardware needs 550+ usec to clear 602 /* REVISIT: some hardware needs 550+ usec to clear
486 * this bit; seems too long to spin routinely... 603 * this bit; seems too long to spin routinely...
487 */ 604 */
488 retval = handshake ( 605 retval = handshake(ehci, status_reg,
489 &ehci->regs->port_status [wIndex],
490 PORT_RESET, 0, 750); 606 PORT_RESET, 0, 750);
491 if (retval != 0) { 607 if (retval != 0) {
492 ehci_err (ehci, "port %d reset error %d\n", 608 ehci_err (ehci, "port %d reset error %d\n",
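The GetPortStatus rework above separates two cases on PORT_RESUME: a fresh remote-wakeup event (reset_done not yet armed) arms a 20 ms deadline and re-polls via the root-hub timer, while an expired deadline means resume signaling can be stopped. It also switches time_after() to time_after_eq(), closing the one-jiffy window where a resume completing exactly on the deadline was missed. A userspace analogue of the kernel's wrap-safe deadline test:

#include <stdio.h>

typedef unsigned long jiffies_t;

/* Same semantics as the kernel macro: true when a is at or after b,
 * even across counter wraparound, via signed subtraction. */
static int time_after_eq(jiffies_t a, jiffies_t b)
{
        return (long)(a - b) >= 0;
}

int main(void)
{
        jiffies_t now = 10, deadline = 10;

        /* time_after(now, deadline) is still false here, which is the
         * window the _eq variant closes */
        printf("at deadline: %d\n", time_after_eq(now, deadline));      /* 1 */
        printf("near wrap:   %d\n", time_after_eq(5, (jiffies_t)-3));   /* 1 */
        return 0;
}
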
@@ -495,28 +611,41 @@ static int ehci_hub_control (
495 } 611 }
496 612
497 /* see what we found out */ 613 /* see what we found out */
498 temp = check_reset_complete (ehci, wIndex, 614 temp = check_reset_complete (ehci, wIndex, status_reg,
499 readl (&ehci->regs->port_status [wIndex])); 615 ehci_readl(ehci, status_reg));
500 } 616 }
501 617
502 // don't show wPortStatus if it's owned by a companion hc 618 /* transfer dedicated ports to the companion hc */
503 if (!(temp & PORT_OWNER)) { 619 if ((temp & PORT_CONNECT) &&
504 if (temp & PORT_CONNECT) { 620 test_bit(wIndex, &ehci->companion_ports)) {
505 status |= 1 << USB_PORT_FEAT_CONNECTION; 621 temp &= ~PORT_RWC_BITS;
506 // status may be from integrated TT 622 temp |= PORT_OWNER;
507 status |= ehci_port_speed(ehci, temp); 623 ehci_writel(ehci, temp, status_reg);
508 } 624 ehci_dbg(ehci, "port %d --> companion\n", wIndex + 1);
509 if (temp & PORT_PE) 625 temp = ehci_readl(ehci, status_reg);
510 status |= 1 << USB_PORT_FEAT_ENABLE; 626 }
511 if (temp & (PORT_SUSPEND|PORT_RESUME)) 627
512 status |= 1 << USB_PORT_FEAT_SUSPEND; 628 /*
513 if (temp & PORT_OC) 629 * Even if OWNER is set, there's no harm letting khubd
514 status |= 1 << USB_PORT_FEAT_OVER_CURRENT; 630 * see the wPortStatus values (they should all be 0 except
515 if (temp & PORT_RESET) 631 * for PORT_POWER anyway).
516 status |= 1 << USB_PORT_FEAT_RESET; 632 */
517 if (temp & PORT_POWER) 633
518 status |= 1 << USB_PORT_FEAT_POWER; 634 if (temp & PORT_CONNECT) {
635 status |= 1 << USB_PORT_FEAT_CONNECTION;
636 // status may be from integrated TT
637 status |= ehci_port_speed(ehci, temp);
519 } 638 }
639 if (temp & PORT_PE)
640 status |= 1 << USB_PORT_FEAT_ENABLE;
641 if (temp & (PORT_SUSPEND|PORT_RESUME))
642 status |= 1 << USB_PORT_FEAT_SUSPEND;
643 if (temp & PORT_OC)
644 status |= 1 << USB_PORT_FEAT_OVER_CURRENT;
645 if (temp & PORT_RESET)
646 status |= 1 << USB_PORT_FEAT_RESET;
647 if (temp & PORT_POWER)
648 status |= 1 << USB_PORT_FEAT_POWER;
520 649
521#ifndef EHCI_VERBOSE_DEBUG 650#ifndef EHCI_VERBOSE_DEBUG
522 if (status & ~0xffff) /* only if wPortChange is interesting */ 651 if (status & ~0xffff) /* only if wPortChange is interesting */
@@ -541,7 +670,7 @@ static int ehci_hub_control (
541 if (!wIndex || wIndex > ports) 670 if (!wIndex || wIndex > ports)
542 goto error; 671 goto error;
543 wIndex--; 672 wIndex--;
544 temp = readl (&ehci->regs->port_status [wIndex]); 673 temp = ehci_readl(ehci, status_reg);
545 if (temp & PORT_OWNER) 674 if (temp & PORT_OWNER)
546 break; 675 break;
547 676
@@ -555,13 +684,12 @@ static int ehci_hub_control (
555 goto error; 684 goto error;
556 if (device_may_wakeup(&hcd->self.root_hub->dev)) 685 if (device_may_wakeup(&hcd->self.root_hub->dev))
557 temp |= PORT_WAKE_BITS; 686 temp |= PORT_WAKE_BITS;
558 writel (temp | PORT_SUSPEND, 687 ehci_writel(ehci, temp | PORT_SUSPEND, status_reg);
559 &ehci->regs->port_status [wIndex]);
560 break; 688 break;
561 case USB_PORT_FEAT_POWER: 689 case USB_PORT_FEAT_POWER:
562 if (HCS_PPC (ehci->hcs_params)) 690 if (HCS_PPC (ehci->hcs_params))
563 writel (temp | PORT_POWER, 691 ehci_writel(ehci, temp | PORT_POWER,
564 &ehci->regs->port_status [wIndex]); 692 status_reg);
565 break; 693 break;
566 case USB_PORT_FEAT_RESET: 694 case USB_PORT_FEAT_RESET:
567 if (temp & PORT_RESUME) 695 if (temp & PORT_RESUME)
@@ -589,7 +717,7 @@ static int ehci_hub_control (
589 ehci->reset_done [wIndex] = jiffies 717 ehci->reset_done [wIndex] = jiffies
590 + msecs_to_jiffies (50); 718 + msecs_to_jiffies (50);
591 } 719 }
592 writel (temp, &ehci->regs->port_status [wIndex]); 720 ehci_writel(ehci, temp, status_reg);
593 break; 721 break;
594 722
595 /* For downstream facing ports (these): one hub port is put 723 /* For downstream facing ports (these): one hub port is put
@@ -604,13 +732,13 @@ static int ehci_hub_control (
604 ehci_quiesce(ehci); 732 ehci_quiesce(ehci);
605 ehci_halt(ehci); 733 ehci_halt(ehci);
606 temp |= selector << 16; 734 temp |= selector << 16;
607 writel (temp, &ehci->regs->port_status [wIndex]); 735 ehci_writel(ehci, temp, status_reg);
608 break; 736 break;
609 737
610 default: 738 default:
611 goto error; 739 goto error;
612 } 740 }
613 readl (&ehci->regs->command); /* unblock posted writes */ 741 ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */
614 break; 742 break;
615 743
616 default: 744 default:
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 4bc7970ba3ef..12edc723ec73 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -38,7 +38,7 @@ static int ehci_pci_reinit(struct ehci_hcd *ehci, struct pci_dev *pdev)
38 if ((temp & (3 << 13)) == (1 << 13)) { 38 if ((temp & (3 << 13)) == (1 << 13)) {
39 temp &= 0x1fff; 39 temp &= 0x1fff;
40 ehci->debug = ehci_to_hcd(ehci)->regs + temp; 40 ehci->debug = ehci_to_hcd(ehci)->regs + temp;
41 temp = readl(&ehci->debug->control); 41 temp = ehci_readl(ehci, &ehci->debug->control);
42 ehci_info(ehci, "debug port %d%s\n", 42 ehci_info(ehci, "debug port %d%s\n",
43 HCS_DEBUG_PORT(ehci->hcs_params), 43 HCS_DEBUG_PORT(ehci->hcs_params),
44 (temp & DBGP_ENABLED) 44 (temp & DBGP_ENABLED)
@@ -71,8 +71,24 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
71 u32 temp; 71 u32 temp;
72 int retval; 72 int retval;
73 73
74 switch (pdev->vendor) {
75 case PCI_VENDOR_ID_TOSHIBA_2:
76 /* celleb's companion chip */
77 if (pdev->device == 0x01b5) {
78#ifdef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO
79 ehci->big_endian_mmio = 1;
80#else
81 ehci_warn(ehci,
82 "unsupported big endian Toshiba quirk\n");
83#endif
84 }
85 break;
86 }
87
74 ehci->caps = hcd->regs; 88 ehci->caps = hcd->regs;
75 ehci->regs = hcd->regs + HC_LENGTH(readl(&ehci->caps->hc_capbase)); 89 ehci->regs = hcd->regs +
90 HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase));
91
76 dbg_hcs_params(ehci, "reset"); 92 dbg_hcs_params(ehci, "reset");
77 dbg_hcc_params(ehci, "reset"); 93 dbg_hcc_params(ehci, "reset");
78 94
@@ -101,7 +117,7 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
101 } 117 }
102 118
103 /* cache this readonly data; minimize chip reads */ 119 /* cache this readonly data; minimize chip reads */
104 ehci->hcs_params = readl(&ehci->caps->hcs_params); 120 ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
105 121
106 retval = ehci_halt(ehci); 122 retval = ehci_halt(ehci);
107 if (retval) 123 if (retval)
@@ -235,8 +251,8 @@ static int ehci_pci_suspend(struct usb_hcd *hcd, pm_message_t message)
235 rc = -EINVAL; 251 rc = -EINVAL;
236 goto bail; 252 goto bail;
237 } 253 }
238 writel (0, &ehci->regs->intr_enable); 254 ehci_writel(ehci, 0, &ehci->regs->intr_enable);
239 (void)readl(&ehci->regs->intr_enable); 255 (void)ehci_readl(ehci, &ehci->regs->intr_enable);
240 256
241 /* make sure snapshot being resumed re-enumerates everything */ 257 /* make sure snapshot being resumed re-enumerates everything */
242 if (message.event == PM_EVENT_PRETHAW) { 258 if (message.event == PM_EVENT_PRETHAW) {
@@ -270,13 +286,13 @@ static int ehci_pci_resume(struct usb_hcd *hcd)
270 /* If CF is still set, we maintained PCI Vaux power. 286 /* If CF is still set, we maintained PCI Vaux power.
271 * Just undo the effect of ehci_pci_suspend(). 287 * Just undo the effect of ehci_pci_suspend().
272 */ 288 */
273 if (readl(&ehci->regs->configured_flag) == FLAG_CF) { 289 if (ehci_readl(ehci, &ehci->regs->configured_flag) == FLAG_CF) {
274 int mask = INTR_MASK; 290 int mask = INTR_MASK;
275 291
276 if (!device_may_wakeup(&hcd->self.root_hub->dev)) 292 if (!device_may_wakeup(&hcd->self.root_hub->dev))
277 mask &= ~STS_PCD; 293 mask &= ~STS_PCD;
278 writel(mask, &ehci->regs->intr_enable); 294 ehci_writel(ehci, mask, &ehci->regs->intr_enable);
279 readl(&ehci->regs->intr_enable); 295 ehci_readl(ehci, &ehci->regs->intr_enable);
280 return 0; 296 return 0;
281 } 297 }
282 298
@@ -300,9 +316,9 @@ static int ehci_pci_resume(struct usb_hcd *hcd)
300 /* here we "know" root ports should always stay powered */ 316 /* here we "know" root ports should always stay powered */
301 ehci_port_power(ehci, 1); 317 ehci_port_power(ehci, 1);
302 318
303 writel(ehci->command, &ehci->regs->command); 319 ehci_writel(ehci, ehci->command, &ehci->regs->command);
304 writel(FLAG_CF, &ehci->regs->configured_flag); 320 ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
305 readl(&ehci->regs->command); /* unblock posted writes */ 321 ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */
306 322
307 hcd->state = HC_STATE_SUSPENDED; 323 hcd->state = HC_STATE_SUSPENDED;
308 return 0; 324 return 0;
diff --git a/drivers/usb/host/ehci-ps3.c b/drivers/usb/host/ehci-ps3.c
new file mode 100644
index 000000000000..4d781a2a9807
--- /dev/null
+++ b/drivers/usb/host/ehci-ps3.c
@@ -0,0 +1,193 @@
1/*
2 * PS3 EHCI Host Controller driver
3 *
4 * Copyright (C) 2006 Sony Computer Entertainment Inc.
5 * Copyright 2006 Sony Corp.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#include <asm/ps3.h>
22
23static int ps3_ehci_hc_reset(struct usb_hcd *hcd)
24{
25 int result;
26 struct ehci_hcd *ehci = hcd_to_ehci(hcd);
27
28 ehci->big_endian_mmio = 1;
29
30 ehci->caps = hcd->regs;
31 ehci->regs = hcd->regs + HC_LENGTH(ehci_readl(ehci,
32 &ehci->caps->hc_capbase));
33
34 dbg_hcs_params(ehci, "reset");
35 dbg_hcc_params(ehci, "reset");
36
37 ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
38
39 result = ehci_halt(ehci);
40
41 if (result)
42 return result;
43
44 result = ehci_init(hcd);
45
46 if (result)
47 return result;
48
49 ehci_port_power(ehci, 0);
50
51 return result;
52}
53
54static const struct hc_driver ps3_ehci_hc_driver = {
55 .description = hcd_name,
56 .product_desc = "PS3 EHCI Host Controller",
57 .hcd_priv_size = sizeof(struct ehci_hcd),
58 .irq = ehci_irq,
59 .flags = HCD_MEMORY | HCD_USB2,
60 .reset = ps3_ehci_hc_reset,
61 .start = ehci_run,
62 .stop = ehci_stop,
63 .shutdown = ehci_shutdown,
64 .urb_enqueue = ehci_urb_enqueue,
65 .urb_dequeue = ehci_urb_dequeue,
66 .endpoint_disable = ehci_endpoint_disable,
67 .get_frame_number = ehci_get_frame,
68 .hub_status_data = ehci_hub_status_data,
69 .hub_control = ehci_hub_control,
70#if defined(CONFIG_PM)
71 .bus_suspend = ehci_bus_suspend,
72 .bus_resume = ehci_bus_resume,
73#endif
74};
75
76#if !defined(DEBUG)
77#undef dev_dbg
78static inline int __attribute__ ((format (printf, 2, 3))) dev_dbg(
79 const struct device *_dev, const char *fmt, ...) {return 0;}
80#endif
81
82
83static int ps3_ehci_sb_probe(struct ps3_system_bus_device *dev)
84{
85 int result;
86 struct usb_hcd *hcd;
87 unsigned int virq;
88 static u64 dummy_mask = DMA_32BIT_MASK;
89
90 if (usb_disabled()) {
91 result = -ENODEV;
92 goto fail_start;
93 }
94
95 result = ps3_mmio_region_create(dev->m_region);
96
97 if (result) {
98 dev_dbg(&dev->core, "%s:%d: ps3_map_mmio_region failed\n",
99 __func__, __LINE__);
100 result = -EPERM;
101 goto fail_mmio;
102 }
103
104 dev_dbg(&dev->core, "%s:%d: mmio mapped_addr %lxh\n", __func__,
105 __LINE__, dev->m_region->lpar_addr);
106
107 result = ps3_alloc_io_irq(PS3_BINDING_CPU_ANY, dev->interrupt_id, &virq);
108
109 if (result) {
110 dev_dbg(&dev->core, "%s:%d: ps3_construct_io_irq(%d) failed.\n",
111 __func__, __LINE__, virq);
112 result = -EPERM;
113 goto fail_irq;
114 }
115
116 dev->core.power.power_state = PMSG_ON;
117 dev->core.dma_mask = &dummy_mask; /* FIXME: for improper usb code */
118
119 hcd = usb_create_hcd(&ps3_ehci_hc_driver, &dev->core, dev->core.bus_id);
120
121 if (!hcd) {
122 dev_dbg(&dev->core, "%s:%d: usb_create_hcd failed\n", __func__,
123 __LINE__);
124 result = -ENOMEM;
125 goto fail_create_hcd;
126 }
127
128 hcd->rsrc_start = dev->m_region->lpar_addr;
129 hcd->rsrc_len = dev->m_region->len;
130 hcd->regs = ioremap(dev->m_region->lpar_addr, dev->m_region->len);
131
132 if (!hcd->regs) {
133 dev_dbg(&dev->core, "%s:%d: ioremap failed\n", __func__,
134 __LINE__);
135 result = -EPERM;
136 goto fail_ioremap;
137 }
138
139 dev_dbg(&dev->core, "%s:%d: hcd->rsrc_start %lxh\n", __func__, __LINE__,
140 (unsigned long)hcd->rsrc_start);
141 dev_dbg(&dev->core, "%s:%d: hcd->rsrc_len %lxh\n", __func__, __LINE__,
142 (unsigned long)hcd->rsrc_len);
143 dev_dbg(&dev->core, "%s:%d: hcd->regs %lxh\n", __func__, __LINE__,
144 (unsigned long)hcd->regs);
145 dev_dbg(&dev->core, "%s:%d: virq %lu\n", __func__, __LINE__,
146 (unsigned long)virq);
147
148 ps3_system_bus_set_driver_data(dev, hcd);
149
150 result = usb_add_hcd(hcd, virq, IRQF_DISABLED);
151
152 if (result) {
153 dev_dbg(&dev->core, "%s:%d: usb_add_hcd failed (%d)\n",
154 __func__, __LINE__, result);
155 goto fail_add_hcd;
156 }
157
158 return result;
159
160fail_add_hcd:
161 iounmap(hcd->regs);
162fail_ioremap:
163 usb_put_hcd(hcd);
164fail_create_hcd:
165 ps3_free_io_irq(virq);
166fail_irq:
167 ps3_free_mmio_region(dev->m_region);
168fail_mmio:
169fail_start:
170 return result;
171}
172
173static int ps3_ehci_sb_remove(struct ps3_system_bus_device *dev)
174{
175 struct usb_hcd *hcd =
176 (struct usb_hcd *)ps3_system_bus_get_driver_data(dev);
177
178 usb_put_hcd(hcd);
179 ps3_system_bus_set_driver_data(dev, NULL);
180
181 return 0;
182}
183
184MODULE_ALIAS("ps3-ehci");
185
186static struct ps3_system_bus_driver ps3_ehci_sb_driver = {
187 .match_id = PS3_MATCH_ID_EHCI,
188 .core = {
189 .name = "ps3-ehci-driver",
190 },
191 .probe = ps3_ehci_sb_probe,
192 .remove = ps3_ehci_sb_remove,
193};
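ps3_ehci_sb_probe() above uses the standard kernel error-unwind ladder: each acquisition that fails jumps to a label that releases only what was obtained before it, with the labels laid out in reverse order of acquisition. A compact userspace analogue (malloc/free standing in for the mmio/irq/hcd resources):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        int result = 0;
        char *mmio = NULL, *irq = NULL, *hcd = NULL;

        if (!(mmio = malloc(16))) { result = 1; goto fail_mmio; }
        if (!(irq  = malloc(16))) { result = 1; goto fail_irq; }
        if (!(hcd  = malloc(16))) { result = 1; goto fail_hcd; }

        puts("probe ok");
        free(hcd);
        free(irq);
        free(mmio);
        return 0;

        /* unwind in reverse order of acquisition; each label frees only
         * what is known to be valid at that point */
fail_hcd:
        free(irq);
fail_irq:
        free(mmio);
fail_mmio:
        return result;
}
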
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 62e46dc60e86..e7fbbd00e7cd 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -789,13 +789,14 @@ static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
789 head = ehci->async; 789 head = ehci->async;
790 timer_action_done (ehci, TIMER_ASYNC_OFF); 790 timer_action_done (ehci, TIMER_ASYNC_OFF);
791 if (!head->qh_next.qh) { 791 if (!head->qh_next.qh) {
792 u32 cmd = readl (&ehci->regs->command); 792 u32 cmd = ehci_readl(ehci, &ehci->regs->command);
793 793
794 if (!(cmd & CMD_ASE)) { 794 if (!(cmd & CMD_ASE)) {
795 /* in case a clear of CMD_ASE didn't take yet */ 795 /* in case a clear of CMD_ASE didn't take yet */
796 (void) handshake (&ehci->regs->status, STS_ASS, 0, 150); 796 (void)handshake(ehci, &ehci->regs->status,
797 STS_ASS, 0, 150);
797 cmd |= CMD_ASE | CMD_RUN; 798 cmd |= CMD_ASE | CMD_RUN;
798 writel (cmd, &ehci->regs->command); 799 ehci_writel(ehci, cmd, &ehci->regs->command);
799 ehci_to_hcd(ehci)->state = HC_STATE_RUNNING; 800 ehci_to_hcd(ehci)->state = HC_STATE_RUNNING;
800 /* posted write need not be known to HC yet ... */ 801 /* posted write need not be known to HC yet ... */
801 } 802 }
@@ -1007,7 +1008,7 @@ static void end_unlink_async (struct ehci_hcd *ehci)
1007 1008
1008static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh) 1009static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
1009{ 1010{
1010 int cmd = readl (&ehci->regs->command); 1011 int cmd = ehci_readl(ehci, &ehci->regs->command);
1011 struct ehci_qh *prev; 1012 struct ehci_qh *prev;
1012 1013
1013#ifdef DEBUG 1014#ifdef DEBUG
@@ -1025,7 +1026,8 @@ static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
1025 if (ehci_to_hcd(ehci)->state != HC_STATE_HALT 1026 if (ehci_to_hcd(ehci)->state != HC_STATE_HALT
1026 && !ehci->reclaim) { 1027 && !ehci->reclaim) {
1027 /* ... and CMD_IAAD clear */ 1028 /* ... and CMD_IAAD clear */
1028 writel (cmd & ~CMD_ASE, &ehci->regs->command); 1029 ehci_writel(ehci, cmd & ~CMD_ASE,
1030 &ehci->regs->command);
1029 wmb (); 1031 wmb ();
1030 // handshake later, if we need to 1032 // handshake later, if we need to
1031 timer_action_done (ehci, TIMER_ASYNC_OFF); 1033 timer_action_done (ehci, TIMER_ASYNC_OFF);
@@ -1054,8 +1056,8 @@ static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
1054 1056
1055 ehci->reclaim_ready = 0; 1057 ehci->reclaim_ready = 0;
1056 cmd |= CMD_IAAD; 1058 cmd |= CMD_IAAD;
1057 writel (cmd, &ehci->regs->command); 1059 ehci_writel(ehci, cmd, &ehci->regs->command);
1058 (void) readl (&ehci->regs->command); 1060 (void)ehci_readl(ehci, &ehci->regs->command);
1059 timer_action (ehci, TIMER_IAA_WATCHDOG); 1061 timer_action (ehci, TIMER_IAA_WATCHDOG);
1060} 1062}
1061 1063
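handshake() now takes the ehci pointer so it can use ehci_readl(); qh_link_async() uses it above to wait out a previous CMD_ASE clear before restarting the async schedule. A hedged userspace sketch of the same poll-with-timeout shape (in the driver the read is an MMIO ehci_readl() and each iteration udelay(1)s; here a plain variable stands in):

#include <stdint.h>
#include <stdio.h>

/* Poll until (*reg & mask) == done or usec iterations elapse.
 * Returns 0 on success, -1 on timeout. */
static int handshake(volatile uint32_t *reg, uint32_t mask,
                     uint32_t done, int usec)
{
        do {
                if ((*reg & mask) == done)
                        return 0;
                /* udelay(1) would go here */
        } while (--usec > 0);
        return -1;
}

int main(void)
{
        uint32_t status = 0x8000;       /* pretend STS_ASS is bit 15 */

        printf("wait set:   %d\n", handshake(&status, 0x8000, 0x8000, 150));
        printf("wait clear: %d\n", handshake(&status, 0x8000, 0, 150));
        return 0;
}
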
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index 65c402a0fa7a..7b5ae7111f23 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -433,20 +433,20 @@ static int enable_periodic (struct ehci_hcd *ehci)
433 /* did clearing PSE take effect yet? 433 /* did clearing PSE take effect yet?
434 * takes effect only at frame boundaries... 434 * takes effect only at frame boundaries...
435 */ 435 */
436 status = handshake (&ehci->regs->status, STS_PSS, 0, 9 * 125); 436 status = handshake(ehci, &ehci->regs->status, STS_PSS, 0, 9 * 125);
437 if (status != 0) { 437 if (status != 0) {
438 ehci_to_hcd(ehci)->state = HC_STATE_HALT; 438 ehci_to_hcd(ehci)->state = HC_STATE_HALT;
439 return status; 439 return status;
440 } 440 }
441 441
442 cmd = readl (&ehci->regs->command) | CMD_PSE; 442 cmd = ehci_readl(ehci, &ehci->regs->command) | CMD_PSE;
443 writel (cmd, &ehci->regs->command); 443 ehci_writel(ehci, cmd, &ehci->regs->command);
444 /* posted write ... PSS happens later */ 444 /* posted write ... PSS happens later */
445 ehci_to_hcd(ehci)->state = HC_STATE_RUNNING; 445 ehci_to_hcd(ehci)->state = HC_STATE_RUNNING;
446 446
447 /* make sure ehci_work scans these */ 447 /* make sure ehci_work scans these */
448 ehci->next_uframe = readl (&ehci->regs->frame_index) 448 ehci->next_uframe = ehci_readl(ehci, &ehci->regs->frame_index)
449 % (ehci->periodic_size << 3); 449 % (ehci->periodic_size << 3);
450 return 0; 450 return 0;
451} 451}
452 452
@@ -458,14 +458,14 @@ static int disable_periodic (struct ehci_hcd *ehci)
458 /* did setting PSE not take effect yet? 458 /* did setting PSE not take effect yet?
459 * takes effect only at frame boundaries... 459 * takes effect only at frame boundaries...
460 */ 460 */
461 status = handshake (&ehci->regs->status, STS_PSS, STS_PSS, 9 * 125); 461 status = handshake(ehci, &ehci->regs->status, STS_PSS, STS_PSS, 9 * 125);
462 if (status != 0) { 462 if (status != 0) {
463 ehci_to_hcd(ehci)->state = HC_STATE_HALT; 463 ehci_to_hcd(ehci)->state = HC_STATE_HALT;
464 return status; 464 return status;
465 } 465 }
466 466
467 cmd = readl (&ehci->regs->command) & ~CMD_PSE; 467 cmd = ehci_readl(ehci, &ehci->regs->command) & ~CMD_PSE;
468 writel (cmd, &ehci->regs->command); 468 ehci_writel(ehci, cmd, &ehci->regs->command);
469 /* posted write ... */ 469 /* posted write ... */
470 470
471 ehci->next_uframe = -1; 471 ehci->next_uframe = -1;
@@ -1336,7 +1336,7 @@ iso_stream_schedule (
1336 goto fail; 1336 goto fail;
1337 } 1337 }
1338 1338
1339 now = readl (&ehci->regs->frame_index) % mod; 1339 now = ehci_readl(ehci, &ehci->regs->frame_index) % mod;
1340 1340
1341 /* when's the last uframe this urb could start? */ 1341 /* when's the last uframe this urb could start? */
1342 max = now + mod; 1342 max = now + mod;
@@ -2088,7 +2088,7 @@ scan_periodic (struct ehci_hcd *ehci)
2088 */ 2088 */
2089 now_uframe = ehci->next_uframe; 2089 now_uframe = ehci->next_uframe;
2090 if (HC_IS_RUNNING (ehci_to_hcd(ehci)->state)) 2090 if (HC_IS_RUNNING (ehci_to_hcd(ehci)->state))
2091 clock = readl (&ehci->regs->frame_index); 2091 clock = ehci_readl(ehci, &ehci->regs->frame_index);
2092 else 2092 else
2093 clock = now_uframe + mod - 1; 2093 clock = now_uframe + mod - 1;
2094 clock %= mod; 2094 clock %= mod;
@@ -2213,7 +2213,7 @@ restart:
2213 if (!HC_IS_RUNNING (ehci_to_hcd(ehci)->state)) 2213 if (!HC_IS_RUNNING (ehci_to_hcd(ehci)->state))
2214 break; 2214 break;
2215 ehci->next_uframe = now_uframe; 2215 ehci->next_uframe = now_uframe;
2216 now = readl (&ehci->regs->frame_index) % mod; 2216 now = ehci_readl(ehci, &ehci->regs->frame_index) % mod;
2217 if (now_uframe == now) 2217 if (now_uframe == now)
2218 break; 2218 break;
2219 2219
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index 74dbc6c8228f..46fa57a520d0 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -74,7 +74,11 @@ struct ehci_hcd { /* one per controller */
74 74
75 /* per root hub port */ 75 /* per root hub port */
76 unsigned long reset_done [EHCI_MAX_ROOT_PORTS]; 76 unsigned long reset_done [EHCI_MAX_ROOT_PORTS];
77 unsigned long bus_suspended; 77 /* bit vectors (one bit per port) */
78 unsigned long bus_suspended; /* which ports were
79 already suspended at the start of a bus suspend */
80 unsigned long companion_ports; /* which ports are
81 dedicated to the companion controller */
78 82
79 /* per-HC memory pools (could be per-bus, but ...) */ 83 /* per-HC memory pools (could be per-bus, but ...) */
80 struct dma_pool *qh_pool; /* qh per active urb */ 84 struct dma_pool *qh_pool; /* qh per active urb */
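As the new comment notes, bus_suspended and companion_ports are one-bit-per-port vectors in a single unsigned long, manipulated with the kernel's set_bit()/clear_bit()/test_bit() (which is why EHCI_MAX_ROOT_PORTS must fit in a long). A userspace sketch of the same bookkeeping, with non-atomic stand-ins for the kernel bitops:

#include <stdio.h>

/* Non-atomic stand-ins for the kernel's set_bit/clear_bit/test_bit. */
static void set_bit(int nr, unsigned long *addr)   { *addr |=  (1UL << nr); }
static void clear_bit(int nr, unsigned long *addr) { *addr &= ~(1UL << nr); }
static int  test_bit(int nr, const unsigned long *addr)
{
        return (*addr >> nr) & 1;
}

int main(void)
{
        unsigned long companion_ports = 0;

        set_bit(2, &companion_ports);   /* dedicate port 3 (0-based bit 2) */
        printf("port 3 dedicated? %d\n", test_bit(2, &companion_ports));
        clear_bit(2, &companion_ports); /* hand it back to EHCI */
        printf("port 3 dedicated? %d\n", test_bit(2, &companion_ports));
        return 0;
}
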
@@ -92,6 +96,7 @@ struct ehci_hcd { /* one per controller */
92 unsigned is_tdi_rh_tt:1; /* TDI roothub with TT */ 96 unsigned is_tdi_rh_tt:1; /* TDI roothub with TT */
93 unsigned no_selective_suspend:1; 97 unsigned no_selective_suspend:1;
94 unsigned has_fsl_port_bug:1; /* FreeScale */ 98 unsigned has_fsl_port_bug:1; /* FreeScale */
99 unsigned big_endian_mmio:1;
95 100
96 u8 sbrn; /* packed release number */ 101 u8 sbrn; /* packed release number */
97 102
@@ -651,6 +656,45 @@ ehci_port_speed(struct ehci_hcd *ehci, unsigned int portsc)
651#define ehci_has_fsl_portno_bug(e) (0) 656#define ehci_has_fsl_portno_bug(e) (0)
652#endif 657#endif
653 658
659/*
660 * While most USB host controllers implement their registers in
661 * little-endian format, a minority (celleb companion chip) implement
662 * them in big endian format.
663 *
664 * This attempts to support either format at compile time without a
665 * runtime penalty, or both formats with the additional overhead
666 * of checking a flag bit.
667 */
668
669#ifdef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO
670#define ehci_big_endian_mmio(e) ((e)->big_endian_mmio)
671#else
672#define ehci_big_endian_mmio(e) 0
673#endif
674
675static inline unsigned int ehci_readl (const struct ehci_hcd *ehci,
676 __u32 __iomem * regs)
677{
678#ifdef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO
679 return ehci_big_endian_mmio(ehci) ?
680 readl_be(regs) :
681 readl(regs);
682#else
683 return readl(regs);
684#endif
685}
686
687static inline void ehci_writel (const struct ehci_hcd *ehci,
688 const unsigned int val, __u32 __iomem *regs)
689{
690#ifdef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO
691 ehci_big_endian_mmio(ehci) ?
692 writel_be(val, regs) :
693 writel(val, regs);
694#else
695 writel(val, regs);
696#endif
697}
654 698
655/*-------------------------------------------------------------------------*/ 699/*-------------------------------------------------------------------------*/
656 700
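The comment block above describes the design: ehci_readl()/ehci_writel() compile down to plain readl()/writel() unless CONFIG_USB_EHCI_BIG_ENDIAN_MMIO is set, in which case one cached per-controller flag selects the byte-swapping accessor at run time. A userspace sketch of the same dual-path accessor, with an explicit byte swap standing in for readl_be() (struct and values illustrative):

#include <stdint.h>
#include <stdio.h>

struct fake_hc { int big_endian_mmio; };

static uint32_t swab32(uint32_t v)
{
        return (v >> 24) | ((v >> 8) & 0xff00) |
               ((v << 8) & 0xff0000) | (v << 24);
}

/* Mirrors ehci_readl(): one test of a cached flag picks the accessor;
 * with the config option off this folds to a plain load. */
static uint32_t hc_readl(const struct fake_hc *hc, const uint32_t *reg)
{
        return hc->big_endian_mmio ? swab32(*reg) : *reg;
}

int main(void)
{
        uint32_t reg = 0x12345678;
        struct fake_hc le = { 0 }, be = { 1 };

        printf("LE view: %#x\n", hc_readl(&le, &reg));  /* 0x12345678 */
        printf("BE view: %#x\n", hc_readl(&be, &reg));  /* 0x78563412 */
        return 0;
}
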
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index cc405512fa1c..930346487278 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -170,7 +170,6 @@ static int usb_hcd_at91_remove(struct usb_hcd *hcd,
170 at91_stop_hc(pdev); 170 at91_stop_hc(pdev);
171 iounmap(hcd->regs); 171 iounmap(hcd->regs);
172 release_mem_region(hcd->rsrc_start, hcd->rsrc_len); 172 release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
173 disable_irq_wake(hcd->irq);
174 173
175 clk_put(fclk); 174 clk_put(fclk);
176 clk_put(iclk); 175 clk_put(iclk);
@@ -271,8 +270,6 @@ ohci_hcd_at91_drv_suspend(struct platform_device *pdev, pm_message_t mesg)
271 270
272 if (device_may_wakeup(&pdev->dev)) 271 if (device_may_wakeup(&pdev->dev))
273 enable_irq_wake(hcd->irq); 272 enable_irq_wake(hcd->irq);
274 else
275 disable_irq_wake(hcd->irq);
276 273
277 /* 274 /*
278 * The integrated transceivers seem unable to notice disconnect, 275 * The integrated transceivers seem unable to notice disconnect,
@@ -293,6 +290,11 @@ ohci_hcd_at91_drv_suspend(struct platform_device *pdev, pm_message_t mesg)
293 290
294static int ohci_hcd_at91_drv_resume(struct platform_device *pdev) 291static int ohci_hcd_at91_drv_resume(struct platform_device *pdev)
295{ 292{
293 struct usb_hcd *hcd = platform_get_drvdata(pdev);
294
295 if (device_may_wakeup(&pdev->dev))
296 disable_irq_wake(hcd->irq);
297
296 if (!clocked) { 298 if (!clocked) {
297 clk_enable(iclk); 299 clk_enable(iclk);
298 clk_enable(fclk); 300 clk_enable(fclk);
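The ohci-at91 change above repairs the enable_irq_wake()/disable_irq_wake() pairing: suspend enables the wake source only when the device may wake the system, so resume must disable it under exactly the same condition, and the stray disable calls in remove() and the suspend else-branch would have unbalanced the IRQ's wake-depth refcount. A sketch of why the pairing matters (userspace stand-ins for the refcounted calls):

#include <stdio.h>

static int wake_depth;                  /* models the per-IRQ refcount */

static void enable_irq_wake(void)  { ++wake_depth; }
static void disable_irq_wake(void)
{
        if (wake_depth-- == 0) {
                puts("WARNING: unbalanced disable_irq_wake");
                wake_depth = 0;
        }
}

static void suspend(int may_wakeup)
{
        if (may_wakeup)
                enable_irq_wake();
        /* the removed else-branch called disable_irq_wake() here,
         * underflowing the count on every non-wakeup suspend */
}

static void resume(int may_wakeup)
{
        if (may_wakeup)                 /* mirror the suspend-side test */
                disable_irq_wake();
}

int main(void)
{
        suspend(1); resume(1);          /* balanced: depth back to 0 */
        suspend(0); resume(0);          /* balanced: neither side touches it */
        printf("wake_depth = %d\n", wake_depth);
        return 0;
}
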
@@ -320,18 +322,3 @@ static struct platform_driver ohci_hcd_at91_driver = {
320 }, 322 },
321}; 323};
322 324
323static int __init ohci_hcd_at91_init (void)
324{
325 if (usb_disabled())
326 return -ENODEV;
327
328 return platform_driver_register(&ohci_hcd_at91_driver);
329}
330
331static void __exit ohci_hcd_at91_cleanup (void)
332{
333 platform_driver_unregister(&ohci_hcd_at91_driver);
334}
335
336module_init (ohci_hcd_at91_init);
337module_exit (ohci_hcd_at91_cleanup);
diff --git a/drivers/usb/host/ohci-au1xxx.c b/drivers/usb/host/ohci-au1xxx.c
index e70b2430e2a9..663a0600b6e7 100644
--- a/drivers/usb/host/ohci-au1xxx.c
+++ b/drivers/usb/host/ohci-au1xxx.c
@@ -345,19 +345,3 @@ static struct platform_driver ohci_hcd_au1xxx_driver = {
345 }, 345 },
346}; 346};
347 347
348static int __init ohci_hcd_au1xxx_init (void)
349{
350 pr_debug (DRIVER_INFO " (Au1xxx)");
351 pr_debug ("block sizes: ed %d td %d\n",
352 sizeof (struct ed), sizeof (struct td));
353
354 return platform_driver_register(&ohci_hcd_au1xxx_driver);
355}
356
357static void __exit ohci_hcd_au1xxx_cleanup (void)
358{
359 platform_driver_unregister(&ohci_hcd_au1xxx_driver);
360}
361
362module_init (ohci_hcd_au1xxx_init);
363module_exit (ohci_hcd_au1xxx_cleanup);
diff --git a/drivers/usb/host/ohci-ep93xx.c b/drivers/usb/host/ohci-ep93xx.c
index 3348b07f0fe5..44c60fba76e1 100644
--- a/drivers/usb/host/ohci-ep93xx.c
+++ b/drivers/usb/host/ohci-ep93xx.c
@@ -214,15 +214,3 @@ static struct platform_driver ohci_hcd_ep93xx_driver = {
214 }, 214 },
215}; 215};
216 216
217static int __init ohci_hcd_ep93xx_init(void)
218{
219 return platform_driver_register(&ohci_hcd_ep93xx_driver);
220}
221
222static void __exit ohci_hcd_ep93xx_cleanup(void)
223{
224 platform_driver_unregister(&ohci_hcd_ep93xx_driver);
225}
226
227module_init(ohci_hcd_ep93xx_init);
228module_exit(ohci_hcd_ep93xx_cleanup);
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index c1c1d871aba4..fa6a7ceaa0db 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -855,63 +855,167 @@ MODULE_LICENSE ("GPL");
855 855
856#ifdef CONFIG_PCI 856#ifdef CONFIG_PCI
857#include "ohci-pci.c" 857#include "ohci-pci.c"
858#define PCI_DRIVER ohci_pci_driver
858#endif 859#endif
859 860
860#ifdef CONFIG_SA1111 861#ifdef CONFIG_SA1111
861#include "ohci-sa1111.c" 862#include "ohci-sa1111.c"
863#define SA1111_DRIVER ohci_hcd_sa1111_driver
862#endif 864#endif
863 865
864#ifdef CONFIG_ARCH_S3C2410 866#ifdef CONFIG_ARCH_S3C2410
865#include "ohci-s3c2410.c" 867#include "ohci-s3c2410.c"
868#define PLATFORM_DRIVER ohci_hcd_s3c2410_driver
866#endif 869#endif
867 870
868#ifdef CONFIG_ARCH_OMAP 871#ifdef CONFIG_ARCH_OMAP
869#include "ohci-omap.c" 872#include "ohci-omap.c"
873#define PLATFORM_DRIVER ohci_hcd_omap_driver
870#endif 874#endif
871 875
872#ifdef CONFIG_ARCH_LH7A404 876#ifdef CONFIG_ARCH_LH7A404
873#include "ohci-lh7a404.c" 877#include "ohci-lh7a404.c"
878#define PLATFORM_DRIVER ohci_hcd_lh7a404_driver
874#endif 879#endif
875 880
876#ifdef CONFIG_PXA27x 881#ifdef CONFIG_PXA27x
877#include "ohci-pxa27x.c" 882#include "ohci-pxa27x.c"
883#define PLATFORM_DRIVER ohci_hcd_pxa27x_driver
878#endif 884#endif
879 885
880#ifdef CONFIG_ARCH_EP93XX 886#ifdef CONFIG_ARCH_EP93XX
881#include "ohci-ep93xx.c" 887#include "ohci-ep93xx.c"
888#define PLATFORM_DRIVER ohci_hcd_ep93xx_driver
882#endif 889#endif
883 890
884#ifdef CONFIG_SOC_AU1X00 891#ifdef CONFIG_SOC_AU1X00
885#include "ohci-au1xxx.c" 892#include "ohci-au1xxx.c"
893#define PLATFORM_DRIVER ohci_hcd_au1xxx_driver
886#endif 894#endif
887 895
888#ifdef CONFIG_PNX8550 896#ifdef CONFIG_PNX8550
889#include "ohci-pnx8550.c" 897#include "ohci-pnx8550.c"
898#define PLATFORM_DRIVER ohci_hcd_pnx8550_driver
890#endif 899#endif
891 900
892#ifdef CONFIG_USB_OHCI_HCD_PPC_SOC 901#ifdef CONFIG_USB_OHCI_HCD_PPC_SOC
893#include "ohci-ppc-soc.c" 902#include "ohci-ppc-soc.c"
903#define PLATFORM_DRIVER ohci_hcd_ppc_soc_driver
894#endif 904#endif
895 905
896#ifdef CONFIG_ARCH_AT91 906#ifdef CONFIG_ARCH_AT91
897#include "ohci-at91.c" 907#include "ohci-at91.c"
908#define PLATFORM_DRIVER ohci_hcd_at91_driver
898#endif 909#endif
899 910
900#ifdef CONFIG_ARCH_PNX4008 911#ifdef CONFIG_ARCH_PNX4008
901#include "ohci-pnx4008.c" 912#include "ohci-pnx4008.c"
913#define PLATFORM_DRIVER usb_hcd_pnx4008_driver
902#endif 914#endif
903 915
904#if !(defined(CONFIG_PCI) \ 916
905 || defined(CONFIG_SA1111) \ 917#ifdef CONFIG_USB_OHCI_HCD_PPC_OF
906 || defined(CONFIG_ARCH_S3C2410) \ 918#include "ohci-ppc-of.c"
907 || defined(CONFIG_ARCH_OMAP) \ 919#define OF_PLATFORM_DRIVER ohci_hcd_ppc_of_driver
908 || defined (CONFIG_ARCH_LH7A404) \ 920#endif
909 || defined (CONFIG_PXA27x) \ 921
910 || defined (CONFIG_ARCH_EP93XX) \ 922#ifdef CONFIG_PPC_PS3
911 || defined (CONFIG_SOC_AU1X00) \ 923#include "ohci-ps3.c"
912 || defined (CONFIG_USB_OHCI_HCD_PPC_SOC) \ 924#define PS3_SYSTEM_BUS_DRIVER ps3_ohci_sb_driver
913 || defined (CONFIG_ARCH_AT91) \ 925#endif
914 || defined (CONFIG_ARCH_PNX4008) \ 926
915 ) 927#if !defined(PCI_DRIVER) && \
928 !defined(PLATFORM_DRIVER) && \
929 !defined(OF_PLATFORM_DRIVER) && \
930 !defined(SA1111_DRIVER) && \
931 !defined(PS3_SYSTEM_BUS_DRIVER)
916#error "missing bus glue for ohci-hcd" 932#error "missing bus glue for ohci-hcd"
917#endif 933#endif
934
935static int __init ohci_hcd_mod_init(void)
936{
937 int retval = 0;
938
939 if (usb_disabled())
940 return -ENODEV;
941
942 printk (KERN_DEBUG "%s: " DRIVER_INFO "\n", hcd_name);
943 pr_debug ("%s: block sizes: ed %Zd td %Zd\n", hcd_name,
944 sizeof (struct ed), sizeof (struct td));
945
946#ifdef PS3_SYSTEM_BUS_DRIVER
947 retval = ps3_system_bus_driver_register(&PS3_SYSTEM_BUS_DRIVER);
948 if (retval < 0)
949 goto error_ps3;
950#endif
951
952#ifdef PLATFORM_DRIVER
953 retval = platform_driver_register(&PLATFORM_DRIVER);
954 if (retval < 0)
955 goto error_platform;
956#endif
957
958#ifdef OF_PLATFORM_DRIVER
959 retval = of_register_platform_driver(&OF_PLATFORM_DRIVER);
960 if (retval < 0)
961 goto error_of_platform;
962#endif
963
964#ifdef SA1111_DRIVER
965 retval = sa1111_driver_register(&SA1111_DRIVER);
966 if (retval < 0)
967 goto error_sa1111;
968#endif
969
970#ifdef PCI_DRIVER
971 retval = pci_register_driver(&PCI_DRIVER);
972 if (retval < 0)
973 goto error_pci;
974#endif
975
976 return retval;
977
978 /* Error path */
979#ifdef PCI_DRIVER
980 error_pci:
981#endif
982#ifdef SA1111_DRIVER
983 sa1111_driver_unregister(&SA1111_DRIVER);
984 error_sa1111:
985#endif
986#ifdef OF_PLATFORM_DRIVER
987 of_unregister_platform_driver(&OF_PLATFORM_DRIVER);
988 error_of_platform:
989#endif
990#ifdef PLATFORM_DRIVER
991 platform_driver_unregister(&PLATFORM_DRIVER);
992 error_platform:
993#endif
994#ifdef PS3_SYSTEM_BUS_DRIVER
995 ps3_system_bus_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
996 error_ps3:
997#endif
998 return retval;
999}
1000module_init(ohci_hcd_mod_init);
1001
1002static void __exit ohci_hcd_mod_exit(void)
1003{
1004#ifdef PCI_DRIVER
1005 pci_unregister_driver(&PCI_DRIVER);
1006#endif
1007#ifdef SA1111_DRIVER
1008 sa1111_driver_unregister(&SA1111_DRIVER);
1009#endif
1010#ifdef OF_PLATFORM_DRIVER
1011 of_unregister_platform_driver(&OF_PLATFORM_DRIVER);
1012#endif
1013#ifdef PLATFORM_DRIVER
1014 platform_driver_unregister(&PLATFORM_DRIVER);
1015#endif
1016#ifdef PS3_SYSTEM_BUS_DRIVER
1017 ps3_system_bus_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
1018#endif
1019}
1020module_exit(ohci_hcd_mod_exit);
1021
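ohci_hcd_mod_init() above keeps a single error ladder valid across any combination of configured bus glues by wrapping each label in the same #ifdef as its registration; the fall-through then unregisters exactly the drivers that were registered before the failure. A trimmed sketch of the idiom with two optional "drivers" (names and config macros illustrative):

#include <stdio.h>

#define HAVE_A 1
#define HAVE_B 1

static int reg_a(void)   { puts("A up");    return 0; }
static void unreg_a(void){ puts("A down"); }
static int reg_b(void)   { puts("B fails"); return -1; }

static int mod_init(void)
{
        int retval = 0;

#if HAVE_A
        if ((retval = reg_a()) < 0)
                goto error_a;
#endif
#if HAVE_B
        if ((retval = reg_b()) < 0)
                goto error_b;
#endif
        return 0;

        /* each label exists only when its registration exists, so the
         * fall-through unwinds precisely the steps that ran */
#if HAVE_B
error_b:
#endif
#if HAVE_A
        unreg_a();
error_a:
#endif
        return retval;
}

int main(void) { return mod_init() ? 1 : 0; }
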
diff --git a/drivers/usb/host/ohci-lh7a404.c b/drivers/usb/host/ohci-lh7a404.c
index e9807cf73a2f..4a043abd85ea 100644
--- a/drivers/usb/host/ohci-lh7a404.c
+++ b/drivers/usb/host/ohci-lh7a404.c
@@ -251,19 +251,3 @@ static struct platform_driver ohci_hcd_lh7a404_driver = {
251 }, 251 },
252}; 252};
253 253
254static int __init ohci_hcd_lh7a404_init (void)
255{
256 pr_debug (DRIVER_INFO " (LH7A404)");
257 pr_debug ("block sizes: ed %d td %d\n",
258 sizeof (struct ed), sizeof (struct td));
259
260 return platform_driver_register(&ohci_hcd_lh7a404_driver);
261}
262
263static void __exit ohci_hcd_lh7a404_cleanup (void)
264{
265 platform_driver_unregister(&ohci_hcd_lh7a404_driver);
266}
267
268module_init (ohci_hcd_lh7a404_init);
269module_exit (ohci_hcd_lh7a404_cleanup);
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
index 27be1f936885..5cfa3d1c4413 100644
--- a/drivers/usb/host/ohci-omap.c
+++ b/drivers/usb/host/ohci-omap.c
@@ -544,22 +544,3 @@ static struct platform_driver ohci_hcd_omap_driver = {
544 }, 544 },
545}; 545};
546 546
547static int __init ohci_hcd_omap_init (void)
548{
549 printk (KERN_DEBUG "%s: " DRIVER_INFO " (OMAP)\n", hcd_name);
550 if (usb_disabled())
551 return -ENODEV;
552
553 pr_debug("%s: block sizes: ed %Zd td %Zd\n", hcd_name,
554 sizeof (struct ed), sizeof (struct td));
555
556 return platform_driver_register(&ohci_hcd_omap_driver);
557}
558
559static void __exit ohci_hcd_omap_cleanup (void)
560{
561 platform_driver_unregister(&ohci_hcd_omap_driver);
562}
563
564module_init (ohci_hcd_omap_init);
565module_exit (ohci_hcd_omap_cleanup);
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index 596e0b41e606..b331ac4d0d62 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -20,79 +20,154 @@
20 20
21/*-------------------------------------------------------------------------*/ 21/*-------------------------------------------------------------------------*/
22 22
-static int
-ohci_pci_reset (struct usb_hcd *hcd)
-{
-	struct ohci_hcd	*ohci = hcd_to_ohci (hcd);
-
-	ohci_hcd_init (ohci);
-	return ohci_init (ohci);
-}
-
-static int __devinit
-ohci_pci_start (struct usb_hcd *hcd)
-{
-	struct ohci_hcd	*ohci = hcd_to_ohci (hcd);
-	int		ret;
-
-	/* REVISIT this whole block should move to reset(), which handles
-	 * all the other one-time init.
-	 */
+/* AMD 756, for most chips (early revs), corrupts register
+ * values on read ... so enable the vendor workaround.
+ */
+static int __devinit ohci_quirk_amd756(struct usb_hcd *hcd)
+{
+	struct ohci_hcd	*ohci = hcd_to_ohci (hcd);
+
+	ohci->flags = OHCI_QUIRK_AMD756;
+	ohci_dbg (ohci, "AMD756 erratum 4 workaround\n");
+
+	/* also erratum 10 (suspend/resume issues) */
+	device_init_wakeup(&hcd->self.root_hub->dev, 0);
+
+	return 0;
+}
+
+/* Apple's OHCI driver has a lot of bizarre workarounds
+ * for this chip.  Evidently control and bulk lists
+ * can get confused.  (B&W G3 models, and ...)
+ */
+static int __devinit ohci_quirk_opti(struct usb_hcd *hcd)
+{
+	struct ohci_hcd	*ohci = hcd_to_ohci (hcd);
+
+	ohci_dbg (ohci, "WARNING: OPTi workarounds unavailable\n");
+
+	return 0;
+}
+
+/* Check for NSC87560. We have to look at the bridge (fn1) to
+ * identify the USB (fn2). This quirk might apply to more or
+ * even all NSC stuff.
+ */
+static int __devinit ohci_quirk_ns(struct usb_hcd *hcd)
+{
+	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
+	struct pci_dev	*b;
+
+	b  = pci_get_slot (pdev->bus, PCI_DEVFN (PCI_SLOT (pdev->devfn), 1));
+	if (b && b->device == PCI_DEVICE_ID_NS_87560_LIO
+	    && b->vendor == PCI_VENDOR_ID_NS) {
+		struct ohci_hcd	*ohci = hcd_to_ohci (hcd);
+
+		ohci->flags |= OHCI_QUIRK_SUPERIO;
+		ohci_dbg (ohci, "Using NSC SuperIO setup\n");
+	}
+	pci_dev_put(b);
+
+	return 0;
+}
+
+/* Check for Compaq's ZFMicro chipset, which needs short
+ * delays before control or bulk queues get re-activated
+ * in finish_unlinks()
+ */
+static int __devinit ohci_quirk_zfmicro(struct usb_hcd *hcd)
+{
+	struct ohci_hcd	*ohci = hcd_to_ohci (hcd);
+
+	ohci->flags |= OHCI_QUIRK_ZFMICRO;
+	ohci_dbg (ohci, "enabled Compaq ZFMicro chipset quirk\n");
+
+	return 0;
+}
+
+/* Check for Toshiba SCC OHCI which has big endian registers
+ * and little endian in memory data structures
+ */
+static int __devinit ohci_quirk_toshiba_scc(struct usb_hcd *hcd)
+{
+	struct ohci_hcd	*ohci = hcd_to_ohci (hcd);
+
+	/* That chip is only present in the southbridge of some
+	 * cell based platforms which are supposed to select
+	 * CONFIG_USB_OHCI_BIG_ENDIAN_MMIO.  We verify here if
+	 * that was the case though.
+	 */
+#ifdef CONFIG_USB_OHCI_BIG_ENDIAN_MMIO
+	ohci->flags |= OHCI_QUIRK_BE_MMIO;
+	ohci_dbg (ohci, "enabled big endian Toshiba quirk\n");
+	return 0;
+#else
+	ohci_err (ohci, "unsupported big endian Toshiba quirk\n");
+	return -ENXIO;
+#endif
+}
+
+/* List of quirks for OHCI */
+static const struct pci_device_id ohci_pci_quirks[] = {
+	{
+		PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x740c),
+		.driver_data = (unsigned long)ohci_quirk_amd756,
+	},
+	{
+		PCI_DEVICE(PCI_VENDOR_ID_OPTI, 0xc861),
+		.driver_data = (unsigned long)ohci_quirk_opti,
+	},
+	{
+		PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_ANY_ID),
+		.driver_data = (unsigned long)ohci_quirk_ns,
+	},
+	{
+		PCI_DEVICE(PCI_VENDOR_ID_COMPAQ, 0xa0f8),
+		.driver_data = (unsigned long)ohci_quirk_zfmicro,
+	},
+	{
+		PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, 0x01b6),
+		.driver_data = (unsigned long)ohci_quirk_toshiba_scc,
+	},
+	/* FIXME for some of the early AMD 760 southbridges, OHCI
+	 * won't work at all.  blacklist them.
+	 */
+
+	{},
+};
+
+static int ohci_pci_reset (struct usb_hcd *hcd)
+{
+	struct ohci_hcd	*ohci = hcd_to_ohci (hcd);
+	int ret = 0;
+
 	if (hcd->self.controller) {
 		struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
+		const struct pci_device_id *quirk_id;
 
-		/* AMD 756, for most chips (early revs), corrupts register
-		 * values on read ... so enable the vendor workaround.
-		 */
-		if (pdev->vendor == PCI_VENDOR_ID_AMD
-				&& pdev->device == 0x740c) {
-			ohci->flags = OHCI_QUIRK_AMD756;
-			ohci_dbg (ohci, "AMD756 erratum 4 workaround\n");
-			/* also erratum 10 (suspend/resume issues) */
-			device_init_wakeup(&hcd->self.root_hub->dev, 0);
+		quirk_id = pci_match_id(ohci_pci_quirks, pdev);
+		if (quirk_id != NULL) {
+			int (*quirk)(struct usb_hcd *ohci);
+			quirk = (void *)quirk_id->driver_data;
+			ret = quirk(hcd);
 		}
+	}
+	if (ret == 0) {
+		ohci_hcd_init (ohci);
+		return ohci_init (ohci);
+	}
+	return ret;
+}
 
-		/* FIXME for some of the early AMD 760 southbridges, OHCI
-		 * won't work at all.  blacklist them.
-		 */
-
-		/* Apple's OHCI driver has a lot of bizarre workarounds
-		 * for this chip.  Evidently control and bulk lists
-		 * can get confused.  (B&W G3 models, and ...)
-		 */
-		else if (pdev->vendor == PCI_VENDOR_ID_OPTI
-				&& pdev->device == 0xc861) {
-			ohci_dbg (ohci,
-				"WARNING: OPTi workarounds unavailable\n");
-		}
 
-		/* Check for NSC87560. We have to look at the bridge (fn1) to
-		 * identify the USB (fn2). This quirk might apply to more or
-		 * even all NSC stuff.
-		 */
-		else if (pdev->vendor == PCI_VENDOR_ID_NS) {
-			struct pci_dev	*b;
-
-			b  = pci_get_slot (pdev->bus,
-					PCI_DEVFN (PCI_SLOT (pdev->devfn), 1));
-			if (b && b->device == PCI_DEVICE_ID_NS_87560_LIO
-					&& b->vendor == PCI_VENDOR_ID_NS) {
-				ohci->flags |= OHCI_QUIRK_SUPERIO;
-				ohci_dbg (ohci, "Using NSC SuperIO setup\n");
-			}
-			pci_dev_put(b);
-		}
+static int __devinit ohci_pci_start (struct usb_hcd *hcd)
+{
+	struct ohci_hcd	*ohci = hcd_to_ohci (hcd);
+	int		ret;
 
-		/* Check for Compaq's ZFMicro chipset, which needs short
-		 * delays before control or bulk queues get re-activated
-		 * in finish_unlinks()
-		 */
-		else if (pdev->vendor == PCI_VENDOR_ID_COMPAQ
-				&& pdev->device == 0xa0f8) {
-			ohci->flags |= OHCI_QUIRK_ZFMICRO;
-			ohci_dbg (ohci,
-				"enabled Compaq ZFMicro chipset quirk\n");
-		}
+#ifdef CONFIG_PM	/* avoid warnings about unused pdev */
+	if (hcd->self.controller) {
+		struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
 
 		/* RWC may not be set for add-in PCI cards, since boot
 		 * firmware probably ignored them.  This transfers PCI
@@ -101,16 +176,14 @@ ohci_pci_start (struct usb_hcd *hcd)
 		if (device_may_wakeup(&pdev->dev))
 			ohci->hc_control |= OHCI_CTRL_RWC;
 	}
+#endif /* CONFIG_PM */
 
-	/* NOTE: there may have already been a first reset, to
-	 * keep bios/smm irqs from making trouble
-	 */
-	if ((ret = ohci_run (ohci)) < 0) {
+	ret = ohci_run (ohci);
+	if (ret < 0) {
 		ohci_err (ohci, "can't start\n");
 		ohci_stop (hcd);
-		return ret;
 	}
-	return 0;
+	return ret;
 }
 
 #ifdef CONFIG_PM
@@ -238,23 +311,3 @@ static struct pci_driver ohci_pci_driver = {
 	.shutdown =	usb_hcd_pci_shutdown,
 };
 
-
-static int __init ohci_hcd_pci_init (void)
-{
-	printk (KERN_DEBUG "%s: " DRIVER_INFO " (PCI)\n", hcd_name);
-	if (usb_disabled())
-		return -ENODEV;
-
-	pr_debug ("%s: block sizes: ed %Zd td %Zd\n", hcd_name,
-		sizeof (struct ed), sizeof (struct td));
-	return pci_register_driver (&ohci_pci_driver);
-}
-module_init (ohci_hcd_pci_init);
-
-/*-------------------------------------------------------------------------*/
-
-static void __exit ohci_hcd_pci_cleanup (void)
-{
-	pci_unregister_driver (&ohci_pci_driver);
-}
-module_exit (ohci_hcd_pci_cleanup);
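
The ohci-pci.c change above replaces an if/else chain of vendor/device tests with a pci_device_id table whose driver_data carries a handler to run at reset time. A minimal userspace sketch of that dispatch pattern follows; the IDs and handlers are hypothetical, and a plain function pointer stands in for the kernel's unsigned long driver_data to keep the sketch strictly portable.

#include <stdio.h>

/* Hypothetical stand-in for the PCI core's id table entry. */
struct dev_id {
	unsigned vendor, device;	/* 0xffff = match any device */
	int (*quirk)(void);
};

static int quirk_a(void) { printf("quirk A applied\n"); return 0; }
static int quirk_b(void) { printf("quirk B applied\n"); return 0; }

static const struct dev_id quirks[] = {
	{ 0x1022, 0x740c, quirk_a },	/* one exact vendor/device pair */
	{ 0x100b, 0xffff, quirk_b },	/* every device of one vendor */
	{ 0 },				/* terminator */
};

/* Analogous to pci_match_id(): first matching entry wins. */
static const struct dev_id *match_id(unsigned vendor, unsigned device)
{
	const struct dev_id *id;

	for (id = quirks; id->vendor; id++)
		if (id->vendor == vendor &&
		    (id->device == 0xffff || id->device == device))
			return id;
	return NULL;
}

int main(void)
{
	const struct dev_id *id = match_id(0x100b, 0x0012);

	/* A nonzero return would abort controller init, exactly as a
	 * nonzero quirk return now aborts ohci_pci_reset(). */
	return id ? id->quirk() : 0;
}

New board-specific quirks now only need a table entry plus a handler, instead of another branch inside the reset path.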
diff --git a/drivers/usb/host/ohci-pnx4008.c b/drivers/usb/host/ohci-pnx4008.c
index 3a8cbfb69054..893b172384da 100644
--- a/drivers/usb/host/ohci-pnx4008.c
+++ b/drivers/usb/host/ohci-pnx4008.c
@@ -465,15 +465,3 @@ static struct platform_driver usb_hcd_pnx4008_driver = {
 	.remove = usb_hcd_pnx4008_remove,
 };
 
-static int __init usb_hcd_pnx4008_init(void)
-{
-	return platform_driver_register(&usb_hcd_pnx4008_driver);
-}
-
-static void __exit usb_hcd_pnx4008_cleanup(void)
-{
-	return platform_driver_unregister(&usb_hcd_pnx4008_driver);
-}
-
-module_init(usb_hcd_pnx4008_init);
-module_exit(usb_hcd_pnx4008_cleanup);
diff --git a/drivers/usb/host/ohci-pnx8550.c b/drivers/usb/host/ohci-pnx8550.c
index 6922b91b1704..de45eb0051a7 100644
--- a/drivers/usb/host/ohci-pnx8550.c
+++ b/drivers/usb/host/ohci-pnx8550.c
@@ -240,19 +240,3 @@ static struct platform_driver ohci_hcd_pnx8550_driver = {
 	.remove		= ohci_hcd_pnx8550_drv_remove,
 };
 
-static int __init ohci_hcd_pnx8550_init (void)
-{
-	pr_debug (DRIVER_INFO " (pnx8550)");
-	pr_debug ("block sizes: ed %d td %d\n",
-		sizeof (struct ed), sizeof (struct td));
-
-	return platform_driver_register(&ohci_hcd_pnx8550_driver);
-}
-
-static void __exit ohci_hcd_pnx8550_cleanup (void)
-{
-	platform_driver_unregister(&ohci_hcd_pnx8550_driver);
-}
-
-module_init (ohci_hcd_pnx8550_init);
-module_exit (ohci_hcd_pnx8550_cleanup);
diff --git a/drivers/usb/host/ohci-ppc-of.c b/drivers/usb/host/ohci-ppc-of.c
new file mode 100644
index 000000000000..08e237c7bc43
--- /dev/null
+++ b/drivers/usb/host/ohci-ppc-of.c
@@ -0,0 +1,232 @@
1/*
2 * OHCI HCD (Host Controller Driver) for USB.
3 *
4 * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
5 * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
6 * (C) Copyright 2002 Hewlett-Packard Company
7 * (C) Copyright 2006 Sylvain Munaut <tnt@246tNt.com>
8 *
9 * Bus glue for OHCI HC on the of_platform bus
10 *
11 * Modified for of_platform bus from ohci-sa1111.c
12 *
13 * This file is licenced under the GPL.
14 */
15
16#include <linux/signal.h>
17
18#include <asm/of_platform.h>
19#include <asm/prom.h>
20
21
22static int __devinit
23ohci_ppc_of_start(struct usb_hcd *hcd)
24{
25 struct ohci_hcd *ohci = hcd_to_ohci(hcd);
26 int ret;
27
28 if ((ret = ohci_init(ohci)) < 0)
29 return ret;
30
31 if ((ret = ohci_run(ohci)) < 0) {
32 err("can't start %s", ohci_to_hcd(ohci)->self.bus_name);
33 ohci_stop(hcd);
34 return ret;
35 }
36
37 return 0;
38}
39
40static const struct hc_driver ohci_ppc_of_hc_driver = {
41 .description = hcd_name,
42 .product_desc = "OF OHCI",
43 .hcd_priv_size = sizeof(struct ohci_hcd),
44
45 /*
46 * generic hardware linkage
47 */
48 .irq = ohci_irq,
49 .flags = HCD_USB11 | HCD_MEMORY,
50
51 /*
52 * basic lifecycle operations
53 */
54 .start = ohci_ppc_of_start,
55 .stop = ohci_stop,
56 .shutdown = ohci_shutdown,
57
58 /*
59 * managing i/o requests and associated device resources
60 */
61 .urb_enqueue = ohci_urb_enqueue,
62 .urb_dequeue = ohci_urb_dequeue,
63 .endpoint_disable = ohci_endpoint_disable,
64
65 /*
66 * scheduling support
67 */
68 .get_frame_number = ohci_get_frame,
69
70 /*
71 * root hub support
72 */
73 .hub_status_data = ohci_hub_status_data,
74 .hub_control = ohci_hub_control,
75 .hub_irq_enable = ohci_rhsc_enable,
76#ifdef CONFIG_PM
77 .bus_suspend = ohci_bus_suspend,
78 .bus_resume = ohci_bus_resume,
79#endif
80 .start_port_reset = ohci_start_port_reset,
81};
82
83
84static int __devinit
85ohci_hcd_ppc_of_probe(struct of_device *op, const struct of_device_id *match)
86{
87 struct device_node *dn = op->node;
88 struct usb_hcd *hcd;
89 struct ohci_hcd *ohci;
90 struct resource res;
91 int irq;
92
93 int rv;
94 int is_bigendian;
95
96 if (usb_disabled())
97 return -ENODEV;
98
99 is_bigendian =
100 device_is_compatible(dn, "ohci-bigendian") ||
101 device_is_compatible(dn, "ohci-be");
102
103 dev_dbg(&op->dev, "initializing PPC-OF USB Controller\n");
104
105 rv = of_address_to_resource(dn, 0, &res);
106 if (rv)
107 return rv;
108
109 hcd = usb_create_hcd(&ohci_ppc_of_hc_driver, &op->dev, "PPC-OF USB");
110 if (!hcd)
111 return -ENOMEM;
112
113 hcd->rsrc_start = res.start;
114 hcd->rsrc_len = res.end - res.start + 1;
115
116 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
117 printk(KERN_ERR __FILE__ ": request_mem_region failed\n");
118 rv = -EBUSY;
119 goto err_rmr;
120 }
121
122 irq = irq_of_parse_and_map(dn, 0);
123 if (irq == NO_IRQ) {
124 printk(KERN_ERR __FILE__ ": irq_of_parse_and_map failed\n");
125 rv = -EBUSY;
126 goto err_irq;
127 }
128
129 hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
130 if (!hcd->regs) {
131 printk(KERN_ERR __FILE__ ": ioremap failed\n");
132 rv = -ENOMEM;
133 goto err_ioremap;
134 }
135
136 ohci = hcd_to_ohci(hcd);
137 if (is_bigendian)
138 ohci->flags |= OHCI_QUIRK_BE_MMIO | OHCI_QUIRK_BE_DESC;
139
140 ohci_hcd_init(ohci);
141
142 rv = usb_add_hcd(hcd, irq, 0);
143 if (rv == 0)
144 return 0;
145
146 iounmap(hcd->regs);
147err_ioremap:
148 irq_dispose_mapping(irq);
149err_irq:
150 release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
151err_rmr:
152 usb_put_hcd(hcd);
153
154 return rv;
155}
156
157static int ohci_hcd_ppc_of_remove(struct of_device *op)
158{
159 struct usb_hcd *hcd = dev_get_drvdata(&op->dev);
160 dev_set_drvdata(&op->dev, NULL);
161
162 dev_dbg(&op->dev, "stopping PPC-OF USB Controller\n");
163
164 usb_remove_hcd(hcd);
165
166 iounmap(hcd->regs);
167 irq_dispose_mapping(hcd->irq);
168 release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
169
170 usb_put_hcd(hcd);
171
172 return 0;
173}
174
175static int ohci_hcd_ppc_of_shutdown(struct of_device *op)
176{
177 struct usb_hcd *hcd = dev_get_drvdata(&op->dev);
178
179 if (hcd->driver->shutdown)
180 hcd->driver->shutdown(hcd);
181
182 return 0;
183}
184
185
186static struct of_device_id ohci_hcd_ppc_of_match[] = {
187#ifdef CONFIG_USB_OHCI_HCD_PPC_OF_BE
188 {
189 .name = "usb",
190 .compatible = "ohci-bigendian",
191 },
192 {
193 .name = "usb",
194 .compatible = "ohci-be",
195 },
196#endif
197#ifdef CONFIG_USB_OHCI_HCD_PPC_OF_LE
198 {
199 .name = "usb",
200 .compatible = "ohci-littledian",
201 },
202 {
203 .name = "usb",
204 .compatible = "ohci-le",
205 },
206#endif
207 {},
208};
209MODULE_DEVICE_TABLE(of, ohci_hcd_ppc_of_match);
210
211#if !defined(CONFIG_USB_OHCI_HCD_PPC_OF_BE) && \
212 !defined(CONFIG_USB_OHCI_HCD_PPC_OF_LE)
213#error "No endianness selected for ppc-of-ohci"
214#endif
215
216
217static struct of_platform_driver ohci_hcd_ppc_of_driver = {
218 .name = "ppc-of-ohci",
219 .match_table = ohci_hcd_ppc_of_match,
220 .probe = ohci_hcd_ppc_of_probe,
221 .remove = ohci_hcd_ppc_of_remove,
222 .shutdown = ohci_hcd_ppc_of_shutdown,
223#ifdef CONFIG_PM
224 /*.suspend = ohci_hcd_ppc_soc_drv_suspend,*/
225 /*.resume = ohci_hcd_ppc_soc_drv_resume,*/
226#endif
227 .driver = {
228 .name = "ppc-of-ohci",
229 .owner = THIS_MODULE,
230 },
231};
232
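
ohci_hcd_ppc_of_probe() above uses the usual kernel error-unwind idiom: each acquisition failure jumps to a label that releases only what was already acquired, in reverse order. A compact userspace sketch of the same shape; grab() and drop() are hypothetical stubs standing in for request_mem_region()/ioremap() and their release counterparts.

#include <stdio.h>

static int grab(const char *what) { printf("acquire %s\n", what); return 0; }
static void drop(const char *what) { printf("release %s\n", what); }

static int probe(void)
{
	int rv;

	if ((rv = grab("mem region")))
		goto err_mem;
	if ((rv = grab("irq mapping")))
		goto err_irq;
	if ((rv = grab("register mapping")))
		goto err_ioremap;

	return 0;			/* fully initialized */

	/* Unwind strictly in reverse order of acquisition; each label
	 * frees only what was successfully acquired before the failure. */
err_ioremap:
	drop("irq mapping");
err_irq:
	drop("mem region");
err_mem:
	return rv;
}

int main(void)
{
	return probe();
}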
diff --git a/drivers/usb/host/ohci-ppc-soc.c b/drivers/usb/host/ohci-ppc-soc.c
index e1a7eb817313..1a2e1777ca61 100644
--- a/drivers/usb/host/ohci-ppc-soc.c
+++ b/drivers/usb/host/ohci-ppc-soc.c
@@ -72,7 +72,7 @@ static int usb_hcd_ppc_soc_probe(const struct hc_driver *driver,
 	}
 
 	ohci = hcd_to_ohci(hcd);
-	ohci->flags |= OHCI_BIG_ENDIAN;
+	ohci->flags |= OHCI_QUIRK_BE_MMIO | OHCI_QUIRK_BE_DESC;
 	ohci_hcd_init(ohci);
 
 	retval = usb_add_hcd(hcd, irq, IRQF_DISABLED);
@@ -208,19 +208,3 @@ static struct platform_driver ohci_hcd_ppc_soc_driver = {
 	},
 };
 
-static int __init ohci_hcd_ppc_soc_init(void)
-{
-	pr_debug(DRIVER_INFO " (PPC SOC)\n");
-	pr_debug("block sizes: ed %d td %d\n", sizeof(struct ed),
-		sizeof(struct td));
-
-	return platform_driver_register(&ohci_hcd_ppc_soc_driver);
-}
-
-static void __exit ohci_hcd_ppc_soc_cleanup(void)
-{
-	platform_driver_unregister(&ohci_hcd_ppc_soc_driver);
-}
-
-module_init(ohci_hcd_ppc_soc_init);
-module_exit(ohci_hcd_ppc_soc_cleanup);
diff --git a/drivers/usb/host/ohci-ps3.c b/drivers/usb/host/ohci-ps3.c
new file mode 100644
index 000000000000..62283a3926de
--- /dev/null
+++ b/drivers/usb/host/ohci-ps3.c
@@ -0,0 +1,196 @@
1/*
2 * PS3 OHCI Host Controller driver
3 *
4 * Copyright (C) 2006 Sony Computer Entertainment Inc.
5 * Copyright 2006 Sony Corp.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#include <asm/ps3.h>
22
23static int ps3_ohci_hc_reset(struct usb_hcd *hcd)
24{
25 struct ohci_hcd *ohci = hcd_to_ohci(hcd);
26
27 ohci->flags |= OHCI_QUIRK_BE_MMIO;
28 ohci_hcd_init(ohci);
29 return ohci_init(ohci);
30}
31
32static int __devinit ps3_ohci_hc_start(struct usb_hcd *hcd)
33{
34 int result;
35 struct ohci_hcd *ohci = hcd_to_ohci(hcd);
36
37 /* Handle root hub init quirk in spider south bridge. */
38 /* Also set PwrOn2PwrGood to 0x7f (254ms). */
39
40 ohci_writel(ohci, 0x7f000000 | RH_A_PSM | RH_A_OCPM,
41 &ohci->regs->roothub.a);
42 ohci_writel(ohci, 0x00060000, &ohci->regs->roothub.b);
43
44 result = ohci_run(ohci);
45
46 if (result < 0) {
47 err("can't start %s", hcd->self.bus_name);
48 ohci_stop(hcd);
49 }
50
51 return result;
52}
53
54static const struct hc_driver ps3_ohci_hc_driver = {
55 .description = hcd_name,
56 .product_desc = "PS3 OHCI Host Controller",
57 .hcd_priv_size = sizeof(struct ohci_hcd),
58 .irq = ohci_irq,
59 .flags = HCD_MEMORY | HCD_USB11,
60 .reset = ps3_ohci_hc_reset,
61 .start = ps3_ohci_hc_start,
62 .stop = ohci_stop,
63 .shutdown = ohci_shutdown,
64 .urb_enqueue = ohci_urb_enqueue,
65 .urb_dequeue = ohci_urb_dequeue,
66 .endpoint_disable = ohci_endpoint_disable,
67 .get_frame_number = ohci_get_frame,
68 .hub_status_data = ohci_hub_status_data,
69 .hub_control = ohci_hub_control,
70 .hub_irq_enable = ohci_rhsc_enable,
71 .start_port_reset = ohci_start_port_reset,
72#if defined(CONFIG_PM)
73 .bus_suspend = ohci_bus_suspend,
74 .bus_resume = ohci_bus_resume,
75#endif
76};
77
78/* redefine dev_dbg to do a syntax check */
79
80#if !defined(DEBUG)
81#undef dev_dbg
82static inline int __attribute__ ((format (printf, 2, 3))) dev_dbg(
83 const struct device *_dev, const char *fmt, ...) {return 0;}
84#endif
85
86static int ps3_ohci_sb_probe(struct ps3_system_bus_device *dev)
87{
88 int result;
89 struct usb_hcd *hcd;
90 unsigned int virq;
91 static u64 dummy_mask = DMA_32BIT_MASK;
92
93 if (usb_disabled()) {
94 result = -ENODEV;
95 goto fail_start;
96 }
97
98 result = ps3_mmio_region_create(dev->m_region);
99
100 if (result) {
101 dev_dbg(&dev->core, "%s:%d: ps3_map_mmio_region failed\n",
102 __func__, __LINE__);
103 result = -EPERM;
104 goto fail_mmio;
105 }
106
107 dev_dbg(&dev->core, "%s:%d: mmio mapped_addr %lxh\n", __func__,
108 __LINE__, dev->m_region->lpar_addr);
109
110 result = ps3_alloc_io_irq(PS3_BINDING_CPU_ANY, dev->interrupt_id, &virq);
111
112 if (result) {
113 dev_dbg(&dev->core, "%s:%d: ps3_construct_io_irq(%d) failed.\n",
114 __func__, __LINE__, virq);
115 result = -EPERM;
116 goto fail_irq;
117 }
118
119 dev->core.power.power_state = PMSG_ON;
120 dev->core.dma_mask = &dummy_mask; /* FIXME: for improper usb code */
121
122 hcd = usb_create_hcd(&ps3_ohci_hc_driver, &dev->core, dev->core.bus_id);
123
124 if (!hcd) {
125 dev_dbg(&dev->core, "%s:%d: usb_create_hcd failed\n", __func__,
126 __LINE__);
127 result = -ENOMEM;
128 goto fail_create_hcd;
129 }
130
131 hcd->rsrc_start = dev->m_region->lpar_addr;
132 hcd->rsrc_len = dev->m_region->len;
133 hcd->regs = ioremap(dev->m_region->lpar_addr, dev->m_region->len);
134
135 if (!hcd->regs) {
136 dev_dbg(&dev->core, "%s:%d: ioremap failed\n", __func__,
137 __LINE__);
138 result = -EPERM;
139 goto fail_ioremap;
140 }
141
142 dev_dbg(&dev->core, "%s:%d: hcd->rsrc_start %lxh\n", __func__, __LINE__,
143 (unsigned long)hcd->rsrc_start);
144 dev_dbg(&dev->core, "%s:%d: hcd->rsrc_len %lxh\n", __func__, __LINE__,
145 (unsigned long)hcd->rsrc_len);
146 dev_dbg(&dev->core, "%s:%d: hcd->regs %lxh\n", __func__, __LINE__,
147 (unsigned long)hcd->regs);
148 dev_dbg(&dev->core, "%s:%d: virq %lu\n", __func__, __LINE__,
149 (unsigned long)virq);
150
151 ps3_system_bus_set_driver_data(dev, hcd);
152
153 result = usb_add_hcd(hcd, virq, IRQF_DISABLED);
154
155 if (result) {
156 dev_dbg(&dev->core, "%s:%d: usb_add_hcd failed (%d)\n",
157 __func__, __LINE__, result);
158 goto fail_add_hcd;
159 }
160
161 return result;
162
163fail_add_hcd:
164 iounmap(hcd->regs);
165fail_ioremap:
166 usb_put_hcd(hcd);
167fail_create_hcd:
168 ps3_free_io_irq(virq);
169fail_irq:
170 ps3_free_mmio_region(dev->m_region);
171fail_mmio:
172fail_start:
173 return result;
174}
175
176static int ps3_ohci_sb_remove (struct ps3_system_bus_device *dev)
177{
178 struct usb_hcd *hcd =
179 (struct usb_hcd *)ps3_system_bus_get_driver_data(dev);
180
181 usb_put_hcd(hcd);
182 ps3_system_bus_set_driver_data(dev, NULL);
183
184 return 0;
185}
186
187MODULE_ALIAS("ps3-ohci");
188
189static struct ps3_system_bus_driver ps3_ohci_sb_driver = {
190 .match_id = PS3_MATCH_ID_OHCI,
191 .core = {
192 .name = "ps3-ohci-driver",
193 },
194 .probe = ps3_ohci_sb_probe,
195 .remove = ps3_ohci_sb_remove,
196};
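
The magic 0x7f000000 written to roothub.a in ps3_ohci_hc_start() encodes PwrOn2PwrGood: in the OHCI HcRhDescriptorA register the POTPGT field occupies bits 31:24 and counts in units of 2 ms, which is where the 254 ms in the comment comes from. A one-line standalone check:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* HcRhDescriptorA (OHCI spec 7.4.1): POTPGT is bits 31:24,
	 * in units of 2 ms. */
	uint32_t rh_a = 0x7f000000;	/* as written by ps3_ohci_hc_start() */
	unsigned potpgt = rh_a >> 24;

	printf("POTPGT = %u units = %u ms\n", potpgt, potpgt * 2);
	return 0;			/* prints 127 units = 254 ms */
}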
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
index 3bbea844a9e3..f1563dc319d3 100644
--- a/drivers/usb/host/ohci-pxa27x.c
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -369,19 +369,3 @@ static struct platform_driver ohci_hcd_pxa27x_driver = {
 	},
 };
 
-static int __init ohci_hcd_pxa27x_init (void)
-{
-	pr_debug (DRIVER_INFO " (pxa27x)");
-	pr_debug ("block sizes: ed %d td %d\n",
-		sizeof (struct ed), sizeof (struct td));
-
-	return platform_driver_register(&ohci_hcd_pxa27x_driver);
-}
-
-static void __exit ohci_hcd_pxa27x_cleanup (void)
-{
-	platform_driver_unregister(&ohci_hcd_pxa27x_driver);
-}
-
-module_init (ohci_hcd_pxa27x_init);
-module_exit (ohci_hcd_pxa27x_cleanup);
diff --git a/drivers/usb/host/ohci-s3c2410.c b/drivers/usb/host/ohci-s3c2410.c
index b350d45033e7..6829814b7aaf 100644
--- a/drivers/usb/host/ohci-s3c2410.c
+++ b/drivers/usb/host/ohci-s3c2410.c
@@ -501,15 +501,3 @@ static struct platform_driver ohci_hcd_s3c2410_driver = {
 	},
 };
 
-static int __init ohci_hcd_s3c2410_init (void)
-{
-	return platform_driver_register(&ohci_hcd_s3c2410_driver);
-}
-
-static void __exit ohci_hcd_s3c2410_cleanup (void)
-{
-	platform_driver_unregister(&ohci_hcd_s3c2410_driver);
-}
-
-module_init (ohci_hcd_s3c2410_init);
-module_exit (ohci_hcd_s3c2410_cleanup);
diff --git a/drivers/usb/host/ohci-sa1111.c b/drivers/usb/host/ohci-sa1111.c
index fe0090e33675..0f48f2d99226 100644
--- a/drivers/usb/host/ohci-sa1111.c
+++ b/drivers/usb/host/ohci-sa1111.c
@@ -269,19 +269,3 @@ static struct sa1111_driver ohci_hcd_sa1111_driver = {
 	.remove		= ohci_hcd_sa1111_drv_remove,
 };
 
-static int __init ohci_hcd_sa1111_init (void)
-{
-	dbg (DRIVER_INFO " (SA-1111)");
-	dbg ("block sizes: ed %d td %d",
-		sizeof (struct ed), sizeof (struct td));
-
-	return sa1111_driver_register(&ohci_hcd_sa1111_driver);
-}
-
-static void __exit ohci_hcd_sa1111_cleanup (void)
-{
-	sa1111_driver_unregister(&ohci_hcd_sa1111_driver);
-}
-
-module_init (ohci_hcd_sa1111_init);
-module_exit (ohci_hcd_sa1111_cleanup);
diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h
index 405257f3e853..c2b5ecfe5e9f 100644
--- a/drivers/usb/host/ohci.h
+++ b/drivers/usb/host/ohci.h
@@ -394,8 +394,9 @@ struct ohci_hcd {
 #define	OHCI_QUIRK_AMD756	0x01		/* erratum #4 */
 #define	OHCI_QUIRK_SUPERIO	0x02		/* natsemi */
 #define	OHCI_QUIRK_INITRESET	0x04		/* SiS, OPTi, ... */
-#define	OHCI_BIG_ENDIAN		0x08		/* big endian HC */
-#define	OHCI_QUIRK_ZFMICRO	0x10		/* Compaq ZFMicro chipset*/
+#define	OHCI_QUIRK_BE_DESC	0x08		/* BE descriptors */
+#define	OHCI_QUIRK_BE_MMIO	0x10		/* BE registers */
+#define	OHCI_QUIRK_ZFMICRO	0x20		/* Compaq ZFMicro chipset*/
 	// there are also chip quirks/bugs in init logic
 
 };
@@ -439,117 +440,164 @@ static inline struct usb_hcd *ohci_to_hcd (const struct ohci_hcd *ohci)
  * a minority (notably the IBM STB04XXX and the Motorola MPC5200
  * processors) implement them in big endian format.
  *
+ * In addition some more exotic implementations like the Toshiba
+ * Spider (aka SCC) cell southbridge are "mixed" endian, that is,
+ * they have a different endianness for registers vs. in-memory
+ * descriptors.
+ *
  * This attempts to support either format at compile time without a
  * runtime penalty, or both formats with the additional overhead
  * of checking a flag bit.
+ *
+ * That leads to some tricky Kconfig rules however.  There are
+ * different defaults based on some arch/ppc platforms, though
+ * the basic rules are:
+ *
+ * Controller type		Kconfig options needed
+ * ---------------		----------------------
+ * little endian		CONFIG_USB_OHCI_LITTLE_ENDIAN
+ *
+ * fully big endian		CONFIG_USB_OHCI_BIG_ENDIAN_DESC _and_
+ *				CONFIG_USB_OHCI_BIG_ENDIAN_MMIO
+ *
+ * mixed endian			CONFIG_USB_OHCI_LITTLE_ENDIAN _and_
+ *				CONFIG_USB_OHCI_BIG_ENDIAN_{MMIO,DESC}
+ *
+ * (If you have a mixed endian controller, you -must- also define
+ * CONFIG_USB_OHCI_LITTLE_ENDIAN or things will not work when building
+ * both your mixed endian and a fully big endian controller support in
+ * the same kernel image).
  */
 
-#ifdef CONFIG_USB_OHCI_BIG_ENDIAN
+#ifdef CONFIG_USB_OHCI_BIG_ENDIAN_DESC
+#ifdef CONFIG_USB_OHCI_LITTLE_ENDIAN
+#define big_endian_desc(ohci)	(ohci->flags & OHCI_QUIRK_BE_DESC)
+#else
+#define big_endian_desc(ohci)	1		/* only big endian */
+#endif
+#else
+#define big_endian_desc(ohci)	0		/* only little endian */
+#endif
 
+#ifdef CONFIG_USB_OHCI_BIG_ENDIAN_MMIO
 #ifdef CONFIG_USB_OHCI_LITTLE_ENDIAN
-#define big_endian(ohci)	(ohci->flags & OHCI_BIG_ENDIAN) /* either */
+#define big_endian_mmio(ohci)	(ohci->flags & OHCI_QUIRK_BE_MMIO)
 #else
-#define big_endian(ohci)	1		/* only big endian */
+#define big_endian_mmio(ohci)	1		/* only big endian */
+#endif
+#else
+#define big_endian_mmio(ohci)	0		/* only little endian */
 #endif
 
 /*
  * Big-endian read/write functions are arch-specific.
  * Other arches can be added if/when they're needed.
+ *
+ * REVISIT: arch/powerpc now has readl/writel_be, so the
+ * definition below can die once the STB04xxx support is
+ * finally ported over.
  */
-#if defined(CONFIG_PPC)
+#if defined(CONFIG_PPC) && !defined(CONFIG_PPC_MERGE)
 #define readl_be(addr)		in_be32((__force unsigned *)addr)
 #define writel_be(val, addr)	out_be32((__force unsigned *)addr, val)
 #endif
 
-static inline unsigned int ohci_readl (const struct ohci_hcd *ohci,
-					__hc32 __iomem * regs)
+static inline unsigned int _ohci_readl (const struct ohci_hcd *ohci,
+					__hc32 __iomem * regs)
 {
-	return big_endian(ohci) ? readl_be (regs) : readl ((__force u32 *)regs);
+#ifdef CONFIG_USB_OHCI_BIG_ENDIAN_MMIO
+	return big_endian_mmio(ohci) ?
+		readl_be (regs) :
+		readl (regs);
+#else
+	return readl (regs);
+#endif
 }
 
-static inline void ohci_writel (const struct ohci_hcd *ohci,
-				const unsigned int val, __hc32 __iomem *regs)
+static inline void _ohci_writel (const struct ohci_hcd *ohci,
+				 const unsigned int val, __hc32 __iomem *regs)
 {
-	big_endian(ohci) ? writel_be (val, regs) :
-			   writel (val, (__force u32 *)regs);
+#ifdef CONFIG_USB_OHCI_BIG_ENDIAN_MMIO
+	big_endian_mmio(ohci) ?
+		writel_be (val, regs) :
+		writel (val, regs);
+#else
+	writel (val, regs);
+#endif
 }
 
-#else /* !CONFIG_USB_OHCI_BIG_ENDIAN */
-
-#define big_endian(ohci)	0		/* only little endian */
-
 #ifdef CONFIG_ARCH_LH7A404
-	/* Marc Singer: at the time this code was written, the LH7A404
-	 * had a problem reading the USB host registers.  This
-	 * implementation of the ohci_readl function performs the read
-	 * twice as a work-around.
-	 */
-static inline unsigned int
-ohci_readl (const struct ohci_hcd *ohci, const __hc32 *regs)
-{
-	*(volatile __force unsigned int*) regs;
-	return *(volatile __force unsigned int*) regs;
-}
+/* Marc Singer: at the time this code was written, the LH7A404
+ * had a problem reading the USB host registers.  This
+ * implementation of the ohci_readl function performs the read
+ * twice as a work-around.
+ */
+#define ohci_readl(o,r)		(_ohci_readl(o,r),_ohci_readl(o,r))
+#define ohci_writel(o,v,r)	_ohci_writel(o,v,r)
 #else
-	/* Standard version of ohci_readl uses standard, platform
-	 * specific implementation. */
-static inline unsigned int
-ohci_readl (const struct ohci_hcd *ohci, __hc32 __iomem * regs)
-{
-	return readl(regs);
-}
+#define ohci_readl(o,r)		_ohci_readl(o,r)
+#define ohci_writel(o,v,r)	_ohci_writel(o,v,r)
 #endif
 
-static inline void ohci_writel (const struct ohci_hcd *ohci,
-				const unsigned int val, __hc32 __iomem *regs)
-{
-	writel (val, regs);
-}
-
-#endif /* !CONFIG_USB_OHCI_BIG_ENDIAN */
 
 /*-------------------------------------------------------------------------*/
 
 /* cpu to ohci */
 static inline __hc16 cpu_to_hc16 (const struct ohci_hcd *ohci, const u16 x)
 {
-	return big_endian(ohci) ? (__force __hc16)cpu_to_be16(x) : (__force __hc16)cpu_to_le16(x);
+	return big_endian_desc(ohci) ?
+		(__force __hc16)cpu_to_be16(x) :
+		(__force __hc16)cpu_to_le16(x);
 }
 
 static inline __hc16 cpu_to_hc16p (const struct ohci_hcd *ohci, const u16 *x)
 {
-	return big_endian(ohci) ? cpu_to_be16p(x) : cpu_to_le16p(x);
+	return big_endian_desc(ohci) ?
+		cpu_to_be16p(x) :
+		cpu_to_le16p(x);
 }
 
 static inline __hc32 cpu_to_hc32 (const struct ohci_hcd *ohci, const u32 x)
 {
-	return big_endian(ohci) ? (__force __hc32)cpu_to_be32(x) : (__force __hc32)cpu_to_le32(x);
+	return big_endian_desc(ohci) ?
+		(__force __hc32)cpu_to_be32(x) :
+		(__force __hc32)cpu_to_le32(x);
 }
 
 static inline __hc32 cpu_to_hc32p (const struct ohci_hcd *ohci, const u32 *x)
 {
-	return big_endian(ohci) ? cpu_to_be32p(x) : cpu_to_le32p(x);
+	return big_endian_desc(ohci) ?
+		cpu_to_be32p(x) :
+		cpu_to_le32p(x);
 }
 
 /* ohci to cpu */
 static inline u16 hc16_to_cpu (const struct ohci_hcd *ohci, const __hc16 x)
 {
-	return big_endian(ohci) ? be16_to_cpu((__force __be16)x) : le16_to_cpu((__force __le16)x);
+	return big_endian_desc(ohci) ?
+		be16_to_cpu((__force __be16)x) :
+		le16_to_cpu((__force __le16)x);
 }
 
 static inline u16 hc16_to_cpup (const struct ohci_hcd *ohci, const __hc16 *x)
 {
-	return big_endian(ohci) ? be16_to_cpup((__force __be16 *)x) : le16_to_cpup((__force __le16 *)x);
+	return big_endian_desc(ohci) ?
+		be16_to_cpup((__force __be16 *)x) :
+		le16_to_cpup((__force __le16 *)x);
 }
 
 static inline u32 hc32_to_cpu (const struct ohci_hcd *ohci, const __hc32 x)
 {
-	return big_endian(ohci) ? be32_to_cpu((__force __be32)x) : le32_to_cpu((__force __le32)x);
+	return big_endian_desc(ohci) ?
+		be32_to_cpu((__force __be32)x) :
+		le32_to_cpu((__force __le32)x);
 }
 
 static inline u32 hc32_to_cpup (const struct ohci_hcd *ohci, const __hc32 *x)
 {
-	return big_endian(ohci) ? be32_to_cpup((__force __be32 *)x) : le32_to_cpup((__force __le32 *)x);
+	return big_endian_desc(ohci) ?
+		be32_to_cpup((__force __be32 *)x) :
+		le32_to_cpup((__force __le32 *)x);
 }
 
 /*-------------------------------------------------------------------------*/
@@ -557,6 +605,9 @@ static inline u32 hc32_to_cpup (const struct ohci_hcd *ohci, const __hc32 *x)
 /* HCCA frame number is 16 bits, but is accessed as 32 bits since not all
  * hardware handles 16 bit reads.  That creates a different confusion on
  * some big-endian SOC implementations.  Same thing happens with PSW access.
+ *
+ * FIXME: Deal with that as a runtime quirk when STB03xxx is ported over
+ * to arch/powerpc
  */
 
 #ifdef CONFIG_STB03xxx
@@ -568,7 +619,7 @@ static inline u32 hc32_to_cpup (const struct ohci_hcd *ohci, const __hc32 *x)
 static inline u16 ohci_frame_no(const struct ohci_hcd *ohci)
 {
 	u32 tmp;
-	if (big_endian(ohci)) {
+	if (big_endian_desc(ohci)) {
 		tmp = be32_to_cpup((__force __be32 *)&ohci->hcca->frame_no);
 		tmp >>= OHCI_BE_FRAME_NO_SHIFT;
 	} else
@@ -580,7 +631,7 @@ static inline u16 ohci_frame_no(const struct ohci_hcd *ohci)
 static inline __hc16 *ohci_hwPSWp(const struct ohci_hcd *ohci,
 		const struct td *td, int index)
 {
-	return (__hc16 *)(big_endian(ohci) ?
+	return (__hc16 *)(big_endian_desc(ohci) ?
 			&td->hwPSW[index ^ 1] : &td->hwPSW[index]);
 }
 
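
The reworked header above splits the old single big_endian() test into independent descriptor and MMIO selectors so that mixed-endian controllers like the Toshiba SCC work, while the macros still fold to constants when only one format is configured in. A self-contained sketch of the same selection scheme; SUPPORT_BE_DESC and SUPPORT_LE_DESC are hypothetical stand-ins for the Kconfig symbols, and a little-endian host is assumed, so "big endian descriptor" simply means byte-swapped.

#include <stdio.h>
#include <stdint.h>

#define QUIRK_BE_DESC 0x08

struct hc { unsigned flags; };

static uint32_t swab32(uint32_t x)
{
	return (x >> 24) | ((x >> 8) & 0xff00) |
	       ((x << 8) & 0xff0000) | (x << 24);
}

/* Mirrors big_endian_desc(): with both formats compiled in, a flag bit
 * picks the byte order per controller at run time; with only one
 * format, the test degenerates to a compile-time constant. */
#if defined(SUPPORT_BE_DESC) && defined(SUPPORT_LE_DESC)
#define big_endian_desc(hc)	((hc)->flags & QUIRK_BE_DESC)
#elif defined(SUPPORT_BE_DESC)
#define big_endian_desc(hc)	1
#else
#define big_endian_desc(hc)	0
#endif

static uint32_t cpu_to_hc32(const struct hc *hc, uint32_t x)
{
	return big_endian_desc(hc) ? swab32(x) : x;
}

int main(void)
{
	struct hc mixed = { QUIRK_BE_DESC }, plain = { 0 };

	/* With -DSUPPORT_BE_DESC -DSUPPORT_LE_DESC this prints
	 * 78563412 12345678; with neither, both stay unswapped. */
	printf("%08x %08x\n", cpu_to_hc32(&mixed, 0x12345678),
	       cpu_to_hc32(&plain, 0x12345678));
	return 0;
}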
diff --git a/drivers/usb/host/uhci-debug.c b/drivers/usb/host/uhci-debug.c
index e345f15b7d87..5d6c06bc4524 100644
--- a/drivers/usb/host/uhci-debug.c
+++ b/drivers/usb/host/uhci-debug.c
@@ -168,9 +168,13 @@ static int uhci_show_qh(struct uhci_qh *qh, char *buf, int len, int space)
 			space, "", qh, qtype,
 			le32_to_cpu(qh->link), le32_to_cpu(element));
 	if (qh->type == USB_ENDPOINT_XFER_ISOC)
-		out += sprintf(out, "%*s    period %d frame %x desc [%p]\n",
-				space, "", qh->period, qh->iso_frame,
-				qh->iso_packet_desc);
+		out += sprintf(out, "%*s    period %d phase %d load %d us, "
+				"frame %x desc [%p]\n",
+				space, "", qh->period, qh->phase, qh->load,
+				qh->iso_frame, qh->iso_packet_desc);
+	else if (qh->type == USB_ENDPOINT_XFER_INT)
+		out += sprintf(out, "%*s    period %d phase %d load %d us\n",
+				space, "", qh->period, qh->phase, qh->load);
 
 	if (element & UHCI_PTR_QH)
 		out += sprintf(out, "%*s  Element points to QH (bug?)\n", space, "");
@@ -208,7 +212,7 @@ static int uhci_show_qh(struct uhci_qh *qh, char *buf, int len, int space)
 				space, "", nurbs);
 	}
 
-	if (qh->udev) {
+	if (qh->dummy_td) {
 		out += sprintf(out, "%*s  Dummy TD\n", space, "");
 		out += uhci_show_td(qh->dummy_td, out, len - (out - buf), 0);
 	}
@@ -347,31 +351,80 @@ static int uhci_sprint_schedule(struct uhci_hcd *uhci, char *buf, int len)
 	struct uhci_qh *qh;
 	struct uhci_td *td;
 	struct list_head *tmp, *head;
+	int nframes, nerrs;
 
 	out += uhci_show_root_hub_state(uhci, out, len - (out - buf));
 	out += sprintf(out, "HC status\n");
 	out += uhci_show_status(uhci, out, len - (out - buf));
+
+	out += sprintf(out, "Periodic load table\n");
+	for (i = 0; i < MAX_PHASE; ++i) {
+		out += sprintf(out, "\t%d", uhci->load[i]);
+		if (i % 8 == 7)
+			*out++ = '\n';
+	}
+	out += sprintf(out, "Total: %d, #INT: %d, #ISO: %d\n",
+			uhci->total_load,
+			uhci_to_hcd(uhci)->self.bandwidth_int_reqs,
+			uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs);
 	if (debug <= 1)
 		return out - buf;
 
 	out += sprintf(out, "Frame List\n");
+	nframes = 10;
+	nerrs = 0;
 	for (i = 0; i < UHCI_NUMFRAMES; ++i) {
+		__le32 link, qh_dma;
+
+		j = 0;
 		td = uhci->frame_cpu[i];
+		link = uhci->frame[i];
 		if (!td)
-			continue;
+			goto check_link;
 
-		out += sprintf(out, "- Frame %d\n", i); \
-		if (td->dma_handle != (dma_addr_t)uhci->frame[i])
-			out += sprintf(out, "    frame list does not match td->dma_handle!\n");
+		if (nframes > 0) {
+			out += sprintf(out, "- Frame %d -> (%08x)\n",
+					i, le32_to_cpu(link));
+			j = 1;
+		}
 
 		head = &td->fl_list;
 		tmp = head;
 		do {
 			td = list_entry(tmp, struct uhci_td, fl_list);
 			tmp = tmp->next;
-			out += uhci_show_td(td, out, len - (out - buf), 4);
+			if (cpu_to_le32(td->dma_handle) != link) {
+				if (nframes > 0)
+					out += sprintf(out, "    link does "
+						"not match list entry!\n");
+				else
+					++nerrs;
+			}
+			if (nframes > 0)
+				out += uhci_show_td(td, out,
						len - (out - buf), 4);
+			link = td->link;
 		} while (tmp != head);
+
+check_link:
+		qh_dma = uhci_frame_skel_link(uhci, i);
+		if (link != qh_dma) {
+			if (nframes > 0) {
+				if (!j) {
+					out += sprintf(out,
+						"- Frame %d -> (%08x)\n",
+						i, le32_to_cpu(link));
+					j = 1;
+				}
+				out += sprintf(out, "   link does not match "
+					"QH (%08x)!\n", le32_to_cpu(qh_dma));
+			} else
+				++nerrs;
+		}
+		nframes -= j;
 	}
+	if (nerrs > 0)
+		out += sprintf(out, "Skipped %d bad links\n", nerrs);
 
 	out += sprintf(out, "Skeleton QHs\n");
 
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index e0d4c2358b39..49b9d390b95f 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -92,6 +92,34 @@ static void suspend_rh(struct uhci_hcd *uhci, enum uhci_rh_state new_state);
 static void wakeup_rh(struct uhci_hcd *uhci);
 static void uhci_get_current_frame_number(struct uhci_hcd *uhci);
 
+/*
+ * Calculate the link pointer DMA value for the first Skeleton QH in a frame.
+ */
+static __le32 uhci_frame_skel_link(struct uhci_hcd *uhci, int frame)
+{
+	int skelnum;
+
+	/*
+	 * The interrupt queues will be interleaved as evenly as possible.
+	 * There's not much to be done about period-1 interrupts; they have
+	 * to occur in every frame.  But we can schedule period-2 interrupts
+	 * in odd-numbered frames, period-4 interrupts in frames congruent
+	 * to 2 (mod 4), and so on.  This way each frame only has two
+	 * interrupt QHs, which will help spread out bandwidth utilization.
+	 *
+	 * ffs (Find First bit Set) does exactly what we need:
+	 * 1,3,5,...  => ffs = 0 => use skel_int2_qh = skelqh[8],
	 * 2,6,10,... => ffs = 1 => use skel_int4_qh = skelqh[7], etc.
+	 * ffs >= 7 => not on any high-period queue, so use
+	 * skel_int1_qh = skelqh[9].
+	 * Add in UHCI_NUMFRAMES to ensure at least one bit is set.
+	 */
+	skelnum = 8 - (int) __ffs(frame | UHCI_NUMFRAMES);
+	if (skelnum <= 1)
+		skelnum = 9;
+	return UHCI_PTR_QH | cpu_to_le32(uhci->skelqh[skelnum]->dma_handle);
+}
+
 #include "uhci-debug.c"
 #include "uhci-q.c"
 #include "uhci-hub.c"
@@ -631,32 +659,11 @@ static int uhci_start(struct usb_hcd *hcd)
 	/*
 	 * Fill the frame list: make all entries point to the proper
 	 * interrupt queue.
-	 *
-	 * The interrupt queues will be interleaved as evenly as possible.
-	 * There's not much to be done about period-1 interrupts; they have
-	 * to occur in every frame.  But we can schedule period-2 interrupts
-	 * in odd-numbered frames, period-4 interrupts in frames congruent
-	 * to 2 (mod 4), and so on.  This way each frame only has two
-	 * interrupt QHs, which will help spread out bandwidth utilization.
 	 */
 	for (i = 0; i < UHCI_NUMFRAMES; i++) {
-		int irq;
-
-		/*
-		 * ffs (Find First bit Set) does exactly what we need:
-		 * 1,3,5,...  => ffs = 0 => use skel_int2_qh = skelqh[8],
-		 * 2,6,10,... => ffs = 1 => use skel_int4_qh = skelqh[7], etc.
-		 * ffs >= 7 => not on any high-period queue, so use
-		 * skel_int1_qh = skelqh[9].
-		 * Add UHCI_NUMFRAMES to insure at least one bit is set.
-		 */
-		irq = 8 - (int) __ffs(i + UHCI_NUMFRAMES);
-		if (irq <= 1)
-			irq = 9;
 
 		/* Only place we don't use the frame list routines */
-		uhci->frame[i] = UHCI_PTR_QH |
-			cpu_to_le32(uhci->skelqh[irq]->dma_handle);
+		uhci->frame[i] = uhci_frame_skel_link(uhci, i);
 	}
 
 	/*
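
uhci_frame_skel_link() above picks an interrupt skeleton QH so that period-2^k queues land only in frames congruent to 2^(k-1) mod 2^k. The arithmetic can be checked in isolation; note that POSIX ffs() is 1-based where the kernel's __ffs() is 0-based, hence 9 minus rather than 8 minus in this sketch.

#include <stdio.h>
#include <strings.h>	/* ffs() */

#define UHCI_NUMFRAMES 1024

int main(void)
{
	int frame;

	for (frame = 0; frame < 12; frame++) {
		int skelnum = 9 - ffs(frame | UHCI_NUMFRAMES);
		if (skelnum <= 1)
			skelnum = 9;	/* period 1: every frame */
		printf("frame %2d -> skelqh[%d]\n", frame, skelnum);
	}
	/* Odd frames map to skelqh[8] (period 2), frames 2 mod 4 to
	 * skelqh[7] (period 4), frame 0 falls through to skelqh[9]. */
	return 0;
}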
diff --git a/drivers/usb/host/uhci-hcd.h b/drivers/usb/host/uhci-hcd.h
index 108e3de2dc26..74469b5bcb61 100644
--- a/drivers/usb/host/uhci-hcd.h
+++ b/drivers/usb/host/uhci-hcd.h
@@ -83,6 +83,7 @@
 #define UHCI_MAX_SOF_NUMBER	2047	/* in an SOF packet */
 #define CAN_SCHEDULE_FRAMES	1000	/* how far in the future frames
 					 * can be scheduled */
+#define MAX_PHASE		32	/* Periodic scheduling length */
 
 /* When no queues need Full-Speed Bandwidth Reclamation,
  * delay this long before turning FSBR off */
@@ -141,6 +142,8 @@ struct uhci_qh {
 	unsigned long advance_jiffies;	/* Time of last queue advance */
 	unsigned int unlink_frame;	/* When the QH was unlinked */
 	unsigned int period;		/* For Interrupt and Isochronous QHs */
+	short phase;			/* Between 0 and period-1 */
+	short load;			/* Periodic time requirement, in us */
 	unsigned int iso_frame;		/* Frame # for iso_packet_desc */
 	int iso_status;			/* Status for Isochronous URBs */
 
@@ -153,6 +156,8 @@ struct uhci_qh {
 	unsigned int needs_fixup:1;	/* Must fix the TD toggle values */
 	unsigned int is_stopped:1;	/* Queue was stopped by error/unlink */
 	unsigned int wait_expired:1;	/* QH_WAIT_TIMEOUT has expired */
+	unsigned int bandwidth_reserved:1;	/* Periodic bandwidth has
+						 * been allocated */
 } __attribute__((aligned(16)));
 
 /*
@@ -414,6 +419,9 @@ struct uhci_hcd {
 
 	wait_queue_head_t waitqh;	/* endpoint_disable waiters */
 	int num_waiting;		/* Number of waiters */
+
+	int total_load;			/* Sum of array values */
+	short load[MAX_PHASE];		/* Periodic allocations */
 };
 
 /* Convert between a usb_hcd pointer and the corresponding uhci_hcd */
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c
index 30b88459ac7d..2cbb239e63f8 100644
--- a/drivers/usb/host/uhci-q.c
+++ b/drivers/usb/host/uhci-q.c
@@ -248,16 +248,26 @@ static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci,
 	INIT_LIST_HEAD(&qh->node);
 
 	if (udev) {		/* Normal QH */
-		qh->dummy_td = uhci_alloc_td(uhci);
-		if (!qh->dummy_td) {
-			dma_pool_free(uhci->qh_pool, qh, dma_handle);
-			return NULL;
+		qh->type = hep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+		if (qh->type != USB_ENDPOINT_XFER_ISOC) {
+			qh->dummy_td = uhci_alloc_td(uhci);
+			if (!qh->dummy_td) {
+				dma_pool_free(uhci->qh_pool, qh, dma_handle);
+				return NULL;
+			}
 		}
 		qh->state = QH_STATE_IDLE;
 		qh->hep = hep;
 		qh->udev = udev;
 		hep->hcpriv = qh;
-		qh->type = hep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+
+		if (qh->type == USB_ENDPOINT_XFER_INT ||
+				qh->type == USB_ENDPOINT_XFER_ISOC)
+			qh->load = usb_calc_bus_time(udev->speed,
+					usb_endpoint_dir_in(&hep->desc),
+					qh->type == USB_ENDPOINT_XFER_ISOC,
+					le16_to_cpu(hep->desc.wMaxPacketSize))
+				/ 1000 + 1;
 
 	} else {		/* Skeleton QH */
 		qh->state = QH_STATE_ACTIVE;
@@ -275,7 +285,8 @@ static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
 	list_del(&qh->node);
 	if (qh->udev) {
 		qh->hep->hcpriv = NULL;
-		uhci_free_td(uhci, qh->dummy_td);
+		if (qh->dummy_td)
+			uhci_free_td(uhci, qh->dummy_td);
 	}
 	dma_pool_free(uhci->qh_pool, qh, qh->dma_handle);
 }
@@ -327,7 +338,7 @@ static int uhci_cleanup_queue(struct uhci_hcd *uhci, struct uhci_qh *qh,
 		goto done;
 	qh->element = UHCI_PTR_TERM;
 
-	/* Control pipes have to worry about toggles */
+	/* Control pipes don't have to worry about toggles */
 	if (qh->type == USB_ENDPOINT_XFER_CONTROL)
 		goto done;
 
@@ -493,6 +504,121 @@ static void uhci_make_qh_idle(struct uhci_hcd *uhci, struct uhci_qh *qh)
 	wake_up_all(&uhci->waitqh);
 }
 
+/*
+ * Find the highest existing bandwidth load for a given phase and period.
+ */
+static int uhci_highest_load(struct uhci_hcd *uhci, int phase, int period)
+{
+	int highest_load = uhci->load[phase];
+
+	for (phase += period; phase < MAX_PHASE; phase += period)
+		highest_load = max_t(int, highest_load, uhci->load[phase]);
+	return highest_load;
+}
+
+/*
+ * Set qh->phase to the optimal phase for a periodic transfer and
+ * check whether the bandwidth requirement is acceptable.
+ */
+static int uhci_check_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
+	int minimax_load;
+
+	/* Find the optimal phase (unless it is already set) and get
+	 * its load value. */
+	if (qh->phase >= 0)
+		minimax_load = uhci_highest_load(uhci, qh->phase, qh->period);
+	else {
+		int phase, load;
+		int max_phase = min_t(int, MAX_PHASE, qh->period);
+
+		qh->phase = 0;
+		minimax_load = uhci_highest_load(uhci, qh->phase, qh->period);
+		for (phase = 1; phase < max_phase; ++phase) {
+			load = uhci_highest_load(uhci, phase, qh->period);
+			if (load < minimax_load) {
+				minimax_load = load;
+				qh->phase = phase;
+			}
+		}
+	}
+
+	/* Maximum allowable periodic bandwidth is 90%, or 900 us per frame */
+	if (minimax_load + qh->load > 900) {
+		dev_dbg(uhci_dev(uhci), "bandwidth allocation failed: "
+				"period %d, phase %d, %d + %d us\n",
+				qh->period, qh->phase, minimax_load, qh->load);
+		return -ENOSPC;
+	}
+	return 0;
+}
+
+/*
+ * Reserve a periodic QH's bandwidth in the schedule
+ */
+static void uhci_reserve_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
+	int i;
+	int load = qh->load;
+	char *p = "??";
+
+	for (i = qh->phase; i < MAX_PHASE; i += qh->period) {
+		uhci->load[i] += load;
+		uhci->total_load += load;
+	}
+	uhci_to_hcd(uhci)->self.bandwidth_allocated =
+			uhci->total_load / MAX_PHASE;
+	switch (qh->type) {
+	case USB_ENDPOINT_XFER_INT:
+		++uhci_to_hcd(uhci)->self.bandwidth_int_reqs;
+		p = "INT";
+		break;
+	case USB_ENDPOINT_XFER_ISOC:
+		++uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs;
+		p = "ISO";
+		break;
+	}
+	qh->bandwidth_reserved = 1;
+	dev_dbg(uhci_dev(uhci),
+			"%s dev %d ep%02x-%s, period %d, phase %d, %d us\n",
+			"reserve", qh->udev->devnum,
+			qh->hep->desc.bEndpointAddress, p,
+			qh->period, qh->phase, load);
+}
+
+/*
+ * Release a periodic QH's bandwidth reservation
+ */
+static void uhci_release_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
+{
+	int i;
+	int load = qh->load;
+	char *p = "??";
+
+	for (i = qh->phase; i < MAX_PHASE; i += qh->period) {
+		uhci->load[i] -= load;
+		uhci->total_load -= load;
+	}
+	uhci_to_hcd(uhci)->self.bandwidth_allocated =
+			uhci->total_load / MAX_PHASE;
+	switch (qh->type) {
+	case USB_ENDPOINT_XFER_INT:
+		--uhci_to_hcd(uhci)->self.bandwidth_int_reqs;
+		p = "INT";
+		break;
+	case USB_ENDPOINT_XFER_ISOC:
+		--uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs;
+		p = "ISO";
+		break;
+	}
+	qh->bandwidth_reserved = 0;
+	dev_dbg(uhci_dev(uhci),
+			"%s dev %d ep%02x-%s, period %d, phase %d, %d us\n",
+			"release", qh->udev->devnum,
+			qh->hep->desc.bEndpointAddress, p,
+			qh->period, qh->phase, load);
+}
+
 static inline struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci,
 		struct urb *urb)
 {
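
uhci_check_bandwidth() above picks the phase whose most-loaded frame is least loaded (a minimax), then applies the USB 1.1 rule that periodic transfers may claim at most 900 of the 1000 us in each frame. A standalone sketch of the search with one hypothetical pre-existing reservation:

#include <stdio.h>

#define MAX_PHASE 32

static int load[MAX_PHASE];	/* us already reserved per frame slot */

/* Worst-case load over the frames a (phase, period) pair would hit;
 * mirrors uhci_highest_load(). */
static int highest_load(int phase, int period)
{
	int worst = load[phase];

	for (phase += period; phase < MAX_PHASE; phase += period)
		if (load[phase] > worst)
			worst = load[phase];
	return worst;
}

int main(void)
{
	int period = 8, qh_load = 300;	/* hypothetical 300 us, period-8 QH */
	int phase, best = 0, minimax;

	load[0] = 700;			/* pretend phase 0 is already busy */

	minimax = highest_load(0, period);
	for (phase = 1; phase < period; phase++) {
		int worst = highest_load(phase, period);
		if (worst < minimax) {
			minimax = worst;
			best = phase;
		}
	}

	if (minimax + qh_load > 900)	/* the 90% rule */
		printf("rejected: would exceed 900 us\n");
	else
		printf("phase %d, worst-case %d us after reservation\n",
		       best, minimax + qh_load);
	return 0;	/* picks phase 1, avoiding the busy phase 0 */
}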
@@ -796,7 +922,6 @@ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
 	wmb();
 	qh->dummy_td->status |= __constant_cpu_to_le32(TD_CTRL_ACTIVE);
 	qh->dummy_td = td;
-	qh->period = urb->interval;
 
 	usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
 			usb_pipeout(urb->pipe), toggle);
@@ -827,28 +952,42 @@ static inline int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb,
 static int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb,
 		struct uhci_qh *qh)
 {
-	int exponent;
+	int ret;
 
 	/* USB 1.1 interrupt transfers only involve one packet per interval.
 	 * Drivers can submit URBs of any length, but longer ones will need
 	 * multiple intervals to complete.
 	 */
 
-	/* Figure out which power-of-two queue to use */
-	for (exponent = 7; exponent >= 0; --exponent) {
-		if ((1 << exponent) <= urb->interval)
-			break;
-	}
-	if (exponent < 0)
-		return -EINVAL;
-	urb->interval = 1 << exponent;
+	if (!qh->bandwidth_reserved) {
+		int exponent;
 
-	if (qh->period == 0)
+		/* Figure out which power-of-two queue to use */
+		for (exponent = 7; exponent >= 0; --exponent) {
+			if ((1 << exponent) <= urb->interval)
+				break;
+		}
+		if (exponent < 0)
+			return -EINVAL;
+		qh->period = 1 << exponent;
 		qh->skel = uhci->skelqh[UHCI_SKEL_INDEX(exponent)];
-	else if (qh->period != urb->interval)
-		return -EINVAL;		/* Can't change the period */
 
-	return uhci_submit_common(uhci, urb, qh);
+		/* For now, interrupt phase is fixed by the layout
+		 * of the QH lists. */
+		qh->phase = (qh->period / 2) & (MAX_PHASE - 1);
+		ret = uhci_check_bandwidth(uhci, qh);
+		if (ret)
+			return ret;
+	} else if (qh->period > urb->interval)
+		return -EINVAL;		/* Can't decrease the period */
+
+	ret = uhci_submit_common(uhci, urb, qh);
+	if (ret == 0) {
+		urb->interval = qh->period;
+		if (!qh->bandwidth_reserved)
+			uhci_reserve_bandwidth(uhci, qh);
+	}
+	return ret;
 }
 
 /*
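
uhci_submit_interrupt() above rounds the requested interval down to a power of two, because each interrupt skeleton queue runs at a power-of-two period. A quick standalone check of the rounding:

#include <stdio.h>

/* Same rounding as uhci_submit_interrupt(): qh->period becomes the
 * largest power of two not exceeding the requested interval. */
static int round_period(int interval)
{
	int exponent;

	for (exponent = 7; exponent >= 0; --exponent)
		if ((1 << exponent) <= interval)
			break;
	return exponent < 0 ? -1 : 1 << exponent;
}

int main(void)
{
	int samples[] = { 1, 7, 8, 10, 200 };
	unsigned i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("interval %3d -> period %3d\n",
		       samples[i], round_period(samples[i]));
	return 0;	/* 1->1, 7->4, 8->8, 10->8, 200->128 */
}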
@@ -995,15 +1134,32 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
 		return -EFBIG;
 
 	/* Check the period and figure out the starting frame number */
-	if (qh->period == 0) {
+	if (!qh->bandwidth_reserved) {
+		qh->period = urb->interval;
 		if (urb->transfer_flags & URB_ISO_ASAP) {
+			qh->phase = -1;		/* Find the best phase */
+			i = uhci_check_bandwidth(uhci, qh);
+			if (i)
+				return i;
+
+			/* Allow a little time to allocate the TDs */
 			uhci_get_current_frame_number(uhci);
-			urb->start_frame = uhci->frame_number + 10;
+			frame = uhci->frame_number + 10;
+
+			/* Move forward to the first frame having the
+			 * correct phase */
+			urb->start_frame = frame + ((qh->phase - frame) &
+					(qh->period - 1));
 		} else {
 			i = urb->start_frame - uhci->last_iso_frame;
 			if (i <= 0 || i >= UHCI_NUMFRAMES)
 				return -EINVAL;
+			qh->phase = urb->start_frame & (qh->period - 1);
+			i = uhci_check_bandwidth(uhci, qh);
+			if (i)
+				return i;
 		}
+
 	} else if (qh->period != urb->interval) {
 		return -EINVAL;		/* Can't change the period */
 
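
The URB_ISO_ASAP branch above lands the first TD on a frame with the right phase using a mask, which works because qh->period is always a power of two. Worked through with hypothetical numbers:

#include <stdio.h>

int main(void)
{
	/* Advance "frame" to the next frame congruent to qh->phase
	 * (mod qh->period); the mask trick needs a power-of-two period. */
	int period = 8, phase = 5;
	int frame = 1034;		/* hypothetical current frame + 10 */
	int start = frame + ((phase - frame) & (period - 1));

	printf("start_frame = %d (mod %d -> %d)\n",
	       start, period, start % period);
	return 0;	/* 1034 + ((5 - 1034) & 7) = 1037; 1037 % 8 == 5 */
}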
@@ -1049,9 +1205,6 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
 	/* Set the interrupt-on-completion flag on the last packet. */
 	td->status |= __constant_cpu_to_le32(TD_CTRL_IOC);
 
-	qh->skel = uhci->skel_iso_qh;
-	qh->period = urb->interval;
-
 	/* Add the TDs to the frame list */
 	frame = urb->start_frame;
 	list_for_each_entry(td, &urbp->td_list, list) {
@@ -1065,6 +1218,9 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
 		qh->iso_status = 0;
 	}
 
+	qh->skel = uhci->skel_iso_qh;
+	if (!qh->bandwidth_reserved)
+		uhci_reserve_bandwidth(uhci, qh);
 	return 0;
 }
 
@@ -1119,7 +1275,6 @@ static int uhci_urb_enqueue(struct usb_hcd *hcd,
 	unsigned long flags;
 	struct urb_priv *urbp;
 	struct uhci_qh *qh;
-	int bustime;
 
 	spin_lock_irqsave(&uhci->lock, flags);
 
@@ -1149,35 +1304,11 @@ static int uhci_urb_enqueue(struct usb_hcd *hcd,
 		ret = uhci_submit_bulk(uhci, urb, qh);
 		break;
 	case USB_ENDPOINT_XFER_INT:
-		if (list_empty(&qh->queue)) {
-			bustime = usb_check_bandwidth(urb->dev, urb);
-			if (bustime < 0)
-				ret = bustime;
-			else {
-				ret = uhci_submit_interrupt(uhci, urb, qh);
-				if (ret == 0)
-					usb_claim_bandwidth(urb->dev, urb, bustime, 0);
-			}
-		} else {	/* inherit from parent */
-			struct urb_priv *eurbp;
-
-			eurbp = list_entry(qh->queue.prev, struct urb_priv,
-					node);
-			urb->bandwidth = eurbp->urb->bandwidth;
-			ret = uhci_submit_interrupt(uhci, urb, qh);
-		}
+		ret = uhci_submit_interrupt(uhci, urb, qh);
 		break;
 	case USB_ENDPOINT_XFER_ISOC:
 		urb->error_count = 0;
-		bustime = usb_check_bandwidth(urb->dev, urb);
-		if (bustime < 0) {
-			ret = bustime;
-			break;
-		}
-
 		ret = uhci_submit_isochronous(uhci, urb, qh);
-		if (ret == 0)
-			usb_claim_bandwidth(urb->dev, urb, bustime, 1);
 		break;
 	}
 	if (ret != 0)
@@ -1274,24 +1405,6 @@ __acquires(uhci->lock)
 
 	uhci_free_urb_priv(uhci, urbp);
 
-	switch (qh->type) {
-	case USB_ENDPOINT_XFER_ISOC:
-		/* Release bandwidth for Interrupt or Isoc. transfers */
-		if (urb->bandwidth)
-			usb_release_bandwidth(urb->dev, urb, 1);
-		break;
-	case USB_ENDPOINT_XFER_INT:
-		/* Release bandwidth for Interrupt or Isoc. transfers */
-		/* Make sure we don't release if we have a queued URB */
-		if (list_empty(&qh->queue) && urb->bandwidth)
-			usb_release_bandwidth(urb->dev, urb, 0);
-		else
-			/* bandwidth was passed on to queued URB, */
-			/* so don't let usb_unlink_urb() release it */
-			urb->bandwidth = 0;
-		break;
-	}
-
 	spin_unlock(&uhci->lock);
 	usb_hcd_giveback_urb(uhci_to_hcd(uhci), urb);
 	spin_lock(&uhci->lock);
@@ -1300,9 +1413,8 @@ __acquires(uhci->lock)
1300 * reserved bandwidth. */ 1413 * reserved bandwidth. */
1301 if (list_empty(&qh->queue)) { 1414 if (list_empty(&qh->queue)) {
1302 uhci_unlink_qh(uhci, qh); 1415 uhci_unlink_qh(uhci, qh);
1303 1416 if (qh->bandwidth_reserved)
1304 /* Bandwidth stuff not yet implemented */ 1417 uhci_release_bandwidth(uhci, qh);
1305 qh->period = 0;
1306 } 1418 }
1307} 1419}
1308 1420
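The uhci-hcd hunks above replace the old per-URB usb_check_bandwidth()/usb_claim_bandwidth()/usb_release_bandwidth() bookkeeping with a per-QH reservation flag. A minimal sketch of the pairing the patch relies on; uhci_reserve_bandwidth() and uhci_release_bandwidth() are introduced elsewhere in this patch, and the helper names here are illustrative only:

	/* Sketch: reserve once per QH on the first periodic submit,
	 * release only when the QH's queue has fully drained. */
	static void example_periodic_submit(struct uhci_hcd *uhci, struct uhci_qh *qh)
	{
		if (!qh->bandwidth_reserved)
			uhci_reserve_bandwidth(uhci, qh);
	}

	static void example_queue_drained(struct uhci_hcd *uhci, struct uhci_qh *qh)
	{
		if (list_empty(&qh->queue) && qh->bandwidth_reserved)
			uhci_release_bandwidth(uhci, qh);
	}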
diff --git a/drivers/usb/image/mdc800.c b/drivers/usb/image/mdc800.c
index 63a84bbc310d..d308afd06935 100644
--- a/drivers/usb/image/mdc800.c
+++ b/drivers/usb/image/mdc800.c
@@ -565,11 +565,15 @@ static void mdc800_usb_disconnect (struct usb_interface *intf)
565 565
566 usb_deregister_dev(intf, &mdc800_class); 566 usb_deregister_dev(intf, &mdc800_class);
567 567
568 /* must be under lock to make sure no URB
569 is submitted after usb_kill_urb() */
570 mutex_lock(&mdc800->io_lock);
568 mdc800->state=NOT_CONNECTED; 571 mdc800->state=NOT_CONNECTED;
569 572
570 usb_kill_urb(mdc800->irq_urb); 573 usb_kill_urb(mdc800->irq_urb);
571 usb_kill_urb(mdc800->write_urb); 574 usb_kill_urb(mdc800->write_urb);
572 usb_kill_urb(mdc800->download_urb); 575 usb_kill_urb(mdc800->download_urb);
576 mutex_unlock(&mdc800->io_lock);
573 577
574 mdc800->dev = NULL; 578 mdc800->dev = NULL;
575 usb_set_intfdata(intf, NULL); 579 usb_set_intfdata(intf, NULL);
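The io_lock added above only closes the race if the mdc800 submit paths take the same mutex and re-check the state before calling usb_submit_urb(). A sketch of that counterpart check; the field and state names are taken from the hunk, the surrounding code is assumed:

	/* Sketch: with io_lock held, state cannot flip to NOT_CONNECTED
	 * between the check and the submit, so no URB can be queued
	 * after disconnect has run usb_kill_urb(). */
	mutex_lock(&mdc800->io_lock);
	if (mdc800->state == NOT_CONNECTED) {
		mutex_unlock(&mdc800->io_lock);
		return -EIO;
	}
	retval = usb_submit_urb(mdc800->irq_urb, GFP_KERNEL);
	mutex_unlock(&mdc800->io_lock);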
diff --git a/drivers/usb/input/Kconfig b/drivers/usb/input/Kconfig
index c7d887540d8d..2e71d3cca198 100644
--- a/drivers/usb/input/Kconfig
+++ b/drivers/usb/input/Kconfig
@@ -69,6 +69,14 @@ config LOGITECH_FF
69 Note: if you say N here, this device will still be supported, but without 69 Note: if you say N here, this device will still be supported, but without
70 force feedback. 70 force feedback.
71 71
72config PANTHERLORD_FF
73 bool "PantherLord USB/PS2 2in1 Adapter support"
74 depends on HID_FF
75 select INPUT_FF_MEMLESS if USB_HID
76 help
77 Say Y here if you have a PantherLord USB/PS2 2in1 Adapter and want
78 to enable force feedback support for it.
79
72config THRUSTMASTER_FF 80config THRUSTMASTER_FF
73 bool "ThrustMaster FireStorm Dual Power 2 support (EXPERIMENTAL)" 81 bool "ThrustMaster FireStorm Dual Power 2 support (EXPERIMENTAL)"
74 depends on HID_FF && EXPERIMENTAL 82 depends on HID_FF && EXPERIMENTAL
@@ -344,3 +352,15 @@ config USB_APPLETOUCH
344 352
345 To compile this driver as a module, choose M here: the 353 To compile this driver as a module, choose M here: the
346 module will be called appletouch. 354 module will be called appletouch.
355
356config USB_GTCO
357 tristate "GTCO CalComp/InterWrite USB Support"
358 depends on USB && INPUT
359 ---help---
360 Say Y here if you want to use the USB version of the GTCO
361 CalComp/InterWrite Tablet. Make sure to say Y to "Mouse support"
362 (CONFIG_INPUT_MOUSEDEV) and/or "Event interface support"
363 (CONFIG_INPUT_EVDEV) as well.
364
365 To compile this driver as a module, choose M here: the
366 module will be called gtco.
diff --git a/drivers/usb/input/Makefile b/drivers/usb/input/Makefile
index 1a24b5bfa05f..a9d206c945e9 100644
--- a/drivers/usb/input/Makefile
+++ b/drivers/usb/input/Makefile
@@ -17,6 +17,9 @@ endif
17ifeq ($(CONFIG_LOGITECH_FF),y) 17ifeq ($(CONFIG_LOGITECH_FF),y)
18 usbhid-objs += hid-lgff.o 18 usbhid-objs += hid-lgff.o
19endif 19endif
20ifeq ($(CONFIG_PANTHERLORD_FF),y)
21 usbhid-objs += hid-plff.o
22endif
20ifeq ($(CONFIG_THRUSTMASTER_FF),y) 23ifeq ($(CONFIG_THRUSTMASTER_FF),y)
21 usbhid-objs += hid-tmff.o 24 usbhid-objs += hid-tmff.o
22endif 25endif
@@ -45,6 +48,7 @@ obj-$(CONFIG_USB_ACECAD) += acecad.o
45obj-$(CONFIG_USB_YEALINK) += yealink.o 48obj-$(CONFIG_USB_YEALINK) += yealink.o
46obj-$(CONFIG_USB_XPAD) += xpad.o 49obj-$(CONFIG_USB_XPAD) += xpad.o
47obj-$(CONFIG_USB_APPLETOUCH) += appletouch.o 50obj-$(CONFIG_USB_APPLETOUCH) += appletouch.o
51obj-$(CONFIG_USB_GTCO) += gtco.o
48 52
49ifeq ($(CONFIG_USB_DEBUG),y) 53ifeq ($(CONFIG_USB_DEBUG),y)
50EXTRA_CFLAGS += -DDEBUG 54EXTRA_CFLAGS += -DDEBUG
diff --git a/drivers/usb/input/gtco.c b/drivers/usb/input/gtco.c
new file mode 100644
index 000000000000..203cdc1bbba4
--- /dev/null
+++ b/drivers/usb/input/gtco.c
@@ -0,0 +1,1104 @@
1/* -*- linux-c -*-
2
3GTCO digitizer USB driver
4
5Use the err(), dbg() and info() macros from usb.h for system logging
6
7TO CHECK: Is pressure done right on report 5?
8
9Copyright (C) 2006 GTCO CalComp
10
11This program is free software; you can redistribute it and/or
12modify it under the terms of the GNU General Public License
13as published by the Free Software Foundation; version 2
14of the License.
15
16This program is distributed in the hope that it will be useful,
17but WITHOUT ANY WARRANTY; without even the implied warranty of
18MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19GNU General Public License for more details.
20
21You should have received a copy of the GNU General Public License
22along with this program; if not, write to the Free Software
23Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
24
25Permission to use, copy, modify, distribute, and sell this software and its
26documentation for any purpose is hereby granted without fee, provided that
27the above copyright notice appear in all copies and that both that
28copyright notice and this permission notice appear in supporting
29documentation, and that the name of GTCO-CalComp not be used in advertising
30or publicity pertaining to distribution of the software without specific,
31written prior permission. GTCO-CalComp makes no representations about the
32suitability of this software for any purpose. It is provided "as is"
33without express or implied warranty.
34
35GTCO-CALCOMP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
36INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
37EVENT SHALL GTCO-CALCOMP BE LIABLE FOR ANY SPECIAL, INDIRECT OR
38CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
39DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
40TORTIOUS ACTIONS, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
41PERFORMANCE OF THIS SOFTWARE.
42
43GTCO CalComp, Inc.
447125 Riverwood Drive
45Columbia, MD 21046
46
47Jeremy Roberson jroberson@gtcocalcomp.com
48Scott Hill shill@gtcocalcomp.com
49*/
50
51
52
53/*#define DEBUG*/
54
55#include <linux/kernel.h>
56#include <linux/module.h>
57#include <linux/errno.h>
58#include <linux/init.h>
59#include <linux/slab.h>
60#include <linux/input.h>
61#include <linux/usb.h>
62#include <asm/uaccess.h>
63#include <asm/unaligned.h>
64#include <asm/byteorder.h>
65
66
67#include <linux/version.h>
68#include <linux/usb/input.h>
69
70/* Version with a Major number of 2 is for kernel inclusion only. */
71#define GTCO_VERSION "2.00.0006"
72
73
74/* MACROS */
75
76#define VENDOR_ID_GTCO 0x078C
77#define PID_400 0x400
78#define PID_401 0x401
79#define PID_1000 0x1000
80#define PID_1001 0x1001
81#define PID_1002 0x1002
82
83/* Max size of a single report */
84#define REPORT_MAX_SIZE 10
85
86
87/* Bitmask whether pen is in range */
88#define MASK_INRANGE 0x20
89#define MASK_BUTTON 0x01F
90
91#define PATHLENGTH 64
92
93/* DATA STRUCTURES */
94
95/* Device table */
96static struct usb_device_id gtco_usbid_table [] = {
97 { USB_DEVICE(VENDOR_ID_GTCO, PID_400) },
98 { USB_DEVICE(VENDOR_ID_GTCO, PID_401) },
99 { USB_DEVICE(VENDOR_ID_GTCO, PID_1000) },
100 { USB_DEVICE(VENDOR_ID_GTCO, PID_1001) },
101 { USB_DEVICE(VENDOR_ID_GTCO, PID_1002) },
102 { }
103};
104MODULE_DEVICE_TABLE (usb, gtco_usbid_table);
105
106
107/* Structure to hold all of our device specific stuff */
108struct gtco {
109
110 struct input_dev *inputdevice; /* input device struct pointer */
111 struct usb_device *usbdev; /* the usb device for this device */
112 struct urb *urbinfo; /* urb for incoming reports */
113 dma_addr_t buf_dma; /* dma addr of the data buffer*/
114 unsigned char * buffer; /* databuffer for reports */
115
116 char usbpath[PATHLENGTH];
117 int openCount;
118
119 /* Information pulled from Report Descriptor */
120 u32 usage;
121 u32 min_X;
122 u32 max_X;
123 u32 min_Y;
124 u32 max_Y;
125 s8 mintilt_X;
126 s8 maxtilt_X;
127 s8 mintilt_Y;
128 s8 maxtilt_Y;
129 u32 maxpressure;
130 u32 minpressure;
131};
132
133
134
135/* Code for parsing the HID REPORT DESCRIPTOR */
136
137/* From HID1.11 spec */
138struct hid_descriptor
139{
140 struct usb_descriptor_header header;
141 __le16 bcdHID;
142 u8 bCountryCode;
143 u8 bNumDescriptors;
144 u8 bDescriptorType;
145 __le16 wDescriptorLength;
146} __attribute__ ((packed));
147
148
149#define HID_DESCRIPTOR_SIZE 9
150#define HID_DEVICE_TYPE 33
151#define REPORT_DEVICE_TYPE 34
152
153
154#define PREF_TAG(x) ((x)>>4)
155#define PREF_TYPE(x) ((x>>2)&0x03)
156#define PREF_SIZE(x) ((x)&0x03)
157
158#define TYPE_MAIN 0
159#define TYPE_GLOBAL 1
160#define TYPE_LOCAL 2
161#define TYPE_RESERVED 3
162
163#define TAG_MAIN_INPUT 0x8
164#define TAG_MAIN_OUTPUT 0x9
165#define TAG_MAIN_FEATURE 0xB
166#define TAG_MAIN_COL_START 0xA
167#define TAG_MAIN_COL_END 0xC
168
169#define TAG_GLOB_USAGE 0
170#define TAG_GLOB_LOG_MIN 1
171#define TAG_GLOB_LOG_MAX 2
172#define TAG_GLOB_PHYS_MIN 3
173#define TAG_GLOB_PHYS_MAX 4
174#define TAG_GLOB_UNIT_EXP 5
175#define TAG_GLOB_UNIT 6
176#define TAG_GLOB_REPORT_SZ 7
177#define TAG_GLOB_REPORT_ID 8
178#define TAG_GLOB_REPORT_CNT 9
179#define TAG_GLOB_PUSH 10
180#define TAG_GLOB_POP 11
181
182#define TAG_GLOB_MAX 12
183
184#define DIGITIZER_USAGE_TIP_PRESSURE 0x30
185#define DIGITIZER_USAGE_TILT_X 0x3D
186#define DIGITIZER_USAGE_TILT_Y 0x3E
187
188
189/*
190 *
191 * This is an abbreviated parser for the HID Report Descriptor. We
192 * know what devices we are talking to, so this is by no means meant
193 * to be generic. We can make some safe assumptions:
194 *
195 * - We know there are no LONG tags, all short
196 * - We know that we have no MAIN Feature and MAIN Output items
197 * - We know what the IRQ reports are supposed to look like.
198 *
199 * The main purpose of this is to use the HID report desc to figure
200 * out the mins and maxs of the fields in the IRQ reports. The IRQ
201 * reports for 400/401 change slightly if the max X is bigger than 64K.
202 *
203 */
204static void parse_hid_report_descriptor(struct gtco *device, char * report,
205 int length)
206{
207 int x,i=0;
208
209 /* Tag primitive vars */
210 __u8 prefix;
211 __u8 size;
212 __u8 tag;
213 __u8 type;
214 __u8 data = 0;
215 __u16 data16 = 0;
216 __u32 data32 = 0;
217
218
219 /* For parsing logic */
220 int inputnum = 0;
221 __u32 usage = 0;
222
223 /* Global Values, indexed by TAG */
224 __u32 globalval[TAG_GLOB_MAX];
225 __u32 oldval[TAG_GLOB_MAX];
226
227 /* Debug stuff */
228 char maintype='x';
229 char globtype[12];
230 int indent=0;
231 char indentstr[10]="";
232
233
234
235 dbg("======>>>>>>PARSE<<<<<<======");
236
237 /* Walk this report and pull out the info we need */
238 while (i<length){
239 prefix=report[i];
240
241 /* Skip over prefix */
242 i++;
243
244 /* Determine data size and save the data in the proper variable */
245 size = PREF_SIZE(prefix);
246 switch(size){
247 case 1:
248 data = report[i];
249 break;
250 case 2:
251 data16 = le16_to_cpu(get_unaligned((__le16*)(&(report[i]))));
252 break;
253 case 3:
254 size = 4;
255 data32 = le32_to_cpu(get_unaligned((__le32*)(&(report[i]))));
256 }
257
258 /* Skip size of data */
259 i+=size;
260
261 /* What we do depends on the tag type */
262 tag = PREF_TAG(prefix);
263 type = PREF_TYPE(prefix);
264 switch(type){
265 case TYPE_MAIN:
266 strcpy(globtype,"");
267 switch(tag){
268
269 case TAG_MAIN_INPUT:
270 /*
271 * The INPUT MAIN tag signifies this is
272 * information from a report. We need to
273 * figure out what it is and store the
274 * min/max values
275 */
276
277 maintype='I';
278 if (data==2){
279 strcpy(globtype,"Variable");
280 }
281 if (data==3){
282 strcpy(globtype,"Var|Const");
283 }
284
285 dbg("::::: Saving Report: %d input #%d Max: 0x%X(%d) Min:0x%X(%d) of %d bits",
286 globalval[TAG_GLOB_REPORT_ID],inputnum,
287 globalval[TAG_GLOB_LOG_MAX],globalval[TAG_GLOB_LOG_MAX],
288 globalval[TAG_GLOB_LOG_MIN],globalval[TAG_GLOB_LOG_MIN],
289 (globalval[TAG_GLOB_REPORT_SZ] * globalval[TAG_GLOB_REPORT_CNT]));
290
291
292 /*
293 We can assume that the first two input items
294 are always the X and Y coordinates. After
295 that, we look for everything else by
296 local usage value
297 */
298 switch (inputnum){
299 case 0: /* X coord */
300 dbg("GER: X Usage: 0x%x",usage);
301 if (device->max_X == 0){
302 device->max_X = globalval[TAG_GLOB_LOG_MAX];
303 device->min_X = globalval[TAG_GLOB_LOG_MIN];
304 }
305
306 break;
307 case 1: /* Y coord */
308 dbg("GER: Y Usage: 0x%x",usage);
309 if (device->max_Y == 0){
310 device->max_Y = globalval[TAG_GLOB_LOG_MAX];
311 device->min_Y = globalval[TAG_GLOB_LOG_MIN];
312 }
313 break;
314 default:
315 /* Tilt X */
316 if (usage == DIGITIZER_USAGE_TILT_X){
317 if (device->maxtilt_X == 0){
318 device->maxtilt_X = globalval[TAG_GLOB_LOG_MAX];
319 device->mintilt_X = globalval[TAG_GLOB_LOG_MIN];
320 }
321 }
322
323 /* Tilt Y */
324 if (usage == DIGITIZER_USAGE_TILT_Y){
325 if (device->maxtilt_Y == 0){
326 device->maxtilt_Y = globalval[TAG_GLOB_LOG_MAX];
327 device->mintilt_Y = globalval[TAG_GLOB_LOG_MIN];
328 }
329 }
330
331
332 /* Pressure */
333 if (usage == DIGITIZER_USAGE_TIP_PRESSURE){
334 if (device->maxpressure == 0){
335 device->maxpressure = globalval[TAG_GLOB_LOG_MAX];
336 device->minpressure = globalval[TAG_GLOB_LOG_MIN];
337 }
338 }
339
340 break;
341 }
342
343 inputnum++;
344
345
346 break;
347 case TAG_MAIN_OUTPUT:
348 maintype='O';
349 break;
350 case TAG_MAIN_FEATURE:
351 maintype='F';
352 break;
353 case TAG_MAIN_COL_START:
354 maintype='S';
355
356 if (data==0){
357 dbg("======>>>>>> Physical");
358 strcpy(globtype,"Physical");
359 }else{
360 dbg("======>>>>>>");
361 }
362
363 /* Indent the debug output */
364 indent++;
365 for (x=0;x<indent;x++){
366 indentstr[x]='-';
367 }
368 indentstr[x]=0;
369
370 /* Save global tags */
371 for (x=0;x<TAG_GLOB_MAX;x++){
372 oldval[x] = globalval[x];
373 }
374
375 break;
376 case TAG_MAIN_COL_END:
377 dbg("<<<<<<======");
378 maintype='E';
379 indent--;
380 for (x=0;x<indent;x++){
381 indentstr[x]='-';
382 }
383 indentstr[x]=0;
384
385 /* Copy global tags back */
386 for (x=0;x<TAG_GLOB_MAX;x++){
387 globalval[x] = oldval[x];
388 }
389
390 break;
391 }
392
393 switch (size){
394 case 1:
395 dbg("%sMAINTAG:(%d) %c SIZE: %d Data: %s 0x%x",
396 indentstr,tag,maintype,size,globtype,data);
397 break;
398 case 2:
399 dbg("%sMAINTAG:(%d) %c SIZE: %d Data: %s 0x%x",
400 indentstr,tag,maintype,size,globtype, data16);
401 break;
402 case 4:
403 dbg("%sMAINTAG:(%d) %c SIZE: %d Data: %s 0x%x",
404 indentstr,tag,maintype,size,globtype,data32);
405 break;
406 }
407 break;
408 case TYPE_GLOBAL:
409 switch(tag){
410 case TAG_GLOB_USAGE:
411 /*
412 * First time we hit the global usage tag,
413 * it should tell us the type of device
414 */
415 if (device->usage == 0){
416 device->usage = data;
417 }
418 strcpy(globtype,"USAGE");
419 break;
420 case TAG_GLOB_LOG_MIN :
421 strcpy(globtype,"LOG_MIN");
422 break;
423 case TAG_GLOB_LOG_MAX :
424 strcpy(globtype,"LOG_MAX");
425 break;
426 case TAG_GLOB_PHYS_MIN :
427 strcpy(globtype,"PHYS_MIN");
428 break;
429 case TAG_GLOB_PHYS_MAX :
430 strcpy(globtype,"PHYS_MAX");
431 break;
432 case TAG_GLOB_UNIT_EXP :
433 strcpy(globtype,"EXP");
434 break;
435 case TAG_GLOB_UNIT :
436 strcpy(globtype,"UNIT");
437 break;
438 case TAG_GLOB_REPORT_SZ :
439 strcpy(globtype,"REPORT_SZ");
440 break;
441 case TAG_GLOB_REPORT_ID :
442 strcpy(globtype,"REPORT_ID");
443 /* New report, restart numbering */
444 inputnum=0;
445 break;
446 case TAG_GLOB_REPORT_CNT:
447 strcpy(globtype,"REPORT_CNT");
448 break;
449 case TAG_GLOB_PUSH :
450 strcpy(globtype,"PUSH");
451 break;
452 case TAG_GLOB_POP:
453 strcpy(globtype,"POP");
454 break;
455 }
456
457
458 /* Check to make sure we have a good tag number
459 so we don't overflow array */
460 if (tag < TAG_GLOB_MAX){
461 switch (size){
462 case 1:
463 dbg("%sGLOBALTAG:%s(%d) SIZE: %d Data: 0x%x",indentstr,globtype,tag,size,data);
464 globalval[tag]=data;
465 break;
466 case 2:
467 dbg("%sGLOBALTAG:%s(%d) SIZE: %d Data: 0x%x",indentstr,globtype,tag,size,data16);
468 globalval[tag]=data16;
469 break;
470 case 4:
471 dbg("%sGLOBALTAG:%s(%d) SIZE: %d Data: 0x%x",indentstr,globtype,tag,size,data32);
472 globalval[tag]=data32;
473 break;
474 }
475 }else{
476 dbg("%sGLOBALTAG: ILLEGAL TAG:%d SIZE: %d ",
477 indentstr,tag,size);
478 }
479
480
481 break;
482
483 case TYPE_LOCAL:
484 switch(tag){
485 case TAG_GLOB_USAGE:
486 strcpy(globtype,"USAGE");
487 /* Always 1 byte */
488 usage = data;
489 break;
490 case TAG_GLOB_LOG_MIN :
491 strcpy(globtype,"MIN");
492 break;
493 case TAG_GLOB_LOG_MAX :
494 strcpy(globtype,"MAX");
495 break;
496 default:
497 strcpy(globtype,"UNKNOWN");
498 }
499
500 switch (size){
501 case 1:
502 dbg("%sLOCALTAG:(%d) %s SIZE: %d Data: 0x%x",
503 indentstr,tag,globtype,size,data);
504 break;
505 case 2:
506 dbg("%sLOCALTAG:(%d) %s SIZE: %d Data: 0x%x",
507 indentstr,tag,globtype,size,data16);
508 break;
509 case 4:
510 dbg("%sLOCALTAG:(%d) %s SIZE: %d Data: 0x%x",
511 indentstr,tag,globtype,size,data32);
512 break;
513 }
514
515 break;
516 }
517
518 }
519
520}
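/*
 * Illustration only, not part of the driver: how the PREF_* macros
 * above decode one HID short-item prefix byte. Take prefix 0x75,
 * the "Report Size" global item with one data byte:
 *
 *	PREF_TAG(0x75)  == 0x7  (TAG_GLOB_REPORT_SZ)
 *	PREF_TYPE(0x75) == 0x1  (TYPE_GLOBAL)
 *	PREF_SIZE(0x75) == 0x1  (one data byte follows)
 *
 * so the parser advances past the prefix, reads one byte of data,
 * and stores it in globalval[TAG_GLOB_REPORT_SZ].
 */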
521
522
523
524/* INPUT DRIVER Routines */
525
526
527/*
528 * Called when opening the input device. This will submit the URB to
529 * the usb system so we start getting reports
530 */
531static int gtco_input_open(struct input_dev *inputdev)
532{
533 struct gtco *device;
534 device = inputdev->private;
535
536 device->urbinfo->dev = device->usbdev;
537 if (usb_submit_urb(device->urbinfo, GFP_KERNEL)) {
538 return -EIO;
539 }
540 return 0;
541}
542
 543/*
 544 * Called when closing the input device. This will unlink the URB.
 545 */
546static void gtco_input_close(struct input_dev *inputdev)
547{
548 struct gtco *device = inputdev->private;
549
550 usb_kill_urb(device->urbinfo);
551
552}
553
554
555/*
556 * Setup input device capabilities. Tell the input system what this
557 * device is capable of generating.
558 *
559 * This information is based on what is read from the HID report and
560 * placed in the struct gtco structure
561 *
562 */
563static void gtco_setup_caps(struct input_dev *inputdev)
564{
565 struct gtco *device = inputdev->private;
566
567
568 /* Which events */
569 inputdev->evbit[0] = BIT(EV_KEY) | BIT(EV_ABS) | BIT(EV_MSC);
570
571
572 /* Misc event menu block */
573 inputdev->mscbit[0] = BIT(MSC_SCAN)|BIT(MSC_SERIAL)|BIT(MSC_RAW) ;
574
575
576 /* Absolute values based on HID report info */
577 input_set_abs_params(inputdev, ABS_X, device->min_X, device->max_X,
578 0, 0);
579 input_set_abs_params(inputdev, ABS_Y, device->min_Y, device->max_Y,
580 0, 0);
581
582 /* Proximity */
583 input_set_abs_params(inputdev, ABS_DISTANCE, 0, 1, 0, 0);
584
585 /* Tilt & pressure */
586 input_set_abs_params(inputdev, ABS_TILT_X, device->mintilt_X,
587 device->maxtilt_X, 0, 0);
588 input_set_abs_params(inputdev, ABS_TILT_Y, device->mintilt_Y,
589 device->maxtilt_Y, 0, 0);
590 input_set_abs_params(inputdev, ABS_PRESSURE, device->minpressure,
591 device->maxpressure, 0, 0);
592
593
594 /* Transducer */
595 input_set_abs_params(inputdev, ABS_MISC, 0,0xFF, 0, 0);
596
597}
598
599
600
601/* USB Routines */
602
603
604/*
605 * URB callback routine. Called when we get IRQ reports from the
606 * digitizer.
607 *
608 * This bridges the USB and input device worlds. It generates events
609 * on the input device based on the USB reports.
610 */
611static void gtco_urb_callback(struct urb *urbinfo)
612{
613
614
615 struct gtco *device = urbinfo->context;
616 struct input_dev *inputdev;
617 int rc;
618 u32 val = 0;
619 s8 valsigned = 0;
620 char le_buffer[2];
621
622 inputdev = device->inputdevice;
623
624
625 /* Was callback OK? */
626 if ((urbinfo->status == -ECONNRESET ) ||
627 (urbinfo->status == -ENOENT ) ||
628 (urbinfo->status == -ESHUTDOWN )){
629
630 /* Shutdown is occurring. Return and don't queue up any more */
631 return;
632 }
633
634 if (urbinfo->status != 0 ) {
635 /* Some unknown error. Hopefully temporary. Just go and */
636 /* requeue an URB */
637 goto resubmit;
638 }
639
640 /*
641 * Good URB, now process
642 */
643
 644 /* How we interpret the report is PID dependent */
645 if ((inputdev->id.product == PID_1000 )||
646 (inputdev->id.product == PID_1001 )||
647 (inputdev->id.product == PID_1002 ))
648 {
649
 650 /*
 651 * Switch on the report ID.
 652 * Conveniently, the higher the report number, the more
 653 * information the report carries, so we can fall through
 654 * the case statements starting from the highest report.
 655 */
656 switch(device->buffer[0]){
657 case 5:
658 /* Pressure is 9 bits */
659 val = ((u16)(device->buffer[8]) << 1);
660 val |= (u16)(device->buffer[7] >> 7);
 661 /* Report the assembled 9-bit value, not the raw high byte */
 662 input_report_abs(inputdev, ABS_PRESSURE, val);
663
664 /* Mask out the Y tilt value used for pressure */
665 device->buffer[7] = (u8)((device->buffer[7]) & 0x7F);
666
667
668 /* Fall thru */
669 case 4:
670 /* Tilt */
671
672 /* Sign extend these 7 bit numbers. */
673 if (device->buffer[6] & 0x40)
674 device->buffer[6] |= 0x80;
675
676 if (device->buffer[7] & 0x40)
677 device->buffer[7] |= 0x80;
678
679
680 valsigned = (device->buffer[6]);
681 input_report_abs(inputdev, ABS_TILT_X, (s32)valsigned);
682
683 valsigned = (device->buffer[7]);
684 input_report_abs(inputdev, ABS_TILT_Y, (s32)valsigned);
685
686 /* Fall thru */
687
688 case 2:
689 case 3:
690 /* Convert buttons, only 5 bits possible */
691 val = (device->buffer[5])&MASK_BUTTON;
692
693 /* We don't apply any meaning to the bitmask,
694 just report */
695 input_event(inputdev, EV_MSC, MSC_SERIAL, val);
696
697 /* Fall thru */
698 case 1:
699
700 /* All reports have X and Y coords in the same place */
701 val = le16_to_cpu(get_unaligned((__le16 *) &(device->buffer[1])));
702 input_report_abs(inputdev, ABS_X, val);
703
704 val = le16_to_cpu(get_unaligned((__le16 *) &(device->buffer[3])));
705 input_report_abs(inputdev, ABS_Y, val);
706
707
708 /* Ditto for proximity bit */
709 if (device->buffer[5]& MASK_INRANGE){
710 val = 1;
711 }else{
712 val=0;
713 }
714 input_report_abs(inputdev, ABS_DISTANCE, val);
715
716
717 /* Report 1 is an exception to how we handle buttons */
718 /* Buttons are an index, not a bitmask */
719 if (device->buffer[0] == 1){
720
721 /* Convert buttons, 5 bit index */
722 /* Report value of index set as one,
723 the rest as 0 */
724 val = device->buffer[5]& MASK_BUTTON;
725 dbg("======>>>>>>REPORT 1: val 0x%X(%d)",
726 val,val);
727
728 /*
729 * We don't apply any meaning to the button
730 * index, just report it
731 */
732 input_event(inputdev, EV_MSC, MSC_SERIAL, val);
733
734
735 }
736
737 break;
738 case 7:
739 /* Menu blocks */
740 input_event(inputdev, EV_MSC, MSC_SCAN,
741 device->buffer[1]);
742
743
744 break;
745
746 }
747
748
749 }
750 /* Other pid class */
751 if ((inputdev->id.product == PID_400 )||
752 (inputdev->id.product == PID_401 ))
753 {
754
755 /* Report 2 */
756 if (device->buffer[0] == 2){
757 /* Menu blocks */
758 input_event(inputdev, EV_MSC, MSC_SCAN,
759 device->buffer[1]);
760 }
761
762 /* Report 1 */
763 if (device->buffer[0] == 1){
764 char buttonbyte;
765
766
 767 /* If X max > 64K, we steal a bit from the Y report */
768 if (device->max_X > 0x10000){
769
770 val = (u16)(((u16)(device->buffer[2]<<8))|((u8)(device->buffer[1])));
771 val |= (u32)(((u8)device->buffer[3]&0x1)<< 16);
772
773 input_report_abs(inputdev, ABS_X, val);
774
775 le_buffer[0] = (u8)((u8)(device->buffer[3])>>1);
776 le_buffer[0] |= (u8)((device->buffer[3]&0x1)<<7);
777
778 le_buffer[1] = (u8)(device->buffer[4]>>1);
779 le_buffer[1] |= (u8)((device->buffer[5]&0x1)<<7);
780
781 val = le16_to_cpu(get_unaligned((__le16 *)(le_buffer)));
782
783 input_report_abs(inputdev, ABS_Y, val);
784
785
786 /*
787 * Shift the button byte right by one to
788 * make it look like the standard report
789 */
790 buttonbyte = (device->buffer[5])>>1;
791 }else{
792
793 val = le16_to_cpu(get_unaligned((__le16 *) (&(device->buffer[1]))));
794 input_report_abs(inputdev, ABS_X, val);
795
796 val = le16_to_cpu(get_unaligned((__le16 *) (&(device->buffer[3]))));
797 input_report_abs(inputdev, ABS_Y, val);
798
799 buttonbyte = device->buffer[5];
800
801 }
802
803
804 /* BUTTONS and PROXIMITY */
805 if (buttonbyte& MASK_INRANGE){
806 val = 1;
807 }else{
808 val=0;
809 }
810 input_report_abs(inputdev, ABS_DISTANCE, val);
811
812 /* Convert buttons, only 4 bits possible */
813 val = buttonbyte&0x0F;
814#ifdef USE_BUTTONS
815 for ( i=0;i<5;i++){
816 input_report_key(inputdev, BTN_DIGI+i,val&(1<<i));
817 }
818#else
819 /* We don't apply any meaning to the bitmask, just report */
820 input_event(inputdev, EV_MSC, MSC_SERIAL, val);
821#endif
822 /* TRANSDUCER */
823 input_report_abs(inputdev, ABS_MISC, device->buffer[6]);
824
825 }
826 }
827
 828 /* Everybody gets report IDs */
829 input_event(inputdev, EV_MSC, MSC_RAW, device->buffer[0]);
830
831 /* Sync it up */
832 input_sync(inputdev);
833
834 resubmit:
835 rc = usb_submit_urb(urbinfo, GFP_ATOMIC);
836 if (rc != 0) {
837 err("usb_submit_urb failed rc=0x%x",rc);
838 }
839
840}
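/*
 * Illustration only: the report-5 byte layout the callback above
 * assumes, inferred from the code rather than from GTCO documentation:
 *
 *	buffer[0]    report ID (5)
 *	buffer[1-2]  X coordinate, little endian
 *	buffer[3-4]  Y coordinate, little endian
 *	buffer[5]    bit 5 in-range flag, bits 0-4 buttons
 *	buffer[6]    X tilt, 7-bit signed
 *	buffer[7]    Y tilt, 7-bit signed; bit 7 is the pressure LSB
 *	buffer[8]    pressure bits 8..1
 */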
841
842/*
 843 * The probe routine. This is called when the kernel finds the matching USB
844 * vendor/product. We do the following:
845 *
846 * - Allocate mem for a local structure to manage the device
847 * - Request a HID Report Descriptor from the device and parse it to
848 * find out the device parameters
849 * - Create an input device and assign it attributes
850 * - Allocate an URB so the device can talk to us when the input
851 * queue is open
852 */
853static int gtco_probe(struct usb_interface *usbinterface,
854 const struct usb_device_id *id)
855{
856
857 struct gtco *device = NULL;
858 char path[PATHLENGTH];
859 struct input_dev *inputdev;
860 struct hid_descriptor *hid_desc;
861 char *report;
862 int result=0, retry;
863 struct usb_endpoint_descriptor *endpoint;
864
865 /* Allocate memory for device structure */
866 device = kzalloc(sizeof(struct gtco), GFP_KERNEL);
867 if (device == NULL) {
868 err("No more memory");
869 return -ENOMEM;
870 }
871
872
873 device->inputdevice = input_allocate_device();
874 if (!device->inputdevice){
875 kfree(device);
876 err("No more memory");
877 return -ENOMEM;
878 }
879
880 /* Get pointer to the input device */
881 inputdev = device->inputdevice;
882
883 /* Save interface information */
884 device->usbdev = usb_get_dev(interface_to_usbdev(usbinterface));
885
886
887 /* Allocate some data for incoming reports */
888 device->buffer = usb_buffer_alloc(device->usbdev, REPORT_MAX_SIZE,
889 GFP_KERNEL, &(device->buf_dma));
890 if (!device->buffer){
891 input_free_device(device->inputdevice);
892 kfree(device);
893 err("No more memory");
894 return -ENOMEM;
895 }
896
897 /* Allocate URB for reports */
898 device->urbinfo = usb_alloc_urb(0, GFP_KERNEL);
899 if (!device->urbinfo) {
900 usb_buffer_free(device->usbdev, REPORT_MAX_SIZE,
901 device->buffer, device->buf_dma);
902 input_free_device(device->inputdevice);
903 kfree(device);
904 err("No more memory");
905 return -ENOMEM;
906 }
907
908
909 /*
910 * The endpoint is always altsetting 0, we know this since we know
911 * this device only has one interrupt endpoint
912 */
913 endpoint = &usbinterface->altsetting[0].endpoint[0].desc;
914
915 /* Some debug */
916 dbg("gtco # interfaces: %d",usbinterface->num_altsetting);
917 dbg("num endpoints: %d",usbinterface->cur_altsetting->desc.bNumEndpoints);
918 dbg("interface class: %d",usbinterface->cur_altsetting->desc.bInterfaceClass);
919 dbg("endpoint: attribute:0x%x type:0x%x",endpoint->bmAttributes,endpoint->bDescriptorType);
920 if ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_INT)
921 dbg("endpoint: we have interrupt endpoint\n");
922
923 dbg("endpoint extra len:%d ",usbinterface->altsetting[0].extralen);
924
925
926
927 /*
928 * Find the HID descriptor so we can find out the size of the
929 * HID report descriptor
930 */
931 if (usb_get_extra_descriptor(usbinterface->cur_altsetting,
932 HID_DEVICE_TYPE,&hid_desc) != 0){
 933 err("Can't retrieve extra USB descriptor to get HID report descriptor length");
934 usb_buffer_free(device->usbdev, REPORT_MAX_SIZE,
935 device->buffer, device->buf_dma);
936 input_free_device(device->inputdevice);
937 kfree(device);
938 return -EIO;
939 }
940
941 dbg("Extra descriptor success: type:%d len:%d",
942 hid_desc->bDescriptorType, hid_desc->wDescriptorLength);
943
944 if (!(report = kzalloc(hid_desc->wDescriptorLength, GFP_KERNEL))) {
945 usb_buffer_free(device->usbdev, REPORT_MAX_SIZE,
946 device->buffer, device->buf_dma);
947
948 input_free_device(device->inputdevice);
949 kfree(device);
950 err("No more memory");
951 return -ENOMEM;
952 }
953
954 /* Couple of tries to get reply */
955 for (retry=0;retry<3;retry++) {
956 result = usb_control_msg(device->usbdev,
957 usb_rcvctrlpipe(device->usbdev, 0),
958 USB_REQ_GET_DESCRIPTOR,
959 USB_RECIP_INTERFACE | USB_DIR_IN,
960 (REPORT_DEVICE_TYPE << 8),
961 0, /* interface */
962 report,
963 hid_desc->wDescriptorLength,
964 5000); /* 5 secs */
965
966 if (result == hid_desc->wDescriptorLength)
967 break;
968 }
969
970 /* If we didn't get the report, fail */
 971 dbg("usb_control_msg result: %d", result);
972 if (result != hid_desc->wDescriptorLength){
973 kfree(report);
974 usb_buffer_free(device->usbdev, REPORT_MAX_SIZE,
975 device->buffer, device->buf_dma);
976 input_free_device(device->inputdevice);
977 kfree(device);
978 err("Failed to get HID Report Descriptor of size: %d",
979 hid_desc->wDescriptorLength);
980 return -EIO;
981 }
982
983
984 /* Now we parse the report */
985 parse_hid_report_descriptor(device,report,result);
986
987 /* Now we delete it */
988 kfree(report);
989
990 /* Create a device file node */
991 usb_make_path(device->usbdev, path, PATHLENGTH);
992 sprintf(device->usbpath, "%s/input0", path);
993
994
995 /* Set Input device functions */
996 inputdev->open = gtco_input_open;
997 inputdev->close = gtco_input_close;
998
999 /* Set input device information */
1000 inputdev->name = "GTCO_CalComp";
1001 inputdev->phys = device->usbpath;
1002 inputdev->private = device;
1003
1004
1005 /* Now set up all the input device capabilities */
1006 gtco_setup_caps(inputdev);
1007
1008 /* Set input device required ID information */
1009 usb_to_input_id(device->usbdev, &device->inputdevice->id);
1010 inputdev->cdev.dev = &usbinterface->dev;
1011
1012 /* Setup the URB, it will be posted later on open of input device */
1013 endpoint = &usbinterface->altsetting[0].endpoint[0].desc;
1014
1015 usb_fill_int_urb(device->urbinfo,
1016 device->usbdev,
1017 usb_rcvintpipe(device->usbdev,
1018 endpoint->bEndpointAddress),
1019 device->buffer,
1020 REPORT_MAX_SIZE,
1021 gtco_urb_callback,
1022 device,
1023 endpoint->bInterval);
1024
1025 device->urbinfo->transfer_dma = device->buf_dma;
1026 device->urbinfo->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
1027
1028
1029 /* Save device pointer in USB interface device */
1030 usb_set_intfdata(usbinterface, device);
1031
1032 /* All done, now register the input device */
1033 input_register_device(inputdev);
1034
 1035 info("gtco driver created usb: %s\n", path);
1036 return 0;
1037
1038}
1039
1040/*
1041 * This function is a standard USB function called when the USB device
 1042 * is disconnected. We will get rid of the URB, de-register the input
1043 * device, and free up allocated memory
1044 */
1045static void gtco_disconnect(struct usb_interface *interface)
1046{
1047
1048 /* Grab private device ptr */
1049 struct gtco *device = usb_get_intfdata (interface);
 1050 struct input_dev *inputdev;
 1051
 1052 /* Now reverse all the registration stuff,
 1053 dereferencing only after the NULL check */
 1054 if (device) {
 1055 inputdev = device->inputdevice;
 1056 input_unregister_device(inputdev);
1057 usb_kill_urb(device->urbinfo);
1058 usb_free_urb(device->urbinfo);
1059 usb_buffer_free(device->usbdev, REPORT_MAX_SIZE,
1060 device->buffer, device->buf_dma);
1061 kfree(device);
1062 }
1063
1064 info("gtco driver disconnected");
1065}
1066
1067
1068/* STANDARD MODULE LOAD ROUTINES */
1069
1070static struct usb_driver gtco_driverinfo_table = {
1071#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16))
1072 .owner = THIS_MODULE,
1073#endif
1074 .name = "gtco",
1075 .id_table = gtco_usbid_table,
1076 .probe = gtco_probe,
1077 .disconnect = gtco_disconnect,
1078};
1079/*
1080 * Register this module with the USB subsystem
1081 */
1082static int __init gtco_init(void)
1083{
1084 int rc;
1085 rc = usb_register(&gtco_driverinfo_table);
1086 if (rc) {
1087 err("usb_register() failed rc=0x%x", rc);
1088 }
 1089 printk(KERN_INFO "GTCO usb driver version: %s\n", GTCO_VERSION);
1090 return rc;
1091}
1092
1093/*
1094 * Deregister this module with the USB subsystem
1095 */
1096static void __exit gtco_exit(void)
1097{
1098 usb_deregister(&gtco_driverinfo_table);
1099}
1100
1101module_init (gtco_init);
1102module_exit (gtco_exit);
1103
1104MODULE_LICENSE("GPL");
diff --git a/drivers/usb/input/hid-core.c b/drivers/usb/input/hid-core.c
index c6c9e72e5fd9..84983d1b7164 100644
--- a/drivers/usb/input/hid-core.c
+++ b/drivers/usb/input/hid-core.c
@@ -35,6 +35,7 @@
35 35
36#include <linux/hid.h> 36#include <linux/hid.h>
37#include <linux/hiddev.h> 37#include <linux/hiddev.h>
38#include <linux/hid-debug.h>
38#include "usbhid.h" 39#include "usbhid.h"
39 40
40/* 41/*
@@ -220,23 +221,6 @@ static void hid_irq_in(struct urb *urb)
220 } 221 }
221} 222}
222 223
223/*
224 * Find a report field with a specified HID usage.
225 */
226#if 0
227struct hid_field *hid_find_field_by_usage(struct hid_device *hid, __u32 wanted_usage, int type)
228{
229 struct hid_report *report;
230 int i;
231
232 list_for_each_entry(report, &hid->report_enum[type].report_list, list)
233 for (i = 0; i < report->maxfield; i++)
234 if (report->field[i]->logical == wanted_usage)
235 return report->field[i];
236 return NULL;
237}
238#endif /* 0 */
239
240static int hid_submit_out(struct hid_device *hid) 224static int hid_submit_out(struct hid_device *hid)
241{ 225{
242 struct hid_report *report; 226 struct hid_report *report;
@@ -501,7 +485,7 @@ static int hid_get_class_descriptor(struct usb_device *dev, int ifnum,
501{ 485{
502 int result, retries = 4; 486 int result, retries = 4;
503 487
504 memset(buf,0,size); // Make sure we parse really received data 488 memset(buf, 0, size);
505 489
506 do { 490 do {
507 result = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), 491 result = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
@@ -528,18 +512,6 @@ void usbhid_close(struct hid_device *hid)
528 usb_kill_urb(usbhid->urbin); 512 usb_kill_urb(usbhid->urbin);
529} 513}
530 514
531static int hidinput_open(struct input_dev *dev)
532{
533 struct hid_device *hid = dev->private;
534 return usbhid_open(hid);
535}
536
537static void hidinput_close(struct input_dev *dev)
538{
539 struct hid_device *hid = dev->private;
540 usbhid_close(hid);
541}
542
543#define USB_VENDOR_ID_PANJIT 0x134c 515#define USB_VENDOR_ID_PANJIT 0x134c
544 516
545#define USB_VENDOR_ID_TURBOX 0x062a 517#define USB_VENDOR_ID_TURBOX 0x062a
@@ -770,6 +742,7 @@ void usbhid_init_reports(struct hid_device *hid)
770#define USB_DEVICE_ID_APPLE_GEYSER4_JIS 0x021c 742#define USB_DEVICE_ID_APPLE_GEYSER4_JIS 0x021c
771#define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a 743#define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a
772#define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b 744#define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b
745#define USB_DEVICE_ID_APPLE_IR 0x8240
773 746
774#define USB_VENDOR_ID_CHERRY 0x046a 747#define USB_VENDOR_ID_CHERRY 0x046a
775#define USB_DEVICE_ID_CHERRY_CYMOTION 0x0023 748#define USB_DEVICE_ID_CHERRY_CYMOTION 0x0023
@@ -792,6 +765,12 @@ void usbhid_init_reports(struct hid_device *hid)
792#define USB_VENDOR_ID_IMATION 0x0718 765#define USB_VENDOR_ID_IMATION 0x0718
793#define USB_DEVICE_ID_DISC_STAKKA 0xd000 766#define USB_DEVICE_ID_DISC_STAKKA 0xd000
794 767
768#define USB_VENDOR_ID_PANTHERLORD 0x0810
769#define USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK 0x0001
770
771#define USB_VENDOR_ID_SONY 0x054c
772#define USB_DEVICE_ID_SONY_PS3_CONTROLLER 0x0268
773
795/* 774/*
796 * Alphabetically sorted blacklist by quirk type. 775 * Alphabetically sorted blacklist by quirk type.
797 */ 776 */
@@ -946,19 +925,21 @@ static const struct hid_blacklist {
946 925
947 { USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION, HID_QUIRK_CYMOTION }, 926 { USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION, HID_QUIRK_CYMOTION },
948 927
949 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI, HID_QUIRK_POWERBOOK_HAS_FN }, 928 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
950 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO, HID_QUIRK_POWERBOOK_HAS_FN }, 929 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
951 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI, HID_QUIRK_POWERBOOK_HAS_FN }, 930 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
952 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ISO, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_POWERBOOK_ISO_KEYBOARD}, 931 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ISO, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE | HID_QUIRK_POWERBOOK_ISO_KEYBOARD},
953 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_JIS, HID_QUIRK_POWERBOOK_HAS_FN }, 932 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_JIS, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
954 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ANSI, HID_QUIRK_POWERBOOK_HAS_FN }, 933 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ANSI, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
955 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ISO, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_POWERBOOK_ISO_KEYBOARD}, 934 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ISO, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE | HID_QUIRK_POWERBOOK_ISO_KEYBOARD},
956 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_JIS, HID_QUIRK_POWERBOOK_HAS_FN }, 935 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_JIS, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
957 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ANSI, HID_QUIRK_POWERBOOK_HAS_FN }, 936 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ANSI, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
958 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ISO, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_POWERBOOK_ISO_KEYBOARD}, 937 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ISO, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE | HID_QUIRK_POWERBOOK_ISO_KEYBOARD},
959 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_JIS, HID_QUIRK_POWERBOOK_HAS_FN }, 938 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_JIS, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
960 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY, HID_QUIRK_POWERBOOK_HAS_FN }, 939 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
961 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY, HID_QUIRK_POWERBOOK_HAS_FN }, 940 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
941
942 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IR, HID_QUIRK_IGNORE },
962 943
963 { USB_VENDOR_ID_PANJIT, 0x0001, HID_QUIRK_IGNORE }, 944 { USB_VENDOR_ID_PANJIT, 0x0001, HID_QUIRK_IGNORE },
964 { USB_VENDOR_ID_PANJIT, 0x0002, HID_QUIRK_IGNORE }, 945 { USB_VENDOR_ID_PANJIT, 0x0002, HID_QUIRK_IGNORE },
@@ -969,6 +950,10 @@ static const struct hid_blacklist {
969 950
970 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_USB_RECEIVER, HID_QUIRK_BAD_RELATIVE_KEYS }, 951 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_USB_RECEIVER, HID_QUIRK_BAD_RELATIVE_KEYS },
971 952
953 { USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK, HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },
954
955 { USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER, HID_QUIRK_SONY_PS3_CONTROLLER },
956
972 { 0, 0 } 957 { 0, 0 }
973}; 958};
974 959
@@ -1033,6 +1018,32 @@ static void hid_fixup_cymotion_descriptor(char *rdesc, int rsize)
1033 } 1018 }
1034} 1019}
1035 1020
1021/*
1022 * Sending HID_REQ_GET_REPORT changes the operation mode of the ps3 controller
1023 * to "operational". Without this, the ps3 controller will not report any
1024 * events.
1025 */
1026static void hid_fixup_sony_ps3_controller(struct usb_device *dev, int ifnum)
1027{
1028 int result;
1029 char *buf = kmalloc(18, GFP_KERNEL);
1030
1031 if (!buf)
1032 return;
1033
1034 result = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
1035 HID_REQ_GET_REPORT,
1036 USB_DIR_IN | USB_TYPE_CLASS |
1037 USB_RECIP_INTERFACE,
1038 (3 << 8) | 0xf2, ifnum, buf, 17,
1039 USB_CTRL_GET_TIMEOUT);
1040
1041 if (result < 0)
1042 err("%s failed: %d\n", __func__, result);
1043
1044 kfree(buf);
1045}
1046
1036static struct hid_device *usb_hid_configure(struct usb_interface *intf) 1047static struct hid_device *usb_hid_configure(struct usb_interface *intf)
1037{ 1048{
1038 struct usb_host_interface *interface = intf->cur_altsetting; 1049 struct usb_host_interface *interface = intf->cur_altsetting;
@@ -1064,6 +1075,11 @@ static struct hid_device *usb_hid_configure(struct usb_interface *intf)
1064 if (quirks & HID_QUIRK_IGNORE) 1075 if (quirks & HID_QUIRK_IGNORE)
1065 return NULL; 1076 return NULL;
1066 1077
1078 if ((quirks & HID_QUIRK_IGNORE_MOUSE) &&
1079 (interface->desc.bInterfaceProtocol == USB_INTERFACE_PROTOCOL_MOUSE))
1080 return NULL;
1081
1082
1067 if (usb_get_extra_descriptor(interface, HID_DT_HID, &hdesc) && 1083 if (usb_get_extra_descriptor(interface, HID_DT_HID, &hdesc) &&
1068 (!interface->desc.bNumEndpoints || 1084 (!interface->desc.bNumEndpoints ||
1069 usb_get_extra_descriptor(&interface->endpoint[0], HID_DT_HID, &hdesc))) { 1085 usb_get_extra_descriptor(&interface->endpoint[0], HID_DT_HID, &hdesc))) {
@@ -1235,8 +1251,8 @@ static struct hid_device *usb_hid_configure(struct usb_interface *intf)
1235 usbhid->urbctrl->transfer_dma = usbhid->ctrlbuf_dma; 1251 usbhid->urbctrl->transfer_dma = usbhid->ctrlbuf_dma;
1236 usbhid->urbctrl->transfer_flags |= (URB_NO_TRANSFER_DMA_MAP | URB_NO_SETUP_DMA_MAP); 1252 usbhid->urbctrl->transfer_flags |= (URB_NO_TRANSFER_DMA_MAP | URB_NO_SETUP_DMA_MAP);
1237 hid->hidinput_input_event = usb_hidinput_input_event; 1253 hid->hidinput_input_event = usb_hidinput_input_event;
1238 hid->hidinput_open = hidinput_open; 1254 hid->hid_open = usbhid_open;
1239 hid->hidinput_close = hidinput_close; 1255 hid->hid_close = usbhid_close;
1240#ifdef CONFIG_USB_HIDDEV 1256#ifdef CONFIG_USB_HIDDEV
1241 hid->hiddev_hid_event = hiddev_hid_event; 1257 hid->hiddev_hid_event = hiddev_hid_event;
1242 hid->hiddev_report_event = hiddev_report_event; 1258 hid->hiddev_report_event = hiddev_report_event;
@@ -1315,13 +1331,13 @@ static int hid_probe(struct usb_interface *intf, const struct usb_device_id *id)
1315 return -ENODEV; 1331 return -ENODEV;
1316 } 1332 }
1317 1333
1318 /* This only gets called when we are a single-input (most of the 1334 if ((hid->claimed & HID_CLAIMED_INPUT))
1319 * time). IOW, not a HID_QUIRK_MULTI_INPUT. The hid_ff_init() is
1320 * only useful in this case, and not for multi-input quirks. */
1321 if ((hid->claimed & HID_CLAIMED_INPUT) &&
1322 !(hid->quirks & HID_QUIRK_MULTI_INPUT))
1323 hid_ff_init(hid); 1335 hid_ff_init(hid);
1324 1336
1337 if (hid->quirks & HID_QUIRK_SONY_PS3_CONTROLLER)
1338 hid_fixup_sony_ps3_controller(interface_to_usbdev(intf),
1339 intf->cur_altsetting->desc.bInterfaceNumber);
1340
1325 printk(KERN_INFO); 1341 printk(KERN_INFO);
1326 1342
1327 if (hid->claimed & HID_CLAIMED_INPUT) 1343 if (hid->claimed & HID_CLAIMED_INPUT)
diff --git a/drivers/usb/input/hid-ff.c b/drivers/usb/input/hid-ff.c
index 59ed65e7a621..5d145058a5cb 100644
--- a/drivers/usb/input/hid-ff.c
+++ b/drivers/usb/input/hid-ff.c
@@ -58,6 +58,9 @@ static struct hid_ff_initializer inits[] = {
58 { 0x46d, 0xc295, hid_lgff_init }, /* Logitech MOMO force wheel */ 58 { 0x46d, 0xc295, hid_lgff_init }, /* Logitech MOMO force wheel */
59 { 0x46d, 0xc219, hid_lgff_init }, /* Logitech Cordless rumble pad 2 */ 59 { 0x46d, 0xc219, hid_lgff_init }, /* Logitech Cordless rumble pad 2 */
60#endif 60#endif
61#ifdef CONFIG_PANTHERLORD_FF
62 { 0x810, 0x0001, hid_plff_init },
63#endif
61#ifdef CONFIG_THRUSTMASTER_FF 64#ifdef CONFIG_THRUSTMASTER_FF
62 { 0x44f, 0xb304, hid_tmff_init }, 65 { 0x44f, 0xb304, hid_tmff_init },
63#endif 66#endif
diff --git a/drivers/usb/input/hid-lgff.c b/drivers/usb/input/hid-lgff.c
index e47466268565..4f4fc3be192e 100644
--- a/drivers/usb/input/hid-lgff.c
+++ b/drivers/usb/input/hid-lgff.c
@@ -32,7 +32,7 @@
32#include <linux/hid.h> 32#include <linux/hid.h>
33#include "usbhid.h" 33#include "usbhid.h"
34 34
35struct device_type { 35struct dev_type {
36 u16 idVendor; 36 u16 idVendor;
37 u16 idProduct; 37 u16 idProduct;
38 const signed short *ff; 38 const signed short *ff;
@@ -48,7 +48,7 @@ static const signed short ff_joystick[] = {
48 -1 48 -1
49}; 49};
50 50
51static const struct device_type devices[] = { 51static const struct dev_type devices[] = {
52 { 0x046d, 0xc211, ff_rumble }, 52 { 0x046d, 0xc211, ff_rumble },
53 { 0x046d, 0xc219, ff_rumble }, 53 { 0x046d, 0xc219, ff_rumble },
54 { 0x046d, 0xc283, ff_joystick }, 54 { 0x046d, 0xc283, ff_joystick },
diff --git a/drivers/usb/input/hid-plff.c b/drivers/usb/input/hid-plff.c
new file mode 100644
index 000000000000..76d2e6e14db4
--- /dev/null
+++ b/drivers/usb/input/hid-plff.c
@@ -0,0 +1,129 @@
1/*
2 * Force feedback support for PantherLord USB/PS2 2in1 Adapter devices
3 *
4 * Copyright (c) 2007 Anssi Hannula <anssi.hannula@gmail.com>
5 */
6
7/*
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22
23
24/* #define DEBUG */
25
26#define debug(format, arg...) pr_debug("hid-plff: " format "\n" , ## arg)
27
28#include <linux/input.h>
29#include <linux/usb.h>
30#include <linux/hid.h>
31#include "usbhid.h"
32
33struct plff_device {
34 struct hid_report *report;
35};
36
37static int hid_plff_play(struct input_dev *dev, void *data,
38 struct ff_effect *effect)
39{
40 struct hid_device *hid = dev->private;
41 struct plff_device *plff = data;
42 int left, right;
43
44 left = effect->u.rumble.strong_magnitude;
45 right = effect->u.rumble.weak_magnitude;
46 debug("called with 0x%04x 0x%04x", left, right);
47
48 left = left * 0x7f / 0xffff;
49 right = right * 0x7f / 0xffff;
50
51 plff->report->field[0]->value[2] = left;
52 plff->report->field[0]->value[3] = right;
53 debug("running with 0x%02x 0x%02x", left, right);
54 usbhid_submit_report(hid, plff->report, USB_DIR_OUT);
55
56 return 0;
57}
58
59int hid_plff_init(struct hid_device *hid)
60{
61 struct plff_device *plff;
62 struct hid_report *report;
63 struct hid_input *hidinput;
64 struct list_head *report_list =
65 &hid->report_enum[HID_OUTPUT_REPORT].report_list;
66 struct list_head *report_ptr = report_list;
67 struct input_dev *dev;
68 int error;
69
70 /* The device contains 2 output reports (one for each
71 HID_QUIRK_MULTI_INPUT device), both containing 1 field, which
72 contains 4 ff00.0002 usages and 4 16bit absolute values.
73
74 The 2 input reports also contain a field which contains
75 8 ff00.0001 usages and 8 boolean values. Their meaning is
76 currently unknown. */
77
78 if (list_empty(report_list)) {
79 printk(KERN_ERR "hid-plff: no output reports found\n");
80 return -ENODEV;
81 }
82
83 list_for_each_entry(hidinput, &hid->inputs, list) {
84
85 report_ptr = report_ptr->next;
86
87 if (report_ptr == report_list) {
88 printk(KERN_ERR "hid-plff: required output report is missing\n");
89 return -ENODEV;
90 }
91
92 report = list_entry(report_ptr, struct hid_report, list);
93 if (report->maxfield < 1) {
94 printk(KERN_ERR "hid-plff: no fields in the report\n");
95 return -ENODEV;
96 }
97
98 if (report->field[0]->report_count < 4) {
99 printk(KERN_ERR "hid-plff: not enough values in the field\n");
100 return -ENODEV;
101 }
102
103 plff = kzalloc(sizeof(struct plff_device), GFP_KERNEL);
104 if (!plff)
105 return -ENOMEM;
106
107 dev = hidinput->input;
108
109 set_bit(FF_RUMBLE, dev->ffbit);
110
111 error = input_ff_create_memless(dev, plff, hid_plff_play);
112 if (error) {
113 kfree(plff);
114 return error;
115 }
116
117 plff->report = report;
118 plff->report->field[0]->value[0] = 0x00;
119 plff->report->field[0]->value[1] = 0x00;
120 plff->report->field[0]->value[2] = 0x00;
121 plff->report->field[0]->value[3] = 0x00;
122 usbhid_submit_report(hid, plff->report, USB_DIR_OUT);
123 }
124
125 printk(KERN_INFO "hid-plff: Force feedback for PantherLord USB/PS2 "
126 "2in1 Adapters by Anssi Hannula <anssi.hannula@gmail.com>\n");
127
128 return 0;
129}
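hid_plff_play() above scales the 16-bit ff_effect magnitudes down to the 7-bit range the adapter's output report carries. The arithmetic, spelled out:

	/* 0xffff (full strength) maps to 0x7f, 0 maps to 0; integer
	 * division truncates, so e.g. 0x8000 becomes 0x3f. */
	left  = effect->u.rumble.strong_magnitude * 0x7f / 0xffff;
	right = effect->u.rumble.weak_magnitude  * 0x7f / 0xffff;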
diff --git a/drivers/usb/misc/idmouse.c b/drivers/usb/misc/idmouse.c
index c9418535bef8..15c70bd048c4 100644
--- a/drivers/usb/misc/idmouse.c
+++ b/drivers/usb/misc/idmouse.c
@@ -269,7 +269,7 @@ static int idmouse_release(struct inode *inode, struct file *file)
269 /* prevent a race condition with open() */ 269 /* prevent a race condition with open() */
270 mutex_lock(&disconnect_mutex); 270 mutex_lock(&disconnect_mutex);
271 271
272 dev = (struct usb_idmouse *) file->private_data; 272 dev = file->private_data;
273 273
274 if (dev == NULL) { 274 if (dev == NULL) {
275 mutex_unlock(&disconnect_mutex); 275 mutex_unlock(&disconnect_mutex);
@@ -304,17 +304,15 @@ static int idmouse_release(struct inode *inode, struct file *file)
304static ssize_t idmouse_read(struct file *file, char __user *buffer, size_t count, 304static ssize_t idmouse_read(struct file *file, char __user *buffer, size_t count,
305 loff_t * ppos) 305 loff_t * ppos)
306{ 306{
307 struct usb_idmouse *dev; 307 struct usb_idmouse *dev = file->private_data;
308 int result; 308 int result;
309 309
310 dev = (struct usb_idmouse *) file->private_data;
311
312 /* lock this object */ 310 /* lock this object */
313 down (&dev->sem); 311 down(&dev->sem);
314 312
315 /* verify that the device wasn't unplugged */ 313 /* verify that the device wasn't unplugged */
316 if (!dev->present) { 314 if (!dev->present) {
317 up (&dev->sem); 315 up(&dev->sem);
318 return -ENODEV; 316 return -ENODEV;
319 } 317 }
320 318
diff --git a/drivers/usb/misc/rio500.c b/drivers/usb/misc/rio500.c
index 384fa3769805..fdf68479a166 100644
--- a/drivers/usb/misc/rio500.c
+++ b/drivers/usb/misc/rio500.c
@@ -69,7 +69,7 @@ struct rio_usb_data {
69 char *obuf, *ibuf; /* transfer buffers */ 69 char *obuf, *ibuf; /* transfer buffers */
70 char bulk_in_ep, bulk_out_ep; /* Endpoint assignments */ 70 char bulk_in_ep, bulk_out_ep; /* Endpoint assignments */
71 wait_queue_head_t wait_q; /* for timeouts */ 71 wait_queue_head_t wait_q; /* for timeouts */
72 struct semaphore lock; /* general race avoidance */ 72 struct mutex lock; /* general race avoidance */
73}; 73};
74 74
75static struct rio_usb_data rio_instance; 75static struct rio_usb_data rio_instance;
@@ -78,17 +78,17 @@ static int open_rio(struct inode *inode, struct file *file)
78{ 78{
79 struct rio_usb_data *rio = &rio_instance; 79 struct rio_usb_data *rio = &rio_instance;
80 80
81 down(&(rio->lock)); 81 mutex_lock(&(rio->lock));
82 82
83 if (rio->isopen || !rio->present) { 83 if (rio->isopen || !rio->present) {
84 up(&(rio->lock)); 84 mutex_unlock(&(rio->lock));
85 return -EBUSY; 85 return -EBUSY;
86 } 86 }
87 rio->isopen = 1; 87 rio->isopen = 1;
88 88
89 init_waitqueue_head(&rio->wait_q); 89 init_waitqueue_head(&rio->wait_q);
90 90
91 up(&(rio->lock)); 91 mutex_unlock(&(rio->lock));
92 92
93 info("Rio opened."); 93 info("Rio opened.");
94 94
@@ -117,7 +117,7 @@ ioctl_rio(struct inode *inode, struct file *file, unsigned int cmd,
117 int retries; 117 int retries;
118 int retval=0; 118 int retval=0;
119 119
120 down(&(rio->lock)); 120 mutex_lock(&(rio->lock));
121 /* Sanity check to make sure rio is connected, powered, etc */ 121 /* Sanity check to make sure rio is connected, powered, etc */
122 if ( rio == NULL || 122 if ( rio == NULL ||
123 rio->present == 0 || 123 rio->present == 0 ||
@@ -257,7 +257,7 @@ ioctl_rio(struct inode *inode, struct file *file, unsigned int cmd,
257 257
258 258
259err_out: 259err_out:
260 up(&(rio->lock)); 260 mutex_unlock(&(rio->lock));
261 return retval; 261 return retval;
262} 262}
263 263
@@ -275,14 +275,17 @@ write_rio(struct file *file, const char __user *buffer,
275 int result = 0; 275 int result = 0;
276 int maxretry; 276 int maxretry;
277 int errn = 0; 277 int errn = 0;
278 int intr;
278 279
279 down(&(rio->lock)); 280 intr = mutex_lock_interruptible(&(rio->lock));
281 if (intr)
282 return -EINTR;
280 /* Sanity check to make sure rio is connected, powered, etc */ 283 /* Sanity check to make sure rio is connected, powered, etc */
281 if ( rio == NULL || 284 if ( rio == NULL ||
282 rio->present == 0 || 285 rio->present == 0 ||
283 rio->rio_dev == NULL ) 286 rio->rio_dev == NULL )
284 { 287 {
285 up(&(rio->lock)); 288 mutex_unlock(&(rio->lock));
286 return -ENODEV; 289 return -ENODEV;
287 } 290 }
288 291
@@ -305,7 +308,7 @@ write_rio(struct file *file, const char __user *buffer,
305 goto error; 308 goto error;
306 } 309 }
307 if (signal_pending(current)) { 310 if (signal_pending(current)) {
308 up(&(rio->lock)); 311 mutex_unlock(&(rio->lock));
309 return bytes_written ? bytes_written : -EINTR; 312 return bytes_written ? bytes_written : -EINTR;
310 } 313 }
311 314
@@ -341,12 +344,12 @@ write_rio(struct file *file, const char __user *buffer,
341 buffer += copy_size; 344 buffer += copy_size;
342 } while (count > 0); 345 } while (count > 0);
343 346
344 up(&(rio->lock)); 347 mutex_unlock(&(rio->lock));
345 348
346 return bytes_written ? bytes_written : -EIO; 349 return bytes_written ? bytes_written : -EIO;
347 350
348error: 351error:
349 up(&(rio->lock)); 352 mutex_unlock(&(rio->lock));
350 return errn; 353 return errn;
351} 354}
352 355
@@ -361,14 +364,17 @@ read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos)
361 int result; 364 int result;
362 int maxretry = 10; 365 int maxretry = 10;
363 char *ibuf; 366 char *ibuf;
367 int intr;
364 368
365 down(&(rio->lock)); 369 intr = mutex_lock_interruptible(&(rio->lock));
370 if (intr)
371 return -EINTR;
366 /* Sanity check to make sure rio is connected, powered, etc */ 372 /* Sanity check to make sure rio is connected, powered, etc */
367 if ( rio == NULL || 373 if ( rio == NULL ||
368 rio->present == 0 || 374 rio->present == 0 ||
369 rio->rio_dev == NULL ) 375 rio->rio_dev == NULL )
370 { 376 {
371 up(&(rio->lock)); 377 mutex_unlock(&(rio->lock));
372 return -ENODEV; 378 return -ENODEV;
373 } 379 }
374 380
@@ -379,11 +385,11 @@ read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos)
379 385
380 while (count > 0) { 386 while (count > 0) {
381 if (signal_pending(current)) { 387 if (signal_pending(current)) {
382 up(&(rio->lock)); 388 mutex_unlock(&(rio->lock));
383 return read_count ? read_count : -EINTR; 389 return read_count ? read_count : -EINTR;
384 } 390 }
385 if (!rio->rio_dev) { 391 if (!rio->rio_dev) {
386 up(&(rio->lock)); 392 mutex_unlock(&(rio->lock));
387 return -ENODEV; 393 return -ENODEV;
388 } 394 }
389 this_read = (count >= IBUF_SIZE) ? IBUF_SIZE : count; 395 this_read = (count >= IBUF_SIZE) ? IBUF_SIZE : count;
@@ -400,7 +406,7 @@ read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos)
400 count = this_read = partial; 406 count = this_read = partial;
401 } else if (result == -ETIMEDOUT || result == 15) { /* FIXME: 15 ??? */ 407 } else if (result == -ETIMEDOUT || result == 15) { /* FIXME: 15 ??? */
402 if (!maxretry--) { 408 if (!maxretry--) {
403 up(&(rio->lock)); 409 mutex_unlock(&(rio->lock));
404 err("read_rio: maxretry timeout"); 410 err("read_rio: maxretry timeout");
405 return -ETIME; 411 return -ETIME;
406 } 412 }
@@ -409,18 +415,18 @@ read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos)
409 finish_wait(&rio->wait_q, &wait); 415 finish_wait(&rio->wait_q, &wait);
410 continue; 416 continue;
411 } else if (result != -EREMOTEIO) { 417 } else if (result != -EREMOTEIO) {
412 up(&(rio->lock)); 418 mutex_unlock(&(rio->lock));
413 err("Read Whoops - result:%u partial:%u this_read:%u", 419 err("Read Whoops - result:%u partial:%u this_read:%u",
414 result, partial, this_read); 420 result, partial, this_read);
415 return -EIO; 421 return -EIO;
416 } else { 422 } else {
417 up(&(rio->lock)); 423 mutex_unlock(&(rio->lock));
418 return (0); 424 return (0);
419 } 425 }
420 426
421 if (this_read) { 427 if (this_read) {
422 if (copy_to_user(buffer, ibuf, this_read)) { 428 if (copy_to_user(buffer, ibuf, this_read)) {
423 up(&(rio->lock)); 429 mutex_unlock(&(rio->lock));
424 return -EFAULT; 430 return -EFAULT;
425 } 431 }
426 count -= this_read; 432 count -= this_read;
@@ -428,7 +434,7 @@ read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos)
428 buffer += this_read; 434 buffer += this_read;
429 } 435 }
430 } 436 }
431 up(&(rio->lock)); 437 mutex_unlock(&(rio->lock));
432 return read_count; 438 return read_count;
433} 439}
434 440
@@ -480,7 +486,7 @@ static int probe_rio(struct usb_interface *intf,
480 } 486 }
481 dbg("probe_rio: ibuf address:%p", rio->ibuf); 487 dbg("probe_rio: ibuf address:%p", rio->ibuf);
482 488
483 init_MUTEX(&(rio->lock)); 489 mutex_init(&(rio->lock));
484 490
485 usb_set_intfdata (intf, rio); 491 usb_set_intfdata (intf, rio);
486 rio->present = 1; 492 rio->present = 1;
@@ -496,12 +502,12 @@ static void disconnect_rio(struct usb_interface *intf)
496 if (rio) { 502 if (rio) {
497 usb_deregister_dev(intf, &usb_rio_class); 503 usb_deregister_dev(intf, &usb_rio_class);
498 504
499 down(&(rio->lock)); 505 mutex_lock(&(rio->lock));
500 if (rio->isopen) { 506 if (rio->isopen) {
501 rio->isopen = 0; 507 rio->isopen = 0;
502	/* better let it finish - the release will do what's needed */	508	/* better let it finish - the release will do what's needed */
503 rio->rio_dev = NULL; 509 rio->rio_dev = NULL;
504 up(&(rio->lock)); 510 mutex_unlock(&(rio->lock));
505 return; 511 return;
506 } 512 }
507 kfree(rio->ibuf); 513 kfree(rio->ibuf);
@@ -510,7 +516,7 @@ static void disconnect_rio(struct usb_interface *intf)
510 info("USB Rio disconnected."); 516 info("USB Rio disconnected.");
511 517
512 rio->present = 0; 518 rio->present = 0;
513 up(&(rio->lock)); 519 mutex_unlock(&(rio->lock));
514 } 520 }
515} 521}
516 522
diff --git a/drivers/usb/mon/Makefile b/drivers/usb/mon/Makefile
index 3cf3ea3a88ed..90c59535778d 100644
--- a/drivers/usb/mon/Makefile
+++ b/drivers/usb/mon/Makefile
@@ -2,7 +2,7 @@
2# Makefile for USB Core files and filesystem 2# Makefile for USB Core files and filesystem
3# 3#
4 4
5usbmon-objs := mon_main.o mon_stat.o mon_text.o mon_dma.o 5usbmon-objs := mon_main.o mon_stat.o mon_text.o mon_bin.o mon_dma.o
6 6
7# This does not use CONFIG_USB_MON because we want this to use a tristate. 7# This does not use CONFIG_USB_MON because we want this to use a tristate.
8obj-$(CONFIG_USB) += usbmon.o 8obj-$(CONFIG_USB) += usbmon.o
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
new file mode 100644
index 000000000000..c01dfe603672
--- /dev/null
+++ b/drivers/usb/mon/mon_bin.c
@@ -0,0 +1,1172 @@
1/*
2 * The USB Monitor, inspired by Dave Harding's USBMon.
3 *
4 * This is a binary format reader.
5 *
6 * Copyright (C) 2006 Paolo Abeni (paolo.abeni@email.it)
7 * Copyright (C) 2006 Pete Zaitcev (zaitcev@redhat.com)
8 */
9
10#include <linux/kernel.h>
11#include <linux/types.h>
12#include <linux/fs.h>
13#include <linux/cdev.h>
14#include <linux/usb.h>
15#include <linux/poll.h>
16#include <linux/compat.h>
17#include <linux/mm.h>
18
19#include <asm/uaccess.h>
20
21#include "usb_mon.h"
22
23/*
24 * Defined by USB 2.0 clause 9.3, table 9.2.
25 */
26#define SETUP_LEN 8
27
28/* ioctl macros */
29#define MON_IOC_MAGIC 0x92
30
31#define MON_IOCQ_URB_LEN _IO(MON_IOC_MAGIC, 1)
32/* #2 used to be MON_IOCX_URB, removed before it got into Linus tree */
33#define MON_IOCG_STATS _IOR(MON_IOC_MAGIC, 3, struct mon_bin_stats)
34#define MON_IOCT_RING_SIZE _IO(MON_IOC_MAGIC, 4)
35#define MON_IOCQ_RING_SIZE _IO(MON_IOC_MAGIC, 5)
36#define MON_IOCX_GET _IOW(MON_IOC_MAGIC, 6, struct mon_bin_get)
37#define MON_IOCX_MFETCH _IOWR(MON_IOC_MAGIC, 7, struct mon_bin_mfetch)
38#define MON_IOCH_MFLUSH _IO(MON_IOC_MAGIC, 8)
39#ifdef CONFIG_COMPAT
40#define MON_IOCX_GET32 _IOW(MON_IOC_MAGIC, 6, struct mon_bin_get32)
41#define MON_IOCX_MFETCH32 _IOWR(MON_IOC_MAGIC, 7, struct mon_bin_mfetch32)
42#endif
43
44/*
45 * Some architectures have enormous basic pages (16KB for ia64, 64KB for ppc).
46 * But it's all right. Just use a simple way to make sure the chunk is never
47 * smaller than a page.
48 *
49 * N.B. An application does not know our chunk size.
50 *
51 * Woops, get_zeroed_page() returns a single page. I guess we're stuck with
52 * page-sized chunks for the time being.
53 */
54#define CHUNK_SIZE PAGE_SIZE
55#define CHUNK_ALIGN(x) (((x)+CHUNK_SIZE-1) & ~(CHUNK_SIZE-1))
56
57/*
58 * The magic limit was calculated so that it allows the monitoring
59 * application to pick data once in two ticks. This way, another application,
60 * which presumably drives the bus, gets to hog CPU, yet we collect our data.
61 * If HZ is 100, a 480 Mbit/s bus drives 614 KB every jiffy. USB has an
62 * enormous overhead built into the bus protocol, so we need about 1000 KB.
63 *
64 * This is still too much for most cases, where we just snoop a few
65 * descriptor fetches for enumeration. So, the default is a "reasonable"
66 * amount for systems with HZ=250 and incomplete bus saturation.
67 *
68 * XXX What about multi-megabyte URBs which take minutes to transfer?
69 */
70#define BUFF_MAX CHUNK_ALIGN(1200*1024)
71#define BUFF_DFL CHUNK_ALIGN(300*1024)
72#define BUFF_MIN CHUNK_ALIGN(8*1024)
73
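To make the arithmetic in the comment explicit: 480 Mbit/s is 60 MB/s, and at HZ=100 a jiffy is 10 ms, so a saturated bus moves 60 × 1024 KB / 100 ≈ 614 KB per jiffy. Picking data up once in two ticks means buffering two jiffies' worth, about 1229 KB of raw bus traffic; since a large share of that is protocol overhead rather than captured payload, roughly 1000 KB of ring suffices, and BUFF_MAX rounds that up to a chunk-aligned 1200 KB.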
74/*
75 * The per-event API header (2 per URB).
76 *
77 * This structure is seen in userland as defined by the documentation.
78 */
79struct mon_bin_hdr {
80 u64 id; /* URB ID - from submission to callback */
81 unsigned char type; /* Same as in text API; extensible. */
82 unsigned char xfer_type; /* ISO, Intr, Control, Bulk */
83 unsigned char epnum; /* Endpoint number and transfer direction */
84 unsigned char devnum; /* Device address */
85 unsigned short busnum; /* Bus number */
86 char flag_setup;
87 char flag_data;
88 s64 ts_sec; /* gettimeofday */
89 s32 ts_usec; /* gettimeofday */
90 int status;
91 unsigned int len_urb; /* Length of data (submitted or actual) */
92 unsigned int len_cap; /* Delivered length */
93 unsigned char setup[SETUP_LEN]; /* Only for Control S-type */
94};
95
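Since struct mon_bin_hdr is the record layout a capture application actually parses, a minimal userland reader may help fix ideas. This is only a sketch, not part of the patch: the node name /dev/usbmon1 assumes one character device per bus minor, and the struct is re-declared with userland fixed-width types.

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

struct mon_bin_hdr {
	uint64_t id;
	unsigned char type, xfer_type, epnum, devnum;
	unsigned short busnum;
	char flag_setup, flag_data;
	int64_t ts_sec;
	int32_t ts_usec;
	int status;
	unsigned int len_urb, len_cap;
	unsigned char setup[8];
};

int main(void)
{
	struct mon_bin_hdr hdr;
	unsigned char data[4096];
	int fd = open("/dev/usbmon1", O_RDONLY);	/* assumed node name */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* read(2) hands out the 48-byte header first, then len_cap data */
	if (read(fd, &hdr, sizeof(hdr)) != sizeof(hdr)) {
		perror("read");
		return 1;
	}
	printf("%c bus %u dev %u ep %u status %d, %u/%u bytes\n",
	       hdr.type, hdr.busnum, hdr.devnum, hdr.epnum & 0x7f,
	       hdr.status, hdr.len_cap, hdr.len_urb);
	if (hdr.len_cap != 0)
		read(fd, data, hdr.len_cap < sizeof(data)
		     ? hdr.len_cap : sizeof(data));
	close(fd);
	return 0;
}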
96/* Per-file statistics */
97struct mon_bin_stats {
98 u32 queued;
99 u32 dropped;
100};
101
102struct mon_bin_get {
103 struct mon_bin_hdr __user *hdr; /* Only 48 bytes, not 64. */
104 void __user *data;
105 size_t alloc; /* Length of data (can be zero) */
106};
107
108struct mon_bin_mfetch {
109 u32 __user *offvec; /* Vector of events fetched */
110 u32 nfetch; /* Number of events to fetch (out: fetched) */
111 u32 nflush; /* Number of events to flush */
112};
113
114#ifdef CONFIG_COMPAT
115struct mon_bin_get32 {
116 u32 hdr32;
117 u32 data32;
118 u32 alloc32;
119};
120
121struct mon_bin_mfetch32 {
122 u32 offvec32;
123 u32 nfetch32;
124 u32 nflush32;
125};
126#endif
127
128/* Having these two values the same prevents wrapping of the mon_bin_hdr */
129#define PKT_ALIGN 64
130#define PKT_SIZE 64
131
132/* max number of USB buses supported */
133#define MON_BIN_MAX_MINOR 128
134
135/*
136 * The buffer: map of used pages.
137 */
138struct mon_pgmap {
139 struct page *pg;
140 unsigned char *ptr; /* XXX just use page_to_virt everywhere? */
141};
142
143/*
144 * This gets associated with an open file struct.
145 */
146struct mon_reader_bin {
147 /* The buffer: one per open. */
148 spinlock_t b_lock; /* Protect b_cnt, b_in */
149 unsigned int b_size; /* Current size of the buffer - bytes */
150 unsigned int b_cnt; /* Bytes used */
151 unsigned int b_in, b_out; /* Offsets into buffer - bytes */
152 unsigned int b_read; /* Amount of read data in curr. pkt. */
153 struct mon_pgmap *b_vec; /* The map array */
154 wait_queue_head_t b_wait; /* Wait for data here */
155
156 struct mutex fetch_lock; /* Protect b_read, b_out */
157 int mmap_active;
158
159 /* A list of these is needed for "bus 0". Some time later. */
160 struct mon_reader r;
161
162 /* Stats */
163 unsigned int cnt_lost;
164};
165
166static inline struct mon_bin_hdr *MON_OFF2HDR(const struct mon_reader_bin *rp,
167 unsigned int offset)
168{
169 return (struct mon_bin_hdr *)
170 (rp->b_vec[offset / CHUNK_SIZE].ptr + offset % CHUNK_SIZE);
171}
172
173#define MON_RING_EMPTY(rp) ((rp)->b_cnt == 0)
174
175static dev_t mon_bin_dev0;
176static struct cdev mon_bin_cdev;
177
178static void mon_buff_area_fill(const struct mon_reader_bin *rp,
179 unsigned int offset, unsigned int size);
180static int mon_bin_wait_event(struct file *file, struct mon_reader_bin *rp);
181static int mon_alloc_buff(struct mon_pgmap *map, int npages);
182static void mon_free_buff(struct mon_pgmap *map, int npages);
183
184/*
185 * This is a "chunked memcpy". It does not manipulate any counters.
186 * But it returns the new offset for repeated application.
187 */
188unsigned int mon_copy_to_buff(const struct mon_reader_bin *this,
189 unsigned int off, const unsigned char *from, unsigned int length)
190{
191 unsigned int step_len;
192 unsigned char *buf;
193 unsigned int in_page;
194
195 while (length) {
196 /*
197 * Determine step_len.
198 */
199 step_len = length;
200 in_page = CHUNK_SIZE - (off & (CHUNK_SIZE-1));
201 if (in_page < step_len)
202 step_len = in_page;
203
204 /*
205 * Copy data and advance pointers.
206 */
207 buf = this->b_vec[off / CHUNK_SIZE].ptr + off % CHUNK_SIZE;
208 memcpy(buf, from, step_len);
209 if ((off += step_len) >= this->b_size) off = 0;
210 from += step_len;
211 length -= step_len;
212 }
213 return off;
214}
215
216/*
217 * This is a little worse than the above because it's "chunked copy_to_user".
218 * The return value is an error code, not an offset.
219 */
220static int copy_from_buf(const struct mon_reader_bin *this, unsigned int off,
221 char __user *to, int length)
222{
223 unsigned int step_len;
224 unsigned char *buf;
225 unsigned int in_page;
226
227 while (length) {
228 /*
229 * Determine step_len.
230 */
231 step_len = length;
232 in_page = CHUNK_SIZE - (off & (CHUNK_SIZE-1));
233 if (in_page < step_len)
234 step_len = in_page;
235
236 /*
237 * Copy data and advance pointers.
238 */
239 buf = this->b_vec[off / CHUNK_SIZE].ptr + off % CHUNK_SIZE;
240 if (copy_to_user(to, buf, step_len))
241 return -EINVAL;
242 if ((off += step_len) >= this->b_size) off = 0;
243 to += step_len;
244 length -= step_len;
245 }
246 return 0;
247}
248
249/*
250 * Allocate an (aligned) area in the buffer.
251 * This is called under b_lock.
252 * Returns ~0 on failure.
253 */
254static unsigned int mon_buff_area_alloc(struct mon_reader_bin *rp,
255 unsigned int size)
256{
257 unsigned int offset;
258
259 size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
260 if (rp->b_cnt + size > rp->b_size)
261 return ~0;
262 offset = rp->b_in;
263 rp->b_cnt += size;
264 if ((rp->b_in += size) >= rp->b_size)
265 rp->b_in -= rp->b_size;
266 return offset;
267}
268
269/*
270 * This is the same thing as mon_buff_area_alloc, only it does not allow
271 * buffers to wrap. This is needed by applications which pass references
272 * into mmap-ed buffers up their stacks (libpcap can do that).
273 *
274 * Currently, we always have the header stuck with the data, although
275 * it is not strictly speaking necessary.
276 *
277 * When a buffer would wrap, we place a filler packet to mark the space.
278 */
279static unsigned int mon_buff_area_alloc_contiguous(struct mon_reader_bin *rp,
280 unsigned int size)
281{
282 unsigned int offset;
283 unsigned int fill_size;
284
285 size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
286 if (rp->b_cnt + size > rp->b_size)
287 return ~0;
288 if (rp->b_in + size > rp->b_size) {
289 /*
290 * This would wrap. Find if we still have space after
291 * skipping to the end of the buffer. If we do, place
292 * a filler packet and allocate a new packet.
293 */
294 fill_size = rp->b_size - rp->b_in;
295 if (rp->b_cnt + size + fill_size > rp->b_size)
296 return ~0;
297 mon_buff_area_fill(rp, rp->b_in, fill_size);
298
299 offset = 0;
300 rp->b_in = size;
301 rp->b_cnt += size + fill_size;
302 } else if (rp->b_in + size == rp->b_size) {
303 offset = rp->b_in;
304 rp->b_in = 0;
305 rp->b_cnt += size;
306 } else {
307 offset = rp->b_in;
308 rp->b_in += size;
309 rp->b_cnt += size;
310 }
311 return offset;
312}
313
314/*
315 * Return a few (kilo-)bytes to the head of the buffer.
316 * This is used if a DMA fetch fails.
317 */
318static void mon_buff_area_shrink(struct mon_reader_bin *rp, unsigned int size)
319{
320
321 size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
322 rp->b_cnt -= size;
323 if (rp->b_in < size)
324 rp->b_in += rp->b_size;
325 rp->b_in -= size;
326}
327
328/*
329 * This has to be called under both b_lock and fetch_lock, because
330 * it accesses both b_cnt and b_out.
331 */
332static void mon_buff_area_free(struct mon_reader_bin *rp, unsigned int size)
333{
334
335 size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
336 rp->b_cnt -= size;
337 if ((rp->b_out += size) >= rp->b_size)
338 rp->b_out -= rp->b_size;
339}
340
341static void mon_buff_area_fill(const struct mon_reader_bin *rp,
342 unsigned int offset, unsigned int size)
343{
344 struct mon_bin_hdr *ep;
345
346 ep = MON_OFF2HDR(rp, offset);
347 memset(ep, 0, PKT_SIZE);
348 ep->type = '@';
349 ep->len_cap = size - PKT_SIZE;
350}
351
352static inline char mon_bin_get_setup(unsigned char *setupb,
353 const struct urb *urb, char ev_type)
354{
355
356 if (!usb_pipecontrol(urb->pipe) || ev_type != 'S')
357 return '-';
358
359 if (urb->transfer_flags & URB_NO_SETUP_DMA_MAP)
360 return mon_dmapeek(setupb, urb->setup_dma, SETUP_LEN);
361 if (urb->setup_packet == NULL)
362 return 'Z';
363
364 memcpy(setupb, urb->setup_packet, SETUP_LEN);
365 return 0;
366}
367
368static char mon_bin_get_data(const struct mon_reader_bin *rp,
369 unsigned int offset, struct urb *urb, unsigned int length)
370{
371
372 if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP) {
373 mon_dmapeek_vec(rp, offset, urb->transfer_dma, length);
374 return 0;
375 }
376
377 if (urb->transfer_buffer == NULL)
378 return 'Z';
379
380 mon_copy_to_buff(rp, offset, urb->transfer_buffer, length);
381 return 0;
382}
383
384static void mon_bin_event(struct mon_reader_bin *rp, struct urb *urb,
385 char ev_type)
386{
387 unsigned long flags;
388 struct timeval ts;
389 unsigned int urb_length;
390 unsigned int offset;
391 unsigned int length;
392 struct mon_bin_hdr *ep;
393 char data_tag = 0;
394
395 do_gettimeofday(&ts);
396
397 spin_lock_irqsave(&rp->b_lock, flags);
398
399 /*
400 * Find the maximum allowable length, then allocate space.
401 */
402 urb_length = (ev_type == 'S') ?
403 urb->transfer_buffer_length : urb->actual_length;
404 length = urb_length;
405
406 if (length >= rp->b_size/5)
407 length = rp->b_size/5;
408
409 if (usb_pipein(urb->pipe)) {
410 if (ev_type == 'S') {
411 length = 0;
412 data_tag = '<';
413 }
414 } else {
415 if (ev_type == 'C') {
416 length = 0;
417 data_tag = '>';
418 }
419 }
420
421 if (rp->mmap_active)
422 offset = mon_buff_area_alloc_contiguous(rp, length + PKT_SIZE);
423 else
424 offset = mon_buff_area_alloc(rp, length + PKT_SIZE);
425 if (offset == ~0) {
426 rp->cnt_lost++;
427 spin_unlock_irqrestore(&rp->b_lock, flags);
428 return;
429 }
430
431 ep = MON_OFF2HDR(rp, offset);
432 if ((offset += PKT_SIZE) >= rp->b_size) offset = 0;
433
434 /*
435 * Fill the allocated area.
436 */
437 memset(ep, 0, PKT_SIZE);
438 ep->type = ev_type;
439 ep->xfer_type = usb_pipetype(urb->pipe);
440 /* We use the fact that usb_pipein() returns 0x80 */
441 ep->epnum = usb_pipeendpoint(urb->pipe) | usb_pipein(urb->pipe);
442 ep->devnum = usb_pipedevice(urb->pipe);
443 ep->busnum = rp->r.m_bus->u_bus->busnum;
444 ep->id = (unsigned long) urb;
445 ep->ts_sec = ts.tv_sec;
446 ep->ts_usec = ts.tv_usec;
447 ep->status = urb->status;
448 ep->len_urb = urb_length;
449 ep->len_cap = length;
450
451 ep->flag_setup = mon_bin_get_setup(ep->setup, urb, ev_type);
452 if (length != 0) {
453 ep->flag_data = mon_bin_get_data(rp, offset, urb, length);
454 if (ep->flag_data != 0) { /* Yes, it's 0x00, not '0' */
455 ep->len_cap = 0;
456 mon_buff_area_shrink(rp, length);
457 }
458 } else {
459 ep->flag_data = data_tag;
460 }
461
462 spin_unlock_irqrestore(&rp->b_lock, flags);
463
464 wake_up(&rp->b_wait);
465}
466
467static void mon_bin_submit(void *data, struct urb *urb)
468{
469 struct mon_reader_bin *rp = data;
470 mon_bin_event(rp, urb, 'S');
471}
472
473static void mon_bin_complete(void *data, struct urb *urb)
474{
475 struct mon_reader_bin *rp = data;
476 mon_bin_event(rp, urb, 'C');
477}
478
479static void mon_bin_error(void *data, struct urb *urb, int error)
480{
481 struct mon_reader_bin *rp = data;
482 unsigned long flags;
483 unsigned int offset;
484 struct mon_bin_hdr *ep;
485
486 spin_lock_irqsave(&rp->b_lock, flags);
487
488 offset = mon_buff_area_alloc(rp, PKT_SIZE);
489 if (offset == ~0) {
490 /* Not incrementing cnt_lost. Just because. */
491 spin_unlock_irqrestore(&rp->b_lock, flags);
492 return;
493 }
494
495 ep = MON_OFF2HDR(rp, offset);
496
497 memset(ep, 0, PKT_SIZE);
498 ep->type = 'E';
499 ep->xfer_type = usb_pipetype(urb->pipe);
500 /* We use the fact that usb_pipein() returns 0x80 */
501 ep->epnum = usb_pipeendpoint(urb->pipe) | usb_pipein(urb->pipe);
502 ep->devnum = usb_pipedevice(urb->pipe);
503 ep->busnum = rp->r.m_bus->u_bus->busnum;
504 ep->id = (unsigned long) urb;
505 ep->status = error;
506
507 ep->flag_setup = '-';
508 ep->flag_data = 'E';
509
510 spin_unlock_irqrestore(&rp->b_lock, flags);
511
512 wake_up(&rp->b_wait);
513}
514
515static int mon_bin_open(struct inode *inode, struct file *file)
516{
517 struct mon_bus *mbus;
518 struct usb_bus *ubus;
519 struct mon_reader_bin *rp;
520 size_t size;
521 int rc;
522
523 mutex_lock(&mon_lock);
524 if ((mbus = mon_bus_lookup(iminor(inode))) == NULL) {
525 mutex_unlock(&mon_lock);
526 return -ENODEV;
527 }
528 if ((ubus = mbus->u_bus) == NULL) {
529 printk(KERN_ERR TAG ": consistency error on open\n");
530 mutex_unlock(&mon_lock);
531 return -ENODEV;
532 }
533
534 rp = kzalloc(sizeof(struct mon_reader_bin), GFP_KERNEL);
535 if (rp == NULL) {
536 rc = -ENOMEM;
537 goto err_alloc;
538 }
539 spin_lock_init(&rp->b_lock);
540 init_waitqueue_head(&rp->b_wait);
541 mutex_init(&rp->fetch_lock);
542
543 rp->b_size = BUFF_DFL;
544
545 size = sizeof(struct mon_pgmap) * (rp->b_size/CHUNK_SIZE);
546 if ((rp->b_vec = kzalloc(size, GFP_KERNEL)) == NULL) {
547 rc = -ENOMEM;
548 goto err_allocvec;
549 }
550
551 if ((rc = mon_alloc_buff(rp->b_vec, rp->b_size/CHUNK_SIZE)) < 0)
552 goto err_allocbuff;
553
554 rp->r.m_bus = mbus;
555 rp->r.r_data = rp;
556 rp->r.rnf_submit = mon_bin_submit;
557 rp->r.rnf_error = mon_bin_error;
558 rp->r.rnf_complete = mon_bin_complete;
559
560 mon_reader_add(mbus, &rp->r);
561
562 file->private_data = rp;
563 mutex_unlock(&mon_lock);
564 return 0;
565
566err_allocbuff:
567 kfree(rp->b_vec);
568err_allocvec:
569 kfree(rp);
570err_alloc:
571 mutex_unlock(&mon_lock);
572 return rc;
573}
574
575/*
576 * Extract an event from buffer and copy it to user space.
577 * Wait if there is no event ready.
578 * Returns zero or error.
579 */
580static int mon_bin_get_event(struct file *file, struct mon_reader_bin *rp,
581 struct mon_bin_hdr __user *hdr, void __user *data, unsigned int nbytes)
582{
583 unsigned long flags;
584 struct mon_bin_hdr *ep;
585 size_t step_len;
586 unsigned int offset;
587 int rc;
588
589 mutex_lock(&rp->fetch_lock);
590
591 if ((rc = mon_bin_wait_event(file, rp)) < 0) {
592 mutex_unlock(&rp->fetch_lock);
593 return rc;
594 }
595
596 ep = MON_OFF2HDR(rp, rp->b_out);
597
598 if (copy_to_user(hdr, ep, sizeof(struct mon_bin_hdr))) {
599 mutex_unlock(&rp->fetch_lock);
600 return -EFAULT;
601 }
602
603 step_len = min(ep->len_cap, nbytes);
604 if ((offset = rp->b_out + PKT_SIZE) >= rp->b_size) offset = 0;
605
606 if (copy_from_buf(rp, offset, data, step_len)) {
607 mutex_unlock(&rp->fetch_lock);
608 return -EFAULT;
609 }
610
611 spin_lock_irqsave(&rp->b_lock, flags);
612 mon_buff_area_free(rp, PKT_SIZE + ep->len_cap);
613 spin_unlock_irqrestore(&rp->b_lock, flags);
614 rp->b_read = 0;
615
616 mutex_unlock(&rp->fetch_lock);
617 return 0;
618}
619
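The read(2) path costs two system calls per event; MON_IOCX_GET delivers header and data in one. A hedged userland sketch follows, assuming the MON_IOC* macros and structs from this file have been copied into a local header, here given the hypothetical name usbmon_ioctl.h, with fd an open usbmon node.

#include <stdio.h>
#include <sys/ioctl.h>
#include "usbmon_ioctl.h"	/* hypothetical copy of the defs above */

static int fetch_one_event(int fd)
{
	struct mon_bin_hdr hdr;
	unsigned char data[4096];
	struct mon_bin_get getb = {
		.hdr   = &hdr,
		.data  = data,
		.alloc = sizeof(data),	/* zero is legal: header only */
	};

	if (ioctl(fd, MON_IOCX_GET, &getb) < 0) {
		perror("MON_IOCX_GET");
		return -1;
	}
	/* data[] now holds min(hdr.len_cap, sizeof(data)) captured bytes */
	return 0;
}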
620static int mon_bin_release(struct inode *inode, struct file *file)
621{
622 struct mon_reader_bin *rp = file->private_data;
623 struct mon_bus* mbus = rp->r.m_bus;
624
625 mutex_lock(&mon_lock);
626
627 if (mbus->nreaders <= 0) {
628 printk(KERN_ERR TAG ": consistency error on close\n");
629 mutex_unlock(&mon_lock);
630 return 0;
631 }
632 mon_reader_del(mbus, &rp->r);
633
634 mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE);
635 kfree(rp->b_vec);
636 kfree(rp);
637
638 mutex_unlock(&mon_lock);
639 return 0;
640}
641
642static ssize_t mon_bin_read(struct file *file, char __user *buf,
643 size_t nbytes, loff_t *ppos)
644{
645 struct mon_reader_bin *rp = file->private_data;
646 unsigned long flags;
647 struct mon_bin_hdr *ep;
648 unsigned int offset;
649 size_t step_len;
650 char *ptr;
651 ssize_t done = 0;
652 int rc;
653
654 mutex_lock(&rp->fetch_lock);
655
656 if ((rc = mon_bin_wait_event(file, rp)) < 0) {
657 mutex_unlock(&rp->fetch_lock);
658 return rc;
659 }
660
661 ep = MON_OFF2HDR(rp, rp->b_out);
662
663 if (rp->b_read < sizeof(struct mon_bin_hdr)) {
664 step_len = min(nbytes, sizeof(struct mon_bin_hdr) - rp->b_read);
665 ptr = ((char *)ep) + rp->b_read;
666 if (step_len && copy_to_user(buf, ptr, step_len)) {
667 mutex_unlock(&rp->fetch_lock);
668 return -EFAULT;
669 }
670 nbytes -= step_len;
671 buf += step_len;
672 rp->b_read += step_len;
673 done += step_len;
674 }
675
676 if (rp->b_read >= sizeof(struct mon_bin_hdr)) {
677 step_len = min(nbytes, (size_t)ep->len_cap);
678 offset = rp->b_out + PKT_SIZE;
679 offset += rp->b_read - sizeof(struct mon_bin_hdr);
680 if (offset >= rp->b_size)
681 offset -= rp->b_size;
682 if (copy_from_buf(rp, offset, buf, step_len)) {
683 mutex_unlock(&rp->fetch_lock);
684 return -EFAULT;
685 }
686 nbytes -= step_len;
687 buf += step_len;
688 rp->b_read += step_len;
689 done += step_len;
690 }
691
692 /*
693 * Check if whole packet was read, and if so, jump to the next one.
694 */
695 if (rp->b_read >= sizeof(struct mon_bin_hdr) + ep->len_cap) {
696 spin_lock_irqsave(&rp->b_lock, flags);
697 mon_buff_area_free(rp, PKT_SIZE + ep->len_cap);
698 spin_unlock_irqrestore(&rp->b_lock, flags);
699 rp->b_read = 0;
700 }
701
702 mutex_unlock(&rp->fetch_lock);
703 return done;
704}
705
706/*
707 * Remove at most nevents from chunked buffer.
708 * Returns the number of removed events.
709 */
710static int mon_bin_flush(struct mon_reader_bin *rp, unsigned nevents)
711{
712 unsigned long flags;
713 struct mon_bin_hdr *ep;
714 int i;
715
716 mutex_lock(&rp->fetch_lock);
717 spin_lock_irqsave(&rp->b_lock, flags);
718 for (i = 0; i < nevents; ++i) {
719 if (MON_RING_EMPTY(rp))
720 break;
721
722 ep = MON_OFF2HDR(rp, rp->b_out);
723 mon_buff_area_free(rp, PKT_SIZE + ep->len_cap);
724 }
725 spin_unlock_irqrestore(&rp->b_lock, flags);
726 rp->b_read = 0;
727 mutex_unlock(&rp->fetch_lock);
728 return i;
729}
730
731/*
732 * Fetch at most max event offsets into the buffer and put them into vec.
733 * The events are usually freed later with mon_bin_flush.
734 * Return the effective number of events fetched.
735 */
736static int mon_bin_fetch(struct file *file, struct mon_reader_bin *rp,
737 u32 __user *vec, unsigned int max)
738{
739 unsigned int cur_out;
740 unsigned int bytes, avail;
741 unsigned int size;
742 unsigned int nevents;
743 struct mon_bin_hdr *ep;
744 unsigned long flags;
745 int rc;
746
747 mutex_lock(&rp->fetch_lock);
748
749 if ((rc = mon_bin_wait_event(file, rp)) < 0) {
750 mutex_unlock(&rp->fetch_lock);
751 return rc;
752 }
753
754 spin_lock_irqsave(&rp->b_lock, flags);
755 avail = rp->b_cnt;
756 spin_unlock_irqrestore(&rp->b_lock, flags);
757
758 cur_out = rp->b_out;
759 nevents = 0;
760 bytes = 0;
761 while (bytes < avail) {
762 if (nevents >= max)
763 break;
764
765 ep = MON_OFF2HDR(rp, cur_out);
766 if (put_user(cur_out, &vec[nevents])) {
767 mutex_unlock(&rp->fetch_lock);
768 return -EFAULT;
769 }
770
771 nevents++;
772 size = ep->len_cap + PKT_SIZE;
773 size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
774 if ((cur_out += size) >= rp->b_size)
775 cur_out -= rp->b_size;
776 bytes += size;
777 }
778
779 mutex_unlock(&rp->fetch_lock);
780 return nevents;
781}
782
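Paired with mon_bin_flush() above, this is the kernel half of a fetch/process/flush cycle over the mmap-ed ring (the mapping is set up by mon_bin_mmap further down). A sketch of the intended userland loop, under the same hypothetical-header assumption as before; ring is the mapped buffer:

#include <stdint.h>
#include <sys/ioctl.h>
#include "usbmon_ioctl.h"	/* hypothetical, as above */

static void mfetch_loop(int fd, const unsigned char *ring)
{
	uint32_t offvec[32];
	struct mon_bin_mfetch fetch = {
		.offvec = offvec,
		.nfetch = 32,
		.nflush = 0,	/* nothing to release on the first pass */
	};

	for (;;) {
		/* the kernel rewrites nfetch with the count delivered */
		if (ioctl(fd, MON_IOCX_MFETCH, &fetch) < 0)
			break;
		for (uint32_t i = 0; i < fetch.nfetch; i++) {
			const struct mon_bin_hdr *hdr =
				(const void *)(ring + offvec[i]);
			if (hdr->type == '@')	/* filler record, skip */
				continue;
			/* data follows the 64-byte header in the ring */
		}
		fetch.nflush = fetch.nfetch;	/* release consumed events */
		fetch.nfetch = 32;
	}
}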
783/*
784 * Count events. This is almost the same as the above mon_bin_fetch,
785 * only we do not store offsets into user vector, and we have no limit.
786 */
787static int mon_bin_queued(struct mon_reader_bin *rp)
788{
789 unsigned int cur_out;
790 unsigned int bytes, avail;
791 unsigned int size;
792 unsigned int nevents;
793 struct mon_bin_hdr *ep;
794 unsigned long flags;
795
796 mutex_lock(&rp->fetch_lock);
797
798 spin_lock_irqsave(&rp->b_lock, flags);
799 avail = rp->b_cnt;
800 spin_unlock_irqrestore(&rp->b_lock, flags);
801
802 cur_out = rp->b_out;
803 nevents = 0;
804 bytes = 0;
805 while (bytes < avail) {
806 ep = MON_OFF2HDR(rp, cur_out);
807
808 nevents++;
809 size = ep->len_cap + PKT_SIZE;
810 size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
811 if ((cur_out += size) >= rp->b_size)
812 cur_out -= rp->b_size;
813 bytes += size;
814 }
815
816 mutex_unlock(&rp->fetch_lock);
817 return nevents;
818}
819
820/*
821 */
822static int mon_bin_ioctl(struct inode *inode, struct file *file,
823 unsigned int cmd, unsigned long arg)
824{
825 struct mon_reader_bin *rp = file->private_data;
826 // struct mon_bus* mbus = rp->r.m_bus;
827 int ret = 0;
828 struct mon_bin_hdr *ep;
829 unsigned long flags;
830
831 switch (cmd) {
832
833 case MON_IOCQ_URB_LEN:
834 /*
835 * N.B. This only returns the size of data, without the header.
836 */
837 spin_lock_irqsave(&rp->b_lock, flags);
838 if (!MON_RING_EMPTY(rp)) {
839 ep = MON_OFF2HDR(rp, rp->b_out);
840 ret = ep->len_cap;
841 }
842 spin_unlock_irqrestore(&rp->b_lock, flags);
843 break;
844
845 case MON_IOCQ_RING_SIZE:
846 ret = rp->b_size;
847 break;
848
849 case MON_IOCT_RING_SIZE:
850 /*
851 * Changing the buffer size flushes its contents; the new
852 * buffer is allocated before the old one is released, to be
853 * sure the device stays functional even under memory
854 * pressure.
855 */
856 {
857 int size;
858 struct mon_pgmap *vec;
859
860 if (arg < BUFF_MIN || arg > BUFF_MAX)
861 return -EINVAL;
862
863 size = CHUNK_ALIGN(arg);
864 if ((vec = kzalloc(sizeof(struct mon_pgmap) * (size/CHUNK_SIZE),
865 GFP_KERNEL)) == NULL) {
866 ret = -ENOMEM;
867 break;
868 }
869
870 ret = mon_alloc_buff(vec, size/CHUNK_SIZE);
871 if (ret < 0) {
872 kfree(vec);
873 break;
874 }
875
876 mutex_lock(&rp->fetch_lock);
877 spin_lock_irqsave(&rp->b_lock, flags);
878		mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE); /* old buffer, old size */
879 kfree(rp->b_vec);
880 rp->b_vec = vec;
881 rp->b_size = size;
882 rp->b_read = rp->b_in = rp->b_out = rp->b_cnt = 0;
883 rp->cnt_lost = 0;
884 spin_unlock_irqrestore(&rp->b_lock, flags);
885 mutex_unlock(&rp->fetch_lock);
886 }
887 break;
888
889 case MON_IOCH_MFLUSH:
890 ret = mon_bin_flush(rp, arg);
891 break;
892
893 case MON_IOCX_GET:
894 {
895 struct mon_bin_get getb;
896
897 if (copy_from_user(&getb, (void __user *)arg,
898 sizeof(struct mon_bin_get)))
899 return -EFAULT;
900
901 if (getb.alloc > 0x10000000) /* Want to cast to u32 */
902 return -EINVAL;
903 ret = mon_bin_get_event(file, rp,
904 getb.hdr, getb.data, (unsigned int)getb.alloc);
905 }
906 break;
907
908#ifdef CONFIG_COMPAT
909 case MON_IOCX_GET32: {
910 struct mon_bin_get32 getb;
911
912 if (copy_from_user(&getb, (void __user *)arg,
913 sizeof(struct mon_bin_get32)))
914 return -EFAULT;
915
916 ret = mon_bin_get_event(file, rp,
917 compat_ptr(getb.hdr32), compat_ptr(getb.data32),
918 getb.alloc32);
919 }
920 break;
921#endif
922
923 case MON_IOCX_MFETCH:
924 {
925 struct mon_bin_mfetch mfetch;
926 struct mon_bin_mfetch __user *uptr;
927
928 uptr = (struct mon_bin_mfetch __user *)arg;
929
930 if (copy_from_user(&mfetch, uptr, sizeof(mfetch)))
931 return -EFAULT;
932
933 if (mfetch.nflush) {
934 ret = mon_bin_flush(rp, mfetch.nflush);
935 if (ret < 0)
936 return ret;
937 if (put_user(ret, &uptr->nflush))
938 return -EFAULT;
939 }
940 ret = mon_bin_fetch(file, rp, mfetch.offvec, mfetch.nfetch);
941 if (ret < 0)
942 return ret;
943 if (put_user(ret, &uptr->nfetch))
944 return -EFAULT;
945 ret = 0;
946 }
947 break;
948
949#ifdef CONFIG_COMPAT
950 case MON_IOCX_MFETCH32:
951 {
952 struct mon_bin_mfetch32 mfetch;
953 struct mon_bin_mfetch32 __user *uptr;
954
955 uptr = (struct mon_bin_mfetch32 __user *) compat_ptr(arg);
956
957 if (copy_from_user(&mfetch, uptr, sizeof(mfetch)))
958 return -EFAULT;
959
960 if (mfetch.nflush32) {
961 ret = mon_bin_flush(rp, mfetch.nflush32);
962 if (ret < 0)
963 return ret;
964 if (put_user(ret, &uptr->nflush32))
965 return -EFAULT;
966 }
967 ret = mon_bin_fetch(file, rp, compat_ptr(mfetch.offvec32),
968 mfetch.nfetch32);
969 if (ret < 0)
970 return ret;
971 if (put_user(ret, &uptr->nfetch32))
972 return -EFAULT;
973 ret = 0;
974 }
975 break;
976#endif
977
978 case MON_IOCG_STATS: {
979 struct mon_bin_stats __user *sp;
980 unsigned int nevents;
981 unsigned int ndropped;
982
983 spin_lock_irqsave(&rp->b_lock, flags);
984 ndropped = rp->cnt_lost;
985 rp->cnt_lost = 0;
986 spin_unlock_irqrestore(&rp->b_lock, flags);
987 nevents = mon_bin_queued(rp);
988
989 sp = (struct mon_bin_stats __user *)arg;
990		if (put_user(ndropped, &sp->dropped))
991 return -EFAULT;
992 if (put_user(nevents, &sp->queued))
993 return -EFAULT;
994
995 }
996 break;
997
998 default:
999 return -ENOTTY;
1000 }
1001
1002 return ret;
1003}
1004
1005static unsigned int
1006mon_bin_poll(struct file *file, struct poll_table_struct *wait)
1007{
1008 struct mon_reader_bin *rp = file->private_data;
1009 unsigned int mask = 0;
1010 unsigned long flags;
1011
1012 if (file->f_mode & FMODE_READ)
1013 poll_wait(file, &rp->b_wait, wait);
1014
1015 spin_lock_irqsave(&rp->b_lock, flags);
1016 if (!MON_RING_EMPTY(rp))
1017 mask |= POLLIN | POLLRDNORM; /* readable */
1018 spin_unlock_irqrestore(&rp->b_lock, flags);
1019 return mask;
1020}
1021
1022/*
1023 * open and close: just keep track of how many times the device is
1024 * mapped, so we know when events must be allocated contiguously.
1025 */
1026static void mon_bin_vma_open(struct vm_area_struct *vma)
1027{
1028 struct mon_reader_bin *rp = vma->vm_private_data;
1029 rp->mmap_active++;
1030}
1031
1032static void mon_bin_vma_close(struct vm_area_struct *vma)
1033{
1034 struct mon_reader_bin *rp = vma->vm_private_data;
1035 rp->mmap_active--;
1036}
1037
1038/*
1039 * Map ring pages to user space.
1040 */
1041struct page *mon_bin_vma_nopage(struct vm_area_struct *vma,
1042 unsigned long address, int *type)
1043{
1044 struct mon_reader_bin *rp = vma->vm_private_data;
1045 unsigned long offset, chunk_idx;
1046 struct page *pageptr;
1047
1048 offset = (address - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT);
1049 if (offset >= rp->b_size)
1050 return NOPAGE_SIGBUS;
1051 chunk_idx = offset / CHUNK_SIZE;
1052 pageptr = rp->b_vec[chunk_idx].pg;
1053 get_page(pageptr);
1054 if (type)
1055 *type = VM_FAULT_MINOR;
1056 return pageptr;
1057}
1058
1059struct vm_operations_struct mon_bin_vm_ops = {
1060 .open = mon_bin_vma_open,
1061 .close = mon_bin_vma_close,
1062 .nopage = mon_bin_vma_nopage,
1063};
1064
1065int mon_bin_mmap(struct file *filp, struct vm_area_struct *vma)
1066{
1067 /* don't do anything here: "nopage" will set up page table entries */
1068 vma->vm_ops = &mon_bin_vm_ops;
1069 vma->vm_flags |= VM_RESERVED;
1070 vma->vm_private_data = filp->private_data;
1071 mon_bin_vma_open(vma);
1072 return 0;
1073}
1074
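From userland, the ring is sized with MON_IOCQ_RING_SIZE and mapped read-only before running the mfetch loop sketched earlier. Again a sketch under the same assumptions:

#include <stddef.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include "usbmon_ioctl.h"	/* hypothetical, as above */

static unsigned char *map_ring(int fd)
{
	int size = ioctl(fd, MON_IOCQ_RING_SIZE);
	void *ring;

	if (size < 0)
		return NULL;
	ring = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, 0);
	return ring == MAP_FAILED ? NULL : ring;
}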
1075struct file_operations mon_fops_binary = {
1076 .owner = THIS_MODULE,
1077 .open = mon_bin_open,
1078 .llseek = no_llseek,
1079 .read = mon_bin_read,
1080 /* .write = mon_text_write, */
1081 .poll = mon_bin_poll,
1082 .ioctl = mon_bin_ioctl,
1083 .release = mon_bin_release,
1084};
1085
1086static int mon_bin_wait_event(struct file *file, struct mon_reader_bin *rp)
1087{
1088 DECLARE_WAITQUEUE(waita, current);
1089 unsigned long flags;
1090
1091 add_wait_queue(&rp->b_wait, &waita);
1092 set_current_state(TASK_INTERRUPTIBLE);
1093
1094 spin_lock_irqsave(&rp->b_lock, flags);
1095 while (MON_RING_EMPTY(rp)) {
1096 spin_unlock_irqrestore(&rp->b_lock, flags);
1097
1098 if (file->f_flags & O_NONBLOCK) {
1099 set_current_state(TASK_RUNNING);
1100 remove_wait_queue(&rp->b_wait, &waita);
1101 return -EWOULDBLOCK; /* Same as EAGAIN in Linux */
1102 }
1103 schedule();
1104 if (signal_pending(current)) {
1105 remove_wait_queue(&rp->b_wait, &waita);
1106 return -EINTR;
1107 }
1108 set_current_state(TASK_INTERRUPTIBLE);
1109
1110 spin_lock_irqsave(&rp->b_lock, flags);
1111 }
1112 spin_unlock_irqrestore(&rp->b_lock, flags);
1113
1114 set_current_state(TASK_RUNNING);
1115 remove_wait_queue(&rp->b_wait, &waita);
1116 return 0;
1117}
1118
1119static int mon_alloc_buff(struct mon_pgmap *map, int npages)
1120{
1121 int n;
1122 unsigned long vaddr;
1123
1124 for (n = 0; n < npages; n++) {
1125 vaddr = get_zeroed_page(GFP_KERNEL);
1126 if (vaddr == 0) {
1127 while (n-- != 0)
1128 free_page((unsigned long) map[n].ptr);
1129 return -ENOMEM;
1130 }
1131 map[n].ptr = (unsigned char *) vaddr;
1132 map[n].pg = virt_to_page(vaddr);
1133 }
1134 return 0;
1135}
1136
1137static void mon_free_buff(struct mon_pgmap *map, int npages)
1138{
1139 int n;
1140
1141 for (n = 0; n < npages; n++)
1142 free_page((unsigned long) map[n].ptr);
1143}
1144
1145int __init mon_bin_init(void)
1146{
1147 int rc;
1148
1149 rc = alloc_chrdev_region(&mon_bin_dev0, 0, MON_BIN_MAX_MINOR, "usbmon");
1150 if (rc < 0)
1151 goto err_dev;
1152
1153 cdev_init(&mon_bin_cdev, &mon_fops_binary);
1154 mon_bin_cdev.owner = THIS_MODULE;
1155
1156 rc = cdev_add(&mon_bin_cdev, mon_bin_dev0, MON_BIN_MAX_MINOR);
1157 if (rc < 0)
1158 goto err_add;
1159
1160 return 0;
1161
1162err_add:
1163 unregister_chrdev_region(mon_bin_dev0, MON_BIN_MAX_MINOR);
1164err_dev:
1165 return rc;
1166}
1167
1168void __exit mon_bin_exit(void)
1169{
1170 cdev_del(&mon_bin_cdev);
1171 unregister_chrdev_region(mon_bin_dev0, MON_BIN_MAX_MINOR);
1172}
diff --git a/drivers/usb/mon/mon_dma.c b/drivers/usb/mon/mon_dma.c
index ddcfc01e77a0..140cc80bd2b1 100644
--- a/drivers/usb/mon/mon_dma.c
+++ b/drivers/usb/mon/mon_dma.c
@@ -48,6 +48,36 @@ char mon_dmapeek(unsigned char *dst, dma_addr_t dma_addr, int len)
48 local_irq_restore(flags); 48 local_irq_restore(flags);
49 return 0; 49 return 0;
50} 50}
51
52void mon_dmapeek_vec(const struct mon_reader_bin *rp,
53 unsigned int offset, dma_addr_t dma_addr, unsigned int length)
54{
55 unsigned long flags;
56 unsigned int step_len;
57 struct page *pg;
58 unsigned char *map;
59 unsigned long page_off, page_len;
60
61 local_irq_save(flags);
62 while (length) {
63		/* compute the number of bytes to copy within this page */
64 step_len = length;
65 page_off = dma_addr & (PAGE_SIZE-1);
66 page_len = PAGE_SIZE - page_off;
67 if (page_len < step_len)
68 step_len = page_len;
69
70 /* copy data and advance pointers */
71 pg = phys_to_page(dma_addr);
72 map = kmap_atomic(pg, KM_IRQ0);
73 offset = mon_copy_to_buff(rp, offset, map + page_off, step_len);
74 kunmap_atomic(map, KM_IRQ0);
75 dma_addr += step_len;
76 length -= step_len;
77 }
78 local_irq_restore(flags);
79}
80
51#endif /* __i386__ */ 81#endif /* __i386__ */
52 82
53#ifndef MON_HAS_UNMAP 83#ifndef MON_HAS_UNMAP
@@ -55,4 +85,11 @@ char mon_dmapeek(unsigned char *dst, dma_addr_t dma_addr, int len)
55{ 85{
56 return 'D'; 86 return 'D';
57} 87}
58#endif 88
89void mon_dmapeek_vec(const struct mon_reader_bin *rp,
90 unsigned int offset, dma_addr_t dma_addr, unsigned int length)
91{
92 ;
93}
94
95#endif /* MON_HAS_UNMAP */
diff --git a/drivers/usb/mon/mon_main.c b/drivers/usb/mon/mon_main.c
index 394bbf2f68d4..c9739e7b35e5 100644
--- a/drivers/usb/mon/mon_main.c
+++ b/drivers/usb/mon/mon_main.c
@@ -9,7 +9,6 @@
9#include <linux/kernel.h> 9#include <linux/kernel.h>
10#include <linux/module.h> 10#include <linux/module.h>
11#include <linux/usb.h> 11#include <linux/usb.h>
12#include <linux/debugfs.h>
13#include <linux/smp_lock.h> 12#include <linux/smp_lock.h>
14#include <linux/notifier.h> 13#include <linux/notifier.h>
15#include <linux/mutex.h> 14#include <linux/mutex.h>
@@ -22,11 +21,10 @@ static void mon_complete(struct usb_bus *ubus, struct urb *urb);
22static void mon_stop(struct mon_bus *mbus); 21static void mon_stop(struct mon_bus *mbus);
23static void mon_dissolve(struct mon_bus *mbus, struct usb_bus *ubus); 22static void mon_dissolve(struct mon_bus *mbus, struct usb_bus *ubus);
24static void mon_bus_drop(struct kref *r); 23static void mon_bus_drop(struct kref *r);
25static void mon_bus_init(struct dentry *mondir, struct usb_bus *ubus); 24static void mon_bus_init(struct usb_bus *ubus);
26 25
27DEFINE_MUTEX(mon_lock); 26DEFINE_MUTEX(mon_lock);
28 27
29static struct dentry *mon_dir; /* /dbg/usbmon */
30static LIST_HEAD(mon_buses); /* All buses we know: struct mon_bus */ 28static LIST_HEAD(mon_buses); /* All buses we know: struct mon_bus */
31 29
32/* 30/*
@@ -200,7 +198,7 @@ static void mon_stop(struct mon_bus *mbus)
200 */ 198 */
201static void mon_bus_add(struct usb_bus *ubus) 199static void mon_bus_add(struct usb_bus *ubus)
202{ 200{
203 mon_bus_init(mon_dir, ubus); 201 mon_bus_init(ubus);
204} 202}
205 203
206/* 204/*
@@ -212,8 +210,8 @@ static void mon_bus_remove(struct usb_bus *ubus)
212 210
213 mutex_lock(&mon_lock); 211 mutex_lock(&mon_lock);
214 list_del(&mbus->bus_link); 212 list_del(&mbus->bus_link);
215 debugfs_remove(mbus->dent_t); 213 if (mbus->text_inited)
216 debugfs_remove(mbus->dent_s); 214 mon_text_del(mbus);
217 215
218 mon_dissolve(mbus, ubus); 216 mon_dissolve(mbus, ubus);
219 kref_put(&mbus->ref, mon_bus_drop); 217 kref_put(&mbus->ref, mon_bus_drop);
@@ -281,13 +279,9 @@ static void mon_bus_drop(struct kref *r)
281 * - refcount USB bus struct 279 * - refcount USB bus struct
282 * - link 280 * - link
283 */ 281 */
284static void mon_bus_init(struct dentry *mondir, struct usb_bus *ubus) 282static void mon_bus_init(struct usb_bus *ubus)
285{ 283{
286 struct dentry *d;
287 struct mon_bus *mbus; 284 struct mon_bus *mbus;
288 enum { NAMESZ = 10 };
289 char name[NAMESZ];
290 int rc;
291 285
292 if ((mbus = kzalloc(sizeof(struct mon_bus), GFP_KERNEL)) == NULL) 286 if ((mbus = kzalloc(sizeof(struct mon_bus), GFP_KERNEL)) == NULL)
293 goto err_alloc; 287 goto err_alloc;
@@ -303,57 +297,54 @@ static void mon_bus_init(struct dentry *mondir, struct usb_bus *ubus)
303 ubus->mon_bus = mbus; 297 ubus->mon_bus = mbus;
304 mbus->uses_dma = ubus->uses_dma; 298 mbus->uses_dma = ubus->uses_dma;
305 299
306 rc = snprintf(name, NAMESZ, "%dt", ubus->busnum); 300 mbus->text_inited = mon_text_add(mbus, ubus);
307 if (rc <= 0 || rc >= NAMESZ) 301 // mon_bin_add(...)
308 goto err_print_t;
309 d = debugfs_create_file(name, 0600, mondir, mbus, &mon_fops_text);
310 if (d == NULL)
311 goto err_create_t;
312 mbus->dent_t = d;
313
314 rc = snprintf(name, NAMESZ, "%ds", ubus->busnum);
315 if (rc <= 0 || rc >= NAMESZ)
316 goto err_print_s;
317 d = debugfs_create_file(name, 0600, mondir, mbus, &mon_fops_stat);
318 if (d == NULL)
319 goto err_create_s;
320 mbus->dent_s = d;
321 302
322 mutex_lock(&mon_lock); 303 mutex_lock(&mon_lock);
323 list_add_tail(&mbus->bus_link, &mon_buses); 304 list_add_tail(&mbus->bus_link, &mon_buses);
324 mutex_unlock(&mon_lock); 305 mutex_unlock(&mon_lock);
325 return; 306 return;
326 307
327err_create_s:
328err_print_s:
329 debugfs_remove(mbus->dent_t);
330err_create_t:
331err_print_t:
332 kfree(mbus);
333err_alloc: 308err_alloc:
334 return; 309 return;
335} 310}
336 311
312/*
313 * Search a USB bus by number. Notice that USB bus numbers start from one,
314 * which we may later use to identify "all" with zero.
315 *
316 * This function must be called with mon_lock held.
317 *
318 * This is obviously inefficient and may be revised in the future.
319 */
320struct mon_bus *mon_bus_lookup(unsigned int num)
321{
322 struct list_head *p;
323 struct mon_bus *mbus;
324
325 list_for_each (p, &mon_buses) {
326 mbus = list_entry(p, struct mon_bus, bus_link);
327 if (mbus->u_bus->busnum == num) {
328 return mbus;
329 }
330 }
331 return NULL;
332}
333
337static int __init mon_init(void) 334static int __init mon_init(void)
338{ 335{
339 struct usb_bus *ubus; 336 struct usb_bus *ubus;
340 struct dentry *mondir; 337 int rc;
341 338
342 mondir = debugfs_create_dir("usbmon", NULL); 339 if ((rc = mon_text_init()) != 0)
343 if (IS_ERR(mondir)) { 340 goto err_text;
344 printk(KERN_NOTICE TAG ": debugfs is not available\n"); 341 if ((rc = mon_bin_init()) != 0)
345 return -ENODEV; 342 goto err_bin;
346 }
347 if (mondir == NULL) {
348 printk(KERN_NOTICE TAG ": unable to create usbmon directory\n");
349 return -ENODEV;
350 }
351 mon_dir = mondir;
352 343
353 if (usb_mon_register(&mon_ops_0) != 0) { 344 if (usb_mon_register(&mon_ops_0) != 0) {
354 printk(KERN_NOTICE TAG ": unable to register with the core\n"); 345 printk(KERN_NOTICE TAG ": unable to register with the core\n");
355 debugfs_remove(mondir); 346 rc = -ENODEV;
356 return -ENODEV; 347 goto err_reg;
357 } 348 }
358 // MOD_INC_USE_COUNT(which_module?); 349 // MOD_INC_USE_COUNT(which_module?);
359 350
@@ -361,10 +352,17 @@ static int __init mon_init(void)
361 352
362 mutex_lock(&usb_bus_list_lock); 353 mutex_lock(&usb_bus_list_lock);
363 list_for_each_entry (ubus, &usb_bus_list, bus_list) { 354 list_for_each_entry (ubus, &usb_bus_list, bus_list) {
364 mon_bus_init(mondir, ubus); 355 mon_bus_init(ubus);
365 } 356 }
366 mutex_unlock(&usb_bus_list_lock); 357 mutex_unlock(&usb_bus_list_lock);
367 return 0; 358 return 0;
359
360err_reg:
361 mon_bin_exit();
362err_bin:
363 mon_text_exit();
364err_text:
365 return rc;
368} 366}
369 367
370static void __exit mon_exit(void) 368static void __exit mon_exit(void)
@@ -381,8 +379,8 @@ static void __exit mon_exit(void)
381 mbus = list_entry(p, struct mon_bus, bus_link); 379 mbus = list_entry(p, struct mon_bus, bus_link);
382 list_del(p); 380 list_del(p);
383 381
384 debugfs_remove(mbus->dent_t); 382 if (mbus->text_inited)
385 debugfs_remove(mbus->dent_s); 383 mon_text_del(mbus);
386 384
387 /* 385 /*
388 * This never happens, because the open/close paths in 386 * This never happens, because the open/close paths in
@@ -401,7 +399,8 @@ static void __exit mon_exit(void)
401 } 399 }
402 mutex_unlock(&mon_lock); 400 mutex_unlock(&mon_lock);
403 401
404 debugfs_remove(mon_dir); 402 mon_text_exit();
403 mon_bin_exit();
405} 404}
406 405
407module_init(mon_init); 406module_init(mon_init);
diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c
index 05cf2c9a8f84..d38a1279d9d9 100644
--- a/drivers/usb/mon/mon_text.c
+++ b/drivers/usb/mon/mon_text.c
@@ -9,6 +9,7 @@
9#include <linux/usb.h> 9#include <linux/usb.h>
10#include <linux/time.h> 10#include <linux/time.h>
11#include <linux/mutex.h> 11#include <linux/mutex.h>
12#include <linux/debugfs.h>
12#include <asm/uaccess.h> 13#include <asm/uaccess.h>
13 14
14#include "usb_mon.h" 15#include "usb_mon.h"
@@ -63,6 +64,8 @@ struct mon_reader_text {
63 char slab_name[SLAB_NAME_SZ]; 64 char slab_name[SLAB_NAME_SZ];
64}; 65};
65 66
67static struct dentry *mon_dir; /* Usually /sys/kernel/debug/usbmon */
68
66static void mon_text_ctor(void *, struct kmem_cache *, unsigned long); 69static void mon_text_ctor(void *, struct kmem_cache *, unsigned long);
67 70
68/* 71/*
@@ -436,7 +439,7 @@ static int mon_text_release(struct inode *inode, struct file *file)
436 return 0; 439 return 0;
437} 440}
438 441
439const struct file_operations mon_fops_text = { 442static const struct file_operations mon_fops_text = {
440 .owner = THIS_MODULE, 443 .owner = THIS_MODULE,
441 .open = mon_text_open, 444 .open = mon_text_open,
442 .llseek = no_llseek, 445 .llseek = no_llseek,
@@ -447,6 +450,47 @@ const struct file_operations mon_fops_text = {
447 .release = mon_text_release, 450 .release = mon_text_release,
448}; 451};
449 452
453int mon_text_add(struct mon_bus *mbus, const struct usb_bus *ubus)
454{
455 struct dentry *d;
456 enum { NAMESZ = 10 };
457 char name[NAMESZ];
458 int rc;
459
460 rc = snprintf(name, NAMESZ, "%dt", ubus->busnum);
461 if (rc <= 0 || rc >= NAMESZ)
462 goto err_print_t;
463 d = debugfs_create_file(name, 0600, mon_dir, mbus, &mon_fops_text);
464 if (d == NULL)
465 goto err_create_t;
466 mbus->dent_t = d;
467
468	/* XXX The stats do not belong here (text API), but oh well... */
469 rc = snprintf(name, NAMESZ, "%ds", ubus->busnum);
470 if (rc <= 0 || rc >= NAMESZ)
471 goto err_print_s;
472 d = debugfs_create_file(name, 0600, mon_dir, mbus, &mon_fops_stat);
473 if (d == NULL)
474 goto err_create_s;
475 mbus->dent_s = d;
476
477 return 1;
478
479err_create_s:
480err_print_s:
481 debugfs_remove(mbus->dent_t);
482 mbus->dent_t = NULL;
483err_create_t:
484err_print_t:
485 return 0;
486}
487
488void mon_text_del(struct mon_bus *mbus)
489{
490 debugfs_remove(mbus->dent_t);
491 debugfs_remove(mbus->dent_s);
492}
493
450/* 494/*
451 * Slab interface: constructor. 495 * Slab interface: constructor.
452 */ 496 */
@@ -459,3 +503,24 @@ static void mon_text_ctor(void *mem, struct kmem_cache *slab, unsigned long sfla
459 memset(mem, 0xe5, sizeof(struct mon_event_text)); 503 memset(mem, 0xe5, sizeof(struct mon_event_text));
460} 504}
461 505
506int __init mon_text_init(void)
507{
508 struct dentry *mondir;
509
510 mondir = debugfs_create_dir("usbmon", NULL);
511 if (IS_ERR(mondir)) {
512 printk(KERN_NOTICE TAG ": debugfs is not available\n");
513 return -ENODEV;
514 }
515 if (mondir == NULL) {
516 printk(KERN_NOTICE TAG ": unable to create usbmon directory\n");
517 return -ENODEV;
518 }
519 mon_dir = mondir;
520 return 0;
521}
522
523void __exit mon_text_exit(void)
524{
525 debugfs_remove(mon_dir);
526}
diff --git a/drivers/usb/mon/usb_mon.h b/drivers/usb/mon/usb_mon.h
index ab9d02d5df77..4f949ce8a7f3 100644
--- a/drivers/usb/mon/usb_mon.h
+++ b/drivers/usb/mon/usb_mon.h
@@ -17,9 +17,11 @@
17struct mon_bus { 17struct mon_bus {
18 struct list_head bus_link; 18 struct list_head bus_link;
19 spinlock_t lock; 19 spinlock_t lock;
20 struct usb_bus *u_bus;
21
22 int text_inited;
20 struct dentry *dent_s; /* Debugging file */ 23 struct dentry *dent_s; /* Debugging file */
21 struct dentry *dent_t; /* Text interface file */ 24 struct dentry *dent_t; /* Text interface file */
22 struct usb_bus *u_bus;
23 int uses_dma; 25 int uses_dma;
24 26
25 /* Ref */ 27 /* Ref */
@@ -48,13 +50,35 @@ struct mon_reader {
48void mon_reader_add(struct mon_bus *mbus, struct mon_reader *r); 50void mon_reader_add(struct mon_bus *mbus, struct mon_reader *r);
49void mon_reader_del(struct mon_bus *mbus, struct mon_reader *r); 51void mon_reader_del(struct mon_bus *mbus, struct mon_reader *r);
50 52
53struct mon_bus *mon_bus_lookup(unsigned int num);
54
55int /*bool*/ mon_text_add(struct mon_bus *mbus, const struct usb_bus *ubus);
56void mon_text_del(struct mon_bus *mbus);
57// void mon_bin_add(struct mon_bus *);
58
59int __init mon_text_init(void);
60void __exit mon_text_exit(void);
61int __init mon_bin_init(void);
62void __exit mon_bin_exit(void);
63
51/* 64/*
52 */ 65 * DMA interface.
66 *
67 * XXX The vectored side needs a serious re-thinking. Abstracting vectors,
68 * like in Paolo's original patch, produces a double pkmap. We need an idea.
69*/
53extern char mon_dmapeek(unsigned char *dst, dma_addr_t dma_addr, int len); 70extern char mon_dmapeek(unsigned char *dst, dma_addr_t dma_addr, int len);
54 71
72struct mon_reader_bin;
73extern void mon_dmapeek_vec(const struct mon_reader_bin *rp,
74 unsigned int offset, dma_addr_t dma_addr, unsigned int len);
75extern unsigned int mon_copy_to_buff(const struct mon_reader_bin *rp,
76 unsigned int offset, const unsigned char *from, unsigned int len);
77
78/*
79 */
55extern struct mutex mon_lock; 80extern struct mutex mon_lock;
56 81
57extern const struct file_operations mon_fops_text;
58extern const struct file_operations mon_fops_stat; 82extern const struct file_operations mon_fops_stat;
59 83
60#endif /* __USB_MON_H */ 84#endif /* __USB_MON_H */
diff --git a/drivers/usb/net/Kconfig b/drivers/usb/net/Kconfig
index e081836014ac..a2b94ef512bc 100644
--- a/drivers/usb/net/Kconfig
+++ b/drivers/usb/net/Kconfig
@@ -222,13 +222,15 @@ config USB_NET_MCS7830
222 adapters marketed under the DeLOCK brand. 222 adapters marketed under the DeLOCK brand.
223 223
224config USB_NET_RNDIS_HOST 224config USB_NET_RNDIS_HOST
225 tristate "Host for RNDIS devices (EXPERIMENTAL)" 225 tristate "Host for RNDIS and ActiveSync devices (EXPERIMENTAL)"
226 depends on USB_USBNET && EXPERIMENTAL 226 depends on USB_USBNET && EXPERIMENTAL
227 select USB_NET_CDCETHER 227 select USB_NET_CDCETHER
228 help 228 help
229 This option enables hosting "Remote NDIS" USB networking links, 229 This option enables hosting "Remote NDIS" USB networking links,
230 as encouraged by Microsoft (instead of CDC Ethernet!) for use in 230 as encouraged by Microsoft (instead of CDC Ethernet!) for use in
231 various devices that may only support this protocol. 231 various devices that may only support this protocol. A variant
232 of this protocol (with even less public documentation) seems to
233 be at the root of Microsoft's "ActiveSync" too.
232 234
233 Avoid using this protocol unless you have no better options. 235 Avoid using this protocol unless you have no better options.
234 The protocol specification is incomplete, and is controlled by 236 The protocol specification is incomplete, and is controlled by
diff --git a/drivers/usb/net/asix.c b/drivers/usb/net/asix.c
index 896449f0cf85..4206df2d61b7 100644
--- a/drivers/usb/net/asix.c
+++ b/drivers/usb/net/asix.c
@@ -1449,6 +1449,10 @@ static const struct usb_device_id products [] = {
1449 // Linksys USB1000 1449 // Linksys USB1000
1450 USB_DEVICE (0x1737, 0x0039), 1450 USB_DEVICE (0x1737, 0x0039),
1451 .driver_info = (unsigned long) &ax88178_info, 1451 .driver_info = (unsigned long) &ax88178_info,
1452}, {
1453 // IO-DATA ETG-US2
1454 USB_DEVICE (0x04bb, 0x0930),
1455 .driver_info = (unsigned long) &ax88178_info,
1452}, 1456},
1453 { }, // END 1457 { }, // END
1454}; 1458};
diff --git a/drivers/usb/net/cdc_ether.c b/drivers/usb/net/cdc_ether.c
index 44a91547146e..e5cdafa258dd 100644
--- a/drivers/usb/net/cdc_ether.c
+++ b/drivers/usb/net/cdc_ether.c
@@ -1,6 +1,7 @@
1/* 1/*
2 * CDC Ethernet based networking peripherals 2 * CDC Ethernet based networking peripherals
3 * Copyright (C) 2003-2005 by David Brownell 3 * Copyright (C) 2003-2005 by David Brownell
4 * Copyright (C) 2006 by Ole Andre Vadla Ravnas (ActiveSync)
4 * 5 *
5 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
@@ -35,6 +36,29 @@
35#include "usbnet.h" 36#include "usbnet.h"
36 37
37 38
39#if defined(CONFIG_USB_NET_RNDIS_HOST) || defined(CONFIG_USB_NET_RNDIS_HOST_MODULE)
40
41static int is_rndis(struct usb_interface_descriptor *desc)
42{
43 return desc->bInterfaceClass == USB_CLASS_COMM
44 && desc->bInterfaceSubClass == 2
45 && desc->bInterfaceProtocol == 0xff;
46}
47
48static int is_activesync(struct usb_interface_descriptor *desc)
49{
50 return desc->bInterfaceClass == USB_CLASS_MISC
51 && desc->bInterfaceSubClass == 1
52 && desc->bInterfaceProtocol == 1;
53}
54
55#else
56
57#define is_rndis(desc) 0
58#define is_activesync(desc) 0
59
60#endif
61
38/* 62/*
39 * probes control interface, claims data interface, collects the bulk 63 * probes control interface, claims data interface, collects the bulk
40 * endpoints, activates data interface (if needed), maybe sets MTU. 64 * endpoints, activates data interface (if needed), maybe sets MTU.
@@ -71,7 +95,8 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
71 /* this assumes that if there's a non-RNDIS vendor variant 95 /* this assumes that if there's a non-RNDIS vendor variant
72 * of cdc-acm, it'll fail RNDIS requests cleanly. 96 * of cdc-acm, it'll fail RNDIS requests cleanly.
73 */ 97 */
74 rndis = (intf->cur_altsetting->desc.bInterfaceProtocol == 0xff); 98 rndis = is_rndis(&intf->cur_altsetting->desc)
99 || is_activesync(&intf->cur_altsetting->desc);
75 100
76 memset(info, 0, sizeof *info); 101 memset(info, 0, sizeof *info);
77 info->control = intf; 102 info->control = intf;
@@ -99,6 +124,23 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
99 goto bad_desc; 124 goto bad_desc;
100 } 125 }
101 break; 126 break;
127 case USB_CDC_ACM_TYPE:
128 /* paranoia: disambiguate a "real" vendor-specific
129 * modem interface from an RNDIS non-modem.
130 */
131 if (rndis) {
132 struct usb_cdc_acm_descriptor *d;
133
134 d = (void *) buf;
135 if (d->bmCapabilities) {
136 dev_dbg(&intf->dev,
137 "ACM capabilities %02x, "
138 "not really RNDIS?\n",
139 d->bmCapabilities);
140 goto bad_desc;
141 }
142 }
143 break;
102 case USB_CDC_UNION_TYPE: 144 case USB_CDC_UNION_TYPE:
103 if (info->u) { 145 if (info->u) {
104 dev_dbg(&intf->dev, "extra CDC union\n"); 146 dev_dbg(&intf->dev, "extra CDC union\n");
@@ -171,7 +213,21 @@ next_desc:
171 buf += buf [0]; 213 buf += buf [0];
172 } 214 }
173 215
174 if (!info->header || !info->u || (!rndis && !info->ether)) { 216 /* Microsoft ActiveSync based RNDIS devices lack the CDC descriptors,
217 * so we'll hard-wire the interfaces and not check for descriptors.
218 */
219 if (is_activesync(&intf->cur_altsetting->desc) && !info->u) {
220 info->control = usb_ifnum_to_if(dev->udev, 0);
221 info->data = usb_ifnum_to_if(dev->udev, 1);
222 if (!info->control || !info->data) {
223 dev_dbg(&intf->dev,
224 "activesync: master #0/%p slave #1/%p\n",
225 info->control,
226 info->data);
227 goto bad_desc;
228 }
229
230 } else if (!info->header || !info->u || (!rndis && !info->ether)) {
175 dev_dbg(&intf->dev, "missing cdc %s%s%sdescriptor\n", 231 dev_dbg(&intf->dev, "missing cdc %s%s%sdescriptor\n",
176 info->header ? "" : "header ", 232 info->header ? "" : "header ",
177 info->u ? "" : "union ", 233 info->u ? "" : "union ",
diff --git a/drivers/usb/net/gl620a.c b/drivers/usb/net/gl620a.c
index a6f0f4d934df..31e5fe363fdc 100644
--- a/drivers/usb/net/gl620a.c
+++ b/drivers/usb/net/gl620a.c
@@ -70,12 +70,12 @@
70 (((GL_MAX_PACKET_LEN + 4) * GL_MAX_TRANSMIT_PACKETS) + 4) 70 (((GL_MAX_PACKET_LEN + 4) * GL_MAX_TRANSMIT_PACKETS) + 4)
71 71
72struct gl_packet { 72struct gl_packet {
73 u32 packet_length; 73 __le32 packet_length;
74 char packet_data [1]; 74 char packet_data [1];
75}; 75};
76 76
77struct gl_header { 77struct gl_header {
78 u32 packet_count; 78 __le32 packet_count;
79 struct gl_packet packets; 79 struct gl_packet packets;
80}; 80};
81 81
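For orientation: reconstructed from this code alone (no GeneLink datasheet was consulted), the frame layout the fixups below walk appears to be a little-endian packet count followed by (length, data) pairs laid end to end,

	__le32 count | __le32 len | data[len] | __le32 len | data[len] | ...

which is why genelink_rx_fixup() pulls 4 bytes for the count and then advances by size + 4 per packet.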
@@ -85,15 +85,14 @@ static int genelink_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
85 struct gl_packet *packet; 85 struct gl_packet *packet;
86 struct sk_buff *gl_skb; 86 struct sk_buff *gl_skb;
87 u32 size; 87 u32 size;
88 u32 count;
88 89
89 header = (struct gl_header *) skb->data; 90 header = (struct gl_header *) skb->data;
90 91
91 // get the packet count of the received skb 92 // get the packet count of the received skb
92 le32_to_cpus(&header->packet_count); 93 count = le32_to_cpu(header->packet_count);
93 if ((header->packet_count > GL_MAX_TRANSMIT_PACKETS) 94 if (count > GL_MAX_TRANSMIT_PACKETS) {
94 || (header->packet_count < 0)) { 95 dbg("genelink: invalid received packet count %u", count);
95 dbg("genelink: invalid received packet count %d",
96 header->packet_count);
97 return 0; 96 return 0;
98 } 97 }
99 98
@@ -103,7 +102,7 @@ static int genelink_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
103 // decrement the length for the packet count size 4 bytes 102 // decrement the length for the packet count size 4 bytes
104 skb_pull(skb, 4); 103 skb_pull(skb, 4);
105 104
106 while (header->packet_count > 1) { 105 while (count > 1) {
107 // get the packet length 106 // get the packet length
108 size = le32_to_cpu(packet->packet_length); 107 size = le32_to_cpu(packet->packet_length);
109 108
@@ -124,9 +123,8 @@ static int genelink_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
124 } 123 }
125 124
126 // advance to the next packet 125 // advance to the next packet
127 packet = (struct gl_packet *) 126 packet = (struct gl_packet *)&packet->packet_data[size];
128 &packet->packet_data [size]; 127 count--;
129 header->packet_count--;
130 128
131 // shift the data pointer to the next gl_packet 129 // shift the data pointer to the next gl_packet
132 skb_pull(skb, size + 4); 130 skb_pull(skb, size + 4);
@@ -149,8 +147,8 @@ genelink_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
149 int length = skb->len; 147 int length = skb->len;
150 int headroom = skb_headroom(skb); 148 int headroom = skb_headroom(skb);
151 int tailroom = skb_tailroom(skb); 149 int tailroom = skb_tailroom(skb);
152 u32 *packet_count; 150 __le32 *packet_count;
153 u32 *packet_len; 151 __le32 *packet_len;
154 152
155 // FIXME: magic numbers, bleech 153 // FIXME: magic numbers, bleech
156 padlen = ((skb->len + (4 + 4*1)) % 64) ? 0 : 1; 154 padlen = ((skb->len + (4 + 4*1)) % 64) ? 0 : 1;
@@ -172,7 +170,7 @@ genelink_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
172 } 170 }
173 171
174 // attach the packet count to the header 172 // attach the packet count to the header
175 packet_count = (u32 *) skb_push(skb, (4 + 4*1)); 173 packet_count = (__le32 *) skb_push(skb, (4 + 4*1));
176 packet_len = packet_count + 1; 174 packet_len = packet_count + 1;
177 175
178 *packet_count = cpu_to_le32(1); 176 *packet_count = cpu_to_le32(1);
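
The gl620a change above replaces in-place byte swapping (le32_to_cpus() on shared skb data) with a read-once conversion into a local variable, and types the wire-format fields __le32 so sparse can flag endian mistakes. A minimal sketch of the pattern, with an illustrative struct mirroring gl_header:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Illustrative mirror of gl_header: wire fields keep their __le32
 * type; conversion happens once, into a CPU-order local, leaving the
 * receive buffer untouched for any later reader.
 */
struct gl_header_sketch {
	__le32 packet_count;
};

static u32 gl_packet_count(const void *rx_data)
{
	const struct gl_header_sketch *h = rx_data;

	return le32_to_cpu(h->packet_count);
}
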
diff --git a/drivers/usb/net/kaweth.c b/drivers/usb/net/kaweth.c
index fa78326d0bf0..36a989160a68 100644
--- a/drivers/usb/net/kaweth.c
+++ b/drivers/usb/net/kaweth.c
@@ -179,6 +179,7 @@ static struct usb_driver kaweth_driver = {
179 .suspend = kaweth_suspend, 179 .suspend = kaweth_suspend,
180 .resume = kaweth_resume, 180 .resume = kaweth_resume,
181 .id_table = usb_klsi_table, 181 .id_table = usb_klsi_table,
182 .supports_autosuspend = 1,
182}; 183};
183 184
184typedef __u8 eth_addr_t[6]; 185typedef __u8 eth_addr_t[6];
@@ -225,6 +226,7 @@ struct kaweth_device
225 struct delayed_work lowmem_work; 226 struct delayed_work lowmem_work;
226 227
227 struct usb_device *dev; 228 struct usb_device *dev;
229 struct usb_interface *intf;
228 struct net_device *net; 230 struct net_device *net;
229 wait_queue_head_t term_wait; 231 wait_queue_head_t term_wait;
230 232
@@ -662,9 +664,14 @@ static int kaweth_open(struct net_device *net)
662 664
663 dbg("Opening network device."); 665 dbg("Opening network device.");
664 666
667 res = usb_autopm_get_interface(kaweth->intf);
668 if (res) {
669 err("Interface cannot be resumed.");
670 return -EIO;
671 }
665 res = kaweth_resubmit_rx_urb(kaweth, GFP_KERNEL); 672 res = kaweth_resubmit_rx_urb(kaweth, GFP_KERNEL);
666 if (res) 673 if (res)
667 return -EIO; 674 goto err_out;
668 675
669 usb_fill_int_urb( 676 usb_fill_int_urb(
670 kaweth->irq_urb, 677 kaweth->irq_urb,
@@ -681,7 +688,7 @@ static int kaweth_open(struct net_device *net)
681 res = usb_submit_urb(kaweth->irq_urb, GFP_KERNEL); 688 res = usb_submit_urb(kaweth->irq_urb, GFP_KERNEL);
682 if (res) { 689 if (res) {
683 usb_kill_urb(kaweth->rx_urb); 690 usb_kill_urb(kaweth->rx_urb);
684 return -EIO; 691 goto err_out;
685 } 692 }
686 kaweth->opened = 1; 693 kaweth->opened = 1;
687 694
@@ -689,10 +696,14 @@ static int kaweth_open(struct net_device *net)
689 696
690 kaweth_async_set_rx_mode(kaweth); 697 kaweth_async_set_rx_mode(kaweth);
691 return 0; 698 return 0;
699
700err_out:
701 usb_autopm_enable(kaweth->intf);
702 return -EIO;
692} 703}
693 704
694/**************************************************************** 705/****************************************************************
695 * kaweth_close 706 * kaweth_kill_urbs
696 ****************************************************************/ 707 ****************************************************************/
697static void kaweth_kill_urbs(struct kaweth_device *kaweth) 708static void kaweth_kill_urbs(struct kaweth_device *kaweth)
698{ 709{
@@ -724,17 +735,29 @@ static int kaweth_close(struct net_device *net)
724 735
725 kaweth->status &= ~KAWETH_STATUS_CLOSING; 736 kaweth->status &= ~KAWETH_STATUS_CLOSING;
726 737
738 usb_autopm_enable(kaweth->intf);
739
727 return 0; 740 return 0;
728} 741}
729 742
730static void kaweth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 743static void kaweth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
731{ 744{
745 struct kaweth_device *kaweth = netdev_priv(dev);
732 746
733 strlcpy(info->driver, driver_name, sizeof(info->driver)); 747 strlcpy(info->driver, driver_name, sizeof(info->driver));
748 usb_make_path(kaweth->dev, info->bus_info, sizeof (info->bus_info));
749}
750
751static u32 kaweth_get_link(struct net_device *dev)
752{
753 struct kaweth_device *kaweth = netdev_priv(dev);
754
755 return kaweth->linkstate;
734} 756}
735 757
736static struct ethtool_ops ops = { 758static struct ethtool_ops ops = {
737 .get_drvinfo = kaweth_get_drvinfo 759 .get_drvinfo = kaweth_get_drvinfo,
760 .get_link = kaweth_get_link
738}; 761};
739 762
740/**************************************************************** 763/****************************************************************
@@ -908,6 +931,7 @@ static int kaweth_suspend(struct usb_interface *intf, pm_message_t message)
908 struct kaweth_device *kaweth = usb_get_intfdata(intf); 931 struct kaweth_device *kaweth = usb_get_intfdata(intf);
909 unsigned long flags; 932 unsigned long flags;
910 933
934 dbg("Suspending device");
911 spin_lock_irqsave(&kaweth->device_lock, flags); 935 spin_lock_irqsave(&kaweth->device_lock, flags);
912 kaweth->status |= KAWETH_STATUS_SUSPENDING; 936 kaweth->status |= KAWETH_STATUS_SUSPENDING;
913 spin_unlock_irqrestore(&kaweth->device_lock, flags); 937 spin_unlock_irqrestore(&kaweth->device_lock, flags);
@@ -924,6 +948,7 @@ static int kaweth_resume(struct usb_interface *intf)
924 struct kaweth_device *kaweth = usb_get_intfdata(intf); 948 struct kaweth_device *kaweth = usb_get_intfdata(intf);
925 unsigned long flags; 949 unsigned long flags;
926 950
951 dbg("Resuming device");
927 spin_lock_irqsave(&kaweth->device_lock, flags); 952 spin_lock_irqsave(&kaweth->device_lock, flags);
928 kaweth->status &= ~KAWETH_STATUS_SUSPENDING; 953 kaweth->status &= ~KAWETH_STATUS_SUSPENDING;
929 spin_unlock_irqrestore(&kaweth->device_lock, flags); 954 spin_unlock_irqrestore(&kaweth->device_lock, flags);
@@ -1086,6 +1111,8 @@ err_fw:
1086 1111
1087 dbg("Initializing net device."); 1112 dbg("Initializing net device.");
1088 1113
1114 kaweth->intf = intf;
1115
1089 kaweth->tx_urb = usb_alloc_urb(0, GFP_KERNEL); 1116 kaweth->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
1090 if (!kaweth->tx_urb) 1117 if (!kaweth->tx_urb)
1091 goto err_free_netdev; 1118 goto err_free_netdev;
@@ -1265,7 +1292,7 @@ static int kaweth_internal_control_msg(struct usb_device *usb_dev,
1265{ 1292{
1266 struct urb *urb; 1293 struct urb *urb;
1267 int retv; 1294 int retv;
1268 int length; 1295 int length = 0; /* shut up GCC */
1269 1296
1270 urb = usb_alloc_urb(0, GFP_NOIO); 1297 urb = usb_alloc_urb(0, GFP_NOIO);
1271 if (!urb) 1298 if (!urb)
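
The kaweth changes above follow the usual autosuspend refcount discipline: pin the interface awake for as long as the netdevice is open, and drop the reference on close and on every failed open. A sketch under that assumption, using the same pre-2.6.23 API the diff itself uses (usb_autopm_get_interface()/usb_autopm_enable()); the priv structure and start_io() are hypothetical:

#include <linux/usb.h>

struct sketch_priv {
	struct usb_interface *intf;
};

static int start_io(struct sketch_priv *priv);	/* hypothetical */

static int sketch_open(struct sketch_priv *priv)
{
	int res;

	res = usb_autopm_get_interface(priv->intf); /* resume + hold awake */
	if (res)
		return -EIO;

	res = start_io(priv);
	if (res) {
		usb_autopm_enable(priv->intf);	/* release PM reference */
		return -EIO;
	}
	return 0;
}
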
diff --git a/drivers/usb/net/pegasus.h b/drivers/usb/net/pegasus.h
index 98f6898cae1f..c7467823cd1c 100644
--- a/drivers/usb/net/pegasus.h
+++ b/drivers/usb/net/pegasus.h
@@ -214,9 +214,9 @@ PEGASUS_DEV( "Billionton USBEL-100", VENDOR_BILLIONTON, 0x0988,
214 DEFAULT_GPIO_RESET ) 214 DEFAULT_GPIO_RESET )
215PEGASUS_DEV( "Billionton USBE-100", VENDOR_BILLIONTON, 0x8511, 215PEGASUS_DEV( "Billionton USBE-100", VENDOR_BILLIONTON, 0x8511,
216 DEFAULT_GPIO_RESET | PEGASUS_II ) 216 DEFAULT_GPIO_RESET | PEGASUS_II )
217PEGASUS_DEV( "Corega FEter USB-TX", VENDOR_COREGA, 0x0004, 217PEGASUS_DEV( "Corega FEther USB-TX", VENDOR_COREGA, 0x0004,
218 DEFAULT_GPIO_RESET ) 218 DEFAULT_GPIO_RESET )
219PEGASUS_DEV( "Corega FEter USB-TXS", VENDOR_COREGA, 0x000d, 219PEGASUS_DEV( "Corega FEther USB-TXS", VENDOR_COREGA, 0x000d,
220 DEFAULT_GPIO_RESET | PEGASUS_II ) 220 DEFAULT_GPIO_RESET | PEGASUS_II )
221PEGASUS_DEV( "D-Link DSB-650TX", VENDOR_DLINK, 0x4001, 221PEGASUS_DEV( "D-Link DSB-650TX", VENDOR_DLINK, 0x4001,
222 DEFAULT_GPIO_RESET ) 222 DEFAULT_GPIO_RESET )
diff --git a/drivers/usb/net/rndis_host.c b/drivers/usb/net/rndis_host.c
index a322a16d9cf8..be888d2d813c 100644
--- a/drivers/usb/net/rndis_host.c
+++ b/drivers/usb/net/rndis_host.c
@@ -49,6 +49,8 @@
49 * - In some cases, MS-Windows will emit undocumented requests; this 49 * - In some cases, MS-Windows will emit undocumented requests; this
50 * matters more to peripheral implementations than host ones. 50 * matters more to peripheral implementations than host ones.
51 * 51 *
52 * Moreover there's a no-open-specs variant of RNDIS called "ActiveSync".
53 *
52 * For these reasons and others, ** USE OF RNDIS IS STRONGLY DISCOURAGED ** in 54 * For these reasons and others, ** USE OF RNDIS IS STRONGLY DISCOURAGED ** in
53 * favor of such non-proprietary alternatives as CDC Ethernet or the newer (and 55 * favor of such non-proprietary alternatives as CDC Ethernet or the newer (and
54 * currently rare) "Ethernet Emulation Model" (EEM). 56 * currently rare) "Ethernet Emulation Model" (EEM).
@@ -61,6 +63,9 @@
61 * - control-in: GET_ENCAPSULATED 63 * - control-in: GET_ENCAPSULATED
62 * 64 *
63 * We'll try to ignore the RESPONSE_AVAILABLE notifications. 65 * We'll try to ignore the RESPONSE_AVAILABLE notifications.
66 *
67 * REVISIT some RNDIS implementations seem to have curious issues still
68 * to be resolved.
64 */ 69 */
65struct rndis_msg_hdr { 70struct rndis_msg_hdr {
66 __le32 msg_type; /* RNDIS_MSG_* */ 71 __le32 msg_type; /* RNDIS_MSG_* */
@@ -71,8 +76,14 @@ struct rndis_msg_hdr {
71 // ... and more 76 // ... and more
72} __attribute__ ((packed)); 77} __attribute__ ((packed));
73 78
74/* RNDIS defines this (absurdly huge) control timeout */ 79/* MS-Windows uses this strange size, but RNDIS spec says 1024 minimum */
75#define RNDIS_CONTROL_TIMEOUT_MS (10 * 1000) 80#define CONTROL_BUFFER_SIZE 1025
81
82/* RNDIS defines an (absurdly huge) 10 second control timeout,
83 * but ActiveSync seems to use a more usual 5 second timeout
84 * (which matches the USB 2.0 spec).
85 */
86#define RNDIS_CONTROL_TIMEOUT_MS (5 * 1000)
76 87
77 88
78#define ccpu2 __constant_cpu_to_le32 89#define ccpu2 __constant_cpu_to_le32
@@ -270,6 +281,7 @@ static void rndis_status(struct usbnet *dev, struct urb *urb)
270static int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf) 281static int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf)
271{ 282{
272 struct cdc_state *info = (void *) &dev->data; 283 struct cdc_state *info = (void *) &dev->data;
284 int master_ifnum;
273 int retval; 285 int retval;
274 unsigned count; 286 unsigned count;
275 __le32 rsp; 287 __le32 rsp;
@@ -279,7 +291,7 @@ static int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf)
279 * disconnect(): either serialize, or dispatch responses on xid 291 * disconnect(): either serialize, or dispatch responses on xid
280 */ 292 */
281 293
282 /* Issue the request; don't bother byteswapping our xid */ 294 /* Issue the request; xid is unique, don't bother byteswapping it */
283 if (likely(buf->msg_type != RNDIS_MSG_HALT 295 if (likely(buf->msg_type != RNDIS_MSG_HALT
284 && buf->msg_type != RNDIS_MSG_RESET)) { 296 && buf->msg_type != RNDIS_MSG_RESET)) {
285 xid = dev->xid++; 297 xid = dev->xid++;
@@ -287,11 +299,12 @@ static int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf)
287 xid = dev->xid++; 299 xid = dev->xid++;
288 buf->request_id = (__force __le32) xid; 300 buf->request_id = (__force __le32) xid;
289 } 301 }
302 master_ifnum = info->control->cur_altsetting->desc.bInterfaceNumber;
290 retval = usb_control_msg(dev->udev, 303 retval = usb_control_msg(dev->udev,
291 usb_sndctrlpipe(dev->udev, 0), 304 usb_sndctrlpipe(dev->udev, 0),
292 USB_CDC_SEND_ENCAPSULATED_COMMAND, 305 USB_CDC_SEND_ENCAPSULATED_COMMAND,
293 USB_TYPE_CLASS | USB_RECIP_INTERFACE, 306 USB_TYPE_CLASS | USB_RECIP_INTERFACE,
294 0, info->u->bMasterInterface0, 307 0, master_ifnum,
295 buf, le32_to_cpu(buf->msg_len), 308 buf, le32_to_cpu(buf->msg_len),
296 RNDIS_CONTROL_TIMEOUT_MS); 309 RNDIS_CONTROL_TIMEOUT_MS);
297 if (unlikely(retval < 0 || xid == 0)) 310 if (unlikely(retval < 0 || xid == 0))
@@ -306,13 +319,13 @@ static int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf)
306 */ 319 */
307 rsp = buf->msg_type | RNDIS_MSG_COMPLETION; 320 rsp = buf->msg_type | RNDIS_MSG_COMPLETION;
308 for (count = 0; count < 10; count++) { 321 for (count = 0; count < 10; count++) {
309 memset(buf, 0, 1024); 322 memset(buf, 0, CONTROL_BUFFER_SIZE);
310 retval = usb_control_msg(dev->udev, 323 retval = usb_control_msg(dev->udev,
311 usb_rcvctrlpipe(dev->udev, 0), 324 usb_rcvctrlpipe(dev->udev, 0),
312 USB_CDC_GET_ENCAPSULATED_RESPONSE, 325 USB_CDC_GET_ENCAPSULATED_RESPONSE,
313 USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE, 326 USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
314 0, info->u->bMasterInterface0, 327 0, master_ifnum,
315 buf, 1024, 328 buf, CONTROL_BUFFER_SIZE,
316 RNDIS_CONTROL_TIMEOUT_MS); 329 RNDIS_CONTROL_TIMEOUT_MS);
317 if (likely(retval >= 8)) { 330 if (likely(retval >= 8)) {
318 msg_len = le32_to_cpu(buf->msg_len); 331 msg_len = le32_to_cpu(buf->msg_len);
@@ -350,7 +363,7 @@ static int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf)
350 usb_sndctrlpipe(dev->udev, 0), 363 usb_sndctrlpipe(dev->udev, 0),
351 USB_CDC_SEND_ENCAPSULATED_COMMAND, 364 USB_CDC_SEND_ENCAPSULATED_COMMAND,
352 USB_TYPE_CLASS | USB_RECIP_INTERFACE, 365 USB_TYPE_CLASS | USB_RECIP_INTERFACE,
353 0, info->u->bMasterInterface0, 366 0, master_ifnum,
354 msg, sizeof *msg, 367 msg, sizeof *msg,
355 RNDIS_CONTROL_TIMEOUT_MS); 368 RNDIS_CONTROL_TIMEOUT_MS);
356 if (unlikely(retval < 0)) 369 if (unlikely(retval < 0))
@@ -393,38 +406,64 @@ static int rndis_bind(struct usbnet *dev, struct usb_interface *intf)
393 u32 tmp; 406 u32 tmp;
394 407
395 /* we can't rely on i/o from stack working, or stack allocation */ 408 /* we can't rely on i/o from stack working, or stack allocation */
396 u.buf = kmalloc(1024, GFP_KERNEL); 409 u.buf = kmalloc(CONTROL_BUFFER_SIZE, GFP_KERNEL);
397 if (!u.buf) 410 if (!u.buf)
398 return -ENOMEM; 411 return -ENOMEM;
399 retval = usbnet_generic_cdc_bind(dev, intf); 412 retval = usbnet_generic_cdc_bind(dev, intf);
400 if (retval < 0) 413 if (retval < 0)
401 goto fail; 414 goto fail;
402 415
403 net->hard_header_len += sizeof (struct rndis_data_hdr);
404
405 /* initialize; max transfer is 16KB at full speed */
406 u.init->msg_type = RNDIS_MSG_INIT; 416 u.init->msg_type = RNDIS_MSG_INIT;
407 u.init->msg_len = ccpu2(sizeof *u.init); 417 u.init->msg_len = ccpu2(sizeof *u.init);
408 u.init->major_version = ccpu2(1); 418 u.init->major_version = ccpu2(1);
409 u.init->minor_version = ccpu2(0); 419 u.init->minor_version = ccpu2(0);
410 u.init->max_transfer_size = ccpu2(net->mtu + net->hard_header_len);
411 420
421 /* max transfer (in spec) is 0x4000 at full speed, but for
422 * TX we'll stick to one Ethernet packet plus RNDIS framing.
423 * For RX we handle drivers that zero-pad to end-of-packet.
424 * Don't let userspace change these settings.
425 */
426 net->hard_header_len += sizeof (struct rndis_data_hdr);
427 dev->hard_mtu = net->mtu + net->hard_header_len;
428
429 dev->rx_urb_size = dev->hard_mtu + (dev->maxpacket + 1);
430 dev->rx_urb_size &= ~(dev->maxpacket - 1);
431 u.init->max_transfer_size = cpu_to_le32(dev->rx_urb_size);
432
433 net->change_mtu = NULL;
412 retval = rndis_command(dev, u.header); 434 retval = rndis_command(dev, u.header);
413 if (unlikely(retval < 0)) { 435 if (unlikely(retval < 0)) {
414 /* it might not even be an RNDIS device!! */ 436 /* it might not even be an RNDIS device!! */
415 dev_err(&intf->dev, "RNDIS init failed, %d\n", retval); 437 dev_err(&intf->dev, "RNDIS init failed, %d\n", retval);
438 goto fail_and_release;
439 }
440 tmp = le32_to_cpu(u.init_c->max_transfer_size);
441 if (tmp < dev->hard_mtu) {
442 dev_err(&intf->dev,
443 "dev can't take %u byte packets (max %u)\n",
444 dev->hard_mtu, tmp);
416 goto fail_and_release; 445 goto fail_and_release;
417 } 446 }
418 dev->hard_mtu = le32_to_cpu(u.init_c->max_transfer_size); 447
419 /* REVISIT: peripheral "alignment" request is ignored ... */ 448 /* REVISIT: peripheral "alignment" request is ignored ... */
420 dev_dbg(&intf->dev, "hard mtu %u, align %d\n", dev->hard_mtu, 449 dev_dbg(&intf->dev,
450 "hard mtu %u (%u from dev), rx buflen %Zu, align %d\n",
451 dev->hard_mtu, tmp, dev->rx_urb_size,
421 1 << le32_to_cpu(u.init_c->packet_alignment)); 452 1 << le32_to_cpu(u.init_c->packet_alignment));
422 453
423 /* get designated host ethernet address */ 454 /* Get designated host ethernet address.
424 memset(u.get, 0, sizeof *u.get); 455 *
456 * Adding a payload exactly the same size as the expected response
457 * payload is an evident requirement MSFT added for ActiveSync.
458 * This undocumented (and nonsensical) issue was found by sniffing
459 * protocol requests from the ActiveSync 4.1 Windows driver.
460 */
461 memset(u.get, 0, sizeof *u.get + 48);
425 u.get->msg_type = RNDIS_MSG_QUERY; 462 u.get->msg_type = RNDIS_MSG_QUERY;
426 u.get->msg_len = ccpu2(sizeof *u.get); 463 u.get->msg_len = ccpu2(sizeof *u.get + 48);
427 u.get->oid = OID_802_3_PERMANENT_ADDRESS; 464 u.get->oid = OID_802_3_PERMANENT_ADDRESS;
465 u.get->len = ccpu2(48);
466 u.get->offset = ccpu2(20);
428 467
429 retval = rndis_command(dev, u.header); 468 retval = rndis_command(dev, u.header);
430 if (unlikely(retval < 0)) { 469 if (unlikely(retval < 0)) {
@@ -432,7 +471,7 @@ static int rndis_bind(struct usbnet *dev, struct usb_interface *intf)
432 goto fail_and_release; 471 goto fail_and_release;
433 } 472 }
434 tmp = le32_to_cpu(u.get_c->offset); 473 tmp = le32_to_cpu(u.get_c->offset);
435 if (unlikely((tmp + 8) > (1024 - ETH_ALEN) 474 if (unlikely((tmp + 8) > (CONTROL_BUFFER_SIZE - ETH_ALEN)
436 || u.get_c->len != ccpu2(ETH_ALEN))) { 475 || u.get_c->len != ccpu2(ETH_ALEN))) {
437 dev_err(&intf->dev, "rndis ethaddr off %d len %d ?\n", 476 dev_err(&intf->dev, "rndis ethaddr off %d len %d ?\n",
438 tmp, le32_to_cpu(u.get_c->len)); 477 tmp, le32_to_cpu(u.get_c->len));
@@ -598,6 +637,10 @@ static const struct usb_device_id products [] = {
598 /* RNDIS is MSFT's un-official variant of CDC ACM */ 637 /* RNDIS is MSFT's un-official variant of CDC ACM */
599 USB_INTERFACE_INFO(USB_CLASS_COMM, 2 /* ACM */, 0x0ff), 638 USB_INTERFACE_INFO(USB_CLASS_COMM, 2 /* ACM */, 0x0ff),
600 .driver_info = (unsigned long) &rndis_info, 639 .driver_info = (unsigned long) &rndis_info,
640}, {
641 /* "ActiveSync" is an undocumented variant of RNDIS, used in WM5 */
642 USB_INTERFACE_INFO(USB_CLASS_MISC, 1, 1),
643 .driver_info = (unsigned long) &rndis_info,
601}, 644},
602 { }, // END 645 { }, // END
603}; 646};
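
The buffer sizing added to rndis_bind() above rounds the RNDIS frame limit up past an endpoint-packet boundary, so devices that zero-pad to end-of-packet still fit; it assumes maxpacket is a power of two (64 at full speed, 512 at high speed). A self-contained sketch of the arithmetic with illustrative numbers:

#include <stdio.h>

/* Same computation as rndis_bind(): leave room for up to one extra
 * max-packet of zero padding, then mask down to a packet boundary.
 * Assumes maxpacket is a power of two.
 */
static unsigned long rx_urb_size(unsigned long hard_mtu, unsigned long maxpacket)
{
	unsigned long size = hard_mtu + (maxpacket + 1);

	return size & ~(maxpacket - 1);
}

int main(void)
{
	/* e.g. 1500-byte MTU + 44 bytes of Ethernet/RNDIS framing */
	printf("%lu\n", rx_urb_size(1544, 512));	/* prints 2048 */
	return 0;
}
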
diff --git a/drivers/usb/net/rtl8150.c b/drivers/usb/net/rtl8150.c
index e0eecda78ec1..670262a38a0f 100644
--- a/drivers/usb/net/rtl8150.c
+++ b/drivers/usb/net/rtl8150.c
@@ -284,7 +284,8 @@ static int write_mii_word(rtl8150_t * dev, u8 phy, __u8 indx, u16 reg)
284 u8 data[3], tmp; 284 u8 data[3], tmp;
285 285
286 data[0] = phy; 286 data[0] = phy;
287 *(data + 1) = cpu_to_le16p(&reg); 287 data[1] = reg & 0xff;
288 data[2] = (reg >> 8) & 0xff;
288 tmp = indx | PHY_WRITE | PHY_GO; 289 tmp = indx | PHY_WRITE | PHY_GO;
289 i = 0; 290 i = 0;
290 291
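
The rtl8150 fix above replaces *(data + 1) = cpu_to_le16p(&reg) — which truncates a 16-bit value into a u8 and, on big-endian hosts, keeps the wrong byte — with explicit byte splitting. The portable idiom, as a self-contained sketch:

#include <stdint.h>

/* Serialize a 16-bit value little-endian into a byte buffer;
 * correct regardless of host endianness.
 */
static void put_le16(uint8_t *buf, uint16_t val)
{
	buf[0] = val & 0xff;		/* low byte first */
	buf[1] = (val >> 8) & 0xff;	/* then high byte */
}
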
diff --git a/drivers/usb/serial/aircable.c b/drivers/usb/serial/aircable.c
index 86bcf63b6ba5..11dad42c3c60 100644
--- a/drivers/usb/serial/aircable.c
+++ b/drivers/usb/serial/aircable.c
@@ -572,8 +572,20 @@ static void aircable_unthrottle(struct usb_serial_port *port)
572 schedule_work(&priv->rx_work); 572 schedule_work(&priv->rx_work);
573} 573}
574 574
575static struct usb_driver aircable_driver = {
576 .name = "aircable",
577 .probe = usb_serial_probe,
578 .disconnect = usb_serial_disconnect,
579 .id_table = id_table,
580 .no_dynamic_id = 1,
581};
582
575static struct usb_serial_driver aircable_device = { 583static struct usb_serial_driver aircable_device = {
576 .description = "aircable", 584 .driver = {
585 .owner = THIS_MODULE,
586 .name = "aircable",
587 },
588 .usb_driver = &aircable_driver,
577 .id_table = id_table, 589 .id_table = id_table,
578 .num_ports = 1, 590 .num_ports = 1,
579 .attach = aircable_attach, 591 .attach = aircable_attach,
@@ -587,13 +599,6 @@ static struct usb_serial_driver aircable_device = {
587 .unthrottle = aircable_unthrottle, 599 .unthrottle = aircable_unthrottle,
588}; 600};
589 601
590static struct usb_driver aircable_driver = {
591 .name = "aircable",
592 .probe = usb_serial_probe,
593 .disconnect = usb_serial_disconnect,
594 .id_table = id_table,
595};
596
597static int __init aircable_init (void) 602static int __init aircable_init (void)
598{ 603{
599 int retval; 604 int retval;
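
The aircable reordering above is the shape every usb-serial driver in this series converges on: define the struct usb_driver first, then point the usb_serial_driver back at it through the new .usb_driver field, so IDs written to the bus's new_id file propagate to the USB core as well (see the bus.c change below). The skeleton, with placeholder names:

#include <linux/usb.h>
#include <linux/usb/serial.h>

static struct usb_device_id example_ids[] = { { } };	/* placeholder table */

static struct usb_driver example_driver = {
	.name		= "example",
	.probe		= usb_serial_probe,
	.disconnect	= usb_serial_disconnect,
	.id_table	= example_ids,
	.no_dynamic_id	= 1,
};

static struct usb_serial_driver example_device = {
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "example",
	},
	.usb_driver	= &example_driver,	/* back-pointer for dynamic IDs */
	.id_table	= example_ids,
	.num_ports	= 1,
};
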
diff --git a/drivers/usb/serial/airprime.c b/drivers/usb/serial/airprime.c
index f2ca76a9cbac..0af42e32fa0a 100644
--- a/drivers/usb/serial/airprime.c
+++ b/drivers/usb/serial/airprime.c
@@ -277,6 +277,7 @@ static struct usb_serial_driver airprime_device = {
277 .owner = THIS_MODULE, 277 .owner = THIS_MODULE,
278 .name = "airprime", 278 .name = "airprime",
279 }, 279 },
280 .usb_driver = &airprime_driver,
280 .id_table = id_table, 281 .id_table = id_table,
281 .num_interrupt_in = NUM_DONT_CARE, 282 .num_interrupt_in = NUM_DONT_CARE,
282 .num_bulk_in = NUM_DONT_CARE, 283 .num_bulk_in = NUM_DONT_CARE,
diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c
index 5261cd22ee6b..edd685791a6b 100644
--- a/drivers/usb/serial/ark3116.c
+++ b/drivers/usb/serial/ark3116.c
@@ -444,6 +444,7 @@ static struct usb_driver ark3116_driver = {
444 .probe = usb_serial_probe, 444 .probe = usb_serial_probe,
445 .disconnect = usb_serial_disconnect, 445 .disconnect = usb_serial_disconnect,
446 .id_table = id_table, 446 .id_table = id_table,
447 .no_dynamic_id = 1,
447}; 448};
448 449
449static struct usb_serial_driver ark3116_device = { 450static struct usb_serial_driver ark3116_device = {
@@ -452,6 +453,7 @@ static struct usb_serial_driver ark3116_device = {
452 .name = "ark3116", 453 .name = "ark3116",
453 }, 454 },
454 .id_table = id_table, 455 .id_table = id_table,
456 .usb_driver = &ark3116_driver,
455 .num_interrupt_in = 1, 457 .num_interrupt_in = 1,
456 .num_bulk_in = 1, 458 .num_bulk_in = 1,
457 .num_bulk_out = 1, 459 .num_bulk_out = 1,
diff --git a/drivers/usb/serial/belkin_sa.c b/drivers/usb/serial/belkin_sa.c
index 38b4dae319ee..3b800d277c4b 100644
--- a/drivers/usb/serial/belkin_sa.c
+++ b/drivers/usb/serial/belkin_sa.c
@@ -126,6 +126,7 @@ static struct usb_serial_driver belkin_device = {
126 .name = "belkin", 126 .name = "belkin",
127 }, 127 },
128 .description = "Belkin / Peracom / GoHubs USB Serial Adapter", 128 .description = "Belkin / Peracom / GoHubs USB Serial Adapter",
129 .usb_driver = &belkin_driver,
129 .id_table = id_table_combined, 130 .id_table = id_table_combined,
130 .num_interrupt_in = 1, 131 .num_interrupt_in = 1,
131 .num_bulk_in = 1, 132 .num_bulk_in = 1,
diff --git a/drivers/usb/serial/bus.c b/drivers/usb/serial/bus.c
index 6542f220468f..c08a38402b93 100644
--- a/drivers/usb/serial/bus.c
+++ b/drivers/usb/serial/bus.c
@@ -103,11 +103,52 @@ exit:
103 return retval; 103 return retval;
104} 104}
105 105
106#ifdef CONFIG_HOTPLUG
107static ssize_t store_new_id(struct device_driver *driver,
108 const char *buf, size_t count)
109{
110 struct usb_serial_driver *usb_drv = to_usb_serial_driver(driver);
111 ssize_t retval = usb_store_new_id(&usb_drv->dynids, driver, buf, count);
112
113 if (retval >= 0 && usb_drv->usb_driver != NULL)
114 retval = usb_store_new_id(&usb_drv->usb_driver->dynids,
115 &usb_drv->usb_driver->drvwrap.driver,
116 buf, count);
117 return retval;
118}
119
120static struct driver_attribute drv_attrs[] = {
121 __ATTR(new_id, S_IWUSR, NULL, store_new_id),
122 __ATTR_NULL,
123};
124
125static void free_dynids(struct usb_serial_driver *drv)
126{
127 struct usb_dynid *dynid, *n;
128
129 spin_lock(&drv->dynids.lock);
130 list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
131 list_del(&dynid->node);
132 kfree(dynid);
133 }
134 spin_unlock(&drv->dynids.lock);
135}
136
137#else
138static struct driver_attribute drv_attrs[] = {
139 __ATTR_NULL,
140};
141static inline void free_dynids(struct usb_driver *drv)
142{
143}
144#endif
145
106struct bus_type usb_serial_bus_type = { 146struct bus_type usb_serial_bus_type = {
107 .name = "usb-serial", 147 .name = "usb-serial",
108 .match = usb_serial_device_match, 148 .match = usb_serial_device_match,
109 .probe = usb_serial_device_probe, 149 .probe = usb_serial_device_probe,
110 .remove = usb_serial_device_remove, 150 .remove = usb_serial_device_remove,
151 .drv_attrs = drv_attrs,
111}; 152};
112 153
113int usb_serial_bus_register(struct usb_serial_driver *driver) 154int usb_serial_bus_register(struct usb_serial_driver *driver)
@@ -115,6 +156,9 @@ int usb_serial_bus_register(struct usb_serial_driver *driver)
115 int retval; 156 int retval;
116 157
117 driver->driver.bus = &usb_serial_bus_type; 158 driver->driver.bus = &usb_serial_bus_type;
159 spin_lock_init(&driver->dynids.lock);
160 INIT_LIST_HEAD(&driver->dynids.list);
161
118 retval = driver_register(&driver->driver); 162 retval = driver_register(&driver->driver);
119 163
120 return retval; 164 return retval;
@@ -122,6 +166,7 @@ int usb_serial_bus_register(struct usb_serial_driver *driver)
122 166
123void usb_serial_bus_deregister(struct usb_serial_driver *driver) 167void usb_serial_bus_deregister(struct usb_serial_driver *driver)
124{ 168{
169 free_dynids(driver);
125 driver_unregister(&driver->driver); 170 driver_unregister(&driver->driver);
126} 171}
127 172
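
store_new_id() above records each user-supplied ID on the serial driver's own dynid list and, through the new .usb_driver back-pointer, on the underlying USB driver too, so the core will actually probe the device. Teardown frees entries while walking the list, hence the _safe iterator; a generic sketch with hypothetical struct names:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct dyn_entry {			/* hypothetical, mirrors usb_dynid */
	struct list_head node;
};

static void free_all(struct list_head *head, spinlock_t *lock)
{
	struct dyn_entry *e, *n;	/* n holds the lookahead kfree(e) would break */

	spin_lock(lock);
	list_for_each_entry_safe(e, n, head, node) {
		list_del(&e->node);
		kfree(e);
	}
	spin_unlock(lock);
}

From userspace the list is fed by echoing a vendor/product pair into the driver's new_id attribute under /sys/bus/usb-serial/drivers/.
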
diff --git a/drivers/usb/serial/cp2101.c b/drivers/usb/serial/cp2101.c
index 7ebaffd6ed86..3ec24870bca9 100644
--- a/drivers/usb/serial/cp2101.c
+++ b/drivers/usb/serial/cp2101.c
@@ -89,6 +89,7 @@ static struct usb_serial_driver cp2101_device = {
89 .owner = THIS_MODULE, 89 .owner = THIS_MODULE,
90 .name = "cp2101", 90 .name = "cp2101",
91 }, 91 },
92 .usb_driver = &cp2101_driver,
92 .id_table = id_table, 93 .id_table = id_table,
93 .num_interrupt_in = 0, 94 .num_interrupt_in = 0,
94 .num_bulk_in = 0, 95 .num_bulk_in = 0,
@@ -169,13 +170,13 @@ static int cp2101_get_config(struct usb_serial_port* port, u8 request,
169 unsigned int *data, int size) 170 unsigned int *data, int size)
170{ 171{
171 struct usb_serial *serial = port->serial; 172 struct usb_serial *serial = port->serial;
172 u32 *buf; 173 __le32 *buf;
173 int result, i, length; 174 int result, i, length;
174 175
175 /* Number of integers required to contain the array */ 176 /* Number of integers required to contain the array */
176 length = (((size - 1) | 3) + 1)/4; 177 length = (((size - 1) | 3) + 1)/4;
177 178
178 buf = kcalloc(length, sizeof(u32), GFP_KERNEL); 179 buf = kcalloc(length, sizeof(__le32), GFP_KERNEL);
179 if (!buf) { 180 if (!buf) {
180 dev_err(&port->dev, "%s - out of memory.\n", __FUNCTION__); 181 dev_err(&port->dev, "%s - out of memory.\n", __FUNCTION__);
181 return -ENOMEM; 182 return -ENOMEM;
@@ -215,13 +216,13 @@ static int cp2101_set_config(struct usb_serial_port* port, u8 request,
215 unsigned int *data, int size) 216 unsigned int *data, int size)
216{ 217{
217 struct usb_serial *serial = port->serial; 218 struct usb_serial *serial = port->serial;
218 u32 *buf; 219 __le32 *buf;
219 int result, i, length; 220 int result, i, length;
220 221
221 /* Number of integers required to contain the array */ 222 /* Number of integers required to contain the array */
222 length = (((size - 1) | 3) + 1)/4; 223 length = (((size - 1) | 3) + 1)/4;
223 224
224 buf = kmalloc(length * sizeof(u32), GFP_KERNEL); 225 buf = kmalloc(length * sizeof(__le32), GFP_KERNEL);
225 if (!buf) { 226 if (!buf) {
226 dev_err(&port->dev, "%s - out of memory.\n", 227 dev_err(&port->dev, "%s - out of memory.\n",
227 __FUNCTION__); 228 __FUNCTION__);
diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c
index a63c3286caa0..4167753ed31f 100644
--- a/drivers/usb/serial/cyberjack.c
+++ b/drivers/usb/serial/cyberjack.c
@@ -88,6 +88,7 @@ static struct usb_serial_driver cyberjack_device = {
88 .name = "cyberjack", 88 .name = "cyberjack",
89 }, 89 },
90 .description = "Reiner SCT Cyberjack USB card reader", 90 .description = "Reiner SCT Cyberjack USB card reader",
91 .usb_driver = &cyberjack_driver,
91 .id_table = id_table, 92 .id_table = id_table,
92 .num_interrupt_in = 1, 93 .num_interrupt_in = 1,
93 .num_bulk_in = 1, 94 .num_bulk_in = 1,
@@ -98,7 +99,7 @@ static struct usb_serial_driver cyberjack_device = {
98 .open = cyberjack_open, 99 .open = cyberjack_open,
99 .close = cyberjack_close, 100 .close = cyberjack_close,
100 .write = cyberjack_write, 101 .write = cyberjack_write,
101 .write_room = cyberjack_write_room, 102 .write_room = cyberjack_write_room,
102 .read_int_callback = cyberjack_read_int_callback, 103 .read_int_callback = cyberjack_read_int_callback,
103 .read_bulk_callback = cyberjack_read_bulk_callback, 104 .read_bulk_callback = cyberjack_read_bulk_callback,
104 .write_bulk_callback = cyberjack_write_bulk_callback, 105 .write_bulk_callback = cyberjack_write_bulk_callback,
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index 6bc1f404e186..57b8e27285fc 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -193,6 +193,7 @@ static struct usb_serial_driver cypress_earthmate_device = {
193 .name = "earthmate", 193 .name = "earthmate",
194 }, 194 },
195 .description = "DeLorme Earthmate USB", 195 .description = "DeLorme Earthmate USB",
196 .usb_driver = &cypress_driver,
196 .id_table = id_table_earthmate, 197 .id_table = id_table_earthmate,
197 .num_interrupt_in = 1, 198 .num_interrupt_in = 1,
198 .num_interrupt_out = 1, 199 .num_interrupt_out = 1,
@@ -222,6 +223,7 @@ static struct usb_serial_driver cypress_hidcom_device = {
222 .name = "cyphidcom", 223 .name = "cyphidcom",
223 }, 224 },
224 .description = "HID->COM RS232 Adapter", 225 .description = "HID->COM RS232 Adapter",
226 .usb_driver = &cypress_driver,
225 .id_table = id_table_cyphidcomrs232, 227 .id_table = id_table_cyphidcomrs232,
226 .num_interrupt_in = 1, 228 .num_interrupt_in = 1,
227 .num_interrupt_out = 1, 229 .num_interrupt_out = 1,
@@ -251,6 +253,7 @@ static struct usb_serial_driver cypress_ca42v2_device = {
251 .name = "nokiaca42v2", 253 .name = "nokiaca42v2",
252 }, 254 },
253 .description = "Nokia CA-42 V2 Adapter", 255 .description = "Nokia CA-42 V2 Adapter",
256 .usb_driver = &cypress_driver,
254 .id_table = id_table_nokiaca42v2, 257 .id_table = id_table_nokiaca42v2,
255 .num_interrupt_in = 1, 258 .num_interrupt_in = 1,
256 .num_interrupt_out = 1, 259 .num_interrupt_out = 1,
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index efd9ce3f931f..0b0fb51bad3e 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -509,6 +509,7 @@ static struct usb_serial_driver digi_acceleport_2_device = {
509 .name = "digi_2", 509 .name = "digi_2",
510 }, 510 },
511 .description = "Digi 2 port USB adapter", 511 .description = "Digi 2 port USB adapter",
512 .usb_driver = &digi_driver,
512 .id_table = id_table_2, 513 .id_table = id_table_2,
513 .num_interrupt_in = 0, 514 .num_interrupt_in = 0,
514 .num_bulk_in = 4, 515 .num_bulk_in = 4,
@@ -538,6 +539,7 @@ static struct usb_serial_driver digi_acceleport_4_device = {
538 .name = "digi_4", 539 .name = "digi_4",
539 }, 540 },
540 .description = "Digi 4 port USB adapter", 541 .description = "Digi 4 port USB adapter",
542 .usb_driver = &digi_driver,
541 .id_table = id_table_4, 543 .id_table = id_table_4,
542 .num_interrupt_in = 0, 544 .num_interrupt_in = 0,
543 .num_bulk_in = 5, 545 .num_bulk_in = 5,
diff --git a/drivers/usb/serial/empeg.c b/drivers/usb/serial/empeg.c
index 92beeb19795f..4703c8f85383 100644
--- a/drivers/usb/serial/empeg.c
+++ b/drivers/usb/serial/empeg.c
@@ -117,6 +117,7 @@ static struct usb_serial_driver empeg_device = {
117 .name = "empeg", 117 .name = "empeg",
118 }, 118 },
119 .id_table = id_table, 119 .id_table = id_table,
120 .usb_driver = &empeg_driver,
120 .num_interrupt_in = 0, 121 .num_interrupt_in = 0,
121 .num_bulk_in = 1, 122 .num_bulk_in = 1,
122 .num_bulk_out = 1, 123 .num_bulk_out = 1,
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 6986e756f7c0..4695952b6470 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -464,7 +464,6 @@ static struct usb_device_id id_table_combined [] = {
464 { USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) }, 464 { USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) },
465 { USB_DEVICE(BANDB_VID, BANDB_USO9ML2_PID) }, 465 { USB_DEVICE(BANDB_VID, BANDB_USO9ML2_PID) },
466 { USB_DEVICE(FTDI_VID, EVER_ECO_PRO_CDS) }, 466 { USB_DEVICE(FTDI_VID, EVER_ECO_PRO_CDS) },
467 { USB_DEVICE(FTDI_VID, FTDI_4N_GALAXY_DE_0_PID) },
468 { USB_DEVICE(FTDI_VID, FTDI_4N_GALAXY_DE_1_PID) }, 467 { USB_DEVICE(FTDI_VID, FTDI_4N_GALAXY_DE_1_PID) },
469 { USB_DEVICE(FTDI_VID, FTDI_4N_GALAXY_DE_2_PID) }, 468 { USB_DEVICE(FTDI_VID, FTDI_4N_GALAXY_DE_2_PID) },
470 { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_0_PID) }, 469 { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_0_PID) },
@@ -615,6 +614,7 @@ static struct usb_serial_driver ftdi_sio_device = {
615 .name = "ftdi_sio", 614 .name = "ftdi_sio",
616 }, 615 },
617 .description = "FTDI USB Serial Device", 616 .description = "FTDI USB Serial Device",
 617 .usb_driver = &ftdi_driver,
618 .id_table = id_table_combined, 618 .id_table = id_table_combined,
619 .num_interrupt_in = 0, 619 .num_interrupt_in = 0,
620 .num_bulk_in = 1, 620 .num_bulk_in = 1,
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
index 40dd394de58d..7eff1c03ba80 100644
--- a/drivers/usb/serial/ftdi_sio.h
+++ b/drivers/usb/serial/ftdi_sio.h
@@ -364,7 +364,6 @@
364 * USB-TTY activ, USB-TTY passiv. Some PIDs are used by several devices 364 * USB-TTY activ, USB-TTY passiv. Some PIDs are used by several devices
365 * and I'm not entirely sure which are used by which. 365 * and I'm not entirely sure which are used by which.
366 */ 366 */
367#define FTDI_4N_GALAXY_DE_0_PID 0x8372
368#define FTDI_4N_GALAXY_DE_1_PID 0xF3C0 367#define FTDI_4N_GALAXY_DE_1_PID 0xF3C0
369#define FTDI_4N_GALAXY_DE_2_PID 0xF3C1 368#define FTDI_4N_GALAXY_DE_2_PID 0xF3C1
370 369
diff --git a/drivers/usb/serial/funsoft.c b/drivers/usb/serial/funsoft.c
index 2bebd63d5ed1..4092f6dc9efd 100644
--- a/drivers/usb/serial/funsoft.c
+++ b/drivers/usb/serial/funsoft.c
@@ -58,6 +58,7 @@ static struct usb_serial_driver funsoft_device = {
58 .name = "funsoft", 58 .name = "funsoft",
59 }, 59 },
60 .id_table = id_table, 60 .id_table = id_table,
61 .usb_driver = &funsoft_driver,
61 .num_interrupt_in = NUM_DONT_CARE, 62 .num_interrupt_in = NUM_DONT_CARE,
62 .num_bulk_in = NUM_DONT_CARE, 63 .num_bulk_in = NUM_DONT_CARE,
63 .num_bulk_out = NUM_DONT_CARE, 64 .num_bulk_out = NUM_DONT_CARE,
diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
index 6530d391ebed..74660a3aa670 100644
--- a/drivers/usb/serial/garmin_gps.c
+++ b/drivers/usb/serial/garmin_gps.c
@@ -1566,6 +1566,7 @@ static struct usb_serial_driver garmin_device = {
1566 .name = "garmin_gps", 1566 .name = "garmin_gps",
1567 }, 1567 },
1568 .description = "Garmin GPS usb/tty", 1568 .description = "Garmin GPS usb/tty",
1569 .usb_driver = &garmin_driver,
1569 .id_table = id_table, 1570 .id_table = id_table,
1570 .num_interrupt_in = 1, 1571 .num_interrupt_in = 1,
1571 .num_bulk_in = 1, 1572 .num_bulk_in = 1,
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index 36042937e77f..601e0648dec6 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -20,6 +20,10 @@
20#include <linux/usb/serial.h> 20#include <linux/usb/serial.h>
21#include <asm/uaccess.h> 21#include <asm/uaccess.h>
22 22
23static int generic_probe(struct usb_interface *interface,
24 const struct usb_device_id *id);
25
26
23static int debug; 27static int debug;
24 28
25#ifdef CONFIG_USB_SERIAL_GENERIC 29#ifdef CONFIG_USB_SERIAL_GENERIC
@@ -34,6 +38,21 @@ MODULE_PARM_DESC(product, "User specified USB idProduct");
34 38
35static struct usb_device_id generic_device_ids[2]; /* Initially all zeroes. */ 39static struct usb_device_id generic_device_ids[2]; /* Initially all zeroes. */
36 40
41/* we want to look at all devices, as the vendor/product id can change
42 * depending on the command line argument */
43static struct usb_device_id generic_serial_ids[] = {
44 {.driver_info = 42},
45 {}
46};
47
48static struct usb_driver generic_driver = {
49 .name = "usbserial_generic",
50 .probe = generic_probe,
51 .disconnect = usb_serial_disconnect,
52 .id_table = generic_serial_ids,
53 .no_dynamic_id = 1,
54};
55
37/* All of the device info needed for the Generic Serial Converter */ 56/* All of the device info needed for the Generic Serial Converter */
38struct usb_serial_driver usb_serial_generic_device = { 57struct usb_serial_driver usb_serial_generic_device = {
39 .driver = { 58 .driver = {
@@ -41,6 +60,7 @@ struct usb_serial_driver usb_serial_generic_device = {
41 .name = "generic", 60 .name = "generic",
42 }, 61 },
43 .id_table = generic_device_ids, 62 .id_table = generic_device_ids,
63 .usb_driver = &generic_driver,
44 .num_interrupt_in = NUM_DONT_CARE, 64 .num_interrupt_in = NUM_DONT_CARE,
45 .num_bulk_in = NUM_DONT_CARE, 65 .num_bulk_in = NUM_DONT_CARE,
46 .num_bulk_out = NUM_DONT_CARE, 66 .num_bulk_out = NUM_DONT_CARE,
@@ -48,13 +68,6 @@ struct usb_serial_driver usb_serial_generic_device = {
48 .shutdown = usb_serial_generic_shutdown, 68 .shutdown = usb_serial_generic_shutdown,
49}; 69};
50 70
51/* we want to look at all devices, as the vendor/product id can change
52 * depending on the command line argument */
53static struct usb_device_id generic_serial_ids[] = {
54 {.driver_info = 42},
55 {}
56};
57
58static int generic_probe(struct usb_interface *interface, 71static int generic_probe(struct usb_interface *interface,
59 const struct usb_device_id *id) 72 const struct usb_device_id *id)
60{ 73{
@@ -65,14 +78,6 @@ static int generic_probe(struct usb_interface *interface,
65 return usb_serial_probe(interface, id); 78 return usb_serial_probe(interface, id);
66 return -ENODEV; 79 return -ENODEV;
67} 80}
68
69static struct usb_driver generic_driver = {
70 .name = "usbserial_generic",
71 .probe = generic_probe,
72 .disconnect = usb_serial_disconnect,
73 .id_table = generic_serial_ids,
74 .no_dynamic_id = 1,
75};
76#endif 81#endif
77 82
78int usb_serial_generic_register (int _debug) 83int usb_serial_generic_register (int _debug)
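
Worth noting about generic_serial_ids[] above: an entry whose match_flags are zero compares nothing, so the USB core's id matching treats it as a wildcard that matches every device; the otherwise meaningless .driver_info = 42 merely keeps the entry from being all zeroes, which would terminate the table. Restated as a sketch (not the core's literal code):

#include <linux/usb.h>

static const struct usb_device_id wildcard_ids[] = {
	/* no match_flags set -> the id matcher has nothing to reject;
	 * nonzero driver_info distinguishes this from the terminator
	 */
	{ .driver_info = 42 },
	{ }	/* all-zero terminator */
};
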
diff --git a/drivers/usb/serial/hp4x.c b/drivers/usb/serial/hp4x.c
index ebcac701b069..6c6ebae741c9 100644
--- a/drivers/usb/serial/hp4x.c
+++ b/drivers/usb/serial/hp4x.c
@@ -49,6 +49,7 @@ static struct usb_serial_driver hp49gp_device = {
49 .name = "hp4X", 49 .name = "hp4X",
50 }, 50 },
51 .id_table = id_table, 51 .id_table = id_table,
52 .usb_driver = &hp49gp_driver,
52 .num_interrupt_in = NUM_DONT_CARE, 53 .num_interrupt_in = NUM_DONT_CARE,
53 .num_bulk_in = NUM_DONT_CARE, 54 .num_bulk_in = NUM_DONT_CARE,
54 .num_bulk_out = NUM_DONT_CARE, 55 .num_bulk_out = NUM_DONT_CARE,
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index f623d58370a4..6a26a2e683a6 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -146,6 +146,8 @@ struct edgeport_serial {
146 struct edge_manuf_descriptor manuf_descriptor; /* the manufacturer descriptor */ 146 struct edge_manuf_descriptor manuf_descriptor; /* the manufacturer descriptor */
147 struct edge_boot_descriptor boot_descriptor; /* the boot firmware descriptor */ 147 struct edge_boot_descriptor boot_descriptor; /* the boot firmware descriptor */
148 struct edgeport_product_info product_info; /* Product Info */ 148 struct edgeport_product_info product_info; /* Product Info */
149 struct edge_compatibility_descriptor epic_descriptor; /* Edgeport compatible descriptor */
150 int is_epic; /* flag if EPiC device or not */
149 151
150 __u8 interrupt_in_endpoint; /* the interrupt endpoint handle */ 152 __u8 interrupt_in_endpoint; /* the interrupt endpoint handle */
151 unsigned char * interrupt_in_buffer; /* the buffer we use for the interrupt endpoint */ 153 unsigned char * interrupt_in_buffer; /* the buffer we use for the interrupt endpoint */
@@ -240,14 +242,6 @@ static void edge_shutdown (struct usb_serial *serial);
240 242
241#include "io_tables.h" /* all of the devices that this driver supports */ 243#include "io_tables.h" /* all of the devices that this driver supports */
242 244
243static struct usb_driver io_driver = {
244 .name = "io_edgeport",
245 .probe = usb_serial_probe,
246 .disconnect = usb_serial_disconnect,
247 .id_table = id_table_combined,
248 .no_dynamic_id = 1,
249};
250
251/* function prototypes for all of our local functions */ 245/* function prototypes for all of our local functions */
252static void process_rcvd_data (struct edgeport_serial *edge_serial, unsigned char *buffer, __u16 bufferLength); 246static void process_rcvd_data (struct edgeport_serial *edge_serial, unsigned char *buffer, __u16 bufferLength);
253static void process_rcvd_status (struct edgeport_serial *edge_serial, __u8 byte2, __u8 byte3); 247static void process_rcvd_status (struct edgeport_serial *edge_serial, __u8 byte2, __u8 byte3);
@@ -397,6 +391,7 @@ static int get_string (struct usb_device *dev, int Id, char *string, int buflen)
397 unicode_to_ascii(string, buflen, pStringDesc->wData, pStringDesc->bLength/2); 391 unicode_to_ascii(string, buflen, pStringDesc->wData, pStringDesc->bLength/2);
398 392
399 kfree(pStringDesc); 393 kfree(pStringDesc);
394 dbg("%s - USB String %s", __FUNCTION__, string);
400 return strlen(string); 395 return strlen(string);
401} 396}
402 397
@@ -434,6 +429,34 @@ static int get_string_desc (struct usb_device *dev, int Id, struct usb_string_de
434} 429}
435#endif 430#endif
436 431
432static void dump_product_info(struct edgeport_product_info *product_info)
433{
434 // Dump Product Info structure
435 dbg("**Product Information:");
436 dbg(" ProductId %x", product_info->ProductId );
437 dbg(" NumPorts %d", product_info->NumPorts );
438 dbg(" ProdInfoVer %d", product_info->ProdInfoVer );
439 dbg(" IsServer %d", product_info->IsServer);
440 dbg(" IsRS232 %d", product_info->IsRS232 );
441 dbg(" IsRS422 %d", product_info->IsRS422 );
442 dbg(" IsRS485 %d", product_info->IsRS485 );
443 dbg(" RomSize %d", product_info->RomSize );
444 dbg(" RamSize %d", product_info->RamSize );
445 dbg(" CpuRev %x", product_info->CpuRev );
446 dbg(" BoardRev %x", product_info->BoardRev);
447 dbg(" BootMajorVersion %d.%d.%d", product_info->BootMajorVersion,
448 product_info->BootMinorVersion,
449 le16_to_cpu(product_info->BootBuildNumber));
450 dbg(" FirmwareMajorVersion %d.%d.%d", product_info->FirmwareMajorVersion,
451 product_info->FirmwareMinorVersion,
452 le16_to_cpu(product_info->FirmwareBuildNumber));
453 dbg(" ManufactureDescDate %d/%d/%d", product_info->ManufactureDescDate[0],
454 product_info->ManufactureDescDate[1],
455 product_info->ManufactureDescDate[2]+1900);
456 dbg(" iDownloadFile 0x%x", product_info->iDownloadFile);
457 dbg(" EpicVer %d", product_info->EpicVer);
458}
459
437static void get_product_info(struct edgeport_serial *edge_serial) 460static void get_product_info(struct edgeport_serial *edge_serial)
438{ 461{
439 struct edgeport_product_info *product_info = &edge_serial->product_info; 462 struct edgeport_product_info *product_info = &edge_serial->product_info;
@@ -495,30 +518,60 @@ static void get_product_info(struct edgeport_serial *edge_serial)
495 break; 518 break;
496 } 519 }
497 520
498 // Dump Product Info structure 521 dump_product_info(product_info);
499 dbg("**Product Information:"); 522}
500 dbg(" ProductId %x", product_info->ProductId );
501 dbg(" NumPorts %d", product_info->NumPorts );
502 dbg(" ProdInfoVer %d", product_info->ProdInfoVer );
503 dbg(" IsServer %d", product_info->IsServer);
504 dbg(" IsRS232 %d", product_info->IsRS232 );
505 dbg(" IsRS422 %d", product_info->IsRS422 );
506 dbg(" IsRS485 %d", product_info->IsRS485 );
507 dbg(" RomSize %d", product_info->RomSize );
508 dbg(" RamSize %d", product_info->RamSize );
509 dbg(" CpuRev %x", product_info->CpuRev );
510 dbg(" BoardRev %x", product_info->BoardRev);
511 dbg(" BootMajorVersion %d.%d.%d", product_info->BootMajorVersion,
512 product_info->BootMinorVersion,
513 le16_to_cpu(product_info->BootBuildNumber));
514 dbg(" FirmwareMajorVersion %d.%d.%d", product_info->FirmwareMajorVersion,
515 product_info->FirmwareMinorVersion,
516 le16_to_cpu(product_info->FirmwareBuildNumber));
517 dbg(" ManufactureDescDate %d/%d/%d", product_info->ManufactureDescDate[0],
518 product_info->ManufactureDescDate[1],
519 product_info->ManufactureDescDate[2]+1900);
520 dbg(" iDownloadFile 0x%x", product_info->iDownloadFile);
521 523
524static int get_epic_descriptor(struct edgeport_serial *ep)
525{
526 int result;
527 struct usb_serial *serial = ep->serial;
528 struct edgeport_product_info *product_info = &ep->product_info;
529 struct edge_compatibility_descriptor *epic = &ep->epic_descriptor;
530 struct edge_compatibility_bits *bits;
531
532 ep->is_epic = 0;
533 result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
534 USB_REQUEST_ION_GET_EPIC_DESC,
535 0xC0, 0x00, 0x00,
536 &ep->epic_descriptor,
537 sizeof(struct edge_compatibility_descriptor),
538 300);
539
540 dbg("%s result = %d", __FUNCTION__, result);
541
542 if (result > 0) {
543 ep->is_epic = 1;
544 memset(product_info, 0, sizeof(struct edgeport_product_info));
545
546 product_info->NumPorts = epic->NumPorts;
547 product_info->ProdInfoVer = 0;
548 product_info->FirmwareMajorVersion = epic->MajorVersion;
549 product_info->FirmwareMinorVersion = epic->MinorVersion;
550 product_info->FirmwareBuildNumber = epic->BuildNumber;
551 product_info->iDownloadFile = epic->iDownloadFile;
552 product_info->EpicVer = epic->EpicVer;
553 product_info->Epic = epic->Supports;
554 product_info->ProductId = ION_DEVICE_ID_EDGEPORT_COMPATIBLE;
555 dump_product_info(product_info);
556
557 bits = &ep->epic_descriptor.Supports;
558 dbg("**EPIC descriptor:");
559 dbg(" VendEnableSuspend: %s", bits->VendEnableSuspend ? "TRUE": "FALSE");
560 dbg(" IOSPOpen : %s", bits->IOSPOpen ? "TRUE": "FALSE" );
561 dbg(" IOSPClose : %s", bits->IOSPClose ? "TRUE": "FALSE" );
562 dbg(" IOSPChase : %s", bits->IOSPChase ? "TRUE": "FALSE" );
563 dbg(" IOSPSetRxFlow : %s", bits->IOSPSetRxFlow ? "TRUE": "FALSE" );
564 dbg(" IOSPSetTxFlow : %s", bits->IOSPSetTxFlow ? "TRUE": "FALSE" );
565 dbg(" IOSPSetXChar : %s", bits->IOSPSetXChar ? "TRUE": "FALSE" );
566 dbg(" IOSPRxCheck : %s", bits->IOSPRxCheck ? "TRUE": "FALSE" );
567 dbg(" IOSPSetClrBreak : %s", bits->IOSPSetClrBreak ? "TRUE": "FALSE" );
568 dbg(" IOSPWriteMCR : %s", bits->IOSPWriteMCR ? "TRUE": "FALSE" );
569 dbg(" IOSPWriteLCR : %s", bits->IOSPWriteLCR ? "TRUE": "FALSE" );
570 dbg(" IOSPSetBaudRate : %s", bits->IOSPSetBaudRate ? "TRUE": "FALSE" );
571 dbg(" TrueEdgeport : %s", bits->TrueEdgeport ? "TRUE": "FALSE" );
572 }
573
574 return result;
522} 575}
523 576
524 577
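
get_epic_descriptor() above issues a vendor control-IN request with the raw request type 0xC0, which decomposes as USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE. Spelled out with the ch9.h constants — a hypothetical wrapper using the same 300 ms timeout as the diff:

#include <linux/usb.h>

static int read_vendor_desc(struct usb_device *udev, u8 request,
			    void *buf, u16 len)
{
	/* 0xC0 == USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE */
	return usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), request,
			       USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			       0, 0, buf, len, 300);
}
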
@@ -1017,21 +1070,29 @@ static void edge_close (struct usb_serial_port *port, struct file * filp)
1017 1070
1018 edge_port->closePending = TRUE; 1071 edge_port->closePending = TRUE;
1019 1072
1020 /* flush and chase */ 1073 if ((!edge_serial->is_epic) ||
1021 edge_port->chaseResponsePending = TRUE; 1074 ((edge_serial->is_epic) &&
1022 1075 (edge_serial->epic_descriptor.Supports.IOSPChase))) {
1023 dbg("%s - Sending IOSP_CMD_CHASE_PORT", __FUNCTION__); 1076 /* flush and chase */
1024 status = send_iosp_ext_cmd (edge_port, IOSP_CMD_CHASE_PORT, 0); 1077 edge_port->chaseResponsePending = TRUE;
1025 if (status == 0) { 1078
1026 // block until chase finished 1079 dbg("%s - Sending IOSP_CMD_CHASE_PORT", __FUNCTION__);
1027 block_until_chase_response(edge_port); 1080 status = send_iosp_ext_cmd (edge_port, IOSP_CMD_CHASE_PORT, 0);
1028 } else { 1081 if (status == 0) {
1029 edge_port->chaseResponsePending = FALSE; 1082 // block until chase finished
1083 block_until_chase_response(edge_port);
1084 } else {
1085 edge_port->chaseResponsePending = FALSE;
1086 }
1030 } 1087 }
1031 1088
1032 /* close the port */ 1089 if ((!edge_serial->is_epic) ||
1033 dbg("%s - Sending IOSP_CMD_CLOSE_PORT", __FUNCTION__); 1090 ((edge_serial->is_epic) &&
1034 send_iosp_ext_cmd (edge_port, IOSP_CMD_CLOSE_PORT, 0); 1091 (edge_serial->epic_descriptor.Supports.IOSPClose))) {
1092 /* close the port */
1093 dbg("%s - Sending IOSP_CMD_CLOSE_PORT", __FUNCTION__);
1094 send_iosp_ext_cmd (edge_port, IOSP_CMD_CLOSE_PORT, 0);
1095 }
1035 1096
1036 //port->close = TRUE; 1097 //port->close = TRUE;
1037 edge_port->closePending = FALSE; 1098 edge_port->closePending = FALSE;
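
The gate that edge_close() — and edge_break(), send_cmd_write_baud_rate(), and change_port_settings() below — keeps repeating, (!is_epic) || (is_epic && Supports.bit), is logically just (!is_epic || Supports.bit): non-EPiC Edgeports support the full IOSP command set, while EPiC devices support only what their descriptor advertises. A hypothetical helper that would centralize the test:

/* Hypothetical: true when the IOSP command may be sent. Pass one of
 * the epic_descriptor.Supports bitfields as 'supported'.
 */
static inline int epic_allows(const struct edgeport_serial *s, int supported)
{
	return !s->is_epic || supported;
}

Usage would be, e.g., if (epic_allows(edge_serial, edge_serial->epic_descriptor.Supports.IOSPChase)) before the chase command.
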
@@ -1694,29 +1755,38 @@ static int edge_ioctl (struct usb_serial_port *port, struct file *file, unsigned
1694static void edge_break (struct usb_serial_port *port, int break_state) 1755static void edge_break (struct usb_serial_port *port, int break_state)
1695{ 1756{
1696 struct edgeport_port *edge_port = usb_get_serial_port_data(port); 1757 struct edgeport_port *edge_port = usb_get_serial_port_data(port);
1758 struct edgeport_serial *edge_serial = usb_get_serial_data(port->serial);
1697 int status; 1759 int status;
1698 1760
1699 /* flush and chase */ 1761 if ((!edge_serial->is_epic) ||
1700 edge_port->chaseResponsePending = TRUE; 1762 ((edge_serial->is_epic) &&
1701 1763 (edge_serial->epic_descriptor.Supports.IOSPChase))) {
1702 dbg("%s - Sending IOSP_CMD_CHASE_PORT", __FUNCTION__); 1764 /* flush and chase */
1703 status = send_iosp_ext_cmd (edge_port, IOSP_CMD_CHASE_PORT, 0); 1765 edge_port->chaseResponsePending = TRUE;
1704 if (status == 0) { 1766
1705 // block until chase finished 1767 dbg("%s - Sending IOSP_CMD_CHASE_PORT", __FUNCTION__);
1706 block_until_chase_response(edge_port); 1768 status = send_iosp_ext_cmd (edge_port, IOSP_CMD_CHASE_PORT, 0);
1707 } else { 1769 if (status == 0) {
1708 edge_port->chaseResponsePending = FALSE; 1770 // block until chase finished
1771 block_until_chase_response(edge_port);
1772 } else {
1773 edge_port->chaseResponsePending = FALSE;
1774 }
1709 } 1775 }
1710 1776
1711 if (break_state == -1) { 1777 if ((!edge_serial->is_epic) ||
1712 dbg("%s - Sending IOSP_CMD_SET_BREAK", __FUNCTION__); 1778 ((edge_serial->is_epic) &&
1713 status = send_iosp_ext_cmd (edge_port, IOSP_CMD_SET_BREAK, 0); 1779 (edge_serial->epic_descriptor.Supports.IOSPSetClrBreak))) {
1714 } else { 1780 if (break_state == -1) {
1715 dbg("%s - Sending IOSP_CMD_CLEAR_BREAK", __FUNCTION__); 1781 dbg("%s - Sending IOSP_CMD_SET_BREAK", __FUNCTION__);
1716 status = send_iosp_ext_cmd (edge_port, IOSP_CMD_CLEAR_BREAK, 0); 1782 status = send_iosp_ext_cmd (edge_port, IOSP_CMD_SET_BREAK, 0);
1717 } 1783 } else {
1718 if (status) { 1784 dbg("%s - Sending IOSP_CMD_CLEAR_BREAK", __FUNCTION__);
1719 dbg("%s - error sending break set/clear command.", __FUNCTION__); 1785 status = send_iosp_ext_cmd (edge_port, IOSP_CMD_CLEAR_BREAK, 0);
1786 }
1787 if (status) {
1788 dbg("%s - error sending break set/clear command.", __FUNCTION__);
1789 }
1720 } 1790 }
1721 1791
1722 return; 1792 return;
@@ -2288,6 +2358,7 @@ static int write_cmd_usb (struct edgeport_port *edge_port, unsigned char *buffer
2288 *****************************************************************************/ 2358 *****************************************************************************/
2289static int send_cmd_write_baud_rate (struct edgeport_port *edge_port, int baudRate) 2359static int send_cmd_write_baud_rate (struct edgeport_port *edge_port, int baudRate)
2290{ 2360{
2361 struct edgeport_serial *edge_serial = usb_get_serial_data(edge_port->port->serial);
2291 unsigned char *cmdBuffer; 2362 unsigned char *cmdBuffer;
2292 unsigned char *currCmd; 2363 unsigned char *currCmd;
2293 int cmdLen = 0; 2364 int cmdLen = 0;
@@ -2295,6 +2366,14 @@ static int send_cmd_write_baud_rate (struct edgeport_port *edge_port, int baudRa
2295 int status; 2366 int status;
2296 unsigned char number = edge_port->port->number - edge_port->port->serial->minor; 2367 unsigned char number = edge_port->port->number - edge_port->port->serial->minor;
2297 2368
2369 if ((!edge_serial->is_epic) ||
2370 ((edge_serial->is_epic) &&
2371 (!edge_serial->epic_descriptor.Supports.IOSPSetBaudRate))) {
2372 dbg("SendCmdWriteBaudRate - NOT Setting baud rate for port = %d, baud = %d",
2373 edge_port->port->number, baudRate);
2374 return 0;
2375 }
2376
2298 dbg("%s - port = %d, baud = %d", __FUNCTION__, edge_port->port->number, baudRate); 2377 dbg("%s - port = %d, baud = %d", __FUNCTION__, edge_port->port->number, baudRate);
2299 2378
2300 status = calc_baud_rate_divisor (baudRate, &divisor); 2379 status = calc_baud_rate_divisor (baudRate, &divisor);
@@ -2374,6 +2453,7 @@ static int calc_baud_rate_divisor (int baudrate, int *divisor)
2374 *****************************************************************************/ 2453 *****************************************************************************/
2375static int send_cmd_write_uart_register (struct edgeport_port *edge_port, __u8 regNum, __u8 regValue) 2454static int send_cmd_write_uart_register (struct edgeport_port *edge_port, __u8 regNum, __u8 regValue)
2376{ 2455{
2456 struct edgeport_serial *edge_serial = usb_get_serial_data(edge_port->port->serial);
2377 unsigned char *cmdBuffer; 2457 unsigned char *cmdBuffer;
2378 unsigned char *currCmd; 2458 unsigned char *currCmd;
2379 unsigned long cmdLen = 0; 2459 unsigned long cmdLen = 0;
@@ -2381,6 +2461,22 @@ static int send_cmd_write_uart_register (struct edgeport_port *edge_port, __u8 r
2381 2461
 2382 dbg("%s - write to %s register 0x%02x", __FUNCTION__, (regNum == MCR) ? "MCR" : "LCR", regValue); 2462
2383 2463
2464 if ((!edge_serial->is_epic) ||
2465 ((edge_serial->is_epic) &&
2466 (!edge_serial->epic_descriptor.Supports.IOSPWriteMCR) &&
2467 (regNum == MCR))) {
2468 dbg("SendCmdWriteUartReg - Not writting to MCR Register");
2469 return 0;
2470 }
2471
2472 if ((!edge_serial->is_epic) ||
2473 ((edge_serial->is_epic) &&
2474 (!edge_serial->epic_descriptor.Supports.IOSPWriteLCR) &&
2475 (regNum == LCR))) {
2476 dbg ("SendCmdWriteUartReg - Not writting to LCR Register");
2477 return 0;
2478 }
2479
2384 // Alloc memory for the string of commands. 2480 // Alloc memory for the string of commands.
2385 cmdBuffer = kmalloc (0x10, GFP_ATOMIC); 2481 cmdBuffer = kmalloc (0x10, GFP_ATOMIC);
2386 if (cmdBuffer == NULL ) { 2482 if (cmdBuffer == NULL ) {
@@ -2414,6 +2510,7 @@ static int send_cmd_write_uart_register (struct edgeport_port *edge_port, __u8 r
2414#endif 2510#endif
2415static void change_port_settings (struct edgeport_port *edge_port, struct ktermios *old_termios) 2511static void change_port_settings (struct edgeport_port *edge_port, struct ktermios *old_termios)
2416{ 2512{
2513 struct edgeport_serial *edge_serial = usb_get_serial_data(edge_port->port->serial);
2417 struct tty_struct *tty; 2514 struct tty_struct *tty;
2418 int baud; 2515 int baud;
2419 unsigned cflag; 2516 unsigned cflag;
@@ -2494,8 +2591,12 @@ static void change_port_settings (struct edgeport_port *edge_port, struct ktermi
2494 unsigned char stop_char = STOP_CHAR(tty); 2591 unsigned char stop_char = STOP_CHAR(tty);
2495 unsigned char start_char = START_CHAR(tty); 2592 unsigned char start_char = START_CHAR(tty);
2496 2593
2497 send_iosp_ext_cmd (edge_port, IOSP_CMD_SET_XON_CHAR, start_char); 2594 if ((!edge_serial->is_epic) ||
2498 send_iosp_ext_cmd (edge_port, IOSP_CMD_SET_XOFF_CHAR, stop_char); 2595 ((edge_serial->is_epic) &&
2596 (edge_serial->epic_descriptor.Supports.IOSPSetXChar))) {
2597 send_iosp_ext_cmd(edge_port, IOSP_CMD_SET_XON_CHAR, start_char);
2598 send_iosp_ext_cmd(edge_port, IOSP_CMD_SET_XOFF_CHAR, stop_char);
2599 }
2499 2600
2500 /* if we are implementing INBOUND XON/XOFF */ 2601 /* if we are implementing INBOUND XON/XOFF */
2501 if (I_IXOFF(tty)) { 2602 if (I_IXOFF(tty)) {
@@ -2515,8 +2616,14 @@ static void change_port_settings (struct edgeport_port *edge_port, struct ktermi
2515 } 2616 }
2516 2617
2517 /* Set flow control to the configured value */ 2618 /* Set flow control to the configured value */
2518 send_iosp_ext_cmd (edge_port, IOSP_CMD_SET_RX_FLOW, rxFlow); 2619 if ((!edge_serial->is_epic) ||
2519 send_iosp_ext_cmd (edge_port, IOSP_CMD_SET_TX_FLOW, txFlow); 2620 ((edge_serial->is_epic) &&
2621 (edge_serial->epic_descriptor.Supports.IOSPSetRxFlow)))
2622 send_iosp_ext_cmd(edge_port, IOSP_CMD_SET_RX_FLOW, rxFlow);
2623 if ((!edge_serial->is_epic) ||
2624 ((edge_serial->is_epic) &&
2625 (edge_serial->epic_descriptor.Supports.IOSPSetTxFlow)))
2626 send_iosp_ext_cmd(edge_port, IOSP_CMD_SET_TX_FLOW, txFlow);
2520 2627
2521 2628
2522 edge_port->shadowLCR &= ~(LCR_BITS_MASK | LCR_STOP_MASK | LCR_PAR_MASK); 2629 edge_port->shadowLCR &= ~(LCR_BITS_MASK | LCR_STOP_MASK | LCR_PAR_MASK);
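change_port_settings() applies the same capability gating to the XON/XOFF and flow-control commands. The shared pattern, as a hypothetical wrapper (names mirror the driver; the wrapper itself is illustrative):

static void send_iosp_if_supported(struct edgeport_port *port,
				   struct edgeport_serial *s,
				   int supported, int cmd, __u8 param)
{
	/* Non-EPiC devices take the legacy path unconditionally;
	 * EPiC devices must advertise the capability bit. */
	if (!s->is_epic || supported)
		send_iosp_ext_cmd(port, cmd, param);
}

A call such as send_iosp_if_supported(edge_port, edge_serial, edge_serial->epic_descriptor.Supports.IOSPSetRxFlow, IOSP_CMD_SET_RX_FLOW, rxFlow) would replace one of the open-coded conditionals above.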
@@ -2728,6 +2835,13 @@ static int edge_startup (struct usb_serial *serial)
2728 struct edgeport_port *edge_port; 2835 struct edgeport_port *edge_port;
2729 struct usb_device *dev; 2836 struct usb_device *dev;
2730 int i, j; 2837 int i, j;
2838 int response;
2839 int interrupt_in_found;
2840 int bulk_in_found;
2841 int bulk_out_found;
2842 static __u32 descriptor[3] = { EDGE_COMPATIBILITY_MASK0,
2843 EDGE_COMPATIBILITY_MASK1,
2844 EDGE_COMPATIBILITY_MASK2 };
2731 2845
2732 dev = serial->dev; 2846 dev = serial->dev;
2733 2847
@@ -2750,38 +2864,50 @@ static int edge_startup (struct usb_serial *serial)
2750 2864
2751 dev_info(&serial->dev->dev, "%s detected\n", edge_serial->name); 2865 dev_info(&serial->dev->dev, "%s detected\n", edge_serial->name);
2752 2866
2753 /* get the manufacturing descriptor for this device */ 2867 /* Read the epic descriptor */
2754 get_manufacturing_desc (edge_serial); 2868 if (get_epic_descriptor(edge_serial) <= 0) {
2869 /* memcpy descriptor to Supports structures */
2870 memcpy(&edge_serial->epic_descriptor.Supports, descriptor,
2871 sizeof(struct edge_compatibility_bits));
2872
2873 /* get the manufacturing descriptor for this device */
2874 get_manufacturing_desc (edge_serial);
2755 2875
2756 /* get the boot descriptor */ 2876 /* get the boot descriptor */
2757 get_boot_desc (edge_serial); 2877 get_boot_desc (edge_serial);
2758 2878
2759 get_product_info(edge_serial); 2879 get_product_info(edge_serial);
2880 }
2760 2881
2761 /* set the number of ports from the manufacturing description */ 2882 /* set the number of ports from the manufacturing description */
2762 /* serial->num_ports = serial->product_info.NumPorts; */ 2883 /* serial->num_ports = serial->product_info.NumPorts; */
2763 if (edge_serial->product_info.NumPorts != serial->num_ports) { 2884 if ((!edge_serial->is_epic) &&
2764 warn("%s - Device Reported %d serial ports vs core " 2885 (edge_serial->product_info.NumPorts != serial->num_ports)) {
2765 "thinking we have %d ports, email greg@kroah.com this info.", 2886 dev_warn(&serial->dev->dev, "Device Reported %d serial ports "
2766 __FUNCTION__, edge_serial->product_info.NumPorts, 2887 "vs. core thinking we have %d ports, email "
2767 serial->num_ports); 2888 "greg@kroah.com this information.",
2889 edge_serial->product_info.NumPorts,
2890 serial->num_ports);
2768 } 2891 }
2769 2892
2770 dbg("%s - time 1 %ld", __FUNCTION__, jiffies); 2893 dbg("%s - time 1 %ld", __FUNCTION__, jiffies);
2771 2894
2772 /* now load the application firmware into this device */ 2895 /* If not an EPiC device */
2773 load_application_firmware (edge_serial); 2896 if (!edge_serial->is_epic) {
2897 /* now load the application firmware into this device */
2898 load_application_firmware (edge_serial);
2774 2899
2775 dbg("%s - time 2 %ld", __FUNCTION__, jiffies); 2900 dbg("%s - time 2 %ld", __FUNCTION__, jiffies);
2776 2901
2777 /* Check current Edgeport EEPROM and update if necessary */ 2902 /* Check current Edgeport EEPROM and update if necessary */
2778 update_edgeport_E2PROM (edge_serial); 2903 update_edgeport_E2PROM (edge_serial);
2779
2780 dbg("%s - time 3 %ld", __FUNCTION__, jiffies);
2781 2904
2782 /* set the configuration to use #1 */ 2905 dbg("%s - time 3 %ld", __FUNCTION__, jiffies);
2783// dbg("set_configuration 1"); 2906
2784// usb_set_configuration (dev, 1); 2907 /* set the configuration to use #1 */
2908// dbg("set_configuration 1");
2909// usb_set_configuration (dev, 1);
2910 }
2785 2911
2786 /* we set up the pointers to the endpoints in the edge_open function, 2912 /* we set up the pointers to the endpoints in the edge_open function,
2787 * as the structures aren't created yet. */ 2913 * as the structures aren't created yet. */
@@ -2804,8 +2930,101 @@ static int edge_startup (struct usb_serial *serial)
2804 edge_port->port = serial->port[i]; 2930 edge_port->port = serial->port[i];
2805 usb_set_serial_port_data(serial->port[i], edge_port); 2931 usb_set_serial_port_data(serial->port[i], edge_port);
2806 } 2932 }
2807 2933
2808 return 0; 2934 response = 0;
2935
2936 if (edge_serial->is_epic) {
2937 		/* This is an EPiC device: set up our interrupt polling now and our read urb, so
2938 * that the device knows it really is connected. */
2939 interrupt_in_found = bulk_in_found = bulk_out_found = FALSE;
2940 for (i = 0; i < serial->interface->altsetting[0].desc.bNumEndpoints; ++i) {
2941 struct usb_endpoint_descriptor *endpoint;
2942 int buffer_size;
2943
2944 endpoint = &serial->interface->altsetting[0].endpoint[i].desc;
2945 buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
2946 if ((!interrupt_in_found) &&
2947 (usb_endpoint_is_int_in(endpoint))) {
2948 				/* we found an interrupt in endpoint */
2949 dbg("found interrupt in");
2950
2951 /* not set up yet, so do it now */
2952 edge_serial->interrupt_read_urb = usb_alloc_urb(0, GFP_KERNEL);
2953 if (!edge_serial->interrupt_read_urb) {
2954 err("out of memory");
2955 return -ENOMEM;
2956 }
2957 edge_serial->interrupt_in_buffer = kmalloc(buffer_size, GFP_KERNEL);
2958 if (!edge_serial->interrupt_in_buffer) {
2959 err("out of memory");
2960 usb_free_urb(edge_serial->interrupt_read_urb);
2961 return -ENOMEM;
2962 }
2963 edge_serial->interrupt_in_endpoint = endpoint->bEndpointAddress;
2964
2965 /* set up our interrupt urb */
2966 usb_fill_int_urb(edge_serial->interrupt_read_urb,
2967 dev,
2968 usb_rcvintpipe(dev, endpoint->bEndpointAddress),
2969 edge_serial->interrupt_in_buffer,
2970 buffer_size,
2971 edge_interrupt_callback,
2972 edge_serial,
2973 endpoint->bInterval);
2974
2975 interrupt_in_found = TRUE;
2976 }
2977
2978 if ((!bulk_in_found) &&
2979 (usb_endpoint_is_bulk_in(endpoint))) {
2980 /* we found a bulk in endpoint */
2981 dbg("found bulk in");
2982
2983 /* not set up yet, so do it now */
2984 edge_serial->read_urb = usb_alloc_urb(0, GFP_KERNEL);
2985 if (!edge_serial->read_urb) {
2986 err("out of memory");
2987 return -ENOMEM;
2988 }
2989 edge_serial->bulk_in_buffer = kmalloc(buffer_size, GFP_KERNEL);
2990 if (!edge_serial->bulk_in_buffer) {
2991 err ("out of memory");
2992 usb_free_urb(edge_serial->read_urb);
2993 return -ENOMEM;
2994 }
2995 edge_serial->bulk_in_endpoint = endpoint->bEndpointAddress;
2996
2997 /* set up our bulk in urb */
2998 usb_fill_bulk_urb(edge_serial->read_urb, dev,
2999 usb_rcvbulkpipe(dev, endpoint->bEndpointAddress),
3000 edge_serial->bulk_in_buffer,
3001 						buffer_size,
3002 edge_bulk_in_callback,
3003 edge_serial);
3004 bulk_in_found = TRUE;
3005 }
3006
3007 if ((!bulk_out_found) &&
3008 (usb_endpoint_is_bulk_out(endpoint))) {
3009 /* we found a bulk out endpoint */
3010 dbg("found bulk out");
3011 edge_serial->bulk_out_endpoint = endpoint->bEndpointAddress;
3012 bulk_out_found = TRUE;
3013 }
3014 }
3015
3016 if ((!interrupt_in_found) || (!bulk_in_found) || (!bulk_out_found)) {
3017 err ("Error - the proper endpoints were not found!");
3018 return -ENODEV;
3019 }
3020
3021 		/* start interrupt read for this edgeport; this interrupt will
3022 * continue as long as the edgeport is connected */
3023 response = usb_submit_urb(edge_serial->interrupt_read_urb, GFP_KERNEL);
3024 if (response)
3025 			err("%s - Error %d submitting interrupt urb", __FUNCTION__, response);
3026 }
3027 return response;
2809} 3028}
2810 3029
2811 3030
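The EPiC branch of edge_startup() scans altsetting 0 once, classifying each endpoint with the USB core helpers before allocating the matching urb. Stripped of the allocation and error handling, the scan has this shape (dump_endpoints() is an illustrative name):

static void dump_endpoints(struct usb_serial *serial)
{
	struct usb_host_interface *alt = &serial->interface->altsetting[0];
	int i;

	for (i = 0; i < alt->desc.bNumEndpoints; ++i) {
		struct usb_endpoint_descriptor *epd = &alt->endpoint[i].desc;

		if (usb_endpoint_is_int_in(epd))
			dbg("INT IN   0x%02x", epd->bEndpointAddress);
		else if (usb_endpoint_is_bulk_in(epd))
			dbg("BULK IN  0x%02x", epd->bEndpointAddress);
		else if (usb_endpoint_is_bulk_out(epd))
			dbg("BULK OUT 0x%02x", epd->bEndpointAddress);
	}
}

Interrupt endpoints additionally carry a bInterval, which is why the interrupt urb is filled with usb_fill_int_urb() while the bulk urb uses usb_fill_bulk_urb().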
@@ -2815,6 +3034,7 @@ static int edge_startup (struct usb_serial *serial)
2815 ****************************************************************************/ 3034 ****************************************************************************/
2816static void edge_shutdown (struct usb_serial *serial) 3035static void edge_shutdown (struct usb_serial *serial)
2817{ 3036{
3037 struct edgeport_serial *edge_serial = usb_get_serial_data(serial);
2818 int i; 3038 int i;
2819 3039
2820 dbg("%s", __FUNCTION__); 3040 dbg("%s", __FUNCTION__);
@@ -2824,7 +3044,18 @@ static void edge_shutdown (struct usb_serial *serial)
2824 kfree (usb_get_serial_port_data(serial->port[i])); 3044 kfree (usb_get_serial_port_data(serial->port[i]));
2825 usb_set_serial_port_data(serial->port[i], NULL); 3045 usb_set_serial_port_data(serial->port[i], NULL);
2826 } 3046 }
2827 kfree (usb_get_serial_data(serial)); 3047 /* free up our endpoint stuff */
3048 if (edge_serial->is_epic) {
3049 usb_unlink_urb(edge_serial->interrupt_read_urb);
3050 usb_free_urb(edge_serial->interrupt_read_urb);
3051 kfree(edge_serial->interrupt_in_buffer);
3052
3053 usb_unlink_urb(edge_serial->read_urb);
3054 usb_free_urb(edge_serial->read_urb);
3055 kfree(edge_serial->bulk_in_buffer);
3056 }
3057
3058 kfree(edge_serial);
2828 usb_set_serial_data(serial, NULL); 3059 usb_set_serial_data(serial, NULL);
2829} 3060}
2830 3061
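edge_shutdown() tears each EPiC stream down in the order stop, release, free. That ordering, isolated into a hypothetical helper (usb_unlink_urb() is the asynchronous stop; usb_kill_urb() would be the synchronous variant):

static void epic_free_stream(struct urb *urb, void *buf)
{
	if (urb) {
		usb_unlink_urb(urb);	/* stop any in-flight transfer first */
		usb_free_urb(urb);	/* then drop the urb reference */
	}
	kfree(buf);			/* finally free the data buffer */
}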
@@ -2846,6 +3077,9 @@ static int __init edgeport_init(void)
2846 retval = usb_serial_register(&edgeport_8port_device); 3077 retval = usb_serial_register(&edgeport_8port_device);
2847 if (retval) 3078 if (retval)
2848 goto failed_8port_device_register; 3079 goto failed_8port_device_register;
3080 retval = usb_serial_register(&epic_device);
3081 if (retval)
3082 goto failed_epic_device_register;
2849 retval = usb_register(&io_driver); 3083 retval = usb_register(&io_driver);
2850 if (retval) 3084 if (retval)
2851 goto failed_usb_register; 3085 goto failed_usb_register;
@@ -2853,6 +3087,8 @@ static int __init edgeport_init(void)
2853 return 0; 3087 return 0;
2854 3088
2855failed_usb_register: 3089failed_usb_register:
3090 usb_serial_deregister(&epic_device);
3091failed_epic_device_register:
2856 usb_serial_deregister(&edgeport_8port_device); 3092 usb_serial_deregister(&edgeport_8port_device);
2857failed_8port_device_register: 3093failed_8port_device_register:
2858 usb_serial_deregister(&edgeport_4port_device); 3094 usb_serial_deregister(&edgeport_4port_device);
@@ -2873,6 +3109,7 @@ static void __exit edgeport_exit (void)
2873 usb_serial_deregister (&edgeport_2port_device); 3109 usb_serial_deregister (&edgeport_2port_device);
2874 usb_serial_deregister (&edgeport_4port_device); 3110 usb_serial_deregister (&edgeport_4port_device);
2875 usb_serial_deregister (&edgeport_8port_device); 3111 usb_serial_deregister (&edgeport_8port_device);
3112 usb_serial_deregister (&epic_device);
2876} 3113}
2877 3114
2878module_init(edgeport_init); 3115module_init(edgeport_init);
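edgeport_init() grows one more rung in its unwind ladder: each successful registration gains a matching deregister label, executed in reverse order when a later step fails. Reduced to the three steps touched here, the pattern is (a sketch, not the full function):

static int __init example_init(void)
{
	int retval;

	retval = usb_serial_register(&edgeport_8port_device);
	if (retval)
		goto failed_8port;
	retval = usb_serial_register(&epic_device);
	if (retval)
		goto failed_epic;
	retval = usb_register(&io_driver);
	if (retval)
		goto failed_usb;
	return 0;

failed_usb:
	usb_serial_deregister(&epic_device);
failed_epic:
	usb_serial_deregister(&edgeport_8port_device);
failed_8port:
	return retval;
}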
diff --git a/drivers/usb/serial/io_edgeport.h b/drivers/usb/serial/io_edgeport.h
index 123fa8a904e6..29a913a6daca 100644
--- a/drivers/usb/serial/io_edgeport.h
+++ b/drivers/usb/serial/io_edgeport.h
@@ -111,10 +111,12 @@ struct edgeport_product_info {
111 __le16 FirmwareBuildNumber; /* zzzz (LE format) */ 111 __le16 FirmwareBuildNumber; /* zzzz (LE format) */
112 112
113 __u8 ManufactureDescDate[3]; /* MM/DD/YY when descriptor template was compiled */ 113 __u8 ManufactureDescDate[3]; /* MM/DD/YY when descriptor template was compiled */
114 __u8 Unused1[1]; /* Available */ 114 __u8 HardwareType;
115 115
116 __u8 iDownloadFile; /* What to download to EPiC device */ 116 __u8 iDownloadFile; /* What to download to EPiC device */
117 __u8 Unused2[2]; /* Available */ 117 __u8 EpicVer; /* What version of EPiC spec this device supports */
118
119 struct edge_compatibility_bits Epic;
118}; 120};
119 121
120/* 122/*
diff --git a/drivers/usb/serial/io_tables.h b/drivers/usb/serial/io_tables.h
index fad561c04c76..6d3008772540 100644
--- a/drivers/usb/serial/io_tables.h
+++ b/drivers/usb/serial/io_tables.h
@@ -47,6 +47,18 @@ static struct usb_device_id edgeport_8port_id_table [] = {
47 { } 47 { }
48}; 48};
49 49
50static struct usb_device_id Epic_port_id_table [] = {
51 { USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0202) },
52 { USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0203) },
53 { USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0310) },
54 { USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0311) },
55 { USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0312) },
56 { USB_DEVICE(USB_VENDOR_ID_AXIOHM, AXIOHM_DEVICE_ID_EPIC_A758) },
57 { USB_DEVICE(USB_VENDOR_ID_AXIOHM, AXIOHM_DEVICE_ID_EPIC_A794) },
58 { USB_DEVICE(USB_VENDOR_ID_AXIOHM, AXIOHM_DEVICE_ID_EPIC_A225) },
59 { }
60};
61
50/* Devices that this driver supports */ 62/* Devices that this driver supports */
51static struct usb_device_id id_table_combined [] = { 63static struct usb_device_id id_table_combined [] = {
52 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_4) }, 64 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_4) },
@@ -70,17 +82,34 @@ static struct usb_device_id id_table_combined [] = {
70 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_8R) }, 82 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_8R) },
71 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_8RR) }, 83 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_8RR) },
72 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_412_8) }, 84 { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_412_8) },
85 { USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0202) },
86 { USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0203) },
87 { USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0310) },
88 { USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0311) },
89 { USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0312) },
90 { USB_DEVICE(USB_VENDOR_ID_AXIOHM, AXIOHM_DEVICE_ID_EPIC_A758) },
91 { USB_DEVICE(USB_VENDOR_ID_AXIOHM, AXIOHM_DEVICE_ID_EPIC_A794) },
92 { USB_DEVICE(USB_VENDOR_ID_AXIOHM, AXIOHM_DEVICE_ID_EPIC_A225) },
73 { } /* Terminating entry */ 93 { } /* Terminating entry */
74}; 94};
75 95
76MODULE_DEVICE_TABLE (usb, id_table_combined); 96MODULE_DEVICE_TABLE (usb, id_table_combined);
77 97
98static struct usb_driver io_driver = {
99 .name = "io_edgeport",
100 .probe = usb_serial_probe,
101 .disconnect = usb_serial_disconnect,
102 .id_table = id_table_combined,
103 .no_dynamic_id = 1,
104};
105
78static struct usb_serial_driver edgeport_2port_device = { 106static struct usb_serial_driver edgeport_2port_device = {
79 .driver = { 107 .driver = {
80 .owner = THIS_MODULE, 108 .owner = THIS_MODULE,
81 .name = "edgeport_2", 109 .name = "edgeport_2",
82 }, 110 },
83 .description = "Edgeport 2 port adapter", 111 .description = "Edgeport 2 port adapter",
112 .usb_driver = &io_driver,
84 .id_table = edgeport_2port_id_table, 113 .id_table = edgeport_2port_id_table,
85 .num_interrupt_in = 1, 114 .num_interrupt_in = 1,
86 .num_bulk_in = 1, 115 .num_bulk_in = 1,
@@ -111,6 +140,7 @@ static struct usb_serial_driver edgeport_4port_device = {
111 .name = "edgeport_4", 140 .name = "edgeport_4",
112 }, 141 },
113 .description = "Edgeport 4 port adapter", 142 .description = "Edgeport 4 port adapter",
143 .usb_driver = &io_driver,
114 .id_table = edgeport_4port_id_table, 144 .id_table = edgeport_4port_id_table,
115 .num_interrupt_in = 1, 145 .num_interrupt_in = 1,
116 .num_bulk_in = 1, 146 .num_bulk_in = 1,
@@ -141,6 +171,7 @@ static struct usb_serial_driver edgeport_8port_device = {
141 .name = "edgeport_8", 171 .name = "edgeport_8",
142 }, 172 },
143 .description = "Edgeport 8 port adapter", 173 .description = "Edgeport 8 port adapter",
174 .usb_driver = &io_driver,
144 .id_table = edgeport_8port_id_table, 175 .id_table = edgeport_8port_id_table,
145 .num_interrupt_in = 1, 176 .num_interrupt_in = 1,
146 .num_bulk_in = 1, 177 .num_bulk_in = 1,
@@ -165,5 +196,35 @@ static struct usb_serial_driver edgeport_8port_device = {
165 .write_bulk_callback = edge_bulk_out_data_callback, 196 .write_bulk_callback = edge_bulk_out_data_callback,
166}; 197};
167 198
199static struct usb_serial_driver epic_device = {
200 .driver = {
201 .owner = THIS_MODULE,
202 .name = "epic",
203 },
204 .description = "EPiC device",
205 .id_table = Epic_port_id_table,
206 .num_interrupt_in = 1,
207 .num_bulk_in = 1,
208 .num_bulk_out = 1,
209 .num_ports = 1,
210 .open = edge_open,
211 .close = edge_close,
212 .throttle = edge_throttle,
213 .unthrottle = edge_unthrottle,
214 .attach = edge_startup,
215 .shutdown = edge_shutdown,
216 .ioctl = edge_ioctl,
217 .set_termios = edge_set_termios,
218 .tiocmget = edge_tiocmget,
219 .tiocmset = edge_tiocmset,
220 .write = edge_write,
221 .write_room = edge_write_room,
222 .chars_in_buffer = edge_chars_in_buffer,
223 .break_ctl = edge_break,
224 .read_int_callback = edge_interrupt_callback,
225 .read_bulk_callback = edge_bulk_in_callback,
226 .write_bulk_callback = edge_bulk_out_data_callback,
227};
228
168#endif 229#endif
169 230
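io_tables.h now pairs every usb_serial_driver with the module's usb_driver through the new .usb_driver field, so the USB core can attach dynamically added ids to the right struct while no_dynamic_id keeps them off the usb_driver itself. The skeleton of that pairing (example_* names are illustrative):

static struct usb_driver example_usb_driver = {
	.name          = "example",
	.probe         = usb_serial_probe,
	.disconnect    = usb_serial_disconnect,
	.id_table      = id_table_combined,
	.no_dynamic_id = 1,
};

static struct usb_serial_driver example_device = {
	.driver = {
		.owner = THIS_MODULE,
		.name  = "example",
	},
	.description = "Example adapter",
	.usb_driver  = &example_usb_driver,
	.id_table    = id_table,
	.num_ports   = 1,
};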
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index 980285c0233a..544098d2b775 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -2979,6 +2979,7 @@ static struct usb_serial_driver edgeport_1port_device = {
2979 .name = "edgeport_ti_1", 2979 .name = "edgeport_ti_1",
2980 }, 2980 },
2981 .description = "Edgeport TI 1 port adapter", 2981 .description = "Edgeport TI 1 port adapter",
2982 .usb_driver = &io_driver,
2982 .id_table = edgeport_1port_id_table, 2983 .id_table = edgeport_1port_id_table,
2983 .num_interrupt_in = 1, 2984 .num_interrupt_in = 1,
2984 .num_bulk_in = 1, 2985 .num_bulk_in = 1,
@@ -3009,6 +3010,7 @@ static struct usb_serial_driver edgeport_2port_device = {
3009 .name = "edgeport_ti_2", 3010 .name = "edgeport_ti_2",
3010 }, 3011 },
3011 .description = "Edgeport TI 2 port adapter", 3012 .description = "Edgeport TI 2 port adapter",
3013 .usb_driver = &io_driver,
3012 .id_table = edgeport_2port_id_table, 3014 .id_table = edgeport_2port_id_table,
3013 .num_interrupt_in = 1, 3015 .num_interrupt_in = 1,
3014 .num_bulk_in = 2, 3016 .num_bulk_in = 2,
diff --git a/drivers/usb/serial/io_usbvend.h b/drivers/usb/serial/io_usbvend.h
index f1804fd5a3dd..e57fa117e486 100644
--- a/drivers/usb/serial/io_usbvend.h
+++ b/drivers/usb/serial/io_usbvend.h
@@ -30,6 +30,7 @@
30 30
31#define USB_VENDOR_ID_ION 0x1608 // Our VID 31#define USB_VENDOR_ID_ION 0x1608 // Our VID
32#define USB_VENDOR_ID_TI 0x0451 // TI VID 32#define USB_VENDOR_ID_TI 0x0451 // TI VID
33#define USB_VENDOR_ID_AXIOHM 0x05D9 /* Axiohm VID */
33 34
34// 35//
35// Definitions of USB product IDs (PID) 36// Definitions of USB product IDs (PID)
@@ -334,6 +335,10 @@ struct edge_compatibility_bits
334 335
335}; 336};
336 337
338#define EDGE_COMPATIBILITY_MASK0 0x0001
339#define EDGE_COMPATIBILITY_MASK1 0x3FFF
340#define EDGE_COMPATIBILITY_MASK2 0x0001
341
337struct edge_compatibility_descriptor 342struct edge_compatibility_descriptor
338{ 343{
339 __u8 Length; // Descriptor Length (per USB spec) 344 __u8 Length; // Descriptor Length (per USB spec)
diff --git a/drivers/usb/serial/ipaq.c b/drivers/usb/serial/ipaq.c
index 42f757a5b876..a408184334ea 100644
--- a/drivers/usb/serial/ipaq.c
+++ b/drivers/usb/serial/ipaq.c
@@ -563,6 +563,7 @@ static struct usb_serial_driver ipaq_device = {
563 .name = "ipaq", 563 .name = "ipaq",
564 }, 564 },
565 .description = "PocketPC PDA", 565 .description = "PocketPC PDA",
566 .usb_driver = &ipaq_driver,
566 .id_table = ipaq_id_table, 567 .id_table = ipaq_id_table,
567 .num_interrupt_in = NUM_DONT_CARE, 568 .num_interrupt_in = NUM_DONT_CARE,
568 .num_bulk_in = 1, 569 .num_bulk_in = 1,
diff --git a/drivers/usb/serial/ipw.c b/drivers/usb/serial/ipw.c
index d3b9a351cef8..1bc586064c77 100644
--- a/drivers/usb/serial/ipw.c
+++ b/drivers/usb/serial/ipw.c
@@ -442,6 +442,7 @@ static struct usb_serial_driver ipw_device = {
442 .name = "ipw", 442 .name = "ipw",
443 }, 443 },
444 .description = "IPWireless converter", 444 .description = "IPWireless converter",
445 .usb_driver = &usb_ipw_driver,
445 .id_table = usb_ipw_ids, 446 .id_table = usb_ipw_ids,
446 .num_interrupt_in = NUM_DONT_CARE, 447 .num_interrupt_in = NUM_DONT_CARE,
447 .num_bulk_in = 1, 448 .num_bulk_in = 1,
diff --git a/drivers/usb/serial/ir-usb.c b/drivers/usb/serial/ir-usb.c
index 8fdf486e3465..9d847f69291c 100644
--- a/drivers/usb/serial/ir-usb.c
+++ b/drivers/usb/serial/ir-usb.c
@@ -138,6 +138,7 @@ static struct usb_serial_driver ir_device = {
138 .name = "ir-usb", 138 .name = "ir-usb",
139 }, 139 },
140 .description = "IR Dongle", 140 .description = "IR Dongle",
141 .usb_driver = &ir_driver,
141 .id_table = id_table, 142 .id_table = id_table,
142 .num_interrupt_in = 1, 143 .num_interrupt_in = 1,
143 .num_bulk_in = 1, 144 .num_bulk_in = 1,
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index 9d2fdfd6865f..e6966f12ed5a 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -1275,11 +1275,31 @@ static int keyspan_fake_startup (struct usb_serial *serial)
1275} 1275}
1276 1276
1277/* Helper functions used by keyspan_setup_urbs */ 1277/* Helper functions used by keyspan_setup_urbs */
1278static struct usb_endpoint_descriptor const *find_ep(struct usb_serial const *serial,
1279 int endpoint)
1280{
1281 struct usb_host_interface *iface_desc;
1282 struct usb_endpoint_descriptor *ep;
1283 int i;
1284
1285 iface_desc = serial->interface->cur_altsetting;
1286 for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
1287 ep = &iface_desc->endpoint[i].desc;
1288 if (ep->bEndpointAddress == endpoint)
1289 return ep;
1290 }
1291 dev_warn(&serial->interface->dev, "found no endpoint descriptor for "
1292 "endpoint %x\n", endpoint);
1293 return NULL;
1294}
1295
1278static struct urb *keyspan_setup_urb (struct usb_serial *serial, int endpoint, 1296static struct urb *keyspan_setup_urb (struct usb_serial *serial, int endpoint,
1279 int dir, void *ctx, char *buf, int len, 1297 int dir, void *ctx, char *buf, int len,
1280 void (*callback)(struct urb *)) 1298 void (*callback)(struct urb *))
1281{ 1299{
1282 struct urb *urb; 1300 struct urb *urb;
1301 struct usb_endpoint_descriptor const *ep_desc;
1302 char const *ep_type_name;
1283 1303
1284 if (endpoint == -1) 1304 if (endpoint == -1)
1285 return NULL; /* endpoint not needed */ 1305 return NULL; /* endpoint not needed */
@@ -1291,11 +1311,32 @@ static struct urb *keyspan_setup_urb (struct usb_serial *serial, int endpoint,
1291 return NULL; 1311 return NULL;
1292 } 1312 }
1293 1313
1294 /* Fill URB using supplied data. */ 1314 ep_desc = find_ep(serial, endpoint);
1295 usb_fill_bulk_urb(urb, serial->dev, 1315 if (!ep_desc) {
1296 usb_sndbulkpipe(serial->dev, endpoint) | dir, 1316 /* leak the urb, something's wrong and the callers don't care */
1297 buf, len, callback, ctx); 1317 return urb;
1318 }
1319 if (usb_endpoint_xfer_int(ep_desc)) {
1320 ep_type_name = "INT";
1321 usb_fill_int_urb(urb, serial->dev,
1322 usb_sndintpipe(serial->dev, endpoint) | dir,
1323 buf, len, callback, ctx,
1324 ep_desc->bInterval);
1325 } else if (usb_endpoint_xfer_bulk(ep_desc)) {
1326 ep_type_name = "BULK";
1327 usb_fill_bulk_urb(urb, serial->dev,
1328 usb_sndbulkpipe(serial->dev, endpoint) | dir,
1329 buf, len, callback, ctx);
1330 } else {
1331 dev_warn(&serial->interface->dev,
1332 "unsupported endpoint type %x\n",
1333 ep_desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK);
1334 usb_free_urb(urb);
1335 return NULL;
1336 }
1298 1337
1338 dbg("%s - using urb %p for %s endpoint %x",
1339 __func__, urb, ep_type_name, endpoint);
1299 return urb; 1340 return urb;
1300} 1341}
1301 1342
diff --git a/drivers/usb/serial/keyspan.h b/drivers/usb/serial/keyspan.h
index 6413d73c139c..c6830cbdc6df 100644
--- a/drivers/usb/serial/keyspan.h
+++ b/drivers/usb/serial/keyspan.h
@@ -229,7 +229,6 @@ struct ezusb_hex_record {
229#define keyspan_usa28_product_id 0x010f 229#define keyspan_usa28_product_id 0x010f
230#define keyspan_usa28x_product_id 0x0110 230#define keyspan_usa28x_product_id 0x0110
231#define keyspan_usa28xa_product_id 0x0115 231#define keyspan_usa28xa_product_id 0x0115
232#define keyspan_usa28xb_product_id 0x0110
233#define keyspan_usa49w_product_id 0x010a 232#define keyspan_usa49w_product_id 0x010a
234#define keyspan_usa49wlc_product_id 0x012a 233#define keyspan_usa49wlc_product_id 0x012a
235 234
@@ -511,7 +510,6 @@ static struct usb_device_id keyspan_ids_combined[] = {
511 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28_product_id) }, 510 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28_product_id) },
512 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28x_product_id) }, 511 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28x_product_id) },
513 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28xa_product_id) }, 512 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28xa_product_id) },
514 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28xb_product_id) },
515 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa49w_product_id)}, 513 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa49w_product_id)},
516 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa49wlc_product_id)}, 514 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa49wlc_product_id)},
517 { } /* Terminating entry */ 515 { } /* Terminating entry */
@@ -559,7 +557,6 @@ static struct usb_device_id keyspan_2port_ids[] = {
559 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28_product_id) }, 557 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28_product_id) },
560 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28x_product_id) }, 558 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28x_product_id) },
561 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28xa_product_id) }, 559 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28xa_product_id) },
562 { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28xb_product_id) },
563 { } /* Terminating entry */ 560 { } /* Terminating entry */
564}; 561};
565 562
@@ -576,6 +573,7 @@ static struct usb_serial_driver keyspan_pre_device = {
576 .name = "keyspan_no_firm", 573 .name = "keyspan_no_firm",
577 }, 574 },
578 .description = "Keyspan - (without firmware)", 575 .description = "Keyspan - (without firmware)",
576 .usb_driver = &keyspan_driver,
579 .id_table = keyspan_pre_ids, 577 .id_table = keyspan_pre_ids,
580 .num_interrupt_in = NUM_DONT_CARE, 578 .num_interrupt_in = NUM_DONT_CARE,
581 .num_bulk_in = NUM_DONT_CARE, 579 .num_bulk_in = NUM_DONT_CARE,
@@ -590,6 +588,7 @@ static struct usb_serial_driver keyspan_1port_device = {
590 .name = "keyspan_1", 588 .name = "keyspan_1",
591 }, 589 },
592 .description = "Keyspan 1 port adapter", 590 .description = "Keyspan 1 port adapter",
591 .usb_driver = &keyspan_driver,
593 .id_table = keyspan_1port_ids, 592 .id_table = keyspan_1port_ids,
594 .num_interrupt_in = NUM_DONT_CARE, 593 .num_interrupt_in = NUM_DONT_CARE,
595 .num_bulk_in = NUM_DONT_CARE, 594 .num_bulk_in = NUM_DONT_CARE,
@@ -617,6 +616,7 @@ static struct usb_serial_driver keyspan_2port_device = {
617 .name = "keyspan_2", 616 .name = "keyspan_2",
618 }, 617 },
619 .description = "Keyspan 2 port adapter", 618 .description = "Keyspan 2 port adapter",
619 .usb_driver = &keyspan_driver,
620 .id_table = keyspan_2port_ids, 620 .id_table = keyspan_2port_ids,
621 .num_interrupt_in = NUM_DONT_CARE, 621 .num_interrupt_in = NUM_DONT_CARE,
622 .num_bulk_in = NUM_DONT_CARE, 622 .num_bulk_in = NUM_DONT_CARE,
@@ -644,6 +644,7 @@ static struct usb_serial_driver keyspan_4port_device = {
644 .name = "keyspan_4", 644 .name = "keyspan_4",
645 }, 645 },
646 .description = "Keyspan 4 port adapter", 646 .description = "Keyspan 4 port adapter",
647 .usb_driver = &keyspan_driver,
647 .id_table = keyspan_4port_ids, 648 .id_table = keyspan_4port_ids,
648 .num_interrupt_in = NUM_DONT_CARE, 649 .num_interrupt_in = NUM_DONT_CARE,
649 .num_bulk_in = 5, 650 .num_bulk_in = 5,
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index 126b9703bbaf..da514cb785b3 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -793,6 +793,7 @@ static struct usb_serial_driver keyspan_pda_fake_device = {
793 .name = "keyspan_pda_pre", 793 .name = "keyspan_pda_pre",
794 }, 794 },
795 .description = "Keyspan PDA - (prerenumeration)", 795 .description = "Keyspan PDA - (prerenumeration)",
796 .usb_driver = &keyspan_pda_driver,
796 .id_table = id_table_fake, 797 .id_table = id_table_fake,
797 .num_interrupt_in = NUM_DONT_CARE, 798 .num_interrupt_in = NUM_DONT_CARE,
798 .num_bulk_in = NUM_DONT_CARE, 799 .num_bulk_in = NUM_DONT_CARE,
@@ -809,6 +810,7 @@ static struct usb_serial_driver xircom_pgs_fake_device = {
809 .name = "xircom_no_firm", 810 .name = "xircom_no_firm",
810 }, 811 },
811 .description = "Xircom / Entregra PGS - (prerenumeration)", 812 .description = "Xircom / Entregra PGS - (prerenumeration)",
813 .usb_driver = &keyspan_pda_driver,
812 .id_table = id_table_fake_xircom, 814 .id_table = id_table_fake_xircom,
813 .num_interrupt_in = NUM_DONT_CARE, 815 .num_interrupt_in = NUM_DONT_CARE,
814 .num_bulk_in = NUM_DONT_CARE, 816 .num_bulk_in = NUM_DONT_CARE,
@@ -824,6 +826,7 @@ static struct usb_serial_driver keyspan_pda_device = {
824 .name = "keyspan_pda", 826 .name = "keyspan_pda",
825 }, 827 },
826 .description = "Keyspan PDA", 828 .description = "Keyspan PDA",
829 .usb_driver = &keyspan_pda_driver,
827 .id_table = id_table_std, 830 .id_table = id_table_std,
828 .num_interrupt_in = 1, 831 .num_interrupt_in = 1,
829 .num_bulk_in = 0, 832 .num_bulk_in = 0,
diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
index 5c4b06a99ac0..b2097c45a235 100644
--- a/drivers/usb/serial/kl5kusb105.c
+++ b/drivers/usb/serial/kl5kusb105.c
@@ -124,6 +124,7 @@ static struct usb_serial_driver kl5kusb105d_device = {
124 .name = "kl5kusb105d", 124 .name = "kl5kusb105d",
125 }, 125 },
126 .description = "KL5KUSB105D / PalmConnect", 126 .description = "KL5KUSB105D / PalmConnect",
127 .usb_driver = &kl5kusb105d_driver,
127 .id_table = id_table, 128 .id_table = id_table,
128 .num_interrupt_in = 1, 129 .num_interrupt_in = 1,
129 .num_bulk_in = 1, 130 .num_bulk_in = 1,
diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
index 62bea0c923bd..0683b51f0932 100644
--- a/drivers/usb/serial/kobil_sct.c
+++ b/drivers/usb/serial/kobil_sct.c
@@ -110,6 +110,7 @@ static struct usb_serial_driver kobil_device = {
110 .name = "kobil", 110 .name = "kobil",
111 }, 111 },
112 .description = "KOBIL USB smart card terminal", 112 .description = "KOBIL USB smart card terminal",
113 .usb_driver = &kobil_driver,
113 .id_table = id_table, 114 .id_table = id_table,
114 .num_interrupt_in = NUM_DONT_CARE, 115 .num_interrupt_in = NUM_DONT_CARE,
115 .num_bulk_in = 0, 116 .num_bulk_in = 0,
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index 38b1d17e06ef..4cd839b1407f 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -137,6 +137,7 @@ static struct usb_serial_driver mct_u232_device = {
137 .name = "mct_u232", 137 .name = "mct_u232",
138 }, 138 },
139 .description = "MCT U232", 139 .description = "MCT U232",
140 .usb_driver = &mct_u232_driver,
140 .id_table = id_table_combined, 141 .id_table = id_table_combined,
141 .num_interrupt_in = 2, 142 .num_interrupt_in = 2,
142 .num_bulk_in = 0, 143 .num_bulk_in = 0,
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index e55f4ed81d7b..6109c6704a73 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -1605,12 +1605,21 @@ static void mos7720_shutdown(struct usb_serial *serial)
1605 usb_set_serial_data(serial, NULL); 1605 usb_set_serial_data(serial, NULL);
1606} 1606}
1607 1607
1608static struct usb_driver usb_driver = {
1609 .name = "moschip7720",
1610 .probe = usb_serial_probe,
1611 .disconnect = usb_serial_disconnect,
1612 .id_table = moschip_port_id_table,
1613 .no_dynamic_id = 1,
1614};
1615
1608static struct usb_serial_driver moschip7720_2port_driver = { 1616static struct usb_serial_driver moschip7720_2port_driver = {
1609 .driver = { 1617 .driver = {
1610 .owner = THIS_MODULE, 1618 .owner = THIS_MODULE,
1611 .name = "moschip7720", 1619 .name = "moschip7720",
1612 }, 1620 },
1613 .description = "Moschip 2 port adapter", 1621 .description = "Moschip 2 port adapter",
1622 .usb_driver = &usb_driver,
1614 .id_table = moschip_port_id_table, 1623 .id_table = moschip_port_id_table,
1615 .num_interrupt_in = 1, 1624 .num_interrupt_in = 1,
1616 .num_bulk_in = 2, 1625 .num_bulk_in = 2,
@@ -1631,13 +1640,6 @@ static struct usb_serial_driver moschip7720_2port_driver = {
1631 .read_bulk_callback = mos7720_bulk_in_callback, 1640 .read_bulk_callback = mos7720_bulk_in_callback,
1632}; 1641};
1633 1642
1634static struct usb_driver usb_driver = {
1635 .name = "moschip7720",
1636 .probe = usb_serial_probe,
1637 .disconnect = usb_serial_disconnect,
1638 .id_table = moschip_port_id_table,
1639};
1640
1641static int __init moschip7720_init(void) 1643static int __init moschip7720_init(void)
1642{ 1644{
1643 int retval; 1645 int retval;
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 83f661403ba1..b2264a87617b 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -2834,12 +2834,21 @@ static void mos7840_shutdown(struct usb_serial *serial)
2834 2834
2835} 2835}
2836 2836
2837static struct usb_driver io_driver = {
2838 .name = "mos7840",
2839 .probe = usb_serial_probe,
2840 .disconnect = usb_serial_disconnect,
2841 .id_table = moschip_id_table_combined,
2842 .no_dynamic_id = 1,
2843};
2844
2837static struct usb_serial_driver moschip7840_4port_device = { 2845static struct usb_serial_driver moschip7840_4port_device = {
2838 .driver = { 2846 .driver = {
2839 .owner = THIS_MODULE, 2847 .owner = THIS_MODULE,
2840 .name = "mos7840", 2848 .name = "mos7840",
2841 }, 2849 },
2842 .description = DRIVER_DESC, 2850 .description = DRIVER_DESC,
2851 .usb_driver = &io_driver,
2843 .id_table = moschip_port_id_table, 2852 .id_table = moschip_port_id_table,
2844 .num_interrupt_in = 1, //NUM_DONT_CARE,//1, 2853 .num_interrupt_in = 1, //NUM_DONT_CARE,//1,
2845#ifdef check 2854#ifdef check
@@ -2869,13 +2878,6 @@ static struct usb_serial_driver moschip7840_4port_device = {
2869 .read_int_callback = mos7840_interrupt_callback, 2878 .read_int_callback = mos7840_interrupt_callback,
2870}; 2879};
2871 2880
2872static struct usb_driver io_driver = {
2873 .name = "mos7840",
2874 .probe = usb_serial_probe,
2875 .disconnect = usb_serial_disconnect,
2876 .id_table = moschip_id_table_combined,
2877};
2878
2879/**************************************************************************** 2881/****************************************************************************
2880 * moschip7840_init 2882 * moschip7840_init
2881 * This is called by the module subsystem, or on startup to initialize us 2883 * This is called by the module subsystem, or on startup to initialize us
diff --git a/drivers/usb/serial/navman.c b/drivers/usb/serial/navman.c
index 054abee81652..90701111d746 100644
--- a/drivers/usb/serial/navman.c
+++ b/drivers/usb/serial/navman.c
@@ -119,6 +119,7 @@ static struct usb_serial_driver navman_device = {
119 .name = "navman", 119 .name = "navman",
120 }, 120 },
121 .id_table = id_table, 121 .id_table = id_table,
122 .usb_driver = &navman_driver,
122 .num_interrupt_in = NUM_DONT_CARE, 123 .num_interrupt_in = NUM_DONT_CARE,
123 .num_bulk_in = NUM_DONT_CARE, 124 .num_bulk_in = NUM_DONT_CARE,
124 .num_bulk_out = NUM_DONT_CARE, 125 .num_bulk_out = NUM_DONT_CARE,
diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c
index bc91d3b726fc..0216ac12a27d 100644
--- a/drivers/usb/serial/omninet.c
+++ b/drivers/usb/serial/omninet.c
@@ -93,6 +93,7 @@ static struct usb_serial_driver zyxel_omninet_device = {
93 .name = "omninet", 93 .name = "omninet",
94 }, 94 },
95 .description = "ZyXEL - omni.net lcd plus usb", 95 .description = "ZyXEL - omni.net lcd plus usb",
96 .usb_driver = &omninet_driver,
96 .id_table = id_table, 97 .id_table = id_table,
97 .num_interrupt_in = 1, 98 .num_interrupt_in = 1,
98 .num_bulk_in = 1, 99 .num_bulk_in = 1,
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 0fed43a96871..ced9f32b29d9 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -135,6 +135,7 @@ static struct usb_serial_driver option_1port_device = {
135 .name = "option1", 135 .name = "option1",
136 }, 136 },
137 .description = "GSM modem (1-port)", 137 .description = "GSM modem (1-port)",
138 .usb_driver = &option_driver,
138 .id_table = option_ids1, 139 .id_table = option_ids1,
139 .num_interrupt_in = NUM_DONT_CARE, 140 .num_interrupt_in = NUM_DONT_CARE,
140 .num_bulk_in = NUM_DONT_CARE, 141 .num_bulk_in = NUM_DONT_CARE,
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 5dc2ac9afa90..6c083d4e2c9b 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -1118,6 +1118,7 @@ static struct usb_serial_driver pl2303_device = {
1118 .name = "pl2303", 1118 .name = "pl2303",
1119 }, 1119 },
1120 .id_table = id_table, 1120 .id_table = id_table,
1121 .usb_driver = &pl2303_driver,
1121 .num_interrupt_in = NUM_DONT_CARE, 1122 .num_interrupt_in = NUM_DONT_CARE,
1122 .num_bulk_in = 1, 1123 .num_bulk_in = 1,
1123 .num_bulk_out = 1, 1124 .num_bulk_out = 1,
diff --git a/drivers/usb/serial/safe_serial.c b/drivers/usb/serial/safe_serial.c
index 30b7ebc8d45d..5a03a3fc9386 100644
--- a/drivers/usb/serial/safe_serial.c
+++ b/drivers/usb/serial/safe_serial.c
@@ -402,6 +402,7 @@ static struct usb_serial_driver safe_device = {
402 .name = "safe_serial", 402 .name = "safe_serial",
403 }, 403 },
404 .id_table = id_table, 404 .id_table = id_table,
405 .usb_driver = &safe_driver,
405 .num_interrupt_in = NUM_DONT_CARE, 406 .num_interrupt_in = NUM_DONT_CARE,
406 .num_bulk_in = NUM_DONT_CARE, 407 .num_bulk_in = NUM_DONT_CARE,
407 .num_bulk_out = NUM_DONT_CARE, 408 .num_bulk_out = NUM_DONT_CARE,
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 6d8e91e00ecf..ecedd833818d 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -13,10 +13,9 @@
13 Portions based on the option driver by Matthias Urlichs <smurf@smurf.noris.de> 13 Portions based on the option driver by Matthias Urlichs <smurf@smurf.noris.de>
14 who based his on the Keyspan driver by Hugh Blemings <hugh@blemings.org> 14 who based his on the Keyspan driver by Hugh Blemings <hugh@blemings.org>
15 15
16 History:
17*/ 16*/
18 17
19#define DRIVER_VERSION "v.1.0.5" 18#define DRIVER_VERSION "v.1.0.6"
20#define DRIVER_AUTHOR "Kevin Lloyd <linux@sierrawireless.com>" 19#define DRIVER_AUTHOR "Kevin Lloyd <linux@sierrawireless.com>"
21#define DRIVER_DESC "USB Driver for Sierra Wireless USB modems" 20#define DRIVER_DESC "USB Driver for Sierra Wireless USB modems"
22 21
@@ -31,14 +30,15 @@
31 30
32 31
33static struct usb_device_id id_table [] = { 32static struct usb_device_id id_table [] = {
33 { USB_DEVICE(0x1199, 0x0017) }, /* Sierra Wireless EM5625 */
34 { USB_DEVICE(0x1199, 0x0018) }, /* Sierra Wireless MC5720 */ 34 { USB_DEVICE(0x1199, 0x0018) }, /* Sierra Wireless MC5720 */
35 { USB_DEVICE(0x1199, 0x0218) }, /* Sierra Wireless MC5720 */
35 { USB_DEVICE(0x1199, 0x0020) }, /* Sierra Wireless MC5725 */ 36 { USB_DEVICE(0x1199, 0x0020) }, /* Sierra Wireless MC5725 */
36 { USB_DEVICE(0x1199, 0x0017) }, /* Sierra Wireless EM5625 */
37 { USB_DEVICE(0x1199, 0x0019) }, /* Sierra Wireless AirCard 595 */ 37 { USB_DEVICE(0x1199, 0x0019) }, /* Sierra Wireless AirCard 595 */
38 { USB_DEVICE(0x1199, 0x0218) }, /* Sierra Wireless MC5720 */ 38 { USB_DEVICE(0x1199, 0x0021) }, /* Sierra Wireless AirCard 597E */
39 { USB_DEVICE(0x1199, 0x6802) }, /* Sierra Wireless MC8755 */ 39 { USB_DEVICE(0x1199, 0x6802) }, /* Sierra Wireless MC8755 */
40 { USB_DEVICE(0x1199, 0x6804) }, /* Sierra Wireless MC8755 */
40 { USB_DEVICE(0x1199, 0x6803) }, /* Sierra Wireless MC8765 */ 41 { USB_DEVICE(0x1199, 0x6803) }, /* Sierra Wireless MC8765 */
41 { USB_DEVICE(0x1199, 0x6804) }, /* Sierra Wireless MC8755 for Europe */
42 { USB_DEVICE(0x1199, 0x6812) }, /* Sierra Wireless MC8775 */ 42 { USB_DEVICE(0x1199, 0x6812) }, /* Sierra Wireless MC8775 */
43 { USB_DEVICE(0x1199, 0x6820) }, /* Sierra Wireless AirCard 875 */ 43 { USB_DEVICE(0x1199, 0x6820) }, /* Sierra Wireless AirCard 875 */
44 44
@@ -55,14 +55,15 @@ static struct usb_device_id id_table_1port [] = {
55}; 55};
56 56
57static struct usb_device_id id_table_3port [] = { 57static struct usb_device_id id_table_3port [] = {
58 { USB_DEVICE(0x1199, 0x0017) }, /* Sierra Wireless EM5625 */
58 { USB_DEVICE(0x1199, 0x0018) }, /* Sierra Wireless MC5720 */ 59 { USB_DEVICE(0x1199, 0x0018) }, /* Sierra Wireless MC5720 */
60 { USB_DEVICE(0x1199, 0x0218) }, /* Sierra Wireless MC5720 */
59 { USB_DEVICE(0x1199, 0x0020) }, /* Sierra Wireless MC5725 */ 61 { USB_DEVICE(0x1199, 0x0020) }, /* Sierra Wireless MC5725 */
60 { USB_DEVICE(0x1199, 0x0017) }, /* Sierra Wireless EM5625 */
61 { USB_DEVICE(0x1199, 0x0019) }, /* Sierra Wireless AirCard 595 */ 62 { USB_DEVICE(0x1199, 0x0019) }, /* Sierra Wireless AirCard 595 */
62 { USB_DEVICE(0x1199, 0x0218) }, /* Sierra Wireless MC5720 */ 63 { USB_DEVICE(0x1199, 0x0021) }, /* Sierra Wireless AirCard 597E */
63 { USB_DEVICE(0x1199, 0x6802) }, /* Sierra Wireless MC8755 */ 64 { USB_DEVICE(0x1199, 0x6802) }, /* Sierra Wireless MC8755 */
65 { USB_DEVICE(0x1199, 0x6804) }, /* Sierra Wireless MC8755 */
64 { USB_DEVICE(0x1199, 0x6803) }, /* Sierra Wireless MC8765 */ 66 { USB_DEVICE(0x1199, 0x6803) }, /* Sierra Wireless MC8765 */
65 { USB_DEVICE(0x1199, 0x6804) }, /* Sierra Wireless MC8755 for Europe */
66 { USB_DEVICE(0x1199, 0x6812) }, /* Sierra Wireless MC8775 */ 67 { USB_DEVICE(0x1199, 0x6812) }, /* Sierra Wireless MC8775 */
67 { USB_DEVICE(0x1199, 0x6820) }, /* Sierra Wireless AirCard 875 */ 68 { USB_DEVICE(0x1199, 0x6820) }, /* Sierra Wireless AirCard 875 */
68 { } 69 { }
@@ -81,7 +82,7 @@ static int debug;
81 82
82/* per port private data */ 83/* per port private data */
83#define N_IN_URB 4 84#define N_IN_URB 4
84#define N_OUT_URB 1 85#define N_OUT_URB 4
85#define IN_BUFLEN 4096 86#define IN_BUFLEN 4096
86#define OUT_BUFLEN 128 87#define OUT_BUFLEN 128
87 88
@@ -396,6 +397,8 @@ static int sierra_open(struct usb_serial_port *port, struct file *filp)
396 struct usb_serial *serial = port->serial; 397 struct usb_serial *serial = port->serial;
397 int i, err; 398 int i, err;
398 struct urb *urb; 399 struct urb *urb;
400 int result;
401 __u16 set_mode_dzero = 0x0000;
399 402
400 portdata = usb_get_serial_port_data(port); 403 portdata = usb_get_serial_port_data(port);
401 404
@@ -442,6 +445,12 @@ static int sierra_open(struct usb_serial_port *port, struct file *filp)
442 445
443 port->tty->low_latency = 1; 446 port->tty->low_latency = 1;
444 447
448 /* set mode to D0 */
449 result = usb_control_msg(serial->dev,
450 usb_rcvctrlpipe(serial->dev, 0),
451 0x00, 0x40, set_mode_dzero, 0, NULL,
452 0, USB_CTRL_SET_TIMEOUT);
453
445 sierra_send_setup(port); 454 sierra_send_setup(port);
446 455
447 return (0); 456 return (0);
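sierra_open() now nudges the modem into power state D0 with a vendor control request before the usual setup message. The same call with the magic numbers named (the constant names are editorial; the values, including the pairing of the host-to-device request type 0x40 with usb_rcvctrlpipe(), are exactly as in the patch):

#define SIERRA_SET_POWER_STATE	0x00	/* bRequest: set power state */
#define SIERRA_VENDOR_REQUEST	0x40	/* bmRequestType: vendor, host-to-device */

result = usb_control_msg(serial->dev,
			 usb_rcvctrlpipe(serial->dev, 0),
			 SIERRA_SET_POWER_STATE, SIERRA_VENDOR_REQUEST,
			 set_mode_dzero,	/* wValue: D0 */
			 0, NULL, 0, USB_CTRL_SET_TIMEOUT);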
@@ -614,6 +623,7 @@ static struct usb_serial_driver sierra_1port_device = {
614 }, 623 },
615 .description = "Sierra USB modem (1 port)", 624 .description = "Sierra USB modem (1 port)",
616 .id_table = id_table_1port, 625 .id_table = id_table_1port,
626 .usb_driver = &sierra_driver,
617 .num_interrupt_in = NUM_DONT_CARE, 627 .num_interrupt_in = NUM_DONT_CARE,
618 .num_bulk_in = 1, 628 .num_bulk_in = 1,
619 .num_bulk_out = 1, 629 .num_bulk_out = 1,
@@ -642,6 +652,7 @@ static struct usb_serial_driver sierra_3port_device = {
642 }, 652 },
643 .description = "Sierra USB modem (3 port)", 653 .description = "Sierra USB modem (3 port)",
644 .id_table = id_table_3port, 654 .id_table = id_table_3port,
655 .usb_driver = &sierra_driver,
645 .num_interrupt_in = NUM_DONT_CARE, 656 .num_interrupt_in = NUM_DONT_CARE,
646 .num_bulk_in = 3, 657 .num_bulk_in = 3,
647 .num_bulk_out = 3, 658 .num_bulk_out = 3,
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 83189005c6fb..4203e2b1a761 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -262,6 +262,7 @@ static struct usb_serial_driver ti_1port_device = {
262 .name = "ti_usb_3410_5052_1", 262 .name = "ti_usb_3410_5052_1",
263 }, 263 },
264 .description = "TI USB 3410 1 port adapter", 264 .description = "TI USB 3410 1 port adapter",
265 .usb_driver = &ti_usb_driver,
265 .id_table = ti_id_table_3410, 266 .id_table = ti_id_table_3410,
266 .num_interrupt_in = 1, 267 .num_interrupt_in = 1,
267 .num_bulk_in = 1, 268 .num_bulk_in = 1,
@@ -292,6 +293,7 @@ static struct usb_serial_driver ti_2port_device = {
292 .name = "ti_usb_3410_5052_2", 293 .name = "ti_usb_3410_5052_2",
293 }, 294 },
294 .description = "TI USB 5052 2 port adapter", 295 .description = "TI USB 5052 2 port adapter",
296 .usb_driver = &ti_usb_driver,
295 .id_table = ti_id_table_5052, 297 .id_table = ti_id_table_5052,
296 .num_interrupt_in = 1, 298 .num_interrupt_in = 1,
297 .num_bulk_in = 2, 299 .num_bulk_in = 2,
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 716f6806cc89..6bf22a28adb8 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -59,14 +59,19 @@ static struct usb_driver usb_serial_driver = {
59 59
60static int debug; 60static int debug;
61static struct usb_serial *serial_table[SERIAL_TTY_MINORS]; /* initially all NULL */ 61static struct usb_serial *serial_table[SERIAL_TTY_MINORS]; /* initially all NULL */
62static spinlock_t table_lock;
62static LIST_HEAD(usb_serial_driver_list); 63static LIST_HEAD(usb_serial_driver_list);
63 64
64struct usb_serial *usb_serial_get_by_index(unsigned index) 65struct usb_serial *usb_serial_get_by_index(unsigned index)
65{ 66{
66 struct usb_serial *serial = serial_table[index]; 67 struct usb_serial *serial;
68
69 spin_lock(&table_lock);
70 serial = serial_table[index];
67 71
68 if (serial) 72 if (serial)
69 kref_get(&serial->kref); 73 kref_get(&serial->kref);
74 spin_unlock(&table_lock);
70 return serial; 75 return serial;
71} 76}
72 77
@@ -78,6 +83,7 @@ static struct usb_serial *get_free_serial (struct usb_serial *serial, int num_po
78 dbg("%s %d", __FUNCTION__, num_ports); 83 dbg("%s %d", __FUNCTION__, num_ports);
79 84
80 *minor = 0; 85 *minor = 0;
86 spin_lock(&table_lock);
81 for (i = 0; i < SERIAL_TTY_MINORS; ++i) { 87 for (i = 0; i < SERIAL_TTY_MINORS; ++i) {
82 if (serial_table[i]) 88 if (serial_table[i])
83 continue; 89 continue;
@@ -96,8 +102,10 @@ static struct usb_serial *get_free_serial (struct usb_serial *serial, int num_po
96 dbg("%s - minor base = %d", __FUNCTION__, *minor); 102 dbg("%s - minor base = %d", __FUNCTION__, *minor);
97 for (i = *minor; (i < (*minor + num_ports)) && (i < SERIAL_TTY_MINORS); ++i) 103 for (i = *minor; (i < (*minor + num_ports)) && (i < SERIAL_TTY_MINORS); ++i)
98 serial_table[i] = serial; 104 serial_table[i] = serial;
105 spin_unlock(&table_lock);
99 return serial; 106 return serial;
100 } 107 }
108 spin_unlock(&table_lock);
101 return NULL; 109 return NULL;
102} 110}
103 111
@@ -110,9 +118,11 @@ static void return_serial(struct usb_serial *serial)
110 if (serial == NULL) 118 if (serial == NULL)
111 return; 119 return;
112 120
121 spin_lock(&table_lock);
113 for (i = 0; i < serial->num_ports; ++i) { 122 for (i = 0; i < serial->num_ports; ++i) {
114 serial_table[serial->minor + i] = NULL; 123 serial_table[serial->minor + i] = NULL;
115 } 124 }
125 spin_unlock(&table_lock);
116} 126}
117 127
118static void destroy_serial(struct kref *kref) 128static void destroy_serial(struct kref *kref)
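The new table_lock closes a lookup/free race on serial_table: the kref must be taken while the lock still pins the entry, and only then may the lock drop. Generalized (table and obj stand in for serial_table and serial):

spin_lock(&table_lock);
obj = table[index];		/* the entry cannot be recycled here... */
if (obj)
	kref_get(&obj->kref);	/* ...so the reference is taken safely */
spin_unlock(&table_lock);
/* a non-NULL obj is now owned by the caller, to be kref_put() later */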
@@ -271,7 +281,7 @@ static void serial_close(struct tty_struct *tty, struct file * filp)
271static int serial_write (struct tty_struct * tty, const unsigned char *buf, int count) 281static int serial_write (struct tty_struct * tty, const unsigned char *buf, int count)
272{ 282{
273 struct usb_serial_port *port = tty->driver_data; 283 struct usb_serial_port *port = tty->driver_data;
274 int retval = -EINVAL; 284 int retval = -ENODEV;
275 285
276 if (!port || port->serial->dev->state == USB_STATE_NOTATTACHED) 286 if (!port || port->serial->dev->state == USB_STATE_NOTATTACHED)
277 goto exit; 287 goto exit;
@@ -279,6 +289,7 @@ static int serial_write (struct tty_struct * tty, const unsigned char *buf, int
279 dbg("%s - port %d, %d byte(s)", __FUNCTION__, port->number, count); 289 dbg("%s - port %d, %d byte(s)", __FUNCTION__, port->number, count);
280 290
281 if (!port->open_count) { 291 if (!port->open_count) {
292 retval = -EINVAL;
282 dbg("%s - port not opened", __FUNCTION__); 293 dbg("%s - port not opened", __FUNCTION__);
283 goto exit; 294 goto exit;
284 } 295 }
@@ -559,15 +570,20 @@ static void port_release(struct device *dev)
559 port_free(port); 570 port_free(port);
560} 571}
561 572
562static void port_free(struct usb_serial_port *port) 573static void kill_traffic(struct usb_serial_port *port)
563{ 574{
564 usb_kill_urb(port->read_urb); 575 usb_kill_urb(port->read_urb);
565 usb_free_urb(port->read_urb);
566 usb_kill_urb(port->write_urb); 576 usb_kill_urb(port->write_urb);
567 usb_free_urb(port->write_urb);
568 usb_kill_urb(port->interrupt_in_urb); 577 usb_kill_urb(port->interrupt_in_urb);
569 usb_free_urb(port->interrupt_in_urb);
570 usb_kill_urb(port->interrupt_out_urb); 578 usb_kill_urb(port->interrupt_out_urb);
579}
580
581static void port_free(struct usb_serial_port *port)
582{
583 kill_traffic(port);
584 usb_free_urb(port->read_urb);
585 usb_free_urb(port->write_urb);
586 usb_free_urb(port->interrupt_in_urb);
571 usb_free_urb(port->interrupt_out_urb); 587 usb_free_urb(port->interrupt_out_urb);
572 kfree(port->bulk_in_buffer); 588 kfree(port->bulk_in_buffer);
573 kfree(port->bulk_out_buffer); 589 kfree(port->bulk_out_buffer);
@@ -596,6 +612,39 @@ static struct usb_serial * create_serial (struct usb_device *dev,
596 return serial; 612 return serial;
597} 613}
598 614
615static const struct usb_device_id *match_dynamic_id(struct usb_interface *intf,
616 struct usb_serial_driver *drv)
617{
618 struct usb_dynid *dynid;
619
620 spin_lock(&drv->dynids.lock);
621 list_for_each_entry(dynid, &drv->dynids.list, node) {
622 if (usb_match_one_id(intf, &dynid->id)) {
623 spin_unlock(&drv->dynids.lock);
624 return &dynid->id;
625 }
626 }
627 spin_unlock(&drv->dynids.lock);
628 return NULL;
629}
630
631static const struct usb_device_id *get_iface_id(struct usb_serial_driver *drv,
632 struct usb_interface *intf)
633{
634 const struct usb_device_id *id;
635
636 id = usb_match_id(intf, drv->id_table);
637 if (id) {
638 dbg("static descriptor matches");
639 goto exit;
640 }
641 id = match_dynamic_id(intf, drv);
642 if (id)
643 dbg("dynamic descriptor matches");
644exit:
645 return id;
646}
647
599static struct usb_serial_driver *search_serial_device(struct usb_interface *iface) 648static struct usb_serial_driver *search_serial_device(struct usb_interface *iface)
600{ 649{
601 struct list_head *p; 650 struct list_head *p;
@@ -605,11 +654,9 @@ static struct usb_serial_driver *search_serial_device(struct usb_interface *ifac
605 /* Check if the usb id matches a known device */ 654 /* Check if the usb id matches a known device */
606 list_for_each(p, &usb_serial_driver_list) { 655 list_for_each(p, &usb_serial_driver_list) {
607 t = list_entry(p, struct usb_serial_driver, driver_list); 656 t = list_entry(p, struct usb_serial_driver, driver_list);
608 id = usb_match_id(iface, t->id_table); 657 id = get_iface_id(t, iface);
609 if (id != NULL) { 658 if (id)
610 dbg("descriptor matches");
611 return t; 659 return t;
612 }
613 } 660 }
614 661
615 return NULL; 662 return NULL;
@@ -639,14 +686,17 @@ int usb_serial_probe(struct usb_interface *interface,
639 int num_ports = 0; 686 int num_ports = 0;
640 int max_endpoints; 687 int max_endpoints;
641 688
689 lock_kernel(); /* guard against unloading a serial driver module */
642 type = search_serial_device(interface); 690 type = search_serial_device(interface);
643 if (!type) { 691 if (!type) {
692 unlock_kernel();
644 dbg("none matched"); 693 dbg("none matched");
645 return -ENODEV; 694 return -ENODEV;
646 } 695 }
647 696
648 serial = create_serial (dev, interface, type); 697 serial = create_serial (dev, interface, type);
649 if (!serial) { 698 if (!serial) {
699 unlock_kernel();
650 dev_err(&interface->dev, "%s - out of memory\n", __FUNCTION__); 700 dev_err(&interface->dev, "%s - out of memory\n", __FUNCTION__);
651 return -ENOMEM; 701 return -ENOMEM;
652 } 702 }
@@ -656,16 +706,18 @@ int usb_serial_probe(struct usb_interface *interface,
656 const struct usb_device_id *id; 706 const struct usb_device_id *id;
657 707
658 if (!try_module_get(type->driver.owner)) { 708 if (!try_module_get(type->driver.owner)) {
709 unlock_kernel();
659 dev_err(&interface->dev, "module get failed, exiting\n"); 710 dev_err(&interface->dev, "module get failed, exiting\n");
660 kfree (serial); 711 kfree (serial);
661 return -EIO; 712 return -EIO;
662 } 713 }
663 714
664 id = usb_match_id(interface, type->id_table); 715 id = get_iface_id(type, interface);
665 retval = type->probe(serial, id); 716 retval = type->probe(serial, id);
666 module_put(type->driver.owner); 717 module_put(type->driver.owner);
667 718
668 if (retval) { 719 if (retval) {
720 unlock_kernel();
669 dbg ("sub driver rejected device"); 721 dbg ("sub driver rejected device");
670 kfree (serial); 722 kfree (serial);
671 return retval; 723 return retval;
@@ -735,6 +787,7 @@ int usb_serial_probe(struct usb_interface *interface,
735 * properly during a later invocation of usb_serial_probe 787 * properly during a later invocation of usb_serial_probe
736 */ 788 */
737 if (num_bulk_in == 0 || num_bulk_out == 0) { 789 if (num_bulk_in == 0 || num_bulk_out == 0) {
790 unlock_kernel();
738 dev_info(&interface->dev, "PL-2303 hack: descriptors matched but endpoints did not\n"); 791 dev_info(&interface->dev, "PL-2303 hack: descriptors matched but endpoints did not\n");
739 kfree (serial); 792 kfree (serial);
740 return -ENODEV; 793 return -ENODEV;
@@ -750,6 +803,7 @@ int usb_serial_probe(struct usb_interface *interface,
750 if (type == &usb_serial_generic_device) { 803 if (type == &usb_serial_generic_device) {
751 num_ports = num_bulk_out; 804 num_ports = num_bulk_out;
752 if (num_ports == 0) { 805 if (num_ports == 0) {
806 unlock_kernel();
753 dev_err(&interface->dev, "Generic device with no bulk out, not allowed.\n"); 807 dev_err(&interface->dev, "Generic device with no bulk out, not allowed.\n");
754 kfree (serial); 808 kfree (serial);
755 return -EIO; 809 return -EIO;
@@ -760,6 +814,7 @@ int usb_serial_probe(struct usb_interface *interface,
760 /* if this device type has a calc_num_ports function, call it */ 814 /* if this device type has a calc_num_ports function, call it */
761 if (type->calc_num_ports) { 815 if (type->calc_num_ports) {
762 if (!try_module_get(type->driver.owner)) { 816 if (!try_module_get(type->driver.owner)) {
817 unlock_kernel();
763 dev_err(&interface->dev, "module get failed, exiting\n"); 818 dev_err(&interface->dev, "module get failed, exiting\n");
764 kfree (serial); 819 kfree (serial);
765 return -EIO; 820 return -EIO;
@@ -771,12 +826,6 @@ int usb_serial_probe(struct usb_interface *interface,
771 num_ports = type->num_ports; 826 num_ports = type->num_ports;
772 } 827 }
773 828
774 if (get_free_serial (serial, num_ports, &minor) == NULL) {
775 dev_err(&interface->dev, "No more free serial devices\n");
776 kfree (serial);
777 return -ENOMEM;
778 }
779
780 serial->minor = minor; 829 serial->minor = minor;
781 serial->num_ports = num_ports; 830 serial->num_ports = num_ports;
782 serial->num_bulk_in = num_bulk_in; 831 serial->num_bulk_in = num_bulk_in;
@@ -791,6 +840,8 @@ int usb_serial_probe(struct usb_interface *interface,
791 max_endpoints = max(max_endpoints, num_interrupt_out); 840 max_endpoints = max(max_endpoints, num_interrupt_out);
792 max_endpoints = max(max_endpoints, (int)serial->num_ports); 841 max_endpoints = max(max_endpoints, (int)serial->num_ports);
793 serial->num_port_pointers = max_endpoints; 842 serial->num_port_pointers = max_endpoints;
843 unlock_kernel();
844
794 dbg("%s - setting up %d port structures for this device", __FUNCTION__, max_endpoints); 845 dbg("%s - setting up %d port structures for this device", __FUNCTION__, max_endpoints);
795 for (i = 0; i < max_endpoints; ++i) { 846 for (i = 0; i < max_endpoints; ++i) {
796 port = kzalloc(sizeof(struct usb_serial_port), GFP_KERNEL); 847 port = kzalloc(sizeof(struct usb_serial_port), GFP_KERNEL);
@@ -925,6 +976,11 @@ int usb_serial_probe(struct usb_interface *interface,
925 } 976 }
926 } 977 }
927 978
979 if (get_free_serial (serial, num_ports, &minor) == NULL) {
980 dev_err(&interface->dev, "No more free serial devices\n");
981 goto probe_error;
982 }
983
928 /* register all of the individual ports with the driver core */ 984 /* register all of the individual ports with the driver core */
929 for (i = 0; i < num_ports; ++i) { 985 for (i = 0; i < num_ports; ++i) {
930 port = serial->port[i]; 986 port = serial->port[i];
@@ -1002,8 +1058,11 @@ void usb_serial_disconnect(struct usb_interface *interface)
1002 if (serial) { 1058 if (serial) {
1003 for (i = 0; i < serial->num_ports; ++i) { 1059 for (i = 0; i < serial->num_ports; ++i) {
1004 port = serial->port[i]; 1060 port = serial->port[i];
1005 if (port && port->tty) 1061 if (port) {
1006 tty_hangup(port->tty); 1062 if (port->tty)
1063 tty_hangup(port->tty);
1064 kill_traffic(port);
1065 }
1007 } 1066 }
1008 /* let the last holder of this object 1067 /* let the last holder of this object
1009 * cause it to be cleaned up */ 1068 * cause it to be cleaned up */
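Disconnect now does two things per port: hang up the tty so userspace sees the loss, then stop any I/O still in flight. kill_traffic() itself is not shown in these hunks; a plausible minimal form, assuming the conventional URB fields of struct usb_serial_port (an assumption, not taken from this diff):

/* Assumed shape of kill_traffic(): cancel every URB the port keeps in
 * flight so no completion handler runs after disconnect.  usb_kill_urb()
 * tolerates NULL, so unallocated URBs are harmless here. */
static void kill_traffic(struct usb_serial_port *port)
{
	usb_kill_urb(port->read_urb);
	usb_kill_urb(port->write_urb);
	usb_kill_urb(port->interrupt_in_urb);
	usb_kill_urb(port->interrupt_out_urb);
}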
@@ -1040,6 +1099,7 @@ static int __init usb_serial_init(void)
1040 return -ENOMEM; 1099 return -ENOMEM;
1041 1100
1042 /* Initialize our global data */ 1101 /* Initialize our global data */
1102 spin_lock_init(&table_lock);
1043 for (i = 0; i < SERIAL_TTY_MINORS; ++i) { 1103 for (i = 0; i < SERIAL_TTY_MINORS; ++i) {
1044 serial_table[i] = NULL; 1104 serial_table[i] = NULL;
1045 } 1105 }
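Initializing table_lock here suggests serial_table[] access is now spinlock-protected rather than relying on the BKL. A sketch of the lookup pattern that enables, assuming table_lock guards serial_table (implied by the hunk, not shown):

/* Sketch, assuming table_lock protects the minor-to-serial table. */
static struct usb_serial *lookup_serial_sketch(unsigned int minor)
{
	struct usb_serial *serial;

	spin_lock(&table_lock);
	serial = serial_table[minor];
	spin_unlock(&table_lock);
	return serial;
}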
@@ -1138,7 +1198,7 @@ static void fixup_generic(struct usb_serial_driver *device)
1138 set_to_generic_if_null(device, shutdown); 1198 set_to_generic_if_null(device, shutdown);
1139} 1199}
1140 1200
1141int usb_serial_register(struct usb_serial_driver *driver) 1201int usb_serial_register(struct usb_serial_driver *driver) /* must be called with BKL held */
1142{ 1202{
1143 int retval; 1203 int retval;
1144 1204
@@ -1162,7 +1222,7 @@ int usb_serial_register(struct usb_serial_driver *driver)
1162} 1222}
1163 1223
1164 1224
1165void usb_serial_deregister(struct usb_serial_driver *device) 1225void usb_serial_deregister(struct usb_serial_driver *device) /* must be called with BKL held */
1166{ 1226{
1167 info("USB Serial deregistering driver %s", device->description); 1227 info("USB Serial deregistering driver %s", device->description);
1168 list_del(&device->driver_list); 1228 list_del(&device->driver_list);
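The new annotations shift the locking burden to callers of usb_serial_register()/usb_serial_deregister(). Illustrative only, with a hypothetical driver name: a caller that does not already hold the BKL would bracket the call with lock_kernel()/unlock_kernel() from <linux/smp_lock.h>.

static int __init example_serial_init(void)
{
	int retval;

	lock_kernel();		/* BKL required by usb_serial_register() */
	retval = usb_serial_register(&example_device);
	unlock_kernel();
	return retval;
}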
diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
index b09f06096056..2f59ff226e2c 100644
--- a/drivers/usb/serial/visor.c
+++ b/drivers/usb/serial/visor.c
@@ -90,8 +90,6 @@ static struct usb_device_id id_table [] = {
90 .driver_info = (kernel_ulong_t)&palm_os_4_probe }, 90 .driver_info = (kernel_ulong_t)&palm_os_4_probe },
91 { USB_DEVICE(PALM_VENDOR_ID, PALM_TUNGSTEN_Z_ID), 91 { USB_DEVICE(PALM_VENDOR_ID, PALM_TUNGSTEN_Z_ID),
92 .driver_info = (kernel_ulong_t)&palm_os_4_probe }, 92 .driver_info = (kernel_ulong_t)&palm_os_4_probe },
93 { USB_DEVICE(PALM_VENDOR_ID, PALM_ZIRE31_ID),
94 .driver_info = (kernel_ulong_t)&palm_os_4_probe },
95 { USB_DEVICE(PALM_VENDOR_ID, PALM_ZIRE_ID), 93 { USB_DEVICE(PALM_VENDOR_ID, PALM_ZIRE_ID),
96 .driver_info = (kernel_ulong_t)&palm_os_4_probe }, 94 .driver_info = (kernel_ulong_t)&palm_os_4_probe },
97 { USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_4_0_ID), 95 { USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_4_0_ID),
@@ -151,7 +149,6 @@ static struct usb_device_id id_table_combined [] = {
151 { USB_DEVICE(PALM_VENDOR_ID, PALM_TUNGSTEN_T_ID) }, 149 { USB_DEVICE(PALM_VENDOR_ID, PALM_TUNGSTEN_T_ID) },
152 { USB_DEVICE(PALM_VENDOR_ID, PALM_TREO_650) }, 150 { USB_DEVICE(PALM_VENDOR_ID, PALM_TREO_650) },
153 { USB_DEVICE(PALM_VENDOR_ID, PALM_TUNGSTEN_Z_ID) }, 151 { USB_DEVICE(PALM_VENDOR_ID, PALM_TUNGSTEN_Z_ID) },
154 { USB_DEVICE(PALM_VENDOR_ID, PALM_ZIRE31_ID) },
155 { USB_DEVICE(PALM_VENDOR_ID, PALM_ZIRE_ID) }, 152 { USB_DEVICE(PALM_VENDOR_ID, PALM_ZIRE_ID) },
156 { USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_3_5_ID) }, 153 { USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_3_5_ID) },
157 { USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_4_0_ID) }, 154 { USB_DEVICE(SONY_VENDOR_ID, SONY_CLIE_4_0_ID) },
@@ -189,6 +186,7 @@ static struct usb_serial_driver handspring_device = {
189 .name = "visor", 186 .name = "visor",
190 }, 187 },
191 .description = "Handspring Visor / Palm OS", 188 .description = "Handspring Visor / Palm OS",
189 .usb_driver = &visor_driver,
192 .id_table = id_table, 190 .id_table = id_table,
193 .num_interrupt_in = NUM_DONT_CARE, 191 .num_interrupt_in = NUM_DONT_CARE,
194 .num_bulk_in = 2, 192 .num_bulk_in = 2,
@@ -219,6 +217,7 @@ static struct usb_serial_driver clie_5_device = {
219 .name = "clie_5", 217 .name = "clie_5",
220 }, 218 },
221 .description = "Sony Clie 5.0", 219 .description = "Sony Clie 5.0",
220 .usb_driver = &visor_driver,
222 .id_table = clie_id_5_table, 221 .id_table = clie_id_5_table,
223 .num_interrupt_in = NUM_DONT_CARE, 222 .num_interrupt_in = NUM_DONT_CARE,
224 .num_bulk_in = 2, 223 .num_bulk_in = 2,
@@ -249,6 +248,7 @@ static struct usb_serial_driver clie_3_5_device = {
249 .name = "clie_3.5", 248 .name = "clie_3.5",
250 }, 249 },
251 .description = "Sony Clie 3.5", 250 .description = "Sony Clie 3.5",
251 .usb_driver = &visor_driver,
252 .id_table = clie_id_3_5_table, 252 .id_table = clie_id_3_5_table,
253 .num_interrupt_in = 0, 253 .num_interrupt_in = 0,
254 .num_bulk_in = 1, 254 .num_bulk_in = 1,
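A pattern applied across this series: each usb_serial_driver gains a .usb_driver pointer back to the struct usb_driver that owns the combined id table, presumably so the usb-serial core can reach it. The shape, with hypothetical names:

static struct usb_driver example_driver = {
	.name		= "example",
	.probe		= usb_serial_probe,
	.disconnect	= usb_serial_disconnect,
	.id_table	= id_table_combined,	/* hypothetical table */
};

static struct usb_serial_driver example_device = {
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "example",
	},
	.description	= "Example adapter",
	.usb_driver	= &example_driver,	/* the new back-pointer */
	.id_table	= id_table,		/* hypothetical table */
};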
diff --git a/drivers/usb/serial/visor.h b/drivers/usb/serial/visor.h
index 765118d83fb6..4ce6f62a6f39 100644
--- a/drivers/usb/serial/visor.h
+++ b/drivers/usb/serial/visor.h
@@ -32,7 +32,6 @@
32#define PALM_TUNGSTEN_T_ID 0x0060 32#define PALM_TUNGSTEN_T_ID 0x0060
33#define PALM_TREO_650 0x0061 33#define PALM_TREO_650 0x0061
34#define PALM_TUNGSTEN_Z_ID 0x0031 34#define PALM_TUNGSTEN_Z_ID 0x0031
35#define PALM_ZIRE31_ID 0x0061
36#define PALM_ZIRE_ID 0x0070 35#define PALM_ZIRE_ID 0x0070
37#define PALM_M100_ID 0x0080 36#define PALM_M100_ID 0x0080
38 37
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index 5483d8564c1b..bf16e9e1d84e 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -161,6 +161,7 @@ static struct usb_serial_driver whiteheat_fake_device = {
161 .name = "whiteheatnofirm", 161 .name = "whiteheatnofirm",
162 }, 162 },
163 .description = "Connect Tech - WhiteHEAT - (prerenumeration)", 163 .description = "Connect Tech - WhiteHEAT - (prerenumeration)",
164 .usb_driver = &whiteheat_driver,
164 .id_table = id_table_prerenumeration, 165 .id_table = id_table_prerenumeration,
165 .num_interrupt_in = NUM_DONT_CARE, 166 .num_interrupt_in = NUM_DONT_CARE,
166 .num_bulk_in = NUM_DONT_CARE, 167 .num_bulk_in = NUM_DONT_CARE,
@@ -176,6 +177,7 @@ static struct usb_serial_driver whiteheat_device = {
176 .name = "whiteheat", 177 .name = "whiteheat",
177 }, 178 },
178 .description = "Connect Tech - WhiteHEAT", 179 .description = "Connect Tech - WhiteHEAT",
180 .usb_driver = &whiteheat_driver,
179 .id_table = id_table_std, 181 .id_table = id_table_std,
180 .num_interrupt_in = NUM_DONT_CARE, 182 .num_interrupt_in = NUM_DONT_CARE,
181 .num_bulk_in = NUM_DONT_CARE, 183 .num_bulk_in = NUM_DONT_CARE,
diff --git a/drivers/usb/storage/onetouch.c b/drivers/usb/storage/onetouch.c
index e565d3d2ab29..6d3dad3d1dae 100644
--- a/drivers/usb/storage/onetouch.c
+++ b/drivers/usb/storage/onetouch.c
@@ -33,7 +33,6 @@
33#include <linux/init.h> 33#include <linux/init.h>
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include <linux/module.h> 35#include <linux/module.h>
36#include <linux/usb_ch9.h>
37#include <linux/usb/input.h> 36#include <linux/usb/input.h>
38#include "usb.h" 37#include "usb.h"
39#include "onetouch.h" 38#include "onetouch.h"
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index e1072d52d641..70234f5dbeeb 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -110,23 +110,6 @@ static int slave_configure(struct scsi_device *sdev)
110 * the end, scatter-gather buffers follow page boundaries. */ 110 * the end, scatter-gather buffers follow page boundaries. */
111 blk_queue_dma_alignment(sdev->request_queue, (512 - 1)); 111 blk_queue_dma_alignment(sdev->request_queue, (512 - 1));
112 112
113 /* Set the SCSI level to at least 2. We'll leave it at 3 if that's
114 * what is originally reported. We need this to avoid confusing
115 * the SCSI layer with devices that report 0 or 1, but need 10-byte
116 * commands (ala ATAPI devices behind certain bridges, or devices
117 * which simply have broken INQUIRY data).
118 *
119 * NOTE: This means /dev/sg programs (ala cdrecord) will get the
120 * actual information. This seems to be the preference for
121 * programs like that.
122 *
123 * NOTE: This also means that /proc/scsi/scsi and sysfs may report
124 * the actual value or the modified one, depending on where the
125 * data comes from.
126 */
127 if (sdev->scsi_level < SCSI_2)
128 sdev->scsi_level = sdev->sdev_target->scsi_level = SCSI_2;
129
130 /* Many devices have trouble transferring more than 32KB at a time, 113 /* Many devices have trouble transferring more than 32KB at a time,
131 * while others have trouble with more than 64K. At this time we 114 * while others have trouble with more than 64K. At this time we
132 * are limiting both to 32K (64 sectors). 115 * are limiting both to 32K (64 sectors).
@@ -176,7 +159,9 @@ static int slave_configure(struct scsi_device *sdev)
176 * a Get-Max-LUN request, we won't lose much by setting the 159 * a Get-Max-LUN request, we won't lose much by setting the
177 * revision level down to 2. The only devices that would be 160 * revision level down to 2. The only devices that would be
178 * affected are those with sparse LUNs. */ 161 * affected are those with sparse LUNs. */
179 sdev->scsi_level = sdev->sdev_target->scsi_level = SCSI_2; 162 if (sdev->scsi_level > SCSI_2)
163 sdev->sdev_target->scsi_level =
164 sdev->scsi_level = SCSI_2;
180 165
181 /* USB-IDE bridges tend to report SK = 0x04 (Non-recoverable 166 /* USB-IDE bridges tend to report SK = 0x04 (Non-recoverable
182 * Hardware Error) when any low-level error occurs, 167 * Hardware Error) when any low-level error occurs,
@@ -194,6 +179,16 @@ static int slave_configure(struct scsi_device *sdev)
194 sdev->use_10_for_ms = 1; 179 sdev->use_10_for_ms = 1;
195 } 180 }
196 181
182 /* The CB and CBI transports have no way to pass LUN values
183 * other than the bits in the second byte of a CDB. But those
184 * bits don't get set to the LUN value if the device reports
185 * scsi_level == 0 (UNKNOWN). Hence such devices must necessarily
186 * be single-LUN.
187 */
188 if ((us->protocol == US_PR_CB || us->protocol == US_PR_CBI) &&
189 sdev->scsi_level == SCSI_UNKNOWN)
190 us->max_lun = 0;
191
197 /* Some devices choke when they receive a PREVENT-ALLOW MEDIUM 192 /* Some devices choke when they receive a PREVENT-ALLOW MEDIUM
198 * REMOVAL command, so suppress those commands. */ 193 * REMOVAL command, so suppress those commands. */
199 if (us->flags & US_FL_NOT_LOCKABLE) 194 if (us->flags & US_FL_NOT_LOCKABLE)
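Taken together, the two scsi_level changes above reduce to a small decision procedure. A condensed restatement (names as in the diff; the guard conditions surrounding the first branch in the full function are not visible in these hunks):

/* Condensed restatement of the slave_configure() logic above. */
static void scsi_level_sketch(struct us_data *us, struct scsi_device *sdev)
{
	/* Only lower the level for the sparse-LUN workaround; never
	 * raise a level the device reported correctly. */
	if (sdev->scsi_level > SCSI_2)
		sdev->sdev_target->scsi_level =
			sdev->scsi_level = SCSI_2;

	/* CB/CBI pass the LUN in CDB bits that stay clear when the
	 * device reports SCSI_UNKNOWN, so force such devices to LUN 0. */
	if ((us->protocol == US_PR_CB || us->protocol == US_PR_CBI) &&
	    sdev->scsi_level == SCSI_UNKNOWN)
		us->max_lun = 0;
}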
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index b49f2a78189e..f49a62fc32d2 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -573,7 +573,7 @@ UNUSUAL_DEV( 0x054c, 0x002b, 0x0100, 0x0110,
573#endif 573#endif
574 574
575/* Submitted by Olaf Hering, <olh@suse.de> SuSE Bugzilla #49049 */ 575/* Submitted by Olaf Hering, <olh@suse.de> SuSE Bugzilla #49049 */
576UNUSUAL_DEV( 0x054c, 0x002c, 0x0501, 0x0501, 576UNUSUAL_DEV( 0x054c, 0x002c, 0x0501, 0x2000,
577 "Sony", 577 "Sony",
578 "USB Floppy Drive", 578 "USB Floppy Drive",
579 US_SC_DEVICE, US_PR_DEVICE, NULL, 579 US_SC_DEVICE, US_PR_DEVICE, NULL,
@@ -1325,13 +1325,6 @@ UNUSUAL_DEV( 0x0fce, 0xe031, 0x0000, 0x0000,
1325 US_SC_DEVICE, US_PR_DEVICE, NULL, 1325 US_SC_DEVICE, US_PR_DEVICE, NULL,
1326 US_FL_FIX_CAPACITY ), 1326 US_FL_FIX_CAPACITY ),
1327 1327
1328/* Reported by Jan Mate <mate@fiit.stuba.sk> */
1329UNUSUAL_DEV( 0x0fce, 0xe030, 0x0000, 0x0000,
1330 "Sony Ericsson",
1331 "P990i",
1332 US_SC_DEVICE, US_PR_DEVICE, NULL,
1333 US_FL_FIX_CAPACITY ),
1334
1335/* Reported by Kevin Cernekee <kpc-usbdev@gelato.uiuc.edu> 1328/* Reported by Kevin Cernekee <kpc-usbdev@gelato.uiuc.edu>
1336 * Tested on hardware version 1.10. 1329 * Tested on hardware version 1.10.
1337 * Entry is needed only for the initializer function override. 1330 * Entry is needed only for the initializer function override.
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 70644506651f..7e7ec29782f1 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -731,26 +731,27 @@ static int get_pipes(struct us_data *us)
731 struct usb_endpoint_descriptor *ep_int = NULL; 731 struct usb_endpoint_descriptor *ep_int = NULL;
732 732
733 /* 733 /*
734 * Find the endpoints we need. 734 * Find the first endpoint of each type we need.
735 * We are expecting a minimum of 2 endpoints - in and out (bulk). 735 * We are expecting a minimum of 2 endpoints - in and out (bulk).
736 * An optional interrupt is OK (necessary for CBI protocol). 736 * An optional interrupt-in is OK (necessary for CBI protocol).
737 * We will ignore any others. 737 * We will ignore any others.
738 */ 738 */
739 for (i = 0; i < altsetting->desc.bNumEndpoints; i++) { 739 for (i = 0; i < altsetting->desc.bNumEndpoints; i++) {
740 ep = &altsetting->endpoint[i].desc; 740 ep = &altsetting->endpoint[i].desc;
741 741
742 /* Is it a BULK endpoint? */
743 if (usb_endpoint_xfer_bulk(ep)) { 742 if (usb_endpoint_xfer_bulk(ep)) {
744 /* BULK in or out? */ 743 if (usb_endpoint_dir_in(ep)) {
745 if (usb_endpoint_dir_in(ep)) 744 if (!ep_in)
746 ep_in = ep; 745 ep_in = ep;
747 else 746 } else {
748 ep_out = ep; 747 if (!ep_out)
748 ep_out = ep;
749 }
749 } 750 }
750 751
751 /* Is it an interrupt endpoint? */ 752 else if (usb_endpoint_is_int_in(ep)) {
752 else if (usb_endpoint_xfer_int(ep)) { 753 if (!ep_int)
753 ep_int = ep; 754 ep_int = ep;
754 } 755 }
755 } 756 }
756 757
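The rewritten loop keeps only the first endpoint of each kind instead of the last one seen. The same scan, written as a self-contained sketch with the combined usb_endpoint_is_*() helpers (siblings of the xfer/dir helpers the patch uses); callers are assumed to pass pointers preset to NULL:

static void find_endpoints_sketch(struct usb_host_interface *alt,
				  struct usb_endpoint_descriptor **bulk_in,
				  struct usb_endpoint_descriptor **bulk_out,
				  struct usb_endpoint_descriptor **int_in)
{
	struct usb_endpoint_descriptor *ep;
	int i;

	for (i = 0; i < alt->desc.bNumEndpoints; i++) {
		ep = &alt->endpoint[i].desc;
		if (usb_endpoint_is_bulk_in(ep) && !*bulk_in)
			*bulk_in = ep;		/* first bulk-in wins */
		else if (usb_endpoint_is_bulk_out(ep) && !*bulk_out)
			*bulk_out = ep;
		else if (usb_endpoint_is_int_in(ep) && !*int_in)
			*int_in = ep;
	}
}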
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 4e83f01e894e..45fe65d8d7a0 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -1444,8 +1444,8 @@ config FB_PMAG_AA
1444 used mainly in the MIPS-based DECstation series. 1444 used mainly in the MIPS-based DECstation series.
1445 1445
1446config FB_PMAG_BA 1446config FB_PMAG_BA
1447 bool "PMAG-BA TURBOchannel framebuffer support" 1447 tristate "PMAG-BA TURBOchannel framebuffer support"
1448 depends on (FB = y) && TC 1448 depends on FB && TC
1449 select FB_CFB_FILLRECT 1449 select FB_CFB_FILLRECT
1450 select FB_CFB_COPYAREA 1450 select FB_CFB_COPYAREA
1451 select FB_CFB_IMAGEBLIT 1451 select FB_CFB_IMAGEBLIT
@@ -1454,8 +1454,8 @@ config FB_PMAG_BA
1454 used mainly in the MIPS-based DECstation series. 1454 used mainly in the MIPS-based DECstation series.
1455 1455
1456config FB_PMAGB_B 1456config FB_PMAGB_B
1457 bool "PMAGB-B TURBOchannel framebuffer support" 1457 tristate "PMAGB-B TURBOchannel framebuffer support"
 1458 depends on (FB = y) && TC 1458 depends on FB && TC
1459 select FB_CFB_FILLRECT 1459 select FB_CFB_FILLRECT
1460 select FB_CFB_COPYAREA 1460 select FB_CFB_COPYAREA
1461 select FB_CFB_IMAGEBLIT 1461 select FB_CFB_IMAGEBLIT
diff --git a/drivers/video/output.c b/drivers/video/output.c
new file mode 100644
index 000000000000..1473f2c892d2
--- /dev/null
+++ b/drivers/video/output.c
@@ -0,0 +1,129 @@
1/*
2 * output.c - Display Output Switch driver
3 *
4 * Copyright (C) 2006 Luming Yu <luming.yu@intel.com>
5 *
6 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or (at
11 * your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, write to the Free Software Foundation, Inc.,
20 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
21 *
22 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
23 */
24#include <linux/module.h>
25#include <linux/video_output.h>
26#include <linux/err.h>
27#include <linux/ctype.h>
28
29
30MODULE_DESCRIPTION("Display Output Switcher Lowlevel Control Abstraction");
31MODULE_LICENSE("GPL");
32MODULE_AUTHOR("Luming Yu <luming.yu@intel.com>");
33
 34static ssize_t video_output_show_state(struct class_device *dev, char *buf)
35{
36 ssize_t ret_size = 0;
37 struct output_device *od = to_output_device(dev);
38 if (od->props)
 39 ret_size = sprintf(buf, "%.8x\n", od->props->get_status(od));
40 return ret_size;
41}
42
43static ssize_t video_output_store_state(struct class_device *dev,
 44 const char *buf, size_t count)
45{
46 char *endp;
47 struct output_device *od = to_output_device(dev);
 48 int request_state = simple_strtoul(buf, &endp, 0);
49 size_t size = endp - buf;
50
51 if (*endp && isspace(*endp))
52 size++;
53 if (size != count)
54 return -EINVAL;
55
56 if (od->props) {
57 od->request_state = request_state;
58 od->props->set_state(od);
59 }
60 return count;
61}
62
63static void video_output_class_release(struct class_device *dev)
64{
65 struct output_device *od = to_output_device(dev);
66 kfree(od);
67}
68
69static struct class_device_attribute video_output_attributes[] = {
70 __ATTR(state, 0644, video_output_show_state, video_output_store_state),
71 __ATTR_NULL,
72};
73
74static struct class video_output_class = {
75 .name = "video_output",
76 .release = video_output_class_release,
77 .class_dev_attrs = video_output_attributes,
78};
79
80struct output_device *video_output_register(const char *name,
81 struct device *dev,
82 void *devdata,
83 struct output_properties *op)
84{
85 struct output_device *new_dev;
86 int ret_code = 0;
87
 88 new_dev = kzalloc(sizeof(struct output_device), GFP_KERNEL);
89 if (!new_dev) {
90 ret_code = -ENOMEM;
91 goto error_return;
92 }
93 new_dev->props = op;
94 new_dev->class_dev.class = &video_output_class;
95 new_dev->class_dev.dev = dev;
 96 strlcpy(new_dev->class_dev.class_id, name, KOBJ_NAME_LEN);
 97 class_set_devdata(&new_dev->class_dev, devdata);
98 ret_code = class_device_register(&new_dev->class_dev);
99 if (ret_code) {
100 kfree(new_dev);
101 goto error_return;
102 }
103 return new_dev;
104
105error_return:
106 return ERR_PTR(ret_code);
107}
108EXPORT_SYMBOL(video_output_register);
109
110void video_output_unregister(struct output_device *dev)
111{
112 if (!dev)
113 return;
114 class_device_unregister(&dev->class_dev);
115}
116EXPORT_SYMBOL(video_output_unregister);
117
118static void __exit video_output_class_exit(void)
119{
120 class_unregister(&video_output_class);
121}
122
123static int __init video_output_class_init(void)
124{
125 return class_register(&video_output_class);
126}
127
128postcore_initcall(video_output_class_init);
129module_exit(video_output_class_exit);
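A usage sketch for the new class. Everything below is hypothetical: the output_properties callback signatures are inferred from how od->props is used above (get_status feeding a "%.8x" format, set_state consuming od->request_state) and may differ from linux/video_output.h.

static int example_get_status(struct output_device *od)
{
	return 0;	/* would read the current switch state from hardware */
}

static int example_set_state(struct output_device *od)
{
	/* would apply od->request_state to the hardware */
	return 0;
}

static struct output_properties example_props = {
	.set_state	= example_set_state,
	.get_status	= example_get_status,
};

static int example_bind(struct device *dev)
{
	struct output_device *od;

	od = video_output_register("example_output", dev, NULL,
				   &example_props);
	if (IS_ERR(od))
		return PTR_ERR(od);
	/* ... keep od around; video_output_unregister(od) on teardown. */
	return 0;
}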
diff --git a/drivers/video/pmag-ba-fb.c b/drivers/video/pmag-ba-fb.c
index f5361cd8ccce..264d37243fad 100644
--- a/drivers/video/pmag-ba-fb.c
+++ b/drivers/video/pmag-ba-fb.c
@@ -15,7 +15,8 @@
15 * Michael Engel <engel@unix-ag.org>, 15 * Michael Engel <engel@unix-ag.org>,
16 * Karsten Merker <merker@linuxtag.org> and 16 * Karsten Merker <merker@linuxtag.org> and
17 * Harald Koerfgen. 17 * Harald Koerfgen.
18 * Copyright (c) 2005 Maciej W. Rozycki 18 * Copyright (c) 2005, 2006 Maciej W. Rozycki
19 * Copyright (c) 2005 James Simmons
19 * 20 *
20 * This file is subject to the terms and conditions of the GNU General 21 * This file is subject to the terms and conditions of the GNU General
21 * Public License. See the file COPYING in the main directory of this 22 * Public License. See the file COPYING in the main directory of this
@@ -28,26 +29,21 @@
28#include <linux/init.h> 29#include <linux/init.h>
29#include <linux/kernel.h> 30#include <linux/kernel.h>
30#include <linux/module.h> 31#include <linux/module.h>
32#include <linux/tc.h>
31#include <linux/types.h> 33#include <linux/types.h>
32 34
33#include <asm/io.h> 35#include <asm/io.h>
34#include <asm/system.h> 36#include <asm/system.h>
35 37
36#include <asm/dec/tc.h>
37
38#include <video/pmag-ba-fb.h> 38#include <video/pmag-ba-fb.h>
39 39
40 40
41struct pmagbafb_par { 41struct pmagbafb_par {
42 struct fb_info *next;
43 volatile void __iomem *mmio; 42 volatile void __iomem *mmio;
44 volatile u32 __iomem *dac; 43 volatile u32 __iomem *dac;
45 int slot;
46}; 44};
47 45
48 46
49static struct fb_info *root_pmagbafb_dev;
50
51static struct fb_var_screeninfo pmagbafb_defined __initdata = { 47static struct fb_var_screeninfo pmagbafb_defined __initdata = {
52 .xres = 1024, 48 .xres = 1024,
53 .yres = 864, 49 .yres = 864,
@@ -145,24 +141,19 @@ static void __init pmagbafb_erase_cursor(struct fb_info *info)
145} 141}
146 142
147 143
148static int __init pmagbafb_init_one(int slot) 144static int __init pmagbafb_probe(struct device *dev)
149{ 145{
146 struct tc_dev *tdev = to_tc_dev(dev);
147 resource_size_t start, len;
150 struct fb_info *info; 148 struct fb_info *info;
151 struct pmagbafb_par *par; 149 struct pmagbafb_par *par;
152 unsigned long base_addr;
153 150
154 info = framebuffer_alloc(sizeof(struct pmagbafb_par), NULL); 151 info = framebuffer_alloc(sizeof(struct pmagbafb_par), dev);
155 if (!info) 152 if (!info)
156 return -ENOMEM; 153 return -ENOMEM;
157 154
158 par = info->par; 155 par = info->par;
159 par->slot = slot; 156 dev_set_drvdata(dev, info);
160 claim_tc_card(par->slot);
161
162 base_addr = get_tc_base_addr(par->slot);
163
164 par->next = root_pmagbafb_dev;
165 root_pmagbafb_dev = info;
166 157
167 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) 158 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0)
168 goto err_alloc; 159 goto err_alloc;
@@ -172,15 +163,21 @@ static int __init pmagbafb_init_one(int slot)
172 info->var = pmagbafb_defined; 163 info->var = pmagbafb_defined;
173 info->flags = FBINFO_DEFAULT; 164 info->flags = FBINFO_DEFAULT;
174 165
166 /* Request the I/O MEM resource. */
167 start = tdev->resource.start;
168 len = tdev->resource.end - start + 1;
169 if (!request_mem_region(start, len, dev->bus_id))
170 goto err_cmap;
171
175 /* MMIO mapping setup. */ 172 /* MMIO mapping setup. */
176 info->fix.mmio_start = base_addr; 173 info->fix.mmio_start = start;
177 par->mmio = ioremap_nocache(info->fix.mmio_start, info->fix.mmio_len); 174 par->mmio = ioremap_nocache(info->fix.mmio_start, info->fix.mmio_len);
178 if (!par->mmio) 175 if (!par->mmio)
179 goto err_cmap; 176 goto err_resource;
180 par->dac = par->mmio + PMAG_BA_BT459; 177 par->dac = par->mmio + PMAG_BA_BT459;
181 178
182 /* Frame buffer mapping setup. */ 179 /* Frame buffer mapping setup. */
183 info->fix.smem_start = base_addr + PMAG_BA_FBMEM; 180 info->fix.smem_start = start + PMAG_BA_FBMEM;
184 info->screen_base = ioremap_nocache(info->fix.smem_start, 181 info->screen_base = ioremap_nocache(info->fix.smem_start,
185 info->fix.smem_len); 182 info->fix.smem_len);
186 if (!info->screen_base) 183 if (!info->screen_base)
@@ -192,8 +189,10 @@ static int __init pmagbafb_init_one(int slot)
192 if (register_framebuffer(info) < 0) 189 if (register_framebuffer(info) < 0)
193 goto err_smem_map; 190 goto err_smem_map;
194 191
195 pr_info("fb%d: %s frame buffer device in slot %d\n", 192 get_device(dev);
196 info->node, info->fix.id, par->slot); 193
194 pr_info("fb%d: %s frame buffer device at %s\n",
195 info->node, info->fix.id, dev->bus_id);
197 196
198 return 0; 197 return 0;
199 198
@@ -204,54 +203,68 @@ err_smem_map:
204err_mmio_map: 203err_mmio_map:
205 iounmap(par->mmio); 204 iounmap(par->mmio);
206 205
206err_resource:
207 release_mem_region(start, len);
208
207err_cmap: 209err_cmap:
208 fb_dealloc_cmap(&info->cmap); 210 fb_dealloc_cmap(&info->cmap);
209 211
210err_alloc: 212err_alloc:
211 root_pmagbafb_dev = par->next;
212 release_tc_card(par->slot);
213 framebuffer_release(info); 213 framebuffer_release(info);
214 return -ENXIO; 214 return -ENXIO;
215} 215}
216 216
217static void __exit pmagbafb_exit_one(void) 217static int __exit pmagbafb_remove(struct device *dev)
218{ 218{
219 struct fb_info *info = root_pmagbafb_dev; 219 struct tc_dev *tdev = to_tc_dev(dev);
220 struct fb_info *info = dev_get_drvdata(dev);
220 struct pmagbafb_par *par = info->par; 221 struct pmagbafb_par *par = info->par;
222 resource_size_t start, len;
221 223
224 put_device(dev);
222 unregister_framebuffer(info); 225 unregister_framebuffer(info);
223 iounmap(info->screen_base); 226 iounmap(info->screen_base);
224 iounmap(par->mmio); 227 iounmap(par->mmio);
228 start = tdev->resource.start;
229 len = tdev->resource.end - start + 1;
230 release_mem_region(start, len);
225 fb_dealloc_cmap(&info->cmap); 231 fb_dealloc_cmap(&info->cmap);
226 root_pmagbafb_dev = par->next;
227 release_tc_card(par->slot);
228 framebuffer_release(info); 232 framebuffer_release(info);
233 return 0;
229} 234}
230 235
231 236
232/* 237/*
233 * Initialise the framebuffer. 238 * Initialize the framebuffer.
234 */ 239 */
240static const struct tc_device_id pmagbafb_tc_table[] = {
241 { "DEC ", "PMAG-BA " },
242 { }
243};
244MODULE_DEVICE_TABLE(tc, pmagbafb_tc_table);
245
246static struct tc_driver pmagbafb_driver = {
247 .id_table = pmagbafb_tc_table,
248 .driver = {
249 .name = "pmagbafb",
250 .bus = &tc_bus_type,
251 .probe = pmagbafb_probe,
252 .remove = __exit_p(pmagbafb_remove),
253 },
254};
255
235static int __init pmagbafb_init(void) 256static int __init pmagbafb_init(void)
236{ 257{
237 int count = 0; 258#ifndef MODULE
238 int slot;
239
240 if (fb_get_options("pmagbafb", NULL)) 259 if (fb_get_options("pmagbafb", NULL))
241 return -ENXIO; 260 return -ENXIO;
242 261#endif
243 while ((slot = search_tc_card("PMAG-BA")) >= 0) { 262 return tc_register_driver(&pmagbafb_driver);
244 if (pmagbafb_init_one(slot) < 0)
245 break;
246 count++;
247 }
248 return (count > 0) ? 0 : -ENXIO;
249} 263}
250 264
251static void __exit pmagbafb_exit(void) 265static void __exit pmagbafb_exit(void)
252{ 266{
253 while (root_pmagbafb_dev) 267 tc_unregister_driver(&pmagbafb_driver);
254 pmagbafb_exit_one();
255} 268}
256 269
257 270
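Both framebuffer drivers now follow the same TC bus-driver shape; stripped to its skeleton it looks like this (driver and card names hypothetical, interfaces as used in the diff):

static const struct tc_device_id example_tc_table[] = {
	{ "DEC     ", "PMAG-XX " },	/* hypothetical vendor/name pair */
	{ }
};
MODULE_DEVICE_TABLE(tc, example_tc_table);

static int __init example_probe(struct device *dev)
{
	struct tc_dev *tdev = to_tc_dev(dev);
	resource_size_t start = tdev->resource.start;
	resource_size_t len = tdev->resource.end - start + 1;

	if (!request_mem_region(start, len, dev->bus_id))
		return -EBUSY;
	/* ioremap, register_framebuffer(), get_device(dev) ... */
	return 0;
}

static struct tc_driver example_tc_driver = {
	.id_table = example_tc_table,
	.driver = {
		.name	= "examplefb",
		.bus	= &tc_bus_type,
		.probe	= example_probe,
	},
};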
diff --git a/drivers/video/pmagb-b-fb.c b/drivers/video/pmagb-b-fb.c
index a06a064ad757..7a0ce7d5af6b 100644
--- a/drivers/video/pmagb-b-fb.c
+++ b/drivers/video/pmagb-b-fb.c
@@ -11,7 +11,7 @@
11 * Michael Engel <engel@unix-ag.org>, 11 * Michael Engel <engel@unix-ag.org>,
12 * Karsten Merker <merker@linuxtag.org> and 12 * Karsten Merker <merker@linuxtag.org> and
13 * Harald Koerfgen. 13 * Harald Koerfgen.
14 * Copyright (c) 2005 Maciej W. Rozycki 14 * Copyright (c) 2005, 2006 Maciej W. Rozycki
15 * 15 *
16 * This file is subject to the terms and conditions of the GNU General 16 * This file is subject to the terms and conditions of the GNU General
17 * Public License. See the file COPYING in the main directory of this 17 * Public License. See the file COPYING in the main directory of this
@@ -25,18 +25,16 @@
25#include <linux/init.h> 25#include <linux/init.h>
26#include <linux/kernel.h> 26#include <linux/kernel.h>
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/tc.h>
28#include <linux/types.h> 29#include <linux/types.h>
29 30
30#include <asm/io.h> 31#include <asm/io.h>
31#include <asm/system.h> 32#include <asm/system.h>
32 33
33#include <asm/dec/tc.h>
34
35#include <video/pmagb-b-fb.h> 34#include <video/pmagb-b-fb.h>
36 35
37 36
38struct pmagbbfb_par { 37struct pmagbbfb_par {
39 struct fb_info *next;
40 volatile void __iomem *mmio; 38 volatile void __iomem *mmio;
41 volatile void __iomem *smem; 39 volatile void __iomem *smem;
42 volatile u32 __iomem *sfb; 40 volatile u32 __iomem *sfb;
@@ -47,8 +45,6 @@ struct pmagbbfb_par {
47}; 45};
48 46
49 47
50static struct fb_info *root_pmagbbfb_dev;
51
52static struct fb_var_screeninfo pmagbbfb_defined __initdata = { 48static struct fb_var_screeninfo pmagbbfb_defined __initdata = {
53 .bits_per_pixel = 8, 49 .bits_per_pixel = 8,
54 .red.length = 8, 50 .red.length = 8,
@@ -190,8 +186,9 @@ static void __init pmagbbfb_osc_setup(struct fb_info *info)
190 69197, 66000, 65000, 50350, 36000, 32000, 25175 186 69197, 66000, 65000, 50350, 36000, 32000, 25175
191 }; 187 };
192 struct pmagbbfb_par *par = info->par; 188 struct pmagbbfb_par *par = info->par;
189 struct tc_bus *tbus = to_tc_dev(info->device)->bus;
193 u32 count0 = 8, count1 = 8, counttc = 16 * 256 + 8; 190 u32 count0 = 8, count1 = 8, counttc = 16 * 256 + 8;
194 u32 freq0, freq1, freqtc = get_tc_speed() / 250; 191 u32 freq0, freq1, freqtc = tc_get_speed(tbus) / 250;
195 int i, j; 192 int i, j;
196 193
197 gp0_write(par, 0); /* select Osc0 */ 194 gp0_write(par, 0); /* select Osc0 */
@@ -249,26 +246,21 @@ static void __init pmagbbfb_osc_setup(struct fb_info *info)
249}; 246};
250 247
251 248
252static int __init pmagbbfb_init_one(int slot) 249static int __init pmagbbfb_probe(struct device *dev)
253{ 250{
254 char freq0[12], freq1[12]; 251 struct tc_dev *tdev = to_tc_dev(dev);
252 resource_size_t start, len;
255 struct fb_info *info; 253 struct fb_info *info;
256 struct pmagbbfb_par *par; 254 struct pmagbbfb_par *par;
257 unsigned long base_addr; 255 char freq0[12], freq1[12];
258 u32 vid_base; 256 u32 vid_base;
259 257
260 info = framebuffer_alloc(sizeof(struct pmagbbfb_par), NULL); 258 info = framebuffer_alloc(sizeof(struct pmagbbfb_par), dev);
261 if (!info) 259 if (!info)
262 return -ENOMEM; 260 return -ENOMEM;
263 261
264 par = info->par; 262 par = info->par;
265 par->slot = slot; 263 dev_set_drvdata(dev, info);
266 claim_tc_card(par->slot);
267
268 base_addr = get_tc_base_addr(par->slot);
269
270 par->next = root_pmagbbfb_dev;
271 root_pmagbbfb_dev = info;
272 264
273 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) 265 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0)
274 goto err_alloc; 266 goto err_alloc;
@@ -278,16 +270,22 @@ static int __init pmagbbfb_init_one(int slot)
278 info->var = pmagbbfb_defined; 270 info->var = pmagbbfb_defined;
279 info->flags = FBINFO_DEFAULT; 271 info->flags = FBINFO_DEFAULT;
280 272
273 /* Request the I/O MEM resource. */
274 start = tdev->resource.start;
275 len = tdev->resource.end - start + 1;
276 if (!request_mem_region(start, len, dev->bus_id))
277 goto err_cmap;
278
281 /* MMIO mapping setup. */ 279 /* MMIO mapping setup. */
282 info->fix.mmio_start = base_addr; 280 info->fix.mmio_start = start;
283 par->mmio = ioremap_nocache(info->fix.mmio_start, info->fix.mmio_len); 281 par->mmio = ioremap_nocache(info->fix.mmio_start, info->fix.mmio_len);
284 if (!par->mmio) 282 if (!par->mmio)
285 goto err_cmap; 283 goto err_resource;
286 par->sfb = par->mmio + PMAGB_B_SFB; 284 par->sfb = par->mmio + PMAGB_B_SFB;
287 par->dac = par->mmio + PMAGB_B_BT459; 285 par->dac = par->mmio + PMAGB_B_BT459;
288 286
289 /* Frame buffer mapping setup. */ 287 /* Frame buffer mapping setup. */
290 info->fix.smem_start = base_addr + PMAGB_B_FBMEM; 288 info->fix.smem_start = start + PMAGB_B_FBMEM;
291 par->smem = ioremap_nocache(info->fix.smem_start, info->fix.smem_len); 289 par->smem = ioremap_nocache(info->fix.smem_start, info->fix.smem_len);
292 if (!par->smem) 290 if (!par->smem)
293 goto err_mmio_map; 291 goto err_mmio_map;
@@ -302,13 +300,15 @@ static int __init pmagbbfb_init_one(int slot)
302 if (register_framebuffer(info) < 0) 300 if (register_framebuffer(info) < 0)
303 goto err_smem_map; 301 goto err_smem_map;
304 302
303 get_device(dev);
304
305 snprintf(freq0, sizeof(freq0), "%u.%03uMHz", 305 snprintf(freq0, sizeof(freq0), "%u.%03uMHz",
306 par->osc0 / 1000, par->osc0 % 1000); 306 par->osc0 / 1000, par->osc0 % 1000);
307 snprintf(freq1, sizeof(freq1), "%u.%03uMHz", 307 snprintf(freq1, sizeof(freq1), "%u.%03uMHz",
308 par->osc1 / 1000, par->osc1 % 1000); 308 par->osc1 / 1000, par->osc1 % 1000);
309 309
310 pr_info("fb%d: %s frame buffer device in slot %d\n", 310 pr_info("fb%d: %s frame buffer device at %s\n",
311 info->node, info->fix.id, par->slot); 311 info->node, info->fix.id, dev->bus_id);
312 pr_info("fb%d: Osc0: %s, Osc1: %s, Osc%u selected\n", 312 pr_info("fb%d: Osc0: %s, Osc1: %s, Osc%u selected\n",
313 info->node, freq0, par->osc1 ? freq1 : "disabled", 313 info->node, freq0, par->osc1 ? freq1 : "disabled",
314 par->osc1 != 0); 314 par->osc1 != 0);
@@ -322,54 +322,68 @@ err_smem_map:
322err_mmio_map: 322err_mmio_map:
323 iounmap(par->mmio); 323 iounmap(par->mmio);
324 324
325err_resource:
326 release_mem_region(start, len);
327
325err_cmap: 328err_cmap:
326 fb_dealloc_cmap(&info->cmap); 329 fb_dealloc_cmap(&info->cmap);
327 330
328err_alloc: 331err_alloc:
329 root_pmagbbfb_dev = par->next;
330 release_tc_card(par->slot);
331 framebuffer_release(info); 332 framebuffer_release(info);
332 return -ENXIO; 333 return -ENXIO;
333} 334}
334 335
335static void __exit pmagbbfb_exit_one(void) 336static int __exit pmagbbfb_remove(struct device *dev)
336{ 337{
337 struct fb_info *info = root_pmagbbfb_dev; 338 struct tc_dev *tdev = to_tc_dev(dev);
339 struct fb_info *info = dev_get_drvdata(dev);
338 struct pmagbbfb_par *par = info->par; 340 struct pmagbbfb_par *par = info->par;
341 resource_size_t start, len;
339 342
343 put_device(dev);
340 unregister_framebuffer(info); 344 unregister_framebuffer(info);
341 iounmap(par->smem); 345 iounmap(par->smem);
342 iounmap(par->mmio); 346 iounmap(par->mmio);
347 start = tdev->resource.start;
348 len = tdev->resource.end - start + 1;
349 release_mem_region(start, len);
343 fb_dealloc_cmap(&info->cmap); 350 fb_dealloc_cmap(&info->cmap);
344 root_pmagbbfb_dev = par->next;
345 release_tc_card(par->slot);
346 framebuffer_release(info); 351 framebuffer_release(info);
352 return 0;
347} 353}
348 354
349 355
350/* 356/*
351 * Initialise the framebuffer. 357 * Initialize the framebuffer.
352 */ 358 */
359static const struct tc_device_id pmagbbfb_tc_table[] = {
360 { "DEC ", "PMAGB-BA" },
361 { }
362};
363MODULE_DEVICE_TABLE(tc, pmagbbfb_tc_table);
364
365static struct tc_driver pmagbbfb_driver = {
366 .id_table = pmagbbfb_tc_table,
367 .driver = {
368 .name = "pmagbbfb",
369 .bus = &tc_bus_type,
370 .probe = pmagbbfb_probe,
371 .remove = __exit_p(pmagbbfb_remove),
372 },
373};
374
353static int __init pmagbbfb_init(void) 375static int __init pmagbbfb_init(void)
354{ 376{
355 int count = 0; 377#ifndef MODULE
356 int slot;
357
358 if (fb_get_options("pmagbbfb", NULL)) 378 if (fb_get_options("pmagbbfb", NULL))
359 return -ENXIO; 379 return -ENXIO;
360 380#endif
361 while ((slot = search_tc_card("PMAGB-BA")) >= 0) { 381 return tc_register_driver(&pmagbbfb_driver);
362 if (pmagbbfb_init_one(slot) < 0)
363 break;
364 count++;
365 }
366 return (count > 0) ? 0 : -ENXIO;
367} 382}
368 383
369static void __exit pmagbbfb_exit(void) 384static void __exit pmagbbfb_exit(void)
370{ 385{
371 while (root_pmagbbfb_dev) 386 tc_unregister_driver(&pmagbbfb_driver);
372 pmagbbfb_exit_one();
373} 387}
374 388
375 389
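The #ifndef MODULE guard both init functions adopt is itself a reusable pattern: a built-in driver must honor a video=<name>:off command-line disable via fb_get_options(), while a module load skips the check. Sketch with the hypothetical names from the skeleton above:

static int __init examplefb_init(void)
{
#ifndef MODULE
	/* Built in: respect a video= command-line disable. */
	if (fb_get_options("examplefb", NULL))
		return -ENXIO;
#endif
	return tc_register_driver(&example_tc_driver);
}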