Diffstat (limited to 'drivers')
-rw-r--r-- drivers/Makefile | 1
-rw-r--r-- drivers/acpi/dispatcher/dsmethod.c | 12
-rw-r--r-- drivers/acpi/dispatcher/dsopcode.c | 3
-rw-r--r-- drivers/acpi/dispatcher/dsutils.c | 7
-rw-r--r-- drivers/acpi/dispatcher/dswstate.c | 9
-rw-r--r-- drivers/acpi/ec.c | 39
-rw-r--r-- drivers/acpi/events/evgpe.c | 5
-rw-r--r-- drivers/acpi/events/evgpeblk.c | 3
-rw-r--r-- drivers/acpi/events/evmisc.c | 20
-rw-r--r-- drivers/acpi/events/evregion.c | 15
-rw-r--r-- drivers/acpi/events/evrgnini.c | 3
-rw-r--r-- drivers/acpi/events/evxface.c | 7
-rw-r--r-- drivers/acpi/events/evxfevnt.c | 2
-rw-r--r-- drivers/acpi/executer/exconvrt.c | 5
-rw-r--r-- drivers/acpi/executer/excreate.c | 6
-rw-r--r-- drivers/acpi/executer/exdump.c | 17
-rw-r--r-- drivers/acpi/executer/exmutex.c | 37
-rw-r--r-- drivers/acpi/executer/exnames.c | 3
-rw-r--r-- drivers/acpi/executer/exprep.c | 2
-rw-r--r-- drivers/acpi/executer/exresop.c | 3
-rw-r--r-- drivers/acpi/executer/exsystem.c | 30
-rw-r--r-- drivers/acpi/executer/exutils.c | 104
-rw-r--r-- drivers/acpi/hardware/hwsleep.c | 1
-rw-r--r-- drivers/acpi/namespace/nseval.c | 13
-rw-r--r-- drivers/acpi/namespace/nsinit.c | 7
-rw-r--r-- drivers/acpi/namespace/nswalk.c | 6
-rw-r--r-- drivers/acpi/namespace/nsxfeval.c | 17
-rw-r--r-- drivers/acpi/osl.c | 45
-rw-r--r-- drivers/acpi/parser/psopcode.c | 618
-rw-r--r-- drivers/acpi/resources/rscalc.c | 3
-rw-r--r-- drivers/acpi/resources/rscreate.c | 13
-rw-r--r-- drivers/acpi/resources/rsdump.c | 8
-rw-r--r-- drivers/acpi/resources/rsinfo.c | 2
-rw-r--r-- drivers/acpi/resources/rslist.c | 7
-rw-r--r-- drivers/acpi/resources/rsmisc.c | 4
-rw-r--r-- drivers/acpi/resources/rsutils.c | 6
-rw-r--r-- drivers/acpi/resources/rsxface.c | 3
-rw-r--r-- drivers/acpi/sleep/main.c | 68
-rw-r--r-- drivers/acpi/sleep/proc.c | 13
-rw-r--r-- drivers/acpi/tables/tbfadt.c | 6
-rw-r--r-- drivers/acpi/tables/tbxface.c | 16
-rw-r--r-- drivers/acpi/thermal.c | 104
-rw-r--r-- drivers/acpi/utilities/utalloc.c | 1
-rw-r--r-- drivers/acpi/utilities/utcache.c | 3
-rw-r--r-- drivers/acpi/utilities/utcopy.c | 4
-rw-r--r-- drivers/acpi/utilities/utdebug.c | 4
-rw-r--r-- drivers/acpi/utilities/utdelete.c | 1
-rw-r--r-- drivers/acpi/utilities/utglobal.c | 6
-rw-r--r-- drivers/acpi/utilities/utmisc.c | 6
-rw-r--r-- drivers/acpi/utilities/utmutex.c | 8
-rw-r--r-- drivers/acpi/utilities/utresrc.c | 1
-rw-r--r-- drivers/acpi/utilities/utxface.c | 2
-rw-r--r-- drivers/ata/Kconfig | 3
-rw-r--r-- drivers/ata/libata-acpi.c | 3
-rw-r--r-- drivers/ata/libata-core.c | 10
-rw-r--r-- drivers/ata/pata_pcmcia.c | 1
-rw-r--r-- drivers/ata/pata_qdi.c | 2
-rw-r--r-- drivers/ata/pata_scc.c | 4
-rw-r--r-- drivers/ata/sata_nv.c | 92
-rw-r--r-- drivers/ata/sata_promise.c | 24
-rw-r--r-- drivers/ata/sata_via.c | 8
-rw-r--r-- drivers/auxdisplay/Kconfig | 1
-rw-r--r-- drivers/base/devres.c | 32
-rw-r--r-- drivers/base/platform.c | 2
-rw-r--r-- drivers/base/topology.c | 3
-rw-r--r-- drivers/block/Kconfig | 4
-rw-r--r-- drivers/block/loop.c | 6
-rw-r--r-- drivers/block/nbd.c | 15
-rw-r--r-- drivers/block/rd.c | 2
-rw-r--r-- drivers/char/Kconfig | 14
-rw-r--r-- drivers/char/drm/drm_dma.c | 2
-rw-r--r-- drivers/char/drm/drm_vm.c | 2
-rw-r--r-- drivers/char/drm/r300_reg.h | 2
-rw-r--r-- drivers/char/genrtc.c | 4
-rw-r--r-- drivers/char/hw_random/Kconfig | 14
-rw-r--r-- drivers/char/hw_random/Makefile | 1
-rw-r--r-- drivers/char/hw_random/pasemi-rng.c | 156
-rw-r--r-- drivers/char/ipmi/Kconfig | 2
-rw-r--r-- drivers/char/mmtimer.c | 4
-rw-r--r-- drivers/char/pcmcia/Kconfig | 1
-rw-r--r-- drivers/char/pcmcia/cm4000_cs.c | 46
-rw-r--r-- drivers/char/pcmcia/cm4040_cs.c | 7
-rw-r--r-- drivers/char/tpm/Kconfig | 3
-rw-r--r-- drivers/char/tty_io.c | 22
-rw-r--r-- drivers/cpufreq/cpufreq.c | 3
-rw-r--r-- drivers/cpufreq/cpufreq_stats.c | 2
-rw-r--r-- drivers/crypto/Kconfig | 24
-rw-r--r-- drivers/dma/Kconfig | 1
-rw-r--r-- drivers/edac/Kconfig | 1
-rw-r--r-- drivers/firewire/Kconfig | 61
-rw-r--r-- drivers/firewire/Makefile | 10
-rw-r--r-- drivers/firewire/fw-card.c | 560
-rw-r--r-- drivers/firewire/fw-cdev.c | 961
-rw-r--r-- drivers/firewire/fw-device.c | 813
-rw-r--r-- drivers/firewire/fw-device.h | 146
-rw-r--r-- drivers/firewire/fw-iso.c | 163
-rw-r--r-- drivers/firewire/fw-ohci.c | 1943
-rw-r--r-- drivers/firewire/fw-ohci.h | 153
-rw-r--r-- drivers/firewire/fw-sbp2.c | 1147
-rw-r--r-- drivers/firewire/fw-topology.c | 537
-rw-r--r-- drivers/firewire/fw-topology.h | 92
-rw-r--r-- drivers/firewire/fw-transaction.c | 910
-rw-r--r-- drivers/firewire/fw-transaction.h | 458
-rw-r--r-- drivers/hwmon/Kconfig | 1
-rw-r--r-- drivers/hwmon/ams/ams-input.c | 2
-rw-r--r-- drivers/hwmon/applesmc.c | 2
-rw-r--r-- drivers/hwmon/coretemp.c | 2
-rw-r--r-- drivers/hwmon/hdaps.c | 2
-rw-r--r-- drivers/i2c/Kconfig | 1
-rw-r--r-- drivers/i2c/busses/i2c-at91.c | 7
-rw-r--r-- drivers/i2c/busses/i2c-pxa.c | 2
-rw-r--r-- drivers/i2c/chips/tps65010.c | 2
-rw-r--r-- drivers/ide/Kconfig | 15
-rw-r--r-- drivers/ide/Makefile | 2
-rw-r--r-- drivers/ide/arm/bast-ide.c | 2
-rw-r--r-- drivers/ide/arm/icside.c | 9
-rw-r--r-- drivers/ide/arm/ide_arm.c | 2
-rw-r--r-- drivers/ide/arm/rapide.c | 2
-rw-r--r-- drivers/ide/cris/ide-cris.c | 4
-rw-r--r-- drivers/ide/h8300/ide-h8300.c | 2
-rw-r--r-- drivers/ide/ide-cd.c | 20
-rw-r--r-- drivers/ide/ide-disk.c | 101
-rw-r--r-- drivers/ide/ide-dma.c | 94
-rw-r--r-- drivers/ide/ide-floppy.c | 30
-rw-r--r-- drivers/ide/ide-generic.c | 2
-rw-r--r-- drivers/ide/ide-io.c | 17
-rw-r--r-- drivers/ide/ide-iops.c | 55
-rw-r--r-- drivers/ide/ide-lib.c | 132
-rw-r--r-- drivers/ide/ide-pnp.c | 2
-rw-r--r-- drivers/ide/ide-probe.c | 3
-rw-r--r-- drivers/ide/ide-proc.c | 344
-rw-r--r-- drivers/ide/ide-tape.c | 49
-rw-r--r-- drivers/ide/ide.c | 470
-rw-r--r-- drivers/ide/legacy/ali14xx.c | 3
-rw-r--r-- drivers/ide/legacy/buddha.c | 2
-rw-r--r-- drivers/ide/legacy/dtc2278.c | 3
-rw-r--r-- drivers/ide/legacy/falconide.c | 2
-rw-r--r-- drivers/ide/legacy/gayle.c | 2
-rw-r--r-- drivers/ide/legacy/ht6560b.c | 3
-rw-r--r-- drivers/ide/legacy/ide-cs.c | 2
-rw-r--r-- drivers/ide/legacy/macide.c | 6
-rw-r--r-- drivers/ide/legacy/q40ide.c | 2
-rw-r--r-- drivers/ide/legacy/qd65xx.c | 7
-rw-r--r-- drivers/ide/legacy/umc8672.c | 3
-rw-r--r-- drivers/ide/mips/au1xxx-ide.c | 3
-rw-r--r-- drivers/ide/mips/swarm.c | 3
-rw-r--r-- drivers/ide/pci/aec62xx.c | 62
-rw-r--r-- drivers/ide/pci/alim15x3.c | 97
-rw-r--r-- drivers/ide/pci/amd74xx.c | 6
-rw-r--r-- drivers/ide/pci/atiixp.c | 40
-rw-r--r-- drivers/ide/pci/cmd64x.c | 90
-rw-r--r-- drivers/ide/pci/cs5520.c | 20
-rw-r--r-- drivers/ide/pci/cs5535.c | 33
-rw-r--r-- drivers/ide/pci/delkin_cb.c | 2
-rw-r--r-- drivers/ide/pci/hpt34x.c | 27
-rw-r--r-- drivers/ide/pci/hpt366.c | 85
-rw-r--r-- drivers/ide/pci/it8213.c | 39
-rw-r--r-- drivers/ide/pci/it821x.c | 20
-rw-r--r-- drivers/ide/pci/jmicron.c | 40
-rw-r--r-- drivers/ide/pci/pdc202xx_new.c | 40
-rw-r--r-- drivers/ide/pci/pdc202xx_old.c | 54
-rw-r--r-- drivers/ide/pci/piix.c | 163
-rw-r--r-- drivers/ide/pci/scc_pata.c | 21
-rw-r--r-- drivers/ide/pci/serverworks.c | 31
-rw-r--r-- drivers/ide/pci/sgiioc4.c | 2
-rw-r--r-- drivers/ide/pci/siimage.c | 47
-rw-r--r-- drivers/ide/pci/sis5513.c | 44
-rw-r--r-- drivers/ide/pci/slc90e66.c | 24
-rw-r--r-- drivers/ide/pci/tc86c001.c | 20
-rw-r--r-- drivers/ide/pci/triflex.c | 15
-rw-r--r-- drivers/ide/ppc/pmac.c | 2
-rw-r--r-- drivers/ide/setup-pci.c | 27
-rw-r--r-- drivers/ieee1394/Kconfig | 3
-rw-r--r-- drivers/ieee1394/nodemgr.c | 2
-rw-r--r-- drivers/infiniband/Kconfig | 8
-rw-r--r-- drivers/infiniband/Makefile | 1
-rw-r--r-- drivers/infiniband/core/Makefile | 4
-rw-r--r-- drivers/infiniband/core/device.c | 2
-rw-r--r-- drivers/infiniband/core/umem.c (renamed from drivers/infiniband/core/uverbs_mem.c) | 153
-rw-r--r-- drivers/infiniband/core/uverbs.h | 6
-rw-r--r-- drivers/infiniband/core/uverbs_cmd.c | 60
-rw-r--r-- drivers/infiniband/core/uverbs_main.c | 11
-rw-r--r-- drivers/infiniband/hw/amso1100/c2_provider.c | 42
-rw-r--r-- drivers/infiniband/hw/amso1100/c2_provider.h | 1
-rw-r--r-- drivers/infiniband/hw/cxgb3/iwch_provider.c | 28
-rw-r--r-- drivers/infiniband/hw/cxgb3/iwch_provider.h | 1
-rw-r--r-- drivers/infiniband/hw/ehca/ehca_classes.h | 1
-rw-r--r-- drivers/infiniband/hw/ehca/ehca_irq.c | 6
-rw-r--r-- drivers/infiniband/hw/ehca/ehca_iverbs.h | 3
-rw-r--r-- drivers/infiniband/hw/ehca/ehca_mrmw.c | 69
-rw-r--r-- drivers/infiniband/hw/ipath/ipath_mr.c | 38
-rw-r--r-- drivers/infiniband/hw/ipath/ipath_verbs.h | 5
-rw-r--r-- drivers/infiniband/hw/mlx4/Kconfig | 9
-rw-r--r-- drivers/infiniband/hw/mlx4/Makefile | 3
-rw-r--r-- drivers/infiniband/hw/mlx4/ah.c | 100
-rw-r--r-- drivers/infiniband/hw/mlx4/cq.c | 525
-rw-r--r-- drivers/infiniband/hw/mlx4/doorbell.c | 216
-rw-r--r-- drivers/infiniband/hw/mlx4/mad.c | 339
-rw-r--r-- drivers/infiniband/hw/mlx4/main.c | 651
-rw-r--r-- drivers/infiniband/hw/mlx4/mlx4_ib.h | 285
-rw-r--r-- drivers/infiniband/hw/mlx4/mr.c | 184
-rw-r--r-- drivers/infiniband/hw/mlx4/qp.c | 1294
-rw-r--r-- drivers/infiniband/hw/mlx4/srq.c | 334
-rw-r--r-- drivers/infiniband/hw/mlx4/user.h | 92
-rw-r--r-- drivers/infiniband/hw/mthca/mthca_provider.c | 38
-rw-r--r-- drivers/infiniband/hw/mthca/mthca_provider.h | 1
-rw-r--r-- drivers/input/Kconfig | 1
-rw-r--r-- drivers/isdn/Kconfig | 1
-rw-r--r-- drivers/isdn/capi/Kconfig | 2
-rw-r--r-- drivers/isdn/hardware/eicon/divasync.h | 2
-rw-r--r-- drivers/isdn/hisax/hfc_usb.c | 4
-rw-r--r-- drivers/kvm/Kconfig | 1
-rw-r--r-- drivers/kvm/kvm_main.c | 3
-rw-r--r-- drivers/leds/Kconfig | 1
-rw-r--r-- drivers/leds/leds-h1940.c | 2
-rw-r--r-- drivers/macintosh/Kconfig | 2
-rw-r--r-- drivers/macintosh/mediabay.c | 2
-rw-r--r-- drivers/mca/mca-bus.c | 28
-rw-r--r-- drivers/mca/mca-driver.c | 13
-rw-r--r-- drivers/md/Kconfig | 9
-rw-r--r-- drivers/md/Makefile | 1
-rw-r--r-- drivers/md/dm-bio-list.h | 26
-rw-r--r-- drivers/md/dm-crypt.c | 91
-rw-r--r-- drivers/md/dm-delay.c | 383
-rw-r--r-- drivers/md/dm-exception-store.c | 54
-rw-r--r-- drivers/md/dm-hw-handler.h | 1
-rw-r--r-- drivers/md/dm-io.c | 232
-rw-r--r-- drivers/md/dm-io.h | 83
-rw-r--r-- drivers/md/dm-log.c | 77
-rw-r--r-- drivers/md/dm-mpath.c | 3
-rw-r--r-- drivers/md/dm-raid1.c | 187
-rw-r--r-- drivers/md/dm-table.c | 10
-rw-r--r-- drivers/md/dm.c | 1
-rw-r--r-- drivers/md/kcopyd.c | 28
-rw-r--r-- drivers/md/md.c | 160
-rw-r--r-- drivers/md/raid1.c | 33
-rw-r--r-- drivers/md/raid5.c | 4
-rw-r--r-- drivers/media/Kconfig | 1
-rw-r--r-- drivers/media/dvb/dvb-usb/dvb-usb-remote.c | 2
-rw-r--r-- drivers/media/dvb/frontends/dib7000m.c | 2
-rw-r--r-- drivers/media/dvb/frontends/dib7000p.c | 2
-rw-r--r-- drivers/media/dvb/frontends/tda10021.c | 2
-rw-r--r-- drivers/media/dvb/frontends/ves1x93.c | 2
-rw-r--r-- drivers/media/video/em28xx/em28xx-i2c.c | 2
-rw-r--r-- drivers/media/video/em28xx/em28xx-video.c | 2
-rw-r--r-- drivers/media/video/pwc/philips.txt | 6
-rw-r--r-- drivers/media/video/usbvideo/vicam.c | 2
-rw-r--r-- drivers/message/fusion/Kconfig | 1
-rw-r--r-- drivers/message/fusion/lsi/mpi_history.txt | 2
-rw-r--r-- drivers/message/fusion/mptbase.c | 2
-rw-r--r-- drivers/message/i2o/Kconfig | 1
-rw-r--r-- drivers/mfd/Kconfig | 1
-rw-r--r-- drivers/misc/Kconfig | 2
-rw-r--r-- drivers/misc/asus-laptop.c | 66
-rw-r--r-- drivers/misc/msi-laptop.c | 12
-rw-r--r-- drivers/misc/sony-laptop.c | 8
-rw-r--r-- drivers/mmc/Kconfig | 1
-rw-r--r-- drivers/mmc/core/core.c | 4
-rw-r--r-- drivers/mtd/Kconfig | 1
-rw-r--r-- drivers/mtd/chips/Kconfig | 40
-rw-r--r-- drivers/mtd/chips/Makefile | 4
-rw-r--r-- drivers/mtd/chips/amd_flash.c | 1396
-rw-r--r-- drivers/mtd/chips/jedec.c | 935
-rw-r--r-- drivers/mtd/chips/sharp.c | 601
-rw-r--r-- drivers/mtd/devices/block2mtd.c | 2
-rw-r--r-- drivers/mtd/maps/Kconfig | 18
-rw-r--r-- drivers/mtd/maps/Makefile | 2
-rw-r--r-- drivers/mtd/maps/arctic-mtd.c | 145
-rw-r--r-- drivers/mtd/maps/beech-mtd.c | 122
-rw-r--r-- drivers/mtd/maps/nettel.c | 2
-rw-r--r-- drivers/mtd/maps/physmap_of.c | 2
-rw-r--r-- drivers/mtd/mtdpart.c | 1
-rw-r--r-- drivers/mtd/nand/Kconfig | 21
-rw-r--r-- drivers/mtd/nand/Makefile | 2
-rw-r--r-- drivers/mtd/nand/at91_nand.c | 10
-rw-r--r-- drivers/mtd/nand/cafe_ecc.c | 1381
-rw-r--r-- drivers/mtd/nand/cafe_nand.c (renamed from drivers/mtd/nand/cafe.c) | 137
-rw-r--r-- drivers/mtd/nand/nand_base.c | 11
-rw-r--r-- drivers/mtd/nand/plat_nand.c | 150
-rw-r--r-- drivers/mtd/onenand/onenand_base.c | 2
-rw-r--r-- drivers/net/3c509.c | 5
-rw-r--r-- drivers/net/3c59x.c | 2
-rw-r--r-- drivers/net/Kconfig | 16
-rw-r--r-- drivers/net/Makefile | 9
-rw-r--r-- drivers/net/atl1/atl1_main.c | 12
-rw-r--r-- drivers/net/atp.c | 8
-rw-r--r-- drivers/net/bonding/bond_main.c | 2
-rw-r--r-- drivers/net/e1000/e1000_main.c | 2
-rw-r--r-- drivers/net/eepro.c | 2
-rw-r--r-- drivers/net/eepro100.c | 2
-rw-r--r-- drivers/net/epic100.c | 10
-rw-r--r-- drivers/net/hamradio/Kconfig | 2
-rw-r--r-- drivers/net/irda/donauboe.h | 2
-rw-r--r-- drivers/net/ixgb/ixgb_ee.c | 2
-rw-r--r-- drivers/net/meth.h | 2
-rw-r--r-- drivers/net/mlx4/Makefile | 4
-rw-r--r-- drivers/net/mlx4/alloc.c | 179
-rw-r--r-- drivers/net/mlx4/catas.c | 70
-rw-r--r-- drivers/net/mlx4/cmd.c | 429
-rw-r--r-- drivers/net/mlx4/cq.c | 254
-rw-r--r-- drivers/net/mlx4/eq.c | 696
-rw-r--r-- drivers/net/mlx4/fw.c | 775
-rw-r--r-- drivers/net/mlx4/fw.h | 167
-rw-r--r-- drivers/net/mlx4/icm.c | 379
-rw-r--r-- drivers/net/mlx4/icm.h | 135
-rw-r--r-- drivers/net/mlx4/intf.c | 165
-rw-r--r-- drivers/net/mlx4/main.c | 936
-rw-r--r-- drivers/net/mlx4/mcg.c | 380
-rw-r--r-- drivers/net/mlx4/mlx4.h | 348
-rw-r--r-- drivers/net/mlx4/mr.c | 479
-rw-r--r-- drivers/net/mlx4/pd.c | 102
-rw-r--r-- drivers/net/mlx4/profile.c | 238
-rw-r--r-- drivers/net/mlx4/qp.c | 280
-rw-r--r-- drivers/net/mlx4/reset.c | 181
-rw-r--r-- drivers/net/mlx4/srq.c | 227
-rw-r--r-- drivers/net/natsemi.c | 1
-rw-r--r-- drivers/net/ne2k-pci.c | 3
-rw-r--r-- drivers/net/pcmcia/ibmtr_cs.c | 14
-rw-r--r-- drivers/net/phy/Kconfig | 1
-rw-r--r-- drivers/net/phy/phy.c | 6
-rw-r--r-- drivers/net/skge.c | 4
-rw-r--r-- drivers/net/sundance.c | 3
-rw-r--r-- drivers/net/tg3.c | 11
-rw-r--r-- drivers/net/tg3.h | 2
-rw-r--r-- drivers/net/tulip/interrupt.c | 2
-rw-r--r-- drivers/net/tulip/winbond-840.c | 2
-rw-r--r-- drivers/net/tulip/xircom_cb.c | 2
-rw-r--r-- drivers/net/typhoon.c | 2
-rw-r--r-- drivers/net/usb/Kconfig (renamed from drivers/usb/net/Kconfig) | 0
-rw-r--r-- drivers/net/usb/Makefile (renamed from drivers/usb/net/Makefile) | 0
-rw-r--r-- drivers/net/usb/asix.c (renamed from drivers/usb/net/asix.c) | 0
-rw-r--r-- drivers/net/usb/catc.c (renamed from drivers/usb/net/catc.c) | 0
-rw-r--r-- drivers/net/usb/cdc_ether.c (renamed from drivers/usb/net/cdc_ether.c) | 0
-rw-r--r-- drivers/net/usb/cdc_subset.c (renamed from drivers/usb/net/cdc_subset.c) | 0
-rw-r--r-- drivers/net/usb/dm9601.c (renamed from drivers/usb/net/dm9601.c) | 0
-rw-r--r-- drivers/net/usb/gl620a.c (renamed from drivers/usb/net/gl620a.c) | 0
-rw-r--r-- drivers/net/usb/kaweth.c (renamed from drivers/usb/net/kaweth.c) | 0
-rw-r--r-- drivers/net/usb/kawethfw.h (renamed from drivers/usb/net/kawethfw.h) | 0
-rw-r--r-- drivers/net/usb/mcs7830.c (renamed from drivers/usb/net/mcs7830.c) | 0
-rw-r--r-- drivers/net/usb/net1080.c (renamed from drivers/usb/net/net1080.c) | 0
-rw-r--r-- drivers/net/usb/pegasus.c (renamed from drivers/usb/net/pegasus.c) | 0
-rw-r--r-- drivers/net/usb/pegasus.h (renamed from drivers/usb/net/pegasus.h) | 0
-rw-r--r-- drivers/net/usb/plusb.c (renamed from drivers/usb/net/plusb.c) | 0
-rw-r--r-- drivers/net/usb/rndis_host.c (renamed from drivers/usb/net/rndis_host.c) | 0
-rw-r--r-- drivers/net/usb/rtl8150.c (renamed from drivers/usb/net/rtl8150.c) | 0
-rw-r--r-- drivers/net/usb/usbnet.c (renamed from drivers/usb/net/usbnet.c) | 0
-rw-r--r-- drivers/net/usb/usbnet.h (renamed from drivers/usb/net/usbnet.h) | 2
-rw-r--r-- drivers/net/usb/zaurus.c (renamed from drivers/usb/net/zaurus.c) | 0
-rw-r--r-- drivers/net/wireless/Kconfig | 1
-rw-r--r-- drivers/net/wireless/airport.c | 2
-rw-r--r-- drivers/net/wireless/bcm43xx/bcm43xx.h | 18
-rw-r--r-- drivers/net/wireless/bcm43xx/bcm43xx_dma.c | 4
-rw-r--r-- drivers/net/wireless/bcm43xx/bcm43xx_main.c | 81
-rw-r--r-- drivers/net/wireless/bcm43xx/bcm43xx_main.h | 19
-rw-r--r-- drivers/net/wireless/prism54/isl_ioctl.c | 2
-rw-r--r-- drivers/net/wireless/prism54/islpci_dev.c | 2
-rw-r--r-- drivers/net/wireless/wavelan_cs.c | 4
-rw-r--r-- drivers/net/wireless/wavelan_cs.p.h | 2
-rw-r--r-- drivers/net/wireless/zd1211rw/zd_usb.c | 4
-rw-r--r-- drivers/net/yellowfin.c | 1
-rw-r--r-- drivers/parport/Kconfig | 1
-rw-r--r-- drivers/pci/pci-driver.c | 2
-rw-r--r-- drivers/pnp/Kconfig | 1
-rw-r--r-- drivers/rtc/Kconfig | 3
-rw-r--r-- drivers/rtc/rtc-sh.c | 8
-rw-r--r-- drivers/s390/block/Kconfig | 11
-rw-r--r-- drivers/s390/block/dasd.c | 8
-rw-r--r-- drivers/s390/block/dasd_diag.c | 10
-rw-r--r-- drivers/s390/block/dasd_eckd.c | 6
-rw-r--r-- drivers/s390/block/dasd_ioctl.c | 4
-rw-r--r-- drivers/s390/char/Kconfig (renamed from drivers/s390/Kconfig) | 111
-rw-r--r-- drivers/s390/char/monreader.c | 14
-rw-r--r-- drivers/s390/char/raw3270.c | 5
-rw-r--r-- drivers/s390/char/sclp.h | 3
-rw-r--r-- drivers/s390/char/sclp_rw.c | 2
-rw-r--r-- drivers/s390/char/sclp_sdias.c | 8
-rw-r--r-- drivers/s390/char/zcore.c | 9
-rw-r--r-- drivers/s390/cio/css.c | 3
-rw-r--r-- drivers/s390/cio/css.h | 2
-rw-r--r-- drivers/s390/cio/device.c | 4
-rw-r--r-- drivers/s390/cio/device_ops.c | 11
-rw-r--r-- drivers/s390/cio/qdio.c | 1
-rw-r--r-- drivers/s390/net/Kconfig | 8
-rw-r--r-- drivers/s390/net/qeth_main.c | 2
-rw-r--r-- drivers/s390/net/qeth_mpc.c | 4
-rw-r--r-- drivers/s390/scsi/zfcp_aux.c | 8
-rw-r--r-- drivers/s390/scsi/zfcp_dbf.c | 2
-rw-r--r-- drivers/s390/scsi/zfcp_qdio.c | 2
-rw-r--r-- drivers/sbus/char/bpp.c | 2
-rw-r--r-- drivers/scsi/aic7xxx/aic79xx_pci.c | 2
-rw-r--r-- drivers/scsi/aic94xx/Makefile | 2
-rw-r--r-- drivers/scsi/dc395x.c | 6
-rw-r--r-- drivers/scsi/ide-scsi.c | 30
-rw-r--r-- drivers/scsi/scsi_lib.c | 2
-rw-r--r-- drivers/serial/Kconfig | 1
-rw-r--r-- drivers/spi/Kconfig | 1
-rw-r--r-- drivers/spi/atmel_spi.c | 5
-rw-r--r-- drivers/telephony/Kconfig | 1
-rw-r--r-- drivers/usb/Kconfig | 3
-rw-r--r-- drivers/usb/Makefile | 7
-rw-r--r-- drivers/usb/atm/usbatm.c | 2
-rw-r--r-- drivers/usb/misc/auerswald.c | 2
-rw-r--r-- drivers/usb/serial/Kconfig | 2
-rw-r--r-- drivers/usb/serial/aircable.c | 4
-rw-r--r-- drivers/usb/serial/io_edgeport.c | 4
-rw-r--r-- drivers/video/Kconfig | 29
-rw-r--r-- drivers/video/Makefile | 2
-rw-r--r-- drivers/video/arkfb.c | 1200
-rw-r--r-- drivers/video/console/softcursor.c | 2
-rw-r--r-- drivers/video/fbmem.c | 4
-rw-r--r-- drivers/video/i810/i810_main.c | 2
-rw-r--r-- drivers/video/matrox/matroxfb_Ti3026.c | 2
-rw-r--r-- drivers/video/matrox/matroxfb_accel.c | 2
-rw-r--r-- drivers/video/matrox/matroxfb_base.c | 2
-rw-r--r-- drivers/video/matrox/matroxfb_misc.c | 2
-rw-r--r-- drivers/video/nvidia/nv_hw.c | 7
-rw-r--r-- drivers/video/nvidia/nvidia.c | 1
-rw-r--r-- drivers/video/s3fb.c | 19
-rw-r--r-- drivers/video/skeletonfb.c | 2
-rw-r--r-- drivers/video/svgalib.c | 17
-rw-r--r-- drivers/video/vt8623fb.c | 927
-rw-r--r-- drivers/w1/Kconfig | 1
422 files changed, 24690 insertions, 7901 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index 26ca9031ea49..adad2f3d438a 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -36,6 +36,7 @@ obj-$(CONFIG_FC4) += fc4/
 obj-$(CONFIG_SCSI)		+= scsi/
 obj-$(CONFIG_ATA)		+= ata/
 obj-$(CONFIG_FUSION)		+= message/
+obj-$(CONFIG_FIREWIRE)		+= firewire/
 obj-$(CONFIG_IEEE1394)		+= ieee1394/
 obj-y				+= cdrom/
 obj-y				+= auxdisplay/
diff --git a/drivers/acpi/dispatcher/dsmethod.c b/drivers/acpi/dispatcher/dsmethod.c
index 1683e5c5b94c..1cbe61905824 100644
--- a/drivers/acpi/dispatcher/dsmethod.c
+++ b/drivers/acpi/dispatcher/dsmethod.c
@@ -231,8 +231,10 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
	 * Obtain the method mutex if necessary. Do not acquire mutex for a
	 * recursive call.
	 */
-	if (acpi_os_get_thread_id() !=
-	    obj_desc->method.mutex->mutex.owner_thread_id) {
+	if (!walk_state ||
+	    !obj_desc->method.mutex->mutex.owner_thread ||
+	    (walk_state->thread !=
+	     obj_desc->method.mutex->mutex.owner_thread)) {
 		/*
 		 * Acquire the method mutex. This releases the interpreter if we
 		 * block (and reacquires it before it returns)
@@ -246,14 +248,14 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
 		}
 
 		/* Update the mutex and walk info and save the original sync_level */
-		obj_desc->method.mutex->mutex.owner_thread_id =
-		    acpi_os_get_thread_id();
 
 		if (walk_state) {
 			obj_desc->method.mutex->mutex.
 			    original_sync_level =
 			    walk_state->thread->current_sync_level;
 
+			obj_desc->method.mutex->mutex.owner_thread =
+			    walk_state->thread;
 			walk_state->thread->current_sync_level =
 			    obj_desc->method.sync_level;
 		} else {
@@ -567,7 +569,7 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
 
 		acpi_os_release_mutex(method_desc->method.mutex->mutex.
 				      os_mutex);
-		method_desc->method.mutex->mutex.owner_thread_id = ACPI_MUTEX_NOT_ACQUIRED;
+		method_desc->method.mutex->mutex.owner_thread = NULL;
 	}
 }
 
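The rewritten ownership test above treats three cases as "must acquire": no walk state, an unowned method mutex, and a mutex owned by a different thread. A minimal standalone sketch of that decision logic, using illustrative stand-in types rather than the real ACPICA structures:

    /* Sketch only: thread_state and aml_mutex are hypothetical stand-ins
     * for ACPICA's acpi_thread_state and the method mutex object. */
    #include <stddef.h>

    struct thread_state { unsigned long thread_id; };
    struct aml_mutex { struct thread_state *owner_thread; };

    static int must_acquire_method_mutex(const struct thread_state *walk_thread,
                                         const struct aml_mutex *m)
    {
            /* Acquire unless the calling walk's thread already owns it */
            return walk_thread == NULL ||
                   m->owner_thread == NULL ||
                   walk_thread != m->owner_thread;
    }
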
diff --git a/drivers/acpi/dispatcher/dsopcode.c b/drivers/acpi/dispatcher/dsopcode.c
index 6c6104a7a247..fc9da4879cbf 100644
--- a/drivers/acpi/dispatcher/dsopcode.c
+++ b/drivers/acpi/dispatcher/dsopcode.c
@@ -866,8 +866,7 @@ acpi_ds_eval_data_object_operands(struct acpi_walk_state *walk_state,
 	    ((op->common.parent->common.aml_opcode != AML_PACKAGE_OP) &&
 	     (op->common.parent->common.aml_opcode !=
 	      AML_VAR_PACKAGE_OP)
-	     && (op->common.parent->common.aml_opcode !=
-		 AML_NAME_OP))) {
+	     && (op->common.parent->common.aml_opcode != AML_NAME_OP))) {
 		walk_state->result_obj = obj_desc;
 	}
 }
diff --git a/drivers/acpi/dispatcher/dsutils.c b/drivers/acpi/dispatcher/dsutils.c
index e4073e05a75c..71503c036f7c 100644
--- a/drivers/acpi/dispatcher/dsutils.c
+++ b/drivers/acpi/dispatcher/dsutils.c
@@ -556,10 +556,9 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
				 * indicate this to the interpreter, set the
				 * object to the root
				 */
-				obj_desc =
-				    ACPI_CAST_PTR(union
-						  acpi_operand_object,
-						  acpi_gbl_root_node);
+				obj_desc = ACPI_CAST_PTR(union
+							 acpi_operand_object,
+							 acpi_gbl_root_node);
 				status = AE_OK;
 			} else {
 				/*
diff --git a/drivers/acpi/dispatcher/dswstate.c b/drivers/acpi/dispatcher/dswstate.c
index 16c8e38b51ef..5afcdd9c7449 100644
--- a/drivers/acpi/dispatcher/dswstate.c
+++ b/drivers/acpi/dispatcher/dswstate.c
@@ -630,12 +630,9 @@ struct acpi_walk_state *acpi_ds_pop_walk_state(struct acpi_thread_state *thread)
 *
 ******************************************************************************/
 
-struct acpi_walk_state *acpi_ds_create_walk_state(acpi_owner_id owner_id,
-						  union acpi_parse_object
-						  *origin,
-						  union acpi_operand_object
-						  *method_desc,
-						  struct acpi_thread_state
+struct acpi_walk_state *acpi_ds_create_walk_state(acpi_owner_id owner_id, union acpi_parse_object
+						  *origin, union acpi_operand_object
+						  *method_desc, struct acpi_thread_state
 						  *thread)
 {
 	struct acpi_walk_state *walk_state;
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index e08cf98f504f..82f496c07675 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -147,9 +147,10 @@ static inline int acpi_ec_check_status(struct acpi_ec *ec, enum ec_event event,
 	return 0;
 }
 
-static int acpi_ec_wait(struct acpi_ec *ec, enum ec_event event, unsigned count)
+static int acpi_ec_wait(struct acpi_ec *ec, enum ec_event event,
+			unsigned count, int force_poll)
 {
-	if (acpi_ec_mode == EC_POLL) {
+	if (unlikely(force_poll) || acpi_ec_mode == EC_POLL) {
 		unsigned long delay = jiffies + msecs_to_jiffies(ACPI_EC_DELAY);
 		while (time_before(jiffies, delay)) {
 			if (acpi_ec_check_status(ec, event, 0))
@@ -173,14 +174,15 @@ static int acpi_ec_wait(struct acpi_ec *ec, enum ec_event event, unsigned count)
 
 static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, u8 command,
 					const u8 * wdata, unsigned wdata_len,
-					u8 * rdata, unsigned rdata_len)
+					u8 * rdata, unsigned rdata_len,
+					int force_poll)
 {
 	int result = 0;
 	unsigned count = atomic_read(&ec->event_count);
 	acpi_ec_write_cmd(ec, command);
 
 	for (; wdata_len > 0; --wdata_len) {
-		result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, count);
+		result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, count, force_poll);
 		if (result) {
 			printk(KERN_ERR PREFIX
 			       "write_cmd timeout, command = %d\n", command);
@@ -191,7 +193,7 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, u8 command,
 	}
 
 	if (!rdata_len) {
-		result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, count);
+		result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, count, force_poll);
 		if (result) {
 			printk(KERN_ERR PREFIX
 			       "finish-write timeout, command = %d\n", command);
@@ -202,7 +204,7 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, u8 command,
 	}
 
 	for (; rdata_len > 0; --rdata_len) {
-		result = acpi_ec_wait(ec, ACPI_EC_EVENT_OBF_1, count);
+		result = acpi_ec_wait(ec, ACPI_EC_EVENT_OBF_1, count, force_poll);
 		if (result) {
 			printk(KERN_ERR PREFIX "read timeout, command = %d\n",
 			       command);
@@ -217,7 +219,8 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, u8 command,
 
 static int acpi_ec_transaction(struct acpi_ec *ec, u8 command,
 			       const u8 * wdata, unsigned wdata_len,
-			       u8 * rdata, unsigned rdata_len)
+			       u8 * rdata, unsigned rdata_len,
+			       int force_poll)
 {
 	int status;
 	u32 glk;
@@ -240,7 +243,7 @@ static int acpi_ec_transaction(struct acpi_ec *ec, u8 command,
 	/* Make sure GPE is enabled before doing transaction */
 	acpi_enable_gpe(NULL, ec->gpe, ACPI_NOT_ISR);
 
-	status = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, 0);
+	status = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, 0, 0);
 	if (status) {
 		printk(KERN_DEBUG PREFIX
 		       "input buffer is not empty, aborting transaction\n");
@@ -249,7 +252,8 @@ static int acpi_ec_transaction(struct acpi_ec *ec, u8 command,
 
 	status = acpi_ec_transaction_unlocked(ec, command,
 					      wdata, wdata_len,
-					      rdata, rdata_len);
+					      rdata, rdata_len,
+					      force_poll);
 
       end:
 
@@ -267,12 +271,12 @@ static int acpi_ec_transaction(struct acpi_ec *ec, u8 command,
 int acpi_ec_burst_enable(struct acpi_ec *ec)
 {
 	u8 d;
-	return acpi_ec_transaction(ec, ACPI_EC_BURST_ENABLE, NULL, 0, &d, 1);
+	return acpi_ec_transaction(ec, ACPI_EC_BURST_ENABLE, NULL, 0, &d, 1, 0);
 }
 
 int acpi_ec_burst_disable(struct acpi_ec *ec)
 {
-	return acpi_ec_transaction(ec, ACPI_EC_BURST_DISABLE, NULL, 0, NULL, 0);
+	return acpi_ec_transaction(ec, ACPI_EC_BURST_DISABLE, NULL, 0, NULL, 0, 0);
 }
 
 static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 * data)
@@ -281,7 +285,7 @@ static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 * data)
 	u8 d;
 
 	result = acpi_ec_transaction(ec, ACPI_EC_COMMAND_READ,
-				     &address, 1, &d, 1);
+				     &address, 1, &d, 1, 0);
 	*data = d;
 	return result;
 }
@@ -290,7 +294,7 @@ static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data)
 {
 	u8 wdata[2] = { address, data };
 	return acpi_ec_transaction(ec, ACPI_EC_COMMAND_WRITE,
-				   wdata, 2, NULL, 0);
+				   wdata, 2, NULL, 0, 0);
 }
 
 /*
@@ -349,13 +353,15 @@ EXPORT_SYMBOL(ec_write);
 
 int ec_transaction(u8 command,
 		   const u8 * wdata, unsigned wdata_len,
-		   u8 * rdata, unsigned rdata_len)
+		   u8 * rdata, unsigned rdata_len,
+		   int force_poll)
 {
 	if (!first_ec)
 		return -ENODEV;
 
 	return acpi_ec_transaction(first_ec, command, wdata,
-				   wdata_len, rdata, rdata_len);
+				   wdata_len, rdata, rdata_len,
+				   force_poll);
 }
 
 EXPORT_SYMBOL(ec_transaction);
@@ -374,7 +380,7 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 * data)
 	 * bit to be cleared (and thus clearing the interrupt source).
 	 */
 
-	result = acpi_ec_transaction(ec, ACPI_EC_COMMAND_QUERY, NULL, 0, &d, 1);
+	result = acpi_ec_transaction(ec, ACPI_EC_COMMAND_QUERY, NULL, 0, &d, 1, 0);
 	if (result)
 		return result;
 
@@ -410,6 +416,7 @@ static u32 acpi_ec_gpe_handler(void *data)
 	acpi_status status = AE_OK;
 	u8 value;
 	struct acpi_ec *ec = data;
+
 	atomic_inc(&ec->event_count);
 
 	if (acpi_ec_mode == EC_INTR) {
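Every acpi_ec_transaction() call above gains a trailing force_poll flag; existing callers pass 0, and a non-zero value forces the polled wait path even when the driver runs in interrupt (EC_INTR) mode. A sketch of how an external caller might use the changed ec_transaction() export; the helper name and the open-coded 0x80 read command are illustrative, not part of this patch:

    #include <linux/types.h>

    /* Signature as changed by this patch (drivers/acpi/ec.c) */
    int ec_transaction(u8 command, const u8 *wdata, unsigned wdata_len,
                       u8 *rdata, unsigned rdata_len, int force_poll);

    /* Hypothetical helper: read one EC register, always polling.
     * 0x80 mirrors ACPI_EC_COMMAND_READ in ec.c. */
    static int ec_read_polled(u8 address, u8 *data)
    {
            return ec_transaction(0x80, &address, 1, data, 1, 1);
    }
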
diff --git a/drivers/acpi/events/evgpe.c b/drivers/acpi/events/evgpe.c
index 635ba449ebc2..e22f4a973c0f 100644
--- a/drivers/acpi/events/evgpe.c
+++ b/drivers/acpi/events/evgpe.c
@@ -341,9 +341,8 @@ struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
 
 	/* A Non-NULL gpe_device means this is a GPE Block Device */
 
-	obj_desc =
-	    acpi_ns_get_attached_object((struct acpi_namespace_node *)
-					gpe_device);
+	obj_desc = acpi_ns_get_attached_object((struct acpi_namespace_node *)
+					       gpe_device);
 	if (!obj_desc || !obj_desc->device.gpe_block) {
 		return (NULL);
 	}
diff --git a/drivers/acpi/events/evgpeblk.c b/drivers/acpi/events/evgpeblk.c
index ad5bc76edf46..902c287b3a4f 100644
--- a/drivers/acpi/events/evgpeblk.c
+++ b/drivers/acpi/events/evgpeblk.c
@@ -1033,8 +1033,7 @@ acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
 
 			if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
 			     ACPI_GPE_DISPATCH_METHOD)
-			    && (gpe_event_info->
-				flags & ACPI_GPE_TYPE_RUNTIME)) {
+			    && (gpe_event_info->flags & ACPI_GPE_TYPE_RUNTIME)) {
 				gpe_enabled_count++;
 			}
 
diff --git a/drivers/acpi/events/evmisc.c b/drivers/acpi/events/evmisc.c
index cae786ca8600..21cb749d0c75 100644
--- a/drivers/acpi/events/evmisc.c
+++ b/drivers/acpi/events/evmisc.c
@@ -196,15 +196,12 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node * node,
 		notify_info->notify.value = (u16) notify_value;
 		notify_info->notify.handler_obj = handler_obj;
 
-		acpi_ex_exit_interpreter();
-
-		acpi_ev_notify_dispatch(notify_info);
-
-		status = acpi_ex_enter_interpreter();
+		status =
+		    acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_ev_notify_dispatch,
+				    notify_info);
 		if (ACPI_FAILURE(status)) {
-			return_ACPI_STATUS(status);
+			acpi_ut_delete_generic_state(notify_info);
 		}
-
 	}
 
 	if (!handler_obj) {
@@ -323,8 +320,9 @@ static u32 acpi_ev_global_lock_handler(void *context)
 		acpi_gbl_global_lock_acquired = TRUE;
 		/* Send a unit to the semaphore */
 
-		if (ACPI_FAILURE(acpi_os_signal_semaphore(
-				 acpi_gbl_global_lock_semaphore, 1))) {
+		if (ACPI_FAILURE
+		    (acpi_os_signal_semaphore
+		     (acpi_gbl_global_lock_semaphore, 1))) {
 			ACPI_ERROR((AE_INFO,
 				    "Could not signal Global Lock semaphore"));
 		}
@@ -450,7 +448,9 @@ acpi_status acpi_ev_acquire_global_lock(u16 timeout)
 	}
 
 	if (ACPI_FAILURE(status)) {
-		status = acpi_ex_system_wait_mutex(acpi_gbl_global_lock_mutex, timeout);
+		status =
+		    acpi_ex_system_wait_mutex(acpi_gbl_global_lock_mutex,
+					      timeout);
 	}
 	if (ACPI_FAILURE(status)) {
 		return_ACPI_STATUS(status);
diff --git a/drivers/acpi/events/evregion.c b/drivers/acpi/events/evregion.c
index 96b0e8431748..e99f0c435a47 100644
--- a/drivers/acpi/events/evregion.c
+++ b/drivers/acpi/events/evregion.c
@@ -291,7 +291,6 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
 			       u32 bit_width, acpi_integer * value)
 {
 	acpi_status status;
-	acpi_status status2;
 	acpi_adr_space_handler handler;
 	acpi_adr_space_setup region_setup;
 	union acpi_operand_object *handler_desc;
@@ -345,7 +344,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
 		 * setup will potentially execute control methods
 		 * (e.g., _REG method for this region)
 		 */
-		acpi_ex_exit_interpreter();
+		acpi_ex_relinquish_interpreter();
 
 		status = region_setup(region_obj, ACPI_REGION_ACTIVATE,
 				      handler_desc->address_space.context,
@@ -353,10 +352,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
 
 		/* Re-enter the interpreter */
 
-		status2 = acpi_ex_enter_interpreter();
-		if (ACPI_FAILURE(status2)) {
-			return_ACPI_STATUS(status2);
-		}
+		acpi_ex_reacquire_interpreter();
 
 		/* Check for failure of the Region Setup */
 
@@ -409,7 +405,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
 	 * exit the interpreter because the handler *might* block -- we don't
 	 * know what it will do, so we can't hold the lock on the intepreter.
 	 */
-	acpi_ex_exit_interpreter();
+	acpi_ex_relinquish_interpreter();
 	}
 
 	/* Call the handler */
@@ -430,10 +426,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
 		 * We just returned from a non-default handler, we must re-enter the
 		 * interpreter
 		 */
-		status2 = acpi_ex_enter_interpreter();
-		if (ACPI_FAILURE(status2)) {
-			return_ACPI_STATUS(status2);
-		}
+		acpi_ex_reacquire_interpreter();
 	}
 
 	return_ACPI_STATUS(status);
diff --git a/drivers/acpi/events/evrgnini.c b/drivers/acpi/events/evrgnini.c
index a4fa7e6822a3..400d90fca966 100644
--- a/drivers/acpi/events/evrgnini.c
+++ b/drivers/acpi/events/evrgnini.c
@@ -228,7 +228,8 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
 
 		/* Install a handler for this PCI root bridge */
 
-		status = acpi_install_address_space_handler((acpi_handle) pci_root_node, ACPI_ADR_SPACE_PCI_CONFIG, ACPI_DEFAULT_HANDLER, NULL, NULL);
+		status =
+		    acpi_install_address_space_handler((acpi_handle) pci_root_node, ACPI_ADR_SPACE_PCI_CONFIG, ACPI_DEFAULT_HANDLER, NULL, NULL);
 		if (ACPI_FAILURE(status)) {
 			if (status == AE_SAME_HANDLER) {
 				/*
diff --git a/drivers/acpi/events/evxface.c b/drivers/acpi/events/evxface.c
index a3379bafa676..6d866a01f5f4 100644
--- a/drivers/acpi/events/evxface.c
+++ b/drivers/acpi/events/evxface.c
@@ -91,7 +91,6 @@ acpi_status acpi_install_exception_handler(acpi_exception_handler handler)
 
 ACPI_EXPORT_SYMBOL(acpi_install_exception_handler)
 #endif				/* ACPI_FUTURE_USAGE */
-
 /*******************************************************************************
  *
  * FUNCTION:    acpi_install_fixed_event_handler
@@ -768,11 +767,9 @@ acpi_status acpi_acquire_global_lock(u16 timeout, u32 * handle)
 		return (AE_BAD_PARAMETER);
 	}
 
-	status = acpi_ex_enter_interpreter();
-	if (ACPI_FAILURE(status)) {
-		return (status);
-	}
+	/* Must lock interpreter to prevent race conditions */
 
+	acpi_ex_enter_interpreter();
 	status = acpi_ev_acquire_global_lock(timeout);
 	acpi_ex_exit_interpreter();
 
diff --git a/drivers/acpi/events/evxfevnt.c b/drivers/acpi/events/evxfevnt.c
index 17065e98807c..9cbd3414a574 100644
--- a/drivers/acpi/events/evxfevnt.c
+++ b/drivers/acpi/events/evxfevnt.c
@@ -472,7 +472,6 @@ acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags)
 }
 
 ACPI_EXPORT_SYMBOL(acpi_clear_gpe)
-
 #ifdef ACPI_FUTURE_USAGE
 /*******************************************************************************
  *
@@ -568,7 +567,6 @@ acpi_get_gpe_status(acpi_handle gpe_device,
 
 ACPI_EXPORT_SYMBOL(acpi_get_gpe_status)
 #endif				/* ACPI_FUTURE_USAGE */
-
 /*******************************************************************************
  *
  * FUNCTION:    acpi_install_gpe_block
diff --git a/drivers/acpi/executer/exconvrt.c b/drivers/acpi/executer/exconvrt.c
index d470e8b1f4ea..79f2c0d42c06 100644
--- a/drivers/acpi/executer/exconvrt.c
+++ b/drivers/acpi/executer/exconvrt.c
@@ -512,9 +512,8 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc,
 		 * Create a new string object and string buffer
 		 * (-1 because of extra separator included in string_length from above)
 		 */
-		return_desc =
-		    acpi_ut_create_string_object((acpi_size)
-						 (string_length - 1));
+		return_desc = acpi_ut_create_string_object((acpi_size)
+							   (string_length - 1));
 		if (!return_desc) {
 			return_ACPI_STATUS(AE_NO_MEMORY);
 		}
diff --git a/drivers/acpi/executer/excreate.c b/drivers/acpi/executer/excreate.c
index ae97812681a3..6e9a23e47fef 100644
--- a/drivers/acpi/executer/excreate.c
+++ b/drivers/acpi/executer/excreate.c
@@ -50,7 +50,6 @@
 
 #define _COMPONENT          ACPI_EXECUTER
 ACPI_MODULE_NAME("excreate")
-
 #ifndef ACPI_NO_METHOD_EXECUTION
 /*******************************************************************************
  *
@@ -583,10 +582,7 @@ acpi_ex_create_method(u8 * aml_start,
 	 * Get the sync_level. If method is serialized, a mutex will be
 	 * created for this method when it is parsed.
 	 */
-	if (acpi_gbl_all_methods_serialized) {
-		obj_desc->method.sync_level = 0;
-		obj_desc->method.method_flags |= AML_METHOD_SERIALIZED;
-	} else if (method_flags & AML_METHOD_SERIALIZED) {
+	if (method_flags & AML_METHOD_SERIALIZED) {
 		/*
 		 * ACPI 1.0: sync_level = 0
 		 * ACPI 2.0: sync_level = sync_level in method declaration
diff --git a/drivers/acpi/executer/exdump.c b/drivers/acpi/executer/exdump.c
index 1a73c14df2c5..51c9c29987c3 100644
--- a/drivers/acpi/executer/exdump.c
+++ b/drivers/acpi/executer/exdump.c
@@ -134,7 +134,7 @@ static struct acpi_exdump_info acpi_ex_dump_method[8] = {
 static struct acpi_exdump_info acpi_ex_dump_mutex[5] = {
 	{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_mutex), NULL},
 	{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(mutex.sync_level), "Sync Level"},
-	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(mutex.owner_thread_id), "Owner Thread"},
+	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(mutex.owner_thread), "Owner Thread"},
 	{ACPI_EXD_UINT16, ACPI_EXD_OFFSET(mutex.acquisition_depth),
 	 "Acquire Depth"},
 	{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(mutex.os_mutex), "OsMutex"}
@@ -451,9 +451,8 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
 
 	ACPI_FUNCTION_NAME(ex_dump_operand)
 
-	if (!
-	    ((ACPI_LV_EXEC & acpi_dbg_level)
-	     && (_COMPONENT & acpi_dbg_layer))) {
+	if (!((ACPI_LV_EXEC & acpi_dbg_level)
+	      && (_COMPONENT & acpi_dbg_layer))) {
 		return;
 	}
 
@@ -844,9 +843,8 @@ void acpi_ex_dump_namespace_node(struct acpi_namespace_node *node, u32 flags)
 	ACPI_FUNCTION_ENTRY();
 
 	if (!flags) {
-		if (!
-		    ((ACPI_LV_OBJECTS & acpi_dbg_level)
-		     && (_COMPONENT & acpi_dbg_layer))) {
+		if (!((ACPI_LV_OBJECTS & acpi_dbg_level)
+		      && (_COMPONENT & acpi_dbg_layer))) {
 			return;
 		}
 	}
@@ -1011,9 +1009,8 @@ acpi_ex_dump_object_descriptor(union acpi_operand_object *obj_desc, u32 flags)
 	}
 
 	if (!flags) {
-		if (!
-		    ((ACPI_LV_OBJECTS & acpi_dbg_level)
-		     && (_COMPONENT & acpi_dbg_layer))) {
+		if (!((ACPI_LV_OBJECTS & acpi_dbg_level)
+		      && (_COMPONENT & acpi_dbg_layer))) {
 			return_VOID;
 		}
 	}
diff --git a/drivers/acpi/executer/exmutex.c b/drivers/acpi/executer/exmutex.c
index 4eb883bda6ae..6748e3ef0997 100644
--- a/drivers/acpi/executer/exmutex.c
+++ b/drivers/acpi/executer/exmutex.c
@@ -66,9 +66,10 @@ acpi_ex_link_mutex(union acpi_operand_object *obj_desc,
 *
 ******************************************************************************/
 
-void acpi_ex_unlink_mutex(union acpi_operand_object *obj_desc,
-			  struct acpi_thread_state *thread)
+void acpi_ex_unlink_mutex(union acpi_operand_object *obj_desc)
 {
+	struct acpi_thread_state *thread = obj_desc->mutex.owner_thread;
+
 	if (!thread) {
 		return;
 	}
@@ -173,13 +174,16 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
 
 	/* Support for multiple acquires by the owning thread */
 
-	if (obj_desc->mutex.owner_thread_id == acpi_os_get_thread_id()) {
-		/*
-		 * The mutex is already owned by this thread, just increment the
-		 * acquisition depth
-		 */
-		obj_desc->mutex.acquisition_depth++;
-		return_ACPI_STATUS(AE_OK);
+	if (obj_desc->mutex.owner_thread) {
+		if (obj_desc->mutex.owner_thread->thread_id ==
+		    walk_state->thread->thread_id) {
+			/*
+			 * The mutex is already owned by this thread, just increment the
+			 * acquisition depth
+			 */
+			obj_desc->mutex.acquisition_depth++;
+			return_ACPI_STATUS(AE_OK);
+		}
 	}
 
 	/* Acquire the mutex, wait if necessary. Special case for Global Lock */
@@ -202,7 +206,7 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
 
 	/* Have the mutex: update mutex and walk info and save the sync_level */
 
-	obj_desc->mutex.owner_thread_id = acpi_os_get_thread_id();
+	obj_desc->mutex.owner_thread = walk_state->thread;
 	obj_desc->mutex.acquisition_depth = 1;
 	obj_desc->mutex.original_sync_level =
 	    walk_state->thread->current_sync_level;
@@ -242,7 +246,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
 
 	/* The mutex must have been previously acquired in order to release it */
 
-	if (!obj_desc->mutex.owner_thread_id) {
+	if (!obj_desc->mutex.owner_thread) {
 		ACPI_ERROR((AE_INFO,
 			    "Cannot release Mutex [%4.4s], not acquired",
 			    acpi_ut_get_node_name(obj_desc->mutex.node)));
@@ -262,14 +266,15 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
 	 * The Mutex is owned, but this thread must be the owner.
 	 * Special case for Global Lock, any thread can release
 	 */
-	if ((obj_desc->mutex.owner_thread_id !=
+	if ((obj_desc->mutex.owner_thread->thread_id !=
 	     walk_state->thread->thread_id)
 	    && (obj_desc->mutex.os_mutex != acpi_gbl_global_lock_mutex)) {
 		ACPI_ERROR((AE_INFO,
 			    "Thread %lX cannot release Mutex [%4.4s] acquired by thread %lX",
 			    (unsigned long)walk_state->thread->thread_id,
 			    acpi_ut_get_node_name(obj_desc->mutex.node),
-			    (unsigned long)obj_desc->mutex.owner_thread_id));
+			    (unsigned long)obj_desc->mutex.owner_thread->
+			    thread_id));
 		return_ACPI_STATUS(AE_AML_NOT_OWNER);
 	}
 
@@ -296,7 +301,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
 
 	/* Unlink the mutex from the owner's list */
 
-	acpi_ex_unlink_mutex(obj_desc, walk_state->thread);
+	acpi_ex_unlink_mutex(obj_desc);
 
 	/* Release the mutex, special case for Global Lock */
 
@@ -308,7 +313,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
 
 	/* Update the mutex and restore sync_level */
 
-	obj_desc->mutex.owner_thread_id = ACPI_MUTEX_NOT_ACQUIRED;
+	obj_desc->mutex.owner_thread = NULL;
 	walk_state->thread->current_sync_level =
 	    obj_desc->mutex.original_sync_level;
 
@@ -363,7 +368,7 @@ void acpi_ex_release_all_mutexes(struct acpi_thread_state *thread)
 
 		/* Mark mutex unowned */
 
-		obj_desc->mutex.owner_thread_id = ACPI_MUTEX_NOT_ACQUIRED;
+		obj_desc->mutex.owner_thread = NULL;
 
 		/* Update Thread sync_level (Last mutex is the important one) */
 
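The exmutex.c hunks replace the owner_thread_id comparison with a pointer to the owning acpi_thread_state, so a recursive acquire by the owner only bumps the depth counter while everything else falls through to a real blocking acquire. A condensed sketch of that flow, again with hypothetical stand-in types:

    #include <stddef.h>

    /* Stand-ins for acpi_thread_state and the AML mutex object */
    struct thread { unsigned long thread_id; };
    struct aml_mutex {
            struct thread *owner_thread;       /* NULL when unowned */
            unsigned short acquisition_depth;
    };

    static void acquire_aml_mutex(struct aml_mutex *m, struct thread *t)
    {
            if (m->owner_thread &&
                m->owner_thread->thread_id == t->thread_id) {
                    m->acquisition_depth++;    /* recursive acquire, no block */
                    return;
            }
            /* ... block on the underlying OS mutex here ... */
            m->owner_thread = t;
            m->acquisition_depth = 1;
    }
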
diff --git a/drivers/acpi/executer/exnames.c b/drivers/acpi/executer/exnames.c
index 1ee4fb1175c6..308eae52dc05 100644
--- a/drivers/acpi/executer/exnames.c
+++ b/drivers/acpi/executer/exnames.c
@@ -177,8 +177,7 @@ static acpi_status acpi_ex_name_segment(u8 ** in_aml_address, char *name_string)
 
 	ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "Bytes from stream:\n"));
 
-	for (index = 0;
-	     (index < ACPI_NAME_SIZE)
+	for (index = 0; (index < ACPI_NAME_SIZE)
 	     && (acpi_ut_valid_acpi_char(*aml_address, 0)); index++) {
 		char_buf[index] = *aml_address++;
 		ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "%c\n", char_buf[index]));
diff --git a/drivers/acpi/executer/exprep.c b/drivers/acpi/executer/exprep.c
index a6696621ff1b..efe5d4b461a4 100644
--- a/drivers/acpi/executer/exprep.c
+++ b/drivers/acpi/executer/exprep.c
@@ -242,7 +242,7 @@ acpi_ex_decode_field_access(union acpi_operand_object *obj_desc,
 					       obj_desc->common_field.bit_length,
 					       0xFFFFFFFF
 					       /* Temp until we pass region_length as parameter */
-	    );
+		);
 	bit_length = byte_alignment * 8;
 #endif
 
diff --git a/drivers/acpi/executer/exresop.c b/drivers/acpi/executer/exresop.c
index ba761862a599..09d897b3f6d5 100644
--- a/drivers/acpi/executer/exresop.c
+++ b/drivers/acpi/executer/exresop.c
@@ -354,8 +354,7 @@ acpi_ex_resolve_operands(u16 opcode,
 			if ((opcode == AML_STORE_OP) &&
 			    (ACPI_GET_OBJECT_TYPE(*stack_ptr) ==
 			     ACPI_TYPE_LOCAL_REFERENCE)
-			    && ((*stack_ptr)->reference.opcode ==
-				AML_INDEX_OP)) {
+			    && ((*stack_ptr)->reference.opcode == AML_INDEX_OP)) {
 				goto next_operand;
 			}
 			break;
diff --git a/drivers/acpi/executer/exsystem.c b/drivers/acpi/executer/exsystem.c
index b2edf620ba89..9460baff3032 100644
--- a/drivers/acpi/executer/exsystem.c
+++ b/drivers/acpi/executer/exsystem.c
@@ -66,7 +66,6 @@ ACPI_MODULE_NAME("exsystem")
 acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout)
 {
 	acpi_status status;
-	acpi_status status2;
 
 	ACPI_FUNCTION_TRACE(ex_system_wait_semaphore);
 
@@ -79,7 +78,7 @@ acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout)
 
 		/* We must wait, so unlock the interpreter */
 
-		acpi_ex_exit_interpreter();
+		acpi_ex_relinquish_interpreter();
 
 		status = acpi_os_wait_semaphore(semaphore, 1, timeout);
 
@@ -89,13 +88,7 @@ acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout)
 
 		/* Reacquire the interpreter */
 
-		status2 = acpi_ex_enter_interpreter();
-		if (ACPI_FAILURE(status2)) {
-
-			/* Report fatal error, could not acquire interpreter */
-
-			return_ACPI_STATUS(status2);
-		}
+		acpi_ex_reacquire_interpreter();
 	}
 
 	return_ACPI_STATUS(status);
@@ -119,7 +112,6 @@ acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout)
 acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout)
 {
 	acpi_status status;
-	acpi_status status2;
 
 	ACPI_FUNCTION_TRACE(ex_system_wait_mutex);
 
@@ -132,7 +124,7 @@ acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout)
 
 		/* We must wait, so unlock the interpreter */
 
-		acpi_ex_exit_interpreter();
+		acpi_ex_relinquish_interpreter();
 
 		status = acpi_os_acquire_mutex(mutex, timeout);
 
@@ -142,13 +134,7 @@ acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout)
 
 		/* Reacquire the interpreter */
 
-		status2 = acpi_ex_enter_interpreter();
-		if (ACPI_FAILURE(status2)) {
-
-			/* Report fatal error, could not acquire interpreter */
-
-			return_ACPI_STATUS(status2);
-		}
+		acpi_ex_reacquire_interpreter();
 	}
 
 	return_ACPI_STATUS(status);
@@ -209,20 +195,18 @@ acpi_status acpi_ex_system_do_stall(u32 how_long)
 
 acpi_status acpi_ex_system_do_suspend(acpi_integer how_long)
 {
-	acpi_status status;
-
 	ACPI_FUNCTION_ENTRY();
 
 	/* Since this thread will sleep, we must release the interpreter */
 
-	acpi_ex_exit_interpreter();
+	acpi_ex_relinquish_interpreter();
 
 	acpi_os_sleep(how_long);
 
 	/* And now we must get the interpreter again */
 
-	status = acpi_ex_enter_interpreter();
-	return (status);
+	acpi_ex_reacquire_interpreter();
+	return (AE_OK);
 }
 
 /*******************************************************************************
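All three wait paths above now follow the same pattern: relinquish the interpreter around anything that can block, then reacquire it unconditionally, since the new reacquire call cannot fail back to the caller (a failure is treated as fatal and only logged). A sketch of the pattern; the entry-point names match the new ACPICA API, while acpi_os_sleep's argument type is approximated:

    /* New ACPICA entry points introduced by this series */
    void acpi_ex_relinquish_interpreter(void);
    void acpi_ex_reacquire_interpreter(void);
    void acpi_os_sleep(unsigned long long ms); /* acpi_integer approximated */

    static void blocking_region_example(unsigned long long ms)
    {
            acpi_ex_relinquish_interpreter();  /* kept if methods serialized */
            acpi_os_sleep(ms);                 /* safe to block here */
            acpi_ex_reacquire_interpreter();   /* always returns; logs on failure */
    }
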
diff --git a/drivers/acpi/executer/exutils.c b/drivers/acpi/executer/exutils.c
index aea461f3a48c..6b0aeccbb69b 100644
--- a/drivers/acpi/executer/exutils.c
+++ b/drivers/acpi/executer/exutils.c
@@ -76,14 +76,15 @@ static u32 acpi_ex_digits_needed(acpi_integer value, u32 base);
  *
  * PARAMETERS:  None
  *
- * RETURN:      Status
+ * RETURN:      None
  *
  * DESCRIPTION: Enter the interpreter execution region. Failure to enter
- *              the interpreter region is a fatal system error
+ *              the interpreter region is a fatal system error. Used in
+ *              conjunction with exit_interpreter.
  *
  ******************************************************************************/
 
-acpi_status acpi_ex_enter_interpreter(void)
+void acpi_ex_enter_interpreter(void)
 {
 	acpi_status status;
 
@@ -91,31 +92,55 @@ acpi_status acpi_ex_enter_interpreter(void)
91 92
92 status = acpi_ut_acquire_mutex(ACPI_MTX_INTERPRETER); 93 status = acpi_ut_acquire_mutex(ACPI_MTX_INTERPRETER);
93 if (ACPI_FAILURE(status)) { 94 if (ACPI_FAILURE(status)) {
94 ACPI_ERROR((AE_INFO, "Could not acquire interpreter mutex")); 95 ACPI_ERROR((AE_INFO,
96 "Could not acquire AML Interpreter mutex"));
95 } 97 }
96 98
97 return_ACPI_STATUS(status); 99 return_VOID;
98} 100}
99 101
100/******************************************************************************* 102/*******************************************************************************
101 * 103 *
102 * FUNCTION: acpi_ex_exit_interpreter 104 * FUNCTION: acpi_ex_reacquire_interpreter
103 * 105 *
104 * PARAMETERS: None 106 * PARAMETERS: None
105 * 107 *
106 * RETURN: None 108 * RETURN: None
107 * 109 *
108 * DESCRIPTION: Exit the interpreter execution region 110 * DESCRIPTION: Reacquire the interpreter execution region from within the
111 * interpreter code. Failure to enter the interpreter region is a
 112                   fatal system error. Used in conjunction with
113 * relinquish_interpreter
114 *
115 ******************************************************************************/
116
117void acpi_ex_reacquire_interpreter(void)
118{
119 ACPI_FUNCTION_TRACE(ex_reacquire_interpreter);
120
121 /*
122 * If the global serialized flag is set, do not release the interpreter,
123 * since it was not actually released by acpi_ex_relinquish_interpreter.
124 * This forces the interpreter to be single threaded.
125 */
126 if (!acpi_gbl_all_methods_serialized) {
127 acpi_ex_enter_interpreter();
128 }
129
130 return_VOID;
131}
132
133/*******************************************************************************
134 *
135 * FUNCTION: acpi_ex_exit_interpreter
136 *
137 * PARAMETERS: None
138 *
139 * RETURN: None
109 * 140 *
110 * Cases where the interpreter is unlocked: 141 * DESCRIPTION: Exit the interpreter execution region. This is the top level
111 * 1) Completion of the execution of a control method 142 * routine used to exit the interpreter when all processing has
112 * 2) Method blocked on a Sleep() AML opcode 143 * been completed.
113 * 3) Method blocked on an Acquire() AML opcode
114 * 4) Method blocked on a Wait() AML opcode
115 * 5) Method blocked to acquire the global lock
116 * 6) Method blocked to execute a serialized control method that is
117 * already executing
118 * 7) About to invoke a user-installed opregion handler
119 * 144 *
120 ******************************************************************************/ 145 ******************************************************************************/
121 146
@@ -127,7 +152,46 @@ void acpi_ex_exit_interpreter(void)
127 152
128 status = acpi_ut_release_mutex(ACPI_MTX_INTERPRETER); 153 status = acpi_ut_release_mutex(ACPI_MTX_INTERPRETER);
129 if (ACPI_FAILURE(status)) { 154 if (ACPI_FAILURE(status)) {
130 ACPI_ERROR((AE_INFO, "Could not release interpreter mutex")); 155 ACPI_ERROR((AE_INFO,
156 "Could not release AML Interpreter mutex"));
157 }
158
159 return_VOID;
160}
161
162/*******************************************************************************
163 *
164 * FUNCTION: acpi_ex_relinquish_interpreter
165 *
166 * PARAMETERS: None
167 *
168 * RETURN: None
169 *
170 * DESCRIPTION: Exit the interpreter execution region, from within the
171 * interpreter - before attempting an operation that will possibly
172 * block the running thread.
173 *
 174 * Cases where the interpreter is unlocked internally:
175 * 1) Method to be blocked on a Sleep() AML opcode
176 * 2) Method to be blocked on an Acquire() AML opcode
177 * 3) Method to be blocked on a Wait() AML opcode
178 * 4) Method to be blocked to acquire the global lock
179 * 5) Method to be blocked waiting to execute a serialized control method
180 * that is currently executing
181 * 6) About to invoke a user-installed opregion handler
182 *
183 ******************************************************************************/
184
185void acpi_ex_relinquish_interpreter(void)
186{
187 ACPI_FUNCTION_TRACE(ex_relinquish_interpreter);
188
189 /*
190 * If the global serialized flag is set, do not release the interpreter.
191 * This forces the interpreter to be single threaded.
192 */
193 if (!acpi_gbl_all_methods_serialized) {
194 acpi_ex_exit_interpreter();
131 } 195 }
132 196
133 return_VOID; 197 return_VOID;
@@ -141,8 +205,8 @@ void acpi_ex_exit_interpreter(void)
141 * 205 *
142 * RETURN: none 206 * RETURN: none
143 * 207 *
144 * DESCRIPTION: Truncate a number to 32-bits if the currently executing method 208 * DESCRIPTION: Truncate an ACPI Integer to 32 bits if the execution mode is
145 * belongs to a 32-bit ACPI table. 209 * 32-bit, as determined by the revision of the DSDT.
146 * 210 *
147 ******************************************************************************/ 211 ******************************************************************************/
148 212
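The reworded description in the final hunk refers to the ACPI rule that integer width follows the DSDT revision: a table at revision < 2 (ACPI 1.0) defines Integer as 32 bits. A self-contained sketch of that rule (names are illustrative, not ACPICA's):

    #include <stdint.h>

    /* ACPI integers are held in 64 bits internally; for a revision < 2
     * DSDT the table was written against 32-bit integers, so computed
     * values must be masked down before use. */
    static uint64_t truncate_for_32bit_table(uint64_t value, uint8_t dsdt_revision)
    {
            if (dsdt_revision < 2)
                    value &= 0xFFFFFFFFull;
            return value;
    }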
diff --git a/drivers/acpi/hardware/hwsleep.c b/drivers/acpi/hardware/hwsleep.c
index c84b1faba28c..76c525dc590b 100644
--- a/drivers/acpi/hardware/hwsleep.c
+++ b/drivers/acpi/hardware/hwsleep.c
@@ -152,7 +152,6 @@ acpi_get_firmware_waking_vector(acpi_physical_address * physical_address)
152 152
153ACPI_EXPORT_SYMBOL(acpi_get_firmware_waking_vector) 153ACPI_EXPORT_SYMBOL(acpi_get_firmware_waking_vector)
154#endif 154#endif
155
156/******************************************************************************* 155/*******************************************************************************
157 * 156 *
158 * FUNCTION: acpi_enter_sleep_state_prep 157 * FUNCTION: acpi_enter_sleep_state_prep
diff --git a/drivers/acpi/namespace/nseval.c b/drivers/acpi/namespace/nseval.c
index 26fd0dd6953d..97b2ac57c16b 100644
--- a/drivers/acpi/namespace/nseval.c
+++ b/drivers/acpi/namespace/nseval.c
@@ -75,7 +75,7 @@ ACPI_MODULE_NAME("nseval")
75 * MUTEX: Locks interpreter 75 * MUTEX: Locks interpreter
76 * 76 *
77 ******************************************************************************/ 77 ******************************************************************************/
78acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info) 78acpi_status acpi_ns_evaluate(struct acpi_evaluate_info * info)
79{ 79{
80 acpi_status status; 80 acpi_status status;
81 81
@@ -154,11 +154,7 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info)
154 * Execute the method via the interpreter. The interpreter is locked 154 * Execute the method via the interpreter. The interpreter is locked
155 * here before calling into the AML parser 155 * here before calling into the AML parser
156 */ 156 */
157 status = acpi_ex_enter_interpreter(); 157 acpi_ex_enter_interpreter();
158 if (ACPI_FAILURE(status)) {
159 return_ACPI_STATUS(status);
160 }
161
162 status = acpi_ps_execute_method(info); 158 status = acpi_ps_execute_method(info);
163 acpi_ex_exit_interpreter(); 159 acpi_ex_exit_interpreter();
164 } else { 160 } else {
@@ -182,10 +178,7 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info)
182 * resolution, we must lock it because we could access an opregion. 178 * resolution, we must lock it because we could access an opregion.
183 * The opregion access code assumes that the interpreter is locked. 179 * The opregion access code assumes that the interpreter is locked.
184 */ 180 */
185 status = acpi_ex_enter_interpreter(); 181 acpi_ex_enter_interpreter();
186 if (ACPI_FAILURE(status)) {
187 return_ACPI_STATUS(status);
188 }
189 182
190 /* Function has a strange interface */ 183 /* Function has a strange interface */
191 184
diff --git a/drivers/acpi/namespace/nsinit.c b/drivers/acpi/namespace/nsinit.c
index c4ab615f77fe..33db2241044e 100644
--- a/drivers/acpi/namespace/nsinit.c
+++ b/drivers/acpi/namespace/nsinit.c
@@ -214,7 +214,7 @@ acpi_ns_init_one_object(acpi_handle obj_handle,
214 u32 level, void *context, void **return_value) 214 u32 level, void *context, void **return_value)
215{ 215{
216 acpi_object_type type; 216 acpi_object_type type;
217 acpi_status status; 217 acpi_status status = AE_OK;
218 struct acpi_init_walk_info *info = 218 struct acpi_init_walk_info *info =
219 (struct acpi_init_walk_info *)context; 219 (struct acpi_init_walk_info *)context;
220 struct acpi_namespace_node *node = 220 struct acpi_namespace_node *node =
@@ -268,10 +268,7 @@ acpi_ns_init_one_object(acpi_handle obj_handle,
268 /* 268 /*
269 * Must lock the interpreter before executing AML code 269 * Must lock the interpreter before executing AML code
270 */ 270 */
271 status = acpi_ex_enter_interpreter(); 271 acpi_ex_enter_interpreter();
272 if (ACPI_FAILURE(status)) {
273 return (status);
274 }
275 272
276 /* 273 /*
277 * Each of these types can contain executable AML code within the 274 * Each of these types can contain executable AML code within the
diff --git a/drivers/acpi/namespace/nswalk.c b/drivers/acpi/namespace/nswalk.c
index 94eb8f332d94..280b8357c46c 100644
--- a/drivers/acpi/namespace/nswalk.c
+++ b/drivers/acpi/namespace/nswalk.c
@@ -65,10 +65,8 @@ ACPI_MODULE_NAME("nswalk")
65 * within Scope is returned. 65 * within Scope is returned.
66 * 66 *
67 ******************************************************************************/ 67 ******************************************************************************/
68struct acpi_namespace_node *acpi_ns_get_next_node(acpi_object_type type, 68struct acpi_namespace_node *acpi_ns_get_next_node(acpi_object_type type, struct acpi_namespace_node
69 struct acpi_namespace_node 69 *parent_node, struct acpi_namespace_node
70 *parent_node,
71 struct acpi_namespace_node
72 *child_node) 70 *child_node)
73{ 71{
74 struct acpi_namespace_node *next_node = NULL; 72 struct acpi_namespace_node *next_node = NULL;
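The hunk above only re-wraps the signature. For reference, this accessor is normally consumed as a NULL-seeded iteration over a parent's children (hedged sketch; the loop body is hypothetical):

    struct acpi_namespace_node *child = NULL;

    /* NULL on the first call yields the first child; feeding each
     * result back in advances along the peer list until NULL. */
    while ((child = acpi_ns_get_next_node(ACPI_TYPE_ANY, parent_node, child))) {
            /* ... examine child ... */
    }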
diff --git a/drivers/acpi/namespace/nsxfeval.c b/drivers/acpi/namespace/nsxfeval.c
index 8904d0fae6a2..be4f2899de74 100644
--- a/drivers/acpi/namespace/nsxfeval.c
+++ b/drivers/acpi/namespace/nsxfeval.c
@@ -48,7 +48,6 @@
48 48
49#define _COMPONENT ACPI_NAMESPACE 49#define _COMPONENT ACPI_NAMESPACE
50ACPI_MODULE_NAME("nsxfeval") 50ACPI_MODULE_NAME("nsxfeval")
51
52#ifdef ACPI_FUTURE_USAGE 51#ifdef ACPI_FUTURE_USAGE
53/******************************************************************************* 52/*******************************************************************************
54 * 53 *
@@ -73,8 +72,8 @@ ACPI_MODULE_NAME("nsxfeval")
73acpi_status 72acpi_status
74acpi_evaluate_object_typed(acpi_handle handle, 73acpi_evaluate_object_typed(acpi_handle handle,
75 acpi_string pathname, 74 acpi_string pathname,
76 struct acpi_object_list * external_params, 75 struct acpi_object_list *external_params,
77 struct acpi_buffer * return_buffer, 76 struct acpi_buffer *return_buffer,
78 acpi_object_type return_type) 77 acpi_object_type return_type)
79{ 78{
80 acpi_status status; 79 acpi_status status;
@@ -143,7 +142,6 @@ acpi_evaluate_object_typed(acpi_handle handle,
143 142
144ACPI_EXPORT_SYMBOL(acpi_evaluate_object_typed) 143ACPI_EXPORT_SYMBOL(acpi_evaluate_object_typed)
145#endif /* ACPI_FUTURE_USAGE */ 144#endif /* ACPI_FUTURE_USAGE */
146
147/******************************************************************************* 145/*******************************************************************************
148 * 146 *
149 * FUNCTION: acpi_evaluate_object 147 * FUNCTION: acpi_evaluate_object
@@ -170,7 +168,6 @@ acpi_evaluate_object(acpi_handle handle,
170 struct acpi_buffer *return_buffer) 168 struct acpi_buffer *return_buffer)
171{ 169{
172 acpi_status status; 170 acpi_status status;
173 acpi_status status2;
174 struct acpi_evaluate_info *info; 171 struct acpi_evaluate_info *info;
175 acpi_size buffer_space_needed; 172 acpi_size buffer_space_needed;
176 u32 i; 173 u32 i;
@@ -329,14 +326,12 @@ acpi_evaluate_object(acpi_handle handle,
329 * Delete the internal return object. NOTE: Interpreter must be 326 * Delete the internal return object. NOTE: Interpreter must be
330 * locked to avoid race condition. 327 * locked to avoid race condition.
331 */ 328 */
332 status2 = acpi_ex_enter_interpreter(); 329 acpi_ex_enter_interpreter();
333 if (ACPI_SUCCESS(status2)) {
334 330
335 /* Remove one reference on the return object (should delete it) */ 331 /* Remove one reference on the return object (should delete it) */
336 332
337 acpi_ut_remove_reference(info->return_object); 333 acpi_ut_remove_reference(info->return_object);
338 acpi_ex_exit_interpreter(); 334 acpi_ex_exit_interpreter();
339 }
340 } 335 }
341 336
342 cleanup: 337 cleanup:
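Since this file carries the public entry point, a caller-side sketch of the API being simplified may help (hedged; the "_STA" method name and the handle are illustrative):

    struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL };
    union acpi_object *obj;
    acpi_status status;

    /* ACPI_ALLOCATE_BUFFER asks ACPICA to size and allocate the
     * return buffer; the caller then owns and must free it. */
    status = acpi_evaluate_object(handle, "_STA", NULL, &out);
    if (ACPI_SUCCESS(status)) {
            obj = out.pointer;
            /* ... use obj, e.g. obj->integer.value ... */
            kfree(out.pointer);
    }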
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index c2bed56915e1..b998340e23d4 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -71,6 +71,7 @@ static unsigned int acpi_irq_irq;
71static acpi_osd_handler acpi_irq_handler; 71static acpi_osd_handler acpi_irq_handler;
72static void *acpi_irq_context; 72static void *acpi_irq_context;
73static struct workqueue_struct *kacpid_wq; 73static struct workqueue_struct *kacpid_wq;
74static struct workqueue_struct *kacpi_notify_wq;
74 75
75static void __init acpi_request_region (struct acpi_generic_address *addr, 76static void __init acpi_request_region (struct acpi_generic_address *addr,
76 unsigned int length, char *desc) 77 unsigned int length, char *desc)
@@ -137,8 +138,9 @@ acpi_status acpi_os_initialize1(void)
137 return AE_NULL_ENTRY; 138 return AE_NULL_ENTRY;
138 } 139 }
139 kacpid_wq = create_singlethread_workqueue("kacpid"); 140 kacpid_wq = create_singlethread_workqueue("kacpid");
141 kacpi_notify_wq = create_singlethread_workqueue("kacpi_notify");
140 BUG_ON(!kacpid_wq); 142 BUG_ON(!kacpid_wq);
141 143 BUG_ON(!kacpi_notify_wq);
142 return AE_OK; 144 return AE_OK;
143} 145}
144 146
@@ -150,6 +152,7 @@ acpi_status acpi_os_terminate(void)
150 } 152 }
151 153
152 destroy_workqueue(kacpid_wq); 154 destroy_workqueue(kacpid_wq);
155 destroy_workqueue(kacpi_notify_wq);
153 156
154 return AE_OK; 157 return AE_OK;
155} 158}
@@ -603,6 +606,23 @@ void acpi_os_derive_pci_id(acpi_handle rhandle, /* upper bound */
603static void acpi_os_execute_deferred(struct work_struct *work) 606static void acpi_os_execute_deferred(struct work_struct *work)
604{ 607{
605 struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work); 608 struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
609 if (!dpc) {
610 printk(KERN_ERR PREFIX "Invalid (NULL) context\n");
611 return;
612 }
613
614 dpc->function(dpc->context);
615 kfree(dpc);
616
617 /* Yield cpu to notify thread */
618 cond_resched();
619
620 return;
621}
622
623static void acpi_os_execute_notify(struct work_struct *work)
624{
625 struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
606 626
607 if (!dpc) { 627 if (!dpc) {
608 printk(KERN_ERR PREFIX "Invalid (NULL) context\n"); 628 printk(KERN_ERR PREFIX "Invalid (NULL) context\n");
@@ -637,14 +657,12 @@ acpi_status acpi_os_execute(acpi_execute_type type,
637 acpi_status status = AE_OK; 657 acpi_status status = AE_OK;
638 struct acpi_os_dpc *dpc; 658 struct acpi_os_dpc *dpc;
639 659
640 ACPI_FUNCTION_TRACE("os_queue_for_execution");
641
642 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, 660 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
643 "Scheduling function [%p(%p)] for deferred execution.\n", 661 "Scheduling function [%p(%p)] for deferred execution.\n",
644 function, context)); 662 function, context));
645 663
646 if (!function) 664 if (!function)
647 return_ACPI_STATUS(AE_BAD_PARAMETER); 665 return AE_BAD_PARAMETER;
648 666
649 /* 667 /*
650 * Allocate/initialize DPC structure. Note that this memory will be 668 * Allocate/initialize DPC structure. Note that this memory will be
@@ -662,14 +680,21 @@ acpi_status acpi_os_execute(acpi_execute_type type,
662 dpc->function = function; 680 dpc->function = function;
663 dpc->context = context; 681 dpc->context = context;
664 682
665 INIT_WORK(&dpc->work, acpi_os_execute_deferred); 683 if (type == OSL_NOTIFY_HANDLER) {
666 if (!queue_work(kacpid_wq, &dpc->work)) { 684 INIT_WORK(&dpc->work, acpi_os_execute_notify);
667 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, 685 if (!queue_work(kacpi_notify_wq, &dpc->work)) {
686 status = AE_ERROR;
687 kfree(dpc);
688 }
689 } else {
690 INIT_WORK(&dpc->work, acpi_os_execute_deferred);
691 if (!queue_work(kacpid_wq, &dpc->work)) {
692 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
668 "Call to queue_work() failed.\n")); 693 "Call to queue_work() failed.\n"));
669 kfree(dpc); 694 status = AE_ERROR;
670 status = AE_ERROR; 695 kfree(dpc);
696 }
671 } 697 }
672
673 return_ACPI_STATUS(status); 698 return_ACPI_STATUS(status);
674} 699}
675 700
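The net effect above is that Notify() callbacks run on their own single-threaded queue, so a slow notify handler and ordinary deferred work can no longer stall each other. A self-contained sketch of the underlying workqueue pattern (the demo_* names are mine, not the patch's):

    #include <linux/workqueue.h>
    #include <linux/slab.h>

    struct demo_dpc {
            void (*function)(void *);
            void *context;
            struct work_struct work;
    };

    static void demo_run(struct work_struct *work)
    {
            struct demo_dpc *dpc = container_of(work, struct demo_dpc, work);

            dpc->function(dpc->context);
            kfree(dpc);             /* allocated by the producer below */
    }

    static int demo_queue(struct workqueue_struct *wq,
                          void (*fn)(void *), void *ctx)
    {
            struct demo_dpc *dpc = kmalloc(sizeof(*dpc), GFP_ATOMIC);

            if (!dpc)
                    return -ENOMEM;
            dpc->function = fn;
            dpc->context = ctx;
            INIT_WORK(&dpc->work, demo_run);
            return queue_work(wq, &dpc->work) ? 0 : -EBUSY;
    }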
diff --git a/drivers/acpi/parser/psopcode.c b/drivers/acpi/parser/psopcode.c
index 16d8b6cc3c22..9296e86761d7 100644
--- a/drivers/acpi/parser/psopcode.c
+++ b/drivers/acpi/parser/psopcode.c
@@ -185,459 +185,453 @@ const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = {
185/* Index Name Parser Args Interpreter Args ObjectType Class Type Flags */ 185/* Index Name Parser Args Interpreter Args ObjectType Class Type Flags */
186 186
187/* 00 */ ACPI_OP("Zero", ARGP_ZERO_OP, ARGI_ZERO_OP, ACPI_TYPE_INTEGER, 187/* 00 */ ACPI_OP("Zero", ARGP_ZERO_OP, ARGI_ZERO_OP, ACPI_TYPE_INTEGER,
188 AML_CLASS_ARGUMENT, AML_TYPE_CONSTANT, AML_CONSTANT), 188 AML_CLASS_ARGUMENT, AML_TYPE_CONSTANT, AML_CONSTANT),
189/* 01 */ ACPI_OP("One", ARGP_ONE_OP, ARGI_ONE_OP, ACPI_TYPE_INTEGER, 189/* 01 */ ACPI_OP("One", ARGP_ONE_OP, ARGI_ONE_OP, ACPI_TYPE_INTEGER,
190 AML_CLASS_ARGUMENT, AML_TYPE_CONSTANT, AML_CONSTANT), 190 AML_CLASS_ARGUMENT, AML_TYPE_CONSTANT, AML_CONSTANT),
191/* 02 */ ACPI_OP("Alias", ARGP_ALIAS_OP, ARGI_ALIAS_OP, 191/* 02 */ ACPI_OP("Alias", ARGP_ALIAS_OP, ARGI_ALIAS_OP,
192 ACPI_TYPE_LOCAL_ALIAS, AML_CLASS_NAMED_OBJECT, 192 ACPI_TYPE_LOCAL_ALIAS, AML_CLASS_NAMED_OBJECT,
193 AML_TYPE_NAMED_SIMPLE, 193 AML_TYPE_NAMED_SIMPLE,
194 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | 194 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
195 AML_NSNODE | AML_NAMED), 195 AML_NSNODE | AML_NAMED),
196/* 03 */ ACPI_OP("Name", ARGP_NAME_OP, ARGI_NAME_OP, ACPI_TYPE_ANY, 196/* 03 */ ACPI_OP("Name", ARGP_NAME_OP, ARGI_NAME_OP, ACPI_TYPE_ANY,
197 AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_COMPLEX, 197 AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_COMPLEX,
198 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | 198 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
199 AML_NSNODE | AML_NAMED), 199 AML_NSNODE | AML_NAMED),
200/* 04 */ ACPI_OP("ByteConst", ARGP_BYTE_OP, ARGI_BYTE_OP, 200/* 04 */ ACPI_OP("ByteConst", ARGP_BYTE_OP, ARGI_BYTE_OP,
201 ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT, 201 ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT,
202 AML_TYPE_LITERAL, AML_CONSTANT), 202 AML_TYPE_LITERAL, AML_CONSTANT),
203/* 05 */ ACPI_OP("WordConst", ARGP_WORD_OP, ARGI_WORD_OP, 203/* 05 */ ACPI_OP("WordConst", ARGP_WORD_OP, ARGI_WORD_OP,
204 ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT, 204 ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT,
205 AML_TYPE_LITERAL, AML_CONSTANT), 205 AML_TYPE_LITERAL, AML_CONSTANT),
206/* 06 */ ACPI_OP("DwordConst", ARGP_DWORD_OP, ARGI_DWORD_OP, 206/* 06 */ ACPI_OP("DwordConst", ARGP_DWORD_OP, ARGI_DWORD_OP,
207 ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT, 207 ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT,
208 AML_TYPE_LITERAL, AML_CONSTANT), 208 AML_TYPE_LITERAL, AML_CONSTANT),
209/* 07 */ ACPI_OP("String", ARGP_STRING_OP, ARGI_STRING_OP, 209/* 07 */ ACPI_OP("String", ARGP_STRING_OP, ARGI_STRING_OP,
210 ACPI_TYPE_STRING, AML_CLASS_ARGUMENT, 210 ACPI_TYPE_STRING, AML_CLASS_ARGUMENT,
211 AML_TYPE_LITERAL, AML_CONSTANT), 211 AML_TYPE_LITERAL, AML_CONSTANT),
212/* 08 */ ACPI_OP("Scope", ARGP_SCOPE_OP, ARGI_SCOPE_OP, 212/* 08 */ ACPI_OP("Scope", ARGP_SCOPE_OP, ARGI_SCOPE_OP,
213 ACPI_TYPE_LOCAL_SCOPE, AML_CLASS_NAMED_OBJECT, 213 ACPI_TYPE_LOCAL_SCOPE, AML_CLASS_NAMED_OBJECT,
214 AML_TYPE_NAMED_NO_OBJ, 214 AML_TYPE_NAMED_NO_OBJ,
215 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | 215 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
216 AML_NSNODE | AML_NAMED), 216 AML_NSNODE | AML_NAMED),
217/* 09 */ ACPI_OP("Buffer", ARGP_BUFFER_OP, ARGI_BUFFER_OP, 217/* 09 */ ACPI_OP("Buffer", ARGP_BUFFER_OP, ARGI_BUFFER_OP,
218 ACPI_TYPE_BUFFER, AML_CLASS_CREATE, 218 ACPI_TYPE_BUFFER, AML_CLASS_CREATE,
219 AML_TYPE_CREATE_OBJECT, 219 AML_TYPE_CREATE_OBJECT,
220 AML_HAS_ARGS | AML_DEFER | AML_CONSTANT), 220 AML_HAS_ARGS | AML_DEFER | AML_CONSTANT),
221/* 0A */ ACPI_OP("Package", ARGP_PACKAGE_OP, ARGI_PACKAGE_OP, 221/* 0A */ ACPI_OP("Package", ARGP_PACKAGE_OP, ARGI_PACKAGE_OP,
222 ACPI_TYPE_PACKAGE, AML_CLASS_CREATE, 222 ACPI_TYPE_PACKAGE, AML_CLASS_CREATE,
223 AML_TYPE_CREATE_OBJECT, 223 AML_TYPE_CREATE_OBJECT,
224 AML_HAS_ARGS | AML_DEFER | AML_CONSTANT), 224 AML_HAS_ARGS | AML_DEFER | AML_CONSTANT),
225/* 0B */ ACPI_OP("Method", ARGP_METHOD_OP, ARGI_METHOD_OP, 225/* 0B */ ACPI_OP("Method", ARGP_METHOD_OP, ARGI_METHOD_OP,
226 ACPI_TYPE_METHOD, AML_CLASS_NAMED_OBJECT, 226 ACPI_TYPE_METHOD, AML_CLASS_NAMED_OBJECT,
227 AML_TYPE_NAMED_COMPLEX, 227 AML_TYPE_NAMED_COMPLEX,
228 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | 228 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
229 AML_NSNODE | AML_NAMED | AML_DEFER), 229 AML_NSNODE | AML_NAMED | AML_DEFER),
230/* 0C */ ACPI_OP("Local0", ARGP_LOCAL0, ARGI_LOCAL0, 230/* 0C */ ACPI_OP("Local0", ARGP_LOCAL0, ARGI_LOCAL0,
231 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, 231 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
232 AML_TYPE_LOCAL_VARIABLE, 0), 232 AML_TYPE_LOCAL_VARIABLE, 0),
233/* 0D */ ACPI_OP("Local1", ARGP_LOCAL1, ARGI_LOCAL1, 233/* 0D */ ACPI_OP("Local1", ARGP_LOCAL1, ARGI_LOCAL1,
234 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, 234 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
235 AML_TYPE_LOCAL_VARIABLE, 0), 235 AML_TYPE_LOCAL_VARIABLE, 0),
236/* 0E */ ACPI_OP("Local2", ARGP_LOCAL2, ARGI_LOCAL2, 236/* 0E */ ACPI_OP("Local2", ARGP_LOCAL2, ARGI_LOCAL2,
237 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, 237 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
238 AML_TYPE_LOCAL_VARIABLE, 0), 238 AML_TYPE_LOCAL_VARIABLE, 0),
239/* 0F */ ACPI_OP("Local3", ARGP_LOCAL3, ARGI_LOCAL3, 239/* 0F */ ACPI_OP("Local3", ARGP_LOCAL3, ARGI_LOCAL3,
240 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, 240 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
241 AML_TYPE_LOCAL_VARIABLE, 0), 241 AML_TYPE_LOCAL_VARIABLE, 0),
242/* 10 */ ACPI_OP("Local4", ARGP_LOCAL4, ARGI_LOCAL4, 242/* 10 */ ACPI_OP("Local4", ARGP_LOCAL4, ARGI_LOCAL4,
243 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, 243 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
244 AML_TYPE_LOCAL_VARIABLE, 0), 244 AML_TYPE_LOCAL_VARIABLE, 0),
245/* 11 */ ACPI_OP("Local5", ARGP_LOCAL5, ARGI_LOCAL5, 245/* 11 */ ACPI_OP("Local5", ARGP_LOCAL5, ARGI_LOCAL5,
246 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, 246 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
247 AML_TYPE_LOCAL_VARIABLE, 0), 247 AML_TYPE_LOCAL_VARIABLE, 0),
248/* 12 */ ACPI_OP("Local6", ARGP_LOCAL6, ARGI_LOCAL6, 248/* 12 */ ACPI_OP("Local6", ARGP_LOCAL6, ARGI_LOCAL6,
249 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, 249 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
250 AML_TYPE_LOCAL_VARIABLE, 0), 250 AML_TYPE_LOCAL_VARIABLE, 0),
251/* 13 */ ACPI_OP("Local7", ARGP_LOCAL7, ARGI_LOCAL7, 251/* 13 */ ACPI_OP("Local7", ARGP_LOCAL7, ARGI_LOCAL7,
252 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, 252 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
253 AML_TYPE_LOCAL_VARIABLE, 0), 253 AML_TYPE_LOCAL_VARIABLE, 0),
254/* 14 */ ACPI_OP("Arg0", ARGP_ARG0, ARGI_ARG0, 254/* 14 */ ACPI_OP("Arg0", ARGP_ARG0, ARGI_ARG0,
255 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, 255 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
256 AML_TYPE_METHOD_ARGUMENT, 0), 256 AML_TYPE_METHOD_ARGUMENT, 0),
257/* 15 */ ACPI_OP("Arg1", ARGP_ARG1, ARGI_ARG1, 257/* 15 */ ACPI_OP("Arg1", ARGP_ARG1, ARGI_ARG1,
258 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, 258 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
259 AML_TYPE_METHOD_ARGUMENT, 0), 259 AML_TYPE_METHOD_ARGUMENT, 0),
260/* 16 */ ACPI_OP("Arg2", ARGP_ARG2, ARGI_ARG2, 260/* 16 */ ACPI_OP("Arg2", ARGP_ARG2, ARGI_ARG2,
261 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, 261 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
262 AML_TYPE_METHOD_ARGUMENT, 0), 262 AML_TYPE_METHOD_ARGUMENT, 0),
263/* 17 */ ACPI_OP("Arg3", ARGP_ARG3, ARGI_ARG3, 263/* 17 */ ACPI_OP("Arg3", ARGP_ARG3, ARGI_ARG3,
264 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, 264 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
265 AML_TYPE_METHOD_ARGUMENT, 0), 265 AML_TYPE_METHOD_ARGUMENT, 0),
266/* 18 */ ACPI_OP("Arg4", ARGP_ARG4, ARGI_ARG4, 266/* 18 */ ACPI_OP("Arg4", ARGP_ARG4, ARGI_ARG4,
267 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, 267 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
268 AML_TYPE_METHOD_ARGUMENT, 0), 268 AML_TYPE_METHOD_ARGUMENT, 0),
269/* 19 */ ACPI_OP("Arg5", ARGP_ARG5, ARGI_ARG5, 269/* 19 */ ACPI_OP("Arg5", ARGP_ARG5, ARGI_ARG5,
270 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, 270 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
271 AML_TYPE_METHOD_ARGUMENT, 0), 271 AML_TYPE_METHOD_ARGUMENT, 0),
272/* 1A */ ACPI_OP("Arg6", ARGP_ARG6, ARGI_ARG6, 272/* 1A */ ACPI_OP("Arg6", ARGP_ARG6, ARGI_ARG6,
273 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, 273 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
274 AML_TYPE_METHOD_ARGUMENT, 0), 274 AML_TYPE_METHOD_ARGUMENT, 0),
275/* 1B */ ACPI_OP("Store", ARGP_STORE_OP, ARGI_STORE_OP, ACPI_TYPE_ANY, 275/* 1B */ ACPI_OP("Store", ARGP_STORE_OP, ARGI_STORE_OP, ACPI_TYPE_ANY,
276 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R, 276 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R,
277 AML_FLAGS_EXEC_1A_1T_1R), 277 AML_FLAGS_EXEC_1A_1T_1R),
278/* 1C */ ACPI_OP("RefOf", ARGP_REF_OF_OP, ARGI_REF_OF_OP, ACPI_TYPE_ANY, 278/* 1C */ ACPI_OP("RefOf", ARGP_REF_OF_OP, ARGI_REF_OF_OP, ACPI_TYPE_ANY,
279 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_1R, 279 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_1R,
280 AML_FLAGS_EXEC_1A_0T_1R), 280 AML_FLAGS_EXEC_1A_0T_1R),
281/* 1D */ ACPI_OP("Add", ARGP_ADD_OP, ARGI_ADD_OP, ACPI_TYPE_ANY, 281/* 1D */ ACPI_OP("Add", ARGP_ADD_OP, ARGI_ADD_OP, ACPI_TYPE_ANY,
282 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, 282 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
283 AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT), 283 AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
284/* 1E */ ACPI_OP("Concatenate", ARGP_CONCAT_OP, ARGI_CONCAT_OP, 284/* 1E */ ACPI_OP("Concatenate", ARGP_CONCAT_OP, ARGI_CONCAT_OP,
285 ACPI_TYPE_ANY, AML_CLASS_EXECUTE, 285 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
286 AML_TYPE_EXEC_2A_1T_1R, 286 AML_TYPE_EXEC_2A_1T_1R,
287 AML_FLAGS_EXEC_2A_1T_1R | AML_CONSTANT), 287 AML_FLAGS_EXEC_2A_1T_1R | AML_CONSTANT),
288/* 1F */ ACPI_OP("Subtract", ARGP_SUBTRACT_OP, ARGI_SUBTRACT_OP, 288/* 1F */ ACPI_OP("Subtract", ARGP_SUBTRACT_OP, ARGI_SUBTRACT_OP,
289 ACPI_TYPE_ANY, AML_CLASS_EXECUTE, 289 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
290 AML_TYPE_EXEC_2A_1T_1R, 290 AML_TYPE_EXEC_2A_1T_1R,
291 AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT), 291 AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
292/* 20 */ ACPI_OP("Increment", ARGP_INCREMENT_OP, ARGI_INCREMENT_OP, 292/* 20 */ ACPI_OP("Increment", ARGP_INCREMENT_OP, ARGI_INCREMENT_OP,
293 ACPI_TYPE_ANY, AML_CLASS_EXECUTE, 293 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
294 AML_TYPE_EXEC_1A_0T_1R, 294 AML_TYPE_EXEC_1A_0T_1R,
295 AML_FLAGS_EXEC_1A_0T_1R | AML_CONSTANT), 295 AML_FLAGS_EXEC_1A_0T_1R | AML_CONSTANT),
296/* 21 */ ACPI_OP("Decrement", ARGP_DECREMENT_OP, ARGI_DECREMENT_OP, 296/* 21 */ ACPI_OP("Decrement", ARGP_DECREMENT_OP, ARGI_DECREMENT_OP,
297 ACPI_TYPE_ANY, AML_CLASS_EXECUTE, 297 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
298 AML_TYPE_EXEC_1A_0T_1R, 298 AML_TYPE_EXEC_1A_0T_1R,
299 AML_FLAGS_EXEC_1A_0T_1R | AML_CONSTANT), 299 AML_FLAGS_EXEC_1A_0T_1R | AML_CONSTANT),
300/* 22 */ ACPI_OP("Multiply", ARGP_MULTIPLY_OP, ARGI_MULTIPLY_OP, 300/* 22 */ ACPI_OP("Multiply", ARGP_MULTIPLY_OP, ARGI_MULTIPLY_OP,
301 ACPI_TYPE_ANY, AML_CLASS_EXECUTE, 301 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
302 AML_TYPE_EXEC_2A_1T_1R, 302 AML_TYPE_EXEC_2A_1T_1R,
303 AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT), 303 AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
304/* 23 */ ACPI_OP("Divide", ARGP_DIVIDE_OP, ARGI_DIVIDE_OP, 304/* 23 */ ACPI_OP("Divide", ARGP_DIVIDE_OP, ARGI_DIVIDE_OP,
305 ACPI_TYPE_ANY, AML_CLASS_EXECUTE, 305 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
306 AML_TYPE_EXEC_2A_2T_1R, 306 AML_TYPE_EXEC_2A_2T_1R,
307 AML_FLAGS_EXEC_2A_2T_1R | AML_CONSTANT), 307 AML_FLAGS_EXEC_2A_2T_1R | AML_CONSTANT),
308/* 24 */ ACPI_OP("ShiftLeft", ARGP_SHIFT_LEFT_OP, ARGI_SHIFT_LEFT_OP, 308/* 24 */ ACPI_OP("ShiftLeft", ARGP_SHIFT_LEFT_OP, ARGI_SHIFT_LEFT_OP,
309 ACPI_TYPE_ANY, AML_CLASS_EXECUTE, 309 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
310 AML_TYPE_EXEC_2A_1T_1R, 310 AML_TYPE_EXEC_2A_1T_1R,
311 AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT), 311 AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
312/* 25 */ ACPI_OP("ShiftRight", ARGP_SHIFT_RIGHT_OP, ARGI_SHIFT_RIGHT_OP, 312/* 25 */ ACPI_OP("ShiftRight", ARGP_SHIFT_RIGHT_OP, ARGI_SHIFT_RIGHT_OP,
313 ACPI_TYPE_ANY, AML_CLASS_EXECUTE, 313 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
314 AML_TYPE_EXEC_2A_1T_1R, 314 AML_TYPE_EXEC_2A_1T_1R,
315 AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT), 315 AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
316/* 26 */ ACPI_OP("And", ARGP_BIT_AND_OP, ARGI_BIT_AND_OP, ACPI_TYPE_ANY, 316/* 26 */ ACPI_OP("And", ARGP_BIT_AND_OP, ARGI_BIT_AND_OP, ACPI_TYPE_ANY,
317 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, 317 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
318 AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT), 318 AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
319/* 27 */ ACPI_OP("NAnd", ARGP_BIT_NAND_OP, ARGI_BIT_NAND_OP, 319/* 27 */ ACPI_OP("NAnd", ARGP_BIT_NAND_OP, ARGI_BIT_NAND_OP,
320 ACPI_TYPE_ANY, AML_CLASS_EXECUTE, 320 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
321 AML_TYPE_EXEC_2A_1T_1R, 321 AML_TYPE_EXEC_2A_1T_1R,
322 AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT), 322 AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
323/* 28 */ ACPI_OP("Or", ARGP_BIT_OR_OP, ARGI_BIT_OR_OP, ACPI_TYPE_ANY, 323/* 28 */ ACPI_OP("Or", ARGP_BIT_OR_OP, ARGI_BIT_OR_OP, ACPI_TYPE_ANY,
324 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, 324 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
325 AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT), 325 AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
326/* 29 */ ACPI_OP("NOr", ARGP_BIT_NOR_OP, ARGI_BIT_NOR_OP, ACPI_TYPE_ANY, 326/* 29 */ ACPI_OP("NOr", ARGP_BIT_NOR_OP, ARGI_BIT_NOR_OP, ACPI_TYPE_ANY,
327 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, 327 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
328 AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT), 328 AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
329/* 2A */ ACPI_OP("XOr", ARGP_BIT_XOR_OP, ARGI_BIT_XOR_OP, ACPI_TYPE_ANY, 329/* 2A */ ACPI_OP("XOr", ARGP_BIT_XOR_OP, ARGI_BIT_XOR_OP, ACPI_TYPE_ANY,
330 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, 330 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
331 AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT), 331 AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
332/* 2B */ ACPI_OP("Not", ARGP_BIT_NOT_OP, ARGI_BIT_NOT_OP, ACPI_TYPE_ANY, 332/* 2B */ ACPI_OP("Not", ARGP_BIT_NOT_OP, ARGI_BIT_NOT_OP, ACPI_TYPE_ANY,
333 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R, 333 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R,
334 AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT), 334 AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
335/* 2C */ ACPI_OP("FindSetLeftBit", ARGP_FIND_SET_LEFT_BIT_OP, 335/* 2C */ ACPI_OP("FindSetLeftBit", ARGP_FIND_SET_LEFT_BIT_OP,
336 ARGI_FIND_SET_LEFT_BIT_OP, ACPI_TYPE_ANY, 336 ARGI_FIND_SET_LEFT_BIT_OP, ACPI_TYPE_ANY,
337 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R, 337 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R,
338 AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT), 338 AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
339/* 2D */ ACPI_OP("FindSetRightBit", ARGP_FIND_SET_RIGHT_BIT_OP, 339/* 2D */ ACPI_OP("FindSetRightBit", ARGP_FIND_SET_RIGHT_BIT_OP,
340 ARGI_FIND_SET_RIGHT_BIT_OP, ACPI_TYPE_ANY, 340 ARGI_FIND_SET_RIGHT_BIT_OP, ACPI_TYPE_ANY,
341 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R, 341 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R,
342 AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT), 342 AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
343/* 2E */ ACPI_OP("DerefOf", ARGP_DEREF_OF_OP, ARGI_DEREF_OF_OP, 343/* 2E */ ACPI_OP("DerefOf", ARGP_DEREF_OF_OP, ARGI_DEREF_OF_OP,
344 ACPI_TYPE_ANY, AML_CLASS_EXECUTE, 344 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
345 AML_TYPE_EXEC_1A_0T_1R, AML_FLAGS_EXEC_1A_0T_1R), 345 AML_TYPE_EXEC_1A_0T_1R, AML_FLAGS_EXEC_1A_0T_1R),
346/* 2F */ ACPI_OP("Notify", ARGP_NOTIFY_OP, ARGI_NOTIFY_OP, 346/* 2F */ ACPI_OP("Notify", ARGP_NOTIFY_OP, ARGI_NOTIFY_OP,
347 ACPI_TYPE_ANY, AML_CLASS_EXECUTE, 347 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
348 AML_TYPE_EXEC_2A_0T_0R, AML_FLAGS_EXEC_2A_0T_0R), 348 AML_TYPE_EXEC_2A_0T_0R, AML_FLAGS_EXEC_2A_0T_0R),
349/* 30 */ ACPI_OP("SizeOf", ARGP_SIZE_OF_OP, ARGI_SIZE_OF_OP, 349/* 30 */ ACPI_OP("SizeOf", ARGP_SIZE_OF_OP, ARGI_SIZE_OF_OP,
350 ACPI_TYPE_ANY, AML_CLASS_EXECUTE, 350 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
351 AML_TYPE_EXEC_1A_0T_1R, 351 AML_TYPE_EXEC_1A_0T_1R,
352 AML_FLAGS_EXEC_1A_0T_1R | AML_NO_OPERAND_RESOLVE), 352 AML_FLAGS_EXEC_1A_0T_1R | AML_NO_OPERAND_RESOLVE),
353/* 31 */ ACPI_OP("Index", ARGP_INDEX_OP, ARGI_INDEX_OP, ACPI_TYPE_ANY, 353/* 31 */ ACPI_OP("Index", ARGP_INDEX_OP, ARGI_INDEX_OP, ACPI_TYPE_ANY,
354 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, 354 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
355 AML_FLAGS_EXEC_2A_1T_1R), 355 AML_FLAGS_EXEC_2A_1T_1R),
356/* 32 */ ACPI_OP("Match", ARGP_MATCH_OP, ARGI_MATCH_OP, ACPI_TYPE_ANY, 356/* 32 */ ACPI_OP("Match", ARGP_MATCH_OP, ARGI_MATCH_OP, ACPI_TYPE_ANY,
357 AML_CLASS_EXECUTE, AML_TYPE_EXEC_6A_0T_1R, 357 AML_CLASS_EXECUTE, AML_TYPE_EXEC_6A_0T_1R,
358 AML_FLAGS_EXEC_6A_0T_1R | AML_CONSTANT), 358 AML_FLAGS_EXEC_6A_0T_1R | AML_CONSTANT),
359/* 33 */ ACPI_OP("CreateDWordField", ARGP_CREATE_DWORD_FIELD_OP, 359/* 33 */ ACPI_OP("CreateDWordField", ARGP_CREATE_DWORD_FIELD_OP,
360 ARGI_CREATE_DWORD_FIELD_OP, 360 ARGI_CREATE_DWORD_FIELD_OP,
361 ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE, 361 ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE,
362 AML_TYPE_CREATE_FIELD, 362 AML_TYPE_CREATE_FIELD,
363 AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE | 363 AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE |
364 AML_DEFER | AML_CREATE), 364 AML_DEFER | AML_CREATE),
365/* 34 */ ACPI_OP("CreateWordField", ARGP_CREATE_WORD_FIELD_OP, 365/* 34 */ ACPI_OP("CreateWordField", ARGP_CREATE_WORD_FIELD_OP,
366 ARGI_CREATE_WORD_FIELD_OP, 366 ARGI_CREATE_WORD_FIELD_OP,
367 ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE, 367 ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE,
368 AML_TYPE_CREATE_FIELD, 368 AML_TYPE_CREATE_FIELD,
369 AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE | 369 AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE |
370 AML_DEFER | AML_CREATE), 370 AML_DEFER | AML_CREATE),
371/* 35 */ ACPI_OP("CreateByteField", ARGP_CREATE_BYTE_FIELD_OP, 371/* 35 */ ACPI_OP("CreateByteField", ARGP_CREATE_BYTE_FIELD_OP,
372 ARGI_CREATE_BYTE_FIELD_OP, 372 ARGI_CREATE_BYTE_FIELD_OP,
373 ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE, 373 ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE,
374 AML_TYPE_CREATE_FIELD, 374 AML_TYPE_CREATE_FIELD,
375 AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE | 375 AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE |
376 AML_DEFER | AML_CREATE), 376 AML_DEFER | AML_CREATE),
377/* 36 */ ACPI_OP("CreateBitField", ARGP_CREATE_BIT_FIELD_OP, 377/* 36 */ ACPI_OP("CreateBitField", ARGP_CREATE_BIT_FIELD_OP,
378 ARGI_CREATE_BIT_FIELD_OP, 378 ARGI_CREATE_BIT_FIELD_OP,
379 ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE, 379 ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE,
380 AML_TYPE_CREATE_FIELD, 380 AML_TYPE_CREATE_FIELD,
381 AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE | 381 AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE |
382 AML_DEFER | AML_CREATE), 382 AML_DEFER | AML_CREATE),
383/* 37 */ ACPI_OP("ObjectType", ARGP_TYPE_OP, ARGI_TYPE_OP, 383/* 37 */ ACPI_OP("ObjectType", ARGP_TYPE_OP, ARGI_TYPE_OP,
384 ACPI_TYPE_ANY, AML_CLASS_EXECUTE, 384 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
385 AML_TYPE_EXEC_1A_0T_1R, 385 AML_TYPE_EXEC_1A_0T_1R,
386 AML_FLAGS_EXEC_1A_0T_1R | AML_NO_OPERAND_RESOLVE), 386 AML_FLAGS_EXEC_1A_0T_1R | AML_NO_OPERAND_RESOLVE),
387/* 38 */ ACPI_OP("LAnd", ARGP_LAND_OP, ARGI_LAND_OP, ACPI_TYPE_ANY, 387/* 38 */ ACPI_OP("LAnd", ARGP_LAND_OP, ARGI_LAND_OP, ACPI_TYPE_ANY,
388 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R, 388 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R,
389 AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL_NUMERIC | 389 AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL_NUMERIC | AML_CONSTANT),
390 AML_CONSTANT),
391/* 39 */ ACPI_OP("LOr", ARGP_LOR_OP, ARGI_LOR_OP, ACPI_TYPE_ANY, 390/* 39 */ ACPI_OP("LOr", ARGP_LOR_OP, ARGI_LOR_OP, ACPI_TYPE_ANY,
392 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R, 391 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R,
393 AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL_NUMERIC | 392 AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL_NUMERIC | AML_CONSTANT),
394 AML_CONSTANT),
395/* 3A */ ACPI_OP("LNot", ARGP_LNOT_OP, ARGI_LNOT_OP, ACPI_TYPE_ANY, 393/* 3A */ ACPI_OP("LNot", ARGP_LNOT_OP, ARGI_LNOT_OP, ACPI_TYPE_ANY,
396 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_1R, 394 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_1R,
397 AML_FLAGS_EXEC_1A_0T_1R | AML_CONSTANT), 395 AML_FLAGS_EXEC_1A_0T_1R | AML_CONSTANT),
398/* 3B */ ACPI_OP("LEqual", ARGP_LEQUAL_OP, ARGI_LEQUAL_OP, 396/* 3B */ ACPI_OP("LEqual", ARGP_LEQUAL_OP, ARGI_LEQUAL_OP,
399 ACPI_TYPE_ANY, AML_CLASS_EXECUTE, 397 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
400 AML_TYPE_EXEC_2A_0T_1R, 398 AML_TYPE_EXEC_2A_0T_1R,
401 AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL | AML_CONSTANT), 399 AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL | AML_CONSTANT),
402/* 3C */ ACPI_OP("LGreater", ARGP_LGREATER_OP, ARGI_LGREATER_OP, 400/* 3C */ ACPI_OP("LGreater", ARGP_LGREATER_OP, ARGI_LGREATER_OP,
403 ACPI_TYPE_ANY, AML_CLASS_EXECUTE, 401 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
404 AML_TYPE_EXEC_2A_0T_1R, 402 AML_TYPE_EXEC_2A_0T_1R,
405 AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL | AML_CONSTANT), 403 AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL | AML_CONSTANT),
406/* 3D */ ACPI_OP("LLess", ARGP_LLESS_OP, ARGI_LLESS_OP, ACPI_TYPE_ANY, 404/* 3D */ ACPI_OP("LLess", ARGP_LLESS_OP, ARGI_LLESS_OP, ACPI_TYPE_ANY,
407 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R, 405 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R,
408 AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL | AML_CONSTANT), 406 AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL | AML_CONSTANT),
409/* 3E */ ACPI_OP("If", ARGP_IF_OP, ARGI_IF_OP, ACPI_TYPE_ANY, 407/* 3E */ ACPI_OP("If", ARGP_IF_OP, ARGI_IF_OP, ACPI_TYPE_ANY,
410 AML_CLASS_CONTROL, AML_TYPE_CONTROL, AML_HAS_ARGS), 408 AML_CLASS_CONTROL, AML_TYPE_CONTROL, AML_HAS_ARGS),
411/* 3F */ ACPI_OP("Else", ARGP_ELSE_OP, ARGI_ELSE_OP, ACPI_TYPE_ANY, 409/* 3F */ ACPI_OP("Else", ARGP_ELSE_OP, ARGI_ELSE_OP, ACPI_TYPE_ANY,
412 AML_CLASS_CONTROL, AML_TYPE_CONTROL, AML_HAS_ARGS), 410 AML_CLASS_CONTROL, AML_TYPE_CONTROL, AML_HAS_ARGS),
413/* 40 */ ACPI_OP("While", ARGP_WHILE_OP, ARGI_WHILE_OP, ACPI_TYPE_ANY, 411/* 40 */ ACPI_OP("While", ARGP_WHILE_OP, ARGI_WHILE_OP, ACPI_TYPE_ANY,
414 AML_CLASS_CONTROL, AML_TYPE_CONTROL, AML_HAS_ARGS), 412 AML_CLASS_CONTROL, AML_TYPE_CONTROL, AML_HAS_ARGS),
415/* 41 */ ACPI_OP("Noop", ARGP_NOOP_OP, ARGI_NOOP_OP, ACPI_TYPE_ANY, 413/* 41 */ ACPI_OP("Noop", ARGP_NOOP_OP, ARGI_NOOP_OP, ACPI_TYPE_ANY,
416 AML_CLASS_CONTROL, AML_TYPE_CONTROL, 0), 414 AML_CLASS_CONTROL, AML_TYPE_CONTROL, 0),
417/* 42 */ ACPI_OP("Return", ARGP_RETURN_OP, ARGI_RETURN_OP, 415/* 42 */ ACPI_OP("Return", ARGP_RETURN_OP, ARGI_RETURN_OP,
418 ACPI_TYPE_ANY, AML_CLASS_CONTROL, 416 ACPI_TYPE_ANY, AML_CLASS_CONTROL,
419 AML_TYPE_CONTROL, AML_HAS_ARGS), 417 AML_TYPE_CONTROL, AML_HAS_ARGS),
420/* 43 */ ACPI_OP("Break", ARGP_BREAK_OP, ARGI_BREAK_OP, ACPI_TYPE_ANY, 418/* 43 */ ACPI_OP("Break", ARGP_BREAK_OP, ARGI_BREAK_OP, ACPI_TYPE_ANY,
421 AML_CLASS_CONTROL, AML_TYPE_CONTROL, 0), 419 AML_CLASS_CONTROL, AML_TYPE_CONTROL, 0),
422/* 44 */ ACPI_OP("BreakPoint", ARGP_BREAK_POINT_OP, ARGI_BREAK_POINT_OP, 420/* 44 */ ACPI_OP("BreakPoint", ARGP_BREAK_POINT_OP, ARGI_BREAK_POINT_OP,
423 ACPI_TYPE_ANY, AML_CLASS_CONTROL, AML_TYPE_CONTROL, 0), 421 ACPI_TYPE_ANY, AML_CLASS_CONTROL, AML_TYPE_CONTROL, 0),
424/* 45 */ ACPI_OP("Ones", ARGP_ONES_OP, ARGI_ONES_OP, ACPI_TYPE_INTEGER, 422/* 45 */ ACPI_OP("Ones", ARGP_ONES_OP, ARGI_ONES_OP, ACPI_TYPE_INTEGER,
425 AML_CLASS_ARGUMENT, AML_TYPE_CONSTANT, AML_CONSTANT), 423 AML_CLASS_ARGUMENT, AML_TYPE_CONSTANT, AML_CONSTANT),
426 424
427/* Prefixed opcodes (Two-byte opcodes with a prefix op) */ 425/* Prefixed opcodes (Two-byte opcodes with a prefix op) */
428 426
429/* 46 */ ACPI_OP("Mutex", ARGP_MUTEX_OP, ARGI_MUTEX_OP, ACPI_TYPE_MUTEX, 427/* 46 */ ACPI_OP("Mutex", ARGP_MUTEX_OP, ARGI_MUTEX_OP, ACPI_TYPE_MUTEX,
430 AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_SIMPLE, 428 AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_SIMPLE,
431 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | 429 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
432 AML_NSNODE | AML_NAMED), 430 AML_NSNODE | AML_NAMED),
433/* 47 */ ACPI_OP("Event", ARGP_EVENT_OP, ARGI_EVENT_OP, ACPI_TYPE_EVENT, 431/* 47 */ ACPI_OP("Event", ARGP_EVENT_OP, ARGI_EVENT_OP, ACPI_TYPE_EVENT,
434 AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_SIMPLE, 432 AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_SIMPLE,
435 AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE | AML_NAMED), 433 AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE | AML_NAMED),
436/* 48 */ ACPI_OP("CondRefOf", ARGP_COND_REF_OF_OP, ARGI_COND_REF_OF_OP, 434/* 48 */ ACPI_OP("CondRefOf", ARGP_COND_REF_OF_OP, ARGI_COND_REF_OF_OP,
437 ACPI_TYPE_ANY, AML_CLASS_EXECUTE, 435 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
438 AML_TYPE_EXEC_1A_1T_1R, AML_FLAGS_EXEC_1A_1T_1R), 436 AML_TYPE_EXEC_1A_1T_1R, AML_FLAGS_EXEC_1A_1T_1R),
439/* 49 */ ACPI_OP("CreateField", ARGP_CREATE_FIELD_OP, 437/* 49 */ ACPI_OP("CreateField", ARGP_CREATE_FIELD_OP,
440 ARGI_CREATE_FIELD_OP, ACPI_TYPE_BUFFER_FIELD, 438 ARGI_CREATE_FIELD_OP, ACPI_TYPE_BUFFER_FIELD,
441 AML_CLASS_CREATE, AML_TYPE_CREATE_FIELD, 439 AML_CLASS_CREATE, AML_TYPE_CREATE_FIELD,
442 AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE | 440 AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE |
443 AML_DEFER | AML_FIELD | AML_CREATE), 441 AML_DEFER | AML_FIELD | AML_CREATE),
444/* 4A */ ACPI_OP("Load", ARGP_LOAD_OP, ARGI_LOAD_OP, ACPI_TYPE_ANY, 442/* 4A */ ACPI_OP("Load", ARGP_LOAD_OP, ARGI_LOAD_OP, ACPI_TYPE_ANY,
445 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_0R, 443 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_0R,
446 AML_FLAGS_EXEC_1A_1T_0R), 444 AML_FLAGS_EXEC_1A_1T_0R),
447/* 4B */ ACPI_OP("Stall", ARGP_STALL_OP, ARGI_STALL_OP, ACPI_TYPE_ANY, 445/* 4B */ ACPI_OP("Stall", ARGP_STALL_OP, ARGI_STALL_OP, ACPI_TYPE_ANY,
448 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_0R, 446 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_0R,
449 AML_FLAGS_EXEC_1A_0T_0R), 447 AML_FLAGS_EXEC_1A_0T_0R),
450/* 4C */ ACPI_OP("Sleep", ARGP_SLEEP_OP, ARGI_SLEEP_OP, ACPI_TYPE_ANY, 448/* 4C */ ACPI_OP("Sleep", ARGP_SLEEP_OP, ARGI_SLEEP_OP, ACPI_TYPE_ANY,
451 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_0R, 449 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_0R,
452 AML_FLAGS_EXEC_1A_0T_0R), 450 AML_FLAGS_EXEC_1A_0T_0R),
453/* 4D */ ACPI_OP("Acquire", ARGP_ACQUIRE_OP, ARGI_ACQUIRE_OP, 451/* 4D */ ACPI_OP("Acquire", ARGP_ACQUIRE_OP, ARGI_ACQUIRE_OP,
454 ACPI_TYPE_ANY, AML_CLASS_EXECUTE, 452 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
455 AML_TYPE_EXEC_2A_0T_1R, AML_FLAGS_EXEC_2A_0T_1R), 453 AML_TYPE_EXEC_2A_0T_1R, AML_FLAGS_EXEC_2A_0T_1R),
456/* 4E */ ACPI_OP("Signal", ARGP_SIGNAL_OP, ARGI_SIGNAL_OP, 454/* 4E */ ACPI_OP("Signal", ARGP_SIGNAL_OP, ARGI_SIGNAL_OP,
457 ACPI_TYPE_ANY, AML_CLASS_EXECUTE, 455 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
458 AML_TYPE_EXEC_1A_0T_0R, AML_FLAGS_EXEC_1A_0T_0R), 456 AML_TYPE_EXEC_1A_0T_0R, AML_FLAGS_EXEC_1A_0T_0R),
459/* 4F */ ACPI_OP("Wait", ARGP_WAIT_OP, ARGI_WAIT_OP, ACPI_TYPE_ANY, 457/* 4F */ ACPI_OP("Wait", ARGP_WAIT_OP, ARGI_WAIT_OP, ACPI_TYPE_ANY,
460 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R, 458 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R,
461 AML_FLAGS_EXEC_2A_0T_1R), 459 AML_FLAGS_EXEC_2A_0T_1R),
462/* 50 */ ACPI_OP("Reset", ARGP_RESET_OP, ARGI_RESET_OP, ACPI_TYPE_ANY, 460/* 50 */ ACPI_OP("Reset", ARGP_RESET_OP, ARGI_RESET_OP, ACPI_TYPE_ANY,
463 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_0R, 461 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_0R,
464 AML_FLAGS_EXEC_1A_0T_0R), 462 AML_FLAGS_EXEC_1A_0T_0R),
465/* 51 */ ACPI_OP("Release", ARGP_RELEASE_OP, ARGI_RELEASE_OP, 463/* 51 */ ACPI_OP("Release", ARGP_RELEASE_OP, ARGI_RELEASE_OP,
466 ACPI_TYPE_ANY, AML_CLASS_EXECUTE, 464 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
467 AML_TYPE_EXEC_1A_0T_0R, AML_FLAGS_EXEC_1A_0T_0R), 465 AML_TYPE_EXEC_1A_0T_0R, AML_FLAGS_EXEC_1A_0T_0R),
468/* 52 */ ACPI_OP("FromBCD", ARGP_FROM_BCD_OP, ARGI_FROM_BCD_OP, 466/* 52 */ ACPI_OP("FromBCD", ARGP_FROM_BCD_OP, ARGI_FROM_BCD_OP,
469 ACPI_TYPE_ANY, AML_CLASS_EXECUTE, 467 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
470 AML_TYPE_EXEC_1A_1T_1R, 468 AML_TYPE_EXEC_1A_1T_1R,
471 AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT), 469 AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
472/* 53 */ ACPI_OP("ToBCD", ARGP_TO_BCD_OP, ARGI_TO_BCD_OP, ACPI_TYPE_ANY, 470/* 53 */ ACPI_OP("ToBCD", ARGP_TO_BCD_OP, ARGI_TO_BCD_OP, ACPI_TYPE_ANY,
473 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R, 471 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R,
474 AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT), 472 AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
475/* 54 */ ACPI_OP("Unload", ARGP_UNLOAD_OP, ARGI_UNLOAD_OP, 473/* 54 */ ACPI_OP("Unload", ARGP_UNLOAD_OP, ARGI_UNLOAD_OP,
476 ACPI_TYPE_ANY, AML_CLASS_EXECUTE, 474 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
477 AML_TYPE_EXEC_1A_0T_0R, AML_FLAGS_EXEC_1A_0T_0R), 475 AML_TYPE_EXEC_1A_0T_0R, AML_FLAGS_EXEC_1A_0T_0R),
478/* 55 */ ACPI_OP("Revision", ARGP_REVISION_OP, ARGI_REVISION_OP, 476/* 55 */ ACPI_OP("Revision", ARGP_REVISION_OP, ARGI_REVISION_OP,
479 ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT, 477 ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT,
480 AML_TYPE_CONSTANT, 0), 478 AML_TYPE_CONSTANT, 0),
481/* 56 */ ACPI_OP("Debug", ARGP_DEBUG_OP, ARGI_DEBUG_OP, 479/* 56 */ ACPI_OP("Debug", ARGP_DEBUG_OP, ARGI_DEBUG_OP,
482 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, 480 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
483 AML_TYPE_CONSTANT, 0), 481 AML_TYPE_CONSTANT, 0),
484/* 57 */ ACPI_OP("Fatal", ARGP_FATAL_OP, ARGI_FATAL_OP, ACPI_TYPE_ANY, 482/* 57 */ ACPI_OP("Fatal", ARGP_FATAL_OP, ARGI_FATAL_OP, ACPI_TYPE_ANY,
485 AML_CLASS_EXECUTE, AML_TYPE_EXEC_3A_0T_0R, 483 AML_CLASS_EXECUTE, AML_TYPE_EXEC_3A_0T_0R,
486 AML_FLAGS_EXEC_3A_0T_0R), 484 AML_FLAGS_EXEC_3A_0T_0R),
487/* 58 */ ACPI_OP("OperationRegion", ARGP_REGION_OP, ARGI_REGION_OP, 485/* 58 */ ACPI_OP("OperationRegion", ARGP_REGION_OP, ARGI_REGION_OP,
488 ACPI_TYPE_REGION, AML_CLASS_NAMED_OBJECT, 486 ACPI_TYPE_REGION, AML_CLASS_NAMED_OBJECT,
489 AML_TYPE_NAMED_COMPLEX, 487 AML_TYPE_NAMED_COMPLEX,
490 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | 488 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
491 AML_NSNODE | AML_NAMED | AML_DEFER), 489 AML_NSNODE | AML_NAMED | AML_DEFER),
492/* 59 */ ACPI_OP("Field", ARGP_FIELD_OP, ARGI_FIELD_OP, ACPI_TYPE_ANY, 490/* 59 */ ACPI_OP("Field", ARGP_FIELD_OP, ARGI_FIELD_OP, ACPI_TYPE_ANY,
493 AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_FIELD, 491 AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_FIELD,
494 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | 492 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_FIELD),
495 AML_FIELD),
496/* 5A */ ACPI_OP("Device", ARGP_DEVICE_OP, ARGI_DEVICE_OP, 493/* 5A */ ACPI_OP("Device", ARGP_DEVICE_OP, ARGI_DEVICE_OP,
497 ACPI_TYPE_DEVICE, AML_CLASS_NAMED_OBJECT, 494 ACPI_TYPE_DEVICE, AML_CLASS_NAMED_OBJECT,
498 AML_TYPE_NAMED_NO_OBJ, 495 AML_TYPE_NAMED_NO_OBJ,
499 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | 496 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
500 AML_NSNODE | AML_NAMED), 497 AML_NSNODE | AML_NAMED),
501/* 5B */ ACPI_OP("Processor", ARGP_PROCESSOR_OP, ARGI_PROCESSOR_OP, 498/* 5B */ ACPI_OP("Processor", ARGP_PROCESSOR_OP, ARGI_PROCESSOR_OP,
502 ACPI_TYPE_PROCESSOR, AML_CLASS_NAMED_OBJECT, 499 ACPI_TYPE_PROCESSOR, AML_CLASS_NAMED_OBJECT,
503 AML_TYPE_NAMED_SIMPLE, 500 AML_TYPE_NAMED_SIMPLE,
504 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | 501 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
505 AML_NSNODE | AML_NAMED), 502 AML_NSNODE | AML_NAMED),
506/* 5C */ ACPI_OP("PowerResource", ARGP_POWER_RES_OP, ARGI_POWER_RES_OP, 503/* 5C */ ACPI_OP("PowerResource", ARGP_POWER_RES_OP, ARGI_POWER_RES_OP,
507 ACPI_TYPE_POWER, AML_CLASS_NAMED_OBJECT, 504 ACPI_TYPE_POWER, AML_CLASS_NAMED_OBJECT,
508 AML_TYPE_NAMED_SIMPLE, 505 AML_TYPE_NAMED_SIMPLE,
509 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | 506 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
510 AML_NSNODE | AML_NAMED), 507 AML_NSNODE | AML_NAMED),
511/* 5D */ ACPI_OP("ThermalZone", ARGP_THERMAL_ZONE_OP, 508/* 5D */ ACPI_OP("ThermalZone", ARGP_THERMAL_ZONE_OP,
512 ARGI_THERMAL_ZONE_OP, ACPI_TYPE_THERMAL, 509 ARGI_THERMAL_ZONE_OP, ACPI_TYPE_THERMAL,
513 AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_NO_OBJ, 510 AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_NO_OBJ,
514 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | 511 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
515 AML_NSNODE | AML_NAMED), 512 AML_NSNODE | AML_NAMED),
516/* 5E */ ACPI_OP("IndexField", ARGP_INDEX_FIELD_OP, ARGI_INDEX_FIELD_OP, 513/* 5E */ ACPI_OP("IndexField", ARGP_INDEX_FIELD_OP, ARGI_INDEX_FIELD_OP,
517 ACPI_TYPE_ANY, AML_CLASS_NAMED_OBJECT, 514 ACPI_TYPE_ANY, AML_CLASS_NAMED_OBJECT,
518 AML_TYPE_NAMED_FIELD, 515 AML_TYPE_NAMED_FIELD,
519 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | 516 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_FIELD),
520 AML_FIELD),
521/* 5F */ ACPI_OP("BankField", ARGP_BANK_FIELD_OP, ARGI_BANK_FIELD_OP, 517/* 5F */ ACPI_OP("BankField", ARGP_BANK_FIELD_OP, ARGI_BANK_FIELD_OP,
522 ACPI_TYPE_ANY, AML_CLASS_NAMED_OBJECT, 518 ACPI_TYPE_ANY, AML_CLASS_NAMED_OBJECT,
523 AML_TYPE_NAMED_FIELD, 519 AML_TYPE_NAMED_FIELD,
524 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | 520 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_FIELD),
525 AML_FIELD),
526 521
527/* Internal opcodes that map to invalid AML opcodes */ 522/* Internal opcodes that map to invalid AML opcodes */
528 523
529/* 60 */ ACPI_OP("LNotEqual", ARGP_LNOTEQUAL_OP, ARGI_LNOTEQUAL_OP, 524/* 60 */ ACPI_OP("LNotEqual", ARGP_LNOTEQUAL_OP, ARGI_LNOTEQUAL_OP,
530 ACPI_TYPE_ANY, AML_CLASS_INTERNAL, 525 ACPI_TYPE_ANY, AML_CLASS_INTERNAL,
531 AML_TYPE_BOGUS, AML_HAS_ARGS | AML_CONSTANT), 526 AML_TYPE_BOGUS, AML_HAS_ARGS | AML_CONSTANT),
532/* 61 */ ACPI_OP("LLessEqual", ARGP_LLESSEQUAL_OP, ARGI_LLESSEQUAL_OP, 527/* 61 */ ACPI_OP("LLessEqual", ARGP_LLESSEQUAL_OP, ARGI_LLESSEQUAL_OP,
533 ACPI_TYPE_ANY, AML_CLASS_INTERNAL, 528 ACPI_TYPE_ANY, AML_CLASS_INTERNAL,
534 AML_TYPE_BOGUS, AML_HAS_ARGS | AML_CONSTANT), 529 AML_TYPE_BOGUS, AML_HAS_ARGS | AML_CONSTANT),
535/* 62 */ ACPI_OP("LGreaterEqual", ARGP_LGREATEREQUAL_OP, 530/* 62 */ ACPI_OP("LGreaterEqual", ARGP_LGREATEREQUAL_OP,
536 ARGI_LGREATEREQUAL_OP, ACPI_TYPE_ANY, 531 ARGI_LGREATEREQUAL_OP, ACPI_TYPE_ANY,
537 AML_CLASS_INTERNAL, AML_TYPE_BOGUS, 532 AML_CLASS_INTERNAL, AML_TYPE_BOGUS,
538 AML_HAS_ARGS | AML_CONSTANT), 533 AML_HAS_ARGS | AML_CONSTANT),
539/* 63 */ ACPI_OP("-NamePath-", ARGP_NAMEPATH_OP, ARGI_NAMEPATH_OP, 534/* 63 */ ACPI_OP("-NamePath-", ARGP_NAMEPATH_OP, ARGI_NAMEPATH_OP,
540 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, 535 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
541 AML_TYPE_LITERAL, AML_NSOBJECT | AML_NSNODE), 536 AML_TYPE_LITERAL, AML_NSOBJECT | AML_NSNODE),
542/* 64 */ ACPI_OP("-MethodCall-", ARGP_METHODCALL_OP, ARGI_METHODCALL_OP, 537/* 64 */ ACPI_OP("-MethodCall-", ARGP_METHODCALL_OP, ARGI_METHODCALL_OP,
543 ACPI_TYPE_METHOD, AML_CLASS_METHOD_CALL, 538 ACPI_TYPE_METHOD, AML_CLASS_METHOD_CALL,
544 AML_TYPE_METHOD_CALL, 539 AML_TYPE_METHOD_CALL,
545 AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE), 540 AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE),
546/* 65 */ ACPI_OP("-ByteList-", ARGP_BYTELIST_OP, ARGI_BYTELIST_OP, 541/* 65 */ ACPI_OP("-ByteList-", ARGP_BYTELIST_OP, ARGI_BYTELIST_OP,
547 ACPI_TYPE_ANY, AML_CLASS_ARGUMENT, 542 ACPI_TYPE_ANY, AML_CLASS_ARGUMENT,
548 AML_TYPE_LITERAL, 0), 543 AML_TYPE_LITERAL, 0),
549/* 66 */ ACPI_OP("-ReservedField-", ARGP_RESERVEDFIELD_OP, 544/* 66 */ ACPI_OP("-ReservedField-", ARGP_RESERVEDFIELD_OP,
550 ARGI_RESERVEDFIELD_OP, ACPI_TYPE_ANY, 545 ARGI_RESERVEDFIELD_OP, ACPI_TYPE_ANY,
551 AML_CLASS_INTERNAL, AML_TYPE_BOGUS, 0), 546 AML_CLASS_INTERNAL, AML_TYPE_BOGUS, 0),
552/* 67 */ ACPI_OP("-NamedField-", ARGP_NAMEDFIELD_OP, ARGI_NAMEDFIELD_OP, 547/* 67 */ ACPI_OP("-NamedField-", ARGP_NAMEDFIELD_OP, ARGI_NAMEDFIELD_OP,
553 ACPI_TYPE_ANY, AML_CLASS_INTERNAL, 548 ACPI_TYPE_ANY, AML_CLASS_INTERNAL,
554 AML_TYPE_BOGUS, 549 AML_TYPE_BOGUS,
555 AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE | AML_NAMED), 550 AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE | AML_NAMED),
556/* 68 */ ACPI_OP("-AccessField-", ARGP_ACCESSFIELD_OP, 551/* 68 */ ACPI_OP("-AccessField-", ARGP_ACCESSFIELD_OP,
557 ARGI_ACCESSFIELD_OP, ACPI_TYPE_ANY, 552 ARGI_ACCESSFIELD_OP, ACPI_TYPE_ANY,
558 AML_CLASS_INTERNAL, AML_TYPE_BOGUS, 0), 553 AML_CLASS_INTERNAL, AML_TYPE_BOGUS, 0),
559/* 69 */ ACPI_OP("-StaticString", ARGP_STATICSTRING_OP, 554/* 69 */ ACPI_OP("-StaticString", ARGP_STATICSTRING_OP,
560 ARGI_STATICSTRING_OP, ACPI_TYPE_ANY, 555 ARGI_STATICSTRING_OP, ACPI_TYPE_ANY,
561 AML_CLASS_INTERNAL, AML_TYPE_BOGUS, 0), 556 AML_CLASS_INTERNAL, AML_TYPE_BOGUS, 0),
562/* 6A */ ACPI_OP("-Return Value-", ARG_NONE, ARG_NONE, ACPI_TYPE_ANY, 557/* 6A */ ACPI_OP("-Return Value-", ARG_NONE, ARG_NONE, ACPI_TYPE_ANY,
563 AML_CLASS_RETURN_VALUE, AML_TYPE_RETURN, 558 AML_CLASS_RETURN_VALUE, AML_TYPE_RETURN,
564 AML_HAS_ARGS | AML_HAS_RETVAL), 559 AML_HAS_ARGS | AML_HAS_RETVAL),
565/* 6B */ ACPI_OP("-UNKNOWN_OP-", ARG_NONE, ARG_NONE, ACPI_TYPE_INVALID, 560/* 6B */ ACPI_OP("-UNKNOWN_OP-", ARG_NONE, ARG_NONE, ACPI_TYPE_INVALID,
566 AML_CLASS_UNKNOWN, AML_TYPE_BOGUS, AML_HAS_ARGS), 561 AML_CLASS_UNKNOWN, AML_TYPE_BOGUS, AML_HAS_ARGS),
567/* 6C */ ACPI_OP("-ASCII_ONLY-", ARG_NONE, ARG_NONE, ACPI_TYPE_ANY, 562/* 6C */ ACPI_OP("-ASCII_ONLY-", ARG_NONE, ARG_NONE, ACPI_TYPE_ANY,
568 AML_CLASS_ASCII, AML_TYPE_BOGUS, AML_HAS_ARGS), 563 AML_CLASS_ASCII, AML_TYPE_BOGUS, AML_HAS_ARGS),
569/* 6D */ ACPI_OP("-PREFIX_ONLY-", ARG_NONE, ARG_NONE, ACPI_TYPE_ANY, 564/* 6D */ ACPI_OP("-PREFIX_ONLY-", ARG_NONE, ARG_NONE, ACPI_TYPE_ANY,
570 AML_CLASS_PREFIX, AML_TYPE_BOGUS, AML_HAS_ARGS), 565 AML_CLASS_PREFIX, AML_TYPE_BOGUS, AML_HAS_ARGS),
571 566
572/* ACPI 2.0 opcodes */ 567/* ACPI 2.0 opcodes */
573 568
574/* 6E */ ACPI_OP("QwordConst", ARGP_QWORD_OP, ARGI_QWORD_OP, 569/* 6E */ ACPI_OP("QwordConst", ARGP_QWORD_OP, ARGI_QWORD_OP,
575 ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT, 570 ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT,
576 AML_TYPE_LITERAL, AML_CONSTANT), 571 AML_TYPE_LITERAL, AML_CONSTANT),
577 /* 6F */ ACPI_OP("Package", /* Var */ ARGP_VAR_PACKAGE_OP, 572 /* 6F */ ACPI_OP("Package", /* Var */ ARGP_VAR_PACKAGE_OP,
578 ARGI_VAR_PACKAGE_OP, ACPI_TYPE_PACKAGE, 573 ARGI_VAR_PACKAGE_OP, ACPI_TYPE_PACKAGE,
579 AML_CLASS_CREATE, AML_TYPE_CREATE_OBJECT, 574 AML_CLASS_CREATE, AML_TYPE_CREATE_OBJECT,
580 AML_HAS_ARGS | AML_DEFER), 575 AML_HAS_ARGS | AML_DEFER),
581/* 70 */ ACPI_OP("ConcatenateResTemplate", ARGP_CONCAT_RES_OP, 576/* 70 */ ACPI_OP("ConcatenateResTemplate", ARGP_CONCAT_RES_OP,
582 ARGI_CONCAT_RES_OP, ACPI_TYPE_ANY, 577 ARGI_CONCAT_RES_OP, ACPI_TYPE_ANY,
583 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, 578 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
584 AML_FLAGS_EXEC_2A_1T_1R | AML_CONSTANT), 579 AML_FLAGS_EXEC_2A_1T_1R | AML_CONSTANT),
585/* 71 */ ACPI_OP("Mod", ARGP_MOD_OP, ARGI_MOD_OP, ACPI_TYPE_ANY, 580/* 71 */ ACPI_OP("Mod", ARGP_MOD_OP, ARGI_MOD_OP, ACPI_TYPE_ANY,
586 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, 581 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
587 AML_FLAGS_EXEC_2A_1T_1R | AML_CONSTANT), 582 AML_FLAGS_EXEC_2A_1T_1R | AML_CONSTANT),
588/* 72 */ ACPI_OP("CreateQWordField", ARGP_CREATE_QWORD_FIELD_OP, 583/* 72 */ ACPI_OP("CreateQWordField", ARGP_CREATE_QWORD_FIELD_OP,
589 ARGI_CREATE_QWORD_FIELD_OP, 584 ARGI_CREATE_QWORD_FIELD_OP,
590 ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE, 585 ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE,
591 AML_TYPE_CREATE_FIELD, 586 AML_TYPE_CREATE_FIELD,
592 AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE | 587 AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE |
593 AML_DEFER | AML_CREATE), 588 AML_DEFER | AML_CREATE),
594/* 73 */ ACPI_OP("ToBuffer", ARGP_TO_BUFFER_OP, ARGI_TO_BUFFER_OP, 589/* 73 */ ACPI_OP("ToBuffer", ARGP_TO_BUFFER_OP, ARGI_TO_BUFFER_OP,
595 ACPI_TYPE_ANY, AML_CLASS_EXECUTE, 590 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
596 AML_TYPE_EXEC_1A_1T_1R, 591 AML_TYPE_EXEC_1A_1T_1R,
597 AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT), 592 AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
598/* 74 */ ACPI_OP("ToDecimalString", ARGP_TO_DEC_STR_OP, 593/* 74 */ ACPI_OP("ToDecimalString", ARGP_TO_DEC_STR_OP,
599 ARGI_TO_DEC_STR_OP, ACPI_TYPE_ANY, 594 ARGI_TO_DEC_STR_OP, ACPI_TYPE_ANY,
600 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R, 595 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R,
601 AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT), 596 AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
602/* 75 */ ACPI_OP("ToHexString", ARGP_TO_HEX_STR_OP, ARGI_TO_HEX_STR_OP, 597/* 75 */ ACPI_OP("ToHexString", ARGP_TO_HEX_STR_OP, ARGI_TO_HEX_STR_OP,
603 ACPI_TYPE_ANY, AML_CLASS_EXECUTE, 598 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
604 AML_TYPE_EXEC_1A_1T_1R, 599 AML_TYPE_EXEC_1A_1T_1R,
605 AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT), 600 AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
606/* 76 */ ACPI_OP("ToInteger", ARGP_TO_INTEGER_OP, ARGI_TO_INTEGER_OP, 601/* 76 */ ACPI_OP("ToInteger", ARGP_TO_INTEGER_OP, ARGI_TO_INTEGER_OP,
607 ACPI_TYPE_ANY, AML_CLASS_EXECUTE, 602 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
608 AML_TYPE_EXEC_1A_1T_1R, 603 AML_TYPE_EXEC_1A_1T_1R,
609 AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT), 604 AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
610/* 77 */ ACPI_OP("ToString", ARGP_TO_STRING_OP, ARGI_TO_STRING_OP, 605/* 77 */ ACPI_OP("ToString", ARGP_TO_STRING_OP, ARGI_TO_STRING_OP,
611 ACPI_TYPE_ANY, AML_CLASS_EXECUTE, 606 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
612 AML_TYPE_EXEC_2A_1T_1R, 607 AML_TYPE_EXEC_2A_1T_1R,
613 AML_FLAGS_EXEC_2A_1T_1R | AML_CONSTANT), 608 AML_FLAGS_EXEC_2A_1T_1R | AML_CONSTANT),
614/* 78 */ ACPI_OP("CopyObject", ARGP_COPY_OP, ARGI_COPY_OP, 609/* 78 */ ACPI_OP("CopyObject", ARGP_COPY_OP, ARGI_COPY_OP,
615 ACPI_TYPE_ANY, AML_CLASS_EXECUTE, 610 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
616 AML_TYPE_EXEC_1A_1T_1R, AML_FLAGS_EXEC_1A_1T_1R), 611 AML_TYPE_EXEC_1A_1T_1R, AML_FLAGS_EXEC_1A_1T_1R),
617/* 79 */ ACPI_OP("Mid", ARGP_MID_OP, ARGI_MID_OP, ACPI_TYPE_ANY, 612/* 79 */ ACPI_OP("Mid", ARGP_MID_OP, ARGI_MID_OP, ACPI_TYPE_ANY,
618 AML_CLASS_EXECUTE, AML_TYPE_EXEC_3A_1T_1R, 613 AML_CLASS_EXECUTE, AML_TYPE_EXEC_3A_1T_1R,
619 AML_FLAGS_EXEC_3A_1T_1R | AML_CONSTANT), 614 AML_FLAGS_EXEC_3A_1T_1R | AML_CONSTANT),
620/* 7A */ ACPI_OP("Continue", ARGP_CONTINUE_OP, ARGI_CONTINUE_OP, 615/* 7A */ ACPI_OP("Continue", ARGP_CONTINUE_OP, ARGI_CONTINUE_OP,
621 ACPI_TYPE_ANY, AML_CLASS_CONTROL, AML_TYPE_CONTROL, 0), 616 ACPI_TYPE_ANY, AML_CLASS_CONTROL, AML_TYPE_CONTROL, 0),
622/* 7B */ ACPI_OP("LoadTable", ARGP_LOAD_TABLE_OP, ARGI_LOAD_TABLE_OP, 617/* 7B */ ACPI_OP("LoadTable", ARGP_LOAD_TABLE_OP, ARGI_LOAD_TABLE_OP,
623 ACPI_TYPE_ANY, AML_CLASS_EXECUTE, 618 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
624 AML_TYPE_EXEC_6A_0T_1R, AML_FLAGS_EXEC_6A_0T_1R), 619 AML_TYPE_EXEC_6A_0T_1R, AML_FLAGS_EXEC_6A_0T_1R),
625/* 7C */ ACPI_OP("DataTableRegion", ARGP_DATA_REGION_OP, 620/* 7C */ ACPI_OP("DataTableRegion", ARGP_DATA_REGION_OP,
626 ARGI_DATA_REGION_OP, ACPI_TYPE_REGION, 621 ARGI_DATA_REGION_OP, ACPI_TYPE_REGION,
627 AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_SIMPLE, 622 AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_SIMPLE,
628 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | 623 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
629 AML_NSNODE | AML_NAMED), 624 AML_NSNODE | AML_NAMED),
630/* 7D */ ACPI_OP("[EvalSubTree]", ARGP_SCOPE_OP, ARGI_SCOPE_OP, 625/* 7D */ ACPI_OP("[EvalSubTree]", ARGP_SCOPE_OP, ARGI_SCOPE_OP,
631 ACPI_TYPE_ANY, AML_CLASS_NAMED_OBJECT, 626 ACPI_TYPE_ANY, AML_CLASS_NAMED_OBJECT,
632 AML_TYPE_NAMED_NO_OBJ, 627 AML_TYPE_NAMED_NO_OBJ,
633 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | 628 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE),
634 AML_NSNODE),
635 629
636/* ACPI 3.0 opcodes */ 630/* ACPI 3.0 opcodes */
637 631
638/* 7E */ ACPI_OP("Timer", ARGP_TIMER_OP, ARGI_TIMER_OP, ACPI_TYPE_ANY, 632/* 7E */ ACPI_OP("Timer", ARGP_TIMER_OP, ARGI_TIMER_OP, ACPI_TYPE_ANY,
639 AML_CLASS_EXECUTE, AML_TYPE_EXEC_0A_0T_1R, 633 AML_CLASS_EXECUTE, AML_TYPE_EXEC_0A_0T_1R,
640 AML_FLAGS_EXEC_0A_0T_1R) 634 AML_FLAGS_EXEC_0A_0T_1R)
641 635
642/*! [End] no source code translation !*/ 636/*! [End] no source code translation !*/
643}; 637};
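The psopcode.c hunks above are pure re-wraps of the AML opcode information table. Each ACPI_OP() row maps one opcode to its parse-time argument grammar (ARGP_*), runtime argument types (ARGI_*), object type, class, and flags; the parser and interpreter index this table to drive decoding. A minimal self-contained sketch of the same table-driven dispatch idea, with illustrative names rather than the real ACPICA types (the real table is reached through an opcode-to-row mapping; this sketch just walks rows):

/* Table-driven opcode dispatch; names are illustrative, not ACPICA's. */
#include <stdio.h>

struct op_info {
	const char *name;
	unsigned nargs;   /* arguments to parse */
	unsigned retval;  /* 1 if the op yields a value */
};

static const struct op_info op_table[] = {
	{ "Mod",   2, 1 },  /* cf. row 71 above */
	{ "Timer", 0, 1 },  /* cf. row 7E above */
};

int main(void)
{
	for (size_t i = 0; i < sizeof(op_table) / sizeof(op_table[0]); i++)
		printf("%s: %u args, returns=%u\n",
		       op_table[i].name, op_table[i].nargs, op_table[i].retval);
	return 0;
}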
diff --git a/drivers/acpi/resources/rscalc.c b/drivers/acpi/resources/rscalc.c
index 8c6d3fdec38a..0dd2ce8a3475 100644
--- a/drivers/acpi/resources/rscalc.c
+++ b/drivers/acpi/resources/rscalc.c
@@ -567,7 +567,8 @@ acpi_rs_get_pci_routing_table_length(union acpi_operand_object *package_object,
567 (*sub_object_list)->string. 567 (*sub_object_list)->string.
568 length + 1); 568 length + 1);
569 } else { 569 } else {
570 temp_size_needed += acpi_ns_get_pathname_length((*sub_object_list)->reference.node); 570 temp_size_needed +=
571 acpi_ns_get_pathname_length((*sub_object_list)->reference.node);
571 } 572 }
572 } else { 573 } else {
573 /* 574 /*
diff --git a/drivers/acpi/resources/rscreate.c b/drivers/acpi/resources/rscreate.c
index cc48ab05676c..50da494c3ee2 100644
--- a/drivers/acpi/resources/rscreate.c
+++ b/drivers/acpi/resources/rscreate.c
@@ -267,16 +267,19 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
267 * If BIOS erroneously reversed the _PRT source_name and source_index, 267 * If BIOS erroneously reversed the _PRT source_name and source_index,
268 * then reverse them back. 268 * then reverse them back.
269 */ 269 */
270 if (ACPI_GET_OBJECT_TYPE (sub_object_list[3]) != ACPI_TYPE_INTEGER) { 270 if (ACPI_GET_OBJECT_TYPE(sub_object_list[3]) !=
271 ACPI_TYPE_INTEGER) {
271 if (acpi_gbl_enable_interpreter_slack) { 272 if (acpi_gbl_enable_interpreter_slack) {
272 source_name_index = 3; 273 source_name_index = 3;
273 source_index_index = 2; 274 source_index_index = 2;
274 printk(KERN_WARNING "ACPI: Handling Garbled _PRT entry\n"); 275 printk(KERN_WARNING
276 "ACPI: Handling Garbled _PRT entry\n");
275 } else { 277 } else {
276 ACPI_ERROR((AE_INFO, 278 ACPI_ERROR((AE_INFO,
277 "(PRT[%X].source_index) Need Integer, found %s", 279 "(PRT[%X].source_index) Need Integer, found %s",
278 index, 280 index,
279 acpi_ut_get_object_type_name(sub_object_list[3]))); 281 acpi_ut_get_object_type_name
282 (sub_object_list[3])));
280 return_ACPI_STATUS(AE_BAD_DATA); 283 return_ACPI_STATUS(AE_BAD_DATA);
281 } 284 }
282 } 285 }
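The rscreate.c hunk re-wraps the interpreter-slack workaround for firmware that emits the _PRT source_name and source_index package elements in reverse order: instead of rejecting the table, the code swaps the indices it will read from and logs a warning. A self-contained sketch of that recovery idea, using hypothetical data:

/* Tolerate a swapped (source_name, source_index) pair by re-aiming the
 * indices used for extraction; the package itself is left untouched. */
#include <stdio.h>

int main(void)
{
	/* A _PRT entry is (address, pin, source_name, source_index); a buggy
	 * BIOS may emit the last two reversed, as simulated here. */
	const char *pkg[4] = { "0x0001ffff", "0", "3", "\\_SB.LNKA" };
	int name_idx = 2, index_idx = 3;
	int elem3_is_integer = 0;  /* pretend the type check failed */

	if (!elem3_is_integer) {   /* slack mode: assume the swap */
		name_idx = 3;
		index_idx = 2;
		fprintf(stderr, "warning: garbled _PRT entry, indices swapped\n");
	}
	printf("source=%s index=%s\n", pkg[name_idx], pkg[index_idx]);
	return 0;
}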
diff --git a/drivers/acpi/resources/rsdump.c b/drivers/acpi/resources/rsdump.c
index de20a5d6decf..46da116a4030 100644
--- a/drivers/acpi/resources/rsdump.c
+++ b/drivers/acpi/resources/rsdump.c
@@ -46,7 +46,6 @@
46 46
47#define _COMPONENT ACPI_RESOURCES 47#define _COMPONENT ACPI_RESOURCES
48ACPI_MODULE_NAME("rsdump") 48ACPI_MODULE_NAME("rsdump")
49
50#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER) 49#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
51/* Local prototypes */ 50/* Local prototypes */
52static void acpi_rs_out_string(char *title, char *value); 51static void acpi_rs_out_string(char *title, char *value);
@@ -489,10 +488,9 @@ acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table)
489 /* 488 /*
490 * Optional resource_source for Address resources 489 * Optional resource_source for Address resources
491 */ 490 */
492 acpi_rs_dump_resource_source(ACPI_CAST_PTR 491 acpi_rs_dump_resource_source(ACPI_CAST_PTR(struct
493 (struct 492 acpi_resource_source,
494 acpi_resource_source, 493 target));
495 target));
496 break; 494 break;
497 495
498 default: 496 default:
diff --git a/drivers/acpi/resources/rsinfo.c b/drivers/acpi/resources/rsinfo.c
index 7e3c335ab320..2c2adb6292c1 100644
--- a/drivers/acpi/resources/rsinfo.c
+++ b/drivers/acpi/resources/rsinfo.c
@@ -142,7 +142,7 @@ struct acpi_rsdump_info *acpi_gbl_dump_resource_dispatch[] = {
142}; 142};
143#endif 143#endif
144 144
145#endif /* ACPI_FUTURE_USAGE */ 145#endif /* ACPI_FUTURE_USAGE */
146/* 146/*
147 * Base sizes for external AML resource descriptors, indexed by internal type. 147 * Base sizes for external AML resource descriptors, indexed by internal type.
148 * Includes size of the descriptor header (1 byte for small descriptors, 148 * Includes size of the descriptor header (1 byte for small descriptors,
diff --git a/drivers/acpi/resources/rslist.c b/drivers/acpi/resources/rslist.c
index a92755c8877d..ca21e4660c79 100644
--- a/drivers/acpi/resources/rslist.c
+++ b/drivers/acpi/resources/rslist.c
@@ -153,10 +153,9 @@ acpi_rs_convert_resources_to_aml(struct acpi_resource *resource,
153 153
154 /* Perform the conversion */ 154 /* Perform the conversion */
155 155
156 status = acpi_rs_convert_resource_to_aml(resource, 156 status = acpi_rs_convert_resource_to_aml(resource, ACPI_CAST_PTR(union
157 ACPI_CAST_PTR(union 157 aml_resource,
158 aml_resource, 158 aml),
159 aml),
160 acpi_gbl_set_resource_dispatch 159 acpi_gbl_set_resource_dispatch
161 [resource->type]); 160 [resource->type]);
162 if (ACPI_FAILURE(status)) { 161 if (ACPI_FAILURE(status)) {
diff --git a/drivers/acpi/resources/rsmisc.c b/drivers/acpi/resources/rsmisc.c
index 3b63b561b94e..c7081afa893a 100644
--- a/drivers/acpi/resources/rsmisc.c
+++ b/drivers/acpi/resources/rsmisc.c
@@ -46,7 +46,6 @@
46 46
47#define _COMPONENT ACPI_RESOURCES 47#define _COMPONENT ACPI_RESOURCES
48ACPI_MODULE_NAME("rsmisc") 48ACPI_MODULE_NAME("rsmisc")
49
50#define INIT_RESOURCE_TYPE(i) i->resource_offset 49#define INIT_RESOURCE_TYPE(i) i->resource_offset
51#define INIT_RESOURCE_LENGTH(i) i->aml_offset 50#define INIT_RESOURCE_LENGTH(i) i->aml_offset
52#define INIT_TABLE_LENGTH(i) i->value 51#define INIT_TABLE_LENGTH(i) i->value
@@ -429,8 +428,7 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
429 * Optional resource_source (Index and String) 428 * Optional resource_source (Index and String)
430 */ 429 */
431 aml_length = 430 aml_length =
432 acpi_rs_set_resource_source(aml, 431 acpi_rs_set_resource_source(aml, (acpi_rs_length)
433 (acpi_rs_length)
434 aml_length, source); 432 aml_length, source);
435 acpi_rs_set_resource_length(aml_length, aml); 433 acpi_rs_set_resource_length(aml_length, aml);
436 break; 434 break;
diff --git a/drivers/acpi/resources/rsutils.c b/drivers/acpi/resources/rsutils.c
index 2442a8f8df57..11c0bd7b9cfd 100644
--- a/drivers/acpi/resources/rsutils.c
+++ b/drivers/acpi/resources/rsutils.c
@@ -353,10 +353,8 @@ acpi_rs_get_resource_source(acpi_rs_length resource_length,
353 * 353 *
354 * Zero the entire area of the buffer. 354 * Zero the entire area of the buffer.
355 */ 355 */
356 total_length = 356 total_length = (u32)
357 (u32) 357 ACPI_STRLEN(ACPI_CAST_PTR(char, &aml_resource_source[1])) + 1;
358 ACPI_STRLEN(ACPI_CAST_PTR(char, &aml_resource_source[1])) +
359 1;
360 total_length = (u32) ACPI_ROUND_UP_TO_NATIVE_WORD(total_length); 358 total_length = (u32) ACPI_ROUND_UP_TO_NATIVE_WORD(total_length);
361 359
362 ACPI_MEMSET(resource_source->string_ptr, 0, total_length); 360 ACPI_MEMSET(resource_source->string_ptr, 0, total_length);
diff --git a/drivers/acpi/resources/rsxface.c b/drivers/acpi/resources/rsxface.c
index 991f8901498c..f63813a358c5 100644
--- a/drivers/acpi/resources/rsxface.c
+++ b/drivers/acpi/resources/rsxface.c
@@ -217,7 +217,6 @@ acpi_get_current_resources(acpi_handle device_handle,
217} 217}
218 218
219ACPI_EXPORT_SYMBOL(acpi_get_current_resources) 219ACPI_EXPORT_SYMBOL(acpi_get_current_resources)
220
221#ifdef ACPI_FUTURE_USAGE 220#ifdef ACPI_FUTURE_USAGE
222/******************************************************************************* 221/*******************************************************************************
223 * 222 *
@@ -261,7 +260,6 @@ acpi_get_possible_resources(acpi_handle device_handle,
261 260
262ACPI_EXPORT_SYMBOL(acpi_get_possible_resources) 261ACPI_EXPORT_SYMBOL(acpi_get_possible_resources)
263#endif /* ACPI_FUTURE_USAGE */ 262#endif /* ACPI_FUTURE_USAGE */
264
265/******************************************************************************* 263/*******************************************************************************
266 * 264 *
267 * FUNCTION: acpi_set_current_resources 265 * FUNCTION: acpi_set_current_resources
@@ -496,7 +494,6 @@ ACPI_EXPORT_SYMBOL(acpi_rs_match_vendor_resource)
496 * each resource in the list. 494 * each resource in the list.
497 * 495 *
498 ******************************************************************************/ 496 ******************************************************************************/
499
500acpi_status 497acpi_status
501acpi_walk_resources(acpi_handle device_handle, 498acpi_walk_resources(acpi_handle device_handle,
502 char *name, 499 char *name,
diff --git a/drivers/acpi/sleep/main.c b/drivers/acpi/sleep/main.c
index f8c63410bcbf..bc7e16ec8393 100644
--- a/drivers/acpi/sleep/main.c
+++ b/drivers/acpi/sleep/main.c
@@ -29,7 +29,6 @@ static u32 acpi_suspend_states[] = {
29 [PM_SUSPEND_ON] = ACPI_STATE_S0, 29 [PM_SUSPEND_ON] = ACPI_STATE_S0,
30 [PM_SUSPEND_STANDBY] = ACPI_STATE_S1, 30 [PM_SUSPEND_STANDBY] = ACPI_STATE_S1,
31 [PM_SUSPEND_MEM] = ACPI_STATE_S3, 31 [PM_SUSPEND_MEM] = ACPI_STATE_S3,
32 [PM_SUSPEND_DISK] = ACPI_STATE_S4,
33 [PM_SUSPEND_MAX] = ACPI_STATE_S5 32 [PM_SUSPEND_MAX] = ACPI_STATE_S5
34}; 33};
35 34
@@ -94,14 +93,6 @@ static int acpi_pm_enter(suspend_state_t pm_state)
94 do_suspend_lowlevel(); 93 do_suspend_lowlevel();
95 break; 94 break;
96 95
97 case PM_SUSPEND_DISK:
98 if (acpi_pm_ops.pm_disk_mode == PM_DISK_PLATFORM)
99 status = acpi_enter_sleep_state(acpi_state);
100 break;
101 case PM_SUSPEND_MAX:
102 acpi_power_off();
103 break;
104
105 default: 96 default:
106 return -EINVAL; 97 return -EINVAL;
107 } 98 }
@@ -157,12 +148,13 @@ int acpi_suspend(u32 acpi_state)
157 suspend_state_t states[] = { 148 suspend_state_t states[] = {
158 [1] = PM_SUSPEND_STANDBY, 149 [1] = PM_SUSPEND_STANDBY,
159 [3] = PM_SUSPEND_MEM, 150 [3] = PM_SUSPEND_MEM,
160 [4] = PM_SUSPEND_DISK,
161 [5] = PM_SUSPEND_MAX 151 [5] = PM_SUSPEND_MAX
162 }; 152 };
163 153
164 if (acpi_state < 6 && states[acpi_state]) 154 if (acpi_state < 6 && states[acpi_state])
165 return pm_suspend(states[acpi_state]); 155 return pm_suspend(states[acpi_state]);
156 if (acpi_state == 4)
157 return hibernate();
166 return -EINVAL; 158 return -EINVAL;
167} 159}
168 160
@@ -189,6 +181,49 @@ static struct pm_ops acpi_pm_ops = {
189 .finish = acpi_pm_finish, 181 .finish = acpi_pm_finish,
190}; 182};
191 183
184#ifdef CONFIG_SOFTWARE_SUSPEND
185static int acpi_hibernation_prepare(void)
186{
187 return acpi_sleep_prepare(ACPI_STATE_S4);
188}
189
190static int acpi_hibernation_enter(void)
191{
192 acpi_status status = AE_OK;
193 unsigned long flags = 0;
194
195 ACPI_FLUSH_CPU_CACHE();
196
197 local_irq_save(flags);
198 acpi_enable_wakeup_device(ACPI_STATE_S4);
199 /* This shouldn't return. If it returns, we have a problem */
200 status = acpi_enter_sleep_state(ACPI_STATE_S4);
201 local_irq_restore(flags);
202
203 return ACPI_SUCCESS(status) ? 0 : -EFAULT;
204}
205
206static void acpi_hibernation_finish(void)
207{
208 acpi_leave_sleep_state(ACPI_STATE_S4);
209 acpi_disable_wakeup_device(ACPI_STATE_S4);
210
211 /* reset firmware waking vector */
212 acpi_set_firmware_waking_vector((acpi_physical_address) 0);
213
214 if (init_8259A_after_S1) {
215 printk("Broken toshiba laptop -> kicking interrupts\n");
216 init_8259A(0);
217 }
218}
219
220static struct hibernation_ops acpi_hibernation_ops = {
221 .prepare = acpi_hibernation_prepare,
222 .enter = acpi_hibernation_enter,
223 .finish = acpi_hibernation_finish,
224};
225#endif /* CONFIG_SOFTWARE_SUSPEND */
226
192/* 227/*
193 * Toshiba fails to preserve interrupts over S1, reinitialization 228 * Toshiba fails to preserve interrupts over S1, reinitialization
194 * of 8259 is needed after S1 resume. 229 * of 8259 is needed after S1 resume.
@@ -227,14 +262,17 @@ int __init acpi_sleep_init(void)
227 sleep_states[i] = 1; 262 sleep_states[i] = 1;
228 printk(" S%d", i); 263 printk(" S%d", i);
229 } 264 }
230 if (i == ACPI_STATE_S4) {
231 if (sleep_states[i])
232 acpi_pm_ops.pm_disk_mode = PM_DISK_PLATFORM;
233 }
234 } 265 }
235 printk(")\n"); 266 printk(")\n");
236 267
237 pm_set_ops(&acpi_pm_ops); 268 pm_set_ops(&acpi_pm_ops);
269
270#ifdef CONFIG_SOFTWARE_SUSPEND
271 if (sleep_states[ACPI_STATE_S4])
272 hibernation_set_ops(&acpi_hibernation_ops);
273#else
274 sleep_states[ACPI_STATE_S4] = 0;
275#endif
276
238 return 0; 277 return 0;
239} 278}
240
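The sleep/main.c change splits hibernation out of the generic suspend path: PM_SUSPEND_DISK disappears from the state maps, acpi_suspend() routes state 4 to hibernate(), and a dedicated hibernation_ops (prepare/enter/finish) is registered only when the platform advertises S4 and CONFIG_SOFTWARE_SUSPEND is set. A runnable user-space sketch of the ops-registration pattern itself; the hib_* names are invented:

/* Ops-struct registration: a subsystem exposes a set_ops() hook and
 * later drives whatever callbacks were registered. */
#include <stdio.h>

struct hib_ops {
	int  (*prepare)(void);
	int  (*enter)(void);
	void (*finish)(void);
};

static const struct hib_ops *active_ops;

static void hib_set_ops(const struct hib_ops *ops) { active_ops = ops; }

static int do_hibernate(void)
{
	if (!active_ops)
		return -1;                     /* no platform support */
	if (active_ops->prepare() || active_ops->enter())
		return -1;
	active_ops->finish();
	return 0;
}

static int  acpi_prep(void)  { puts("prepare S4"); return 0; }
static int  acpi_enter(void) { puts("enter S4");   return 0; }
static void acpi_fin(void)   { puts("finish S4"); }

int main(void)
{
	static const struct hib_ops acpi_ops = { acpi_prep, acpi_enter, acpi_fin };

	hib_set_ops(&acpi_ops);   /* cf. hibernation_set_ops() in the hunk */
	return do_hibernate();
}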
diff --git a/drivers/acpi/sleep/proc.c b/drivers/acpi/sleep/proc.c
index 5a76e5be61d5..61f1822cc350 100644
--- a/drivers/acpi/sleep/proc.c
+++ b/drivers/acpi/sleep/proc.c
@@ -60,7 +60,7 @@ acpi_system_write_sleep(struct file *file,
60 state = simple_strtoul(str, NULL, 0); 60 state = simple_strtoul(str, NULL, 0);
61#ifdef CONFIG_SOFTWARE_SUSPEND 61#ifdef CONFIG_SOFTWARE_SUSPEND
62 if (state == 4) { 62 if (state == 4) {
63 error = pm_suspend(PM_SUSPEND_DISK); 63 error = hibernate();
64 goto Done; 64 goto Done;
65 } 65 }
66#endif 66#endif
@@ -349,8 +349,7 @@ acpi_system_write_alarm(struct file *file,
349 end: 349 end:
350 return_VALUE(result ? result : count); 350 return_VALUE(result ? result : count);
351} 351}
352#endif /* HAVE_ACPI_LEGACY_ALARM */ 352#endif /* HAVE_ACPI_LEGACY_ALARM */
353
354 353
355extern struct list_head acpi_wakeup_device_list; 354extern struct list_head acpi_wakeup_device_list;
356extern spinlock_t acpi_device_lock; 355extern spinlock_t acpi_device_lock;
@@ -380,8 +379,8 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
380 dev->wakeup.state.enabled ? "enabled" : "disabled"); 379 dev->wakeup.state.enabled ? "enabled" : "disabled");
381 if (ldev) 380 if (ldev)
382 seq_printf(seq, "%s:%s", 381 seq_printf(seq, "%s:%s",
383 ldev->bus ? ldev->bus->name : "no-bus", 382 ldev->bus ? ldev->bus->name : "no-bus",
384 ldev->bus_id); 383 ldev->bus_id);
385 seq_printf(seq, "\n"); 384 seq_printf(seq, "\n");
386 put_device(ldev); 385 put_device(ldev);
387 386
@@ -490,7 +489,7 @@ static u32 rtc_handler(void *context)
490 489
491 return ACPI_INTERRUPT_HANDLED; 490 return ACPI_INTERRUPT_HANDLED;
492} 491}
493#endif /* HAVE_ACPI_LEGACY_ALARM */ 492#endif /* HAVE_ACPI_LEGACY_ALARM */
494 493
495static int __init acpi_sleep_proc_init(void) 494static int __init acpi_sleep_proc_init(void)
496{ 495{
@@ -517,7 +516,7 @@ static int __init acpi_sleep_proc_init(void)
517 entry->proc_fops = &acpi_system_alarm_fops; 516 entry->proc_fops = &acpi_system_alarm_fops;
518 517
519 acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, NULL); 518 acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, NULL);
520#endif /* HAVE_ACPI_LEGACY_ALARM */ 519#endif /* HAVE_ACPI_LEGACY_ALARM */
521 520
522 /* 'wakeup device' [R/W] */ 521 /* 'wakeup device' [R/W] */
523 entry = 522 entry =
diff --git a/drivers/acpi/tables/tbfadt.c b/drivers/acpi/tables/tbfadt.c
index 1db833eb2417..1285e91474fb 100644
--- a/drivers/acpi/tables/tbfadt.c
+++ b/drivers/acpi/tables/tbfadt.c
@@ -334,7 +334,8 @@ static void acpi_tb_convert_fadt(void)
334 (acpi_gbl_FADT.xpm1a_event_block.address + 334 (acpi_gbl_FADT.xpm1a_event_block.address +
335 pm1_register_length)); 335 pm1_register_length));
336 /* Don't forget to copy space_id of the GAS */ 336 /* Don't forget to copy space_id of the GAS */
337 acpi_gbl_xpm1a_enable.space_id = acpi_gbl_FADT.xpm1a_event_block.space_id; 337 acpi_gbl_xpm1a_enable.space_id =
338 acpi_gbl_FADT.xpm1a_event_block.space_id;
338 339
339 /* The PM1B register block is optional, ignore if not present */ 340 /* The PM1B register block is optional, ignore if not present */
340 341
@@ -344,7 +345,8 @@ static void acpi_tb_convert_fadt(void)
344 (acpi_gbl_FADT.xpm1b_event_block. 345 (acpi_gbl_FADT.xpm1b_event_block.
345 address + pm1_register_length)); 346 address + pm1_register_length));
346 /* Don't forget to copy space_id of the GAS */ 347 /* Don't forget to copy space_id of the GAS */
347 acpi_gbl_xpm1b_enable.space_id = acpi_gbl_FADT.xpm1a_event_block.space_id; 348 acpi_gbl_xpm1b_enable.space_id =
349 acpi_gbl_FADT.xpm1a_event_block.space_id;
348 350
349 } 351 }
350 352
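The tbfadt.c hunks only re-wrap two long assignments, but the surrounding logic deserves a gloss: the PM1 event block is a status half followed by an enable half, so the synthesized xpm1a/xpm1b "enable" GAS lives pm1_register_length bytes past the event block address, and, as the second hunk shows, both enable blocks take their space_id from xpm1a_event_block. A self-contained sketch of the address split, assuming the enable half is the upper half of the event block:

/* Derive the PM1 enable-register GAS from the event block: status
 * occupies the lower half, enable the upper half. */
#include <stdio.h>
#include <stdint.h>

struct gas { uint8_t space_id; uint8_t bit_width; uint64_t address; };

int main(void)
{
	struct gas pm1a_event  = { 1 /* system I/O */, 32, 0x400 };
	uint32_t   half_bytes  = pm1a_event.bit_width / 8 / 2;

	struct gas pm1a_enable = pm1a_event;   /* space_id copied over */
	pm1a_enable.address   += half_bytes;
	pm1a_enable.bit_width  = pm1a_event.bit_width / 2;

	printf("enable at 0x%llx, width %u, space %u\n",
	       (unsigned long long)pm1a_enable.address,
	       (unsigned)pm1a_enable.bit_width,
	       (unsigned)pm1a_enable.space_id);
	return 0;
}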
diff --git a/drivers/acpi/tables/tbxface.c b/drivers/acpi/tables/tbxface.c
index 417ef5fa7666..5b302c4e293f 100644
--- a/drivers/acpi/tables/tbxface.c
+++ b/drivers/acpi/tables/tbxface.c
@@ -201,6 +201,7 @@ acpi_status acpi_reallocate_root_table(void)
201 201
202 return_ACPI_STATUS(AE_OK); 202 return_ACPI_STATUS(AE_OK);
203} 203}
204
204/******************************************************************************* 205/*******************************************************************************
205 * 206 *
206 * FUNCTION: acpi_load_table 207 * FUNCTION: acpi_load_table
@@ -262,7 +263,7 @@ ACPI_EXPORT_SYMBOL(acpi_load_table)
262acpi_status 263acpi_status
263acpi_get_table_header(char *signature, 264acpi_get_table_header(char *signature,
264 acpi_native_uint instance, 265 acpi_native_uint instance,
265 struct acpi_table_header *out_table_header) 266 struct acpi_table_header * out_table_header)
266{ 267{
267 acpi_native_uint i; 268 acpi_native_uint i;
268 acpi_native_uint j; 269 acpi_native_uint j;
@@ -321,7 +322,6 @@ acpi_get_table_header(char *signature,
321 322
322ACPI_EXPORT_SYMBOL(acpi_get_table_header) 323ACPI_EXPORT_SYMBOL(acpi_get_table_header)
323 324
324
325/****************************************************************************** 325/******************************************************************************
326 * 326 *
327 * FUNCTION: acpi_unload_table_id 327 * FUNCTION: acpi_unload_table_id
@@ -346,11 +346,11 @@ acpi_status acpi_unload_table_id(acpi_owner_id id)
346 continue; 346 continue;
347 } 347 }
348 /* 348 /*
349 * Delete all namespace objects owned by this table. Note that these 349 * Delete all namespace objects owned by this table. Note that these
350 * objects can appear anywhere in the namespace by virtue of the AML 350 * objects can appear anywhere in the namespace by virtue of the AML
351 * "Scope" operator. Thus, we need to track ownership by an ID, not 351 * "Scope" operator. Thus, we need to track ownership by an ID, not
352 * simply a position within the hierarchy 352 * simply a position within the hierarchy
353 */ 353 */
354 acpi_tb_delete_namespace_by_owner(i); 354 acpi_tb_delete_namespace_by_owner(i);
355 status = acpi_tb_release_owner_id(i); 355 status = acpi_tb_release_owner_id(i);
356 acpi_tb_set_table_loaded_flag(i, FALSE); 356 acpi_tb_set_table_loaded_flag(i, FALSE);
@@ -376,7 +376,7 @@ ACPI_EXPORT_SYMBOL(acpi_unload_table_id)
376 *****************************************************************************/ 376 *****************************************************************************/
377acpi_status 377acpi_status
378acpi_get_table(char *signature, 378acpi_get_table(char *signature,
379 acpi_native_uint instance, struct acpi_table_header ** out_table) 379 acpi_native_uint instance, struct acpi_table_header **out_table)
380{ 380{
381 acpi_native_uint i; 381 acpi_native_uint i;
382 acpi_native_uint j; 382 acpi_native_uint j;
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 589b98b7b216..1ada017d01ef 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -59,8 +59,6 @@
59#define ACPI_THERMAL_NOTIFY_CRITICAL 0xF0 59#define ACPI_THERMAL_NOTIFY_CRITICAL 0xF0
60#define ACPI_THERMAL_NOTIFY_HOT 0xF1 60#define ACPI_THERMAL_NOTIFY_HOT 0xF1
61#define ACPI_THERMAL_MODE_ACTIVE 0x00 61#define ACPI_THERMAL_MODE_ACTIVE 0x00
62#define ACPI_THERMAL_MODE_PASSIVE 0x01
63#define ACPI_THERMAL_MODE_CRITICAL 0xff
64#define ACPI_THERMAL_PATH_POWEROFF "/sbin/poweroff" 62#define ACPI_THERMAL_PATH_POWEROFF "/sbin/poweroff"
65 63
66#define ACPI_THERMAL_MAX_ACTIVE 10 64#define ACPI_THERMAL_MAX_ACTIVE 10
@@ -86,9 +84,6 @@ static int acpi_thermal_resume(struct acpi_device *device);
86static int acpi_thermal_state_open_fs(struct inode *inode, struct file *file); 84static int acpi_thermal_state_open_fs(struct inode *inode, struct file *file);
87static int acpi_thermal_temp_open_fs(struct inode *inode, struct file *file); 85static int acpi_thermal_temp_open_fs(struct inode *inode, struct file *file);
88static int acpi_thermal_trip_open_fs(struct inode *inode, struct file *file); 86static int acpi_thermal_trip_open_fs(struct inode *inode, struct file *file);
89static ssize_t acpi_thermal_write_trip_points(struct file *,
90 const char __user *, size_t,
91 loff_t *);
92static int acpi_thermal_cooling_open_fs(struct inode *inode, struct file *file); 87static int acpi_thermal_cooling_open_fs(struct inode *inode, struct file *file);
93static ssize_t acpi_thermal_write_cooling_mode(struct file *, 88static ssize_t acpi_thermal_write_cooling_mode(struct file *,
94 const char __user *, size_t, 89 const char __user *, size_t,
@@ -167,7 +162,6 @@ struct acpi_thermal {
167 unsigned long temperature; 162 unsigned long temperature;
168 unsigned long last_temperature; 163 unsigned long last_temperature;
169 unsigned long polling_frequency; 164 unsigned long polling_frequency;
170 u8 cooling_mode;
171 volatile u8 zombie; 165 volatile u8 zombie;
172 struct acpi_thermal_flags flags; 166 struct acpi_thermal_flags flags;
173 struct acpi_thermal_state state; 167 struct acpi_thermal_state state;
@@ -193,7 +187,6 @@ static const struct file_operations acpi_thermal_temp_fops = {
193static const struct file_operations acpi_thermal_trip_fops = { 187static const struct file_operations acpi_thermal_trip_fops = {
194 .open = acpi_thermal_trip_open_fs, 188 .open = acpi_thermal_trip_open_fs,
195 .read = seq_read, 189 .read = seq_read,
196 .write = acpi_thermal_write_trip_points,
197 .llseek = seq_lseek, 190 .llseek = seq_lseek,
198 .release = single_release, 191 .release = single_release,
199}; 192};
@@ -297,11 +290,6 @@ static int acpi_thermal_set_cooling_mode(struct acpi_thermal *tz, int mode)
297 if (ACPI_FAILURE(status)) 290 if (ACPI_FAILURE(status))
298 return -ENODEV; 291 return -ENODEV;
299 292
300 tz->cooling_mode = mode;
301
302 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Cooling mode [%s]\n",
303 mode ? "passive" : "active"));
304
305 return 0; 293 return 0;
306} 294}
307 295
@@ -889,67 +877,6 @@ static int acpi_thermal_trip_open_fs(struct inode *inode, struct file *file)
889 return single_open(file, acpi_thermal_trip_seq_show, PDE(inode)->data); 877 return single_open(file, acpi_thermal_trip_seq_show, PDE(inode)->data);
890} 878}
891 879
892static ssize_t
893acpi_thermal_write_trip_points(struct file *file,
894 const char __user * buffer,
895 size_t count, loff_t * ppos)
896{
897 struct seq_file *m = file->private_data;
898 struct acpi_thermal *tz = m->private;
899
900 char *limit_string;
901 int num, critical, hot, passive;
902 int *active;
903 int i = 0;
904
905
906 limit_string = kzalloc(ACPI_THERMAL_MAX_LIMIT_STR_LEN, GFP_KERNEL);
907 if (!limit_string)
908 return -ENOMEM;
909
910 active = kmalloc(ACPI_THERMAL_MAX_ACTIVE * sizeof(int), GFP_KERNEL);
911 if (!active) {
912 kfree(limit_string);
913 return -ENOMEM;
914 }
915
916 if (!tz || (count > ACPI_THERMAL_MAX_LIMIT_STR_LEN - 1)) {
917 count = -EINVAL;
918 goto end;
919 }
920
921 if (copy_from_user(limit_string, buffer, count)) {
922 count = -EFAULT;
923 goto end;
924 }
925
926 limit_string[count] = '\0';
927
928 num = sscanf(limit_string, "%d:%d:%d:%d:%d:%d:%d:%d:%d:%d:%d:%d:%d",
929 &critical, &hot, &passive,
930 &active[0], &active[1], &active[2], &active[3], &active[4],
931 &active[5], &active[6], &active[7], &active[8],
932 &active[9]);
933 if (!(num >= 5 && num < (ACPI_THERMAL_MAX_ACTIVE + 3))) {
934 count = -EINVAL;
935 goto end;
936 }
937
938 tz->trips.critical.temperature = CELSIUS_TO_KELVIN(critical);
939 tz->trips.hot.temperature = CELSIUS_TO_KELVIN(hot);
940 tz->trips.passive.temperature = CELSIUS_TO_KELVIN(passive);
941 for (i = 0; i < num - 3; i++) {
942 if (!(tz->trips.active[i].flags.valid))
943 break;
944 tz->trips.active[i].temperature = CELSIUS_TO_KELVIN(active[i]);
945 }
946
947 end:
948 kfree(active);
949 kfree(limit_string);
950 return count;
951}
952
953static int acpi_thermal_cooling_seq_show(struct seq_file *seq, void *offset) 880static int acpi_thermal_cooling_seq_show(struct seq_file *seq, void *offset)
954{ 881{
955 struct acpi_thermal *tz = seq->private; 882 struct acpi_thermal *tz = seq->private;
@@ -958,15 +885,10 @@ static int acpi_thermal_cooling_seq_show(struct seq_file *seq, void *offset)
958 if (!tz) 885 if (!tz)
959 goto end; 886 goto end;
960 887
961 if (!tz->flags.cooling_mode) { 888 if (!tz->flags.cooling_mode)
962 seq_puts(seq, "<setting not supported>\n"); 889 seq_puts(seq, "<setting not supported>\n");
963 }
964
965 if (tz->cooling_mode == ACPI_THERMAL_MODE_CRITICAL)
966 seq_printf(seq, "cooling mode: critical\n");
967 else 890 else
968 seq_printf(seq, "cooling mode: %s\n", 891 seq_puts(seq, "0 - Active; 1 - Passive\n");
969 tz->cooling_mode ? "passive" : "active");
970 892
971 end: 893 end:
972 return 0; 894 return 0;
@@ -1223,28 +1145,6 @@ static int acpi_thermal_get_info(struct acpi_thermal *tz)
1223 result = acpi_thermal_set_cooling_mode(tz, ACPI_THERMAL_MODE_ACTIVE); 1145 result = acpi_thermal_set_cooling_mode(tz, ACPI_THERMAL_MODE_ACTIVE);
1224 if (!result) 1146 if (!result)
1225 tz->flags.cooling_mode = 1; 1147 tz->flags.cooling_mode = 1;
1226 else {
1227 /* Oh,we have not _SCP method.
1228 Generally show cooling_mode by _ACx, _PSV,spec 12.2 */
1229 tz->flags.cooling_mode = 0;
1230 if (tz->trips.active[0].flags.valid
1231 && tz->trips.passive.flags.valid) {
1232 if (tz->trips.passive.temperature >
1233 tz->trips.active[0].temperature)
1234 tz->cooling_mode = ACPI_THERMAL_MODE_ACTIVE;
1235 else
1236 tz->cooling_mode = ACPI_THERMAL_MODE_PASSIVE;
1237 } else if (!tz->trips.active[0].flags.valid
1238 && tz->trips.passive.flags.valid) {
1239 tz->cooling_mode = ACPI_THERMAL_MODE_PASSIVE;
1240 } else if (tz->trips.active[0].flags.valid
1241 && !tz->trips.passive.flags.valid) {
1242 tz->cooling_mode = ACPI_THERMAL_MODE_ACTIVE;
1243 } else {
1244 /* _ACx and _PSV are optional, but _CRT is required */
1245 tz->cooling_mode = ACPI_THERMAL_MODE_CRITICAL;
1246 }
1247 }
1248 1148
1249 /* Get default polling frequency [_TZP] (optional) */ 1149 /* Get default polling frequency [_TZP] (optional) */
1250 if (tzp) 1150 if (tzp)
diff --git a/drivers/acpi/utilities/utalloc.c b/drivers/acpi/utilities/utalloc.c
index 55a764807499..6e56d5f7c43a 100644
--- a/drivers/acpi/utilities/utalloc.c
+++ b/drivers/acpi/utilities/utalloc.c
@@ -107,7 +107,6 @@ acpi_status acpi_ut_create_caches(void)
107 if (ACPI_FAILURE(status)) { 107 if (ACPI_FAILURE(status)) {
108 return (status); 108 return (status);
109 } 109 }
110
111#ifdef ACPI_DBG_TRACK_ALLOCATIONS 110#ifdef ACPI_DBG_TRACK_ALLOCATIONS
112 111
113 /* Memory allocation lists */ 112 /* Memory allocation lists */
diff --git a/drivers/acpi/utilities/utcache.c b/drivers/acpi/utilities/utcache.c
index 870f6edeb5f2..285a0f531760 100644
--- a/drivers/acpi/utilities/utcache.c
+++ b/drivers/acpi/utilities/utcache.c
@@ -45,7 +45,6 @@
45 45
46#define _COMPONENT ACPI_UTILITIES 46#define _COMPONENT ACPI_UTILITIES
47ACPI_MODULE_NAME("utcache") 47ACPI_MODULE_NAME("utcache")
48
49#ifdef ACPI_USE_LOCAL_CACHE 48#ifdef ACPI_USE_LOCAL_CACHE
50/******************************************************************************* 49/*******************************************************************************
51 * 50 *
@@ -64,7 +63,7 @@ ACPI_MODULE_NAME("utcache")
64acpi_status 63acpi_status
65acpi_os_create_cache(char *cache_name, 64acpi_os_create_cache(char *cache_name,
66 u16 object_size, 65 u16 object_size,
67 u16 max_depth, struct acpi_memory_list **return_cache) 66 u16 max_depth, struct acpi_memory_list ** return_cache)
68{ 67{
69 struct acpi_memory_list *cache; 68 struct acpi_memory_list *cache;
70 69
diff --git a/drivers/acpi/utilities/utcopy.c b/drivers/acpi/utilities/utcopy.c
index 84d529db0a66..4c1e00874dff 100644
--- a/drivers/acpi/utilities/utcopy.c
+++ b/drivers/acpi/utilities/utcopy.c
@@ -814,7 +814,9 @@ acpi_ut_copy_ielement_to_ielement(u8 object_type,
814 /* 814 /*
815 * Create the object array 815 * Create the object array
816 */ 816 */
817 target_object->package.elements = ACPI_ALLOCATE_ZEROED(((acpi_size) source_object->package.count + 1) * sizeof(void *)); 817 target_object->package.elements =
818 ACPI_ALLOCATE_ZEROED(((acpi_size) source_object->package.
819 count + 1) * sizeof(void *));
818 if (!target_object->package.elements) { 820 if (!target_object->package.elements) {
819 status = AE_NO_MEMORY; 821 status = AE_NO_MEMORY;
820 goto error_exit; 822 goto error_exit;
diff --git a/drivers/acpi/utilities/utdebug.c b/drivers/acpi/utilities/utdebug.c
index 61ad4f2daee2..c7e128e5369b 100644
--- a/drivers/acpi/utilities/utdebug.c
+++ b/drivers/acpi/utilities/utdebug.c
@@ -45,7 +45,6 @@
45 45
46#define _COMPONENT ACPI_UTILITIES 46#define _COMPONENT ACPI_UTILITIES
47ACPI_MODULE_NAME("utdebug") 47ACPI_MODULE_NAME("utdebug")
48
49#ifdef ACPI_DEBUG_OUTPUT 48#ifdef ACPI_DEBUG_OUTPUT
50static acpi_thread_id acpi_gbl_prev_thread_id; 49static acpi_thread_id acpi_gbl_prev_thread_id;
51static char *acpi_gbl_fn_entry_str = "----Entry"; 50static char *acpi_gbl_fn_entry_str = "----Entry";
@@ -181,7 +180,8 @@ acpi_ut_debug_print(u32 requested_debug_level,
181 if (ACPI_LV_THREADS & acpi_dbg_level) { 180 if (ACPI_LV_THREADS & acpi_dbg_level) {
182 acpi_os_printf 181 acpi_os_printf
183 ("\n**** Context Switch from TID %lX to TID %lX ****\n\n", 182 ("\n**** Context Switch from TID %lX to TID %lX ****\n\n",
184 (unsigned long)acpi_gbl_prev_thread_id, (unsigned long)thread_id); 183 (unsigned long)acpi_gbl_prev_thread_id,
184 (unsigned long)thread_id);
185 } 185 }
186 186
187 acpi_gbl_prev_thread_id = thread_id; 187 acpi_gbl_prev_thread_id = thread_id;
diff --git a/drivers/acpi/utilities/utdelete.c b/drivers/acpi/utilities/utdelete.c
index 673a0caa4073..f777cebdc46d 100644
--- a/drivers/acpi/utilities/utdelete.c
+++ b/drivers/acpi/utilities/utdelete.c
@@ -170,6 +170,7 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
170 acpi_os_delete_mutex(object->mutex.os_mutex); 170 acpi_os_delete_mutex(object->mutex.os_mutex);
171 acpi_gbl_global_lock_mutex = NULL; 171 acpi_gbl_global_lock_mutex = NULL;
172 } else { 172 } else {
173 acpi_ex_unlink_mutex(object);
173 acpi_os_delete_mutex(object->mutex.os_mutex); 174 acpi_os_delete_mutex(object->mutex.os_mutex);
174 } 175 }
175 break; 176 break;
diff --git a/drivers/acpi/utilities/utglobal.c b/drivers/acpi/utilities/utglobal.c
index af33358a964b..1621655d6e2b 100644
--- a/drivers/acpi/utilities/utglobal.c
+++ b/drivers/acpi/utilities/utglobal.c
@@ -55,12 +55,10 @@ ACPI_EXPORT_SYMBOL(acpi_gbl_FADT)
55 * Static global variable initialization. 55 * Static global variable initialization.
56 * 56 *
57 ******************************************************************************/ 57 ******************************************************************************/
58
59/* 58/*
60 * We want the debug switches statically initialized so they 59 * We want the debug switches statically initialized so they
61 * are already set when the debugger is entered. 60 * are already set when the debugger is entered.
62 */ 61 */
63
64/* Debug switch - level and trace mask */ 62/* Debug switch - level and trace mask */
65u32 acpi_dbg_level = ACPI_DEBUG_DEFAULT; 63u32 acpi_dbg_level = ACPI_DEBUG_DEFAULT;
66 64
@@ -735,5 +733,5 @@ void acpi_ut_init_globals(void)
735} 733}
736 734
737ACPI_EXPORT_SYMBOL(acpi_dbg_level) 735ACPI_EXPORT_SYMBOL(acpi_dbg_level)
738ACPI_EXPORT_SYMBOL(acpi_dbg_layer) 736 ACPI_EXPORT_SYMBOL(acpi_dbg_layer)
739ACPI_EXPORT_SYMBOL(acpi_gpe_count) 737 ACPI_EXPORT_SYMBOL(acpi_gpe_count)
diff --git a/drivers/acpi/utilities/utmisc.c b/drivers/acpi/utilities/utmisc.c
index 50133fffe420..2d19f71e9cfa 100644
--- a/drivers/acpi/utilities/utmisc.c
+++ b/drivers/acpi/utilities/utmisc.c
@@ -802,9 +802,8 @@ acpi_ut_strtoul64(char *string, u32 base, acpi_integer * ret_integer)
802 802
803 valid_digits++; 803 valid_digits++;
804 804
805 if (sign_of0x 805 if (sign_of0x && ((valid_digits > 16)
806 && ((valid_digits > 16) 806 || ((valid_digits > 8) && mode32))) {
807 || ((valid_digits > 8) && mode32))) {
808 /* 807 /*
809 * This is to_integer operation case. 808 * This is to_integer operation case.
810 * No any restrictions for string-to-integer conversion, 809 * No any restrictions for string-to-integer conversion,
@@ -1049,6 +1048,7 @@ acpi_ut_exception(char *module_name,
1049 acpi_os_vprintf(format, args); 1048 acpi_os_vprintf(format, args);
1050 acpi_os_printf(" [%X]\n", ACPI_CA_VERSION); 1049 acpi_os_printf(" [%X]\n", ACPI_CA_VERSION);
1051} 1050}
1051
1052EXPORT_SYMBOL(acpi_ut_exception); 1052EXPORT_SYMBOL(acpi_ut_exception);
1053 1053
1054void ACPI_INTERNAL_VAR_XFACE 1054void ACPI_INTERNAL_VAR_XFACE
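The utmisc.c hunk re-wraps the digit-count guard in acpi_ut_strtoul64(): when the string carries an explicit 0x prefix, more than 16 hex digits (or more than 8 in 32-bit integer mode) cannot fit the target integer, so the conversion bails out. A tiny standalone illustration of that bound:

/* Reject explicit-hex strings whose digit count exceeds the integer width. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *s = "0x11223344556677889";   /* 17 hex digits */
	int mode32 = 0;                          /* 64-bit integers */
	size_t valid_digits = strlen(s) - 2;     /* digits after "0x" */

	if (valid_digits > 16 || (valid_digits > 8 && mode32))
		puts("overflow: too many digits for the target integer");
	return 0;
}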
diff --git a/drivers/acpi/utilities/utmutex.c b/drivers/acpi/utilities/utmutex.c
index cbad2ef5987d..4820bc86d1f5 100644
--- a/drivers/acpi/utilities/utmutex.c
+++ b/drivers/acpi/utilities/utmutex.c
@@ -244,7 +244,7 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
244 244
245 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, 245 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
246 "Thread %lX attempting to acquire Mutex [%s]\n", 246 "Thread %lX attempting to acquire Mutex [%s]\n",
247 (unsigned long) this_thread_id, 247 (unsigned long)this_thread_id,
248 acpi_ut_get_mutex_name(mutex_id))); 248 acpi_ut_get_mutex_name(mutex_id)));
249 249
250 status = acpi_os_acquire_mutex(acpi_gbl_mutex_info[mutex_id].mutex, 250 status = acpi_os_acquire_mutex(acpi_gbl_mutex_info[mutex_id].mutex,
@@ -252,7 +252,7 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
252 if (ACPI_SUCCESS(status)) { 252 if (ACPI_SUCCESS(status)) {
253 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, 253 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
254 "Thread %lX acquired Mutex [%s]\n", 254 "Thread %lX acquired Mutex [%s]\n",
255 (unsigned long) this_thread_id, 255 (unsigned long)this_thread_id,
256 acpi_ut_get_mutex_name(mutex_id))); 256 acpi_ut_get_mutex_name(mutex_id)));
257 257
258 acpi_gbl_mutex_info[mutex_id].use_count++; 258 acpi_gbl_mutex_info[mutex_id].use_count++;
@@ -260,7 +260,7 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
260 } else { 260 } else {
261 ACPI_EXCEPTION((AE_INFO, status, 261 ACPI_EXCEPTION((AE_INFO, status,
262 "Thread %lX could not acquire Mutex [%X]", 262 "Thread %lX could not acquire Mutex [%X]",
263 (unsigned long) this_thread_id, mutex_id)); 263 (unsigned long)this_thread_id, mutex_id));
264 } 264 }
265 265
266 return (status); 266 return (status);
@@ -287,7 +287,7 @@ acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id)
287 this_thread_id = acpi_os_get_thread_id(); 287 this_thread_id = acpi_os_get_thread_id();
288 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, 288 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
289 "Thread %lX releasing Mutex [%s]\n", 289 "Thread %lX releasing Mutex [%s]\n",
290 (unsigned long) this_thread_id, 290 (unsigned long)this_thread_id,
291 acpi_ut_get_mutex_name(mutex_id))); 291 acpi_ut_get_mutex_name(mutex_id)));
292 292
293 if (mutex_id > ACPI_MAX_MUTEX) { 293 if (mutex_id > ACPI_MAX_MUTEX) {
diff --git a/drivers/acpi/utilities/utresrc.c b/drivers/acpi/utilities/utresrc.c
index e8fe1ba6cc24..cbbd3315a1e2 100644
--- a/drivers/acpi/utilities/utresrc.c
+++ b/drivers/acpi/utilities/utresrc.c
@@ -46,7 +46,6 @@
46 46
47#define _COMPONENT ACPI_UTILITIES 47#define _COMPONENT ACPI_UTILITIES
48ACPI_MODULE_NAME("utresrc") 48ACPI_MODULE_NAME("utresrc")
49
50#if defined(ACPI_DISASSEMBLER) || defined (ACPI_DEBUGGER) 49#if defined(ACPI_DISASSEMBLER) || defined (ACPI_DEBUGGER)
51/* 50/*
52 * Strings used to decode resource descriptors. 51 * Strings used to decode resource descriptors.
diff --git a/drivers/acpi/utilities/utxface.c b/drivers/acpi/utilities/utxface.c
index de3276f4f468..e9a57806cd34 100644
--- a/drivers/acpi/utilities/utxface.c
+++ b/drivers/acpi/utilities/utxface.c
@@ -337,7 +337,6 @@ acpi_status acpi_terminate(void)
337} 337}
338 338
339ACPI_EXPORT_SYMBOL(acpi_terminate) 339ACPI_EXPORT_SYMBOL(acpi_terminate)
340
341#ifdef ACPI_FUTURE_USAGE 340#ifdef ACPI_FUTURE_USAGE
342/******************************************************************************* 341/*******************************************************************************
343 * 342 *
@@ -470,7 +469,6 @@ acpi_install_initialization_handler(acpi_init_handler handler, u32 function)
470 469
471ACPI_EXPORT_SYMBOL(acpi_install_initialization_handler) 470ACPI_EXPORT_SYMBOL(acpi_install_initialization_handler)
472#endif /* ACPI_FUTURE_USAGE */ 471#endif /* ACPI_FUTURE_USAGE */
473
474/***************************************************************************** 472/*****************************************************************************
475 * 473 *
476 * FUNCTION: acpi_purge_cached_objects 474 * FUNCTION: acpi_purge_cached_objects
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 45dbdc14915f..f031b8732330 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -3,6 +3,7 @@
3# 3#
4 4
5menu "Serial ATA (prod) and Parallel ATA (experimental) drivers" 5menu "Serial ATA (prod) and Parallel ATA (experimental) drivers"
6 depends on HAS_IOMEM
6 7
7config ATA 8config ATA
8 tristate "ATA device support" 9 tristate "ATA device support"
@@ -435,7 +436,7 @@ config PATA_OPTIDMA
435 help 436 help
436 This option enables DMA/PIO support for the later OPTi 437 This option enables DMA/PIO support for the later OPTi
437 controllers found on some old motherboards and in some 438 controllers found on some old motherboards and in some
438 latops 439 laptops.
439 440
440 If unsure, say N. 441 If unsure, say N.
441 442
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index 03a0acff6cfa..cb3eab6e379d 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -489,8 +489,7 @@ static void taskfile_load_raw(struct ata_port *ap,
489 489
490 /* convert gtf to tf */ 490 /* convert gtf to tf */
491 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; /* TBD */ 491 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; /* TBD */
492 tf.protocol = atadev->class == ATA_DEV_ATAPI ? 492 tf.protocol = ATA_PROT_NODATA;
493 ATA_PROT_ATAPI_NODATA : ATA_PROT_NODATA;
494 tf.feature = gtf->tfa[0]; /* 0x1f1 */ 493 tf.feature = gtf->tfa[0]; /* 0x1f1 */
495 tf.nsect = gtf->tfa[1]; /* 0x1f2 */ 494 tf.nsect = gtf->tfa[1]; /* 0x1f2 */
496 tf.lbal = gtf->tfa[2]; /* 0x1f3 */ 495 tf.lbal = gtf->tfa[2]; /* 0x1f3 */
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index a7950885d18e..4595d1f8cf60 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -895,6 +895,7 @@ static u64 ata_read_native_max_address(struct ata_device *dev)
895/** 895/**
896 * ata_set_native_max_address_ext - LBA48 native max set 896 * ata_set_native_max_address_ext - LBA48 native max set
897 * @dev: Device to query 897 * @dev: Device to query
898 * @new_sectors: new max sectors value to set for the device
898 * 899 *
899 * Perform an LBA48 size set max upon the device in question. Return the 900 * Perform an LBA48 size set max upon the device in question. Return the
900 * actual LBA48 size or zero if the command fails. 901 * actual LBA48 size or zero if the command fails.
@@ -932,6 +933,7 @@ static u64 ata_set_native_max_address_ext(struct ata_device *dev, u64 new_sector
932/** 933/**
933 * ata_set_native_max_address - LBA28 native max set 934 * ata_set_native_max_address - LBA28 native max set
934 * @dev: Device to query 935 * @dev: Device to query
936 * @new_sectors: new max sectors value to set for the device
935 * 937 *
936 * Perform an LBA28 size set max upon the device in question. Return the 938 * Perform an LBA28 size set max upon the device in question. Return the
937 * actual LBA28 size or zero if the command fails. 939 * actual LBA28 size or zero if the command fails.
@@ -1316,7 +1318,7 @@ void ata_port_flush_task(struct ata_port *ap)
1316 spin_unlock_irqrestore(ap->lock, flags); 1318 spin_unlock_irqrestore(ap->lock, flags);
1317 1319
1318 DPRINTK("flush #1\n"); 1320 DPRINTK("flush #1\n");
1319 flush_workqueue(ata_wq); 1321 cancel_work_sync(&ap->port_task.work); /* akpm: seems unneeded */
1320 1322
1321 /* 1323 /*
1322 * At this point, if a task is running, it's guaranteed to see 1324 * At this point, if a task is running, it's guaranteed to see
@@ -1327,7 +1329,7 @@ void ata_port_flush_task(struct ata_port *ap)
1327 if (ata_msg_ctl(ap)) 1329 if (ata_msg_ctl(ap))
1328 ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n", 1330 ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n",
1329 __FUNCTION__); 1331 __FUNCTION__);
1330 flush_workqueue(ata_wq); 1332 cancel_work_sync(&ap->port_task.work);
1331 } 1333 }
1332 1334
1333 spin_lock_irqsave(ap->lock, flags); 1335 spin_lock_irqsave(ap->lock, flags);
@@ -6475,9 +6477,9 @@ void ata_port_detach(struct ata_port *ap)
6475 /* Flush hotplug task. The sequence is similar to 6477 /* Flush hotplug task. The sequence is similar to
6476 * ata_port_flush_task(). 6478 * ata_port_flush_task().
6477 */ 6479 */
6478 flush_workqueue(ata_aux_wq); 6480 cancel_work_sync(&ap->hotplug_task.work); /* akpm: why? */
6479 cancel_delayed_work(&ap->hotplug_task); 6481 cancel_delayed_work(&ap->hotplug_task);
6480 flush_workqueue(ata_aux_wq); 6482 cancel_work_sync(&ap->hotplug_task.work);
6481 6483
6482 skip_eh: 6484 skip_eh:
6483 /* remove the associated SCSI host */ 6485 /* remove the associated SCSI host */
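The libata-core.c hunks stop draining entire workqueues and instead synchronously cancel only the work items the port owns: cancel_work_sync() removes a pending item, or waits for it if it is already running, without touching unrelated work on ata_wq/ata_aux_wq. Condensed from the detach hunk above, with the surrounding logic elided:

	/* stop a queued (timer-pending) hotplug task ... */
	cancel_delayed_work(&ap->hotplug_task);
	/* ... and wait out one that is already executing */
	cancel_work_sync(&ap->hotplug_task.work);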
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
index 75dc84797ff3..11245e331f77 100644
--- a/drivers/ata/pata_pcmcia.c
+++ b/drivers/ata/pata_pcmcia.c
@@ -357,6 +357,7 @@ static struct pcmcia_device_id pcmcia_devices[] = {
357 PCMCIA_DEVICE_MANF_CARD(0x000a, 0x0000), /* I-O Data CFA */ 357 PCMCIA_DEVICE_MANF_CARD(0x000a, 0x0000), /* I-O Data CFA */
358 PCMCIA_DEVICE_MANF_CARD(0x001c, 0x0001), /* Mitsubishi CFA */ 358 PCMCIA_DEVICE_MANF_CARD(0x001c, 0x0001), /* Mitsubishi CFA */
359 PCMCIA_DEVICE_MANF_CARD(0x0032, 0x0704), 359 PCMCIA_DEVICE_MANF_CARD(0x0032, 0x0704),
360 PCMCIA_DEVICE_MANF_CARD(0x0032, 0x2904),
360 PCMCIA_DEVICE_MANF_CARD(0x0045, 0x0401), /* SanDisk CFA */ 361 PCMCIA_DEVICE_MANF_CARD(0x0045, 0x0401), /* SanDisk CFA */
361 PCMCIA_DEVICE_MANF_CARD(0x0098, 0x0000), /* Toshiba */ 362 PCMCIA_DEVICE_MANF_CARD(0x0098, 0x0000), /* Toshiba */
362 PCMCIA_DEVICE_MANF_CARD(0x00a4, 0x002d), 363 PCMCIA_DEVICE_MANF_CARD(0x00a4, 0x002d),
diff --git a/drivers/ata/pata_qdi.c b/drivers/ata/pata_qdi.c
index 27685ce63ceb..fb8c9e14b8d4 100644
--- a/drivers/ata/pata_qdi.c
+++ b/drivers/ata/pata_qdi.c
@@ -375,7 +375,7 @@ static __init int qdi_init(void)
375 res = inb(port + 3); 375 res = inb(port + 3);
376 if (res & 1) { 376 if (res & 1) {
377 /* Single channel mode */ 377 /* Single channel mode */
378 if (qdi_init_one(port, 6580, ide_port[r & 0x01], ide_irq[r & 0x01], r & 0x04)) 378 if (qdi_init_one(port, 6580, ide_port[r & 0x01], ide_irq[r & 0x01], r & 0x04) == 0)
379 ct++; 379 ct++;
380 } else { 380 } else {
381 /* Dual channel mode */ 381 /* Dual channel mode */
diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
index 5df354d573e8..203f463ac39f 100644
--- a/drivers/ata/pata_scc.c
+++ b/drivers/ata/pata_scc.c
@@ -1142,14 +1142,14 @@ static int scc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1142 static int printed_version; 1142 static int printed_version;
1143 unsigned int board_idx = (unsigned int) ent->driver_data; 1143 unsigned int board_idx = (unsigned int) ent->driver_data;
1144 const struct ata_port_info *ppi[] = { &scc_port_info[board_idx], NULL }; 1144 const struct ata_port_info *ppi[] = { &scc_port_info[board_idx], NULL };
1145 struct device *dev = &pdev->dev; 1145 struct ata_host *host;
1146 int rc; 1146 int rc;
1147 1147
1148 if (!printed_version++) 1148 if (!printed_version++)
1149 dev_printk(KERN_DEBUG, &pdev->dev, 1149 dev_printk(KERN_DEBUG, &pdev->dev,
1150 "version " DRV_VERSION "\n"); 1150 "version " DRV_VERSION "\n");
1151 1151
1152 host = ata_port_alloc_pinfo(&pdev->dev, ppi, 1); 1152 host = ata_host_alloc_pinfo(&pdev->dev, ppi, 1);
1153 if (!host) 1153 if (!host)
1154 return -ENOMEM; 1154 return -ENOMEM;
1155 1155
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index e2e795e58236..a097595d4dc7 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -257,6 +257,8 @@ static void nv_adma_port_stop(struct ata_port *ap);
257static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg); 257static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
258static int nv_adma_port_resume(struct ata_port *ap); 258static int nv_adma_port_resume(struct ata_port *ap);
259#endif 259#endif
260static void nv_adma_freeze(struct ata_port *ap);
261static void nv_adma_thaw(struct ata_port *ap);
260static void nv_adma_error_handler(struct ata_port *ap); 262static void nv_adma_error_handler(struct ata_port *ap);
261static void nv_adma_host_stop(struct ata_host *host); 263static void nv_adma_host_stop(struct ata_host *host);
262static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc); 264static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
@@ -444,8 +446,8 @@ static const struct ata_port_operations nv_adma_ops = {
444 .bmdma_status = ata_bmdma_status, 446 .bmdma_status = ata_bmdma_status,
445 .qc_prep = nv_adma_qc_prep, 447 .qc_prep = nv_adma_qc_prep,
446 .qc_issue = nv_adma_qc_issue, 448 .qc_issue = nv_adma_qc_issue,
447 .freeze = nv_ck804_freeze, 449 .freeze = nv_adma_freeze,
448 .thaw = nv_ck804_thaw, 450 .thaw = nv_adma_thaw,
449 .error_handler = nv_adma_error_handler, 451 .error_handler = nv_adma_error_handler,
450 .post_internal_cmd = nv_adma_post_internal_cmd, 452 .post_internal_cmd = nv_adma_post_internal_cmd,
451 .data_xfer = ata_data_xfer, 453 .data_xfer = ata_data_xfer,
@@ -815,8 +817,16 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
815 u16 status; 817 u16 status;
816 u32 gen_ctl; 818 u32 gen_ctl;
817 u32 notifier, notifier_error; 819 u32 notifier, notifier_error;
820
821 /* if ADMA is disabled, use standard ata interrupt handler */
822 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
823 u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
824 >> (NV_INT_PORT_SHIFT * i);
825 handled += nv_host_intr(ap, irq_stat);
826 continue;
827 }
818 828
819 /* if in ATA register mode, use standard ata interrupt handler */ 829 /* if in ATA register mode, check for standard interrupts */
820 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) { 830 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
821 u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804) 831 u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
822 >> (NV_INT_PORT_SHIFT * i); 832 >> (NV_INT_PORT_SHIFT * i);
@@ -826,7 +836,6 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
826 command is active, to prevent losing interrupts. */ 836 command is active, to prevent losing interrupts. */
827 irq_stat |= NV_INT_DEV; 837 irq_stat |= NV_INT_DEV;
828 handled += nv_host_intr(ap, irq_stat); 838 handled += nv_host_intr(ap, irq_stat);
829 continue;
830 } 839 }
831 840
832 notifier = readl(mmio + NV_ADMA_NOTIFIER); 841 notifier = readl(mmio + NV_ADMA_NOTIFIER);
@@ -912,22 +921,77 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
912 return IRQ_RETVAL(handled); 921 return IRQ_RETVAL(handled);
913} 922}
914 923
924static void nv_adma_freeze(struct ata_port *ap)
925{
926 struct nv_adma_port_priv *pp = ap->private_data;
927 void __iomem *mmio = pp->ctl_block;
928 u16 tmp;
929
930 nv_ck804_freeze(ap);
931
932 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
933 return;
934
935 /* clear any outstanding CK804 notifications */
936 writeb( NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
937 ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
938
939 /* Disable interrupt */
940 tmp = readw(mmio + NV_ADMA_CTL);
941 writew( tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
942 mmio + NV_ADMA_CTL);
943 readw( mmio + NV_ADMA_CTL ); /* flush posted write */
944}
945
946static void nv_adma_thaw(struct ata_port *ap)
947{
948 struct nv_adma_port_priv *pp = ap->private_data;
949 void __iomem *mmio = pp->ctl_block;
950 u16 tmp;
951
952 nv_ck804_thaw(ap);
953
954 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
955 return;
956
957 /* Enable interrupt */
958 tmp = readw(mmio + NV_ADMA_CTL);
959 writew( tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
960 mmio + NV_ADMA_CTL);
961 readw( mmio + NV_ADMA_CTL ); /* flush posted write */
962}
963
915static void nv_adma_irq_clear(struct ata_port *ap) 964static void nv_adma_irq_clear(struct ata_port *ap)
916{ 965{
917 struct nv_adma_port_priv *pp = ap->private_data; 966 struct nv_adma_port_priv *pp = ap->private_data;
918 void __iomem *mmio = pp->ctl_block; 967 void __iomem *mmio = pp->ctl_block;
919 u16 status = readw(mmio + NV_ADMA_STAT); 968 u32 notifier_clears[2];
920 u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
921 u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
922 void __iomem *dma_stat_addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;
923 969
924 /* clear ADMA status */ 970 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
925 writew(status, mmio + NV_ADMA_STAT); 971 ata_bmdma_irq_clear(ap);
926 writel(notifier | notifier_error, 972 return;
927 pp->notifier_clear_block); 973 }
974
975 /* clear any outstanding CK804 notifications */
976 writeb( NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
977 ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
928 978
929 /** clear legacy status */ 979 /* clear ADMA status */
930 iowrite8(ioread8(dma_stat_addr), dma_stat_addr); 980 writew(0xffff, mmio + NV_ADMA_STAT);
981
982 /* clear notifiers - note both ports need to be written with
983 something even though we are only clearing on one */
984 if (ap->port_no == 0) {
985 notifier_clears[0] = 0xFFFFFFFF;
986 notifier_clears[1] = 0;
987 } else {
988 notifier_clears[0] = 0;
989 notifier_clears[1] = 0xFFFFFFFF;
990 }
991 pp = ap->host->ports[0]->private_data;
992 writel(notifier_clears[0], pp->notifier_clear_block);
993 pp = ap->host->ports[1]->private_data;
994 writel(notifier_clears[1], pp->notifier_clear_block);
931} 995}
932 996
933static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc) 997static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
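The new nv_adma_freeze()/nv_adma_thaw() pair layers ADMA-specific interrupt masking on top of the CK804 freeze/thaw, skipping the extra work when the port has fallen back to ATAPI/register mode. The core of both sides is a read-modify-write of NV_ADMA_CTL followed by a read-back that flushes the posted MMIO write; condensed from the freeze path above:

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
	       mmio + NV_ADMA_CTL);          /* mask ADMA + hotplug irqs */
	readw(mmio + NV_ADMA_CTL);           /* flush the posted write */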
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index f56549b90aa6..3a7d9b5332af 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -45,7 +45,7 @@
45#include "sata_promise.h" 45#include "sata_promise.h"
46 46
47#define DRV_NAME "sata_promise" 47#define DRV_NAME "sata_promise"
48#define DRV_VERSION "2.05" 48#define DRV_VERSION "2.07"
49 49
50 50
51enum { 51enum {
@@ -653,6 +653,8 @@ static void pdc_error_intr(struct ata_port *ap, struct ata_queued_cmd *qc,
653 qc->err_mask |= ac_err_mask; 653 qc->err_mask |= ac_err_mask;
654 654
655 pdc_reset_port(ap); 655 pdc_reset_port(ap);
656
657 ata_port_abort(ap);
656} 658}
657 659
658static inline unsigned int pdc_host_intr( struct ata_port *ap, 660static inline unsigned int pdc_host_intr( struct ata_port *ap,
@@ -924,6 +926,7 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
924 struct ata_host *host; 926 struct ata_host *host;
925 void __iomem *base; 927 void __iomem *base;
926 int n_ports, i, rc; 928 int n_ports, i, rc;
929 int is_sataii_tx4;
927 930
928 if (!printed_version++) 931 if (!printed_version++)
929 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); 932 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
@@ -962,10 +965,23 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
962 } 965 }
963 host->iomap = pcim_iomap_table(pdev); 966 host->iomap = pcim_iomap_table(pdev);
964 967
965 for (i = 0; i < host->n_ports; i++) 968 is_sataii_tx4 = 0;
969 if ((pi->flags & (PDC_FLAG_GEN_II|PDC_FLAG_4_PORTS)) == (PDC_FLAG_GEN_II|PDC_FLAG_4_PORTS)) {
970 is_sataii_tx4 = 1;
971 dev_printk(KERN_INFO, &pdev->dev, "applying SATAII TX4 port numbering workaround\n");
972 }
973 for (i = 0; i < host->n_ports; i++) {
974 static const unsigned char sataii_tx4_port_remap[4] = { 3, 1, 0, 2};
975 int ata_nr;
976
977 ata_nr = i;
978 if (is_sataii_tx4)
979 ata_nr = sataii_tx4_port_remap[i];
980
966 pdc_ata_setup_port(host->ports[i], 981 pdc_ata_setup_port(host->ports[i],
967 base + 0x200 + i * 0x80, 982 base + 0x200 + ata_nr * 0x80,
968 base + 0x400 + i * 0x100); 983 base + 0x400 + ata_nr * 0x100);
984 }
969 985
970 /* initialize adapter */ 986 /* initialize adapter */
971 pdc_host_init(host); 987 pdc_host_init(host);
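The sata_promise.c init change adds a port-numbering workaround for SATAII TX4 boards: logical port i must use the register window of hardware port remap[i] rather than its own index. A standalone sketch that prints the remapped window offsets computed above:

/* SATAII TX4 remap: logical port i -> hardware window remap[i]. */
#include <stdio.h>

int main(void)
{
	static const unsigned char remap[4] = { 3, 1, 0, 2 };

	for (int i = 0; i < 4; i++)
		printf("port %d: cmd @ 0x%lx, ctl @ 0x%lx\n", i,
		       0x200 + remap[i] * 0x80UL,
		       0x400 + remap[i] * 0x100UL);
	return 0;
}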
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index 305ab7c68ca5..939c9246fdd1 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -93,6 +93,10 @@ static struct pci_driver svia_pci_driver = {
93 .name = DRV_NAME, 93 .name = DRV_NAME,
94 .id_table = svia_pci_tbl, 94 .id_table = svia_pci_tbl,
95 .probe = svia_init_one, 95 .probe = svia_init_one,
96#ifdef CONFIG_PM
97 .suspend = ata_pci_device_suspend,
98 .resume = ata_pci_device_resume,
99#endif
96 .remove = ata_pci_remove_one, 100 .remove = ata_pci_remove_one,
97}; 101};
98 102
@@ -112,6 +116,10 @@ static struct scsi_host_template svia_sht = {
112 .slave_configure = ata_scsi_slave_config, 116 .slave_configure = ata_scsi_slave_config,
113 .slave_destroy = ata_scsi_slave_destroy, 117 .slave_destroy = ata_scsi_slave_destroy,
114 .bios_param = ata_std_bios_param, 118 .bios_param = ata_std_bios_param,
119#ifdef CONFIG_PM
120 .suspend = ata_scsi_device_suspend,
121 .resume = ata_scsi_device_resume,
122#endif
115}; 123};
116 124
117static const struct ata_port_operations vt6420_sata_ops = { 125static const struct ata_port_operations vt6420_sata_ops = {
diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
index 0300e7f54cc4..2e18a63ead36 100644
--- a/drivers/auxdisplay/Kconfig
+++ b/drivers/auxdisplay/Kconfig
@@ -6,6 +6,7 @@
6# 6#
7 7
8menu "Auxiliary Display support" 8menu "Auxiliary Display support"
9 depends on PARPORT
9 10
10config KS0108 11config KS0108
11 tristate "KS0108 LCD Controller" 12 tristate "KS0108 LCD Controller"
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index e177c9533b6c..e1c0730a3b99 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -101,19 +101,6 @@ static void add_dr(struct device *dev, struct devres_node *node)
101 list_add_tail(&node->entry, &dev->devres_head); 101 list_add_tail(&node->entry, &dev->devres_head);
102} 102}
103 103
104/**
105 * devres_alloc - Allocate device resource data
106 * @release: Release function devres will be associated with
107 * @size: Allocation size
108 * @gfp: Allocation flags
109 *
110 * allocate devres of @size bytes. The allocated area is zeroed, then
111 * associated with @release. The returned pointer can be passed to
112 * other devres_*() functions.
113 *
114 * RETURNS:
115 * Pointer to allocated devres on success, NULL on failure.
116 */
117#ifdef CONFIG_DEBUG_DEVRES 104#ifdef CONFIG_DEBUG_DEVRES
118void * __devres_alloc(dr_release_t release, size_t size, gfp_t gfp, 105void * __devres_alloc(dr_release_t release, size_t size, gfp_t gfp,
119 const char *name) 106 const char *name)
@@ -128,6 +115,19 @@ void * __devres_alloc(dr_release_t release, size_t size, gfp_t gfp,
128} 115}
129EXPORT_SYMBOL_GPL(__devres_alloc); 116EXPORT_SYMBOL_GPL(__devres_alloc);
130#else 117#else
118/**
119 * devres_alloc - Allocate device resource data
120 * @release: Release function devres will be associated with
121 * @size: Allocation size
122 * @gfp: Allocation flags
123 *
124 * Allocate devres of @size bytes. The allocated area is zeroed, then
125 * associated with @release. The returned pointer can be passed to
126 * other devres_*() functions.
127 *
128 * RETURNS:
129 * Pointer to allocated devres on success, NULL on failure.
130 */
131void * devres_alloc(dr_release_t release, size_t size, gfp_t gfp) 131void * devres_alloc(dr_release_t release, size_t size, gfp_t gfp)
132{ 132{
133 struct devres *dr; 133 struct devres *dr;
@@ -416,7 +416,7 @@ static int release_nodes(struct device *dev, struct list_head *first,
416} 416}
417 417
418/** 418/**
419 * devres_release_all - Release all resources 419 * devres_release_all - Release all managed resources
420 * @dev: Device to release resources for 420 * @dev: Device to release resources for
421 * 421 *
422 * Release all resources associated with @dev. This function is 422 * Release all resources associated with @dev. This function is
@@ -600,7 +600,7 @@ static int devm_kzalloc_match(struct device *dev, void *res, void *data)
600} 600}
601 601
602/** 602/**
603 * devm_kzalloc - Managed kzalloc 603 * devm_kzalloc - Resource-managed kzalloc
604 * @dev: Device to allocate memory for 604 * @dev: Device to allocate memory for
605 * @size: Allocation size 605 * @size: Allocation size
606 * @gfp: Allocation gfp flags 606 * @gfp: Allocation gfp flags
@@ -628,7 +628,7 @@ void * devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
628EXPORT_SYMBOL_GPL(devm_kzalloc); 628EXPORT_SYMBOL_GPL(devm_kzalloc);
629 629
630/** 630/**
631 * devm_kfree - Managed kfree 631 * devm_kfree - Resource-managed kfree
632 * @dev: Device this memory belongs to 632 * @dev: Device this memory belongs to
633 * @p: Memory to free 633 * @p: Memory to free
634 * 634 *
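
For context on the kernel-doc moved above: devres ties an allocation's lifetime to a device, and devres_release_all() frees everything on detach. A minimal usage sketch of devm_kzalloc (foo_priv and foo_probe are illustrative names, not from this patch):

struct foo_priv {
	int irq;
};

static int foo_probe(struct device *dev)
{
	struct foo_priv *priv;

	/* Zeroed allocation, freed automatically when dev is detached. */
	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	dev_set_drvdata(dev, priv);
	return 0;	/* no explicit kfree needed on any error/remove path */
}
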
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index eb84d9d44645..869ff8c00146 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -360,7 +360,7 @@ EXPORT_SYMBOL_GPL(platform_device_unregister);
360 * This function creates a simple platform device that requires minimal 360 * This function creates a simple platform device that requires minimal
361 * resource and memory management. Canned release function freeing 361 * resource and memory management. Canned release function freeing
362 * memory allocated for the device allows drivers using such devices 362 * memory allocated for the device allows drivers using such devices
363 * to be unloaded iwithout waiting for the last reference to the device 363 * to be unloaded without waiting for the last reference to the device
364 * to be dropped. 364 * to be dropped.
365 * 365 *
366 * This interface is primarily intended for use with legacy drivers 366 * This interface is primarily intended for use with legacy drivers
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index 067a9e8bc377..8d8cdfec6529 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -126,10 +126,13 @@ static int __cpuinit topology_cpu_callback(struct notifier_block *nfb,
126 126
127 switch (action) { 127 switch (action) {
128 case CPU_UP_PREPARE: 128 case CPU_UP_PREPARE:
129 case CPU_UP_PREPARE_FROZEN:
129 rc = topology_add_dev(cpu); 130 rc = topology_add_dev(cpu);
130 break; 131 break;
131 case CPU_UP_CANCELED: 132 case CPU_UP_CANCELED:
133 case CPU_UP_CANCELED_FROZEN:
132 case CPU_DEAD: 134 case CPU_DEAD:
135 case CPU_DEAD_FROZEN:
133 topology_remove_dev(cpu); 136 topology_remove_dev(cpu);
134 break; 137 break;
135 } 138 }
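
The *_FROZEN action codes added here (and in the cpufreq hunks further down) are delivered when CPUs go down and come back as part of suspend/resume rather than ordinary hotplug; a notifier that ignores them misses setup or teardown across a sleep cycle. The pattern condensed, with my_add_dev/my_remove_dev as placeholder callbacks:

switch (action) {
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:	/* same setup when resuming */
	rc = my_add_dev(cpu);
	break;
case CPU_UP_CANCELED:
case CPU_UP_CANCELED_FROZEN:
case CPU_DEAD:
case CPU_DEAD_FROZEN:		/* same teardown when suspending */
	my_remove_dev(cpu);
	break;
}
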
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 17ee97f3a99b..b4c8319138b2 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -444,8 +444,6 @@ config CDROM_PKTCDVD_WCACHE
444 this option is dangerous unless the CD-RW media is known good, as we 444 this option is dangerous unless the CD-RW media is known good, as we
445 don't do deferred write error handling yet. 445 don't do deferred write error handling yet.
446 446
447source "drivers/s390/block/Kconfig"
448
449config ATA_OVER_ETH 447config ATA_OVER_ETH
450 tristate "ATA over Ethernet support" 448 tristate "ATA over Ethernet support"
451 depends on NET 449 depends on NET
@@ -453,6 +451,8 @@ config ATA_OVER_ETH
453 This driver provides Support for ATA over Ethernet block 451 This driver provides Support for ATA over Ethernet block
454 devices like the Coraid EtherDrive (R) Storage Blade. 452 devices like the Coraid EtherDrive (R) Storage Blade.
455 453
454source "drivers/s390/block/Kconfig"
455
456endmenu 456endmenu
457 457
458endif 458endif
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index af6d7274a7cc..18cdd8c77626 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -243,17 +243,13 @@ static int do_lo_send_aops(struct loop_device *lo, struct bio_vec *bvec,
243 transfer_result = lo_do_transfer(lo, WRITE, page, offset, 243 transfer_result = lo_do_transfer(lo, WRITE, page, offset,
244 bvec->bv_page, bv_offs, size, IV); 244 bvec->bv_page, bv_offs, size, IV);
245 if (unlikely(transfer_result)) { 245 if (unlikely(transfer_result)) {
246 char *kaddr;
247
248 /* 246 /*
249 * The transfer failed, but we still write the data to 247 * The transfer failed, but we still write the data to
250 * keep prepare/commit calls balanced. 248 * keep prepare/commit calls balanced.
251 */ 249 */
252 printk(KERN_ERR "loop: transfer error block %llu\n", 250 printk(KERN_ERR "loop: transfer error block %llu\n",
253 (unsigned long long)index); 251 (unsigned long long)index);
254 kaddr = kmap_atomic(page, KM_USER0); 252 zero_user_page(page, offset, size, KM_USER0);
255 memset(kaddr + offset, 0, size);
256 kunmap_atomic(kaddr, KM_USER0);
257 } 253 }
258 flush_dcache_page(page); 254 flush_dcache_page(page);
259 ret = aops->commit_write(file, page, offset, 255 ret = aops->commit_write(file, page, offset,
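
zero_user_page() bundles the kmap_atomic/memset/kunmap_atomic triple that this hunk deletes. Approximately, as a sketch of the helper's effect (assuming <linux/highmem.h>) rather than its exact definition:

static inline void zero_user_page_sketch(struct page *page,
					 unsigned int offset,
					 unsigned int size,
					 enum km_type km)
{
	void *kaddr = kmap_atomic(page, km);	/* temporary kernel mapping */

	memset(kaddr + offset, 0, size);	/* zero the failed span */
	kunmap_atomic(kaddr, km);
}
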
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 090796bef78f..069ae39a9cd9 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -366,20 +366,25 @@ static struct disk_attribute pid_attr = {
366 .show = pid_show, 366 .show = pid_show,
367}; 367};
368 368
369static void nbd_do_it(struct nbd_device *lo) 369static int nbd_do_it(struct nbd_device *lo)
370{ 370{
371 struct request *req; 371 struct request *req;
372 int ret;
372 373
373 BUG_ON(lo->magic != LO_MAGIC); 374 BUG_ON(lo->magic != LO_MAGIC);
374 375
375 lo->pid = current->pid; 376 lo->pid = current->pid;
376 sysfs_create_file(&lo->disk->kobj, &pid_attr.attr); 377 ret = sysfs_create_file(&lo->disk->kobj, &pid_attr.attr);
378 if (ret) {
 379 printk(KERN_ERR "nbd: sysfs_create_file failed!\n");
380 return ret;
381 }
377 382
378 while ((req = nbd_read_stat(lo)) != NULL) 383 while ((req = nbd_read_stat(lo)) != NULL)
379 nbd_end_request(req); 384 nbd_end_request(req);
380 385
381 sysfs_remove_file(&lo->disk->kobj, &pid_attr.attr); 386 sysfs_remove_file(&lo->disk->kobj, &pid_attr.attr);
382 return; 387 return 0;
383} 388}
384 389
385static void nbd_clear_que(struct nbd_device *lo) 390static void nbd_clear_que(struct nbd_device *lo)
@@ -569,7 +574,9 @@ static int nbd_ioctl(struct inode *inode, struct file *file,
569 case NBD_DO_IT: 574 case NBD_DO_IT:
570 if (!lo->file) 575 if (!lo->file)
571 return -EINVAL; 576 return -EINVAL;
572 nbd_do_it(lo); 577 error = nbd_do_it(lo);
578 if (error)
579 return error;
573 /* on return tidy up in case we have a signal */ 580 /* on return tidy up in case we have a signal */
574 /* Forcibly shutdown the socket causing all listeners 581 /* Forcibly shutdown the socket causing all listeners
575 * to error 582 * to error
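
Propagating the nbd_do_it() result matters because sysfs_create_file() can genuinely fail (for example under memory pressure); returning the error lets the NBD_DO_IT ioctl abort cleanly instead of running without the pid attribute. The contract in miniature:

error = nbd_do_it(lo);	/* now returns sysfs_create_file()'s status */
if (error)
	return error;	/* ioctl caller sees the failure immediately */
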
diff --git a/drivers/block/rd.c b/drivers/block/rd.c
index 43d4ebcb3b44..a1512da32410 100644
--- a/drivers/block/rd.c
+++ b/drivers/block/rd.c
@@ -151,7 +151,7 @@ static int ramdisk_commit_write(struct file *file, struct page *page,
151} 151}
152 152
153/* 153/*
154 * ->writepage to the the blockdev's mapping has to redirty the page so that the 154 * ->writepage to the blockdev's mapping has to redirty the page so that the
155 * VM doesn't go and steal it. We return AOP_WRITEPAGE_ACTIVATE so that the VM 155 * VM doesn't go and steal it. We return AOP_WRITEPAGE_ACTIVATE so that the VM
156 * won't try to (pointlessly) write the page again for a while. 156 * won't try to (pointlessly) write the page again for a while.
157 * 157 *
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 1e32fb834eb8..abcafac64738 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -6,6 +6,7 @@ menu "Character devices"
6 6
7config VT 7config VT
8 bool "Virtual terminal" if EMBEDDED 8 bool "Virtual terminal" if EMBEDDED
9 depends on !S390
9 select INPUT 10 select INPUT
10 default y if !VIOCONS 11 default y if !VIOCONS
11 ---help--- 12 ---help---
@@ -81,6 +82,7 @@ config VT_HW_CONSOLE_BINDING
81 82
82config SERIAL_NONSTANDARD 83config SERIAL_NONSTANDARD
83 bool "Non-standard serial port support" 84 bool "Non-standard serial port support"
85 depends on HAS_IOMEM
84 ---help--- 86 ---help---
85 Say Y here if you have any non-standard serial boards -- boards 87 Say Y here if you have any non-standard serial boards -- boards
86 which aren't supported using the standard "dumb" serial driver. 88 which aren't supported using the standard "dumb" serial driver.
@@ -631,7 +633,8 @@ config HVC_CONSOLE
631 633
632config HVC_ISERIES 634config HVC_ISERIES
633 bool "iSeries Hypervisor Virtual Console support" 635 bool "iSeries Hypervisor Virtual Console support"
634 depends on PPC_ISERIES && !VIOCONS 636 depends on PPC_ISERIES
637 default y
635 select HVC_DRIVER 638 select HVC_DRIVER
636 help 639 help
637 iSeries machines support a hypervisor virtual console. 640 iSeries machines support a hypervisor virtual console.
@@ -764,7 +767,7 @@ config NVRAM
764 767
765config RTC 768config RTC
766 tristate "Enhanced Real Time Clock Support" 769 tristate "Enhanced Real Time Clock Support"
767 depends on !PPC && !PARISC && !IA64 && !M68K && (!SPARC || PCI) && !FRV && !ARM && !SUPERH 770 depends on !PPC && !PARISC && !IA64 && !M68K && (!SPARC || PCI) && !FRV && !ARM && !SUPERH && !S390
768 ---help--- 771 ---help---
769 If you say Y here and create a character special file /dev/rtc with 772 If you say Y here and create a character special file /dev/rtc with
770 major number 10 and minor number 135 using mknod ("man mknod"), you 773 major number 10 and minor number 135 using mknod ("man mknod"), you
@@ -812,7 +815,7 @@ config SGI_IP27_RTC
812 815
813config GEN_RTC 816config GEN_RTC
814 tristate "Generic /dev/rtc emulation" 817 tristate "Generic /dev/rtc emulation"
815 depends on RTC!=y && !IA64 && !ARM && !M32R && !SPARC && !FRV 818 depends on RTC!=y && !IA64 && !ARM && !M32R && !SPARC && !FRV && !S390
816 ---help--- 819 ---help---
817 If you say Y here and create a character special file /dev/rtc with 820 If you say Y here and create a character special file /dev/rtc with
818 major number 10 and minor number 135 using mknod ("man mknod"), you 821 major number 10 and minor number 135 using mknod ("man mknod"), you
@@ -857,6 +860,7 @@ config COBALT_LCD
857 860
858config DTLK 861config DTLK
859 tristate "Double Talk PC internal speech card support" 862 tristate "Double Talk PC internal speech card support"
863 depends on ISA
860 help 864 help
861 This driver is for the DoubleTalk PC, a speech synthesizer 865 This driver is for the DoubleTalk PC, a speech synthesizer
862 manufactured by RC Systems (<http://www.rcsys.com/>). It is also 866 manufactured by RC Systems (<http://www.rcsys.com/>). It is also
@@ -1042,7 +1046,7 @@ config HPET_MMAP
1042 1046
1043config HANGCHECK_TIMER 1047config HANGCHECK_TIMER
1044 tristate "Hangcheck timer" 1048 tristate "Hangcheck timer"
1045 depends on X86 || IA64 || PPC64 1049 depends on X86 || IA64 || PPC64 || S390
1046 help 1050 help
1047 The hangcheck-timer module detects when the system has gone 1051 The hangcheck-timer module detects when the system has gone
1048 out to lunch past a certain margin. It can reboot the system 1052 out to lunch past a certain margin. It can reboot the system
@@ -1077,5 +1081,7 @@ config DEVPORT
1077 depends on ISA || PCI 1081 depends on ISA || PCI
1078 default y 1082 default y
1079 1083
1084source "drivers/s390/char/Kconfig"
1085
1080endmenu 1086endmenu
1081 1087
diff --git a/drivers/char/drm/drm_dma.c b/drivers/char/drm/drm_dma.c
index 892db7096986..32ed19c9ec1c 100644
--- a/drivers/char/drm/drm_dma.c
+++ b/drivers/char/drm/drm_dma.c
@@ -65,7 +65,7 @@ int drm_dma_setup(drm_device_t * dev)
65 * \param dev DRM device. 65 * \param dev DRM device.
66 * 66 *
67 * Free all pages associated with DMA buffers, the buffers and pages lists, and 67 * Free all pages associated with DMA buffers, the buffers and pages lists, and
68 * finally the the drm_device::dma structure itself. 68 * finally the drm_device::dma structure itself.
69 */ 69 */
70void drm_dma_takedown(drm_device_t * dev) 70void drm_dma_takedown(drm_device_t * dev)
71{ 71{
diff --git a/drivers/char/drm/drm_vm.c b/drivers/char/drm/drm_vm.c
index 35540cfb43dd..b5c5b9fa84c3 100644
--- a/drivers/char/drm/drm_vm.c
+++ b/drivers/char/drm/drm_vm.c
@@ -157,7 +157,7 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
157 * \param address access address. 157 * \param address access address.
158 * \return pointer to the page structure. 158 * \return pointer to the page structure.
159 * 159 *
160 * Get the the mapping, find the real physical page to map, get the page, and 160 * Get the mapping, find the real physical page to map, get the page, and
161 * return it. 161 * return it.
162 */ 162 */
163static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma, 163static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
diff --git a/drivers/char/drm/r300_reg.h b/drivers/char/drm/r300_reg.h
index a881f96c983e..ecda760ae8c0 100644
--- a/drivers/char/drm/r300_reg.h
+++ b/drivers/char/drm/r300_reg.h
@@ -293,7 +293,7 @@ I am fairly certain that they are correct unless stated otherwise in comments.
293# define R300_PVS_CNTL_1_PROGRAM_START_SHIFT 0 293# define R300_PVS_CNTL_1_PROGRAM_START_SHIFT 0
294# define R300_PVS_CNTL_1_POS_END_SHIFT 10 294# define R300_PVS_CNTL_1_POS_END_SHIFT 10
295# define R300_PVS_CNTL_1_PROGRAM_END_SHIFT 20 295# define R300_PVS_CNTL_1_PROGRAM_END_SHIFT 20
296/* Addresses are relative the the vertex program parameters area. */ 296/* Addresses are relative to the vertex program parameters area. */
297#define R300_VAP_PVS_CNTL_2 0x22D4 297#define R300_VAP_PVS_CNTL_2 0x22D4
298# define R300_PVS_CNTL_2_PARAM_OFFSET_SHIFT 0 298# define R300_PVS_CNTL_2_PARAM_OFFSET_SHIFT 0
299# define R300_PVS_CNTL_2_PARAM_COUNT_SHIFT 16 299# define R300_PVS_CNTL_2_PARAM_COUNT_SHIFT 16
diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
index 49f914e79216..9e1fc02967ff 100644
--- a/drivers/char/genrtc.c
+++ b/drivers/char/genrtc.c
@@ -12,7 +12,7 @@
12 * 12 *
13 * This driver allows use of the real time clock (built into 13 * This driver allows use of the real time clock (built into
14 * nearly all computers) from user space. It exports the /dev/rtc 14 * nearly all computers) from user space. It exports the /dev/rtc
15 * interface supporting various ioctl() and also the /proc/dev/rtc 15 * interface supporting various ioctl() and also the /proc/driver/rtc
16 * pseudo-file for status information. 16 * pseudo-file for status information.
17 * 17 *
18 * The ioctls can be used to set the interrupt behaviour where 18 * The ioctls can be used to set the interrupt behaviour where
@@ -377,7 +377,7 @@ static int gen_rtc_release(struct inode *inode, struct file *file)
377#ifdef CONFIG_PROC_FS 377#ifdef CONFIG_PROC_FS
378 378
379/* 379/*
380 * Info exported via "/proc/rtc". 380 * Info exported via "/proc/driver/rtc".
381 */ 381 */
382 382
383static int gen_rtc_proc_output(char *buf) 383static int gen_rtc_proc_output(char *buf)
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 5f3acd8e64b8..7cda04b33534 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -91,3 +91,17 @@ config HW_RANDOM_OMAP
91 module will be called omap-rng. 91 module will be called omap-rng.
92 92
93 If unsure, say Y. 93 If unsure, say Y.
94
95config HW_RANDOM_PASEMI
96 tristate "PA Semi HW Random Number Generator support"
97 depends on HW_RANDOM && PPC_PASEMI
98 default HW_RANDOM
99 ---help---
100 This driver provides kernel-side support for the Random Number
 101 Generator hardware found on the PA6T-1682M processor.
102
103 To compile this driver as a module, choose M here: the
104 module will be called pasemi-rng.
105
106 If unsure, say Y.
107
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index c41fa19454e3..c8b7300e2fb1 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -10,3 +10,4 @@ obj-$(CONFIG_HW_RANDOM_GEODE) += geode-rng.o
10obj-$(CONFIG_HW_RANDOM_VIA) += via-rng.o 10obj-$(CONFIG_HW_RANDOM_VIA) += via-rng.o
11obj-$(CONFIG_HW_RANDOM_IXP4XX) += ixp4xx-rng.o 11obj-$(CONFIG_HW_RANDOM_IXP4XX) += ixp4xx-rng.o
12obj-$(CONFIG_HW_RANDOM_OMAP) += omap-rng.o 12obj-$(CONFIG_HW_RANDOM_OMAP) += omap-rng.o
13obj-$(CONFIG_HW_RANDOM_PASEMI) += pasemi-rng.o
diff --git a/drivers/char/hw_random/pasemi-rng.c b/drivers/char/hw_random/pasemi-rng.c
new file mode 100644
index 000000000000..fa6040b6c8f2
--- /dev/null
+++ b/drivers/char/hw_random/pasemi-rng.c
@@ -0,0 +1,156 @@
1/*
2 * Copyright (C) 2006-2007 PA Semi, Inc
3 *
4 * Maintained by: Olof Johansson <olof@lixom.net>
5 *
 6 * Driver for the PWRficient on-chip RNG
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22#include <linux/module.h>
23#include <linux/kernel.h>
24#include <linux/platform_device.h>
25#include <linux/hw_random.h>
26#include <asm/of_platform.h>
27#include <asm/io.h>
28
29#define SDCRNG_CTL_REG 0x00
30#define SDCRNG_CTL_FVLD_M 0x0000f000
31#define SDCRNG_CTL_FVLD_S 12
32#define SDCRNG_CTL_KSZ 0x00000800
33#define SDCRNG_CTL_RSRC_CRG 0x00000010
34#define SDCRNG_CTL_RSRC_RRG 0x00000000
35#define SDCRNG_CTL_CE 0x00000004
36#define SDCRNG_CTL_RE 0x00000002
37#define SDCRNG_CTL_DR 0x00000001
38#define SDCRNG_CTL_SELECT_RRG_RNG (SDCRNG_CTL_RE | SDCRNG_CTL_RSRC_RRG)
39#define SDCRNG_CTL_SELECT_CRG_RNG (SDCRNG_CTL_CE | SDCRNG_CTL_RSRC_CRG)
40#define SDCRNG_VAL_REG 0x20
41
42#define MODULE_NAME "pasemi_rng"
43
44static int pasemi_rng_data_present(struct hwrng *rng)
45{
46 void __iomem *rng_regs = (void __iomem *)rng->priv;
47
48 return (in_le32(rng_regs + SDCRNG_CTL_REG)
49 & SDCRNG_CTL_FVLD_M) ? 1 : 0;
50}
51
52static int pasemi_rng_data_read(struct hwrng *rng, u32 *data)
53{
54 void __iomem *rng_regs = (void __iomem *)rng->priv;
55 *data = in_le32(rng_regs + SDCRNG_VAL_REG);
56 return 4;
57}
58
59static int pasemi_rng_init(struct hwrng *rng)
60{
61 void __iomem *rng_regs = (void __iomem *)rng->priv;
62 u32 ctl;
63
64 ctl = SDCRNG_CTL_DR | SDCRNG_CTL_SELECT_RRG_RNG | SDCRNG_CTL_KSZ;
65 out_le32(rng_regs + SDCRNG_CTL_REG, ctl);
66 out_le32(rng_regs + SDCRNG_CTL_REG, ctl & ~SDCRNG_CTL_DR);
67
68 return 0;
69}
70
71static void pasemi_rng_cleanup(struct hwrng *rng)
72{
73 void __iomem *rng_regs = (void __iomem *)rng->priv;
74 u32 ctl;
75
76 ctl = SDCRNG_CTL_RE | SDCRNG_CTL_CE;
77 out_le32(rng_regs + SDCRNG_CTL_REG,
78 in_le32(rng_regs + SDCRNG_CTL_REG) & ~ctl);
79}
80
81static struct hwrng pasemi_rng = {
82 .name = MODULE_NAME,
83 .init = pasemi_rng_init,
84 .cleanup = pasemi_rng_cleanup,
85 .data_present = pasemi_rng_data_present,
86 .data_read = pasemi_rng_data_read,
87};
88
89static int __devinit rng_probe(struct of_device *ofdev,
90 const struct of_device_id *match)
91{
92 void __iomem *rng_regs;
93 struct device_node *rng_np = ofdev->node;
94 struct resource res;
95 int err = 0;
96
97 err = of_address_to_resource(rng_np, 0, &res);
98 if (err)
99 return -ENODEV;
100
101 rng_regs = ioremap(res.start, 0x100);
102
103 if (!rng_regs)
104 return -ENOMEM;
105
106 pasemi_rng.priv = (unsigned long)rng_regs;
107
108 printk(KERN_INFO "Registering PA Semi RNG\n");
109
110 err = hwrng_register(&pasemi_rng);
111
112 if (err)
113 iounmap(rng_regs);
114
115 return err;
116}
117
118static int __devexit rng_remove(struct of_device *dev)
119{
120 void __iomem *rng_regs = (void __iomem *)pasemi_rng.priv;
121
122 hwrng_unregister(&pasemi_rng);
123 iounmap(rng_regs);
124
125 return 0;
126}
127
128static struct of_device_id rng_match[] = {
129 {
130 .compatible = "1682m-rng",
131 },
132 {},
133};
134
135static struct of_platform_driver rng_driver = {
136 .name = "pasemi-rng",
137 .match_table = rng_match,
138 .probe = rng_probe,
139 .remove = rng_remove,
140};
141
142static int __init rng_init(void)
143{
144 return of_register_platform_driver(&rng_driver);
145}
146module_init(rng_init);
147
148static void __exit rng_exit(void)
149{
150 of_unregister_platform_driver(&rng_driver);
151}
152module_exit(rng_exit);
153
154MODULE_LICENSE("GPL");
155MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>");
156MODULE_DESCRIPTION("H/W RNG driver for PA Semi processor");
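
For orientation: the hwrng core drives a registered provider through the data_present/data_read pair, while the char device and buffering live in rng-core, so the driver above only reports FIFO validity and returns 32-bit words. An illustrative consumer loop for that contract (simplified, not the actual rng-core source):

static int hwrng_fill_sketch(struct hwrng *rng, u32 *buf, int nwords)
{
	int i;

	for (i = 0; i < nwords; i++) {
		if (!rng->data_present(rng))
			break;				/* FIFO not valid yet */
		if (rng->data_read(rng, &buf[i]) != 4)	/* 4 bytes per read */
			break;
	}
	return i * 4;	/* bytes gathered */
}
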
diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig
index a6dcb2918157..b894f67fdf14 100644
--- a/drivers/char/ipmi/Kconfig
+++ b/drivers/char/ipmi/Kconfig
@@ -3,6 +3,8 @@
3# 3#
4 4
5menu "IPMI" 5menu "IPMI"
6 depends on HAS_IOMEM
7
6config IPMI_HANDLER 8config IPMI_HANDLER
7 tristate 'IPMI top-level message handler' 9 tristate 'IPMI top-level message handler'
8 help 10 help
diff --git a/drivers/char/mmtimer.c b/drivers/char/mmtimer.c
index c09160383a53..6e55cfb9c65a 100644
--- a/drivers/char/mmtimer.c
+++ b/drivers/char/mmtimer.c
@@ -705,15 +705,13 @@ static int __init mmtimer_init(void)
705 maxn++; 705 maxn++;
706 706
707 /* Allocate list of node ptrs to mmtimer_t's */ 707 /* Allocate list of node ptrs to mmtimer_t's */
708 timers = kmalloc(sizeof(mmtimer_t *)*maxn, GFP_KERNEL); 708 timers = kzalloc(sizeof(mmtimer_t *)*maxn, GFP_KERNEL);
709 if (timers == NULL) { 709 if (timers == NULL) {
710 printk(KERN_ERR "%s: failed to allocate memory for device\n", 710 printk(KERN_ERR "%s: failed to allocate memory for device\n",
711 MMTIMER_NAME); 711 MMTIMER_NAME);
712 goto out3; 712 goto out3;
713 } 713 }
714 714
715 memset(timers,0,(sizeof(mmtimer_t *)*maxn));
716
717 /* Allocate mmtimer_t's for each online node */ 715 /* Allocate mmtimer_t's for each online node */
718 for_each_online_node(node) { 716 for_each_online_node(node) {
719 timers[node] = kmalloc_node(sizeof(mmtimer_t)*NUM_COMPARATORS, GFP_KERNEL, node); 717 timers[node] = kmalloc_node(sizeof(mmtimer_t)*NUM_COMPARATORS, GFP_KERNEL, node);
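
kzalloc() is kmalloc() plus a guaranteed zeroing of the returned memory, which is why the explicit memset() after the allocation can be deleted. Side by side:

/* Before: allocate, then zero by hand. */
timers = kmalloc(sizeof(mmtimer_t *) * maxn, GFP_KERNEL);
if (timers)
	memset(timers, 0, sizeof(mmtimer_t *) * maxn);

/* After: a single zeroing allocation. */
timers = kzalloc(sizeof(mmtimer_t *) * maxn, GFP_KERNEL);
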
diff --git a/drivers/char/pcmcia/Kconfig b/drivers/char/pcmcia/Kconfig
index 27c1179ee527..f25facd97bb4 100644
--- a/drivers/char/pcmcia/Kconfig
+++ b/drivers/char/pcmcia/Kconfig
@@ -21,6 +21,7 @@ config SYNCLINK_CS
21config CARDMAN_4000 21config CARDMAN_4000
22 tristate "Omnikey Cardman 4000 support" 22 tristate "Omnikey Cardman 4000 support"
23 depends on PCMCIA 23 depends on PCMCIA
24 select BITREVERSE
24 help 25 help
25 Enable support for the Omnikey Cardman 4000 PCMCIA Smartcard 26 Enable support for the Omnikey Cardman 4000 PCMCIA Smartcard
26 reader. 27 reader.
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index e91b43a014b0..fee58e03dbe2 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -31,6 +31,7 @@
31#include <linux/init.h> 31#include <linux/init.h>
32#include <linux/fs.h> 32#include <linux/fs.h>
33#include <linux/delay.h> 33#include <linux/delay.h>
34#include <linux/bitrev.h>
34#include <asm/uaccess.h> 35#include <asm/uaccess.h>
35#include <asm/io.h> 36#include <asm/io.h>
36 37
@@ -194,41 +195,17 @@ static inline unsigned char xinb(unsigned short port)
194} 195}
195#endif 196#endif
196 197
197#define b_0000 15 198static inline unsigned char invert_revert(unsigned char ch)
198#define b_0001 14 199{
199#define b_0010 13 200 return bitrev8(~ch);
200#define b_0011 12 201}
201#define b_0100 11
202#define b_0101 10
203#define b_0110 9
204#define b_0111 8
205#define b_1000 7
206#define b_1001 6
207#define b_1010 5
208#define b_1011 4
209#define b_1100 3
210#define b_1101 2
211#define b_1110 1
212#define b_1111 0
213
214static unsigned char irtab[16] = {
215 b_0000, b_1000, b_0100, b_1100,
216 b_0010, b_1010, b_0110, b_1110,
217 b_0001, b_1001, b_0101, b_1101,
218 b_0011, b_1011, b_0111, b_1111
219};
220 202
221static void str_invert_revert(unsigned char *b, int len) 203static void str_invert_revert(unsigned char *b, int len)
222{ 204{
223 int i; 205 int i;
224 206
225 for (i = 0; i < len; i++) 207 for (i = 0; i < len; i++)
226 b[i] = (irtab[b[i] & 0x0f] << 4) | irtab[b[i] >> 4]; 208 b[i] = invert_revert(b[i]);
227}
228
229static unsigned char invert_revert(unsigned char ch)
230{
231 return (irtab[ch & 0x0f] << 4) | irtab[ch >> 4];
232} 209}
233 210
234#define ATRLENCK(dev,pos) \ 211#define ATRLENCK(dev,pos) \
@@ -1114,7 +1091,7 @@ static ssize_t cmm_write(struct file *filp, const char __user *buf,
1114 /* 1091 /*
1115 * wait for atr to become valid. 1092 * wait for atr to become valid.
1116 * note: it is important to lock this code. if we dont, the monitor 1093 * note: it is important to lock this code. if we dont, the monitor
1117 * could be run between test_bit and the the call the sleep on the 1094 * could be run between test_bit and the call to sleep on the
1118 * atr-queue. if *then* the monitor detects atr valid, it will wake up 1095 * atr-queue. if *then* the monitor detects atr valid, it will wake up
1119 * any process on the atr-queue, *but* since we have been interrupted, 1096 * any process on the atr-queue, *but* since we have been interrupted,
1120 * we do not yet sleep on this queue. this would result in a missed 1097 * we do not yet sleep on this queue. this would result in a missed
@@ -1881,8 +1858,11 @@ static int cm4000_probe(struct pcmcia_device *link)
1881 init_waitqueue_head(&dev->readq); 1858 init_waitqueue_head(&dev->readq);
1882 1859
1883 ret = cm4000_config(link, i); 1860 ret = cm4000_config(link, i);
1884 if (ret) 1861 if (ret) {
1862 dev_table[i] = NULL;
1863 kfree(dev);
1885 return ret; 1864 return ret;
1865 }
1886 1866
1887 class_device_create(cmm_class, NULL, MKDEV(major, i), NULL, 1867 class_device_create(cmm_class, NULL, MKDEV(major, i), NULL,
1888 "cmm%d", i); 1868 "cmm%d", i);
@@ -1907,7 +1887,7 @@ static void cm4000_detach(struct pcmcia_device *link)
1907 cm4000_release(link); 1887 cm4000_release(link);
1908 1888
1909 dev_table[devno] = NULL; 1889 dev_table[devno] = NULL;
1910 kfree(dev); 1890 kfree(dev);
1911 1891
1912 class_device_destroy(cmm_class, MKDEV(major, devno)); 1892 class_device_destroy(cmm_class, MKDEV(major, devno));
1913 1893
@@ -1956,12 +1936,14 @@ static int __init cmm_init(void)
1956 if (major < 0) { 1936 if (major < 0) {
1957 printk(KERN_WARNING MODULE_NAME 1937 printk(KERN_WARNING MODULE_NAME
1958 ": could not get major number\n"); 1938 ": could not get major number\n");
1939 class_destroy(cmm_class);
1959 return major; 1940 return major;
1960 } 1941 }
1961 1942
1962 rc = pcmcia_register_driver(&cm4000_driver); 1943 rc = pcmcia_register_driver(&cm4000_driver);
1963 if (rc < 0) { 1944 if (rc < 0) {
1964 unregister_chrdev(major, DEVICE_NAME); 1945 unregister_chrdev(major, DEVICE_NAME);
1946 class_destroy(cmm_class);
1965 return rc; 1947 return rc;
1966 } 1948 }
1967 1949
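
The nibble table deleted in the cm4000_cs.c hunk above encoded complement-plus-bit-reversal, with irtab[v] equal to the complemented 4-bit reversal of v, so the whole-byte operation collapses to bitrev8(~ch). A self-contained sketch of the equivalence (old_invert_revert reconstructs the removed formula):

#include <linux/bitrev.h>

/* The removed lookup table: irtab[v] == (~bitrev4(v)) & 0xf per nibble. */
static unsigned char old_invert_revert(unsigned char ch)
{
	static const unsigned char irtab[16] = {
		15, 7, 11, 3, 13, 5, 9, 1, 14, 6, 10, 2, 12, 4, 8, 0
	};

	return (irtab[ch & 0x0f] << 4) | irtab[ch >> 4];
}

/* For every ch in 0..255: old_invert_revert(ch) == (unsigned char)bitrev8(~ch). */
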
diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c
index f2e4ec4fd407..af88181a17f4 100644
--- a/drivers/char/pcmcia/cm4040_cs.c
+++ b/drivers/char/pcmcia/cm4040_cs.c
@@ -636,8 +636,11 @@ static int reader_probe(struct pcmcia_device *link)
636 setup_timer(&dev->poll_timer, cm4040_do_poll, 0); 636 setup_timer(&dev->poll_timer, cm4040_do_poll, 0);
637 637
638 ret = reader_config(link, i); 638 ret = reader_config(link, i);
639 if (ret) 639 if (ret) {
640 dev_table[i] = NULL;
641 kfree(dev);
640 return ret; 642 return ret;
643 }
641 644
642 class_device_create(cmx_class, NULL, MKDEV(major, i), NULL, 645 class_device_create(cmx_class, NULL, MKDEV(major, i), NULL,
643 "cmx%d", i); 646 "cmx%d", i);
@@ -708,12 +711,14 @@ static int __init cm4040_init(void)
708 if (major < 0) { 711 if (major < 0) {
709 printk(KERN_WARNING MODULE_NAME 712 printk(KERN_WARNING MODULE_NAME
710 ": could not get major number\n"); 713 ": could not get major number\n");
714 class_destroy(cmx_class);
711 return major; 715 return major;
712 } 716 }
713 717
714 rc = pcmcia_register_driver(&reader_driver); 718 rc = pcmcia_register_driver(&reader_driver);
715 if (rc < 0) { 719 if (rc < 0) {
716 unregister_chrdev(major, DEVICE_NAME); 720 unregister_chrdev(major, DEVICE_NAME);
721 class_destroy(cmx_class);
717 return rc; 722 return rc;
718 } 723 }
719 724
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index fe00c7dfb649..dc4e1ff7f56f 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -3,6 +3,7 @@
3# 3#
4 4
5menu "TPM devices" 5menu "TPM devices"
6 depends on HAS_IOMEM
6 7
7config TCG_TPM 8config TCG_TPM
8 tristate "TPM Hardware Support" 9 tristate "TPM Hardware Support"
@@ -33,7 +34,7 @@ config TCG_NSC
33 tristate "National Semiconductor TPM Interface" 34 tristate "National Semiconductor TPM Interface"
34 depends on TCG_TPM && PNPACPI 35 depends on TCG_TPM && PNPACPI
35 ---help--- 36 ---help---
36 If you have a TPM security chip from National Semicondutor 37 If you have a TPM security chip from National Semiconductor
37 say Yes and it will be accessible from within Linux. To 38 say Yes and it will be accessible from within Linux. To
38 compile this driver as a module, choose M here; the module 39 compile this driver as a module, choose M here; the module
39 will be called tpm_nsc. 40 will be called tpm_nsc.
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 7710a6a77d97..fc662e4ce58a 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -934,13 +934,6 @@ restart:
934 return -EINVAL; 934 return -EINVAL;
935 935
936 /* 936 /*
937 * No more input please, we are switching. The new ldisc
938 * will update this value in the ldisc open function
939 */
940
941 tty->receive_room = 0;
942
943 /*
944 * Problem: What do we do if this blocks ? 937 * Problem: What do we do if this blocks ?
945 */ 938 */
946 939
@@ -951,6 +944,13 @@ restart:
951 return 0; 944 return 0;
952 } 945 }
953 946
947 /*
948 * No more input please, we are switching. The new ldisc
949 * will update this value in the ldisc open function
950 */
951
952 tty->receive_room = 0;
953
954 o_ldisc = tty->ldisc; 954 o_ldisc = tty->ldisc;
955 o_tty = tty->link; 955 o_tty = tty->link;
956 956
@@ -1573,11 +1573,11 @@ void no_tty(void)
1573 1573
1574 1574
1575/** 1575/**
1576 * stop_tty - propogate flow control 1576 * stop_tty - propagate flow control
1577 * @tty: tty to stop 1577 * @tty: tty to stop
1578 * 1578 *
1579 * Perform flow control to the driver. For PTY/TTY pairs we 1579 * Perform flow control to the driver. For PTY/TTY pairs we
 1580 * must also propogate the TIOCKPKT status. May be called 1580 * must also propagate the TIOCPKT status. May be called
1581 * on an already stopped device and will not re-call the driver 1581 * on an already stopped device and will not re-call the driver
1582 * method. 1582 * method.
1583 * 1583 *
@@ -1607,11 +1607,11 @@ void stop_tty(struct tty_struct *tty)
1607EXPORT_SYMBOL(stop_tty); 1607EXPORT_SYMBOL(stop_tty);
1608 1608
1609/** 1609/**
1610 * start_tty - propogate flow control 1610 * start_tty - propagate flow control
1611 * @tty: tty to start 1611 * @tty: tty to start
1612 * 1612 *
1613 * Start a tty that has been stopped if at all possible. Perform 1613 * Start a tty that has been stopped if at all possible. Perform
 1614 * any neccessary wakeups and propogate the TIOCPKT status. If this 1614 * any necessary wakeups and propagate the TIOCPKT status. If the
 1615 * is the tty was previous stopped and is being started then the 1615 * tty was previously stopped and is being started then the
1616 * driver start method is invoked and the line discipline woken. 1616 * driver start method is invoked and the line discipline woken.
1617 * 1617 *
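
The motivation for moving the receive_room store in the tty_io.c hunks above: tty_set_ldisc() returns early when the requested discipline is already attached, and the old ordering zeroed receive_room before that check, throttling input on what should be a no-op. After the move, the control flow is, in outline:

if (tty->ldisc.num == ldisc)
	return 0;	/* already attached: input must stay enabled */

/*
 * Only a real switch blocks input; the new ldisc's open()
 * restores receive_room to a sane value.
 */
tty->receive_room = 0;
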
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 893dbaf386fb..eb37fba9b7ef 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1685,9 +1685,11 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
1685 if (sys_dev) { 1685 if (sys_dev) {
1686 switch (action) { 1686 switch (action) {
1687 case CPU_ONLINE: 1687 case CPU_ONLINE:
1688 case CPU_ONLINE_FROZEN:
1688 cpufreq_add_dev(sys_dev); 1689 cpufreq_add_dev(sys_dev);
1689 break; 1690 break;
1690 case CPU_DOWN_PREPARE: 1691 case CPU_DOWN_PREPARE:
1692 case CPU_DOWN_PREPARE_FROZEN:
1691 if (unlikely(lock_policy_rwsem_write(cpu))) 1693 if (unlikely(lock_policy_rwsem_write(cpu)))
1692 BUG(); 1694 BUG();
1693 1695
@@ -1699,6 +1701,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
1699 __cpufreq_remove_dev(sys_dev); 1701 __cpufreq_remove_dev(sys_dev);
1700 break; 1702 break;
1701 case CPU_DOWN_FAILED: 1703 case CPU_DOWN_FAILED:
1704 case CPU_DOWN_FAILED_FROZEN:
1702 cpufreq_add_dev(sys_dev); 1705 cpufreq_add_dev(sys_dev);
1703 break; 1706 break;
1704 } 1707 }
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index d1c7cac9316c..d2f0cbd8b8f3 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -313,9 +313,11 @@ static int cpufreq_stat_cpu_callback(struct notifier_block *nfb,
313 313
314 switch (action) { 314 switch (action) {
315 case CPU_ONLINE: 315 case CPU_ONLINE:
316 case CPU_ONLINE_FROZEN:
316 cpufreq_update_policy(cpu); 317 cpufreq_update_policy(cpu);
317 break; 318 break;
318 case CPU_DEAD: 319 case CPU_DEAD:
320 case CPU_DEAD_FROZEN:
319 cpufreq_stats_free_table(cpu); 321 cpufreq_stats_free_table(cpu);
320 break; 322 break;
321 } 323 }
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index f21fe66c9eef..e678a33ea672 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -51,9 +51,31 @@ config CRYPTO_DEV_GEODE
51 default m 51 default m
52 help 52 help
53 Say 'Y' here to use the AMD Geode LX processor on-board AES 53 Say 'Y' here to use the AMD Geode LX processor on-board AES
54 engine for the CryptoAPI AES alogrithm. 54 engine for the CryptoAPI AES algorithm.
55 55
56 To compile this driver as a module, choose M here: the module 56 To compile this driver as a module, choose M here: the module
57 will be called geode-aes. 57 will be called geode-aes.
58 58
59config ZCRYPT
60 tristate "Support for PCI-attached cryptographic adapters"
61 depends on S390
62 select ZCRYPT_MONOLITHIC if ZCRYPT="y"
63 default "m"
64 help
65 Select this option if you want to use a PCI-attached cryptographic
66 adapter like:
67 + PCI Cryptographic Accelerator (PCICA)
68 + PCI Cryptographic Coprocessor (PCICC)
69 + PCI-X Cryptographic Coprocessor (PCIXCC)
70 + Crypto Express2 Coprocessor (CEX2C)
71 + Crypto Express2 Accelerator (CEX2A)
72
73config ZCRYPT_MONOLITHIC
74 bool "Monolithic zcrypt module"
75 depends on ZCRYPT="m"
76 help
77 Select this option if you want to have a single module z90crypt.ko
78 that contains all parts of the crypto device driver (ap bus,
79 request router and all the card drivers).
80
59endmenu 81endmenu
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 30d021d1a07c..72be6c63edfc 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -3,6 +3,7 @@
3# 3#
4 4
5menu "DMA Engine support" 5menu "DMA Engine support"
6 depends on !S390
6 7
7config DMA_ENGINE 8config DMA_ENGINE
8 bool "Support for DMA engines" 9 bool "Support for DMA engines"
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 4f0898400c6d..807c402df049 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -7,6 +7,7 @@
7# 7#
8 8
9menu 'EDAC - error detection and reporting (RAS) (EXPERIMENTAL)' 9menu 'EDAC - error detection and reporting (RAS) (EXPERIMENTAL)'
10 depends on HAS_IOMEM
10 11
11config EDAC 12config EDAC
12 tristate "EDAC core system error reporting (EXPERIMENTAL)" 13 tristate "EDAC core system error reporting (EXPERIMENTAL)"
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
new file mode 100644
index 000000000000..5932c72f9e42
--- /dev/null
+++ b/drivers/firewire/Kconfig
@@ -0,0 +1,61 @@
1# -*- shell-script -*-
2
3comment "An alternative FireWire stack is available with EXPERIMENTAL=y"
4 depends on EXPERIMENTAL=n
5
6config FIREWIRE
7 tristate "IEEE 1394 (FireWire) support (JUJU alternative stack, experimental)"
8 depends on EXPERIMENTAL
9 select CRC_ITU_T
10 help
11 IEEE 1394 describes a high performance serial bus, which is also
12 known as FireWire(tm) or i.Link(tm) and is used for connecting all
13 sorts of devices (most notably digital video cameras) to your
14 computer.
15
16 If you have FireWire hardware and want to use it, say Y here. This
17 is the core support only, you will also need to select a driver for
18 your IEEE 1394 adapter.
19
20 To compile this driver as a module, say M here: the module will be
21 called fw-core.
22
23 This is the "JUJU" FireWire stack, an alternative implementation
24 designed for robustness and simplicity. You can build either this
 25 stack, the classic stack (the ieee1394 driver, ohci1394 etc.),
26 or both.
27
28config FIREWIRE_OHCI
29 tristate "Support for OHCI FireWire host controllers"
30 depends on PCI && FIREWIRE
31 help
32 Enable this driver if you have a FireWire controller based
33 on the OHCI specification. For all practical purposes, this
34 is the only chipset in use, so say Y here.
35
36 To compile this driver as a module, say M here: The module will be
37 called fw-ohci.
38
39 If you also build ohci1394 of the classic IEEE 1394 driver stack,
40 blacklist either ohci1394 or fw-ohci to let hotplug load the desired
41 driver.
42
43config FIREWIRE_SBP2
44 tristate "Support for storage devices (SBP-2 protocol driver)"
45 depends on FIREWIRE && SCSI
46 help
47 This option enables you to use SBP-2 devices connected to a
48 FireWire bus. SBP-2 devices include storage devices like
 49 hard disks and DVD drives, as well as some other FireWire devices
50 like scanners.
51
52 To compile this driver as a module, say M here: The module will be
53 called fw-sbp2.
54
55 You should also enable support for disks, CD-ROMs, etc. in the SCSI
56 configuration section.
57
58 If you also build sbp2 of the classic IEEE 1394 driver stack,
59 blacklist either sbp2 or fw-sbp2 to let hotplug load the desired
60 driver.
61
diff --git a/drivers/firewire/Makefile b/drivers/firewire/Makefile
new file mode 100644
index 000000000000..fc7d59d4bce0
--- /dev/null
+++ b/drivers/firewire/Makefile
@@ -0,0 +1,10 @@
1#
2# Makefile for the Linux IEEE 1394 implementation
3#
4
5fw-core-y += fw-card.o fw-topology.o fw-transaction.o fw-iso.o \
6 fw-device.o fw-cdev.o
7
8obj-$(CONFIG_FIREWIRE) += fw-core.o
9obj-$(CONFIG_FIREWIRE_OHCI) += fw-ohci.o
10obj-$(CONFIG_FIREWIRE_SBP2) += fw-sbp2.o
diff --git a/drivers/firewire/fw-card.c b/drivers/firewire/fw-card.c
new file mode 100644
index 000000000000..636151a64add
--- /dev/null
+++ b/drivers/firewire/fw-card.c
@@ -0,0 +1,560 @@
1/*
2 * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software Foundation,
16 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 */
18
19#include <linux/module.h>
20#include <linux/errno.h>
21#include <linux/device.h>
22#include <linux/mutex.h>
23#include <linux/crc-itu-t.h>
24#include "fw-transaction.h"
25#include "fw-topology.h"
26#include "fw-device.h"
27
28int fw_compute_block_crc(u32 *block)
29{
30 __be32 be32_block[256];
31 int i, length;
32
33 length = (*block >> 16) & 0xff;
34 for (i = 0; i < length; i++)
35 be32_block[i] = cpu_to_be32(block[i + 1]);
36 *block |= crc_itu_t(0, (u8 *) be32_block, length * 4);
37
38 return length;
39}
40
41static DEFINE_MUTEX(card_mutex);
42static LIST_HEAD(card_list);
43
44static LIST_HEAD(descriptor_list);
45static int descriptor_count;
46
47#define BIB_CRC(v) ((v) << 0)
48#define BIB_CRC_LENGTH(v) ((v) << 16)
49#define BIB_INFO_LENGTH(v) ((v) << 24)
50
51#define BIB_LINK_SPEED(v) ((v) << 0)
52#define BIB_GENERATION(v) ((v) << 4)
53#define BIB_MAX_ROM(v) ((v) << 8)
54#define BIB_MAX_RECEIVE(v) ((v) << 12)
55#define BIB_CYC_CLK_ACC(v) ((v) << 16)
56#define BIB_PMC ((1) << 27)
57#define BIB_BMC ((1) << 28)
58#define BIB_ISC ((1) << 29)
59#define BIB_CMC ((1) << 30)
60#define BIB_IMC ((1) << 31)
61
62static u32 *
63generate_config_rom(struct fw_card *card, size_t *config_rom_length)
64{
65 struct fw_descriptor *desc;
66 static u32 config_rom[256];
67 int i, j, length;
68
69 /*
70 * Initialize contents of config rom buffer. On the OHCI
 71 * controller, block reads to the config rom access host
 72 * memory, but quadlet reads access the hardware bus info block
 73 * registers. That's just crack, but it means we should make
 74 * sure the contents of the bus info block in host memory match
75 * the version stored in the OHCI registers.
76 */
77
78 memset(config_rom, 0, sizeof(config_rom));
79 config_rom[0] = BIB_CRC_LENGTH(4) | BIB_INFO_LENGTH(4) | BIB_CRC(0);
80 config_rom[1] = 0x31333934;
81
82 config_rom[2] =
83 BIB_LINK_SPEED(card->link_speed) |
84 BIB_GENERATION(card->config_rom_generation++ % 14 + 2) |
85 BIB_MAX_ROM(2) |
86 BIB_MAX_RECEIVE(card->max_receive) |
87 BIB_BMC | BIB_ISC | BIB_CMC | BIB_IMC;
88 config_rom[3] = card->guid >> 32;
89 config_rom[4] = card->guid;
90
91 /* Generate root directory. */
92 i = 5;
93 config_rom[i++] = 0;
94 config_rom[i++] = 0x0c0083c0; /* node capabilities */
95 j = i + descriptor_count;
96
97 /* Generate root directory entries for descriptors. */
98 list_for_each_entry (desc, &descriptor_list, link) {
99 if (desc->immediate > 0)
100 config_rom[i++] = desc->immediate;
101 config_rom[i] = desc->key | (j - i);
102 i++;
103 j += desc->length;
104 }
105
106 /* Update root directory length. */
107 config_rom[5] = (i - 5 - 1) << 16;
108
109 /* End of root directory, now copy in descriptors. */
110 list_for_each_entry (desc, &descriptor_list, link) {
111 memcpy(&config_rom[i], desc->data, desc->length * 4);
112 i += desc->length;
113 }
114
115 /* Calculate CRCs for all blocks in the config rom. This
116 * assumes that CRC length and info length are identical for
117 * the bus info block, which is always the case for this
118 * implementation. */
119 for (i = 0; i < j; i += length + 1)
120 length = fw_compute_block_crc(config_rom + i);
121
122 *config_rom_length = j;
123
124 return config_rom;
125}
126
127static void
128update_config_roms(void)
129{
130 struct fw_card *card;
131 u32 *config_rom;
132 size_t length;
133
134 list_for_each_entry (card, &card_list, link) {
135 config_rom = generate_config_rom(card, &length);
136 card->driver->set_config_rom(card, config_rom, length);
137 }
138}
139
140int
141fw_core_add_descriptor(struct fw_descriptor *desc)
142{
143 size_t i;
144
145 /*
146 * Check descriptor is valid; the length of all blocks in the
147 * descriptor has to add up to exactly the length of the
148 * block.
149 */
150 i = 0;
151 while (i < desc->length)
152 i += (desc->data[i] >> 16) + 1;
153
154 if (i != desc->length)
155 return -EINVAL;
156
157 mutex_lock(&card_mutex);
158
159 list_add_tail(&desc->link, &descriptor_list);
160 descriptor_count++;
161 if (desc->immediate > 0)
162 descriptor_count++;
163 update_config_roms();
164
165 mutex_unlock(&card_mutex);
166
167 return 0;
168}
169EXPORT_SYMBOL(fw_core_add_descriptor);
170
171void
172fw_core_remove_descriptor(struct fw_descriptor *desc)
173{
174 mutex_lock(&card_mutex);
175
176 list_del(&desc->link);
177 descriptor_count--;
178 if (desc->immediate > 0)
179 descriptor_count--;
180 update_config_roms();
181
182 mutex_unlock(&card_mutex);
183}
184EXPORT_SYMBOL(fw_core_remove_descriptor);
185
186static const char gap_count_table[] = {
187 63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40
188};
189
190struct bm_data {
191 struct fw_transaction t;
192 struct {
193 __be32 arg;
194 __be32 data;
195 } lock;
196 u32 old;
197 int rcode;
198 struct completion done;
199};
200
201static void
202complete_bm_lock(struct fw_card *card, int rcode,
203 void *payload, size_t length, void *data)
204{
205 struct bm_data *bmd = data;
206
207 if (rcode == RCODE_COMPLETE)
208 bmd->old = be32_to_cpu(*(__be32 *) payload);
209 bmd->rcode = rcode;
210 complete(&bmd->done);
211}
212
213static void
214fw_card_bm_work(struct work_struct *work)
215{
216 struct fw_card *card = container_of(work, struct fw_card, work.work);
217 struct fw_device *root;
218 struct bm_data bmd;
219 unsigned long flags;
220 int root_id, new_root_id, irm_id, gap_count, generation, grace;
221 int do_reset = 0;
222
223 spin_lock_irqsave(&card->lock, flags);
224
225 generation = card->generation;
226 root = card->root_node->data;
227 root_id = card->root_node->node_id;
228 grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 10));
229
230 if (card->bm_generation + 1 == generation ||
231 (card->bm_generation != generation && grace)) {
232 /*
233 * This first step is to figure out who is IRM and
234 * then try to become bus manager. If the IRM is not
235 * well defined (e.g. does not have an active link
 236 * layer or does not respond to our lock request), we
237 * will have to do a little vigilante bus management.
238 * In that case, we do a goto into the gap count logic
239 * so that when we do the reset, we still optimize the
240 * gap count. That could well save a reset in the
241 * next generation.
242 */
243
244 irm_id = card->irm_node->node_id;
245 if (!card->irm_node->link_on) {
246 new_root_id = card->local_node->node_id;
247 fw_notify("IRM has link off, making local node (%02x) root.\n",
248 new_root_id);
249 goto pick_me;
250 }
251
252 bmd.lock.arg = cpu_to_be32(0x3f);
253 bmd.lock.data = cpu_to_be32(card->local_node->node_id);
254
255 spin_unlock_irqrestore(&card->lock, flags);
256
257 init_completion(&bmd.done);
258 fw_send_request(card, &bmd.t, TCODE_LOCK_COMPARE_SWAP,
259 irm_id, generation,
260 SCODE_100, CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID,
261 &bmd.lock, sizeof(bmd.lock),
262 complete_bm_lock, &bmd);
263 wait_for_completion(&bmd.done);
264
265 if (bmd.rcode == RCODE_GENERATION) {
266 /*
267 * Another bus reset happened. Just return,
268 * the BM work has been rescheduled.
269 */
270 return;
271 }
272
273 if (bmd.rcode == RCODE_COMPLETE && bmd.old != 0x3f)
274 /* Somebody else is BM, let them do the work. */
275 return;
276
277 spin_lock_irqsave(&card->lock, flags);
278 if (bmd.rcode != RCODE_COMPLETE) {
279 /*
280 * The lock request failed, maybe the IRM
281 * isn't really IRM capable after all. Let's
282 * do a bus reset and pick the local node as
283 * root, and thus, IRM.
284 */
285 new_root_id = card->local_node->node_id;
286 fw_notify("BM lock failed, making local node (%02x) root.\n",
287 new_root_id);
288 goto pick_me;
289 }
290 } else if (card->bm_generation != generation) {
291 /*
292 * OK, we weren't BM in the last generation, and it's
293 * less than 100ms since last bus reset. Reschedule
294 * this task 100ms from now.
295 */
296 spin_unlock_irqrestore(&card->lock, flags);
297 schedule_delayed_work(&card->work, DIV_ROUND_UP(HZ, 10));
298 return;
299 }
300
301 /*
302 * We're bus manager for this generation, so next step is to
303 * make sure we have an active cycle master and do gap count
304 * optimization.
305 */
306 card->bm_generation = generation;
307
308 if (root == NULL) {
309 /*
310 * Either link_on is false, or we failed to read the
311 * config rom. In either case, pick another root.
312 */
313 new_root_id = card->local_node->node_id;
314 } else if (atomic_read(&root->state) != FW_DEVICE_RUNNING) {
315 /*
316 * If we haven't probed this device yet, bail out now
317 * and let's try again once that's done.
318 */
319 spin_unlock_irqrestore(&card->lock, flags);
320 return;
321 } else if (root->config_rom[2] & BIB_CMC) {
322 /*
323 * FIXME: I suppose we should set the cmstr bit in the
324 * STATE_CLEAR register of this node, as described in
325 * 1394-1995, 8.4.2.6. Also, send out a force root
326 * packet for this node.
327 */
328 new_root_id = root_id;
329 } else {
330 /*
331 * Current root has an active link layer and we
332 * successfully read the config rom, but it's not
333 * cycle master capable.
334 */
335 new_root_id = card->local_node->node_id;
336 }
337
338 pick_me:
339 /* Now figure out what gap count to set. */
340 if (card->topology_type == FW_TOPOLOGY_A &&
341 card->root_node->max_hops < ARRAY_SIZE(gap_count_table))
342 gap_count = gap_count_table[card->root_node->max_hops];
343 else
344 gap_count = 63;
345
346 /*
347 * Finally, figure out if we should do a reset or not. If we've
 348 * done less than 5 resets with the same physical topology and we
349 * have either a new root or a new gap count setting, let's do it.
350 */
351
352 if (card->bm_retries++ < 5 &&
353 (card->gap_count != gap_count || new_root_id != root_id))
354 do_reset = 1;
355
356 spin_unlock_irqrestore(&card->lock, flags);
357
358 if (do_reset) {
359 fw_notify("phy config: card %d, new root=%x, gap_count=%d\n",
360 card->index, new_root_id, gap_count);
361 fw_send_phy_config(card, new_root_id, generation, gap_count);
362 fw_core_initiate_bus_reset(card, 1);
363 }
364}
365
366static void
367flush_timer_callback(unsigned long data)
368{
369 struct fw_card *card = (struct fw_card *)data;
370
371 fw_flush_transactions(card);
372}
373
374void
375fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver,
376 struct device *device)
377{
378 static atomic_t index = ATOMIC_INIT(-1);
379
380 kref_init(&card->kref);
381 card->index = atomic_inc_return(&index);
382 card->driver = driver;
383 card->device = device;
384 card->current_tlabel = 0;
385 card->tlabel_mask = 0;
386 card->color = 0;
387
388 INIT_LIST_HEAD(&card->transaction_list);
389 spin_lock_init(&card->lock);
390 setup_timer(&card->flush_timer,
391 flush_timer_callback, (unsigned long)card);
392
393 card->local_node = NULL;
394
395 INIT_DELAYED_WORK(&card->work, fw_card_bm_work);
396}
397EXPORT_SYMBOL(fw_card_initialize);
398
399int
400fw_card_add(struct fw_card *card,
401 u32 max_receive, u32 link_speed, u64 guid)
402{
403 u32 *config_rom;
404 size_t length;
405
406 card->max_receive = max_receive;
407 card->link_speed = link_speed;
408 card->guid = guid;
409
410 /* Activate link_on bit and contender bit in our self ID packets.*/
411 if (card->driver->update_phy_reg(card, 4, 0,
412 PHY_LINK_ACTIVE | PHY_CONTENDER) < 0)
413 return -EIO;
414
415 /*
416 * The subsystem grabs a reference when the card is added and
417 * drops it when the driver calls fw_core_remove_card.
418 */
419 fw_card_get(card);
420
421 mutex_lock(&card_mutex);
422 config_rom = generate_config_rom(card, &length);
423 list_add_tail(&card->link, &card_list);
424 mutex_unlock(&card_mutex);
425
426 return card->driver->enable(card, config_rom, length);
427}
428EXPORT_SYMBOL(fw_card_add);
429
430
431/*
 432 * The next few functions implement a dummy driver that is used once a
433 * card driver shuts down an fw_card. This allows the driver to
434 * cleanly unload, as all IO to the card will be handled by the dummy
435 * driver instead of calling into the (possibly) unloaded module. The
436 * dummy driver just fails all IO.
437 */
438
439static int
440dummy_enable(struct fw_card *card, u32 *config_rom, size_t length)
441{
442 BUG();
443 return -1;
444}
445
446static int
447dummy_update_phy_reg(struct fw_card *card, int address,
448 int clear_bits, int set_bits)
449{
450 return -ENODEV;
451}
452
453static int
454dummy_set_config_rom(struct fw_card *card,
455 u32 *config_rom, size_t length)
456{
457 /*
458 * We take the card out of card_list before setting the dummy
459 * driver, so this should never get called.
460 */
461 BUG();
462 return -1;
463}
464
465static void
466dummy_send_request(struct fw_card *card, struct fw_packet *packet)
467{
468 packet->callback(packet, card, -ENODEV);
469}
470
471static void
472dummy_send_response(struct fw_card *card, struct fw_packet *packet)
473{
474 packet->callback(packet, card, -ENODEV);
475}
476
477static int
478dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet)
479{
480 return -ENOENT;
481}
482
483static int
484dummy_enable_phys_dma(struct fw_card *card,
485 int node_id, int generation)
486{
487 return -ENODEV;
488}
489
490static struct fw_card_driver dummy_driver = {
491 .name = "dummy",
492 .enable = dummy_enable,
493 .update_phy_reg = dummy_update_phy_reg,
494 .set_config_rom = dummy_set_config_rom,
495 .send_request = dummy_send_request,
496 .cancel_packet = dummy_cancel_packet,
497 .send_response = dummy_send_response,
498 .enable_phys_dma = dummy_enable_phys_dma,
499};
500
501void
502fw_core_remove_card(struct fw_card *card)
503{
504 card->driver->update_phy_reg(card, 4,
505 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
506 fw_core_initiate_bus_reset(card, 1);
507
508 mutex_lock(&card_mutex);
509 list_del(&card->link);
510 mutex_unlock(&card_mutex);
511
512 /* Set up the dummy driver. */
513 card->driver = &dummy_driver;
514
515 fw_flush_transactions(card);
516
517 fw_destroy_nodes(card);
518
519 fw_card_put(card);
520}
521EXPORT_SYMBOL(fw_core_remove_card);
522
523struct fw_card *
524fw_card_get(struct fw_card *card)
525{
526 kref_get(&card->kref);
527
528 return card;
529}
530EXPORT_SYMBOL(fw_card_get);
531
532static void
533release_card(struct kref *kref)
534{
535 struct fw_card *card = container_of(kref, struct fw_card, kref);
536
537 kfree(card);
538}
539
540/*
541 * An assumption for fw_card_put() is that the card driver allocates
 542 * the fw_card struct with kmalloc and that it has been shut down
543 * before the last ref is dropped.
544 */
545void
546fw_card_put(struct fw_card *card)
547{
548 kref_put(&card->kref, release_card);
549}
550EXPORT_SYMBOL(fw_card_put);
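/*
 * Illustrative lifetime trace for the reference counting above
 * (assuming, per the comment before fw_card_put(), that the card
 * driver holds its own reference from allocation):
 *
 *	fw_card_add(card, ...)		subsystem takes a ref, fw_card_get()
 *	... card in use ...
 *	fw_core_remove_card(card)	switches to dummy_driver and drops
 *					the subsystem's ref, fw_card_put()
 *	fw_card_put(card)		driver drops the last ref;
 *					release_card() kfree()s the card
 */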
551
552int
553fw_core_initiate_bus_reset(struct fw_card *card, int short_reset)
554{
555 int reg = short_reset ? 5 : 1;
556 int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET;
557
558 return card->driver->update_phy_reg(card, reg, 0, bit);
559}
560EXPORT_SYMBOL(fw_core_initiate_bus_reset);
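/*
 * Usage note (illustrative; register names per the 1394a PHY register
 * map): a long bus reset is initiated via the IBR bit in PHY register
 * 1, a short (arbitrated) reset via the ISBR bit in register 5, which
 * is how the reg/bit pair above is chosen:
 *
 *	fw_core_initiate_bus_reset(card, 0);	long bus reset
 *	fw_core_initiate_bus_reset(card, 1);	short bus reset
 */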
diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c
new file mode 100644
index 000000000000..0fa5bd54c6a1
--- /dev/null
+++ b/drivers/firewire/fw-cdev.c
@@ -0,0 +1,961 @@
1/*
2 * Char device for device raw access
3 *
4 * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software Foundation,
18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21#include <linux/module.h>
22#include <linux/kernel.h>
23#include <linux/wait.h>
24#include <linux/errno.h>
25#include <linux/device.h>
26#include <linux/vmalloc.h>
27#include <linux/poll.h>
28#include <linux/delay.h>
29#include <linux/mm.h>
30#include <linux/idr.h>
31#include <linux/compat.h>
32#include <linux/firewire-cdev.h>
33#include <asm/uaccess.h>
34#include "fw-transaction.h"
35#include "fw-topology.h"
36#include "fw-device.h"
37
38struct client;
39struct client_resource {
40 struct list_head link;
41 void (*release)(struct client *client, struct client_resource *r);
42 u32 handle;
43};
44
45/*
46 * dequeue_event() just kfree()'s the event, so struct event has to
47 * be the first field in each of the containing structs below.
48 */
49
50struct event {
51 struct { void *data; size_t size; } v[2];
52 struct list_head link;
53};
54
55struct bus_reset {
56 struct event event;
57 struct fw_cdev_event_bus_reset reset;
58};
59
60struct response {
61 struct event event;
62 struct fw_transaction transaction;
63 struct client *client;
64 struct client_resource resource;
65 struct fw_cdev_event_response response;
66};
67
68struct iso_interrupt {
69 struct event event;
70 struct fw_cdev_event_iso_interrupt interrupt;
71};
72
73struct client {
74 u32 version;
75 struct fw_device *device;
76 spinlock_t lock;
77 u32 resource_handle;
78 struct list_head resource_list;
79 struct list_head event_list;
80 wait_queue_head_t wait;
81 u64 bus_reset_closure;
82
83 struct fw_iso_context *iso_context;
84 u64 iso_closure;
85 struct fw_iso_buffer buffer;
86 unsigned long vm_start;
87
88 struct list_head link;
89};
90
91static inline void __user *
92u64_to_uptr(__u64 value)
93{
94 return (void __user *)(unsigned long)value;
95}
96
97static inline __u64
98uptr_to_u64(void __user *ptr)
99{
100 return (__u64)(unsigned long)ptr;
101}
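/*
 * Note on the two helpers above: the character device ABI carries
 * user pointers as __u64 so that a single struct layout works for
 * 32-bit and 64-bit userspace alike (see fw_device_op_compat_ioctl
 * below); the unsigned long cast narrows or zero-extends as needed.
 * Typical kernel-side use:
 *
 *	void __user *uptr = u64_to_uptr(get_info->rom);
 */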
102
103static int fw_device_op_open(struct inode *inode, struct file *file)
104{
105 struct fw_device *device;
106 struct client *client;
107 unsigned long flags;
108
109 device = fw_device_from_devt(inode->i_rdev);
110 if (device == NULL)
111 return -ENODEV;
112
113 client = kzalloc(sizeof(*client), GFP_KERNEL);
114 if (client == NULL)
115 return -ENOMEM;
116
117 client->device = fw_device_get(device);
118 INIT_LIST_HEAD(&client->event_list);
119 INIT_LIST_HEAD(&client->resource_list);
120 spin_lock_init(&client->lock);
121 init_waitqueue_head(&client->wait);
122
123 file->private_data = client;
124
125 spin_lock_irqsave(&device->card->lock, flags);
126 list_add_tail(&client->link, &device->client_list);
127 spin_unlock_irqrestore(&device->card->lock, flags);
128
129 return 0;
130}
131
132static void queue_event(struct client *client, struct event *event,
133 void *data0, size_t size0, void *data1, size_t size1)
134{
135 unsigned long flags;
136
137 event->v[0].data = data0;
138 event->v[0].size = size0;
139 event->v[1].data = data1;
140 event->v[1].size = size1;
141
142 spin_lock_irqsave(&client->lock, flags);
143
144 list_add_tail(&event->link, &client->event_list);
145 wake_up_interruptible(&client->wait);
146
147 spin_unlock_irqrestore(&client->lock, flags);
148}
149
150static int
151dequeue_event(struct client *client, char __user *buffer, size_t count)
152{
153 unsigned long flags;
154 struct event *event;
155 size_t size, total;
156 int i, retval;
157
158 retval = wait_event_interruptible(client->wait,
159 !list_empty(&client->event_list) ||
160 fw_device_is_shutdown(client->device));
161 if (retval < 0)
162 return retval;
163
164 if (list_empty(&client->event_list) &&
165 fw_device_is_shutdown(client->device))
166 return -ENODEV;
167
168 spin_lock_irqsave(&client->lock, flags);
169 event = container_of(client->event_list.next, struct event, link);
170 list_del(&event->link);
171 spin_unlock_irqrestore(&client->lock, flags);
172
173 total = 0;
174 for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
175 size = min(event->v[i].size, count - total);
176 if (copy_to_user(buffer + total, event->v[i].data, size)) {
177 retval = -EFAULT;
178 goto out;
179 }
180 total += size;
181 }
182 retval = total;
183
184 out:
185 kfree(event);
186
187 return retval;
188}
189
190static ssize_t
191fw_device_op_read(struct file *file,
192 char __user *buffer, size_t count, loff_t *offset)
193{
194 struct client *client = file->private_data;
195
196 return dequeue_event(client, buffer, count);
197}
198
199static void
200fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
201 struct client *client)
202{
203 struct fw_card *card = client->device->card;
204
205 event->closure = client->bus_reset_closure;
206 event->type = FW_CDEV_EVENT_BUS_RESET;
207 event->node_id = client->device->node_id;
208 event->local_node_id = card->local_node->node_id;
209 event->bm_node_id = 0; /* FIXME: We don't track the BM. */
210 event->irm_node_id = card->irm_node->node_id;
211 event->root_node_id = card->root_node->node_id;
212 event->generation = card->generation;
213}
214
215static void
216for_each_client(struct fw_device *device,
217 void (*callback)(struct client *client))
218{
219 struct fw_card *card = device->card;
220 struct client *c;
221 unsigned long flags;
222
223 spin_lock_irqsave(&card->lock, flags);
224
225 list_for_each_entry(c, &device->client_list, link)
226 callback(c);
227
228 spin_unlock_irqrestore(&card->lock, flags);
229}
230
231static void
232queue_bus_reset_event(struct client *client)
233{
234 struct bus_reset *bus_reset;
235
236 bus_reset = kzalloc(sizeof(*bus_reset), GFP_ATOMIC);
237 if (bus_reset == NULL) {
238 fw_notify("Out of memory when allocating bus reset event\n");
239 return;
240 }
241
242 fill_bus_reset_event(&bus_reset->reset, client);
243
244 queue_event(client, &bus_reset->event,
245 &bus_reset->reset, sizeof(bus_reset->reset), NULL, 0);
246}
247
248void fw_device_cdev_update(struct fw_device *device)
249{
250 for_each_client(device, queue_bus_reset_event);
251}
252
253static void wake_up_client(struct client *client)
254{
255 wake_up_interruptible(&client->wait);
256}
257
258void fw_device_cdev_remove(struct fw_device *device)
259{
260 for_each_client(device, wake_up_client);
261}
262
263static int ioctl_get_info(struct client *client, void *buffer)
264{
265 struct fw_cdev_get_info *get_info = buffer;
266 struct fw_cdev_event_bus_reset bus_reset;
267
268 client->version = get_info->version;
269 get_info->version = FW_CDEV_VERSION;
270
271 if (get_info->rom != 0) {
272 void __user *uptr = u64_to_uptr(get_info->rom);
273 size_t want = get_info->rom_length;
274 size_t have = client->device->config_rom_length * 4;
275
276 if (copy_to_user(uptr, client->device->config_rom,
277 min(want, have)))
278 return -EFAULT;
279 }
280 get_info->rom_length = client->device->config_rom_length * 4;
281
282 client->bus_reset_closure = get_info->bus_reset_closure;
283 if (get_info->bus_reset != 0) {
284 void __user *uptr = u64_to_uptr(get_info->bus_reset);
285
286 fill_bus_reset_event(&bus_reset, client);
287 if (copy_to_user(uptr, &bus_reset, sizeof(bus_reset)))
288 return -EFAULT;
289 }
290
291 get_info->card = client->device->card->index;
292
293 return 0;
294}
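/*
 * Illustrative user-space counterpart to ioctl_get_info() (a sketch,
 * assuming the FW_CDEV_IOC_GET_INFO command and structs from
 * <linux/firewire-cdev.h>):
 *
 *	struct fw_cdev_get_info info = { 0 };
 *	__u32 rom[256];
 *
 *	info.version    = FW_CDEV_VERSION;
 *	info.rom        = (__u64)(unsigned long)rom;
 *	info.rom_length = sizeof(rom);
 *	if (ioctl(fd, FW_CDEV_IOC_GET_INFO, &info) == 0)
 *		printf("ABI %u, rom %u bytes, card %u\n",
 *		       info.version, info.rom_length, info.card);
 */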
295
296static void
297add_client_resource(struct client *client, struct client_resource *resource)
298{
299 unsigned long flags;
300
301 spin_lock_irqsave(&client->lock, flags);
302 list_add_tail(&resource->link, &client->resource_list);
303 resource->handle = client->resource_handle++;
304 spin_unlock_irqrestore(&client->lock, flags);
305}
306
307static int
308release_client_resource(struct client *client, u32 handle,
309 struct client_resource **resource)
310{
311 struct client_resource *r;
312 unsigned long flags;
313
314 spin_lock_irqsave(&client->lock, flags);
315 list_for_each_entry(r, &client->resource_list, link) {
316 if (r->handle == handle) {
317 list_del(&r->link);
318 break;
319 }
320 }
321 spin_unlock_irqrestore(&client->lock, flags);
322
323 if (&r->link == &client->resource_list)
324 return -EINVAL;
325
326 if (resource)
327 *resource = r;
328 else
329 r->release(client, r);
330
331 return 0;
332}
333
334static void
335release_transaction(struct client *client, struct client_resource *resource)
336{
337 struct response *response =
338 container_of(resource, struct response, resource);
339
340 fw_cancel_transaction(client->device->card, &response->transaction);
341}
342
343static void
344complete_transaction(struct fw_card *card, int rcode,
345 void *payload, size_t length, void *data)
346{
347 struct response *response = data;
348 struct client *client = response->client;
349 unsigned long flags;
350
351 if (length < response->response.length)
352 response->response.length = length;
353 if (rcode == RCODE_COMPLETE)
354 memcpy(response->response.data, payload,
355 response->response.length);
356
357 spin_lock_irqsave(&client->lock, flags);
358 list_del(&response->resource.link);
359 spin_unlock_irqrestore(&client->lock, flags);
360
361 response->response.type = FW_CDEV_EVENT_RESPONSE;
362 response->response.rcode = rcode;
363 queue_event(client, &response->event,
364 &response->response, sizeof(response->response),
365 response->response.data, response->response.length);
366}
367
368static int ioctl_send_request(struct client *client, void *buffer)
369{
370 struct fw_device *device = client->device;
371 struct fw_cdev_send_request *request = buffer;
372 struct response *response;
373
374 /* What is the biggest size we'll accept, really? */
375 if (request->length > 4096)
376 return -EINVAL;
377
378 response = kmalloc(sizeof(*response) + request->length, GFP_KERNEL);
379 if (response == NULL)
380 return -ENOMEM;
381
382 response->client = client;
383 response->response.length = request->length;
384 response->response.closure = request->closure;
385
386 if (request->data &&
387 copy_from_user(response->response.data,
388 u64_to_uptr(request->data), request->length)) {
389 kfree(response);
390 return -EFAULT;
391 }
392
393 response->resource.release = release_transaction;
394 add_client_resource(client, &response->resource);
395
396 fw_send_request(device->card, &response->transaction,
397 request->tcode & 0x1f,
398 device->node->node_id,
399 request->generation,
400 device->node->max_speed,
401 request->offset,
402 response->response.data, request->length,
403 complete_transaction, response);
404
405 if (request->data)
406		return sizeof(*request) + request->length;
407	else
408		return sizeof(*request);
409}
410
411struct address_handler {
412 struct fw_address_handler handler;
413 __u64 closure;
414 struct client *client;
415 struct client_resource resource;
416};
417
418struct request {
419 struct fw_request *request;
420 void *data;
421 size_t length;
422 struct client_resource resource;
423};
424
425struct request_event {
426 struct event event;
427 struct fw_cdev_event_request request;
428};
429
430static void
431release_request(struct client *client, struct client_resource *resource)
432{
433 struct request *request =
434 container_of(resource, struct request, resource);
435
436 fw_send_response(client->device->card, request->request,
437 RCODE_CONFLICT_ERROR);
438 kfree(request);
439}
440
441static void
442handle_request(struct fw_card *card, struct fw_request *r,
443 int tcode, int destination, int source,
444 int generation, int speed,
445 unsigned long long offset,
446 void *payload, size_t length, void *callback_data)
447{
448 struct address_handler *handler = callback_data;
449 struct request *request;
450 struct request_event *e;
451 struct client *client = handler->client;
452
453 request = kmalloc(sizeof(*request), GFP_ATOMIC);
454 e = kmalloc(sizeof(*e), GFP_ATOMIC);
455 if (request == NULL || e == NULL) {
456 kfree(request);
457 kfree(e);
458 fw_send_response(card, r, RCODE_CONFLICT_ERROR);
459 return;
460 }
461
462 request->request = r;
463 request->data = payload;
464 request->length = length;
465
466 request->resource.release = release_request;
467 add_client_resource(client, &request->resource);
468
469 e->request.type = FW_CDEV_EVENT_REQUEST;
470 e->request.tcode = tcode;
471 e->request.offset = offset;
472 e->request.length = length;
473 e->request.handle = request->resource.handle;
474 e->request.closure = handler->closure;
475
476 queue_event(client, &e->event,
477 &e->request, sizeof(e->request), payload, length);
478}
479
480static void
481release_address_handler(struct client *client,
482 struct client_resource *resource)
483{
484 struct address_handler *handler =
485 container_of(resource, struct address_handler, resource);
486
487 fw_core_remove_address_handler(&handler->handler);
488 kfree(handler);
489}
490
491static int ioctl_allocate(struct client *client, void *buffer)
492{
493 struct fw_cdev_allocate *request = buffer;
494 struct address_handler *handler;
495 struct fw_address_region region;
496
497 handler = kmalloc(sizeof(*handler), GFP_KERNEL);
498 if (handler == NULL)
499 return -ENOMEM;
500
501 region.start = request->offset;
502 region.end = request->offset + request->length;
503 handler->handler.length = request->length;
504 handler->handler.address_callback = handle_request;
505 handler->handler.callback_data = handler;
506 handler->closure = request->closure;
507 handler->client = client;
508
509 if (fw_core_add_address_handler(&handler->handler, &region) < 0) {
510 kfree(handler);
511 return -EBUSY;
512 }
513
514 handler->resource.release = release_address_handler;
515 add_client_resource(client, &handler->resource);
516 request->handle = handler->resource.handle;
517
518 return 0;
519}
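/*
 * Illustrative user-space sketch of the allocate/request/response
 * cycle (assumes FW_CDEV_IOC_ALLOCATE and FW_CDEV_IOC_SEND_RESPONSE
 * from <linux/firewire-cdev.h>; the address range is hypothetical):
 *
 *	struct fw_cdev_allocate a = {
 *		.offset  = 0xfffff0010000ULL,
 *		.length  = 0x100,
 *		.closure = (__u64)(unsigned long)my_context,
 *	};
 *	ioctl(fd, FW_CDEV_IOC_ALLOCATE, &a);
 *
 * Requests to that range then arrive as FW_CDEV_EVENT_REQUEST events
 * on read(), and each is answered via FW_CDEV_IOC_SEND_RESPONSE,
 * quoting the handle carried in the event.
 */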
520
521static int ioctl_deallocate(struct client *client, void *buffer)
522{
523 struct fw_cdev_deallocate *request = buffer;
524
525 return release_client_resource(client, request->handle, NULL);
526}
527
528static int ioctl_send_response(struct client *client, void *buffer)
529{
530 struct fw_cdev_send_response *request = buffer;
531 struct client_resource *resource;
532 struct request *r;
533
534 if (release_client_resource(client, request->handle, &resource) < 0)
535 return -EINVAL;
536 r = container_of(resource, struct request, resource);
537 if (request->length < r->length)
538 r->length = request->length;
539 if (copy_from_user(r->data, u64_to_uptr(request->data), r->length))
540 return -EFAULT;
541
542 fw_send_response(client->device->card, r->request, request->rcode);
543 kfree(r);
544
545 return 0;
546}
547
548static int ioctl_initiate_bus_reset(struct client *client, void *buffer)
549{
550 struct fw_cdev_initiate_bus_reset *request = buffer;
551 int short_reset;
552
553 short_reset = (request->type == FW_CDEV_SHORT_RESET);
554
555 return fw_core_initiate_bus_reset(client->device->card, short_reset);
556}
557
558struct descriptor {
559 struct fw_descriptor d;
560 struct client_resource resource;
561 u32 data[0];
562};
563
564static void release_descriptor(struct client *client,
565 struct client_resource *resource)
566{
567 struct descriptor *descriptor =
568 container_of(resource, struct descriptor, resource);
569
570 fw_core_remove_descriptor(&descriptor->d);
571 kfree(descriptor);
572}
573
574static int ioctl_add_descriptor(struct client *client, void *buffer)
575{
576 struct fw_cdev_add_descriptor *request = buffer;
577 struct descriptor *descriptor;
578 int retval;
579
580 if (request->length > 256)
581 return -EINVAL;
582
583 descriptor =
584 kmalloc(sizeof(*descriptor) + request->length * 4, GFP_KERNEL);
585 if (descriptor == NULL)
586 return -ENOMEM;
587
588 if (copy_from_user(descriptor->data,
589 u64_to_uptr(request->data), request->length * 4)) {
590 kfree(descriptor);
591 return -EFAULT;
592 }
593
594 descriptor->d.length = request->length;
595 descriptor->d.immediate = request->immediate;
596 descriptor->d.key = request->key;
597 descriptor->d.data = descriptor->data;
598
599 retval = fw_core_add_descriptor(&descriptor->d);
600 if (retval < 0) {
601 kfree(descriptor);
602 return retval;
603 }
604
605 descriptor->resource.release = release_descriptor;
606 add_client_resource(client, &descriptor->resource);
607 request->handle = descriptor->resource.handle;
608
609 return 0;
610}
611
612static int ioctl_remove_descriptor(struct client *client, void *buffer)
613{
614 struct fw_cdev_remove_descriptor *request = buffer;
615
616 return release_client_resource(client, request->handle, NULL);
617}
618
619static void
620iso_callback(struct fw_iso_context *context, u32 cycle,
621 size_t header_length, void *header, void *data)
622{
623 struct client *client = data;
624 struct iso_interrupt *interrupt;
625
626 interrupt = kzalloc(sizeof(*interrupt) + header_length, GFP_ATOMIC);
627 if (interrupt == NULL)
628 return;
629
630 interrupt->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT;
631 interrupt->interrupt.closure = client->iso_closure;
632 interrupt->interrupt.cycle = cycle;
633 interrupt->interrupt.header_length = header_length;
634 memcpy(interrupt->interrupt.header, header, header_length);
635 queue_event(client, &interrupt->event,
636 &interrupt->interrupt,
637 sizeof(interrupt->interrupt) + header_length, NULL, 0);
638}
639
640static int ioctl_create_iso_context(struct client *client, void *buffer)
641{
642 struct fw_cdev_create_iso_context *request = buffer;
643
644 if (request->channel > 63)
645 return -EINVAL;
646
647 switch (request->type) {
648 case FW_ISO_CONTEXT_RECEIVE:
649 if (request->header_size < 4 || (request->header_size & 3))
650 return -EINVAL;
651
652 break;
653
654 case FW_ISO_CONTEXT_TRANSMIT:
655 if (request->speed > SCODE_3200)
656 return -EINVAL;
657
658 break;
659
660 default:
661 return -EINVAL;
662 }
663
664 client->iso_closure = request->closure;
665 client->iso_context = fw_iso_context_create(client->device->card,
666 request->type,
667 request->channel,
668 request->speed,
669 request->header_size,
670 iso_callback, client);
671 if (IS_ERR(client->iso_context))
672 return PTR_ERR(client->iso_context);
673
674 /* We only support one context at this time. */
675 request->handle = 0;
676
677 return 0;
678}
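/*
 * Illustrative user-space sketch for context creation (values are
 * typical guesses; the constant names are assumed to come from
 * <linux/firewire-cdev.h>):
 *
 *	struct fw_cdev_create_iso_context c = {
 *		.type        = FW_CDEV_ISO_CONTEXT_RECEIVE,
 *		.channel     = 5,	(hypothetical channel)
 *		.speed       = SCODE_400,
 *		.header_size = 4,	(one quadlet per packet)
 *	};
 *	ioctl(fd, FW_CDEV_IOC_CREATE_ISO_CONTEXT, &c);
 *
 * c.handle comes back as 0, matching the one-context limit above.
 */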
679
680static int ioctl_queue_iso(struct client *client, void *buffer)
681{
682 struct fw_cdev_queue_iso *request = buffer;
683 struct fw_cdev_iso_packet __user *p, *end, *next;
684 struct fw_iso_context *ctx = client->iso_context;
685 unsigned long payload, buffer_end, header_length;
686 int count;
687 struct {
688 struct fw_iso_packet packet;
689 u8 header[256];
690 } u;
691
692 if (ctx == NULL || request->handle != 0)
693 return -EINVAL;
694
695 /*
696 * If the user passes a non-NULL data pointer, has mmap()'ed
697 * the iso buffer, and the pointer points inside the buffer,
698	 * we set up the payload pointers accordingly.  Otherwise we
699 * set them both to 0, which will still let packets with
700 * payload_length == 0 through. In other words, if no packets
701 * use the indirect payload, the iso buffer need not be mapped
702 * and the request->data pointer is ignored.
703 */
704
705 payload = (unsigned long)request->data - client->vm_start;
706 buffer_end = client->buffer.page_count << PAGE_SHIFT;
707 if (request->data == 0 || client->buffer.pages == NULL ||
708 payload >= buffer_end) {
709 payload = 0;
710 buffer_end = 0;
711 }
712
713 if (!access_ok(VERIFY_READ, request->packets, request->size))
714 return -EFAULT;
715
716 p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(request->packets);
717 end = (void __user *)p + request->size;
718 count = 0;
719 while (p < end) {
720 if (__copy_from_user(&u.packet, p, sizeof(*p)))
721 return -EFAULT;
722
723 if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) {
724 header_length = u.packet.header_length;
725 } else {
726 /*
727 * We require that header_length is a multiple of
728 * the fixed header size, ctx->header_size.
729 */
730 if (ctx->header_size == 0) {
731 if (u.packet.header_length > 0)
732 return -EINVAL;
733 } else if (u.packet.header_length % ctx->header_size != 0) {
734 return -EINVAL;
735 }
736 header_length = 0;
737 }
738
739 next = (struct fw_cdev_iso_packet __user *)
740 &p->header[header_length / 4];
741 if (next > end)
742 return -EINVAL;
743 if (__copy_from_user
744 (u.packet.header, p->header, header_length))
745 return -EFAULT;
746 if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
747 u.packet.header_length + u.packet.payload_length > 0)
748 return -EINVAL;
749 if (payload + u.packet.payload_length > buffer_end)
750 return -EINVAL;
751
752 if (fw_iso_context_queue(ctx, &u.packet,
753 &client->buffer, payload))
754 break;
755
756 p = next;
757 payload += u.packet.payload_length;
758 count++;
759 }
760
761 request->size -= uptr_to_u64(p) - request->packets;
762 request->packets = uptr_to_u64(p);
763 request->data = client->vm_start + payload;
764
765 return count;
766}
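/*
 * Illustrative layout of the packet stream parsed above:
 * request->packets points at a run of variable-length records,
 *
 *	+---------------------------+------------------------+----
 *	| struct fw_cdev_iso_packet | header_length bytes of |
 *	| (control fields)          | transmit header data   | next...
 *	+---------------------------+------------------------+----
 *
 * which is why the next record is found by stepping header_length / 4
 * quadlets past p->header.  For receive contexts the header data is
 * produced by the controller, so the in-stream header area is empty
 * (the local header_length is forced to 0 above).
 */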
767
768static int ioctl_start_iso(struct client *client, void *buffer)
769{
770 struct fw_cdev_start_iso *request = buffer;
771
772	if (client->iso_context == NULL || request->handle != 0)
773 return -EINVAL;
774 if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE) {
775 if (request->tags == 0 || request->tags > 15)
776 return -EINVAL;
777
778 if (request->sync > 15)
779 return -EINVAL;
780 }
781
782 return fw_iso_context_start(client->iso_context, request->cycle,
783 request->sync, request->tags);
784}
785
786static int ioctl_stop_iso(struct client *client, void *buffer)
787{
788 struct fw_cdev_stop_iso *request = buffer;
789
790	if (client->iso_context == NULL || request->handle != 0)
791 return -EINVAL;
792
793 return fw_iso_context_stop(client->iso_context);
794}
795
796static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
797 ioctl_get_info,
798 ioctl_send_request,
799 ioctl_allocate,
800 ioctl_deallocate,
801 ioctl_send_response,
802 ioctl_initiate_bus_reset,
803 ioctl_add_descriptor,
804 ioctl_remove_descriptor,
805 ioctl_create_iso_context,
806 ioctl_queue_iso,
807 ioctl_start_iso,
808 ioctl_stop_iso,
809};
810
811static int
812dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg)
813{
814 char buffer[256];
815 int retval;
816
817 if (_IOC_TYPE(cmd) != '#' ||
818 _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers))
819 return -EINVAL;
820
821 if (_IOC_DIR(cmd) & _IOC_WRITE) {
822 if (_IOC_SIZE(cmd) > sizeof(buffer) ||
823 copy_from_user(buffer, arg, _IOC_SIZE(cmd)))
824 return -EFAULT;
825 }
826
827 retval = ioctl_handlers[_IOC_NR(cmd)](client, buffer);
828 if (retval < 0)
829 return retval;
830
831 if (_IOC_DIR(cmd) & _IOC_READ) {
832 if (_IOC_SIZE(cmd) > sizeof(buffer) ||
833 copy_to_user(arg, buffer, _IOC_SIZE(cmd)))
834 return -EFAULT;
835 }
836
837 return 0;
838}
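/*
 * Note on the dispatch above: it relies on the command numbers in
 * <linux/firewire-cdev.h> being defined with ioctl type '#' and
 * consecutive _IOC_NR values, e.g. (assumed definition):
 *
 *	#define FW_CDEV_IOC_GET_INFO \
 *		_IOWR('#', 0x00, struct fw_cdev_get_info)
 *
 * _IOC_DIR() and _IOC_SIZE() then drive the generic copy-in/copy-out,
 * so no per-command marshalling code is needed.
 */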
839
840static long
841fw_device_op_ioctl(struct file *file,
842 unsigned int cmd, unsigned long arg)
843{
844 struct client *client = file->private_data;
845
846 return dispatch_ioctl(client, cmd, (void __user *) arg);
847}
848
849#ifdef CONFIG_COMPAT
850static long
851fw_device_op_compat_ioctl(struct file *file,
852 unsigned int cmd, unsigned long arg)
853{
854 struct client *client = file->private_data;
855
856 return dispatch_ioctl(client, cmd, compat_ptr(arg));
857}
858#endif
859
860static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
861{
862 struct client *client = file->private_data;
863 enum dma_data_direction direction;
864 unsigned long size;
865 int page_count, retval;
866
867 /* FIXME: We could support multiple buffers, but we don't. */
868 if (client->buffer.pages != NULL)
869 return -EBUSY;
870
871 if (!(vma->vm_flags & VM_SHARED))
872 return -EINVAL;
873
874 if (vma->vm_start & ~PAGE_MASK)
875 return -EINVAL;
876
877 client->vm_start = vma->vm_start;
878 size = vma->vm_end - vma->vm_start;
879 page_count = size >> PAGE_SHIFT;
880 if (size & ~PAGE_MASK)
881 return -EINVAL;
882
883 if (vma->vm_flags & VM_WRITE)
884 direction = DMA_TO_DEVICE;
885 else
886 direction = DMA_FROM_DEVICE;
887
888 retval = fw_iso_buffer_init(&client->buffer, client->device->card,
889 page_count, direction);
890 if (retval < 0)
891 return retval;
892
893 retval = fw_iso_buffer_map(&client->buffer, vma);
894 if (retval < 0)
895 fw_iso_buffer_destroy(&client->buffer, client->device->card);
896
897 return retval;
898}
899
900static int fw_device_op_release(struct inode *inode, struct file *file)
901{
902 struct client *client = file->private_data;
903 struct event *e, *next_e;
904 struct client_resource *r, *next_r;
905 unsigned long flags;
906
907 if (client->buffer.pages)
908 fw_iso_buffer_destroy(&client->buffer, client->device->card);
909
910 if (client->iso_context)
911 fw_iso_context_destroy(client->iso_context);
912
913 list_for_each_entry_safe(r, next_r, &client->resource_list, link)
914 r->release(client, r);
915
916 /*
917 * FIXME: We should wait for the async tasklets to stop
918 * running before freeing the memory.
919 */
920
921 list_for_each_entry_safe(e, next_e, &client->event_list, link)
922 kfree(e);
923
924 spin_lock_irqsave(&client->device->card->lock, flags);
925 list_del(&client->link);
926 spin_unlock_irqrestore(&client->device->card->lock, flags);
927
928 fw_device_put(client->device);
929 kfree(client);
930
931 return 0;
932}
933
934static unsigned int fw_device_op_poll(struct file *file, poll_table * pt)
935{
936 struct client *client = file->private_data;
937 unsigned int mask = 0;
938
939 poll_wait(file, &client->wait, pt);
940
941 if (fw_device_is_shutdown(client->device))
942 mask |= POLLHUP | POLLERR;
943 if (!list_empty(&client->event_list))
944 mask |= POLLIN | POLLRDNORM;
945
946 return mask;
947}
948
949const struct file_operations fw_device_ops = {
950 .owner = THIS_MODULE,
951 .open = fw_device_op_open,
952 .read = fw_device_op_read,
953 .unlocked_ioctl = fw_device_op_ioctl,
954 .poll = fw_device_op_poll,
955 .release = fw_device_op_release,
956 .mmap = fw_device_op_mmap,
957
958#ifdef CONFIG_COMPAT
959 .compat_ioctl = fw_device_op_compat_ioctl,
960#endif
961};
diff --git a/drivers/firewire/fw-device.c b/drivers/firewire/fw-device.c
new file mode 100644
index 000000000000..c1ce465d9710
--- /dev/null
+++ b/drivers/firewire/fw-device.c
@@ -0,0 +1,813 @@
1/*
2 * Device probing and sysfs code.
3 *
4 * Copyright (C) 2005-2006 Kristian Hoegsberg <krh@bitplanet.net>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software Foundation,
18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21#include <linux/module.h>
22#include <linux/wait.h>
23#include <linux/errno.h>
24#include <linux/kthread.h>
25#include <linux/device.h>
26#include <linux/delay.h>
27#include <linux/idr.h>
28#include <linux/rwsem.h>
29#include <asm/semaphore.h>
30#include <linux/ctype.h>
31#include "fw-transaction.h"
32#include "fw-topology.h"
33#include "fw-device.h"
34
35void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 * p)
36{
37 ci->p = p + 1;
38 ci->end = ci->p + (p[0] >> 16);
39}
40EXPORT_SYMBOL(fw_csr_iterator_init);
41
42int fw_csr_iterator_next(struct fw_csr_iterator *ci, int *key, int *value)
43{
44 *key = *ci->p >> 24;
45 *value = *ci->p & 0xffffff;
46
47 return ci->p++ < ci->end;
48}
49EXPORT_SYMBOL(fw_csr_iterator_next);
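/*
 * Illustrative sketch: the iterator above walks the key/value entries
 * of one CSR directory, e.g. to pick out the vendor id:
 *
 *	struct fw_csr_iterator ci;
 *	int key, value, vendor = 0;
 *
 *	fw_csr_iterator_init(&ci, directory);
 *	while (fw_csr_iterator_next(&ci, &key, &value))
 *		if (key == CSR_VENDOR)
 *			vendor = value;
 */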
50
51static int is_fw_unit(struct device *dev);
52
53static int match_unit_directory(u32 * directory, const struct fw_device_id *id)
54{
55 struct fw_csr_iterator ci;
56 int key, value, match;
57
58 match = 0;
59 fw_csr_iterator_init(&ci, directory);
60 while (fw_csr_iterator_next(&ci, &key, &value)) {
61 if (key == CSR_VENDOR && value == id->vendor)
62 match |= FW_MATCH_VENDOR;
63 if (key == CSR_MODEL && value == id->model)
64 match |= FW_MATCH_MODEL;
65 if (key == CSR_SPECIFIER_ID && value == id->specifier_id)
66 match |= FW_MATCH_SPECIFIER_ID;
67 if (key == CSR_VERSION && value == id->version)
68 match |= FW_MATCH_VERSION;
69 }
70
71 return (match & id->match_flags) == id->match_flags;
72}
73
74static int fw_unit_match(struct device *dev, struct device_driver *drv)
75{
76 struct fw_unit *unit = fw_unit(dev);
77 struct fw_driver *driver = fw_driver(drv);
78 int i;
79
80 /* We only allow binding to fw_units. */
81 if (!is_fw_unit(dev))
82 return 0;
83
84 for (i = 0; driver->id_table[i].match_flags != 0; i++) {
85 if (match_unit_directory(unit->directory, &driver->id_table[i]))
86 return 1;
87 }
88
89 return 0;
90}
91
92static int get_modalias(struct fw_unit *unit, char *buffer, size_t buffer_size)
93{
94 struct fw_device *device = fw_device(unit->device.parent);
95 struct fw_csr_iterator ci;
96
97 int key, value;
98 int vendor = 0;
99 int model = 0;
100 int specifier_id = 0;
101 int version = 0;
102
103 fw_csr_iterator_init(&ci, &device->config_rom[5]);
104 while (fw_csr_iterator_next(&ci, &key, &value)) {
105 switch (key) {
106 case CSR_VENDOR:
107 vendor = value;
108 break;
109 case CSR_MODEL:
110 model = value;
111 break;
112 }
113 }
114
115 fw_csr_iterator_init(&ci, unit->directory);
116 while (fw_csr_iterator_next(&ci, &key, &value)) {
117 switch (key) {
118 case CSR_SPECIFIER_ID:
119 specifier_id = value;
120 break;
121 case CSR_VERSION:
122 version = value;
123 break;
124 }
125 }
126
127 return snprintf(buffer, buffer_size,
128 "ieee1394:ven%08Xmo%08Xsp%08Xver%08X",
129 vendor, model, specifier_id, version);
130}
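/*
 * Example (values hypothetical): a unit with vendor 0x00a0b8, no
 * model entry, and the SBP-2 specifier/version pair 0x00609e/0x010483
 * produces
 *
 *	ieee1394:ven0000A0B8mo00000000sp0000609Ever00010483
 *
 * which user-space module aliases are matched against.
 */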
131
132static int
133fw_unit_uevent(struct device *dev, char **envp, int num_envp,
134 char *buffer, int buffer_size)
135{
136 struct fw_unit *unit = fw_unit(dev);
137 char modalias[64];
138 int length = 0;
139 int i = 0;
140
141 get_modalias(unit, modalias, sizeof(modalias));
142
143 if (add_uevent_var(envp, num_envp, &i,
144 buffer, buffer_size, &length,
145 "MODALIAS=%s", modalias))
146 return -ENOMEM;
147
148 envp[i] = NULL;
149
150 return 0;
151}
152
153struct bus_type fw_bus_type = {
154 .name = "firewire",
155 .match = fw_unit_match,
156};
157EXPORT_SYMBOL(fw_bus_type);
158
159struct fw_device *fw_device_get(struct fw_device *device)
160{
161 get_device(&device->device);
162
163 return device;
164}
165
166void fw_device_put(struct fw_device *device)
167{
168 put_device(&device->device);
169}
170
171static void fw_device_release(struct device *dev)
172{
173 struct fw_device *device = fw_device(dev);
174 unsigned long flags;
175
176 /*
177 * Take the card lock so we don't set this to NULL while a
178 * FW_NODE_UPDATED callback is being handled.
179 */
180 spin_lock_irqsave(&device->card->lock, flags);
181 device->node->data = NULL;
182 spin_unlock_irqrestore(&device->card->lock, flags);
183
184 fw_node_put(device->node);
185 fw_card_put(device->card);
186 kfree(device->config_rom);
187 kfree(device);
188}
189
190int fw_device_enable_phys_dma(struct fw_device *device)
191{
192 return device->card->driver->enable_phys_dma(device->card,
193 device->node_id,
194 device->generation);
195}
196EXPORT_SYMBOL(fw_device_enable_phys_dma);
197
198struct config_rom_attribute {
199 struct device_attribute attr;
200 u32 key;
201};
202
203static ssize_t
204show_immediate(struct device *dev, struct device_attribute *dattr, char *buf)
205{
206 struct config_rom_attribute *attr =
207 container_of(dattr, struct config_rom_attribute, attr);
208 struct fw_csr_iterator ci;
209 u32 *dir;
210 int key, value;
211
212 if (is_fw_unit(dev))
213 dir = fw_unit(dev)->directory;
214 else
215 dir = fw_device(dev)->config_rom + 5;
216
217 fw_csr_iterator_init(&ci, dir);
218 while (fw_csr_iterator_next(&ci, &key, &value))
219 if (attr->key == key)
220 return snprintf(buf, buf ? PAGE_SIZE : 0,
221 "0x%06x\n", value);
222
223 return -ENOENT;
224}
225
226#define IMMEDIATE_ATTR(name, key) \
227 { __ATTR(name, S_IRUGO, show_immediate, NULL), key }
228
229static ssize_t
230show_text_leaf(struct device *dev, struct device_attribute *dattr, char *buf)
231{
232 struct config_rom_attribute *attr =
233 container_of(dattr, struct config_rom_attribute, attr);
234 struct fw_csr_iterator ci;
235 u32 *dir, *block = NULL, *p, *end;
236 int length, key, value, last_key = 0;
237 char *b;
238
239 if (is_fw_unit(dev))
240 dir = fw_unit(dev)->directory;
241 else
242 dir = fw_device(dev)->config_rom + 5;
243
244 fw_csr_iterator_init(&ci, dir);
245 while (fw_csr_iterator_next(&ci, &key, &value)) {
246 if (attr->key == last_key &&
247 key == (CSR_DESCRIPTOR | CSR_LEAF))
248 block = ci.p - 1 + value;
249 last_key = key;
250 }
251
252 if (block == NULL)
253 return -ENOENT;
254
255 length = min(block[0] >> 16, 256U);
256 if (length < 3)
257 return -ENOENT;
258
259 if (block[1] != 0 || block[2] != 0)
260 /* Unknown encoding. */
261 return -ENOENT;
262
263 if (buf == NULL)
264 return length * 4;
265
266 b = buf;
267 end = &block[length + 1];
268 for (p = &block[3]; p < end; p++, b += 4)
269 * (u32 *) b = (__force u32) __cpu_to_be32(*p);
270
271 /* Strip trailing whitespace and add newline. */
272 while (b--, (isspace(*b) || *b == '\0') && b > buf);
273 strcpy(b + 1, "\n");
274
275 return b + 2 - buf;
276}
277
278#define TEXT_LEAF_ATTR(name, key) \
279 { __ATTR(name, S_IRUGO, show_text_leaf, NULL), key }
280
281static struct config_rom_attribute config_rom_attributes[] = {
282 IMMEDIATE_ATTR(vendor, CSR_VENDOR),
283 IMMEDIATE_ATTR(hardware_version, CSR_HARDWARE_VERSION),
284 IMMEDIATE_ATTR(specifier_id, CSR_SPECIFIER_ID),
285 IMMEDIATE_ATTR(version, CSR_VERSION),
286 IMMEDIATE_ATTR(model, CSR_MODEL),
287 TEXT_LEAF_ATTR(vendor_name, CSR_VENDOR),
288 TEXT_LEAF_ATTR(model_name, CSR_MODEL),
289 TEXT_LEAF_ATTR(hardware_version_name, CSR_HARDWARE_VERSION),
290};
291
292static void
293init_fw_attribute_group(struct device *dev,
294 struct device_attribute *attrs,
295 struct fw_attribute_group *group)
296{
297 struct device_attribute *attr;
298 int i, j;
299
300 for (j = 0; attrs[j].attr.name != NULL; j++)
301 group->attrs[j] = &attrs[j].attr;
302
303 for (i = 0; i < ARRAY_SIZE(config_rom_attributes); i++) {
304 attr = &config_rom_attributes[i].attr;
305 if (attr->show(dev, attr, NULL) < 0)
306 continue;
307 group->attrs[j++] = &attr->attr;
308 }
309
310 BUG_ON(j >= ARRAY_SIZE(group->attrs));
311 group->attrs[j++] = NULL;
312 group->groups[0] = &group->group;
313 group->groups[1] = NULL;
314 group->group.attrs = group->attrs;
315 dev->groups = group->groups;
316}
317
318static ssize_t
319modalias_show(struct device *dev,
320 struct device_attribute *attr, char *buf)
321{
322 struct fw_unit *unit = fw_unit(dev);
323 int length;
324
325 length = get_modalias(unit, buf, PAGE_SIZE);
326 strcpy(buf + length, "\n");
327
328 return length + 1;
329}
330
331static ssize_t
332rom_index_show(struct device *dev,
333 struct device_attribute *attr, char *buf)
334{
335 struct fw_device *device = fw_device(dev->parent);
336 struct fw_unit *unit = fw_unit(dev);
337
338 return snprintf(buf, PAGE_SIZE, "%d\n",
339 (int)(unit->directory - device->config_rom));
340}
341
342static struct device_attribute fw_unit_attributes[] = {
343 __ATTR_RO(modalias),
344 __ATTR_RO(rom_index),
345 __ATTR_NULL,
346};
347
348static ssize_t
349config_rom_show(struct device *dev, struct device_attribute *attr, char *buf)
350{
351 struct fw_device *device = fw_device(dev);
352
353 memcpy(buf, device->config_rom, device->config_rom_length * 4);
354
355 return device->config_rom_length * 4;
356}
357
358static ssize_t
359guid_show(struct device *dev, struct device_attribute *attr, char *buf)
360{
361 struct fw_device *device = fw_device(dev);
362 u64 guid;
363
364 guid = ((u64)device->config_rom[3] << 32) | device->config_rom[4];
365
366 return snprintf(buf, PAGE_SIZE, "0x%016llx\n",
367 (unsigned long long)guid);
368}
369
370static struct device_attribute fw_device_attributes[] = {
371 __ATTR_RO(config_rom),
372 __ATTR_RO(guid),
373 __ATTR_NULL,
374};
375
376struct read_quadlet_callback_data {
377 struct completion done;
378 int rcode;
379 u32 data;
380};
381
382static void
383complete_transaction(struct fw_card *card, int rcode,
384 void *payload, size_t length, void *data)
385{
386 struct read_quadlet_callback_data *callback_data = data;
387
388 if (rcode == RCODE_COMPLETE)
389 callback_data->data = be32_to_cpu(*(__be32 *)payload);
390 callback_data->rcode = rcode;
391 complete(&callback_data->done);
392}
393
394static int read_rom(struct fw_device *device, int index, u32 * data)
395{
396 struct read_quadlet_callback_data callback_data;
397 struct fw_transaction t;
398 u64 offset;
399
400 init_completion(&callback_data.done);
401
402 offset = 0xfffff0000400ULL + index * 4;
403 fw_send_request(device->card, &t, TCODE_READ_QUADLET_REQUEST,
404 device->node_id,
405 device->generation, SCODE_100,
406 offset, NULL, 4, complete_transaction, &callback_data);
407
408 wait_for_completion(&callback_data.done);
409
410 *data = callback_data.data;
411
412 return callback_data.rcode;
413}
414
415static int read_bus_info_block(struct fw_device *device)
416{
417 static u32 rom[256];
418 u32 stack[16], sp, key;
419 int i, end, length;
420
421 /* First read the bus info block. */
422 for (i = 0; i < 5; i++) {
423 if (read_rom(device, i, &rom[i]) != RCODE_COMPLETE)
424 return -1;
425 /*
426 * As per IEEE1212 7.2, during power-up, devices can
427 * reply with a 0 for the first quadlet of the config
428 * rom to indicate that they are booting (for example,
429	 * if the firmware is on the disk of an external
430	 * hard disk). In that case we just fail, and the
431 * retry mechanism will try again later.
432 */
433 if (i == 0 && rom[i] == 0)
434 return -1;
435 }
436
437 /*
438 * Now parse the config rom. The config rom is a recursive
439 * directory structure so we parse it using a stack of
440 * references to the blocks that make up the structure. We
441 * push a reference to the root directory on the stack to
442 * start things off.
443 */
444 length = i;
445 sp = 0;
446 stack[sp++] = 0xc0000005;
447 while (sp > 0) {
448 /*
449		 * Pop the next block reference off the stack.  The
450		 * lower 24 bits are the offset into the config rom,
451		 * the upper 8 bits are the type of the reference to
452		 * the block.
453 */
454 key = stack[--sp];
455 i = key & 0xffffff;
456 if (i >= ARRAY_SIZE(rom))
457 /*
458 * The reference points outside the standard
459 * config rom area, something's fishy.
460 */
461 return -1;
462
463 /* Read header quadlet for the block to get the length. */
464 if (read_rom(device, i, &rom[i]) != RCODE_COMPLETE)
465 return -1;
466 end = i + (rom[i] >> 16) + 1;
467 i++;
468 if (end > ARRAY_SIZE(rom))
469 /*
470 * This block extends outside standard config
471 * area (and the array we're reading it
472 * into). That's broken, so ignore this
473 * device.
474 */
475 return -1;
476
477 /*
478 * Now read in the block. If this is a directory
479 * block, check the entries as we read them to see if
480 * it references another block, and push it in that case.
481 */
482 while (i < end) {
483 if (read_rom(device, i, &rom[i]) != RCODE_COMPLETE)
484 return -1;
485 if ((key >> 30) == 3 && (rom[i] >> 30) > 1 &&
486 sp < ARRAY_SIZE(stack))
487 stack[sp++] = i + rom[i];
488 i++;
489 }
490 if (length < i)
491 length = i;
492 }
493
494 device->config_rom = kmalloc(length * 4, GFP_KERNEL);
495 if (device->config_rom == NULL)
496 return -1;
497 memcpy(device->config_rom, rom, length * 4);
498 device->config_rom_length = length;
499
500 return 0;
501}
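/*
 * Illustrative shape of the config rom the parser above walks
 * (quadlet indices):
 *
 *	rom[0]		bus info block header (length / crc)
 *	rom[1]		bus name, "1394"
 *	rom[2]		bus capabilities
 *	rom[3..4]	EUI-64, see guid_show() below
 *	rom[5]		root directory header; each following entry is
 *			one key byte plus a 24-bit value, and leaf or
 *			directory keys reference further blocks, which
 *			is what the 0xc0000005 seed and the stack encode.
 */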
502
503static void fw_unit_release(struct device *dev)
504{
505 struct fw_unit *unit = fw_unit(dev);
506
507 kfree(unit);
508}
509
510static struct device_type fw_unit_type = {
511 .uevent = fw_unit_uevent,
512 .release = fw_unit_release,
513};
514
515static int is_fw_unit(struct device *dev)
516{
517 return dev->type == &fw_unit_type;
518}
519
520static void create_units(struct fw_device *device)
521{
522 struct fw_csr_iterator ci;
523 struct fw_unit *unit;
524 int key, value, i;
525
526 i = 0;
527 fw_csr_iterator_init(&ci, &device->config_rom[5]);
528 while (fw_csr_iterator_next(&ci, &key, &value)) {
529 if (key != (CSR_UNIT | CSR_DIRECTORY))
530 continue;
531
532 /*
533 * Get the address of the unit directory and try to
534		 * match the drivers' id_tables against it.
535 */
536 unit = kzalloc(sizeof(*unit), GFP_KERNEL);
537 if (unit == NULL) {
538 fw_error("failed to allocate memory for unit\n");
539 continue;
540 }
541
542 unit->directory = ci.p + value - 1;
543 unit->device.bus = &fw_bus_type;
544 unit->device.type = &fw_unit_type;
545 unit->device.parent = &device->device;
546 snprintf(unit->device.bus_id, sizeof(unit->device.bus_id),
547 "%s.%d", device->device.bus_id, i++);
548
549 init_fw_attribute_group(&unit->device,
550 fw_unit_attributes,
551 &unit->attribute_group);
552 if (device_register(&unit->device) < 0)
553 goto skip_unit;
554
555 continue;
556
557 skip_unit:
558 kfree(unit);
559 }
560}
561
562static int shutdown_unit(struct device *device, void *data)
563{
564 device_unregister(device);
565
566 return 0;
567}
568
569static DECLARE_RWSEM(idr_rwsem);
570static DEFINE_IDR(fw_device_idr);
571int fw_cdev_major;
572
573struct fw_device *fw_device_from_devt(dev_t devt)
574{
575 struct fw_device *device;
576
577 down_read(&idr_rwsem);
578 device = idr_find(&fw_device_idr, MINOR(devt));
579 up_read(&idr_rwsem);
580
581 return device;
582}
583
584static void fw_device_shutdown(struct work_struct *work)
585{
586 struct fw_device *device =
587 container_of(work, struct fw_device, work.work);
588 int minor = MINOR(device->device.devt);
589
590 down_write(&idr_rwsem);
591 idr_remove(&fw_device_idr, minor);
592 up_write(&idr_rwsem);
593
594 fw_device_cdev_remove(device);
595 device_for_each_child(&device->device, NULL, shutdown_unit);
596 device_unregister(&device->device);
597}
598
599static struct device_type fw_device_type = {
600 .release = fw_device_release,
601};
602
603/*
604 * These defines control the retry behavior for reading the config
605 * rom. It shouldn't be necessary to tweak these; if the device
606 * doesn't respond within the retry window below (ten reads, three
607 * seconds apart), it's not going to respond at all. As for the initial delay, a lot of
608 * devices will be able to respond within half a second after bus
609 * reset. On the other hand, it's not really worth being more
610 * aggressive than that, since it scales pretty well; if 10 devices
611 * are plugged in, they're all getting read within one second.
612 */
613
614#define MAX_RETRIES 10
615#define RETRY_DELAY (3 * HZ)
616#define INITIAL_DELAY (HZ / 2)
617
618static void fw_device_init(struct work_struct *work)
619{
620 struct fw_device *device =
621 container_of(work, struct fw_device, work.work);
622 int minor, err;
623
624 /*
625 * All failure paths here set node->data to NULL, so that we
626 * don't try to do device_for_each_child() on a kfree()'d
627 * device.
628 */
629
630 if (read_bus_info_block(device) < 0) {
631 if (device->config_rom_retries < MAX_RETRIES) {
632 device->config_rom_retries++;
633 schedule_delayed_work(&device->work, RETRY_DELAY);
634 } else {
635 fw_notify("giving up on config rom for node id %x\n",
636 device->node_id);
637 if (device->node == device->card->root_node)
638 schedule_delayed_work(&device->card->work, 0);
639 fw_device_release(&device->device);
640 }
641 return;
642 }
643
644 err = -ENOMEM;
645 down_write(&idr_rwsem);
646 if (idr_pre_get(&fw_device_idr, GFP_KERNEL))
647 err = idr_get_new(&fw_device_idr, device, &minor);
648 up_write(&idr_rwsem);
649 if (err < 0)
650 goto error;
651
652 device->device.bus = &fw_bus_type;
653 device->device.type = &fw_device_type;
654 device->device.parent = device->card->device;
655 device->device.devt = MKDEV(fw_cdev_major, minor);
656 snprintf(device->device.bus_id, sizeof(device->device.bus_id),
657 "fw%d", minor);
658
659 init_fw_attribute_group(&device->device,
660 fw_device_attributes,
661 &device->attribute_group);
662 if (device_add(&device->device)) {
663 fw_error("Failed to add device.\n");
664 goto error_with_cdev;
665 }
666
667 create_units(device);
668
669 /*
670 * Transition the device to running state. If it got pulled
671	 * out from under us while we did the initialization work, we
672 * have to shut down the device again here. Normally, though,
673 * fw_node_event will be responsible for shutting it down when
674 * necessary. We have to use the atomic cmpxchg here to avoid
675 * racing with the FW_NODE_DESTROYED case in
676 * fw_node_event().
677 */
678 if (atomic_cmpxchg(&device->state,
679 FW_DEVICE_INITIALIZING,
680 FW_DEVICE_RUNNING) == FW_DEVICE_SHUTDOWN)
681 fw_device_shutdown(&device->work.work);
682 else
683 fw_notify("created new fw device %s (%d config rom retries)\n",
684 device->device.bus_id, device->config_rom_retries);
685
686 /*
687 * Reschedule the IRM work if we just finished reading the
688 * root node config rom. If this races with a bus reset we
689 * just end up running the IRM work a couple of extra times -
690 * pretty harmless.
691 */
692 if (device->node == device->card->root_node)
693 schedule_delayed_work(&device->card->work, 0);
694
695 return;
696
697 error_with_cdev:
698 down_write(&idr_rwsem);
699 idr_remove(&fw_device_idr, minor);
700 up_write(&idr_rwsem);
701 error:
702 put_device(&device->device);
703}
704
705static int update_unit(struct device *dev, void *data)
706{
707 struct fw_unit *unit = fw_unit(dev);
708 struct fw_driver *driver = (struct fw_driver *)dev->driver;
709
710 if (is_fw_unit(dev) && driver != NULL && driver->update != NULL) {
711 down(&dev->sem);
712 driver->update(unit);
713 up(&dev->sem);
714 }
715
716 return 0;
717}
718
719static void fw_device_update(struct work_struct *work)
720{
721 struct fw_device *device =
722 container_of(work, struct fw_device, work.work);
723
724 fw_device_cdev_update(device);
725 device_for_each_child(&device->device, NULL, update_unit);
726}
727
728void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
729{
730 struct fw_device *device;
731
732 switch (event) {
733 case FW_NODE_CREATED:
734 case FW_NODE_LINK_ON:
735 if (!node->link_on)
736 break;
737
738 device = kzalloc(sizeof(*device), GFP_ATOMIC);
739 if (device == NULL)
740 break;
741
742 /*
742		 * Do minimal initialization of the device here; the
743		 * rest will happen in fw_device_init(). We need the
744		 * card and node so we can read the config rom and we
745		 * need to do device_initialize() now so that
746		 * device_for_each_child() in FW_NODE_UPDATED
747		 * doesn't freak out.
749 */
750 device_initialize(&device->device);
751 atomic_set(&device->state, FW_DEVICE_INITIALIZING);
752 device->card = fw_card_get(card);
753 device->node = fw_node_get(node);
754 device->node_id = node->node_id;
755 device->generation = card->generation;
756 INIT_LIST_HEAD(&device->client_list);
757
758 /*
759 * Set the node data to point back to this device so
760 * FW_NODE_UPDATED callbacks can update the node_id
761 * and generation for the device.
762 */
763 node->data = device;
764
765 /*
766 * Many devices are slow to respond after bus resets,
767 * especially if they are bus powered and go through
768 * power-up after getting plugged in. We schedule the
769 * first config rom scan half a second after bus reset.
770 */
771 INIT_DELAYED_WORK(&device->work, fw_device_init);
772 schedule_delayed_work(&device->work, INITIAL_DELAY);
773 break;
774
775 case FW_NODE_UPDATED:
776 if (!node->link_on || node->data == NULL)
777 break;
778
779 device = node->data;
780 device->node_id = node->node_id;
781 device->generation = card->generation;
782 if (atomic_read(&device->state) == FW_DEVICE_RUNNING) {
783 PREPARE_DELAYED_WORK(&device->work, fw_device_update);
784 schedule_delayed_work(&device->work, 0);
785 }
786 break;
787
788 case FW_NODE_DESTROYED:
789 case FW_NODE_LINK_OFF:
790 if (!node->data)
791 break;
792
793 /*
794 * Destroy the device associated with the node. There
795 * are two cases here: either the device is fully
796 * initialized (FW_DEVICE_RUNNING) or we're in the
797 * process of reading its config rom
798 * (FW_DEVICE_INITIALIZING). If it is fully
799 * initialized we can reuse device->work to schedule a
800 * full fw_device_shutdown(). If not, there's work
801		 * scheduled to read its config rom, and we just put
802 * the device in shutdown state to have that code fail
803 * to create the device.
804 */
805 device = node->data;
806 if (atomic_xchg(&device->state,
807 FW_DEVICE_SHUTDOWN) == FW_DEVICE_RUNNING) {
808 PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
809 schedule_delayed_work(&device->work, 0);
810 }
811 break;
812 }
813}
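/*
 * Illustrative state flow for the handler above (states from
 * fw-device.h):
 *
 *	FW_NODE_CREATED/LINK_ON:    kzalloc, FW_DEVICE_INITIALIZING,
 *	                            delayed fw_device_init()
 *	fw_device_init() succeeds:  cmpxchg to FW_DEVICE_RUNNING
 *	FW_NODE_DESTROYED/LINK_OFF: xchg to FW_DEVICE_SHUTDOWN; a running
 *	                            device gets fw_device_shutdown(), one
 *	                            still initializing is shut down by
 *	                            fw_device_init() itself when its
 *	                            cmpxchg sees FW_DEVICE_SHUTDOWN
 */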
diff --git a/drivers/firewire/fw-device.h b/drivers/firewire/fw-device.h
new file mode 100644
index 000000000000..0ba9d64ccf4c
--- /dev/null
+++ b/drivers/firewire/fw-device.h
@@ -0,0 +1,146 @@
1/*
2 * Copyright (C) 2005-2006 Kristian Hoegsberg <krh@bitplanet.net>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software Foundation,
16 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 */
18
19#ifndef __fw_device_h
20#define __fw_device_h
21
22#include <linux/fs.h>
23#include <linux/cdev.h>
24#include <asm/atomic.h>
25
26enum fw_device_state {
27 FW_DEVICE_INITIALIZING,
28 FW_DEVICE_RUNNING,
29 FW_DEVICE_SHUTDOWN,
30};
31
32struct fw_attribute_group {
33 struct attribute_group *groups[2];
34 struct attribute_group group;
35 struct attribute *attrs[11];
36};
37
38struct fw_device {
39 atomic_t state;
40 struct fw_node *node;
41 int node_id;
42 int generation;
43 struct fw_card *card;
44 struct device device;
45 struct list_head link;
46 struct list_head client_list;
47 u32 *config_rom;
48 size_t config_rom_length;
49 int config_rom_retries;
50 struct delayed_work work;
51 struct fw_attribute_group attribute_group;
52};
53
54static inline struct fw_device *
55fw_device(struct device *dev)
56{
57 return container_of(dev, struct fw_device, device);
58}
59
60static inline int
61fw_device_is_shutdown(struct fw_device *device)
62{
63 return atomic_read(&device->state) == FW_DEVICE_SHUTDOWN;
64}
65
66struct fw_device *fw_device_get(struct fw_device *device);
67void fw_device_put(struct fw_device *device);
68int fw_device_enable_phys_dma(struct fw_device *device);
69
70void fw_device_cdev_update(struct fw_device *device);
71void fw_device_cdev_remove(struct fw_device *device);
72
73struct fw_device *fw_device_from_devt(dev_t devt);
74extern int fw_cdev_major;
75
76struct fw_unit {
77 struct device device;
78 u32 *directory;
79 struct fw_attribute_group attribute_group;
80};
81
82static inline struct fw_unit *
83fw_unit(struct device *dev)
84{
85 return container_of(dev, struct fw_unit, device);
86}
87
88#define CSR_OFFSET 0x40
89#define CSR_LEAF 0x80
90#define CSR_DIRECTORY 0xc0
91
92#define CSR_DESCRIPTOR 0x01
93#define CSR_VENDOR 0x03
94#define CSR_HARDWARE_VERSION 0x04
95#define CSR_NODE_CAPABILITIES 0x0c
96#define CSR_UNIT 0x11
97#define CSR_SPECIFIER_ID 0x12
98#define CSR_VERSION 0x13
99#define CSR_DEPENDENT_INFO 0x14
100#define CSR_MODEL 0x17
101#define CSR_INSTANCE 0x18
102
103#define SBP2_COMMAND_SET_SPECIFIER 0x38
104#define SBP2_COMMAND_SET 0x39
105#define SBP2_COMMAND_SET_REVISION 0x3b
106#define SBP2_FIRMWARE_REVISION 0x3c
107
108struct fw_csr_iterator {
109 u32 *p;
110 u32 *end;
111};
112
113void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 *p);
114int fw_csr_iterator_next(struct fw_csr_iterator *ci,
115 int *key, int *value);
116
117#define FW_MATCH_VENDOR 0x0001
118#define FW_MATCH_MODEL 0x0002
119#define FW_MATCH_SPECIFIER_ID 0x0004
120#define FW_MATCH_VERSION 0x0008
121
122struct fw_device_id {
123 u32 match_flags;
124 u32 vendor;
125 u32 model;
126 u32 specifier_id;
127 u32 version;
128 void *driver_data;
129};
130
131struct fw_driver {
132 struct device_driver driver;
133 /* Called when the parent device sits through a bus reset. */
134 void (*update) (struct fw_unit *unit);
135 const struct fw_device_id *id_table;
136};
137
138static inline struct fw_driver *
139fw_driver(struct device_driver *drv)
140{
141 return container_of(drv, struct fw_driver, driver);
142}
143
144extern const struct file_operations fw_device_ops;
145
146#endif /* __fw_device_h */
diff --git a/drivers/firewire/fw-iso.c b/drivers/firewire/fw-iso.c
new file mode 100644
index 000000000000..2b640e9be6de
--- /dev/null
+++ b/drivers/firewire/fw-iso.c
@@ -0,0 +1,163 @@
1/*
2 * Isochronous IO functionality
3 *
4 * Copyright (C) 2006 Kristian Hoegsberg <krh@bitplanet.net>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software Foundation,
18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/dma-mapping.h>
24#include <linux/vmalloc.h>
25#include <linux/mm.h>
26
27#include "fw-transaction.h"
28#include "fw-topology.h"
29#include "fw-device.h"
30
31int
32fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
33 int page_count, enum dma_data_direction direction)
34{
35 int i, j, retval = -ENOMEM;
36 dma_addr_t address;
37
38 buffer->page_count = page_count;
39 buffer->direction = direction;
40
41 buffer->pages = kmalloc(page_count * sizeof(buffer->pages[0]),
42 GFP_KERNEL);
43 if (buffer->pages == NULL)
44 goto out;
45
46 for (i = 0; i < buffer->page_count; i++) {
47 buffer->pages[i] = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
48 if (buffer->pages[i] == NULL)
49 goto out_pages;
50
51 address = dma_map_page(card->device, buffer->pages[i],
52 0, PAGE_SIZE, direction);
53 if (dma_mapping_error(address)) {
54 __free_page(buffer->pages[i]);
55 goto out_pages;
56 }
57 set_page_private(buffer->pages[i], address);
58 }
59
60 return 0;
61
62 out_pages:
63 for (j = 0; j < i; j++) {
64 address = page_private(buffer->pages[j]);
65 dma_unmap_page(card->device, address,
66			       PAGE_SIZE, direction);
67 __free_page(buffer->pages[j]);
68 }
69 kfree(buffer->pages);
70 out:
71 buffer->pages = NULL;
72 return retval;
73}
74
75int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma)
76{
77 unsigned long uaddr;
78 int i, retval;
79
80 uaddr = vma->vm_start;
81 for (i = 0; i < buffer->page_count; i++) {
82 retval = vm_insert_page(vma, uaddr, buffer->pages[i]);
83 if (retval)
84 return retval;
85 uaddr += PAGE_SIZE;
86 }
87
88 return 0;
89}
90
91void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer,
92 struct fw_card *card)
93{
94 int i;
95 dma_addr_t address;
96
97 for (i = 0; i < buffer->page_count; i++) {
98 address = page_private(buffer->pages[i]);
99 dma_unmap_page(card->device, address,
100 PAGE_SIZE, buffer->direction);
101 __free_page(buffer->pages[i]);
102 }
103
104 kfree(buffer->pages);
105 buffer->pages = NULL;
106}
107
108struct fw_iso_context *
109fw_iso_context_create(struct fw_card *card, int type,
110 int channel, int speed, size_t header_size,
111 fw_iso_callback_t callback, void *callback_data)
112{
113 struct fw_iso_context *ctx;
114
115 ctx = card->driver->allocate_iso_context(card, type, header_size);
116 if (IS_ERR(ctx))
117 return ctx;
118
119 ctx->card = card;
120 ctx->type = type;
121 ctx->channel = channel;
122 ctx->speed = speed;
123 ctx->header_size = header_size;
124 ctx->callback = callback;
125 ctx->callback_data = callback_data;
126
127 return ctx;
128}
129EXPORT_SYMBOL(fw_iso_context_create);
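
fw_iso_context_create() propagates errors in the pointer itself, using the kernel's ERR_PTR/IS_ERR convention: small negative errno values are cast into the top of the pointer range, where no valid allocation can live. A simplified user-space sketch of that convention (the real macros live in linux/err.h):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

/* Encode a negative errno in the otherwise-unused top pages of the
 * address space, and decode it again on the caller's side. */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *create(int fail)
{
	static int object = 7;

	if (fail)
		return ERR_PTR(-EBUSY);
	return &object;
}

int main(void)
{
	void *p = create(1);

	if (IS_ERR(p))
		printf("error: %ld\n", PTR_ERR(p));	/* prints the -EBUSY value */
	return 0;
}
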
130
131void fw_iso_context_destroy(struct fw_iso_context *ctx)
132{
133 struct fw_card *card = ctx->card;
134
135 card->driver->free_iso_context(ctx);
136}
137EXPORT_SYMBOL(fw_iso_context_destroy);
138
139int
140fw_iso_context_start(struct fw_iso_context *ctx, int cycle, int sync, int tags)
141{
142 return ctx->card->driver->start_iso(ctx, cycle, sync, tags);
143}
144EXPORT_SYMBOL(fw_iso_context_start);
145
146int
147fw_iso_context_queue(struct fw_iso_context *ctx,
148 struct fw_iso_packet *packet,
149 struct fw_iso_buffer *buffer,
150 unsigned long payload)
151{
152 struct fw_card *card = ctx->card;
153
154 return card->driver->queue_iso(ctx, packet, buffer, payload);
155}
156EXPORT_SYMBOL(fw_iso_context_queue);
157
158int
159fw_iso_context_stop(struct fw_iso_context *ctx)
160{
161 return ctx->card->driver->stop_iso(ctx);
162}
163EXPORT_SYMBOL(fw_iso_context_stop);
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
new file mode 100644
index 000000000000..1f5c70461b8b
--- /dev/null
+++ b/drivers/firewire/fw-ohci.c
@@ -0,0 +1,1943 @@
1/*
2 * Driver for OHCI 1394 controllers
3 *
4 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software Foundation,
18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/init.h>
24#include <linux/interrupt.h>
25#include <linux/pci.h>
26#include <linux/delay.h>
27#include <linux/poll.h>
28#include <linux/dma-mapping.h>
29
30#include <asm/uaccess.h>
31#include <asm/semaphore.h>
32
33#include "fw-transaction.h"
34#include "fw-ohci.h"
35
36#define DESCRIPTOR_OUTPUT_MORE 0
37#define DESCRIPTOR_OUTPUT_LAST (1 << 12)
38#define DESCRIPTOR_INPUT_MORE (2 << 12)
39#define DESCRIPTOR_INPUT_LAST (3 << 12)
40#define DESCRIPTOR_STATUS (1 << 11)
41#define DESCRIPTOR_KEY_IMMEDIATE (2 << 8)
42#define DESCRIPTOR_PING (1 << 7)
43#define DESCRIPTOR_YY (1 << 6)
44#define DESCRIPTOR_NO_IRQ (0 << 4)
45#define DESCRIPTOR_IRQ_ERROR (1 << 4)
46#define DESCRIPTOR_IRQ_ALWAYS (3 << 4)
47#define DESCRIPTOR_BRANCH_ALWAYS (3 << 2)
48#define DESCRIPTOR_WAIT (3 << 0)
49
50struct descriptor {
51 __le16 req_count;
52 __le16 control;
53 __le32 data_address;
54 __le32 branch_address;
55 __le16 res_count;
56 __le16 transfer_status;
57} __attribute__((aligned(16)));
58
59struct db_descriptor {
60 __le16 first_size;
61 __le16 control;
62 __le16 second_req_count;
63 __le16 first_req_count;
64 __le32 branch_address;
65 __le16 second_res_count;
66 __le16 first_res_count;
67 __le32 reserved0;
68 __le32 first_buffer;
69 __le32 second_buffer;
70 __le32 reserved1;
71} __attribute__((aligned(16)));
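
Both descriptor layouts are dictated by the OHCI DMA engine, so the structs must be exactly the size the hardware expects and 16-byte aligned. A small sketch of how such a layout assumption can be checked at compile time, using C11 _Static_assert (the kernel itself would use BUILD_BUG_ON):

#include <stdint.h>
#include <stdio.h>

struct descriptor {
	uint16_t req_count;
	uint16_t control;
	uint32_t data_address;
	uint32_t branch_address;
	uint16_t res_count;
	uint16_t transfer_status;
} __attribute__((aligned(16)));

/* The DMA engine reads descriptors in 16-byte units; catch accidental
 * padding or field reordering at compile time. */
_Static_assert(sizeof(struct descriptor) == 16,
	       "descriptor must be exactly one 16-byte DMA unit");

int main(void)
{
	printf("sizeof(struct descriptor) = %zu\n", sizeof(struct descriptor));
	return 0;
}
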
72
73#define CONTROL_SET(regs) (regs)
74#define CONTROL_CLEAR(regs) ((regs) + 4)
75#define COMMAND_PTR(regs) ((regs) + 12)
76#define CONTEXT_MATCH(regs) ((regs) + 16)
77
78struct ar_buffer {
79 struct descriptor descriptor;
80 struct ar_buffer *next;
81 __le32 data[0];
82};
83
84struct ar_context {
85 struct fw_ohci *ohci;
86 struct ar_buffer *current_buffer;
87 struct ar_buffer *last_buffer;
88 void *pointer;
89 u32 regs;
90 struct tasklet_struct tasklet;
91};
92
93struct context;
94
95typedef int (*descriptor_callback_t)(struct context *ctx,
96 struct descriptor *d,
97 struct descriptor *last);
98struct context {
99 struct fw_ohci *ohci;
100 u32 regs;
101
102 struct descriptor *buffer;
103 dma_addr_t buffer_bus;
104 size_t buffer_size;
105 struct descriptor *head_descriptor;
106 struct descriptor *tail_descriptor;
107 struct descriptor *tail_descriptor_last;
108 struct descriptor *prev_descriptor;
109
110 descriptor_callback_t callback;
111
112 struct tasklet_struct tasklet;
113};
114
115#define IT_HEADER_SY(v) ((v) << 0)
116#define IT_HEADER_TCODE(v) ((v) << 4)
117#define IT_HEADER_CHANNEL(v) ((v) << 8)
118#define IT_HEADER_TAG(v) ((v) << 14)
119#define IT_HEADER_SPEED(v) ((v) << 16)
120#define IT_HEADER_DATA_LENGTH(v) ((v) << 16)
121
122struct iso_context {
123 struct fw_iso_context base;
124 struct context context;
125 void *header;
126 size_t header_length;
127};
128
129#define CONFIG_ROM_SIZE 1024
130
131struct fw_ohci {
132 struct fw_card card;
133
134 u32 version;
135 __iomem char *registers;
136 dma_addr_t self_id_bus;
137 __le32 *self_id_cpu;
138 struct tasklet_struct bus_reset_tasklet;
139 int node_id;
140 int generation;
141 int request_generation;
142 u32 bus_seconds;
143
144 /*
145 * Spinlock for accessing fw_ohci data. Never call out of
146 * this driver with this lock held.
147 */
148 spinlock_t lock;
149 u32 self_id_buffer[512];
150
151 /* Config rom buffers */
152 __be32 *config_rom;
153 dma_addr_t config_rom_bus;
154 __be32 *next_config_rom;
155 dma_addr_t next_config_rom_bus;
156 u32 next_header;
157
158 struct ar_context ar_request_ctx;
159 struct ar_context ar_response_ctx;
160 struct context at_request_ctx;
161 struct context at_response_ctx;
162
163 u32 it_context_mask;
164 struct iso_context *it_context_list;
165 u32 ir_context_mask;
166 struct iso_context *ir_context_list;
167};
168
169static inline struct fw_ohci *fw_ohci(struct fw_card *card)
170{
171 return container_of(card, struct fw_ohci, card);
172}
173
174#define IT_CONTEXT_CYCLE_MATCH_ENABLE 0x80000000
175#define IR_CONTEXT_BUFFER_FILL 0x80000000
176#define IR_CONTEXT_ISOCH_HEADER 0x40000000
177#define IR_CONTEXT_CYCLE_MATCH_ENABLE 0x20000000
178#define IR_CONTEXT_MULTI_CHANNEL_MODE 0x10000000
179#define IR_CONTEXT_DUAL_BUFFER_MODE 0x08000000
180
181#define CONTEXT_RUN 0x8000
182#define CONTEXT_WAKE 0x1000
183#define CONTEXT_DEAD 0x0800
184#define CONTEXT_ACTIVE 0x0400
185
186#define OHCI1394_MAX_AT_REQ_RETRIES 0x2
187#define OHCI1394_MAX_AT_RESP_RETRIES 0x2
188#define OHCI1394_MAX_PHYS_RESP_RETRIES 0x8
189
190#define FW_OHCI_MAJOR 240
191#define OHCI1394_REGISTER_SIZE 0x800
192#define OHCI_LOOP_COUNT 500
193#define OHCI1394_PCI_HCI_Control 0x40
194#define SELF_ID_BUF_SIZE 0x800
195#define OHCI_TCODE_PHY_PACKET 0x0e
196#define OHCI_VERSION_1_1 0x010010
197#define ISO_BUFFER_SIZE (64 * 1024)
198#define AT_BUFFER_SIZE 4096
199
200static char ohci_driver_name[] = KBUILD_MODNAME;
201
202static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
203{
204 writel(data, ohci->registers + offset);
205}
206
207static inline u32 reg_read(const struct fw_ohci *ohci, int offset)
208{
209 return readl(ohci->registers + offset);
210}
211
212static inline void flush_writes(const struct fw_ohci *ohci)
213{
214 /* Do a dummy read to flush writes. */
215 reg_read(ohci, OHCI1394_Version);
216}
217
218static int
219ohci_update_phy_reg(struct fw_card *card, int addr,
220 int clear_bits, int set_bits)
221{
222 struct fw_ohci *ohci = fw_ohci(card);
223 u32 val, old;
224
225 reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
226 msleep(2);
227 val = reg_read(ohci, OHCI1394_PhyControl);
228 if ((val & OHCI1394_PhyControl_ReadDone) == 0) {
229 fw_error("failed to set phy reg bits.\n");
230 return -EBUSY;
231 }
232
233 old = OHCI1394_PhyControl_ReadData(val);
234 old = (old & ~clear_bits) | set_bits;
235 reg_write(ohci, OHCI1394_PhyControl,
236 OHCI1394_PhyControl_Write(addr, old));
237
238 return 0;
239}
240
241static int ar_context_add_page(struct ar_context *ctx)
242{
243 struct device *dev = ctx->ohci->card.device;
244 struct ar_buffer *ab;
245 dma_addr_t ab_bus;
246 size_t offset;
247
248 ab = (struct ar_buffer *) __get_free_page(GFP_ATOMIC);
249 if (ab == NULL)
250 return -ENOMEM;
251
252 ab_bus = dma_map_single(dev, ab, PAGE_SIZE, DMA_BIDIRECTIONAL);
253 if (dma_mapping_error(ab_bus)) {
254 free_page((unsigned long) ab);
255 return -ENOMEM;
256 }
257
258 memset(&ab->descriptor, 0, sizeof(ab->descriptor));
259 ab->descriptor.control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
260 DESCRIPTOR_STATUS |
261 DESCRIPTOR_BRANCH_ALWAYS);
262 offset = offsetof(struct ar_buffer, data);
263 ab->descriptor.req_count = cpu_to_le16(PAGE_SIZE - offset);
264 ab->descriptor.data_address = cpu_to_le32(ab_bus + offset);
265 ab->descriptor.res_count = cpu_to_le16(PAGE_SIZE - offset);
266 ab->descriptor.branch_address = 0;
267
268 dma_sync_single_for_device(dev, ab_bus, PAGE_SIZE, DMA_BIDIRECTIONAL);
269
270 ctx->last_buffer->descriptor.branch_address = ab_bus | 1;
271 ctx->last_buffer->next = ab;
272 ctx->last_buffer = ab;
273
274 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
275 flush_writes(ctx->ohci);
276
277 return 0;
278}
279
280static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
281{
282 struct fw_ohci *ohci = ctx->ohci;
283 struct fw_packet p;
284 u32 status, length, tcode;
285
286 p.header[0] = le32_to_cpu(buffer[0]);
287 p.header[1] = le32_to_cpu(buffer[1]);
288 p.header[2] = le32_to_cpu(buffer[2]);
289
290 tcode = (p.header[0] >> 4) & 0x0f;
291 switch (tcode) {
292 case TCODE_WRITE_QUADLET_REQUEST:
293 case TCODE_READ_QUADLET_RESPONSE:
294 p.header[3] = (__force __u32) buffer[3];
295 p.header_length = 16;
296 p.payload_length = 0;
297 break;
298
299 case TCODE_READ_BLOCK_REQUEST:
300 p.header[3] = le32_to_cpu(buffer[3]);
301 p.header_length = 16;
302 p.payload_length = 0;
303 break;
304
305 case TCODE_WRITE_BLOCK_REQUEST:
306 case TCODE_READ_BLOCK_RESPONSE:
307 case TCODE_LOCK_REQUEST:
308 case TCODE_LOCK_RESPONSE:
309 p.header[3] = le32_to_cpu(buffer[3]);
310 p.header_length = 16;
311 p.payload_length = p.header[3] >> 16;
312 break;
313
314 case TCODE_WRITE_RESPONSE:
315 case TCODE_READ_QUADLET_REQUEST:
316 case OHCI_TCODE_PHY_PACKET:
317 p.header_length = 12;
318 p.payload_length = 0;
319 break;
320 }
321
322 p.payload = (void *) buffer + p.header_length;
323
324 /* FIXME: What to do about evt_* errors? */
325 length = (p.header_length + p.payload_length + 3) / 4;
326 status = le32_to_cpu(buffer[length]);
327
328 p.ack = ((status >> 16) & 0x1f) - 16;
329 p.speed = (status >> 21) & 0x7;
330 p.timestamp = status & 0xffff;
331 p.generation = ohci->request_generation;
332
333 /*
334 * The OHCI bus reset handler synthesizes a phy packet with
335 * the new generation number when a bus reset happens (see
336 * section 8.4.2.3). This helps us determine when a request
337 * was received and make sure we send the response in the same
338 * generation. We only need this for requests; for responses
339 * we use the unique tlabel for finding the matching
340 * request.
341 */
342
343 if (p.ack + 16 == 0x09)
344 ohci->request_generation = (buffer[2] >> 16) & 0xff;
345 else if (ctx == &ohci->ar_request_ctx)
346 fw_core_handle_request(&ohci->card, &p);
347 else
348 fw_core_handle_response(&ohci->card, &p);
349
350 return buffer + length + 1;
351}
352
353static void ar_context_tasklet(unsigned long data)
354{
355 struct ar_context *ctx = (struct ar_context *)data;
356 struct fw_ohci *ohci = ctx->ohci;
357 struct ar_buffer *ab;
358 struct descriptor *d;
359 void *buffer, *end;
360
361 ab = ctx->current_buffer;
362 d = &ab->descriptor;
363
364 if (d->res_count == 0) {
365 size_t size, rest, offset;
366
367 /*
368 * This descriptor is finished and we may have a
369 * packet split across this and the next buffer. We
370 * reuse the page for reassembling the split packet.
371 */
372
373 offset = offsetof(struct ar_buffer, data);
374 dma_unmap_single(ohci->card.device,
375 ab->descriptor.data_address - offset,
376 PAGE_SIZE, DMA_BIDIRECTIONAL);
377
378 buffer = ab;
379 ab = ab->next;
380 d = &ab->descriptor;
381 size = buffer + PAGE_SIZE - ctx->pointer;
382 rest = le16_to_cpu(d->req_count) - le16_to_cpu(d->res_count);
383 memmove(buffer, ctx->pointer, size);
384 memcpy(buffer + size, ab->data, rest);
385 ctx->current_buffer = ab;
386 ctx->pointer = (void *) ab->data + rest;
387 end = buffer + size + rest;
388
389 while (buffer < end)
390 buffer = handle_ar_packet(ctx, buffer);
391
392 free_page((unsigned long)buffer);
393 ar_context_add_page(ctx);
394 } else {
395 buffer = ctx->pointer;
396 ctx->pointer = end =
397 (void *) ab + PAGE_SIZE - le16_to_cpu(d->res_count);
398
399 while (buffer < end)
400 buffer = handle_ar_packet(ctx, buffer);
401 }
402}
403
404static int
405ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci, u32 regs)
406{
407 struct ar_buffer ab;
408
409 ctx->regs = regs;
410 ctx->ohci = ohci;
411 ctx->last_buffer = &ab;
412 tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);
413
414 ar_context_add_page(ctx);
415 ar_context_add_page(ctx);
416 ctx->current_buffer = ab.next;
417 ctx->pointer = ctx->current_buffer->data;
418
419 reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ab.descriptor.branch_address);
420 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN);
421 flush_writes(ctx->ohci);
422
423 return 0;
424}
425
426static void context_tasklet(unsigned long data)
427{
428 struct context *ctx = (struct context *) data;
429 struct fw_ohci *ohci = ctx->ohci;
430 struct descriptor *d, *last;
431 u32 address;
432 int z;
433
434 dma_sync_single_for_cpu(ohci->card.device, ctx->buffer_bus,
435 ctx->buffer_size, DMA_TO_DEVICE);
436
437 d = ctx->tail_descriptor;
438 last = ctx->tail_descriptor_last;
439
440 while (last->branch_address != 0) {
441 address = le32_to_cpu(last->branch_address);
442 z = address & 0xf;
443 d = ctx->buffer + (address - ctx->buffer_bus) / sizeof(*d);
444 last = (z == 2) ? d : d + z - 1;
445
446 if (!ctx->callback(ctx, d, last))
447 break;
448
449 ctx->tail_descriptor = d;
450 ctx->tail_descriptor_last = last;
451 }
452}
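
Because descriptors are 16-byte aligned, the low four bits of every branch address are zero, and the hardware reuses them to carry Z, the number of descriptors in the next block; context_tasklet() above extracts the count with address & 0xf. A standalone sketch of packing and unpacking that encoding, with an illustrative address:

#include <stdio.h>

/* A 16-byte-aligned DMA address has 4 free low bits; use them for Z,
 * the descriptor count of the block the address points to. */
static unsigned int branch_encode(unsigned int addr, unsigned int z)
{
	return (addr & ~0xfu) | (z & 0xfu);
}

int main(void)
{
	unsigned int branch = branch_encode(0x12340, 3);

	printf("address = 0x%x, z = %u\n",
	       branch & ~0xfu,	/* 0x12340 */
	       branch & 0xfu);	/* 3 */
	return 0;
}
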
453
454static int
455context_init(struct context *ctx, struct fw_ohci *ohci,
456 size_t buffer_size, u32 regs,
457 descriptor_callback_t callback)
458{
459 ctx->ohci = ohci;
460 ctx->regs = regs;
461 ctx->buffer_size = buffer_size;
462 ctx->buffer = kmalloc(buffer_size, GFP_KERNEL);
463 if (ctx->buffer == NULL)
464 return -ENOMEM;
465
466 tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx);
467 ctx->callback = callback;
468
469 ctx->buffer_bus =
470 dma_map_single(ohci->card.device, ctx->buffer,
471 buffer_size, DMA_TO_DEVICE);
472 if (dma_mapping_error(ctx->buffer_bus)) {
473 kfree(ctx->buffer);
474 return -ENOMEM;
475 }
476
477 ctx->head_descriptor = ctx->buffer;
478 ctx->prev_descriptor = ctx->buffer;
479 ctx->tail_descriptor = ctx->buffer;
480 ctx->tail_descriptor_last = ctx->buffer;
481
482 /*
483 * We put a dummy descriptor in the buffer that has a NULL
484 * branch address and looks like it's been sent. That way we
485 * have a descriptor to append DMA programs to. Also, the
486 * ring buffer invariant is that it always has at least one
487 * element so that head == tail means buffer full.
488 */
489
490 memset(ctx->head_descriptor, 0, sizeof(*ctx->head_descriptor));
491 ctx->head_descriptor->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
492 ctx->head_descriptor->transfer_status = cpu_to_le16(0x8011);
493 ctx->head_descriptor++;
494
495 return 0;
496}
497
498static void
499context_release(struct context *ctx)
500{
501 struct fw_card *card = &ctx->ohci->card;
502
503 dma_unmap_single(card->device, ctx->buffer_bus,
504 ctx->buffer_size, DMA_TO_DEVICE);
505 kfree(ctx->buffer);
506}
507
508static struct descriptor *
509context_get_descriptors(struct context *ctx, int z, dma_addr_t *d_bus)
510{
511 struct descriptor *d, *tail, *end;
512
513 d = ctx->head_descriptor;
514 tail = ctx->tail_descriptor;
515 end = ctx->buffer + ctx->buffer_size / sizeof(*d);
516
517 if (d + z <= tail) {
518 goto has_space;
519 } else if (d > tail && d + z <= end) {
520 goto has_space;
521 } else if (d > tail && ctx->buffer + z <= tail) {
522 d = ctx->buffer;
523 goto has_space;
524 }
525
526 return NULL;
527
528 has_space:
529 memset(d, 0, z * sizeof(*d));
530 *d_bus = ctx->buffer_bus + (d - ctx->buffer) * sizeof(*d);
531
532 return d;
533}
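
context_get_descriptors() treats the descriptor buffer as a ring with one slot permanently consumed, so head == tail means full; a block of z slots fits either between head and tail, between head and the end of the buffer, or at the start after wrapping. The same three-way test on plain indices, as a sketch rather than the driver's code:

#include <stdio.h>

#define RING_SIZE 16

/* Return the start index for z contiguous slots, or -1 if they don't
 * fit. One slot stays consumed, so head == tail means the ring is full. */
static int ring_reserve(int head, int tail, int z)
{
	if (head + z <= tail)			/* fits before tail */
		return head;
	if (head > tail && head + z <= RING_SIZE) /* fits before the end */
		return head;
	if (head > tail && z <= tail)		/* wrap to the start */
		return 0;
	return -1;				/* no contiguous room */
}

int main(void)
{
	printf("%d\n", ring_reserve(10, 4, 6));	/* 10: room before the end */
	printf("%d\n", ring_reserve(14, 4, 4));	/* 0: wraps around */
	printf("%d\n", ring_reserve(14, 2, 4));	/* -1: would hit the tail */
	return 0;
}
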
534
535static void context_run(struct context *ctx, u32 extra)
536{
537 struct fw_ohci *ohci = ctx->ohci;
538
539 reg_write(ohci, COMMAND_PTR(ctx->regs),
540 le32_to_cpu(ctx->tail_descriptor_last->branch_address));
541 reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0);
542 reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra);
543 flush_writes(ohci);
544}
545
546static void context_append(struct context *ctx,
547 struct descriptor *d, int z, int extra)
548{
549 dma_addr_t d_bus;
550
551 d_bus = ctx->buffer_bus + (d - ctx->buffer) * sizeof(*d);
552
553 ctx->head_descriptor = d + z + extra;
554 ctx->prev_descriptor->branch_address = cpu_to_le32(d_bus | z);
555 ctx->prev_descriptor = z == 2 ? d : d + z - 1;
556
557 dma_sync_single_for_device(ctx->ohci->card.device, ctx->buffer_bus,
558 ctx->buffer_size, DMA_TO_DEVICE);
559
560 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
561 flush_writes(ctx->ohci);
562}
563
564static void context_stop(struct context *ctx)
565{
566 u32 reg;
567 int i;
568
569 reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
570 flush_writes(ctx->ohci);
571
572 for (i = 0; i < 10; i++) {
573 reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
574 if ((reg & CONTEXT_ACTIVE) == 0)
575 break;
576
577 fw_notify("context_stop: still active (0x%08x)\n", reg);
578 msleep(1);
579 }
580}
581
582struct driver_data {
583 struct fw_packet *packet;
584};
585
586/*
587 * This function appends a packet to the DMA queue for transmission.
588 * Must always be called with the ohci->lock held to ensure proper
589 * generation handling and locking around packet queue manipulation.
590 */
591static int
592at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
593{
594 struct fw_ohci *ohci = ctx->ohci;
595 dma_addr_t d_bus, payload_bus;
596 struct driver_data *driver_data;
597 struct descriptor *d, *last;
598 __le32 *header;
599 int z, tcode;
600 u32 reg;
601
602 d = context_get_descriptors(ctx, 4, &d_bus);
603 if (d == NULL) {
604 packet->ack = RCODE_SEND_ERROR;
605 return -1;
606 }
607
608 d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
609 d[0].res_count = cpu_to_le16(packet->timestamp);
610
611 /*
612 * The DMA format for asynchronous link packets is different
613 * from the IEEE1394 layout, so shift the fields around
614 * accordingly. If header_length is 8, it's a PHY packet, to
615 * which we need to prepend an extra quadlet.
616 */
617
618 header = (__le32 *) &d[1];
619 if (packet->header_length > 8) {
620 header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
621 (packet->speed << 16));
622 header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
623 (packet->header[0] & 0xffff0000));
624 header[2] = cpu_to_le32(packet->header[2]);
625
626 tcode = (packet->header[0] >> 4) & 0x0f;
627 if (TCODE_IS_BLOCK_PACKET(tcode))
628 header[3] = cpu_to_le32(packet->header[3]);
629 else
630 header[3] = (__force __le32) packet->header[3];
631
632 d[0].req_count = cpu_to_le16(packet->header_length);
633 } else {
634 header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) |
635 (packet->speed << 16));
636 header[1] = cpu_to_le32(packet->header[0]);
637 header[2] = cpu_to_le32(packet->header[1]);
638 d[0].req_count = cpu_to_le16(12);
639 }
640
641 driver_data = (struct driver_data *) &d[3];
642 driver_data->packet = packet;
643 packet->driver_data = driver_data;
644
645 if (packet->payload_length > 0) {
646 payload_bus =
647 dma_map_single(ohci->card.device, packet->payload,
648 packet->payload_length, DMA_TO_DEVICE);
649 if (dma_mapping_error(payload_bus)) {
650 packet->ack = RCODE_SEND_ERROR;
651 return -1;
652 }
653
654 d[2].req_count = cpu_to_le16(packet->payload_length);
655 d[2].data_address = cpu_to_le32(payload_bus);
656 last = &d[2];
657 z = 3;
658 } else {
659 last = &d[0];
660 z = 2;
661 }
662
663 last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
664 DESCRIPTOR_IRQ_ALWAYS |
665 DESCRIPTOR_BRANCH_ALWAYS);
666
667 /* FIXME: Document how the locking works. */
668 if (ohci->generation != packet->generation) {
669 packet->ack = RCODE_GENERATION;
670 return -1;
671 }
672
673 context_append(ctx, d, z, 4 - z);
674
675 /* If the context isn't already running, start it up. */
676 reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
677 if ((reg & CONTEXT_RUN) == 0)
678 context_run(ctx, 0);
679
680 return 0;
681}
682
683static int handle_at_packet(struct context *context,
684 struct descriptor *d,
685 struct descriptor *last)
686{
687 struct driver_data *driver_data;
688 struct fw_packet *packet;
689 struct fw_ohci *ohci = context->ohci;
690 dma_addr_t payload_bus;
691 int evt;
692
693 if (last->transfer_status == 0)
694 /* This descriptor isn't done yet, stop iteration. */
695 return 0;
696
697 driver_data = (struct driver_data *) &d[3];
698 packet = driver_data->packet;
699 if (packet == NULL)
700 /* This packet was cancelled, just continue. */
701 return 1;
702
703 payload_bus = le32_to_cpu(last->data_address);
704 if (payload_bus != 0)
705 dma_unmap_single(ohci->card.device, payload_bus,
706 packet->payload_length, DMA_TO_DEVICE);
707
708 evt = le16_to_cpu(last->transfer_status) & 0x1f;
709 packet->timestamp = le16_to_cpu(last->res_count);
710
711 switch (evt) {
712 case OHCI1394_evt_timeout:
713 /* Async response transmit timed out. */
714 packet->ack = RCODE_CANCELLED;
715 break;
716
717 case OHCI1394_evt_flushed:
718 /*
719 * The packet was flushed; give the same error as
720 * when we try to use a stale generation count.
721 */
722 packet->ack = RCODE_GENERATION;
723 break;
724
725 case OHCI1394_evt_missing_ack:
726 /*
727 * Using a valid (current) generation count, but the
728 * node is not on the bus or not sending acks.
729 */
730 packet->ack = RCODE_NO_ACK;
731 break;
732
733 case ACK_COMPLETE + 0x10:
734 case ACK_PENDING + 0x10:
735 case ACK_BUSY_X + 0x10:
736 case ACK_BUSY_A + 0x10:
737 case ACK_BUSY_B + 0x10:
738 case ACK_DATA_ERROR + 0x10:
739 case ACK_TYPE_ERROR + 0x10:
740 packet->ack = evt - 0x10;
741 break;
742
743 default:
744 packet->ack = RCODE_SEND_ERROR;
745 break;
746 }
747
748 packet->callback(packet, &ohci->card, packet->ack);
749
750 return 1;
751}
752
753#define HEADER_GET_DESTINATION(q) (((q) >> 16) & 0xffff)
754#define HEADER_GET_TCODE(q) (((q) >> 4) & 0x0f)
755#define HEADER_GET_OFFSET_HIGH(q) (((q) >> 0) & 0xffff)
756#define HEADER_GET_DATA_LENGTH(q) (((q) >> 16) & 0xffff)
757#define HEADER_GET_EXTENDED_TCODE(q) (((q) >> 0) & 0xffff)
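
The HEADER_GET_* macros above are plain shift-and-mask field extraction from IEEE 1394 header quadlets. A tiny sketch of the technique on a made-up quadlet:

#include <stdio.h>

/* Pull a field out of a 32-bit quadlet: shift it down, mask it off. */
#define FIELD_GET(q, shift, mask)  (((q) >> (shift)) & (mask))

int main(void)
{
	unsigned int q = (0xffc0u << 16) | (0x1u << 4); /* destination + tcode */

	printf("destination = 0x%x\n", FIELD_GET(q, 16, 0xffff)); /* 0xffc0 */
	printf("tcode       = 0x%x\n", FIELD_GET(q, 4, 0x0f));	  /* 0x1 */
	return 0;
}
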
758
759static void
760handle_local_rom(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
761{
762 struct fw_packet response;
763 int tcode, length, i;
764
765 tcode = HEADER_GET_TCODE(packet->header[0]);
766 if (TCODE_IS_BLOCK_PACKET(tcode))
767 length = HEADER_GET_DATA_LENGTH(packet->header[3]);
768 else
769 length = 4;
770
771 i = csr - CSR_CONFIG_ROM;
772 if (i + length > CONFIG_ROM_SIZE) {
773 fw_fill_response(&response, packet->header,
774 RCODE_ADDRESS_ERROR, NULL, 0);
775 } else if (!TCODE_IS_READ_REQUEST(tcode)) {
776 fw_fill_response(&response, packet->header,
777 RCODE_TYPE_ERROR, NULL, 0);
778 } else {
779 fw_fill_response(&response, packet->header, RCODE_COMPLETE,
780 (void *) ohci->config_rom + i, length);
781 }
782
783 fw_core_handle_response(&ohci->card, &response);
784}
785
786static void
787handle_local_lock(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
788{
789 struct fw_packet response;
790 int tcode, length, ext_tcode, sel;
791 __be32 *payload, lock_old = 0;
792 u32 lock_arg, lock_data;
793
794 tcode = HEADER_GET_TCODE(packet->header[0]);
795 length = HEADER_GET_DATA_LENGTH(packet->header[3]);
796 payload = packet->payload;
797 ext_tcode = HEADER_GET_EXTENDED_TCODE(packet->header[3]);
798
799 if (tcode == TCODE_LOCK_REQUEST &&
800 ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) {
801 lock_arg = be32_to_cpu(payload[0]);
802 lock_data = be32_to_cpu(payload[1]);
803 } else if (tcode == TCODE_READ_QUADLET_REQUEST) {
804 lock_arg = 0;
805 lock_data = 0;
806 } else {
807 fw_fill_response(&response, packet->header,
808 RCODE_TYPE_ERROR, NULL, 0);
809 goto out;
810 }
811
812 sel = (csr - CSR_BUS_MANAGER_ID) / 4;
813 reg_write(ohci, OHCI1394_CSRData, lock_data);
814 reg_write(ohci, OHCI1394_CSRCompareData, lock_arg);
815 reg_write(ohci, OHCI1394_CSRControl, sel);
816
817 if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
818 lock_old = cpu_to_be32(reg_read(ohci, OHCI1394_CSRData));
819 else
820 fw_notify("swap not done yet\n");
821
822 fw_fill_response(&response, packet->header,
823 RCODE_COMPLETE, &lock_old, sizeof(lock_old));
824 out:
825 fw_core_handle_response(&ohci->card, &response);
826}
827
828static void
829handle_local_request(struct context *ctx, struct fw_packet *packet)
830{
831 u64 offset;
832 u32 csr;
833
834 if (ctx == &ctx->ohci->at_request_ctx) {
835 packet->ack = ACK_PENDING;
836 packet->callback(packet, &ctx->ohci->card, packet->ack);
837 }
838
839 offset =
840 ((unsigned long long)
841 HEADER_GET_OFFSET_HIGH(packet->header[1]) << 32) |
842 packet->header[2];
843 csr = offset - CSR_REGISTER_BASE;
844
845 /* Handle config rom reads. */
846 if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END)
847 handle_local_rom(ctx->ohci, packet, csr);
848 else switch (csr) {
849 case CSR_BUS_MANAGER_ID:
850 case CSR_BANDWIDTH_AVAILABLE:
851 case CSR_CHANNELS_AVAILABLE_HI:
852 case CSR_CHANNELS_AVAILABLE_LO:
853 handle_local_lock(ctx->ohci, packet, csr);
854 break;
855 default:
856 if (ctx == &ctx->ohci->at_request_ctx)
857 fw_core_handle_request(&ctx->ohci->card, packet);
858 else
859 fw_core_handle_response(&ctx->ohci->card, packet);
860 break;
861 }
862
863 if (ctx == &ctx->ohci->at_response_ctx) {
864 packet->ack = ACK_COMPLETE;
865 packet->callback(packet, &ctx->ohci->card, packet->ack);
866 }
867}
868
869static void
870at_context_transmit(struct context *ctx, struct fw_packet *packet)
871{
872 unsigned long flags;
873 int retval;
874
875 spin_lock_irqsave(&ctx->ohci->lock, flags);
876
877 if (HEADER_GET_DESTINATION(packet->header[0]) == ctx->ohci->node_id &&
878 ctx->ohci->generation == packet->generation) {
879 spin_unlock_irqrestore(&ctx->ohci->lock, flags);
880 handle_local_request(ctx, packet);
881 return;
882 }
883
884 retval = at_context_queue_packet(ctx, packet);
885 spin_unlock_irqrestore(&ctx->ohci->lock, flags);
886
887 if (retval < 0)
888 packet->callback(packet, &ctx->ohci->card, packet->ack);
889
890}
891
892static void bus_reset_tasklet(unsigned long data)
893{
894 struct fw_ohci *ohci = (struct fw_ohci *)data;
895 int self_id_count, i, j, reg;
896 int generation, new_generation;
897 unsigned long flags;
898
899 reg = reg_read(ohci, OHCI1394_NodeID);
900 if (!(reg & OHCI1394_NodeID_idValid)) {
901 fw_error("node ID not valid, new bus reset in progress\n");
902 return;
903 }
904 ohci->node_id = reg & 0xffff;
905
906 /*
907 * The count in the SelfIDCount register is the number of
908 * bytes in the self ID receive buffer. Since we also receive
909 * the inverted quadlets and a header quadlet, we shift one
910 * bit extra to get the actual number of self IDs.
911 */
912
913 self_id_count = (reg_read(ohci, OHCI1394_SelfIDCount) >> 3) & 0x3ff;
914 generation = (le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff;
915
916 for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
917 if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1])
918 fw_error("inconsistent self IDs\n");
919 ohci->self_id_buffer[j] = le32_to_cpu(ohci->self_id_cpu[i]);
920 }
921
922 /*
923 * Check the consistency of the self IDs we just read. The
924 * problem we face is that a new bus reset can start while we
925 * read out the self IDs from the DMA buffer. If this happens,
926 * the DMA buffer will be overwritten with new self IDs and we
927 * will read out inconsistent data. The OHCI specification
928 * (section 11.2) recommends a technique similar to
929 * linux/seqlock.h, where we remember the generation of the
930 * self IDs in the buffer before reading them out and compare
931 * it to the current generation after reading them out. If
932 * the two generations match we know we have a consistent set
933 * of self IDs.
934 */
935
936 new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff;
937 if (new_generation != generation) {
938 fw_notify("recursive bus reset detected, "
939 "discarding self ids\n");
940 return;
941 }
942
943 /* FIXME: Document how the locking works. */
944 spin_lock_irqsave(&ohci->lock, flags);
945
946 ohci->generation = generation;
947 context_stop(&ohci->at_request_ctx);
948 context_stop(&ohci->at_response_ctx);
949 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
950
951 /*
952 * This next bit is unrelated to the AT context stuff but we
953 * have to do it under the spinlock also. If a new config rom
954 * was set up before this reset, the old one is now no longer
955 * in use and we can free it. Update the config rom pointers
956 * to point to the current config rom and clear the
957 * next_config_rom pointer so a new update can take place.
958 */
959
960 if (ohci->next_config_rom != NULL) {
961 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
962 ohci->config_rom, ohci->config_rom_bus);
963 ohci->config_rom = ohci->next_config_rom;
964 ohci->config_rom_bus = ohci->next_config_rom_bus;
965 ohci->next_config_rom = NULL;
966
967 /*
968 * Restore config_rom image and manually update
969 * config_rom registers. Writing the header quadlet
970 * will indicate that the config rom is ready, so we
971 * do that last.
972 */
973 reg_write(ohci, OHCI1394_BusOptions,
974 be32_to_cpu(ohci->config_rom[2]));
975 ohci->config_rom[0] = cpu_to_be32(ohci->next_header);
976 reg_write(ohci, OHCI1394_ConfigROMhdr, ohci->next_header);
977 }
978
979 spin_unlock_irqrestore(&ohci->lock, flags);
980
981 fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
982 self_id_count, ohci->self_id_buffer);
983}
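
bus_reset_tasklet() guards against a new bus reset overwriting the self-ID buffer mid-read with the seqlock-like technique its comment describes: sample the generation, copy the data, sample again, and only trust the copy if the two samples match. A standalone sketch of that read protocol (the driver discards a stale copy and waits for the next reset, while this sketch retries; a real seqlock also needs memory barriers, omitted here):

#include <stdio.h>
#include <string.h>

/* A writer bumps generation whenever it rewrites data; a reader's copy
 * is consistent only if the generation is unchanged across the copy. */
struct snapshot {
	volatile unsigned int generation;
	int data[4];
};

static unsigned int read_consistent(struct snapshot *s, int out[4])
{
	unsigned int gen;

	do {
		gen = s->generation;
		memcpy(out, s->data, sizeof(s->data));
	} while (gen != s->generation);	/* retry if a write raced us */

	return gen;
}

int main(void)
{
	struct snapshot s = { .generation = 5, .data = { 1, 2, 3, 4 } };
	int copy[4];

	printf("generation %u, data[0] = %d\n",
	       read_consistent(&s, copy), copy[0]);
	return 0;
}
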
984
985static irqreturn_t irq_handler(int irq, void *data)
986{
987 struct fw_ohci *ohci = data;
988 u32 event, iso_event, cycle_time;
989 int i;
990
991 event = reg_read(ohci, OHCI1394_IntEventClear);
992
993 if (!event)
994 return IRQ_NONE;
995
996 reg_write(ohci, OHCI1394_IntEventClear, event);
997
998 if (event & OHCI1394_selfIDComplete)
999 tasklet_schedule(&ohci->bus_reset_tasklet);
1000
1001 if (event & OHCI1394_RQPkt)
1002 tasklet_schedule(&ohci->ar_request_ctx.tasklet);
1003
1004 if (event & OHCI1394_RSPkt)
1005 tasklet_schedule(&ohci->ar_response_ctx.tasklet);
1006
1007 if (event & OHCI1394_reqTxComplete)
1008 tasklet_schedule(&ohci->at_request_ctx.tasklet);
1009
1010 if (event & OHCI1394_respTxComplete)
1011 tasklet_schedule(&ohci->at_response_ctx.tasklet);
1012
1013 iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
1014 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event);
1015
1016 while (iso_event) {
1017 i = ffs(iso_event) - 1;
1018 tasklet_schedule(&ohci->ir_context_list[i].context.tasklet);
1019 iso_event &= ~(1 << i);
1020 }
1021
1022 iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear);
1023 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event);
1024
1025 while (iso_event) {
1026 i = ffs(iso_event) - 1;
1027 tasklet_schedule(&ohci->it_context_list[i].context.tasklet);
1028 iso_event &= ~(1 << i);
1029 }
1030
1031 if (event & OHCI1394_cycle64Seconds) {
1032 cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1033 if ((cycle_time & 0x80000000) == 0)
1034 ohci->bus_seconds++;
1035 }
1036
1037 return IRQ_HANDLED;
1038}
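
The two iso_event loops above dispatch one tasklet per set bit by repeatedly finding the lowest set bit with ffs() and clearing it. The same pattern in standalone form, using GCC/Clang's __builtin_ffs, which matches the kernel ffs() convention of returning 1 for bit 0 and 0 when no bit is set:

#include <stdio.h>

int main(void)
{
	unsigned int pending = 0x29;	/* bits 0, 3 and 5 set */

	/* Service each pending context, lowest bit first. */
	while (pending) {
		int i = __builtin_ffs(pending) - 1;

		printf("servicing context %d\n", i);	/* 0, then 3, then 5 */
		pending &= ~(1u << i);
	}
	return 0;
}
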
1039
1040static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length)
1041{
1042 struct fw_ohci *ohci = fw_ohci(card);
1043 struct pci_dev *dev = to_pci_dev(card->device);
1044
1045 /*
1046 * When the link is not yet enabled, the atomic config rom
1047 * update mechanism described below in ohci_set_config_rom()
1048 * is not active. We have to update ConfigRomHeader and
1049 * BusOptions manually, and the write to ConfigROMmap takes
1050 * effect immediately. We tie this to the enabling of the
1051 * link, so we have a valid config rom before enabling - the
1052 * OHCI requires that ConfigROMhdr and BusOptions have valid
1053 * values before enabling.
1054 *
1055 * However, when the ConfigROMmap is written, some controllers
1056 * always read back quadlets 0 and 2 from the config rom to
1057 * the ConfigRomHeader and BusOptions registers on bus reset.
1058 * They shouldn't do that in this initial case where the link
1059 * isn't enabled. This means we have to use the same
1060 * workaround here, setting the bus header to 0 and then writing
1061 * the right values in the bus reset tasklet.
1062 */
1063
1064 ohci->next_config_rom =
1065 dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
1066 &ohci->next_config_rom_bus, GFP_KERNEL);
1067 if (ohci->next_config_rom == NULL)
1068 return -ENOMEM;
1069
1070 memset(ohci->next_config_rom, 0, CONFIG_ROM_SIZE);
1071 fw_memcpy_to_be32(ohci->next_config_rom, config_rom, length * 4);
1072
1073 ohci->next_header = config_rom[0];
1074 ohci->next_config_rom[0] = 0;
1075 reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
1076 reg_write(ohci, OHCI1394_BusOptions, config_rom[2]);
1077 reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);
1078
1079 reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);
1080
1081 if (request_irq(dev->irq, irq_handler,
1082 IRQF_SHARED, ohci_driver_name, ohci)) {
1083 fw_error("Failed to allocate shared interrupt %d.\n",
1084 dev->irq);
1085 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
1086 ohci->config_rom, ohci->config_rom_bus);
1087 return -EIO;
1088 }
1089
1090 reg_write(ohci, OHCI1394_HCControlSet,
1091 OHCI1394_HCControl_linkEnable |
1092 OHCI1394_HCControl_BIBimageValid);
1093 flush_writes(ohci);
1094
1095 /*
1096 * We are ready to go, initiate bus reset to finish the
1097 * initialization.
1098 */
1099
1100 fw_core_initiate_bus_reset(&ohci->card, 1);
1101
1102 return 0;
1103}
1104
1105static int
1106ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length)
1107{
1108 struct fw_ohci *ohci;
1109 unsigned long flags;
1110 int retval = 0;
1111 __be32 *next_config_rom;
1112 dma_addr_t next_config_rom_bus;
1113
1114 ohci = fw_ohci(card);
1115
1116 /*
1117 * When the OHCI controller is enabled, the config rom update
1118 * mechanism is a bit tricky, but easy enough to use. See
1119 * section 5.5.6 in the OHCI specification.
1120 *
1121 * The OHCI controller caches the new config rom address in a
1122 * shadow register (ConfigROMmapNext) and needs a bus reset
1123 * for the changes to take place. When the bus reset is
1124 * detected, the controller loads the new values for the
1125 * ConfigRomHeader and BusOptions registers from the specified
1126 * config rom and loads ConfigROMmap from the ConfigROMmapNext
1127 * shadow register. All automatically and atomically.
1128 *
1129 * Now, there's a twist to this story. The automatic load of
1130 * ConfigRomHeader and BusOptions doesn't honor the
1131 * noByteSwapData bit, so with a be32 config rom, the
1132 * controller will load be32 values into these registers
1133 * during the atomic update, even on little endian
1134 * architectures. The workaround we use is to put a 0 in the
1135 * header quadlet; 0 is endian agnostic and means that the
1136 * config rom isn't ready yet. In the bus reset tasklet we
1137 * then set up the real values for the two registers.
1138 *
1139 * We use ohci->lock to avoid racing with the code that sets
1140 * ohci->next_config_rom to NULL (see bus_reset_tasklet).
1141 */
1142
1143 next_config_rom =
1144 dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
1145 &next_config_rom_bus, GFP_KERNEL);
1146 if (next_config_rom == NULL)
1147 return -ENOMEM;
1148
1149 spin_lock_irqsave(&ohci->lock, flags);
1150
1151 if (ohci->next_config_rom == NULL) {
1152 ohci->next_config_rom = next_config_rom;
1153 ohci->next_config_rom_bus = next_config_rom_bus;
1154
1155 memset(ohci->next_config_rom, 0, CONFIG_ROM_SIZE);
1156 fw_memcpy_to_be32(ohci->next_config_rom, config_rom,
1157 length * 4);
1158
1159 ohci->next_header = config_rom[0];
1160 ohci->next_config_rom[0] = 0;
1161
1162 reg_write(ohci, OHCI1394_ConfigROMmap,
1163 ohci->next_config_rom_bus);
1164 } else {
1165 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
1166 next_config_rom, next_config_rom_bus);
1167 retval = -EBUSY;
1168 }
1169
1170 spin_unlock_irqrestore(&ohci->lock, flags);
1171
1172 /*
1173 * Now initiate a bus reset to have the changes take
1174 * effect. We clean up the old config rom memory and DMA
1175 * mappings in the bus reset tasklet, since the OHCI
1176 * controller could need to access it before the bus reset
1177 * takes effect.
1178 */
1179 if (retval == 0)
1180 fw_core_initiate_bus_reset(&ohci->card, 1);
1181
1182 return retval;
1183}
1184
1185static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
1186{
1187 struct fw_ohci *ohci = fw_ohci(card);
1188
1189 at_context_transmit(&ohci->at_request_ctx, packet);
1190}
1191
1192static void ohci_send_response(struct fw_card *card, struct fw_packet *packet)
1193{
1194 struct fw_ohci *ohci = fw_ohci(card);
1195
1196 at_context_transmit(&ohci->at_response_ctx, packet);
1197}
1198
1199static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
1200{
1201 struct fw_ohci *ohci = fw_ohci(card);
1202 struct context *ctx = &ohci->at_request_ctx;
1203 struct driver_data *driver_data = packet->driver_data;
1204 int retval = -ENOENT;
1205
1206 tasklet_disable(&ctx->tasklet);
1207
1208 if (packet->ack != 0)
1209 goto out;
1210
1211 driver_data->packet = NULL;
1212 packet->ack = RCODE_CANCELLED;
1213 packet->callback(packet, &ohci->card, packet->ack);
1214 retval = 0;
1215
1216 out:
1217 tasklet_enable(&ctx->tasklet);
1218
1219 return retval;
1220}
1221
1222static int
1223ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation)
1224{
1225 struct fw_ohci *ohci = fw_ohci(card);
1226 unsigned long flags;
1227 int n, retval = 0;
1228
1229 /*
1230 * FIXME: Make sure this bitmask is cleared when we clear the busReset
1231 * interrupt bit. Clear physReqResourceAllBuses on bus reset.
1232 */
1233
1234 spin_lock_irqsave(&ohci->lock, flags);
1235
1236 if (ohci->generation != generation) {
1237 retval = -ESTALE;
1238 goto out;
1239 }
1240
1241 /*
1242 * Note, if the node ID contains a non-local bus ID, physical DMA is
1243 * enabled for _all_ nodes on remote buses.
1244 */
1245
1246 n = (node_id & 0xffc0) == LOCAL_BUS ? node_id & 0x3f : 63;
1247 if (n < 32)
1248 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << n);
1249 else
1250 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32));
1251
1252 flush_writes(ohci);
1253 out:
1254 spin_unlock_irqrestore(&ohci->lock, flags);
1255 return retval;
1256}
1257
1258static u64
1259ohci_get_bus_time(struct fw_card *card)
1260{
1261 struct fw_ohci *ohci = fw_ohci(card);
1262 u32 cycle_time;
1263 u64 bus_time;
1264
1265 cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1266 bus_time = ((u64) ohci->bus_seconds << 32) | cycle_time;
1267
1268 return bus_time;
1269}
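
ohci_get_bus_time() widens the 32-bit cycle timer into a 64-bit bus time by placing the software-maintained seconds count (bumped in irq_handler() on the cycle64Seconds event once the register's top bit has wrapped back to 0) above the raw register value. A sketch of the composition, with illustrative values:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Upper 32 bits: seconds counted in software; lower 32: raw timer. */
static uint64_t bus_time(uint32_t bus_seconds, uint32_t cycle_time)
{
	return ((uint64_t)bus_seconds << 32) | cycle_time;
}

int main(void)
{
	/* e.g. 3 software seconds, timer partway through its range */
	printf("0x%016" PRIx64 "\n", bus_time(3, 0x12345678));
	return 0;
}
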
1270
1271static int handle_ir_dualbuffer_packet(struct context *context,
1272 struct descriptor *d,
1273 struct descriptor *last)
1274{
1275 struct iso_context *ctx =
1276 container_of(context, struct iso_context, context);
1277 struct db_descriptor *db = (struct db_descriptor *) d;
1278 __le32 *ir_header;
1279 size_t header_length;
1280 void *p, *end;
1281 int i;
1282
1283 if (db->first_res_count > 0 && db->second_res_count > 0)
1284 /* This descriptor isn't done yet, stop iteration. */
1285 return 0;
1286
1287 header_length = le16_to_cpu(db->first_req_count) -
1288 le16_to_cpu(db->first_res_count);
1289
1290 i = ctx->header_length;
1291 p = db + 1;
1292 end = p + header_length;
1293 while (p < end && i + ctx->base.header_size <= PAGE_SIZE) {
1294 /*
1295 * The iso header is byteswapped to little endian by
1296 * the controller, but the remaining header quadlets
1297 * are big endian. We want to present all the headers
1298 * as big endian, so we have to swap the first
1299 * quadlet.
1300 */
1301 *(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
1302 memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4);
1303 i += ctx->base.header_size;
1304 p += ctx->base.header_size + 4;
1305 }
1306
1307 ctx->header_length = i;
1308
1309 if (le16_to_cpu(db->control) & DESCRIPTOR_IRQ_ALWAYS) {
1310 ir_header = (__le32 *) (db + 1);
1311 ctx->base.callback(&ctx->base,
1312 le32_to_cpu(ir_header[0]) & 0xffff,
1313 ctx->header_length, ctx->header,
1314 ctx->base.callback_data);
1315 ctx->header_length = 0;
1316 }
1317
1318 return 1;
1319}
1320
1321static int handle_it_packet(struct context *context,
1322 struct descriptor *d,
1323 struct descriptor *last)
1324{
1325 struct iso_context *ctx =
1326 container_of(context, struct iso_context, context);
1327
1328 if (last->transfer_status == 0)
1329 /* This descriptor isn't done yet, stop iteration. */
1330 return 0;
1331
1332 if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS)
1333 ctx->base.callback(&ctx->base, le16_to_cpu(last->res_count),
1334 0, NULL, ctx->base.callback_data);
1335
1336 return 1;
1337}
1338
1339static struct fw_iso_context *
1340ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size)
1341{
1342 struct fw_ohci *ohci = fw_ohci(card);
1343 struct iso_context *ctx, *list;
1344 descriptor_callback_t callback;
1345 u32 *mask, regs;
1346 unsigned long flags;
1347 int index, retval = -ENOMEM;
1348
1349 if (type == FW_ISO_CONTEXT_TRANSMIT) {
1350 mask = &ohci->it_context_mask;
1351 list = ohci->it_context_list;
1352 callback = handle_it_packet;
1353 } else {
1354 mask = &ohci->ir_context_mask;
1355 list = ohci->ir_context_list;
1356 callback = handle_ir_dualbuffer_packet;
1357 }
1358
1359 /* FIXME: We need a fallback for pre 1.1 OHCI. */
1360 if (callback == handle_ir_dualbuffer_packet &&
1361 ohci->version < OHCI_VERSION_1_1)
1362 return ERR_PTR(-EINVAL);
1363
1364 spin_lock_irqsave(&ohci->lock, flags);
1365 index = ffs(*mask) - 1;
1366 if (index >= 0)
1367 *mask &= ~(1 << index);
1368 spin_unlock_irqrestore(&ohci->lock, flags);
1369
1370 if (index < 0)
1371 return ERR_PTR(-EBUSY);
1372
1373 if (type == FW_ISO_CONTEXT_TRANSMIT)
1374 regs = OHCI1394_IsoXmitContextBase(index);
1375 else
1376 regs = OHCI1394_IsoRcvContextBase(index);
1377
1378 ctx = &list[index];
1379 memset(ctx, 0, sizeof(*ctx));
1380 ctx->header_length = 0;
1381 ctx->header = (void *) __get_free_page(GFP_KERNEL);
1382 if (ctx->header == NULL)
1383 goto out;
1384
1385 retval = context_init(&ctx->context, ohci, ISO_BUFFER_SIZE,
1386 regs, callback);
1387 if (retval < 0)
1388 goto out_with_header;
1389
1390 return &ctx->base;
1391
1392 out_with_header:
1393 free_page((unsigned long)ctx->header);
1394 out:
1395 spin_lock_irqsave(&ohci->lock, flags);
1396 *mask |= 1 << index;
1397 spin_unlock_irqrestore(&ohci->lock, flags);
1398
1399 return ERR_PTR(retval);
1400}
1401
1402static int ohci_start_iso(struct fw_iso_context *base,
1403 s32 cycle, u32 sync, u32 tags)
1404{
1405 struct iso_context *ctx = container_of(base, struct iso_context, base);
1406 struct fw_ohci *ohci = ctx->context.ohci;
1407 u32 control, match;
1408 int index;
1409
1410 if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
1411 index = ctx - ohci->it_context_list;
1412 match = 0;
1413 if (cycle >= 0)
1414 match = IT_CONTEXT_CYCLE_MATCH_ENABLE |
1415 (cycle & 0x7fff) << 16;
1416
1417 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index);
1418 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index);
1419 context_run(&ctx->context, match);
1420 } else {
1421 index = ctx - ohci->ir_context_list;
1422 control = IR_CONTEXT_DUAL_BUFFER_MODE | IR_CONTEXT_ISOCH_HEADER;
1423 match = (tags << 28) | (sync << 8) | ctx->base.channel;
1424 if (cycle >= 0) {
1425 match |= (cycle & 0x07fff) << 12;
1426 control |= IR_CONTEXT_CYCLE_MATCH_ENABLE;
1427 }
1428
1429 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 1 << index);
1430 reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index);
1431 reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match);
1432 context_run(&ctx->context, control);
1433 }
1434
1435 return 0;
1436}
1437
1438static int ohci_stop_iso(struct fw_iso_context *base)
1439{
1440 struct fw_ohci *ohci = fw_ohci(base->card);
1441 struct iso_context *ctx = container_of(base, struct iso_context, base);
1442 int index;
1443
1444 if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
1445 index = ctx - ohci->it_context_list;
1446 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index);
1447 } else {
1448 index = ctx - ohci->ir_context_list;
1449 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index);
1450 }
1451 flush_writes(ohci);
1452 context_stop(&ctx->context);
1453
1454 return 0;
1455}
1456
1457static void ohci_free_iso_context(struct fw_iso_context *base)
1458{
1459 struct fw_ohci *ohci = fw_ohci(base->card);
1460 struct iso_context *ctx = container_of(base, struct iso_context, base);
1461 unsigned long flags;
1462 int index;
1463
1464 ohci_stop_iso(base);
1465 context_release(&ctx->context);
1466 free_page((unsigned long)ctx->header);
1467
1468 spin_lock_irqsave(&ohci->lock, flags);
1469
1470 if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
1471 index = ctx - ohci->it_context_list;
1472 ohci->it_context_mask |= 1 << index;
1473 } else {
1474 index = ctx - ohci->ir_context_list;
1475 ohci->ir_context_mask |= 1 << index;
1476 }
1477
1478 spin_unlock_irqrestore(&ohci->lock, flags);
1479}
1480
1481static int
1482ohci_queue_iso_transmit(struct fw_iso_context *base,
1483 struct fw_iso_packet *packet,
1484 struct fw_iso_buffer *buffer,
1485 unsigned long payload)
1486{
1487 struct iso_context *ctx = container_of(base, struct iso_context, base);
1488 struct descriptor *d, *last, *pd;
1489 struct fw_iso_packet *p;
1490 __le32 *header;
1491 dma_addr_t d_bus, page_bus;
1492 u32 z, header_z, payload_z, irq;
1493 u32 payload_index, payload_end_index, next_page_index;
1494 int page, end_page, i, length, offset;
1495
1496 /*
1497 * FIXME: Cycle lost behavior should be configurable: lose
1498 * packet, retransmit or terminate.
1499 */
1500
1501 p = packet;
1502 payload_index = payload;
1503
1504 if (p->skip)
1505 z = 1;
1506 else
1507 z = 2;
1508 if (p->header_length > 0)
1509 z++;
1510
1511 /* Determine the first page the payload isn't contained in. */
1512 end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT;
1513 if (p->payload_length > 0)
1514 payload_z = end_page - (payload_index >> PAGE_SHIFT);
1515 else
1516 payload_z = 0;
1517
1518 z += payload_z;
1519
1520 /* Get header size in number of descriptors. */
1521 header_z = DIV_ROUND_UP(p->header_length, sizeof(*d));
1522
1523 d = context_get_descriptors(&ctx->context, z + header_z, &d_bus);
1524 if (d == NULL)
1525 return -ENOMEM;
1526
1527 if (!p->skip) {
1528 d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
1529 d[0].req_count = cpu_to_le16(8);
1530
1531 header = (__le32 *) &d[1];
1532 header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) |
1533 IT_HEADER_TAG(p->tag) |
1534 IT_HEADER_TCODE(TCODE_STREAM_DATA) |
1535 IT_HEADER_CHANNEL(ctx->base.channel) |
1536 IT_HEADER_SPEED(ctx->base.speed));
1537 header[1] =
1538 cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length +
1539 p->payload_length));
1540 }
1541
1542 if (p->header_length > 0) {
1543 d[2].req_count = cpu_to_le16(p->header_length);
1544 d[2].data_address = cpu_to_le32(d_bus + z * sizeof(*d));
1545 memcpy(&d[z], p->header, p->header_length);
1546 }
1547
1548 pd = d + z - payload_z;
1549 payload_end_index = payload_index + p->payload_length;
1550 for (i = 0; i < payload_z; i++) {
1551 page = payload_index >> PAGE_SHIFT;
1552 offset = payload_index & ~PAGE_MASK;
1553 next_page_index = (page + 1) << PAGE_SHIFT;
1554 length =
1555 min(next_page_index, payload_end_index) - payload_index;
1556 pd[i].req_count = cpu_to_le16(length);
1557
1558 page_bus = page_private(buffer->pages[page]);
1559 pd[i].data_address = cpu_to_le32(page_bus + offset);
1560
1561 payload_index += length;
1562 }
1563
1564 if (p->interrupt)
1565 irq = DESCRIPTOR_IRQ_ALWAYS;
1566 else
1567 irq = DESCRIPTOR_NO_IRQ;
1568
1569 last = z == 2 ? d : d + z - 1;
1570 last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
1571 DESCRIPTOR_STATUS |
1572 DESCRIPTOR_BRANCH_ALWAYS |
1573 irq);
1574
1575 context_append(&ctx->context, d, z, header_z);
1576
1577 return 0;
1578}
1579
1580static int
1581ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
1582 struct fw_iso_packet *packet,
1583 struct fw_iso_buffer *buffer,
1584 unsigned long payload)
1585{
1586 struct iso_context *ctx = container_of(base, struct iso_context, base);
1587 struct db_descriptor *db = NULL;
1588 struct descriptor *d;
1589 struct fw_iso_packet *p;
1590 dma_addr_t d_bus, page_bus;
1591 u32 z, header_z, length, rest;
1592 int page, offset, packet_count, header_size;
1593
1594 /*
1595 * FIXME: Cycle lost behavior should be configurable: lose
1596 * packet, retransmit or terminate.
1597 */
1598
1599 if (packet->skip) {
1600 d = context_get_descriptors(&ctx->context, 2, &d_bus);
1601 if (d == NULL)
1602 return -ENOMEM;
1603
1604 db = (struct db_descriptor *) d;
1605 db->control = cpu_to_le16(DESCRIPTOR_STATUS |
1606 DESCRIPTOR_BRANCH_ALWAYS |
1607 DESCRIPTOR_WAIT);
1608 db->first_size = cpu_to_le16(ctx->base.header_size + 4);
1609 context_append(&ctx->context, d, 2, 0);
1610 }
1611
1612 p = packet;
1613 z = 2;
1614
1615 /*
1616 * The OHCI controller puts the status word in the header
1617 * buffer too, so we need 4 extra bytes per packet.
1618 */
1619 packet_count = p->header_length / ctx->base.header_size;
1620 header_size = packet_count * (ctx->base.header_size + 4);
1621
1622 /* Get header size in number of descriptors. */
1623 header_z = DIV_ROUND_UP(header_size, sizeof(*d));
1624 page = payload >> PAGE_SHIFT;
1625 offset = payload & ~PAGE_MASK;
1626 rest = p->payload_length;
1627
1628 /* FIXME: OHCI 1.0 doesn't support dual buffer receive */
1629 /* FIXME: make packet-per-buffer/dual-buffer a context option */
1630 while (rest > 0) {
1631 d = context_get_descriptors(&ctx->context,
1632 z + header_z, &d_bus);
1633 if (d == NULL)
1634 return -ENOMEM;
1635
1636 db = (struct db_descriptor *) d;
1637 db->control = cpu_to_le16(DESCRIPTOR_STATUS |
1638 DESCRIPTOR_BRANCH_ALWAYS);
1639 db->first_size = cpu_to_le16(ctx->base.header_size + 4);
1640 db->first_req_count = cpu_to_le16(header_size);
1641 db->first_res_count = db->first_req_count;
1642 db->first_buffer = cpu_to_le32(d_bus + sizeof(*db));
1643
1644 if (offset + rest < PAGE_SIZE)
1645 length = rest;
1646 else
1647 length = PAGE_SIZE - offset;
1648
1649 db->second_req_count = cpu_to_le16(length);
1650 db->second_res_count = db->second_req_count;
1651 page_bus = page_private(buffer->pages[page]);
1652 db->second_buffer = cpu_to_le32(page_bus + offset);
1653
1654 if (p->interrupt && length == rest)
1655 db->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);
1656
1657 context_append(&ctx->context, d, z, header_z);
1658 offset = (offset + length) & ~PAGE_MASK;
1659 rest -= length;
1660 page++;
1661 }
1662
1663 return 0;
1664}
1665
1666static int
1667ohci_queue_iso(struct fw_iso_context *base,
1668 struct fw_iso_packet *packet,
1669 struct fw_iso_buffer *buffer,
1670 unsigned long payload)
1671{
1672 struct iso_context *ctx = container_of(base, struct iso_context, base);
1673
1674 if (base->type == FW_ISO_CONTEXT_TRANSMIT)
1675 return ohci_queue_iso_transmit(base, packet, buffer, payload);
1676 else if (ctx->context.ohci->version >= OHCI_VERSION_1_1)
1677 return ohci_queue_iso_receive_dualbuffer(base, packet,
1678 buffer, payload);
1679 else
1680 /* FIXME: Implement fallback for OHCI 1.0 controllers. */
1681 return -EINVAL;
1682}
1683
1684static const struct fw_card_driver ohci_driver = {
1685 .name = ohci_driver_name,
1686 .enable = ohci_enable,
1687 .update_phy_reg = ohci_update_phy_reg,
1688 .set_config_rom = ohci_set_config_rom,
1689 .send_request = ohci_send_request,
1690 .send_response = ohci_send_response,
1691 .cancel_packet = ohci_cancel_packet,
1692 .enable_phys_dma = ohci_enable_phys_dma,
1693 .get_bus_time = ohci_get_bus_time,
1694
1695 .allocate_iso_context = ohci_allocate_iso_context,
1696 .free_iso_context = ohci_free_iso_context,
1697 .queue_iso = ohci_queue_iso,
1698 .start_iso = ohci_start_iso,
1699 .stop_iso = ohci_stop_iso,
1700};
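
ohci_driver is a classic C ops table: the generic fw-core layer calls only through these function pointers (as fw_iso_context_create() and friends in fw-iso.c do via card->driver), so an alternative controller driver just supplies its own table. A minimal standalone sketch of the pattern, with names invented for illustration:

#include <stdio.h>

struct card;

/* The generic layer knows only this table, never the backend type. */
struct card_driver {
	const char *name;
	int (*enable)(struct card *card);
};

struct card {
	const struct card_driver *driver;
};

static int toy_enable(struct card *card)
{
	printf("%s: enabled\n", card->driver->name);
	return 0;
}

static const struct card_driver toy_driver = {
	.name	= "toy",
	.enable	= toy_enable,
};

int main(void)
{
	struct card card = { .driver = &toy_driver };

	/* Generic code dispatches through the table. */
	return card.driver->enable(&card);
}
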
1701
1702static int software_reset(struct fw_ohci *ohci)
1703{
1704 int i;
1705
1706 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
1707
1708 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
1709 if ((reg_read(ohci, OHCI1394_HCControlSet) &
1710 OHCI1394_HCControl_softReset) == 0)
1711 return 0;
1712 msleep(1);
1713 }
1714
1715 return -EBUSY;
1716}
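
software_reset() above uses the standard poll-until-clear loop: kick the self-clearing reset bit, then poll it with a sleep between reads and give up after a bounded number of tries. The skeleton of that loop in standalone form, where a fake register stands in for reg_read():

#include <stdio.h>

#define LOOP_COUNT 500
#define RESET_BIT  0x00010000u

static unsigned int fake_reg = RESET_BIT;	/* stands in for the MMIO register */

/* Hardware clears the bit when the reset completes; emulate that here. */
static unsigned int reg_poll(void)
{
	static int reads;

	if (++reads >= 3)
		fake_reg &= ~RESET_BIT;
	return fake_reg;
}

static int reset_poll_loop(void)
{
	int i;

	for (i = 0; i < LOOP_COUNT; i++) {
		if ((reg_poll() & RESET_BIT) == 0)
			return 0;	/* bit cleared: reset done */
		/* the driver sleeps here: msleep(1) */
	}
	return -1;	/* timed out: controller is wedged */
}

int main(void)
{
	printf("reset %s\n", reset_poll_loop() == 0 ? "ok" : "failed");
	return 0;
}
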
1717
1718static int __devinit
1719pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
1720{
1721 struct fw_ohci *ohci;
1722 u32 bus_options, max_receive, link_speed;
1723 u64 guid;
1724 int err;
1725 size_t size;
1726
1727 ohci = kzalloc(sizeof(*ohci), GFP_KERNEL);
1728 if (ohci == NULL) {
1729 fw_error("Could not malloc fw_ohci data.\n");
1730 return -ENOMEM;
1731 }
1732
1733 fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev);
1734
1735 err = pci_enable_device(dev);
1736 if (err) {
1737 fw_error("Failed to enable OHCI hardware.\n");
1738 goto fail_put_card;
1739 }
1740
1741 pci_set_master(dev);
1742 pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
1743 pci_set_drvdata(dev, ohci);
1744
1745 spin_lock_init(&ohci->lock);
1746
1747 tasklet_init(&ohci->bus_reset_tasklet,
1748 bus_reset_tasklet, (unsigned long)ohci);
1749
1750 err = pci_request_region(dev, 0, ohci_driver_name);
1751 if (err) {
1752 fw_error("MMIO resource unavailable\n");
1753 goto fail_disable;
1754 }
1755
1756 ohci->registers = pci_iomap(dev, 0, OHCI1394_REGISTER_SIZE);
1757 if (ohci->registers == NULL) {
1758 fw_error("Failed to remap registers\n");
1759 err = -ENXIO;
1760 goto fail_iomem;
1761 }
1762
1763 if (software_reset(ohci)) {
1764 fw_error("Failed to reset ohci card.\n");
1765 err = -EBUSY;
1766 goto fail_registers;
1767 }
1768
1769 /*
1770 * Now enable LPS, which we need in order to start accessing
1771 * most of the registers. In fact, on some cards (ALI M5251),
1772 * accessing registers in the SClk domain without LPS enabled
1773 * will lock up the machine. Wait 50msec to make sure we have
1774 * full link enabled.
1775 */
1776 reg_write(ohci, OHCI1394_HCControlSet,
1777 OHCI1394_HCControl_LPS |
1778 OHCI1394_HCControl_postedWriteEnable);
1779 flush_writes(ohci);
1780 msleep(50);
1781
1782 reg_write(ohci, OHCI1394_HCControlClear,
1783 OHCI1394_HCControl_noByteSwapData);
1784
1785 reg_write(ohci, OHCI1394_LinkControlSet,
1786 OHCI1394_LinkControl_rcvSelfID |
1787 OHCI1394_LinkControl_cycleTimerEnable |
1788 OHCI1394_LinkControl_cycleMaster);
1789
1790 ar_context_init(&ohci->ar_request_ctx, ohci,
1791 OHCI1394_AsReqRcvContextControlSet);
1792
1793 ar_context_init(&ohci->ar_response_ctx, ohci,
1794 OHCI1394_AsRspRcvContextControlSet);
1795
1796 context_init(&ohci->at_request_ctx, ohci, AT_BUFFER_SIZE,
1797 OHCI1394_AsReqTrContextControlSet, handle_at_packet);
1798
1799 context_init(&ohci->at_response_ctx, ohci, AT_BUFFER_SIZE,
1800 OHCI1394_AsRspTrContextControlSet, handle_at_packet);
1801
1802 reg_write(ohci, OHCI1394_ATRetries,
1803 OHCI1394_MAX_AT_REQ_RETRIES |
1804 (OHCI1394_MAX_AT_RESP_RETRIES << 4) |
1805 (OHCI1394_MAX_PHYS_RESP_RETRIES << 8));
1806
1807 reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
1808 ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
1809 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
1810 size = sizeof(struct iso_context) * hweight32(ohci->it_context_mask);
1811 ohci->it_context_list = kzalloc(size, GFP_KERNEL);
1812
1813 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
1814 ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
1815 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
1816 size = sizeof(struct iso_context) * hweight32(ohci->ir_context_mask);
1817 ohci->ir_context_list = kzalloc(size, GFP_KERNEL);
1818
1819 if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) {
1820 fw_error("Out of memory for it/ir contexts.\n");
1821 err = -ENOMEM;
1822 goto fail_registers;
1823 }
1824
1825 /* self-id dma buffer allocation */
1826 ohci->self_id_cpu = dma_alloc_coherent(ohci->card.device,
1827 SELF_ID_BUF_SIZE,
1828 &ohci->self_id_bus,
1829 GFP_KERNEL);
1830 if (ohci->self_id_cpu == NULL) {
1831 fw_error("Out of memory for self ID buffer.\n");
1832 err = -ENOMEM;
1833 goto fail_registers;
1834 }
1835
1836 reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus);
1837 reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000);
1838 reg_write(ohci, OHCI1394_IntEventClear, ~0);
1839 reg_write(ohci, OHCI1394_IntMaskClear, ~0);
1840 reg_write(ohci, OHCI1394_IntMaskSet,
1841 OHCI1394_selfIDComplete |
1842 OHCI1394_RQPkt | OHCI1394_RSPkt |
1843 OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
1844 OHCI1394_isochRx | OHCI1394_isochTx |
1845 OHCI1394_masterIntEnable |
1846 OHCI1394_cycle64Seconds);
1847
1848 bus_options = reg_read(ohci, OHCI1394_BusOptions);
1849 max_receive = (bus_options >> 12) & 0xf;
1850 link_speed = bus_options & 0x7;
1851 guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) |
1852 reg_read(ohci, OHCI1394_GUIDLo);
1853
1854 err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
1855 if (err < 0)
1856 goto fail_self_id;
1857
1858 ohci->version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
1859 fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n",
1860 dev->dev.bus_id, ohci->version >> 16, ohci->version & 0xff);
1861
1862 return 0;
1863
1864 fail_self_id:
1865 dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
1866 ohci->self_id_cpu, ohci->self_id_bus);
1867 fail_registers:
1868 kfree(ohci->it_context_list);
1869 kfree(ohci->ir_context_list);
1870 pci_iounmap(dev, ohci->registers);
1871 fail_iomem:
1872 pci_release_region(dev, 0);
1873 fail_disable:
1874 pci_disable_device(dev);
1875 fail_put_card:
1876 fw_card_put(&ohci->card);
1877
1878 return err;
1879}
1880
1881static void pci_remove(struct pci_dev *dev)
1882{
1883 struct fw_ohci *ohci;
1884
1885 ohci = pci_get_drvdata(dev);
1886 reg_write(ohci, OHCI1394_IntMaskClear, ~0);
1887 flush_writes(ohci);
1888 fw_core_remove_card(&ohci->card);
1889
1890 /*
1891 * FIXME: Fail all pending packets here, now that the upper
1892 * layers can't queue any more.
1893 */
1894
1895 software_reset(ohci);
1896 free_irq(dev->irq, ohci);
1897 dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
1898 ohci->self_id_cpu, ohci->self_id_bus);
1899 kfree(ohci->it_context_list);
1900 kfree(ohci->ir_context_list);
1901 pci_iounmap(dev, ohci->registers);
1902 pci_release_region(dev, 0);
1903 pci_disable_device(dev);
1904 fw_card_put(&ohci->card);
1905
1906 fw_notify("Removed fw-ohci device.\n");
1907}
1908
1909static struct pci_device_id pci_table[] = {
1910 { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) },
1911 { }
1912};
1913
1914MODULE_DEVICE_TABLE(pci, pci_table);
1915
1916static struct pci_driver fw_ohci_pci_driver = {
1917 .name = ohci_driver_name,
1918 .id_table = pci_table,
1919 .probe = pci_probe,
1920 .remove = pci_remove,
1921};
1922
1923MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
1924MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers");
1925MODULE_LICENSE("GPL");
1926
1927/* Provide a module alias so root-on-sbp2 initrds don't break. */
1928#ifndef CONFIG_IEEE1394_OHCI1394_MODULE
1929MODULE_ALIAS("ohci1394");
1930#endif
1931
1932static int __init fw_ohci_init(void)
1933{
1934 return pci_register_driver(&fw_ohci_pci_driver);
1935}
1936
1937static void __exit fw_ohci_cleanup(void)
1938{
1939 pci_unregister_driver(&fw_ohci_pci_driver);
1940}
1941
1942module_init(fw_ohci_init);
1943module_exit(fw_ohci_cleanup);
diff --git a/drivers/firewire/fw-ohci.h b/drivers/firewire/fw-ohci.h
new file mode 100644
index 000000000000..fa15706397d7
--- /dev/null
+++ b/drivers/firewire/fw-ohci.h
@@ -0,0 +1,153 @@
1#ifndef __fw_ohci_h
2#define __fw_ohci_h
3
4/* OHCI register map */
5
6#define OHCI1394_Version 0x000
7#define OHCI1394_GUID_ROM 0x004
8#define OHCI1394_ATRetries 0x008
9#define OHCI1394_CSRData 0x00C
10#define OHCI1394_CSRCompareData 0x010
11#define OHCI1394_CSRControl 0x014
12#define OHCI1394_ConfigROMhdr 0x018
13#define OHCI1394_BusID 0x01C
14#define OHCI1394_BusOptions 0x020
15#define OHCI1394_GUIDHi 0x024
16#define OHCI1394_GUIDLo 0x028
17#define OHCI1394_ConfigROMmap 0x034
18#define OHCI1394_PostedWriteAddressLo 0x038
19#define OHCI1394_PostedWriteAddressHi 0x03C
20#define OHCI1394_VendorID 0x040
21#define OHCI1394_HCControlSet 0x050
22#define OHCI1394_HCControlClear 0x054
23#define OHCI1394_HCControl_BIBimageValid 0x80000000
24#define OHCI1394_HCControl_noByteSwapData 0x40000000
25#define OHCI1394_HCControl_programPhyEnable 0x00800000
26#define OHCI1394_HCControl_aPhyEnhanceEnable 0x00400000
27#define OHCI1394_HCControl_LPS 0x00080000
28#define OHCI1394_HCControl_postedWriteEnable 0x00040000
29#define OHCI1394_HCControl_linkEnable 0x00020000
30#define OHCI1394_HCControl_softReset 0x00010000
31#define OHCI1394_SelfIDBuffer 0x064
32#define OHCI1394_SelfIDCount 0x068
33#define OHCI1394_IRMultiChanMaskHiSet 0x070
34#define OHCI1394_IRMultiChanMaskHiClear 0x074
35#define OHCI1394_IRMultiChanMaskLoSet 0x078
36#define OHCI1394_IRMultiChanMaskLoClear 0x07C
37#define OHCI1394_IntEventSet 0x080
38#define OHCI1394_IntEventClear 0x084
39#define OHCI1394_IntMaskSet 0x088
40#define OHCI1394_IntMaskClear 0x08C
41#define OHCI1394_IsoXmitIntEventSet 0x090
42#define OHCI1394_IsoXmitIntEventClear 0x094
43#define OHCI1394_IsoXmitIntMaskSet 0x098
44#define OHCI1394_IsoXmitIntMaskClear 0x09C
45#define OHCI1394_IsoRecvIntEventSet 0x0A0
46#define OHCI1394_IsoRecvIntEventClear 0x0A4
47#define OHCI1394_IsoRecvIntMaskSet 0x0A8
48#define OHCI1394_IsoRecvIntMaskClear 0x0AC
49#define OHCI1394_InitialBandwidthAvailable 0x0B0
50#define OHCI1394_InitialChannelsAvailableHi 0x0B4
51#define OHCI1394_InitialChannelsAvailableLo 0x0B8
52#define OHCI1394_FairnessControl 0x0DC
53#define OHCI1394_LinkControlSet 0x0E0
54#define OHCI1394_LinkControlClear 0x0E4
55#define OHCI1394_LinkControl_rcvSelfID (1 << 9)
56#define OHCI1394_LinkControl_rcvPhyPkt (1 << 10)
57#define OHCI1394_LinkControl_cycleTimerEnable (1 << 20)
58#define OHCI1394_LinkControl_cycleMaster (1 << 21)
59#define OHCI1394_LinkControl_cycleSource (1 << 22)
60#define OHCI1394_NodeID 0x0E8
61#define OHCI1394_NodeID_idValid 0x80000000
62#define OHCI1394_PhyControl 0x0EC
63#define OHCI1394_PhyControl_Read(addr) (((addr) << 8) | 0x00008000)
64#define OHCI1394_PhyControl_ReadDone 0x80000000
65#define OHCI1394_PhyControl_ReadData(r) (((r) & 0x00ff0000) >> 16)
66#define OHCI1394_PhyControl_Write(addr, data) (((addr) << 8) | (data) | 0x00004000)
67#define OHCI1394_PhyControl_WriteDone 0x00004000
68#define OHCI1394_IsochronousCycleTimer 0x0F0
69#define OHCI1394_AsReqFilterHiSet 0x100
70#define OHCI1394_AsReqFilterHiClear 0x104
71#define OHCI1394_AsReqFilterLoSet 0x108
72#define OHCI1394_AsReqFilterLoClear 0x10C
73#define OHCI1394_PhyReqFilterHiSet 0x110
74#define OHCI1394_PhyReqFilterHiClear 0x114
75#define OHCI1394_PhyReqFilterLoSet 0x118
76#define OHCI1394_PhyReqFilterLoClear 0x11C
77#define OHCI1394_PhyUpperBound 0x120
78
79#define OHCI1394_AsReqTrContextBase 0x180
80#define OHCI1394_AsReqTrContextControlSet 0x180
81#define OHCI1394_AsReqTrContextControlClear 0x184
82#define OHCI1394_AsReqTrCommandPtr 0x18C
83
84#define OHCI1394_AsRspTrContextBase 0x1A0
85#define OHCI1394_AsRspTrContextControlSet 0x1A0
86#define OHCI1394_AsRspTrContextControlClear 0x1A4
87#define OHCI1394_AsRspTrCommandPtr 0x1AC
88
89#define OHCI1394_AsReqRcvContextBase 0x1C0
90#define OHCI1394_AsReqRcvContextControlSet 0x1C0
91#define OHCI1394_AsReqRcvContextControlClear 0x1C4
92#define OHCI1394_AsReqRcvCommandPtr 0x1CC
93
94#define OHCI1394_AsRspRcvContextBase 0x1E0
95#define OHCI1394_AsRspRcvContextControlSet 0x1E0
96#define OHCI1394_AsRspRcvContextControlClear 0x1E4
97#define OHCI1394_AsRspRcvCommandPtr 0x1EC
98
99/* Isochronous transmit registers */
100#define OHCI1394_IsoXmitContextBase(n) (0x200 + 16 * (n))
101#define OHCI1394_IsoXmitContextControlSet(n) (0x200 + 16 * (n))
102#define OHCI1394_IsoXmitContextControlClear(n) (0x204 + 16 * (n))
103#define OHCI1394_IsoXmitCommandPtr(n) (0x20C + 16 * (n))
104
105/* Isochronous receive registers */
106#define OHCI1394_IsoRcvContextBase(n) (0x400 + 32 * (n))
107#define OHCI1394_IsoRcvContextControlSet(n) (0x400 + 32 * (n))
108#define OHCI1394_IsoRcvContextControlClear(n) (0x404 + 32 * (n))
109#define OHCI1394_IsoRcvCommandPtr(n) (0x40C + 32 * (n))
110#define OHCI1394_IsoRcvContextMatch(n) (0x410 + 32 * (n))
111
112/* Interrupts Mask/Events */
113#define OHCI1394_reqTxComplete 0x00000001
114#define OHCI1394_respTxComplete 0x00000002
115#define OHCI1394_ARRQ 0x00000004
116#define OHCI1394_ARRS 0x00000008
117#define OHCI1394_RQPkt 0x00000010
118#define OHCI1394_RSPkt 0x00000020
119#define OHCI1394_isochTx 0x00000040
120#define OHCI1394_isochRx 0x00000080
121#define OHCI1394_postedWriteErr 0x00000100
122#define OHCI1394_lockRespErr 0x00000200
123#define OHCI1394_selfIDComplete 0x00010000
124#define OHCI1394_busReset 0x00020000
125#define OHCI1394_phy 0x00080000
126#define OHCI1394_cycleSynch 0x00100000
127#define OHCI1394_cycle64Seconds 0x00200000
128#define OHCI1394_cycleLost 0x00400000
129#define OHCI1394_cycleInconsistent 0x00800000
130#define OHCI1394_unrecoverableError 0x01000000
131#define OHCI1394_cycleTooLong 0x02000000
132#define OHCI1394_phyRegRcvd 0x04000000
133#define OHCI1394_masterIntEnable 0x80000000
134
135#define OHCI1394_evt_no_status 0x0
136#define OHCI1394_evt_long_packet 0x2
137#define OHCI1394_evt_missing_ack 0x3
138#define OHCI1394_evt_underrun 0x4
139#define OHCI1394_evt_overrun 0x5
140#define OHCI1394_evt_descriptor_read 0x6
141#define OHCI1394_evt_data_read 0x7
142#define OHCI1394_evt_data_write 0x8
143#define OHCI1394_evt_bus_reset 0x9
144#define OHCI1394_evt_timeout 0xa
145#define OHCI1394_evt_tcode_err 0xb
146#define OHCI1394_evt_reserved_b 0xc
147#define OHCI1394_evt_reserved_c 0xd
148#define OHCI1394_evt_unknown 0xe
149#define OHCI1394_evt_flushed 0xf
150
151#define OHCI1394_phy_tcode 0xe
152
153#endif /* __fw_ohci_h */
diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c
new file mode 100644
index 000000000000..68300414e5f4
--- /dev/null
+++ b/drivers/firewire/fw-sbp2.c
@@ -0,0 +1,1147 @@
1/*
2 * SBP2 driver (SCSI over IEEE1394)
3 *
4 * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software Foundation,
18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21/*
22 * The basic structure of this driver is based on the old storage driver,
23 * drivers/ieee1394/sbp2.c, originally written by
24 * James Goodwin <jamesg@filanet.com>
25 * with later contributions and ongoing maintenance from
26 * Ben Collins <bcollins@debian.org>,
27 * Stefan Richter <stefanr@s5r6.in-berlin.de>
28 * and many others.
29 */
30
31#include <linux/kernel.h>
32#include <linux/module.h>
33#include <linux/mod_devicetable.h>
34#include <linux/device.h>
35#include <linux/scatterlist.h>
36#include <linux/dma-mapping.h>
37#include <linux/timer.h>
38
39#include <scsi/scsi.h>
40#include <scsi/scsi_cmnd.h>
41#include <scsi/scsi_dbg.h>
42#include <scsi/scsi_device.h>
43#include <scsi/scsi_host.h>
44
45#include "fw-transaction.h"
46#include "fw-topology.h"
47#include "fw-device.h"
48
49/* I don't know why the SCSI stack doesn't define something like this... */
50typedef void (*scsi_done_fn_t)(struct scsi_cmnd *);
51
52static const char sbp2_driver_name[] = "sbp2";
53
54struct sbp2_device {
55 struct kref kref;
56 struct fw_unit *unit;
57 struct fw_address_handler address_handler;
58 struct list_head orb_list;
59 u64 management_agent_address;
60 u64 command_block_agent_address;
61 u32 workarounds;
62 int login_id;
63
64 /*
65 * We cache these addresses and only update them once we've
66 * logged in or reconnected to the sbp2 device. That way, any
67 * IO to the device will automatically fail and get retried if
68 * it happens in a window where the device is not ready to
69 * handle it (e.g. after a bus reset but before we reconnect).
70 */
71 int node_id;
72 int address_high;
73 int generation;
74
75 int retries;
76 struct delayed_work work;
77};
78
79#define SBP2_MAX_SG_ELEMENT_LENGTH 0xf000
80#define SBP2_MAX_SECTORS 255 /* Max sectors supported */
81#define SBP2_ORB_TIMEOUT 2000 /* Timeout in ms */
82
83#define SBP2_ORB_NULL 0x80000000
84
85#define SBP2_DIRECTION_TO_MEDIA 0x0
86#define SBP2_DIRECTION_FROM_MEDIA 0x1
87
88/* Unit directory keys */
89#define SBP2_COMMAND_SET_SPECIFIER 0x38
90#define SBP2_COMMAND_SET 0x39
91#define SBP2_COMMAND_SET_REVISION 0x3b
92#define SBP2_FIRMWARE_REVISION 0x3c
93
 94/* Flags for detected oddities and brokenness */
95#define SBP2_WORKAROUND_128K_MAX_TRANS 0x1
96#define SBP2_WORKAROUND_INQUIRY_36 0x2
97#define SBP2_WORKAROUND_MODE_SENSE_8 0x4
98#define SBP2_WORKAROUND_FIX_CAPACITY 0x8
99#define SBP2_WORKAROUND_OVERRIDE 0x100
100
101/* Management orb opcodes */
102#define SBP2_LOGIN_REQUEST 0x0
103#define SBP2_QUERY_LOGINS_REQUEST 0x1
104#define SBP2_RECONNECT_REQUEST 0x3
105#define SBP2_SET_PASSWORD_REQUEST 0x4
106#define SBP2_LOGOUT_REQUEST 0x7
107#define SBP2_ABORT_TASK_REQUEST 0xb
108#define SBP2_ABORT_TASK_SET 0xc
109#define SBP2_LOGICAL_UNIT_RESET 0xe
110#define SBP2_TARGET_RESET_REQUEST 0xf
111
112/* Offsets for command block agent registers */
113#define SBP2_AGENT_STATE 0x00
114#define SBP2_AGENT_RESET 0x04
115#define SBP2_ORB_POINTER 0x08
116#define SBP2_DOORBELL 0x10
117#define SBP2_UNSOLICITED_STATUS_ENABLE 0x14
118
119/* Status write response codes */
120#define SBP2_STATUS_REQUEST_COMPLETE 0x0
121#define SBP2_STATUS_TRANSPORT_FAILURE 0x1
122#define SBP2_STATUS_ILLEGAL_REQUEST 0x2
123#define SBP2_STATUS_VENDOR_DEPENDENT 0x3
124
125#define STATUS_GET_ORB_HIGH(v) ((v).status & 0xffff)
126#define STATUS_GET_SBP_STATUS(v) (((v).status >> 16) & 0xff)
127#define STATUS_GET_LEN(v) (((v).status >> 24) & 0x07)
128#define STATUS_GET_DEAD(v) (((v).status >> 27) & 0x01)
129#define STATUS_GET_RESPONSE(v) (((v).status >> 28) & 0x03)
130#define STATUS_GET_SOURCE(v) (((v).status >> 30) & 0x03)
131#define STATUS_GET_ORB_LOW(v) ((v).orb_low)
132#define STATUS_GET_DATA(v) ((v).data)
133
134struct sbp2_status {
135 u32 status;
136 u32 orb_low;
137 u8 data[24];
138};
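/*
 * Illustration (assumed values): for a status write whose first
 * quadlet is (2 << 28) | (1 << 27) | (5 << 24) | 0x1234, the
 * accessors above yield STATUS_GET_RESPONSE == 2, STATUS_GET_DEAD == 1,
 * STATUS_GET_LEN == 5 and STATUS_GET_ORB_HIGH == 0x1234.
 */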
139
140struct sbp2_pointer {
141 u32 high;
142 u32 low;
143};
144
145struct sbp2_orb {
146 struct fw_transaction t;
147 dma_addr_t request_bus;
148 int rcode;
149 struct sbp2_pointer pointer;
150 void (*callback)(struct sbp2_orb * orb, struct sbp2_status * status);
151 struct list_head link;
152};
153
154#define MANAGEMENT_ORB_LUN(v) ((v))
155#define MANAGEMENT_ORB_FUNCTION(v) ((v) << 16)
156#define MANAGEMENT_ORB_RECONNECT(v) ((v) << 20)
157#define MANAGEMENT_ORB_EXCLUSIVE ((1) << 28)
158#define MANAGEMENT_ORB_REQUEST_FORMAT(v) ((v) << 29)
159#define MANAGEMENT_ORB_NOTIFY ((1) << 31)
160
161#define MANAGEMENT_ORB_RESPONSE_LENGTH(v) ((v))
162#define MANAGEMENT_ORB_PASSWORD_LENGTH(v) ((v) << 16)
163
164struct sbp2_management_orb {
165 struct sbp2_orb base;
166 struct {
167 struct sbp2_pointer password;
168 struct sbp2_pointer response;
169 u32 misc;
170 u32 length;
171 struct sbp2_pointer status_fifo;
172 } request;
173 __be32 response[4];
174 dma_addr_t response_bus;
175 struct completion done;
176 struct sbp2_status status;
177};
178
179#define LOGIN_RESPONSE_GET_LOGIN_ID(v) ((v).misc & 0xffff)
180#define LOGIN_RESPONSE_GET_LENGTH(v) (((v).misc >> 16) & 0xffff)
181
182struct sbp2_login_response {
183 u32 misc;
184 struct sbp2_pointer command_block_agent;
185 u32 reconnect_hold;
186};
187#define COMMAND_ORB_DATA_SIZE(v) ((v))
188#define COMMAND_ORB_PAGE_SIZE(v) ((v) << 16)
189#define COMMAND_ORB_PAGE_TABLE_PRESENT ((1) << 19)
190#define COMMAND_ORB_MAX_PAYLOAD(v) ((v) << 20)
191#define COMMAND_ORB_SPEED(v) ((v) << 24)
192#define COMMAND_ORB_DIRECTION(v) ((v) << 27)
193#define COMMAND_ORB_REQUEST_FORMAT(v) ((v) << 29)
194#define COMMAND_ORB_NOTIFY ((1) << 31)
195
196struct sbp2_command_orb {
197 struct sbp2_orb base;
198 struct {
199 struct sbp2_pointer next;
200 struct sbp2_pointer data_descriptor;
201 u32 misc;
202 u8 command_block[12];
203 } request;
204 struct scsi_cmnd *cmd;
205 scsi_done_fn_t done;
206 struct fw_unit *unit;
207
208 struct sbp2_pointer page_table[SG_ALL];
209 dma_addr_t page_table_bus;
210 dma_addr_t request_buffer_bus;
211};
212
213/*
214 * List of devices with known bugs.
215 *
216 * The firmware_revision field, masked with 0xffff00, is the best
217 * indicator for the type of bridge chip in a device. It yields a few
218 * false positives, but so far it has not broken any correctly
219 * behaving devices. We use ~0 as a wildcard, since the 24-bit values
220 * we get from the config ROM can never match it.
221 */
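/*
 * For example, a bridge reporting firmware_revision 0x002897 (a
 * made-up value) still matches the 0x002800 entry below, since
 * 0x002897 & 0xffff00 == 0x002800.
 */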
222static const struct {
223 u32 firmware_revision;
224 u32 model;
225 unsigned workarounds;
226} sbp2_workarounds_table[] = {
227 /* DViCO Momobay CX-1 with TSB42AA9 bridge */ {
228 .firmware_revision = 0x002800,
229 .model = 0x001010,
230 .workarounds = SBP2_WORKAROUND_INQUIRY_36 |
231 SBP2_WORKAROUND_MODE_SENSE_8,
232 },
233 /* Initio bridges, actually only needed for some older ones */ {
234 .firmware_revision = 0x000200,
235 .model = ~0,
236 .workarounds = SBP2_WORKAROUND_INQUIRY_36,
237 },
238 /* Symbios bridge */ {
239 .firmware_revision = 0xa0b800,
240 .model = ~0,
241 .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS,
242 },
243
244 /*
245 * There are iPods (2nd gen, 3rd gen) with model_id == 0, but
246 * according to one report these iPods do not exhibit the
247 * read_capacity bug. Both read_capacity behaviour and model_id
248 * could change with Apple-supplied firmware updates, though.
249 */
250
251 /* iPod 4th generation. */ {
252 .firmware_revision = 0x0a2700,
253 .model = 0x000021,
254 .workarounds = SBP2_WORKAROUND_FIX_CAPACITY,
255 },
256 /* iPod mini */ {
257 .firmware_revision = 0x0a2700,
258 .model = 0x000023,
259 .workarounds = SBP2_WORKAROUND_FIX_CAPACITY,
260 },
261 /* iPod Photo */ {
262 .firmware_revision = 0x0a2700,
263 .model = 0x00007e,
264 .workarounds = SBP2_WORKAROUND_FIX_CAPACITY,
265 }
266};
267
268static void
269sbp2_status_write(struct fw_card *card, struct fw_request *request,
270 int tcode, int destination, int source,
271 int generation, int speed,
272 unsigned long long offset,
273 void *payload, size_t length, void *callback_data)
274{
275 struct sbp2_device *sd = callback_data;
276 struct sbp2_orb *orb;
277 struct sbp2_status status;
278 size_t header_size;
279 unsigned long flags;
280
281 if (tcode != TCODE_WRITE_BLOCK_REQUEST ||
282 length == 0 || length > sizeof(status)) {
283 fw_send_response(card, request, RCODE_TYPE_ERROR);
284 return;
285 }
286
287 header_size = min(length, 2 * sizeof(u32));
288 fw_memcpy_from_be32(&status, payload, header_size);
289 if (length > header_size)
290 memcpy(status.data, payload + 8, length - header_size);
291 if (STATUS_GET_SOURCE(status) == 2 || STATUS_GET_SOURCE(status) == 3) {
292 fw_notify("non-orb related status write, not handled\n");
293 fw_send_response(card, request, RCODE_COMPLETE);
294 return;
295 }
296
 297 /* Look up the orb corresponding to this status write. */
298 spin_lock_irqsave(&card->lock, flags);
299 list_for_each_entry(orb, &sd->orb_list, link) {
300 if (STATUS_GET_ORB_HIGH(status) == 0 &&
301 STATUS_GET_ORB_LOW(status) == orb->request_bus &&
302 orb->rcode == RCODE_COMPLETE) {
303 list_del(&orb->link);
304 break;
305 }
306 }
307 spin_unlock_irqrestore(&card->lock, flags);
308
309 if (&orb->link != &sd->orb_list)
310 orb->callback(orb, &status);
311 else
312 fw_error("status write for unknown orb\n");
313
314 fw_send_response(card, request, RCODE_COMPLETE);
315}
316
317static void
318complete_transaction(struct fw_card *card, int rcode,
319 void *payload, size_t length, void *data)
320{
321 struct sbp2_orb *orb = data;
322 unsigned long flags;
323
324 orb->rcode = rcode;
325 if (rcode != RCODE_COMPLETE) {
326 spin_lock_irqsave(&card->lock, flags);
327 list_del(&orb->link);
328 spin_unlock_irqrestore(&card->lock, flags);
329 orb->callback(orb, NULL);
330 }
331}
332
333static void
334sbp2_send_orb(struct sbp2_orb *orb, struct fw_unit *unit,
335 int node_id, int generation, u64 offset)
336{
337 struct fw_device *device = fw_device(unit->device.parent);
338 struct sbp2_device *sd = unit->device.driver_data;
339 unsigned long flags;
340
341 orb->pointer.high = 0;
342 orb->pointer.low = orb->request_bus;
343 fw_memcpy_to_be32(&orb->pointer, &orb->pointer, sizeof(orb->pointer));
344
345 spin_lock_irqsave(&device->card->lock, flags);
346 list_add_tail(&orb->link, &sd->orb_list);
347 spin_unlock_irqrestore(&device->card->lock, flags);
348
349 fw_send_request(device->card, &orb->t, TCODE_WRITE_BLOCK_REQUEST,
350 node_id, generation,
351 device->node->max_speed, offset,
352 &orb->pointer, sizeof(orb->pointer),
353 complete_transaction, orb);
354}
355
356static int sbp2_cancel_orbs(struct fw_unit *unit)
357{
358 struct fw_device *device = fw_device(unit->device.parent);
359 struct sbp2_device *sd = unit->device.driver_data;
360 struct sbp2_orb *orb, *next;
361 struct list_head list;
362 unsigned long flags;
363 int retval = -ENOENT;
364
365 INIT_LIST_HEAD(&list);
366 spin_lock_irqsave(&device->card->lock, flags);
367 list_splice_init(&sd->orb_list, &list);
368 spin_unlock_irqrestore(&device->card->lock, flags);
369
370 list_for_each_entry_safe(orb, next, &list, link) {
371 retval = 0;
372 if (fw_cancel_transaction(device->card, &orb->t) == 0)
373 continue;
374
375 orb->rcode = RCODE_CANCELLED;
376 orb->callback(orb, NULL);
377 }
378
379 return retval;
380}
381
382static void
383complete_management_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
384{
385 struct sbp2_management_orb *orb =
386 (struct sbp2_management_orb *)base_orb;
387
388 if (status)
389 memcpy(&orb->status, status, sizeof(*status));
390 complete(&orb->done);
391}
392
393static int
394sbp2_send_management_orb(struct fw_unit *unit, int node_id, int generation,
395 int function, int lun, void *response)
396{
397 struct fw_device *device = fw_device(unit->device.parent);
398 struct sbp2_device *sd = unit->device.driver_data;
399 struct sbp2_management_orb *orb;
400 int retval = -ENOMEM;
401
402 orb = kzalloc(sizeof(*orb), GFP_ATOMIC);
403 if (orb == NULL)
404 return -ENOMEM;
405
406 /*
407 * The sbp2 device is going to send a block read request to
408 * read out the request from host memory, so map it for DMA.
409 */
410 orb->base.request_bus =
411 dma_map_single(device->card->device, &orb->request,
412 sizeof(orb->request), DMA_TO_DEVICE);
413 if (dma_mapping_error(orb->base.request_bus))
414 goto out;
415
416 orb->response_bus =
417 dma_map_single(device->card->device, &orb->response,
418 sizeof(orb->response), DMA_FROM_DEVICE);
419 if (dma_mapping_error(orb->response_bus))
420 goto out;
421
422 orb->request.response.high = 0;
423 orb->request.response.low = orb->response_bus;
424
425 orb->request.misc =
426 MANAGEMENT_ORB_NOTIFY |
427 MANAGEMENT_ORB_FUNCTION(function) |
428 MANAGEMENT_ORB_LUN(lun);
429 orb->request.length =
430 MANAGEMENT_ORB_RESPONSE_LENGTH(sizeof(orb->response));
431
432 orb->request.status_fifo.high = sd->address_handler.offset >> 32;
433 orb->request.status_fifo.low = sd->address_handler.offset;
434
435 /*
436 * FIXME: Yeah, ok this isn't elegant, we hardwire exclusive
437 * login and 1 second reconnect time. The reconnect setting
438 * is probably fine, but the exclusive login should be an option.
439 */
440 if (function == SBP2_LOGIN_REQUEST) {
441 orb->request.misc |=
442 MANAGEMENT_ORB_EXCLUSIVE |
443 MANAGEMENT_ORB_RECONNECT(0);
444 }
445
446 fw_memcpy_to_be32(&orb->request, &orb->request, sizeof(orb->request));
447
448 init_completion(&orb->done);
449 orb->base.callback = complete_management_orb;
450
451 sbp2_send_orb(&orb->base, unit,
452 node_id, generation, sd->management_agent_address);
453
454 wait_for_completion_timeout(&orb->done,
455 msecs_to_jiffies(SBP2_ORB_TIMEOUT));
456
457 retval = -EIO;
458 if (sbp2_cancel_orbs(unit) == 0) {
459 fw_error("orb reply timed out, rcode=0x%02x\n",
460 orb->base.rcode);
461 goto out;
462 }
463
464 if (orb->base.rcode != RCODE_COMPLETE) {
465 fw_error("management write failed, rcode 0x%02x\n",
466 orb->base.rcode);
467 goto out;
468 }
469
470 if (STATUS_GET_RESPONSE(orb->status) != 0 ||
471 STATUS_GET_SBP_STATUS(orb->status) != 0) {
472 fw_error("error status: %d:%d\n",
473 STATUS_GET_RESPONSE(orb->status),
474 STATUS_GET_SBP_STATUS(orb->status));
475 goto out;
476 }
477
478 retval = 0;
479 out:
480 dma_unmap_single(device->card->device, orb->base.request_bus,
481 sizeof(orb->request), DMA_TO_DEVICE);
482 dma_unmap_single(device->card->device, orb->response_bus,
483 sizeof(orb->response), DMA_FROM_DEVICE);
484
485 if (response)
486 fw_memcpy_from_be32(response,
487 orb->response, sizeof(orb->response));
488 kfree(orb);
489
490 return retval;
491}
492
493static void
494complete_agent_reset_write(struct fw_card *card, int rcode,
495 void *payload, size_t length, void *data)
496{
497 struct fw_transaction *t = data;
498
499 kfree(t);
500}
501
502static int sbp2_agent_reset(struct fw_unit *unit)
503{
504 struct fw_device *device = fw_device(unit->device.parent);
505 struct sbp2_device *sd = unit->device.driver_data;
506 struct fw_transaction *t;
507 static u32 zero;
508
509 t = kzalloc(sizeof(*t), GFP_ATOMIC);
510 if (t == NULL)
511 return -ENOMEM;
512
513 fw_send_request(device->card, t, TCODE_WRITE_QUADLET_REQUEST,
514 sd->node_id, sd->generation, SCODE_400,
515 sd->command_block_agent_address + SBP2_AGENT_RESET,
516 &zero, sizeof(zero), complete_agent_reset_write, t);
517
518 return 0;
519}
520
521static void sbp2_reconnect(struct work_struct *work);
522static struct scsi_host_template scsi_driver_template;
523
524static void
525release_sbp2_device(struct kref *kref)
526{
527 struct sbp2_device *sd = container_of(kref, struct sbp2_device, kref);
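	/*
	 * The sbp2_device lives in the Scsi_Host's hostdata (see
	 * sbp2_probe()), so the containing host can be recovered from it.
	 */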
528 struct Scsi_Host *host =
529 container_of((void *)sd, struct Scsi_Host, hostdata[0]);
530
531 sbp2_send_management_orb(sd->unit, sd->node_id, sd->generation,
532 SBP2_LOGOUT_REQUEST, sd->login_id, NULL);
533
534 scsi_remove_host(host);
535 fw_core_remove_address_handler(&sd->address_handler);
536 fw_notify("removed sbp2 unit %s\n", sd->unit->device.bus_id);
537 put_device(&sd->unit->device);
538 scsi_host_put(host);
539}
540
541static void sbp2_login(struct work_struct *work)
542{
543 struct sbp2_device *sd =
544 container_of(work, struct sbp2_device, work.work);
545 struct Scsi_Host *host =
546 container_of((void *)sd, struct Scsi_Host, hostdata[0]);
547 struct fw_unit *unit = sd->unit;
548 struct fw_device *device = fw_device(unit->device.parent);
549 struct sbp2_login_response response;
550 int generation, node_id, local_node_id, lun, retval;
551
552 /* FIXME: Make this work for multi-lun devices. */
553 lun = 0;
554
555 generation = device->card->generation;
556 node_id = device->node->node_id;
557 local_node_id = device->card->local_node->node_id;
558
559 if (sbp2_send_management_orb(unit, node_id, generation,
560 SBP2_LOGIN_REQUEST, lun, &response) < 0) {
561 if (sd->retries++ < 5) {
562 schedule_delayed_work(&sd->work, DIV_ROUND_UP(HZ, 5));
563 } else {
564 fw_error("failed to login to %s\n",
565 unit->device.bus_id);
566 kref_put(&sd->kref, release_sbp2_device);
567 }
568 return;
569 }
570
571 sd->generation = generation;
572 sd->node_id = node_id;
573 sd->address_high = local_node_id << 16;
574
575 /* Get command block agent offset and login id. */
576 sd->command_block_agent_address =
577 ((u64) (response.command_block_agent.high & 0xffff) << 32) |
578 response.command_block_agent.low;
579 sd->login_id = LOGIN_RESPONSE_GET_LOGIN_ID(response);
580
581 fw_notify("logged in to sbp2 unit %s (%d retries)\n",
582 unit->device.bus_id, sd->retries);
583 fw_notify(" - management_agent_address: 0x%012llx\n",
584 (unsigned long long) sd->management_agent_address);
585 fw_notify(" - command_block_agent_address: 0x%012llx\n",
586 (unsigned long long) sd->command_block_agent_address);
587 fw_notify(" - status write address: 0x%012llx\n",
588 (unsigned long long) sd->address_handler.offset);
589
590#if 0
591 /* FIXME: The linux1394 sbp2 does this last step. */
592 sbp2_set_busy_timeout(scsi_id);
593#endif
594
595 PREPARE_DELAYED_WORK(&sd->work, sbp2_reconnect);
596 sbp2_agent_reset(unit);
597
598 /* FIXME: Loop over luns here. */
599 lun = 0;
600 retval = scsi_add_device(host, 0, 0, lun);
601 if (retval < 0) {
602 sbp2_send_management_orb(unit, sd->node_id, sd->generation,
603 SBP2_LOGOUT_REQUEST, sd->login_id,
604 NULL);
605 /*
606 * Set this back to sbp2_login so we fall back and
607 * retry login on bus reset.
608 */
609 PREPARE_DELAYED_WORK(&sd->work, sbp2_login);
610 }
611 kref_put(&sd->kref, release_sbp2_device);
612}
613
614static int sbp2_probe(struct device *dev)
615{
616 struct fw_unit *unit = fw_unit(dev);
617 struct fw_device *device = fw_device(unit->device.parent);
618 struct sbp2_device *sd;
619 struct fw_csr_iterator ci;
620 struct Scsi_Host *host;
621 int i, key, value, err;
622 u32 model, firmware_revision;
623
624 err = -ENOMEM;
625 host = scsi_host_alloc(&scsi_driver_template, sizeof(*sd));
626 if (host == NULL)
627 goto fail;
628
629 sd = (struct sbp2_device *) host->hostdata;
630 unit->device.driver_data = sd;
631 sd->unit = unit;
632 INIT_LIST_HEAD(&sd->orb_list);
633 kref_init(&sd->kref);
634
635 sd->address_handler.length = 0x100;
636 sd->address_handler.address_callback = sbp2_status_write;
637 sd->address_handler.callback_data = sd;
638
639 err = fw_core_add_address_handler(&sd->address_handler,
640 &fw_high_memory_region);
641 if (err < 0)
642 goto fail_host;
643
644 err = fw_device_enable_phys_dma(device);
645 if (err < 0)
646 goto fail_address_handler;
647
648 err = scsi_add_host(host, &unit->device);
649 if (err < 0)
650 goto fail_address_handler;
651
652 /*
653 * Scan unit directory to get management agent address,
654 * firmware revision and model. Initialize firmware_revision
655 * and model to values that won't match anything in our table.
656 */
657 firmware_revision = 0xff000000;
658 model = 0xff000000;
659 fw_csr_iterator_init(&ci, unit->directory);
660 while (fw_csr_iterator_next(&ci, &key, &value)) {
661 switch (key) {
662 case CSR_DEPENDENT_INFO | CSR_OFFSET:
663 sd->management_agent_address =
664 0xfffff0000000ULL + 4 * value;
665 break;
666 case SBP2_FIRMWARE_REVISION:
667 firmware_revision = value;
668 break;
669 case CSR_MODEL:
670 model = value;
671 break;
672 }
673 }
674
675 for (i = 0; i < ARRAY_SIZE(sbp2_workarounds_table); i++) {
676 if (sbp2_workarounds_table[i].firmware_revision !=
677 (firmware_revision & 0xffffff00))
678 continue;
679 if (sbp2_workarounds_table[i].model != model &&
680 sbp2_workarounds_table[i].model != ~0)
681 continue;
682 sd->workarounds |= sbp2_workarounds_table[i].workarounds;
683 break;
684 }
685
686 if (sd->workarounds)
687 fw_notify("Workarounds for node %s: 0x%x "
688 "(firmware_revision 0x%06x, model_id 0x%06x)\n",
689 unit->device.bus_id,
690 sd->workarounds, firmware_revision, model);
691
692 get_device(&unit->device);
693
694 /*
695 * We schedule work to do the login so we can easily
696 * reschedule retries. Always get the ref before scheduling
697 * work.
698 */
699 INIT_DELAYED_WORK(&sd->work, sbp2_login);
700 if (schedule_delayed_work(&sd->work, 0))
701 kref_get(&sd->kref);
702
703 return 0;
704
705 fail_address_handler:
706 fw_core_remove_address_handler(&sd->address_handler);
707 fail_host:
708 scsi_host_put(host);
709 fail:
710 return err;
711}
712
713static int sbp2_remove(struct device *dev)
714{
715 struct fw_unit *unit = fw_unit(dev);
716 struct sbp2_device *sd = unit->device.driver_data;
717
718 kref_put(&sd->kref, release_sbp2_device);
719
720 return 0;
721}
722
723static void sbp2_reconnect(struct work_struct *work)
724{
725 struct sbp2_device *sd =
726 container_of(work, struct sbp2_device, work.work);
727 struct fw_unit *unit = sd->unit;
728 struct fw_device *device = fw_device(unit->device.parent);
729 int generation, node_id, local_node_id;
730
731 generation = device->card->generation;
732 node_id = device->node->node_id;
733 local_node_id = device->card->local_node->node_id;
734
735 if (sbp2_send_management_orb(unit, node_id, generation,
736 SBP2_RECONNECT_REQUEST,
737 sd->login_id, NULL) < 0) {
738 if (sd->retries++ >= 5) {
739 fw_error("failed to reconnect to %s\n",
740 unit->device.bus_id);
741 /* Fall back and try to log in again. */
742 sd->retries = 0;
743 PREPARE_DELAYED_WORK(&sd->work, sbp2_login);
744 }
745 schedule_delayed_work(&sd->work, DIV_ROUND_UP(HZ, 5));
746 return;
747 }
748
749 sd->generation = generation;
750 sd->node_id = node_id;
751 sd->address_high = local_node_id << 16;
752
753 fw_notify("reconnected to unit %s (%d retries)\n",
754 unit->device.bus_id, sd->retries);
755 sbp2_agent_reset(unit);
756 sbp2_cancel_orbs(unit);
757 kref_put(&sd->kref, release_sbp2_device);
758}
759
760static void sbp2_update(struct fw_unit *unit)
761{
762 struct fw_device *device = fw_device(unit->device.parent);
763 struct sbp2_device *sd = unit->device.driver_data;
764
765 sd->retries = 0;
766 fw_device_enable_phys_dma(device);
767 if (schedule_delayed_work(&sd->work, 0))
768 kref_get(&sd->kref);
769}
770
771#define SBP2_UNIT_SPEC_ID_ENTRY 0x0000609e
772#define SBP2_SW_VERSION_ENTRY 0x00010483
773
774static const struct fw_device_id sbp2_id_table[] = {
775 {
776 .match_flags = FW_MATCH_SPECIFIER_ID | FW_MATCH_VERSION,
777 .specifier_id = SBP2_UNIT_SPEC_ID_ENTRY,
778 .version = SBP2_SW_VERSION_ENTRY,
779 },
780 { }
781};
782
783static struct fw_driver sbp2_driver = {
784 .driver = {
785 .owner = THIS_MODULE,
786 .name = sbp2_driver_name,
787 .bus = &fw_bus_type,
788 .probe = sbp2_probe,
789 .remove = sbp2_remove,
790 },
791 .update = sbp2_update,
792 .id_table = sbp2_id_table,
793};
794
795static unsigned int
796sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data)
797{
798 int sam_status;
799
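	/*
	 * Build fixed-format SCSI sense data (response code 0x70) from
	 * the sense information carried in the SBP-2 status block.
	 */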
800 sense_data[0] = 0x70;
801 sense_data[1] = 0x0;
802 sense_data[2] = sbp2_status[1];
803 sense_data[3] = sbp2_status[4];
804 sense_data[4] = sbp2_status[5];
805 sense_data[5] = sbp2_status[6];
806 sense_data[6] = sbp2_status[7];
807 sense_data[7] = 10;
808 sense_data[8] = sbp2_status[8];
809 sense_data[9] = sbp2_status[9];
810 sense_data[10] = sbp2_status[10];
811 sense_data[11] = sbp2_status[11];
812 sense_data[12] = sbp2_status[2];
813 sense_data[13] = sbp2_status[3];
814 sense_data[14] = sbp2_status[12];
815 sense_data[15] = sbp2_status[13];
816
817 sam_status = sbp2_status[0] & 0x3f;
818
819 switch (sam_status) {
820 case SAM_STAT_GOOD:
821 case SAM_STAT_CHECK_CONDITION:
822 case SAM_STAT_CONDITION_MET:
823 case SAM_STAT_BUSY:
824 case SAM_STAT_RESERVATION_CONFLICT:
825 case SAM_STAT_COMMAND_TERMINATED:
826 return DID_OK << 16 | sam_status;
827
828 default:
829 return DID_ERROR << 16;
830 }
831}
832
833static void
834complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
835{
836 struct sbp2_command_orb *orb = (struct sbp2_command_orb *)base_orb;
837 struct fw_unit *unit = orb->unit;
838 struct fw_device *device = fw_device(unit->device.parent);
839 struct scatterlist *sg;
840 int result;
841
842 if (status != NULL) {
843 if (STATUS_GET_DEAD(*status))
844 sbp2_agent_reset(unit);
845
846 switch (STATUS_GET_RESPONSE(*status)) {
847 case SBP2_STATUS_REQUEST_COMPLETE:
848 result = DID_OK << 16;
849 break;
850 case SBP2_STATUS_TRANSPORT_FAILURE:
851 result = DID_BUS_BUSY << 16;
852 break;
853 case SBP2_STATUS_ILLEGAL_REQUEST:
854 case SBP2_STATUS_VENDOR_DEPENDENT:
855 default:
856 result = DID_ERROR << 16;
857 break;
858 }
859
860 if (result == DID_OK << 16 && STATUS_GET_LEN(*status) > 1)
861 result = sbp2_status_to_sense_data(STATUS_GET_DATA(*status),
862 orb->cmd->sense_buffer);
863 } else {
864 /*
865 * If the orb completes with status == NULL, something
866 * went wrong; typically a bus reset happened mid-orb,
867 * or (less likely) while sending the write.
868 */
869 result = DID_BUS_BUSY << 16;
870 }
871
872 dma_unmap_single(device->card->device, orb->base.request_bus,
873 sizeof(orb->request), DMA_TO_DEVICE);
874
875 if (orb->cmd->use_sg > 0) {
876 sg = (struct scatterlist *)orb->cmd->request_buffer;
877 dma_unmap_sg(device->card->device, sg, orb->cmd->use_sg,
878 orb->cmd->sc_data_direction);
879 }
880
881 if (orb->page_table_bus != 0)
882 dma_unmap_single(device->card->device, orb->page_table_bus,
883 sizeof(orb->page_table), DMA_TO_DEVICE);
884
885 if (orb->request_buffer_bus != 0)
886 dma_unmap_single(device->card->device, orb->request_buffer_bus,
887 sizeof(orb->request_buffer_bus),
888 DMA_FROM_DEVICE);
889
890 orb->cmd->result = result;
891 orb->done(orb->cmd);
892 kfree(orb);
893}
894
895static int sbp2_command_orb_map_scatterlist(struct sbp2_command_orb *orb)
896{
897 struct sbp2_device *sd =
898 (struct sbp2_device *)orb->cmd->device->host->hostdata;
899 struct fw_unit *unit = sd->unit;
900 struct fw_device *device = fw_device(unit->device.parent);
901 struct scatterlist *sg;
902 int sg_len, l, i, j, count;
903 size_t size;
904 dma_addr_t sg_addr;
905
906 sg = (struct scatterlist *)orb->cmd->request_buffer;
907 count = dma_map_sg(device->card->device, sg, orb->cmd->use_sg,
908 orb->cmd->sc_data_direction);
909 if (count == 0)
910 goto fail;
911
912 /*
913 * Handle the special case where there is only one element in
914 * the scatter list by converting it to an immediate block
915 * request. This is also a workaround for broken devices such
916 * as the second-generation iPod, which doesn't support page
917 * tables.
918 */
919 if (count == 1 && sg_dma_len(sg) < SBP2_MAX_SG_ELEMENT_LENGTH) {
920 orb->request.data_descriptor.high = sd->address_high;
921 orb->request.data_descriptor.low = sg_dma_address(sg);
922 orb->request.misc |=
923 COMMAND_ORB_DATA_SIZE(sg_dma_len(sg));
924 return 0;
925 }
926
927 /*
928 * Convert the scatterlist to an sbp2 page table. If any
929 * scatterlist entries are too big for sbp2, we split them as we
930 * go. Even if we ask the block I/O layer not to give us sg
931 * elements larger than 65535 bytes, some IOMMUs may merge sg elements
932 * during DMA mapping, and Linux currently doesn't prevent this.
933 */
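	/*
	 * For example (illustrative numbers): a merged 0x15000-byte
	 * element is emitted as two page table entries, one of 0xf000
	 * bytes (SBP2_MAX_SG_ELEMENT_LENGTH) and one of 0x6000 bytes,
	 * each encoded as { high = length << 16, low = bus address }.
	 */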
934 for (i = 0, j = 0; i < count; i++) {
935 sg_len = sg_dma_len(sg + i);
936 sg_addr = sg_dma_address(sg + i);
937 while (sg_len) {
938 l = min(sg_len, SBP2_MAX_SG_ELEMENT_LENGTH);
939 orb->page_table[j].low = sg_addr;
940 orb->page_table[j].high = (l << 16);
941 sg_addr += l;
942 sg_len -= l;
943 j++;
944 }
945 }
946
947 size = sizeof(orb->page_table[0]) * j;
948
949 /*
950 * The data_descriptor pointer is the one case where we need
951 * to fill in the node ID part of the address. All other
952 * pointers assume that the data referenced resides on the
953 * initiator (i.e. us), but data_descriptor can refer to data
954 * on other nodes, so we need to put our ID in descriptor.high.
955 */
956
957 orb->page_table_bus =
958 dma_map_single(device->card->device, orb->page_table,
959 sizeof(orb->page_table), DMA_TO_DEVICE);
960 if (dma_mapping_error(orb->page_table_bus))
961 goto fail_page_table;
962 orb->request.data_descriptor.high = sd->address_high;
963 orb->request.data_descriptor.low = orb->page_table_bus;
964 orb->request.misc |=
965 COMMAND_ORB_PAGE_TABLE_PRESENT |
966 COMMAND_ORB_DATA_SIZE(j);
967
968 fw_memcpy_to_be32(orb->page_table, orb->page_table, size);
969
970 return 0;
971
972 fail_page_table:
973 dma_unmap_sg(device->card->device, sg, orb->cmd->use_sg,
974 orb->cmd->sc_data_direction);
975 fail:
976 return -ENOMEM;
977}
978
979/* SCSI stack integration */
980
981static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
982{
983 struct sbp2_device *sd =
984 (struct sbp2_device *)cmd->device->host->hostdata;
985 struct fw_unit *unit = sd->unit;
986 struct fw_device *device = fw_device(unit->device.parent);
987 struct sbp2_command_orb *orb;
988
989 /*
990 * Bidirectional commands are not yet implemented, and unknown
991 * transfer directions are not handled.
992 */
993 if (cmd->sc_data_direction == DMA_BIDIRECTIONAL) {
994 fw_error("Cannot handle DMA_BIDIRECTIONAL - rejecting command");
995 cmd->result = DID_ERROR << 16;
996 done(cmd);
997 return 0;
998 }
999
1000 orb = kzalloc(sizeof(*orb), GFP_ATOMIC);
1001 if (orb == NULL) {
1002 fw_notify("failed to alloc orb\n");
1003 goto fail_alloc;
1004 }
1005
1006 /* Initialize rcode to something not RCODE_COMPLETE. */
1007 orb->base.rcode = -1;
1008 orb->base.request_bus =
1009 dma_map_single(device->card->device, &orb->request,
1010 sizeof(orb->request), DMA_TO_DEVICE);
1011 if (dma_mapping_error(orb->base.request_bus))
1012 goto fail_mapping;
1013
1014 orb->unit = unit;
1015 orb->done = done;
1016 orb->cmd = cmd;
1017
1018 orb->request.next.high = SBP2_ORB_NULL;
1019 orb->request.next.low = 0x0;
1020 /*
1021 * At speed 100 we can do 512 bytes per packet, at speed 200,
1022 * 1024 bytes per packet etc. The SBP-2 max_payload field
1023 * specifies the max payload size as 2 ^ (max_payload + 2), so
1024 * if we set this to max_speed + 7, we get the right value.
1025 */
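	/*
	 * Worked example: at S400, max_speed == 2, so we store
	 * 2 + 7 == 9 and the resulting payload limit is
	 * 2 ^ (9 + 2) == 2048 bytes, four times S100's 512.
	 */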
1026 orb->request.misc =
1027 COMMAND_ORB_MAX_PAYLOAD(device->node->max_speed + 7) |
1028 COMMAND_ORB_SPEED(device->node->max_speed) |
1029 COMMAND_ORB_NOTIFY;
1030
1031 if (cmd->sc_data_direction == DMA_FROM_DEVICE)
1032 orb->request.misc |=
1033 COMMAND_ORB_DIRECTION(SBP2_DIRECTION_FROM_MEDIA);
1034 else if (cmd->sc_data_direction == DMA_TO_DEVICE)
1035 orb->request.misc |=
1036 COMMAND_ORB_DIRECTION(SBP2_DIRECTION_TO_MEDIA);
1037
1038 if (cmd->use_sg && sbp2_command_orb_map_scatterlist(orb) < 0)
1039 goto fail_map_payload;
1040
1041 fw_memcpy_to_be32(&orb->request, &orb->request, sizeof(orb->request));
1042
1043 memset(orb->request.command_block,
1044 0, sizeof(orb->request.command_block));
1045 memcpy(orb->request.command_block, cmd->cmnd, COMMAND_SIZE(*cmd->cmnd));
1046
1047 orb->base.callback = complete_command_orb;
1048
1049 sbp2_send_orb(&orb->base, unit, sd->node_id, sd->generation,
1050 sd->command_block_agent_address + SBP2_ORB_POINTER);
1051
1052 return 0;
1053
1054 fail_map_payload:
1055 dma_unmap_single(device->card->device, orb->base.request_bus,
1056 sizeof(orb->request), DMA_TO_DEVICE);
1057 fail_mapping:
1058 kfree(orb);
1059 fail_alloc:
1060 return SCSI_MLQUEUE_HOST_BUSY;
1061}
1062
1063static int sbp2_scsi_slave_alloc(struct scsi_device *sdev)
1064{
1065 struct sbp2_device *sd = (struct sbp2_device *)sdev->host->hostdata;
1066
1067 sdev->allow_restart = 1;
1068
1069 if (sd->workarounds & SBP2_WORKAROUND_INQUIRY_36)
1070 sdev->inquiry_len = 36;
1071 return 0;
1072}
1073
1074static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
1075{
1076 struct sbp2_device *sd = (struct sbp2_device *)sdev->host->hostdata;
1077 struct fw_unit *unit = sd->unit;
1078
1079 sdev->use_10_for_rw = 1;
1080
1081 if (sdev->type == TYPE_ROM)
1082 sdev->use_10_for_ms = 1;
1083 if (sdev->type == TYPE_DISK &&
1084 sd->workarounds & SBP2_WORKAROUND_MODE_SENSE_8)
1085 sdev->skip_ms_page_8 = 1;
1086 if (sd->workarounds & SBP2_WORKAROUND_FIX_CAPACITY) {
1087 fw_notify("setting fix_capacity for %s\n", unit->device.bus_id);
1088 sdev->fix_capacity = 1;
1089 }
1090
1091 return 0;
1092}
1093
1094/*
1095 * Called by the SCSI stack when something has really gone wrong.
1096 * Usually called when a command has timed out for some reason.
1097 */
1098static int sbp2_scsi_abort(struct scsi_cmnd *cmd)
1099{
1100 struct sbp2_device *sd =
1101 (struct sbp2_device *)cmd->device->host->hostdata;
1102 struct fw_unit *unit = sd->unit;
1103
1104 fw_notify("sbp2_scsi_abort\n");
1105 sbp2_agent_reset(unit);
1106 sbp2_cancel_orbs(unit);
1107
1108 return SUCCESS;
1109}
1110
1111static struct scsi_host_template scsi_driver_template = {
1112 .module = THIS_MODULE,
1113 .name = "SBP-2 IEEE-1394",
1114 .proc_name = (char *)sbp2_driver_name,
1115 .queuecommand = sbp2_scsi_queuecommand,
1116 .slave_alloc = sbp2_scsi_slave_alloc,
1117 .slave_configure = sbp2_scsi_slave_configure,
1118 .eh_abort_handler = sbp2_scsi_abort,
1119 .this_id = -1,
1120 .sg_tablesize = SG_ALL,
1121 .use_clustering = ENABLE_CLUSTERING,
1122 .cmd_per_lun = 1,
1123 .can_queue = 1,
1124};
1125
1126MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
1127MODULE_DESCRIPTION("SCSI over IEEE1394");
1128MODULE_LICENSE("GPL");
1129MODULE_DEVICE_TABLE(ieee1394, sbp2_id_table);
1130
1131/* Provide a module alias so root-on-sbp2 initrds don't break. */
1132#ifndef CONFIG_IEEE1394_SBP2_MODULE
1133MODULE_ALIAS("sbp2");
1134#endif
1135
1136static int __init sbp2_init(void)
1137{
1138 return driver_register(&sbp2_driver.driver);
1139}
1140
1141static void __exit sbp2_cleanup(void)
1142{
1143 driver_unregister(&sbp2_driver.driver);
1144}
1145
1146module_init(sbp2_init);
1147module_exit(sbp2_cleanup);
diff --git a/drivers/firewire/fw-topology.c b/drivers/firewire/fw-topology.c
new file mode 100644
index 000000000000..7aebb8ae0efa
--- /dev/null
+++ b/drivers/firewire/fw-topology.c
@@ -0,0 +1,537 @@
1/*
2 * Incremental bus scan, based on bus topology
3 *
4 * Copyright (C) 2004-2006 Kristian Hoegsberg <krh@bitplanet.net>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software Foundation,
18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21#include <linux/module.h>
22#include <linux/wait.h>
23#include <linux/errno.h>
24#include "fw-transaction.h"
25#include "fw-topology.h"
26
27#define SELF_ID_PHY_ID(q) (((q) >> 24) & 0x3f)
28#define SELF_ID_EXTENDED(q) (((q) >> 23) & 0x01)
29#define SELF_ID_LINK_ON(q) (((q) >> 22) & 0x01)
30#define SELF_ID_GAP_COUNT(q) (((q) >> 16) & 0x3f)
31#define SELF_ID_PHY_SPEED(q) (((q) >> 14) & 0x03)
32#define SELF_ID_CONTENDER(q) (((q) >> 11) & 0x01)
33#define SELF_ID_PHY_INITIATOR(q) (((q) >> 1) & 0x01)
34#define SELF_ID_MORE_PACKETS(q) (((q) >> 0) & 0x01)
35
36#define SELF_ID_EXT_SEQUENCE(q) (((q) >> 20) & 0x07)
37
38static u32 *count_ports(u32 *sid, int *total_port_count, int *child_port_count)
39{
40 u32 q;
41 int port_type, shift, seq;
42
43 *total_port_count = 0;
44 *child_port_count = 0;
45
46 shift = 6;
47 q = *sid;
48 seq = 0;
49
50 while (1) {
51 port_type = (q >> shift) & 0x03;
52 switch (port_type) {
53 case SELFID_PORT_CHILD:
54 (*child_port_count)++;
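			/* fall through: a child port also counts as a port */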
55 case SELFID_PORT_PARENT:
56 case SELFID_PORT_NCONN:
57 (*total_port_count)++;
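			/* fall through */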
58 case SELFID_PORT_NONE:
59 break;
60 }
61
62 shift -= 2;
63 if (shift == 0) {
64 if (!SELF_ID_MORE_PACKETS(q))
65 return sid + 1;
66
67 shift = 16;
68 sid++;
69 q = *sid;
70
71 /*
72 * Check that the extra packets actually are
73 * extended self ID packets and that the
74 * sequence numbers in the extended self ID
75 * packets increase as expected.
76 */
77
78 if (!SELF_ID_EXTENDED(q) ||
79 seq != SELF_ID_EXT_SEQUENCE(q))
80 return NULL;
81
82 seq++;
83 }
84 }
85}
86
87static int get_port_type(u32 *sid, int port_index)
88{
89 int index, shift;
90
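	/*
	 * Port type fields are 2 bits wide. The initial self ID quadlet
	 * carries ports 0-2 at bit offsets 6, 4 and 2; each extended
	 * quadlet carries 8 more ports starting at bit offset 16.
	 * Biasing port_index by 5 lets a single divide/modulo pair
	 * handle both layouts.
	 */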
91 index = (port_index + 5) / 8;
92 shift = 16 - ((port_index + 5) & 7) * 2;
93 return (sid[index] >> shift) & 0x03;
94}
95
96static struct fw_node *fw_node_create(u32 sid, int port_count, int color)
97{
98 struct fw_node *node;
99
100 node = kzalloc(sizeof(*node) + port_count * sizeof(node->ports[0]),
101 GFP_ATOMIC);
102 if (node == NULL)
103 return NULL;
104
105 node->color = color;
106 node->node_id = LOCAL_BUS | SELF_ID_PHY_ID(sid);
107 node->link_on = SELF_ID_LINK_ON(sid);
108 node->phy_speed = SELF_ID_PHY_SPEED(sid);
109 node->port_count = port_count;
110
111 atomic_set(&node->ref_count, 1);
112 INIT_LIST_HEAD(&node->link);
113
114 return node;
115}
116
117/*
118 * Compute the maximum hop count for this node and its children. The
119 * maximum hop count is the maximum number of connections between any
120 * two nodes in the subtree rooted at this node. We need this for
121 * setting the gap count. As we build the tree bottom up in
122 * build_tree() below, this is fairly easy to do: for each node we
123 * maintain the max hop count and the max depth, i.e. the number of
124 * hops to the furthest leaf. Computing the max hop count breaks down
125 * into two cases: either the path goes through this node, in which
126 * case the hop count is the sum of the two biggest child depths plus
127 * 2, or the max hop path is entirely contained in a child tree, in
128 * which case the max hop count is just the max hop count of this
129 * child.
130 */
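/*
 * Worked example (assumed values): children reporting max_depth 3
 * and 1 and max_hops 4 and 2 give this node max_depth = 3 + 1 = 4
 * and max_hops = max(4, 3 + 1 + 2) = 6.
 */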
131static void update_hop_count(struct fw_node *node)
132{
133 int depths[2] = { -1, -1 };
134 int max_child_hops = 0;
135 int i;
136
137 for (i = 0; i < node->port_count; i++) {
138 if (node->ports[i].node == NULL)
139 continue;
140
141 if (node->ports[i].node->max_hops > max_child_hops)
142 max_child_hops = node->ports[i].node->max_hops;
143
144 if (node->ports[i].node->max_depth > depths[0]) {
145 depths[1] = depths[0];
146 depths[0] = node->ports[i].node->max_depth;
147 } else if (node->ports[i].node->max_depth > depths[1])
148 depths[1] = node->ports[i].node->max_depth;
149 }
150
151 node->max_depth = depths[0] + 1;
152 node->max_hops = max(max_child_hops, depths[0] + depths[1] + 2);
153}
154
155
156/**
157 * build_tree - Build the tree representation of the topology
158 * @card: the card the topology belongs to
159 * @sid: array of self IDs to create the tree from
160 * @self_id_count: the length of the sid array
161 *
162 * This function builds the tree representation of the topology given
163 * by the self IDs from the latest bus reset. During the construction
164 * of the tree, the function checks that the self IDs are valid and
165 * internally consistent. On success this function returns the
166 * fw_node corresponding to the local card, otherwise NULL.
167 */
168static struct fw_node *build_tree(struct fw_card *card,
169 u32 *sid, int self_id_count)
170{
171 struct fw_node *node, *child, *local_node, *irm_node;
172 struct list_head stack, *h;
173 u32 *next_sid, *end, q;
174 int i, port_count, child_port_count, phy_id, parent_count, stack_depth;
175 int gap_count, topology_type;
176
177 local_node = NULL;
178 node = NULL;
179 INIT_LIST_HEAD(&stack);
180 stack_depth = 0;
181 end = sid + self_id_count;
182 phy_id = 0;
183 irm_node = NULL;
184 gap_count = SELF_ID_GAP_COUNT(*sid);
185 topology_type = 0;
186
187 while (sid < end) {
188 next_sid = count_ports(sid, &port_count, &child_port_count);
189
190 if (next_sid == NULL) {
191 fw_error("Inconsistent extended self IDs.\n");
192 return NULL;
193 }
194
195 q = *sid;
196 if (phy_id != SELF_ID_PHY_ID(q)) {
197 fw_error("PHY ID mismatch in self ID: %d != %d.\n",
198 phy_id, SELF_ID_PHY_ID(q));
199 return NULL;
200 }
201
202 if (child_port_count > stack_depth) {
203 fw_error("Topology stack underflow\n");
204 return NULL;
205 }
206
207 /*
208 * Seek back from the top of our stack to find the
209 * start of the child nodes for this node.
210 */
211 for (i = 0, h = &stack; i < child_port_count; i++)
212 h = h->prev;
213 child = fw_node(h);
214
215 node = fw_node_create(q, port_count, card->color);
216 if (node == NULL) {
 217 fw_error("Out of memory while building topology.\n");
218 return NULL;
219 }
220
221 if (phy_id == (card->node_id & 0x3f))
222 local_node = node;
223
224 if (SELF_ID_CONTENDER(q))
225 irm_node = node;
226
227 if (node->phy_speed == SCODE_BETA)
228 topology_type |= FW_TOPOLOGY_B;
229 else
230 topology_type |= FW_TOPOLOGY_A;
231
232 parent_count = 0;
233
234 for (i = 0; i < port_count; i++) {
235 switch (get_port_type(sid, i)) {
236 case SELFID_PORT_PARENT:
237 /*
238 * Who's your daddy? We don't know the
239 * parent node at this time, so we
240 * temporarily abuse node->color for
241 * remembering the entry in the
242 * node->ports array where the parent
243 * node should be. Later, when we
244 * handle the parent node, we fix up
245 * the reference.
246 */
247 parent_count++;
248 node->color = i;
249 break;
250
251 case SELFID_PORT_CHILD:
252 node->ports[i].node = child;
253 /*
254 * Fix up parent reference for this
255 * child node.
256 */
257 child->ports[child->color].node = node;
258 child->color = card->color;
259 child = fw_node(child->link.next);
260 break;
261 }
262 }
263
264 /*
265 * Check that the node reports exactly one parent
266 * port, except for the root, which of course should
267 * have no parents.
268 */
269 if ((next_sid == end && parent_count != 0) ||
270 (next_sid < end && parent_count != 1)) {
271 fw_error("Parent port inconsistency for node %d: "
272 "parent_count=%d\n", phy_id, parent_count);
273 return NULL;
274 }
275
276 /* Pop the child nodes off the stack and push the new node. */
277 __list_del(h->prev, &stack);
278 list_add_tail(&node->link, &stack);
279 stack_depth += 1 - child_port_count;
280
281 /*
282 * If the PHYs do not all report the same gap count
283 * setting, we fall back to 63, which will force a gap
284 * count reconfiguration and a reset.
285 */
286 if (SELF_ID_GAP_COUNT(q) != gap_count)
287 gap_count = 63;
288
289 update_hop_count(node);
290
291 sid = next_sid;
292 phy_id++;
293 }
294
295 card->root_node = node;
296 card->irm_node = irm_node;
297 card->gap_count = gap_count;
298 card->topology_type = topology_type;
299
300 return local_node;
301}
302
303typedef void (*fw_node_callback_t)(struct fw_card * card,
304 struct fw_node * node,
305 struct fw_node * parent);
306
307static void
308for_each_fw_node(struct fw_card *card, struct fw_node *root,
309 fw_node_callback_t callback)
310{
311 struct list_head list;
312 struct fw_node *node, *next, *child, *parent;
313 int i;
314
315 INIT_LIST_HEAD(&list);
316
317 fw_node_get(root);
318 list_add_tail(&root->link, &list);
319 parent = NULL;
320 list_for_each_entry(node, &list, link) {
321 node->color = card->color;
322
323 for (i = 0; i < node->port_count; i++) {
324 child = node->ports[i].node;
325 if (!child)
326 continue;
327 if (child->color == card->color)
328 parent = child;
329 else {
330 fw_node_get(child);
331 list_add_tail(&child->link, &list);
332 }
333 }
334
335 callback(card, node, parent);
336 }
337
338 list_for_each_entry_safe(node, next, &list, link)
339 fw_node_put(node);
340}
341
342static void
343report_lost_node(struct fw_card *card,
344 struct fw_node *node, struct fw_node *parent)
345{
346 fw_node_event(card, node, FW_NODE_DESTROYED);
347 fw_node_put(node);
348}
349
350static void
351report_found_node(struct fw_card *card,
352 struct fw_node *node, struct fw_node *parent)
353{
354 int b_path = (node->phy_speed == SCODE_BETA);
355
356 if (parent != NULL) {
357 /* min() macro doesn't work here with gcc 3.4 */
358 node->max_speed = parent->max_speed < node->phy_speed ?
359 parent->max_speed : node->phy_speed;
360 node->b_path = parent->b_path && b_path;
361 } else {
362 node->max_speed = node->phy_speed;
363 node->b_path = b_path;
364 }
365
366 fw_node_event(card, node, FW_NODE_CREATED);
367}
368
369void fw_destroy_nodes(struct fw_card *card)
370{
371 unsigned long flags;
372
373 spin_lock_irqsave(&card->lock, flags);
374 card->color++;
375 if (card->local_node != NULL)
376 for_each_fw_node(card, card->local_node, report_lost_node);
377 spin_unlock_irqrestore(&card->lock, flags);
378}
379
380static void move_tree(struct fw_node *node0, struct fw_node *node1, int port)
381{
382 struct fw_node *tree;
383 int i;
384
385 tree = node1->ports[port].node;
386 node0->ports[port].node = tree;
387 for (i = 0; i < tree->port_count; i++) {
388 if (tree->ports[i].node == node1) {
389 tree->ports[i].node = node0;
390 break;
391 }
392 }
393}
394
395/**
396 * update_tree - compare the old topology tree for card with the new
397 * one specified by root. Queue the nodes and mark them as either
398 * found, lost or updated. Update the nodes in the card topology tree
399 * as we go.
400 */
401static void
402update_tree(struct fw_card *card, struct fw_node *root)
403{
404 struct list_head list0, list1;
405 struct fw_node *node0, *node1;
406 int i, event;
407
408 INIT_LIST_HEAD(&list0);
409 list_add_tail(&card->local_node->link, &list0);
410 INIT_LIST_HEAD(&list1);
411 list_add_tail(&root->link, &list1);
412
413 node0 = fw_node(list0.next);
414 node1 = fw_node(list1.next);
415
416 while (&node0->link != &list0) {
417
418 /* assert(node0->port_count == node1->port_count); */
419 if (node0->link_on && !node1->link_on)
420 event = FW_NODE_LINK_OFF;
421 else if (!node0->link_on && node1->link_on)
422 event = FW_NODE_LINK_ON;
423 else
424 event = FW_NODE_UPDATED;
425
426 node0->node_id = node1->node_id;
427 node0->color = card->color;
428 node0->link_on = node1->link_on;
429 node0->initiated_reset = node1->initiated_reset;
430 node0->max_hops = node1->max_hops;
431 node1->color = card->color;
432 fw_node_event(card, node0, event);
433
434 if (card->root_node == node1)
435 card->root_node = node0;
436 if (card->irm_node == node1)
437 card->irm_node = node0;
438
439 for (i = 0; i < node0->port_count; i++) {
440 if (node0->ports[i].node && node1->ports[i].node) {
441 /*
442 * This port didn't change, queue the
443 * connected node for further
444 * investigation.
445 */
446 if (node0->ports[i].node->color == card->color)
447 continue;
448 list_add_tail(&node0->ports[i].node->link,
449 &list0);
450 list_add_tail(&node1->ports[i].node->link,
451 &list1);
452 } else if (node0->ports[i].node) {
453 /*
454 * The nodes connected here were
455 * unplugged; unref the lost nodes and
456				 * queue FW_NODE_DESTROYED callbacks for
457 * them.
458 */
459
460 for_each_fw_node(card, node0->ports[i].node,
461 report_lost_node);
462 node0->ports[i].node = NULL;
463 } else if (node1->ports[i].node) {
464 /*
465				 * One or more nodes were connected to
466 * this port. Move the new nodes into
467 * the tree and queue FW_NODE_CREATED
468 * callbacks for them.
469 */
470 move_tree(node0, node1, i);
471 for_each_fw_node(card, node0->ports[i].node,
472 report_found_node);
473 }
474 }
475
476 node0 = fw_node(node0->link.next);
477 node1 = fw_node(node1->link.next);
478 }
479}
480
481static void
482update_topology_map(struct fw_card *card, u32 *self_ids, int self_id_count)
483{
484 int node_count;
485
486 card->topology_map[1]++;
487 node_count = (card->root_node->node_id & 0x3f) + 1;
488 card->topology_map[2] = (node_count << 16) | self_id_count;
489 card->topology_map[0] = (self_id_count + 2) << 16;
490 memcpy(&card->topology_map[3], self_ids, self_id_count * 4);
491 fw_compute_block_crc(card->topology_map);
492}
493
494void
495fw_core_handle_bus_reset(struct fw_card *card,
496 int node_id, int generation,
497 int self_id_count, u32 * self_ids)
498{
499 struct fw_node *local_node;
500 unsigned long flags;
501
502 fw_flush_transactions(card);
503
504 spin_lock_irqsave(&card->lock, flags);
505
506 /*
507	 * If the new topology has a different self_id_count, the topology
508	 * changed: nodes were added or removed. In that case we reset
509	 * the bus manager retry counter.
510 */
511 if (card->self_id_count != self_id_count)
512 card->bm_retries = 0;
513
514 card->node_id = node_id;
515 card->generation = generation;
516 card->reset_jiffies = jiffies;
517 schedule_delayed_work(&card->work, 0);
518
519 local_node = build_tree(card, self_ids, self_id_count);
520
521 update_topology_map(card, self_ids, self_id_count);
522
523 card->color++;
524
525 if (local_node == NULL) {
526 fw_error("topology build failed\n");
527 /* FIXME: We need to issue a bus reset in this case. */
528 } else if (card->local_node == NULL) {
529 card->local_node = local_node;
530 for_each_fw_node(card, local_node, report_found_node);
531 } else {
532 update_tree(card, local_node);
533 }
534
535 spin_unlock_irqrestore(&card->lock, flags);
536}
537EXPORT_SYMBOL(fw_core_handle_bus_reset);
diff --git a/drivers/firewire/fw-topology.h b/drivers/firewire/fw-topology.h
new file mode 100644
index 000000000000..363b6cbcd0b3
--- /dev/null
+++ b/drivers/firewire/fw-topology.h
@@ -0,0 +1,92 @@
1/*
2 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software Foundation,
16 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 */
18
19#ifndef __fw_topology_h
20#define __fw_topology_h
21
22enum {
23 FW_TOPOLOGY_A = 0x01,
24 FW_TOPOLOGY_B = 0x02,
25 FW_TOPOLOGY_MIXED = 0x03,
26};
27
28enum {
29 FW_NODE_CREATED = 0x00,
30 FW_NODE_UPDATED = 0x01,
31 FW_NODE_DESTROYED = 0x02,
32 FW_NODE_LINK_ON = 0x03,
33 FW_NODE_LINK_OFF = 0x04,
34};
35
36struct fw_port {
37 struct fw_node *node;
38 unsigned speed : 3; /* S100, S200, ... S3200 */
39};
40
41struct fw_node {
42 u16 node_id;
43 u8 color;
44 u8 port_count;
45 unsigned link_on : 1;
46 unsigned initiated_reset : 1;
47 unsigned b_path : 1;
48 u8 phy_speed : 3; /* As in the self ID packet. */
49 u8 max_speed : 5; /* Minimum of all phy-speeds and port speeds on
50 * the path from the local node to this node. */
51 u8 max_depth : 4; /* Maximum depth to any leaf node */
52 u8 max_hops : 4; /* Max hops in this sub tree */
53 atomic_t ref_count;
54
55 /* For serializing node topology into a list. */
56 struct list_head link;
57
58 /* Upper layer specific data. */
59 void *data;
60
61 struct fw_port ports[0];
62};
63
64static inline struct fw_node *
65fw_node(struct list_head *l)
66{
67 return list_entry(l, struct fw_node, link);
68}
69
70static inline struct fw_node *
71fw_node_get(struct fw_node *node)
72{
73 atomic_inc(&node->ref_count);
74
75 return node;
76}
77
78static inline void
79fw_node_put(struct fw_node *node)
80{
81 if (atomic_dec_and_test(&node->ref_count))
82 kfree(node);
83}
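
Node lifetime is managed with a plain atomic reference count: fw_node_get() takes an additional reference and fw_node_put() drops one, freeing the node when the count reaches zero. A sketch of the intended pairing (illustrative only):

	struct fw_node *node;

	node = fw_node_get(card->root_node);	/* take a reference */
	/* ... inspect node->node_id, node->ports[], ... */
	fw_node_put(node);			/* freed on the last put */
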
84
85void
86fw_destroy_nodes(struct fw_card *card);
87
88int
89fw_compute_block_crc(u32 *block);
90
91
92#endif /* __fw_topology_h */
diff --git a/drivers/firewire/fw-transaction.c b/drivers/firewire/fw-transaction.c
new file mode 100644
index 000000000000..80d0121463d0
--- /dev/null
+++ b/drivers/firewire/fw-transaction.c
@@ -0,0 +1,910 @@
1/*
2 * Core IEEE1394 transaction logic
3 *
4 * Copyright (C) 2004-2006 Kristian Hoegsberg <krh@bitplanet.net>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software Foundation,
18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/init.h>
24#include <linux/interrupt.h>
25#include <linux/pci.h>
26#include <linux/delay.h>
27#include <linux/poll.h>
28#include <linux/list.h>
29#include <linux/kthread.h>
30#include <asm/uaccess.h>
31#include <asm/semaphore.h>
32
33#include "fw-transaction.h"
34#include "fw-topology.h"
35#include "fw-device.h"
36
37#define HEADER_PRI(pri) ((pri) << 0)
38#define HEADER_TCODE(tcode) ((tcode) << 4)
39#define HEADER_RETRY(retry) ((retry) << 8)
40#define HEADER_TLABEL(tlabel) ((tlabel) << 10)
41#define HEADER_DESTINATION(destination) ((destination) << 16)
42#define HEADER_SOURCE(source) ((source) << 16)
43#define HEADER_RCODE(rcode) ((rcode) << 12)
44#define HEADER_OFFSET_HIGH(offset_high) ((offset_high) << 0)
45#define HEADER_DATA_LENGTH(length) ((length) << 16)
46#define HEADER_EXTENDED_TCODE(tcode) ((tcode) << 0)
47
48#define HEADER_GET_TCODE(q) (((q) >> 4) & 0x0f)
49#define HEADER_GET_TLABEL(q) (((q) >> 10) & 0x3f)
50#define HEADER_GET_RCODE(q) (((q) >> 12) & 0x0f)
51#define HEADER_GET_DESTINATION(q) (((q) >> 16) & 0xffff)
52#define HEADER_GET_SOURCE(q) (((q) >> 16) & 0xffff)
53#define HEADER_GET_OFFSET_HIGH(q) (((q) >> 0) & 0xffff)
54#define HEADER_GET_DATA_LENGTH(q) (((q) >> 16) & 0xffff)
55#define HEADER_GET_EXTENDED_TCODE(q) (((q) >> 0) & 0xffff)
56
57#define PHY_CONFIG_GAP_COUNT(gap_count) (((gap_count) << 16) | (1 << 22))
58#define PHY_CONFIG_ROOT_ID(node_id) ((((node_id) & 0x3f) << 24) | (1 << 23))
59#define PHY_IDENTIFIER(id) ((id) << 30)
60
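
The HEADER_* macros pack and unpack the fields of an asynchronous packet header quadlet: each setter shifts a field into position, and each HEADER_GET_* masks it back out. A small sketch (values illustrative; RETRY_X and TCODE_READ_QUADLET_REQUEST come from linux/firewire-constants.h):

	u32 q = HEADER_RETRY(RETRY_X) |
		HEADER_TLABEL(0x15) |
		HEADER_TCODE(TCODE_READ_QUADLET_REQUEST) |
		HEADER_DESTINATION(0xffc0);

	/* Round-trips back out:
	 *   HEADER_GET_TLABEL(q)      == 0x15
	 *   HEADER_GET_TCODE(q)       == TCODE_READ_QUADLET_REQUEST
	 *   HEADER_GET_DESTINATION(q) == 0xffc0
	 */
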
61static int
62close_transaction(struct fw_transaction *transaction,
63 struct fw_card *card, int rcode,
64 u32 *payload, size_t length)
65{
66 struct fw_transaction *t;
67 unsigned long flags;
68
69 spin_lock_irqsave(&card->lock, flags);
70 list_for_each_entry(t, &card->transaction_list, link) {
71 if (t == transaction) {
72 list_del(&t->link);
73 card->tlabel_mask &= ~(1 << t->tlabel);
74 break;
75 }
76 }
77 spin_unlock_irqrestore(&card->lock, flags);
78
79 if (&t->link != &card->transaction_list) {
80 t->callback(card, rcode, payload, length, t->callback_data);
81 return 0;
82 }
83
84 return -ENOENT;
85}
86
87/*
88 * Only valid for transactions that are potentially pending (i.e., have
89 * been sent).
90 */
91int
92fw_cancel_transaction(struct fw_card *card,
93 struct fw_transaction *transaction)
94{
95 /*
96 * Cancel the packet transmission if it's still queued. That
97 * will call the packet transmission callback which cancels
98 * the transaction.
99 */
100
101 if (card->driver->cancel_packet(card, &transaction->packet) == 0)
102 return 0;
103
104 /*
105 * If the request packet has already been sent, we need to see
106 * if the transaction is still pending and remove it in that case.
107 */
108
109 return close_transaction(transaction, card, RCODE_CANCELLED, NULL, 0);
110}
111EXPORT_SYMBOL(fw_cancel_transaction);
112
113static void
114transmit_complete_callback(struct fw_packet *packet,
115 struct fw_card *card, int status)
116{
117 struct fw_transaction *t =
118 container_of(packet, struct fw_transaction, packet);
119
120 switch (status) {
121 case ACK_COMPLETE:
122 close_transaction(t, card, RCODE_COMPLETE, NULL, 0);
123 break;
124 case ACK_PENDING:
125 t->timestamp = packet->timestamp;
126 break;
127 case ACK_BUSY_X:
128 case ACK_BUSY_A:
129 case ACK_BUSY_B:
130 close_transaction(t, card, RCODE_BUSY, NULL, 0);
131 break;
132 case ACK_DATA_ERROR:
133 close_transaction(t, card, RCODE_DATA_ERROR, NULL, 0);
134 break;
135 case ACK_TYPE_ERROR:
136 close_transaction(t, card, RCODE_TYPE_ERROR, NULL, 0);
137 break;
138 default:
139 /*
140		 * In this case the ack is really a juju-specific
141 * rcode, so just forward that to the callback.
142 */
143 close_transaction(t, card, status, NULL, 0);
144 break;
145 }
146}
147
148static void
149fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
150 int node_id, int source_id, int generation, int speed,
151 unsigned long long offset, void *payload, size_t length)
152{
153 int ext_tcode;
154
155 if (tcode > 0x10) {
156 ext_tcode = tcode - 0x10;
157 tcode = TCODE_LOCK_REQUEST;
158 } else
159 ext_tcode = 0;
160
161 packet->header[0] =
162 HEADER_RETRY(RETRY_X) |
163 HEADER_TLABEL(tlabel) |
164 HEADER_TCODE(tcode) |
165 HEADER_DESTINATION(node_id);
166 packet->header[1] =
167 HEADER_OFFSET_HIGH(offset >> 32) | HEADER_SOURCE(source_id);
168 packet->header[2] =
169 offset;
170
171 switch (tcode) {
172 case TCODE_WRITE_QUADLET_REQUEST:
173 packet->header[3] = *(u32 *)payload;
174 packet->header_length = 16;
175 packet->payload_length = 0;
176 break;
177
178 case TCODE_LOCK_REQUEST:
179 case TCODE_WRITE_BLOCK_REQUEST:
180 packet->header[3] =
181 HEADER_DATA_LENGTH(length) |
182 HEADER_EXTENDED_TCODE(ext_tcode);
183 packet->header_length = 16;
184 packet->payload = payload;
185 packet->payload_length = length;
186 break;
187
188 case TCODE_READ_QUADLET_REQUEST:
189 packet->header_length = 12;
190 packet->payload_length = 0;
191 break;
192
193 case TCODE_READ_BLOCK_REQUEST:
194 packet->header[3] =
195 HEADER_DATA_LENGTH(length) |
196 HEADER_EXTENDED_TCODE(ext_tcode);
197 packet->header_length = 16;
198 packet->payload_length = 0;
199 break;
200 }
201
202 packet->speed = speed;
203 packet->generation = generation;
204 packet->ack = 0;
205}
206
207/**
208 * This function provides low-level access to the IEEE1394 transaction
209 * logic. Most C programs would use either fw_read(), fw_write() or
210 * fw_lock() instead - those functions are convenience wrappers for
211 * this function. The fw_send_request() function is primarily
212 * provided as a flexible, one-stop entry point for language bindings
213 * and protocol bindings.
214 *
215 * FIXME: Document this function further, in particular the possible
216 * values for rcode in the callback. In short, we map ACK_COMPLETE to
217 * RCODE_COMPLETE, internal errors set errno and set rcode to
218 * RCODE_SEND_ERROR (which is out of range for standard ieee1394
219 * rcodes). All other rcodes are forwarded unchanged. For all
220 * errors, payload is NULL, length is 0.
221 *
222 * The caller cannot expect the callback to be called before the function
223 * returns, though this does happen in some cases (ACK_COMPLETE and
224 * errors).
225 *
226 * The payload is only used for write requests and must not be freed
227 * until the callback has been called.
228 *
229 * @param card the card from which to send the request
230 * @param tcode the tcode for this transaction. Do not use
231 * TCODE_LOCK_REQUEST directly, instead use TCODE_LOCK_MASK_SWAP
232 * etc. to specify tcode and ext_tcode.
233 * @param node_id the destination node ID (bus ID and PHY ID concatenated)
234 * @param generation the generation for which node_id is valid
235 * @param speed the speed to use for sending the request
236 * @param offset the 48 bit offset on the destination node
237 * @param payload the data payload for the request subaction
238 * @param length the length in bytes of the data to read
239 * @param callback function to be called when the transaction is completed
240 * @param callback_data pointer to arbitrary data, which will be
241 * passed to the callback
242 */
243void
244fw_send_request(struct fw_card *card, struct fw_transaction *t,
245 int tcode, int node_id, int generation, int speed,
246 unsigned long long offset,
247 void *payload, size_t length,
248 fw_transaction_callback_t callback, void *callback_data)
249{
250 unsigned long flags;
251 int tlabel, source;
252
253 /*
254	 * First of all, bump the flush timer forward 100ms so we
255 * don't race with a flush timer callback.
256 */
257
258 mod_timer(&card->flush_timer, jiffies + DIV_ROUND_UP(HZ, 10));
259
260 /*
261 * Allocate tlabel from the bitmap and put the transaction on
262 * the list while holding the card spinlock.
263 */
264
265 spin_lock_irqsave(&card->lock, flags);
266
267 source = card->node_id;
268 tlabel = card->current_tlabel;
269 if (card->tlabel_mask & (1 << tlabel)) {
270 spin_unlock_irqrestore(&card->lock, flags);
271 callback(card, RCODE_SEND_ERROR, NULL, 0, callback_data);
272 return;
273 }
274
275 card->current_tlabel = (card->current_tlabel + 1) & 0x1f;
276 card->tlabel_mask |= (1 << tlabel);
277
278 list_add_tail(&t->link, &card->transaction_list);
279
280 spin_unlock_irqrestore(&card->lock, flags);
281
282 /* Initialize rest of transaction, fill out packet and send it. */
283 t->node_id = node_id;
284 t->tlabel = tlabel;
285 t->callback = callback;
286 t->callback_data = callback_data;
287
288 fw_fill_request(&t->packet, tcode, t->tlabel,
289 node_id, source, generation,
290 speed, offset, payload, length);
291 t->packet.callback = transmit_complete_callback;
292
293 card->driver->send_request(card, &t->packet);
294}
295EXPORT_SYMBOL(fw_send_request);
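
A sketch of a typical caller, reading one quadlet from the CSR space of a remote node. The fw_transaction must stay valid until the completion callback has run, since it is linked into card->transaction_list; the names and storage here are illustrative:

	static void read_done(struct fw_card *card, int rcode,
			      void *payload, size_t length, void *data)
	{
		if (rcode == RCODE_COMPLETE)
			fw_notify("cycle time: %08x\n", *(u32 *)payload);
	}

	static struct fw_transaction t;	/* must outlive the request */

	fw_send_request(card, &t, TCODE_READ_QUADLET_REQUEST,
			node_id, generation, SCODE_100,
			CSR_REGISTER_BASE + CSR_CYCLE_TIME,
			NULL, 4, read_done, NULL);
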
296
297static void
298transmit_phy_packet_callback(struct fw_packet *packet,
299 struct fw_card *card, int status)
300{
301 kfree(packet);
302}
303
304static void send_phy_packet(struct fw_card *card, u32 data, int generation)
305{
306 struct fw_packet *packet;
307
308 packet = kzalloc(sizeof(*packet), GFP_ATOMIC);
309 if (packet == NULL)
310 return;
311
312 packet->header[0] = data;
313 packet->header[1] = ~data;
314 packet->header_length = 8;
315 packet->payload_length = 0;
316 packet->speed = SCODE_100;
317 packet->generation = generation;
318 packet->callback = transmit_phy_packet_callback;
319
320 card->driver->send_request(card, packet);
321}
322
323void fw_send_phy_config(struct fw_card *card,
324 int node_id, int generation, int gap_count)
325{
326 u32 q;
327
328 q = PHY_IDENTIFIER(PHY_PACKET_CONFIG) |
329 PHY_CONFIG_ROOT_ID(node_id) |
330 PHY_CONFIG_GAP_COUNT(gap_count);
331
332 send_phy_packet(card, q, generation);
333}
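
fw_send_phy_config() broadcasts a PHY configuration packet that nominates a root node and sets the gap count; the second header quadlet carries the bitwise inverse of the first as a consistency check. For example, to nominate the node with PHY ID 0 as root with gap count 5 (illustrative values):

	fw_send_phy_config(card, 0, card->generation, 5);
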
334
335void fw_flush_transactions(struct fw_card *card)
336{
337 struct fw_transaction *t, *next;
338 struct list_head list;
339 unsigned long flags;
340
341 INIT_LIST_HEAD(&list);
342 spin_lock_irqsave(&card->lock, flags);
343 list_splice_init(&card->transaction_list, &list);
344 card->tlabel_mask = 0;
345 spin_unlock_irqrestore(&card->lock, flags);
346
347 list_for_each_entry_safe(t, next, &list, link) {
348 card->driver->cancel_packet(card, &t->packet);
349
350 /*
351 * At this point cancel_packet will never call the
352 * transaction callback, since we just took all the
353 * transactions out of the list. So do it here.
354 */
355 t->callback(card, RCODE_CANCELLED, NULL, 0, t->callback_data);
356 }
357}
358
359static struct fw_address_handler *
360lookup_overlapping_address_handler(struct list_head *list,
361 unsigned long long offset, size_t length)
362{
363 struct fw_address_handler *handler;
364
365 list_for_each_entry(handler, list, link) {
366 if (handler->offset < offset + length &&
367 offset < handler->offset + handler->length)
368 return handler;
369 }
370
371 return NULL;
372}
373
374static struct fw_address_handler *
375lookup_enclosing_address_handler(struct list_head *list,
376 unsigned long long offset, size_t length)
377{
378 struct fw_address_handler *handler;
379
380 list_for_each_entry(handler, list, link) {
381 if (handler->offset <= offset &&
382 offset + length <= handler->offset + handler->length)
383 return handler;
384 }
385
386 return NULL;
387}
388
389static DEFINE_SPINLOCK(address_handler_lock);
390static LIST_HEAD(address_handler_list);
391
392const struct fw_address_region fw_low_memory_region =
393 { .start = 0x000000000000ULL, .end = 0x000100000000ULL, };
394const struct fw_address_region fw_high_memory_region =
395 { .start = 0x000100000000ULL, .end = 0xffffe0000000ULL, };
396const struct fw_address_region fw_private_region =
397 { .start = 0xffffe0000000ULL, .end = 0xfffff0000000ULL, };
398const struct fw_address_region fw_csr_region =
399 { .start = 0xfffff0000000ULL, .end = 0xfffff0000800ULL, };
400const struct fw_address_region fw_unit_space_region =
401 { .start = 0xfffff0000900ULL, .end = 0x1000000000000ULL, };
402EXPORT_SYMBOL(fw_low_memory_region);
403EXPORT_SYMBOL(fw_high_memory_region);
404EXPORT_SYMBOL(fw_private_region);
405EXPORT_SYMBOL(fw_csr_region);
406EXPORT_SYMBOL(fw_unit_space_region);
407
408/**
409 * Allocate a range of addresses in the node space of the OHCI
410 * controller. When a request is received that falls within the
411 * specified address range, the specified callback is invoked. The
412 * parameters passed to the callback give the details of the
413 * particular request.
414 */
415int
416fw_core_add_address_handler(struct fw_address_handler *handler,
417 const struct fw_address_region *region)
418{
419 struct fw_address_handler *other;
420 unsigned long flags;
421 int ret = -EBUSY;
422
423 spin_lock_irqsave(&address_handler_lock, flags);
424
425 handler->offset = region->start;
426 while (handler->offset + handler->length <= region->end) {
427 other =
428 lookup_overlapping_address_handler(&address_handler_list,
429 handler->offset,
430 handler->length);
431 if (other != NULL) {
432 handler->offset += other->length;
433 } else {
434 list_add_tail(&handler->link, &address_handler_list);
435 ret = 0;
436 break;
437 }
438 }
439
440 spin_unlock_irqrestore(&address_handler_lock, flags);
441
442 return ret;
443}
444EXPORT_SYMBOL(fw_core_add_address_handler);
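
A sketch of a registration, assuming a hypothetical handler that simply completes every request. The core picks handler->offset itself, scanning the region for a free slot of handler->length bytes, so the caller reads the chosen offset back after a successful return:

	static void dummy_callback(struct fw_card *card, struct fw_request *request,
				   int tcode, int destination, int source,
				   int generation, int speed,
				   unsigned long long offset,
				   void *payload, size_t length, void *callback_data)
	{
		fw_send_response(card, request, RCODE_COMPLETE);
	}

	static struct fw_address_handler dummy_handler = {
		.length		  = 0x100,
		.address_callback = dummy_callback,
	};

	if (fw_core_add_address_handler(&dummy_handler, &fw_unit_space_region) == 0)
		fw_notify("handler at offset %012llx\n",
			  (unsigned long long)dummy_handler.offset);
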
445
446/**
447 * Deallocate a range of addresses allocated with fw_core_add_address_handler().
448 * This will call the associated callback one last time with the special
449 * tcode TCODE_DEALLOCATE, to let the client destroy the registered
450 * callback data. For convenience, the callback parameters offset and
451 * length are set to the start and the length of the deallocated
452 * region, respectively; payload is set to NULL.
453 */
454void fw_core_remove_address_handler(struct fw_address_handler *handler)
455{
456 unsigned long flags;
457
458 spin_lock_irqsave(&address_handler_lock, flags);
459 list_del(&handler->link);
460 spin_unlock_irqrestore(&address_handler_lock, flags);
461}
462EXPORT_SYMBOL(fw_core_remove_address_handler);
463
464struct fw_request {
465 struct fw_packet response;
466 u32 request_header[4];
467 int ack;
468 u32 length;
469 u32 data[0];
470};
471
472static void
473free_response_callback(struct fw_packet *packet,
474 struct fw_card *card, int status)
475{
476 struct fw_request *request;
477
478 request = container_of(packet, struct fw_request, response);
479 kfree(request);
480}
481
482void
483fw_fill_response(struct fw_packet *response, u32 *request_header,
484 int rcode, void *payload, size_t length)
485{
486 int tcode, tlabel, extended_tcode, source, destination;
487
488 tcode = HEADER_GET_TCODE(request_header[0]);
489 tlabel = HEADER_GET_TLABEL(request_header[0]);
490 source = HEADER_GET_DESTINATION(request_header[0]);
491 destination = HEADER_GET_SOURCE(request_header[1]);
492 extended_tcode = HEADER_GET_EXTENDED_TCODE(request_header[3]);
493
494 response->header[0] =
495 HEADER_RETRY(RETRY_1) |
496 HEADER_TLABEL(tlabel) |
497 HEADER_DESTINATION(destination);
498 response->header[1] =
499 HEADER_SOURCE(source) |
500 HEADER_RCODE(rcode);
501 response->header[2] = 0;
502
503 switch (tcode) {
504 case TCODE_WRITE_QUADLET_REQUEST:
505 case TCODE_WRITE_BLOCK_REQUEST:
506 response->header[0] |= HEADER_TCODE(TCODE_WRITE_RESPONSE);
507 response->header_length = 12;
508 response->payload_length = 0;
509 break;
510
511 case TCODE_READ_QUADLET_REQUEST:
512 response->header[0] |=
513 HEADER_TCODE(TCODE_READ_QUADLET_RESPONSE);
514 if (payload != NULL)
515 response->header[3] = *(u32 *)payload;
516 else
517 response->header[3] = 0;
518 response->header_length = 16;
519 response->payload_length = 0;
520 break;
521
522 case TCODE_READ_BLOCK_REQUEST:
523 case TCODE_LOCK_REQUEST:
524 response->header[0] |= HEADER_TCODE(tcode + 2);
525 response->header[3] =
526 HEADER_DATA_LENGTH(length) |
527 HEADER_EXTENDED_TCODE(extended_tcode);
528 response->header_length = 16;
529 response->payload = payload;
530 response->payload_length = length;
531 break;
532
533 default:
534 BUG();
535 return;
536 }
537}
538EXPORT_SYMBOL(fw_fill_response);
539
540static struct fw_request *
541allocate_request(struct fw_packet *p)
542{
543 struct fw_request *request;
544 u32 *data, length;
545 int request_tcode, t;
546
547 request_tcode = HEADER_GET_TCODE(p->header[0]);
548 switch (request_tcode) {
549 case TCODE_WRITE_QUADLET_REQUEST:
550 data = &p->header[3];
551 length = 4;
552 break;
553
554 case TCODE_WRITE_BLOCK_REQUEST:
555 case TCODE_LOCK_REQUEST:
556 data = p->payload;
557 length = HEADER_GET_DATA_LENGTH(p->header[3]);
558 break;
559
560 case TCODE_READ_QUADLET_REQUEST:
561 data = NULL;
562 length = 4;
563 break;
564
565 case TCODE_READ_BLOCK_REQUEST:
566 data = NULL;
567 length = HEADER_GET_DATA_LENGTH(p->header[3]);
568 break;
569
570 default:
571 BUG();
572 return NULL;
573 }
574
575 request = kmalloc(sizeof(*request) + length, GFP_ATOMIC);
576 if (request == NULL)
577 return NULL;
578
579 t = (p->timestamp & 0x1fff) + 4000;
580 if (t >= 8000)
581 t = (p->timestamp & ~0x1fff) + 0x2000 + t - 8000;
582 else
583 t = (p->timestamp & ~0x1fff) + t;
584
585 request->response.speed = p->speed;
586 request->response.timestamp = t;
587 request->response.generation = p->generation;
588 request->response.ack = 0;
589 request->response.callback = free_response_callback;
590 request->ack = p->ack;
591 request->length = length;
592 if (data)
593 memcpy(request->data, data, length);
594
595 memcpy(request->request_header, p->header, sizeof(p->header));
596
597 return request;
598}
599
600void
601fw_send_response(struct fw_card *card, struct fw_request *request, int rcode)
602{
603 /*
604 * Broadcast packets are reported as ACK_COMPLETE, so this
605	 * check is sufficient to ensure we don't send a response to
606 * broadcast packets or posted writes.
607 */
608 if (request->ack != ACK_PENDING)
609 return;
610
611 if (rcode == RCODE_COMPLETE)
612 fw_fill_response(&request->response, request->request_header,
613 rcode, request->data, request->length);
614 else
615 fw_fill_response(&request->response, request->request_header,
616 rcode, NULL, 0);
617
618 card->driver->send_response(card, &request->response);
619}
620EXPORT_SYMBOL(fw_send_response);
621
622void
623fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
624{
625 struct fw_address_handler *handler;
626 struct fw_request *request;
627 unsigned long long offset;
628 unsigned long flags;
629 int tcode, destination, source;
630
631 if (p->payload_length > 2048) {
632 /* FIXME: send error response. */
633 return;
634 }
635
636 if (p->ack != ACK_PENDING && p->ack != ACK_COMPLETE)
637 return;
638
639 request = allocate_request(p);
640 if (request == NULL) {
641 /* FIXME: send statically allocated busy packet. */
642 return;
643 }
644
645 offset =
646 ((unsigned long long)
647 HEADER_GET_OFFSET_HIGH(p->header[1]) << 32) | p->header[2];
648 tcode = HEADER_GET_TCODE(p->header[0]);
649 destination = HEADER_GET_DESTINATION(p->header[0]);
650 source = HEADER_GET_SOURCE(p->header[0]);
651
652 spin_lock_irqsave(&address_handler_lock, flags);
653 handler = lookup_enclosing_address_handler(&address_handler_list,
654 offset, request->length);
655 spin_unlock_irqrestore(&address_handler_lock, flags);
656
657 /*
658 * FIXME: lookup the fw_node corresponding to the sender of
659 * this request and pass that to the address handler instead
660 * of the node ID. We may also want to move the address
661 * allocations to fw_node so we only do this callback if the
662 * upper layers registered it for this node.
663 */
664
665 if (handler == NULL)
666 fw_send_response(card, request, RCODE_ADDRESS_ERROR);
667 else
668 handler->address_callback(card, request,
669 tcode, destination, source,
670 p->generation, p->speed, offset,
671 request->data, request->length,
672 handler->callback_data);
673}
674EXPORT_SYMBOL(fw_core_handle_request);
675
676void
677fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
678{
679 struct fw_transaction *t;
680 unsigned long flags;
681 u32 *data;
682 size_t data_length;
683 int tcode, tlabel, destination, source, rcode;
684
685 tcode = HEADER_GET_TCODE(p->header[0]);
686 tlabel = HEADER_GET_TLABEL(p->header[0]);
687 destination = HEADER_GET_DESTINATION(p->header[0]);
688 source = HEADER_GET_SOURCE(p->header[1]);
689 rcode = HEADER_GET_RCODE(p->header[1]);
690
691 spin_lock_irqsave(&card->lock, flags);
692 list_for_each_entry(t, &card->transaction_list, link) {
693 if (t->node_id == source && t->tlabel == tlabel) {
694 list_del(&t->link);
695 card->tlabel_mask &= ~(1 << t->tlabel);
696 break;
697 }
698 }
699 spin_unlock_irqrestore(&card->lock, flags);
700
701 if (&t->link == &card->transaction_list) {
702 fw_notify("Unsolicited response (source %x, tlabel %x)\n",
703 source, tlabel);
704 return;
705 }
706
707 /*
708	 * FIXME: sanity-check the packet: is the length correct, do the
709	 * tcodes and addresses match?
710 */
711
712 switch (tcode) {
713 case TCODE_READ_QUADLET_RESPONSE:
714 data = (u32 *) &p->header[3];
715 data_length = 4;
716 break;
717
718 case TCODE_WRITE_RESPONSE:
719 data = NULL;
720 data_length = 0;
721 break;
722
723 case TCODE_READ_BLOCK_RESPONSE:
724 case TCODE_LOCK_RESPONSE:
725 data = p->payload;
726 data_length = HEADER_GET_DATA_LENGTH(p->header[3]);
727 break;
728
729 default:
730 /* Should never happen, this is just to shut up gcc. */
731 data = NULL;
732 data_length = 0;
733 break;
734 }
735
736 t->callback(card, rcode, data, data_length, t->callback_data);
737}
738EXPORT_SYMBOL(fw_core_handle_response);
739
740const struct fw_address_region topology_map_region =
741 { .start = 0xfffff0001000ull, .end = 0xfffff0001400ull, };
742
743static void
744handle_topology_map(struct fw_card *card, struct fw_request *request,
745 int tcode, int destination, int source,
746 int generation, int speed,
747 unsigned long long offset,
748 void *payload, size_t length, void *callback_data)
749{
750 int i, start, end;
751 u32 *map;
752
753 if (!TCODE_IS_READ_REQUEST(tcode)) {
754 fw_send_response(card, request, RCODE_TYPE_ERROR);
755 return;
756 }
757
758 if ((offset & 3) > 0 || (length & 3) > 0) {
759 fw_send_response(card, request, RCODE_ADDRESS_ERROR);
760 return;
761 }
762
763 start = (offset - topology_map_region.start) / 4;
764 end = start + length / 4;
765 map = payload;
766
767 for (i = 0; i < length / 4; i++)
768 map[i] = cpu_to_be32(card->topology_map[start + i]);
769
770 fw_send_response(card, request, RCODE_COMPLETE);
771}
772
773static struct fw_address_handler topology_map = {
774 .length = 0x200,
775 .address_callback = handle_topology_map,
776};
777
778const struct fw_address_region registers_region =
779 { .start = 0xfffff0000000ull, .end = 0xfffff0000400ull, };
780
781static void
782handle_registers(struct fw_card *card, struct fw_request *request,
783 int tcode, int destination, int source,
784 int generation, int speed,
785 unsigned long long offset,
786 void *payload, size_t length, void *callback_data)
787{
788 int reg = offset - CSR_REGISTER_BASE;
789 unsigned long long bus_time;
790 __be32 *data = payload;
791
792 switch (reg) {
793 case CSR_CYCLE_TIME:
794 case CSR_BUS_TIME:
795 if (!TCODE_IS_READ_REQUEST(tcode) || length != 4) {
796 fw_send_response(card, request, RCODE_TYPE_ERROR);
797 break;
798 }
799
800 bus_time = card->driver->get_bus_time(card);
801 if (reg == CSR_CYCLE_TIME)
802 *data = cpu_to_be32(bus_time);
803 else
804 *data = cpu_to_be32(bus_time >> 25);
805 fw_send_response(card, request, RCODE_COMPLETE);
806 break;
807
808 case CSR_BUS_MANAGER_ID:
809 case CSR_BANDWIDTH_AVAILABLE:
810 case CSR_CHANNELS_AVAILABLE_HI:
811 case CSR_CHANNELS_AVAILABLE_LO:
812 /*
813 * FIXME: these are handled by the OHCI hardware and
814		 * the stack never sees these requests. If we add
815 * support for a new type of controller that doesn't
816 * handle this in hardware we need to deal with these
817 * transactions.
818 */
819 BUG();
820 break;
821
822 case CSR_BUSY_TIMEOUT:
823 /* FIXME: Implement this. */
824 default:
825 fw_send_response(card, request, RCODE_ADDRESS_ERROR);
826 break;
827 }
828}
829
830static struct fw_address_handler registers = {
831 .length = 0x400,
832 .address_callback = handle_registers,
833};
834
835MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
836MODULE_DESCRIPTION("Core IEEE1394 transaction logic");
837MODULE_LICENSE("GPL");
838
839static const u32 vendor_textual_descriptor[] = {
840 /* textual descriptor leaf () */
841 0x00060000,
842 0x00000000,
843 0x00000000,
844 0x4c696e75, /* L i n u */
845 0x78204669, /* x F i */
846 0x72657769, /* r e w i */
847 0x72650000, /* r e */
848};
849
850static const u32 model_textual_descriptor[] = {
851 /* model descriptor leaf () */
852 0x00030000,
853 0x00000000,
854 0x00000000,
855 0x4a756a75, /* J u j u */
856};
857
858static struct fw_descriptor vendor_id_descriptor = {
859 .length = ARRAY_SIZE(vendor_textual_descriptor),
860 .immediate = 0x03d00d1e,
861 .key = 0x81000000,
862 .data = vendor_textual_descriptor,
863};
864
865static struct fw_descriptor model_id_descriptor = {
866 .length = ARRAY_SIZE(model_textual_descriptor),
867 .immediate = 0x17000001,
868 .key = 0x81000000,
869 .data = model_textual_descriptor,
870};
871
872static int __init fw_core_init(void)
873{
874 int retval;
875
876 retval = bus_register(&fw_bus_type);
877 if (retval < 0)
878 return retval;
879
880 fw_cdev_major = register_chrdev(0, "firewire", &fw_device_ops);
881 if (fw_cdev_major < 0) {
882 bus_unregister(&fw_bus_type);
883 return fw_cdev_major;
884 }
885
886 retval = fw_core_add_address_handler(&topology_map,
887 &topology_map_region);
888 BUG_ON(retval < 0);
889
890 retval = fw_core_add_address_handler(&registers,
891 &registers_region);
892 BUG_ON(retval < 0);
893
894 /* Add the vendor textual descriptor. */
895 retval = fw_core_add_descriptor(&vendor_id_descriptor);
896 BUG_ON(retval < 0);
897 retval = fw_core_add_descriptor(&model_id_descriptor);
898 BUG_ON(retval < 0);
899
900 return 0;
901}
902
903static void __exit fw_core_cleanup(void)
904{
905 unregister_chrdev(fw_cdev_major, "firewire");
906 bus_unregister(&fw_bus_type);
907}
908
909module_init(fw_core_init);
910module_exit(fw_core_cleanup);
diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h
new file mode 100644
index 000000000000..acdc3be38c61
--- /dev/null
+++ b/drivers/firewire/fw-transaction.h
@@ -0,0 +1,458 @@
1/*
2 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software Foundation,
16 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 */
18
19#ifndef __fw_transaction_h
20#define __fw_transaction_h
21
22#include <linux/device.h>
23#include <linux/timer.h>
24#include <linux/interrupt.h>
25#include <linux/list.h>
26#include <linux/fs.h>
27#include <linux/dma-mapping.h>
28#include <linux/firewire-constants.h>
29
30#define TCODE_IS_READ_REQUEST(tcode) (((tcode) & ~1) == 4)
31#define TCODE_IS_BLOCK_PACKET(tcode) (((tcode) & 1) != 0)
32#define TCODE_IS_REQUEST(tcode) (((tcode) & 2) == 0)
33#define TCODE_IS_RESPONSE(tcode) (((tcode) & 2) != 0)
34#define TCODE_HAS_REQUEST_DATA(tcode) (((tcode) & 12) != 4)
35#define TCODE_HAS_RESPONSE_DATA(tcode) (((tcode) & 12) != 0)
36
37#define LOCAL_BUS 0xffc0
38
39#define SELFID_PORT_CHILD 0x3
40#define SELFID_PORT_PARENT 0x2
41#define SELFID_PORT_NCONN 0x1
42#define SELFID_PORT_NONE 0x0
43
44#define PHY_PACKET_CONFIG 0x0
45#define PHY_PACKET_LINK_ON 0x1
46#define PHY_PACKET_SELF_ID 0x2
47
48/* Bit fields _within_ the PHY registers. */
49#define PHY_LINK_ACTIVE 0x80
50#define PHY_CONTENDER 0x40
51#define PHY_BUS_RESET 0x40
52#define PHY_BUS_SHORT_RESET 0x40
53
54#define CSR_REGISTER_BASE 0xfffff0000000ULL
55
56/* register offsets relative to CSR_REGISTER_BASE */
57#define CSR_STATE_CLEAR 0x0
58#define CSR_STATE_SET 0x4
59#define CSR_NODE_IDS 0x8
60#define CSR_RESET_START 0xc
61#define CSR_SPLIT_TIMEOUT_HI 0x18
62#define CSR_SPLIT_TIMEOUT_LO 0x1c
63#define CSR_CYCLE_TIME 0x200
64#define CSR_BUS_TIME 0x204
65#define CSR_BUSY_TIMEOUT 0x210
66#define CSR_BUS_MANAGER_ID 0x21c
67#define CSR_BANDWIDTH_AVAILABLE 0x220
68#define CSR_CHANNELS_AVAILABLE 0x224
69#define CSR_CHANNELS_AVAILABLE_HI 0x224
70#define CSR_CHANNELS_AVAILABLE_LO 0x228
71#define CSR_BROADCAST_CHANNEL 0x234
72#define CSR_CONFIG_ROM 0x400
73#define CSR_CONFIG_ROM_END 0x800
74#define CSR_FCP_COMMAND 0xB00
75#define CSR_FCP_RESPONSE 0xD00
76#define CSR_FCP_END 0xF00
77#define CSR_TOPOLOGY_MAP 0x1000
78#define CSR_TOPOLOGY_MAP_END 0x1400
79#define CSR_SPEED_MAP 0x2000
80#define CSR_SPEED_MAP_END 0x3000
81
82#define fw_notify(s, args...) printk(KERN_NOTICE KBUILD_MODNAME ": " s, ## args)
83#define fw_error(s, args...) printk(KERN_ERR KBUILD_MODNAME ": " s, ## args)
84#define fw_debug(s, args...) printk(KERN_DEBUG KBUILD_MODNAME ": " s, ## args)
85
86static inline void
87fw_memcpy_from_be32(void *_dst, void *_src, size_t size)
88{
89 u32 *dst = _dst;
90 u32 *src = _src;
91 int i;
92
93 for (i = 0; i < size / 4; i++)
94 dst[i] = cpu_to_be32(src[i]);
95}
96
97static inline void
98fw_memcpy_to_be32(void *_dst, void *_src, size_t size)
99{
100 fw_memcpy_from_be32(_dst, _src, size);
101}
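
Both helpers byte-swap an array of quadlets on little-endian CPUs (and copy verbatim on big-endian ones); since a 32-bit byte swap is its own inverse, the same loop serves both directions, and size must be a multiple of 4. For example (illustrative):

	u32 rom_be[4];

	/* CPU-order quadlets -> bus (big-endian) order: */
	fw_memcpy_to_be32(rom_be, config_rom, sizeof(rom_be));
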
102
103struct fw_card;
104struct fw_packet;
105struct fw_node;
106struct fw_request;
107
108struct fw_descriptor {
109 struct list_head link;
110 size_t length;
111 u32 immediate;
112 u32 key;
113 const u32 *data;
114};
115
116int fw_core_add_descriptor(struct fw_descriptor *desc);
117void fw_core_remove_descriptor(struct fw_descriptor *desc);
118
119typedef void (*fw_packet_callback_t)(struct fw_packet *packet,
120 struct fw_card *card, int status);
121
122typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode,
123 void *data,
124 size_t length,
125 void *callback_data);
126
127typedef void (*fw_address_callback_t)(struct fw_card *card,
128 struct fw_request *request,
129 int tcode, int destination, int source,
130 int generation, int speed,
131 unsigned long long offset,
132 void *data, size_t length,
133 void *callback_data);
134
135typedef void (*fw_bus_reset_callback_t)(struct fw_card *handle,
136 int node_id, int generation,
137 u32 *self_ids,
138 int self_id_count,
139 void *callback_data);
140
141struct fw_packet {
142 int speed;
143 int generation;
144 u32 header[4];
145 size_t header_length;
146 void *payload;
147 size_t payload_length;
148 u32 timestamp;
149
150 /*
151 * This callback is called when the packet transmission has
152 * completed; for successful transmission, the status code is
153 * the ack received from the destination, otherwise it's a
154 * negative errno: ENOMEM, ESTALE, ETIMEDOUT, ENODEV, EIO.
155 * The callback can be called from tasklet context and thus
156 * must never block.
157 */
158 fw_packet_callback_t callback;
159 int ack;
160 struct list_head link;
161 void *driver_data;
162};
163
164struct fw_transaction {
165 int node_id; /* The generation is implied; it is always the current. */
166 int tlabel;
167 int timestamp;
168 struct list_head link;
169
170 struct fw_packet packet;
171
172 /*
173 * The data passed to the callback is valid only during the
174 * callback.
175 */
176 fw_transaction_callback_t callback;
177 void *callback_data;
178};
179
180static inline struct fw_packet *
181fw_packet(struct list_head *l)
182{
183 return list_entry(l, struct fw_packet, link);
184}
185
186struct fw_address_handler {
187 u64 offset;
188 size_t length;
189 fw_address_callback_t address_callback;
190 void *callback_data;
191 struct list_head link;
192};
193
194
195struct fw_address_region {
196 u64 start;
197 u64 end;
198};
199
200extern const struct fw_address_region fw_low_memory_region;
201extern const struct fw_address_region fw_high_memory_region;
202extern const struct fw_address_region fw_private_region;
203extern const struct fw_address_region fw_csr_region;
204extern const struct fw_address_region fw_unit_space_region;
205
206int fw_core_add_address_handler(struct fw_address_handler *handler,
207 const struct fw_address_region *region);
208void fw_core_remove_address_handler(struct fw_address_handler *handler);
209void fw_fill_response(struct fw_packet *response, u32 *request_header,
210 int rcode, void *payload, size_t length);
211void fw_send_response(struct fw_card *card,
212 struct fw_request *request, int rcode);
213
214extern struct bus_type fw_bus_type;
215
216struct fw_card {
217 const struct fw_card_driver *driver;
218 struct device *device;
219 struct kref kref;
220
221 int node_id;
222 int generation;
223 /* This is the generation used for timestamping incoming requests. */
224 int request_generation;
225 int current_tlabel, tlabel_mask;
226 struct list_head transaction_list;
227 struct timer_list flush_timer;
228 unsigned long reset_jiffies;
229
230 unsigned long long guid;
231 int max_receive;
232 int link_speed;
233 int config_rom_generation;
234
235 /*
236	 * We need to store up to 4 self IDs for each of a maximum of 63
237 * devices plus 3 words for the topology map header.
238 */
239 int self_id_count;
240 u32 topology_map[252 + 3];
241
242 spinlock_t lock; /* Take this lock when handling the lists in
243 * this struct. */
244 struct fw_node *local_node;
245 struct fw_node *root_node;
246 struct fw_node *irm_node;
247 int color;
248 int gap_count;
249 int topology_type;
250
251 int index;
252
253 struct list_head link;
254
255 /* Work struct for BM duties. */
256 struct delayed_work work;
257 int bm_retries;
258 int bm_generation;
259};
260
261struct fw_card *fw_card_get(struct fw_card *card);
262void fw_card_put(struct fw_card *card);
263
264/*
265 * The iso packet format allows for an immediate header/payload part
266 * stored in 'header' immediately after the packet info plus an
267 * indirect payload part that is pointed to by the 'payload' field.
268 * Applications can use one or the other or both to implement simple
269 * low-bandwidth streaming (e.g. audio) or more advanced
270 * scatter-gather streaming (e.g. assembling video frames automatically).
271 */
272
273struct fw_iso_packet {
274 u16 payload_length; /* Length of indirect payload. */
275 u32 interrupt : 1; /* Generate interrupt on this packet */
276 u32 skip : 1; /* Set to not send packet at all. */
277 u32 tag : 2;
278 u32 sy : 4;
279 u32 header_length : 8; /* Length of immediate header. */
280 u32 header[0];
281};
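
A sketch of filling in a descriptor for a payload-only packet; the actual data lives in an fw_iso_buffer and is referenced by the offset passed to fw_iso_context_queue() (values illustrative):

	struct fw_iso_packet p = {
		.payload_length	= 512,	/* indirect part, taken from the iso buffer */
		.interrupt	= 1,	/* run the context callback for this packet */
		.header_length	= 0,	/* no immediate header in this sketch */
	};
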
282
283#define FW_ISO_CONTEXT_TRANSMIT 0
284#define FW_ISO_CONTEXT_RECEIVE 1
285
286#define FW_ISO_CONTEXT_MATCH_TAG0 1
287#define FW_ISO_CONTEXT_MATCH_TAG1 2
288#define FW_ISO_CONTEXT_MATCH_TAG2 4
289#define FW_ISO_CONTEXT_MATCH_TAG3 8
290#define FW_ISO_CONTEXT_MATCH_ALL_TAGS 15
291
292struct fw_iso_context;
293
294typedef void (*fw_iso_callback_t)(struct fw_iso_context *context,
295 u32 cycle,
296 size_t header_length,
297 void *header,
298 void *data);
299
300/*
301 * An iso buffer is just a set of pages mapped for DMA in the
302 * specified direction. Since the pages are to be used for DMA, they
303 * are not mapped into the kernel virtual address space. We store the
304 * DMA address in the page private field. The helper function
305 * fw_iso_buffer_map() will map the pages into a given vma.
306 */
307
308struct fw_iso_buffer {
309 enum dma_data_direction direction;
310 struct page **pages;
311 int page_count;
312};
313
314struct fw_iso_context {
315 struct fw_card *card;
316 int type;
317 int channel;
318 int speed;
319 size_t header_size;
320 fw_iso_callback_t callback;
321 void *callback_data;
322};
323
324int
325fw_iso_buffer_init(struct fw_iso_buffer *buffer,
326 struct fw_card *card,
327 int page_count,
328 enum dma_data_direction direction);
329int
330fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma);
331void
332fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card);
333
334struct fw_iso_context *
335fw_iso_context_create(struct fw_card *card, int type,
336 int channel, int speed, size_t header_size,
337 fw_iso_callback_t callback, void *callback_data);
338
339void
340fw_iso_context_destroy(struct fw_iso_context *ctx);
341
342int
343fw_iso_context_queue(struct fw_iso_context *ctx,
344 struct fw_iso_packet *packet,
345 struct fw_iso_buffer *buffer,
346 unsigned long payload);
347
348int
349fw_iso_context_start(struct fw_iso_context *ctx,
350 int cycle, int sync, int tags);
351
352int
353fw_iso_context_stop(struct fw_iso_context *ctx);
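
A sketch of how the pieces combine for a transmit context; error handling is omitted and the cycle/sync/tags values are illustrative (a negative cycle is taken here to mean start as soon as possible):

	struct fw_iso_context *ctx;
	struct fw_iso_buffer buffer;

	ctx = fw_iso_context_create(card, FW_ISO_CONTEXT_TRANSMIT,
				    channel, SCODE_200, 4 /* header_size */,
				    tx_callback, NULL);
	fw_iso_buffer_init(&buffer, card, 16, DMA_TO_DEVICE);
	/* fill the buffer, queue descriptors with fw_iso_context_queue(), then: */
	fw_iso_context_start(ctx, -1, 0, 0);
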
354
355struct fw_card_driver {
356 const char *name;
357
358 /*
359 * Enable the given card with the given initial config rom.
360 * This function is expected to activate the card, and either
361 * enable the PHY or set the link_on bit and initiate a bus
362 * reset.
363 */
364 int (*enable)(struct fw_card *card, u32 *config_rom, size_t length);
365
366 int (*update_phy_reg)(struct fw_card *card, int address,
367 int clear_bits, int set_bits);
368
369 /*
370 * Update the config rom for an enabled card. This function
371 * should change the config rom that is presented on the bus
372	 * and initiate a bus reset.
373 */
374 int (*set_config_rom)(struct fw_card *card,
375 u32 *config_rom, size_t length);
376
377 void (*send_request)(struct fw_card *card, struct fw_packet *packet);
378 void (*send_response)(struct fw_card *card, struct fw_packet *packet);
379 /* Calling cancel is valid once a packet has been submitted. */
380 int (*cancel_packet)(struct fw_card *card, struct fw_packet *packet);
381
382 /*
383 * Allow the specified node ID to do direct DMA out and in of
384	 * host memory. The card will disable this for all nodes when
385	 * a bus reset happens, so the driver needs to re-enable this after a
386 * bus reset. Returns 0 on success, -ENODEV if the card
387 * doesn't support this, -ESTALE if the generation doesn't
388 * match.
389 */
390 int (*enable_phys_dma)(struct fw_card *card,
391 int node_id, int generation);
392
393 u64 (*get_bus_time)(struct fw_card *card);
394
395 struct fw_iso_context *
396 (*allocate_iso_context)(struct fw_card *card,
397 int type, size_t header_size);
398 void (*free_iso_context)(struct fw_iso_context *ctx);
399
400 int (*start_iso)(struct fw_iso_context *ctx,
401 s32 cycle, u32 sync, u32 tags);
402
403 int (*queue_iso)(struct fw_iso_context *ctx,
404 struct fw_iso_packet *packet,
405 struct fw_iso_buffer *buffer,
406 unsigned long payload);
407
408 int (*stop_iso)(struct fw_iso_context *ctx);
409};
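
A controller driver implements these hooks and hands the structure to the core with fw_card_initialize() and fw_card_add(), declared below. A skeleton, with hypothetical example_* functions standing in for the real implementations:

	static const struct fw_card_driver example_driver = {
		.name		 = "example",
		.enable		 = example_enable,
		.update_phy_reg	 = example_update_phy_reg,
		.set_config_rom	 = example_set_config_rom,
		.send_request	 = example_send_request,
		.send_response	 = example_send_response,
		.cancel_packet	 = example_cancel_packet,
		.enable_phys_dma = example_enable_phys_dma,
		.get_bus_time	 = example_get_bus_time,
		/* iso hooks omitted in this sketch */
	};

	fw_card_initialize(card, &example_driver, &pdev->dev);
	fw_card_add(card, max_receive, link_speed, guid);
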
410
411int
412fw_core_initiate_bus_reset(struct fw_card *card, int short_reset);
413
414void
415fw_send_request(struct fw_card *card, struct fw_transaction *t,
416 int tcode, int node_id, int generation, int speed,
417 unsigned long long offset,
418 void *data, size_t length,
419 fw_transaction_callback_t callback, void *callback_data);
420
421int fw_cancel_transaction(struct fw_card *card,
422 struct fw_transaction *transaction);
423
424void fw_flush_transactions(struct fw_card *card);
425
426void fw_send_phy_config(struct fw_card *card,
427 int node_id, int generation, int gap_count);
428
429/*
430 * Called by the topology code to inform the device code of node
431 * activity; found, lost, or updated nodes.
432 */
433void
434fw_node_event(struct fw_card *card, struct fw_node *node, int event);
435
436/* API used by card level drivers */
437
438void
439fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver,
440 struct device *device);
441int
442fw_card_add(struct fw_card *card,
443 u32 max_receive, u32 link_speed, u64 guid);
444
445void
446fw_core_remove_card(struct fw_card *card);
447
448void
449fw_core_handle_bus_reset(struct fw_card *card,
450 int node_id, int generation,
451 int self_id_count, u32 *self_ids);
452void
453fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
454
455void
456fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
457
458#endif /* __fw_transaction_h */
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 3ba3a5221c41..4d1cb5b855d1 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -4,6 +4,7 @@
4 4
5menuconfig HWMON 5menuconfig HWMON
6 tristate "Hardware Monitoring support" 6 tristate "Hardware Monitoring support"
7 depends on HAS_IOMEM
7 default y 8 default y
8 help 9 help
9 Hardware monitoring devices let you monitor the hardware health 10 Hardware monitoring devices let you monitor the hardware health
diff --git a/drivers/hwmon/ams/ams-input.c b/drivers/hwmon/ams/ams-input.c
index 18210164e307..ca7095d96ad0 100644
--- a/drivers/hwmon/ams/ams-input.c
+++ b/drivers/hwmon/ams/ams-input.c
@@ -87,7 +87,7 @@ static void ams_input_enable(void)
87 ams_info.idev->id.vendor = 0; 87 ams_info.idev->id.vendor = 0;
88 ams_info.idev->open = ams_input_open; 88 ams_info.idev->open = ams_input_open;
89 ams_info.idev->close = ams_input_close; 89 ams_info.idev->close = ams_input_close;
90 ams_info.idev->cdev.dev = &ams_info.of_dev->dev; 90 ams_info.idev->dev.parent = &ams_info.of_dev->dev;
91 91
92 input_set_abs_params(ams_info.idev, ABS_X, -50, 50, 3, 0); 92 input_set_abs_params(ams_info.idev, ABS_X, -50, 50, 3, 0);
93 input_set_abs_params(ams_info.idev, ABS_Y, -50, 50, 3, 0); 93 input_set_abs_params(ams_info.idev, ABS_Y, -50, 50, 3, 0);
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index b51c104a28a2..0c160675b3ac 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -1100,7 +1100,7 @@ static int applesmc_create_accelerometer(void)
1100 /* initialize the input class */ 1100 /* initialize the input class */
1101 applesmc_idev->name = "applesmc"; 1101 applesmc_idev->name = "applesmc";
1102 applesmc_idev->id.bustype = BUS_HOST; 1102 applesmc_idev->id.bustype = BUS_HOST;
1103 applesmc_idev->cdev.dev = &pdev->dev; 1103 applesmc_idev->dev.parent = &pdev->dev;
1104 applesmc_idev->evbit[0] = BIT(EV_ABS); 1104 applesmc_idev->evbit[0] = BIT(EV_ABS);
1105 applesmc_idev->open = applesmc_idev_open; 1105 applesmc_idev->open = applesmc_idev_open;
1106 applesmc_idev->close = applesmc_idev_close; 1106 applesmc_idev->close = applesmc_idev_close;
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 03b1f650d1c4..75e3911810a3 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -309,9 +309,11 @@ static int coretemp_cpu_callback(struct notifier_block *nfb,
309 309
310 switch (action) { 310 switch (action) {
311 case CPU_ONLINE: 311 case CPU_ONLINE:
312 case CPU_ONLINE_FROZEN:
312 coretemp_device_add(cpu); 313 coretemp_device_add(cpu);
313 break; 314 break;
314 case CPU_DEAD: 315 case CPU_DEAD:
316 case CPU_DEAD_FROZEN:
315 coretemp_device_remove(cpu); 317 coretemp_device_remove(cpu);
316 break; 318 break;
317 } 319 }
diff --git a/drivers/hwmon/hdaps.c b/drivers/hwmon/hdaps.c
index f82fa2d23f95..e0cf5e6fe5bc 100644
--- a/drivers/hwmon/hdaps.c
+++ b/drivers/hwmon/hdaps.c
@@ -574,7 +574,7 @@ static int __init hdaps_init(void)
574 574
575 /* initialize the input class */ 575 /* initialize the input class */
576 hdaps_idev->name = "hdaps"; 576 hdaps_idev->name = "hdaps";
577 hdaps_idev->cdev.dev = &pdev->dev; 577 hdaps_idev->dev.parent = &pdev->dev;
578 hdaps_idev->evbit[0] = BIT(EV_ABS); 578 hdaps_idev->evbit[0] = BIT(EV_ABS);
579 input_set_abs_params(hdaps_idev, ABS_X, 579 input_set_abs_params(hdaps_idev, ABS_X,
580 -256, 256, HDAPS_INPUT_FUZZ, HDAPS_INPUT_FLAT); 580 -256, 256, HDAPS_INPUT_FUZZ, HDAPS_INPUT_FLAT);
diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig
index 434a61b415a3..96867347bcbf 100644
--- a/drivers/i2c/Kconfig
+++ b/drivers/i2c/Kconfig
@@ -4,6 +4,7 @@
4 4
5menuconfig I2C 5menuconfig I2C
6 tristate "I2C support" 6 tristate "I2C support"
7 depends on HAS_IOMEM
7 ---help--- 8 ---help---
8 I2C (pronounce: I-square-C) is a slow serial bus protocol used in 9 I2C (pronounce: I-square-C) is a slow serial bus protocol used in
9 many micro controller applications and developed by Philips. SMBus, 10 many micro controller applications and developed by Philips. SMBus,
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
index f35156c58922..9c8b6d5eaec9 100644
--- a/drivers/i2c/busses/i2c-at91.c
+++ b/drivers/i2c/busses/i2c-at91.c
@@ -16,6 +16,7 @@
16#include <linux/module.h> 16#include <linux/module.h>
17#include <linux/version.h> 17#include <linux/version.h>
18#include <linux/kernel.h> 18#include <linux/kernel.h>
19#include <linux/err.h>
19#include <linux/slab.h> 20#include <linux/slab.h>
20#include <linux/types.h> 21#include <linux/types.h>
21#include <linux/delay.h> 22#include <linux/delay.h>
@@ -226,13 +227,14 @@ static int __devinit at91_i2c_probe(struct platform_device *pdev)
226 adapter->algo = &at91_algorithm; 227 adapter->algo = &at91_algorithm;
227 adapter->class = I2C_CLASS_HWMON; 228 adapter->class = I2C_CLASS_HWMON;
228 adapter->dev.parent = &pdev->dev; 229 adapter->dev.parent = &pdev->dev;
230 /* adapter->id == 0 ... only one TWI controller for now */
229 231
230 platform_set_drvdata(pdev, adapter); 232 platform_set_drvdata(pdev, adapter);
231 233
232 clk_enable(twi_clk); /* enable peripheral clock */ 234 clk_enable(twi_clk); /* enable peripheral clock */
233 at91_twi_hwinit(); /* initialize TWI controller */ 235 at91_twi_hwinit(); /* initialize TWI controller */
234 236
235 rc = i2c_add_adapter(adapter); 237 rc = i2c_add_numbered_adapter(adapter);
236 if (rc) { 238 if (rc) {
237 dev_err(&pdev->dev, "Adapter %s registration failed\n", 239 dev_err(&pdev->dev, "Adapter %s registration failed\n",
238 adapter->name); 240 adapter->name);
@@ -295,6 +297,9 @@ static int at91_i2c_resume(struct platform_device *pdev)
295#define at91_i2c_resume NULL 297#define at91_i2c_resume NULL
296#endif 298#endif
297 299
300/* work with "modprobe at91_i2c" from hotplugging or coldplugging */
301MODULE_ALIAS("at91_i2c");
302
298static struct platform_driver at91_i2c_driver = { 303static struct platform_driver at91_i2c_driver = {
299 .probe = at91_i2c_probe, 304 .probe = at91_i2c_probe,
300 .remove = __devexit_p(at91_i2c_remove), 305 .remove = __devexit_p(at91_i2c_remove),
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index 873544ab598e..8a0a99b93641 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -548,7 +548,7 @@ static inline void i2c_pxa_stop_message(struct pxa_i2c *i2c)
548 */ 548 */
549 icr = readl(_ICR(i2c)); 549 icr = readl(_ICR(i2c));
550 icr &= ~(ICR_STOP | ICR_ACKNAK); 550 icr &= ~(ICR_STOP | ICR_ACKNAK);
551 writel(icr, _IRC(i2c)); 551 writel(icr, _ICR(i2c));
552} 552}
553 553
554/* 554/*
diff --git a/drivers/i2c/chips/tps65010.c b/drivers/i2c/chips/tps65010.c
index 7ed92dc3d833..3c3f2ebf3fc9 100644
--- a/drivers/i2c/chips/tps65010.c
+++ b/drivers/i2c/chips/tps65010.c
@@ -354,7 +354,7 @@ static void tps65010_interrupt(struct tps65010 *tps)
354 * also needs to get error handling and probably 354 * also needs to get error handling and probably
355 * an #ifdef CONFIG_SOFTWARE_SUSPEND 355 * an #ifdef CONFIG_SOFTWARE_SUSPEND
356 */ 356 */
357 pm_suspend(PM_SUSPEND_DISK); 357 hibernate();
358#endif 358#endif
359 poll = 1; 359 poll = 1;
360 } 360 }
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 5bdf64b77913..9040809d2c25 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -7,6 +7,7 @@
7if BLOCK 7if BLOCK
8 8
9menu "ATA/ATAPI/MFM/RLL support" 9menu "ATA/ATAPI/MFM/RLL support"
10 depends on HAS_IOMEM
10 11
11config IDE 12config IDE
12 tristate "ATA/ATAPI/MFM/RLL support" 13 tristate "ATA/ATAPI/MFM/RLL support"
@@ -291,6 +292,17 @@ config IDE_TASK_IOCTL
291 292
292 If you are unsure, say N here. 293 If you are unsure, say N here.
293 294
295config IDE_PROC_FS
296 bool "legacy /proc/ide/ support"
297 depends on IDE && PROC_FS
298 default y
299 help
300 This option enables support for the various files in
301 /proc/ide. In Linux 2.6 this has been superseded by
302 files in sysfs but many legacy applications rely on this.
303
304 If unsure say Y.
305
294comment "IDE chipset support/bugfixes" 306comment "IDE chipset support/bugfixes"
295 307
296config IDE_GENERIC 308config IDE_GENERIC
@@ -360,6 +372,9 @@ config IDEPCI_SHARE_IRQ
360 It is safe to say Y to this question, in most cases. 372 It is safe to say Y to this question, in most cases.
361 If unsure, say N. 373 If unsure, say N.
362 374
375config IDEPCI_PCIBUS_ORDER
376 def_bool PCI && BLK_DEV_IDE=y && BLK_DEV_IDEPCI
377
363config BLK_DEV_OFFBOARD 378config BLK_DEV_OFFBOARD
364 bool "Boot off-board chipsets first support" 379 bool "Boot off-board chipsets first support"
365 depends on PCI && BLK_DEV_IDEPCI 380 depends on PCI && BLK_DEV_IDEPCI
diff --git a/drivers/ide/Makefile b/drivers/ide/Makefile
index d9f029e8ff74..75dc6969e0a7 100644
--- a/drivers/ide/Makefile
+++ b/drivers/ide/Makefile
@@ -20,7 +20,7 @@ ide-core-$(CONFIG_BLK_DEV_CMD640) += pci/cmd640.o
20# Core IDE code - must come before legacy 20# Core IDE code - must come before legacy
21ide-core-$(CONFIG_BLK_DEV_IDEPCI) += setup-pci.o 21ide-core-$(CONFIG_BLK_DEV_IDEPCI) += setup-pci.o
22ide-core-$(CONFIG_BLK_DEV_IDEDMA) += ide-dma.o 22ide-core-$(CONFIG_BLK_DEV_IDEDMA) += ide-dma.o
23ide-core-$(CONFIG_PROC_FS) += ide-proc.o 23ide-core-$(CONFIG_IDE_PROC_FS) += ide-proc.o
24ide-core-$(CONFIG_BLK_DEV_IDEPNP) += ide-pnp.o 24ide-core-$(CONFIG_BLK_DEV_IDEPNP) += ide-pnp.o
25ide-core-$(CONFIG_BLK_DEV_IDEACPI) += ide-acpi.o 25ide-core-$(CONFIG_BLK_DEV_IDEACPI) += ide-acpi.o
26 26
diff --git a/drivers/ide/arm/bast-ide.c b/drivers/ide/arm/bast-ide.c
index 9d474e5fd8dc..f7449d04114a 100644
--- a/drivers/ide/arm/bast-ide.c
+++ b/drivers/ide/arm/bast-ide.c
@@ -45,7 +45,7 @@ bastide_register(unsigned int base, unsigned int aux, int irq,
45 hw.io_ports[IDE_CONTROL_OFFSET] = aux + (6 * 0x20); 45 hw.io_ports[IDE_CONTROL_OFFSET] = aux + (6 * 0x20);
46 hw.irq = irq; 46 hw.irq = irq;
47 47
48 ide_register_hw(&hw, hwif); 48 ide_register_hw(&hw, 0, hwif);
49 49
50 return 0; 50 return 0;
51} 51}
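ide_register_hw() now takes an extra flag between the hw description and the hwif pointer; the conversions in this series pass 0 from hotplug-style paths (as in bast-ide above) and 1 from built-in init paths (ide_arm, cris, h8300, pnp below). A sketch of the two call shapes, values illustrative:

hw_regs_t hw;
ide_hwif_t *hwif;

memset(&hw, 0, sizeof(hw));
/* ... fill in hw.io_ports[] and hw.irq for the port ... */

/* hotplug-style registration, e.g. an expansion card probe */
ide_register_hw(&hw, 0, &hwif);

/* built-in setup while drivers are still initializing */
ide_register_hw(&hw, 1, NULL);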
diff --git a/drivers/ide/arm/icside.c b/drivers/ide/arm/icside.c
index e2953fc1fafb..1fe0457243db 100644
--- a/drivers/ide/arm/icside.c
+++ b/drivers/ide/arm/icside.c
@@ -342,7 +342,7 @@ static int icside_dma_check(ide_drive_t *drive)
342 * Enable DMA on any drive that has multiword DMA 342 * Enable DMA on any drive that has multiword DMA
343 */ 343 */
344 if (id->field_valid & 2) { 344 if (id->field_valid & 2) {
345 xfer_mode = ide_dma_speed(drive, 0); 345 xfer_mode = ide_max_dma_mode(drive);
346 goto out; 346 goto out;
347 } 347 }
348 348
@@ -591,7 +591,8 @@ icside_register_v5(struct icside_state *state, struct expansion_card *ec)
591 state->hwif[0] = hwif; 591 state->hwif[0] = hwif;
592 592
593 probe_hwif_init(hwif); 593 probe_hwif_init(hwif);
594 create_proc_ide_interfaces(); 594
595 ide_proc_register_port(hwif);
595 596
596 return 0; 597 return 0;
597} 598}
@@ -679,7 +680,9 @@ icside_register_v6(struct icside_state *state, struct expansion_card *ec)
679 680
680 probe_hwif_init(hwif); 681 probe_hwif_init(hwif);
681 probe_hwif_init(mate); 682 probe_hwif_init(mate);
682 create_proc_ide_interfaces(); 683
684 ide_proc_register_port(hwif);
685 ide_proc_register_port(mate);
683 686
684 return 0; 687 return 0;
685 688
diff --git a/drivers/ide/arm/ide_arm.c b/drivers/ide/arm/ide_arm.c
index 23488c4d1fcd..a3d6744e870a 100644
--- a/drivers/ide/arm/ide_arm.c
+++ b/drivers/ide/arm/ide_arm.c
@@ -38,6 +38,6 @@ void __init ide_arm_init(void)
38 memset(&hw, 0, sizeof(hw)); 38 memset(&hw, 0, sizeof(hw));
39 ide_std_init_ports(&hw, IDE_ARM_IO, IDE_ARM_IO + 0x206); 39 ide_std_init_ports(&hw, IDE_ARM_IO, IDE_ARM_IO + 0x206);
40 hw.irq = IDE_ARM_IRQ; 40 hw.irq = IDE_ARM_IRQ;
41 ide_register_hw(&hw, NULL); 41 ide_register_hw(&hw, 1, NULL);
42 } 42 }
43} 43}
diff --git a/drivers/ide/arm/rapide.c b/drivers/ide/arm/rapide.c
index 9c6c49fdd2b1..890ea3fac3c6 100644
--- a/drivers/ide/arm/rapide.c
+++ b/drivers/ide/arm/rapide.c
@@ -76,7 +76,7 @@ rapide_probe(struct expansion_card *ec, const struct ecard_id *id)
76 hwif->gendev.parent = &ec->dev; 76 hwif->gendev.parent = &ec->dev;
77 hwif->noprobe = 0; 77 hwif->noprobe = 0;
78 probe_hwif_init(hwif); 78 probe_hwif_init(hwif);
79 create_proc_ide_interfaces(); 79 ide_proc_register_port(hwif);
80 ecard_set_drvdata(ec, hwif); 80 ecard_set_drvdata(ec, hwif);
81 goto out; 81 goto out;
82 } 82 }
diff --git a/drivers/ide/cris/ide-cris.c b/drivers/ide/cris/ide-cris.c
index 5e8efc89255a..c04cb25a01ff 100644
--- a/drivers/ide/cris/ide-cris.c
+++ b/drivers/ide/cris/ide-cris.c
@@ -796,7 +796,7 @@ init_e100_ide (void)
796 ide_offsets, 796 ide_offsets,
797 0, 0, cris_ide_ack_intr, 797 0, 0, cris_ide_ack_intr,
798 ide_default_irq(0)); 798 ide_default_irq(0));
799 ide_register_hw(&hw, &hwif); 799 ide_register_hw(&hw, 1, &hwif);
800 hwif->mmio = 1; 800 hwif->mmio = 1;
801 hwif->chipset = ide_etrax100; 801 hwif->chipset = ide_etrax100;
802 hwif->tuneproc = &tune_cris_ide; 802 hwif->tuneproc = &tune_cris_ide;
@@ -1004,7 +1004,7 @@ static int cris_ide_build_dmatable (ide_drive_t *drive)
1004 1004
1005static int cris_config_drive_for_dma (ide_drive_t *drive) 1005static int cris_config_drive_for_dma (ide_drive_t *drive)
1006{ 1006{
1007 u8 speed = ide_dma_speed(drive, 1); 1007 u8 speed = ide_max_dma_mode(drive);
1008 1008
1009 if (!speed) 1009 if (!speed)
1010 return 0; 1010 return 0;
diff --git a/drivers/ide/h8300/ide-h8300.c b/drivers/ide/h8300/ide-h8300.c
index 88750a300337..6d26ad7360d5 100644
--- a/drivers/ide/h8300/ide-h8300.c
+++ b/drivers/ide/h8300/ide-h8300.c
@@ -101,7 +101,7 @@ void __init h8300_ide_init(void)
101 hw_setup(&hw); 101 hw_setup(&hw);
102 102
103 /* register if */ 103 /* register if */
104 idx = ide_register_hw(&hw, &hwif); 104 idx = ide_register_hw(&hw, 1, &hwif);
105 if (idx == -1) { 105 if (idx == -1) {
106 printk(KERN_ERR "ide-h8300: IDE I/F register failed\n"); 106 printk(KERN_ERR "ide-h8300: IDE I/F register failed\n");
107 return; 107 return;
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 638becda81c6..252ab8295edf 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -3059,10 +3059,14 @@ int ide_cdrom_probe_capabilities (ide_drive_t *drive)
3059 return nslots; 3059 return nslots;
3060} 3060}
3061 3061
3062#ifdef CONFIG_IDE_PROC_FS
3062static void ide_cdrom_add_settings(ide_drive_t *drive) 3063static void ide_cdrom_add_settings(ide_drive_t *drive)
3063{ 3064{
3064 ide_add_setting(drive, "dsc_overlap", SETTING_RW, -1, -1, TYPE_BYTE, 0, 1, 1, 1, &drive->dsc_overlap, NULL); 3065 ide_add_setting(drive, "dsc_overlap", SETTING_RW, TYPE_BYTE, 0, 1, 1, 1, &drive->dsc_overlap, NULL);
3065} 3066}
3067#else
3068static inline void ide_cdrom_add_settings(ide_drive_t *drive) { ; }
3069#endif
3066 3070
3067/* 3071/*
3068 * standard prep_rq_fn that builds 10 byte cmds 3072 * standard prep_rq_fn that builds 10 byte cmds
@@ -3274,7 +3278,7 @@ int ide_cdrom_setup (ide_drive_t *drive)
3274 return 0; 3278 return 0;
3275} 3279}
3276 3280
3277#ifdef CONFIG_PROC_FS 3281#ifdef CONFIG_IDE_PROC_FS
3278static 3282static
3279sector_t ide_cdrom_capacity (ide_drive_t *drive) 3283sector_t ide_cdrom_capacity (ide_drive_t *drive)
3280{ 3284{
@@ -3291,7 +3295,7 @@ static void ide_cd_remove(ide_drive_t *drive)
3291{ 3295{
3292 struct cdrom_info *info = drive->driver_data; 3296 struct cdrom_info *info = drive->driver_data;
3293 3297
3294 ide_unregister_subdriver(drive, info->driver); 3298 ide_proc_unregister_driver(drive, info->driver);
3295 3299
3296 del_gendisk(info->disk); 3300 del_gendisk(info->disk);
3297 3301
@@ -3321,7 +3325,7 @@ static void ide_cd_release(struct kref *kref)
3321 3325
3322static int ide_cd_probe(ide_drive_t *); 3326static int ide_cd_probe(ide_drive_t *);
3323 3327
3324#ifdef CONFIG_PROC_FS 3328#ifdef CONFIG_IDE_PROC_FS
3325static int proc_idecd_read_capacity 3329static int proc_idecd_read_capacity
3326 (char *page, char **start, off_t off, int count, int *eof, void *data) 3330 (char *page, char **start, off_t off, int count, int *eof, void *data)
3327{ 3331{
@@ -3336,8 +3340,6 @@ static ide_proc_entry_t idecd_proc[] = {
3336 { "capacity", S_IFREG|S_IRUGO, proc_idecd_read_capacity, NULL }, 3340 { "capacity", S_IFREG|S_IRUGO, proc_idecd_read_capacity, NULL },
3337 { NULL, 0, NULL, NULL } 3341 { NULL, 0, NULL, NULL }
3338}; 3342};
3339#else
3340# define idecd_proc NULL
3341#endif 3343#endif
3342 3344
3343static ide_driver_t ide_cdrom_driver = { 3345static ide_driver_t ide_cdrom_driver = {
@@ -3355,7 +3357,9 @@ static ide_driver_t ide_cdrom_driver = {
3355 .end_request = ide_end_request, 3357 .end_request = ide_end_request,
3356 .error = __ide_error, 3358 .error = __ide_error,
3357 .abort = __ide_abort, 3359 .abort = __ide_abort,
3360#ifdef CONFIG_IDE_PROC_FS
3358 .proc = idecd_proc, 3361 .proc = idecd_proc,
3362#endif
3359}; 3363};
3360 3364
3361static int idecd_open(struct inode * inode, struct file * file) 3365static int idecd_open(struct inode * inode, struct file * file)
@@ -3517,7 +3521,7 @@ static int ide_cd_probe(ide_drive_t *drive)
3517 3521
3518 ide_init_disk(g, drive); 3522 ide_init_disk(g, drive);
3519 3523
3520 ide_register_subdriver(drive, &ide_cdrom_driver); 3524 ide_proc_register_driver(drive, &ide_cdrom_driver);
3521 3525
3522 kref_init(&info->kref); 3526 kref_init(&info->kref);
3523 3527
@@ -3534,7 +3538,7 @@ static int ide_cd_probe(ide_drive_t *drive)
3534 g->flags = GENHD_FL_CD | GENHD_FL_REMOVABLE; 3538 g->flags = GENHD_FL_CD | GENHD_FL_REMOVABLE;
3535 if (ide_cdrom_setup(drive)) { 3539 if (ide_cdrom_setup(drive)) {
3536 struct cdrom_device_info *devinfo = &info->devinfo; 3540 struct cdrom_device_info *devinfo = &info->devinfo;
3537 ide_unregister_subdriver(drive, &ide_cdrom_driver); 3541 ide_proc_unregister_driver(drive, &ide_cdrom_driver);
3538 kfree(info->buffer); 3542 kfree(info->buffer);
3539 kfree(info->toc); 3543 kfree(info->toc);
3540 kfree(info->changer_info); 3544 kfree(info->changer_info);
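ide-cd is the first of several drivers converted to the same shape: the settings helper is compiled only under CONFIG_IDE_PROC_FS, with an empty static inline fallback so callers stay free of #ifdefs. The pattern, with an illustrative example_ name:

#ifdef CONFIG_IDE_PROC_FS
static void example_add_settings(ide_drive_t *drive)
{
	ide_add_setting(drive, "dsc_overlap", SETTING_RW, TYPE_BYTE,
			0, 1, 1, 1, &drive->dsc_overlap, NULL);
}
#else
static inline void example_add_settings(ide_drive_t *drive) { ; }
#endif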
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 37aa6ddd9702..7fff773f2df7 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -559,8 +559,7 @@ static sector_t idedisk_capacity (ide_drive_t *drive)
559 return drive->capacity64 - drive->sect0; 559 return drive->capacity64 - drive->sect0;
560} 560}
561 561
562#ifdef CONFIG_PROC_FS 562#ifdef CONFIG_IDE_PROC_FS
563
564static int smart_enable(ide_drive_t *drive) 563static int smart_enable(ide_drive_t *drive)
565{ 564{
566 ide_task_t args; 565 ide_task_t args;
@@ -678,12 +677,7 @@ static ide_proc_entry_t idedisk_proc[] = {
678 { "smart_thresholds", S_IFREG|S_IRUSR, proc_idedisk_read_smart_thresholds, NULL }, 677 { "smart_thresholds", S_IFREG|S_IRUSR, proc_idedisk_read_smart_thresholds, NULL },
679 { NULL, 0, NULL, NULL } 678 { NULL, 0, NULL, NULL }
680}; 679};
681 680#endif /* CONFIG_IDE_PROC_FS */
682#else
683
684#define idedisk_proc NULL
685
686#endif /* CONFIG_PROC_FS */
687 681
688static void idedisk_prepare_flush(request_queue_t *q, struct request *rq) 682static void idedisk_prepare_flush(request_queue_t *q, struct request *rq)
689{ 683{
@@ -737,6 +731,9 @@ static int set_multcount(ide_drive_t *drive, int arg)
737{ 731{
738 struct request rq; 732 struct request rq;
739 733
734 if (arg < 0 || arg > drive->id->max_multsect)
735 return -EINVAL;
736
740 if (drive->special.b.set_multmode) 737 if (drive->special.b.set_multmode)
741 return -EBUSY; 738 return -EBUSY;
742 ide_init_drive_cmd (&rq); 739 ide_init_drive_cmd (&rq);
@@ -749,6 +746,9 @@ static int set_multcount(ide_drive_t *drive, int arg)
749 746
750static int set_nowerr(ide_drive_t *drive, int arg) 747static int set_nowerr(ide_drive_t *drive, int arg)
751{ 748{
749 if (arg < 0 || arg > 1)
750 return -EINVAL;
751
752 if (ide_spin_wait_hwgroup(drive)) 752 if (ide_spin_wait_hwgroup(drive))
753 return -EBUSY; 753 return -EBUSY;
754 drive->nowerr = arg; 754 drive->nowerr = arg;
@@ -800,6 +800,9 @@ static int write_cache(ide_drive_t *drive, int arg)
800 ide_task_t args; 800 ide_task_t args;
801 int err = 1; 801 int err = 1;
802 802
803 if (arg < 0 || arg > 1)
804 return -EINVAL;
805
803 if (ide_id_has_flush_cache(drive->id)) { 806 if (ide_id_has_flush_cache(drive->id)) {
804 memset(&args, 0, sizeof(ide_task_t)); 807 memset(&args, 0, sizeof(ide_task_t));
805 args.tfRegister[IDE_FEATURE_OFFSET] = (arg) ? 808 args.tfRegister[IDE_FEATURE_OFFSET] = (arg) ?
@@ -835,6 +838,9 @@ static int set_acoustic (ide_drive_t *drive, int arg)
835{ 838{
836 ide_task_t args; 839 ide_task_t args;
837 840
841 if (arg < 0 || arg > 254)
842 return -EINVAL;
843
838 memset(&args, 0, sizeof(ide_task_t)); 844 memset(&args, 0, sizeof(ide_task_t));
839 args.tfRegister[IDE_FEATURE_OFFSET] = (arg) ? SETFEATURES_EN_AAM : 845 args.tfRegister[IDE_FEATURE_OFFSET] = (arg) ? SETFEATURES_EN_AAM :
840 SETFEATURES_DIS_AAM; 846 SETFEATURES_DIS_AAM;
@@ -855,6 +861,9 @@ static int set_acoustic (ide_drive_t *drive, int arg)
855 */ 861 */
856static int set_lba_addressing(ide_drive_t *drive, int arg) 862static int set_lba_addressing(ide_drive_t *drive, int arg)
857{ 863{
864 if (arg < 0 || arg > 2)
865 return -EINVAL;
866
858 drive->addressing = 0; 867 drive->addressing = 0;
859 868
860 if (HWIF(drive)->no_lba48) 869 if (HWIF(drive)->no_lba48)
@@ -866,23 +875,27 @@ static int set_lba_addressing(ide_drive_t *drive, int arg)
866 return 0; 875 return 0;
867} 876}
868 877
878#ifdef CONFIG_IDE_PROC_FS
869static void idedisk_add_settings(ide_drive_t *drive) 879static void idedisk_add_settings(ide_drive_t *drive)
870{ 880{
871 struct hd_driveid *id = drive->id; 881 struct hd_driveid *id = drive->id;
872 882
873 ide_add_setting(drive, "bios_cyl", SETTING_RW, -1, -1, TYPE_INT, 0, 65535, 1, 1, &drive->bios_cyl, NULL); 883 ide_add_setting(drive, "bios_cyl", SETTING_RW, TYPE_INT, 0, 65535, 1, 1, &drive->bios_cyl, NULL);
874 ide_add_setting(drive, "bios_head", SETTING_RW, -1, -1, TYPE_BYTE, 0, 255, 1, 1, &drive->bios_head, NULL); 884 ide_add_setting(drive, "bios_head", SETTING_RW, TYPE_BYTE, 0, 255, 1, 1, &drive->bios_head, NULL);
875 ide_add_setting(drive, "bios_sect", SETTING_RW, -1, -1, TYPE_BYTE, 0, 63, 1, 1, &drive->bios_sect, NULL); 885 ide_add_setting(drive, "bios_sect", SETTING_RW, TYPE_BYTE, 0, 63, 1, 1, &drive->bios_sect, NULL);
876 ide_add_setting(drive, "address", SETTING_RW, HDIO_GET_ADDRESS, HDIO_SET_ADDRESS, TYPE_INTA, 0, 2, 1, 1, &drive->addressing, set_lba_addressing); 886 ide_add_setting(drive, "address", SETTING_RW, TYPE_BYTE, 0, 2, 1, 1, &drive->addressing, set_lba_addressing);
877 ide_add_setting(drive, "bswap", SETTING_READ, -1, -1, TYPE_BYTE, 0, 1, 1, 1, &drive->bswap, NULL); 887 ide_add_setting(drive, "bswap", SETTING_READ, TYPE_BYTE, 0, 1, 1, 1, &drive->bswap, NULL);
878 ide_add_setting(drive, "multcount", id ? SETTING_RW : SETTING_READ, HDIO_GET_MULTCOUNT, HDIO_SET_MULTCOUNT, TYPE_BYTE, 0, id ? id->max_multsect : 0, 1, 1, &drive->mult_count, set_multcount); 888 ide_add_setting(drive, "multcount", SETTING_RW, TYPE_BYTE, 0, id->max_multsect, 1, 1, &drive->mult_count, set_multcount);
879 ide_add_setting(drive, "nowerr", SETTING_RW, HDIO_GET_NOWERR, HDIO_SET_NOWERR, TYPE_BYTE, 0, 1, 1, 1, &drive->nowerr, set_nowerr); 889 ide_add_setting(drive, "nowerr", SETTING_RW, TYPE_BYTE, 0, 1, 1, 1, &drive->nowerr, set_nowerr);
880 ide_add_setting(drive, "lun", SETTING_RW, -1, -1, TYPE_INT, 0, 7, 1, 1, &drive->lun, NULL); 890 ide_add_setting(drive, "lun", SETTING_RW, TYPE_INT, 0, 7, 1, 1, &drive->lun, NULL);
881 ide_add_setting(drive, "wcache", SETTING_RW, HDIO_GET_WCACHE, HDIO_SET_WCACHE, TYPE_BYTE, 0, 1, 1, 1, &drive->wcache, write_cache); 891 ide_add_setting(drive, "wcache", SETTING_RW, TYPE_BYTE, 0, 1, 1, 1, &drive->wcache, write_cache);
882 ide_add_setting(drive, "acoustic", SETTING_RW, HDIO_GET_ACOUSTIC, HDIO_SET_ACOUSTIC, TYPE_BYTE, 0, 254, 1, 1, &drive->acoustic, set_acoustic); 892 ide_add_setting(drive, "acoustic", SETTING_RW, TYPE_BYTE, 0, 254, 1, 1, &drive->acoustic, set_acoustic);
883 ide_add_setting(drive, "failures", SETTING_RW, -1, -1, TYPE_INT, 0, 65535, 1, 1, &drive->failures, NULL); 893 ide_add_setting(drive, "failures", SETTING_RW, TYPE_INT, 0, 65535, 1, 1, &drive->failures, NULL);
884 ide_add_setting(drive, "max_failures", SETTING_RW, -1, -1, TYPE_INT, 0, 65535, 1, 1, &drive->max_failures, NULL); 894 ide_add_setting(drive, "max_failures", SETTING_RW, TYPE_INT, 0, 65535, 1, 1, &drive->max_failures, NULL);
885} 895}
896#else
897static inline void idedisk_add_settings(ide_drive_t *drive) { ; }
898#endif
886 899
887static void idedisk_setup (ide_drive_t *drive) 900static void idedisk_setup (ide_drive_t *drive)
888{ 901{
@@ -1001,7 +1014,7 @@ static void ide_disk_remove(ide_drive_t *drive)
1001 struct ide_disk_obj *idkp = drive->driver_data; 1014 struct ide_disk_obj *idkp = drive->driver_data;
1002 struct gendisk *g = idkp->disk; 1015 struct gendisk *g = idkp->disk;
1003 1016
1004 ide_unregister_subdriver(drive, idkp->driver); 1017 ide_proc_unregister_driver(drive, idkp->driver);
1005 1018
1006 del_gendisk(g); 1019 del_gendisk(g);
1007 1020
@@ -1066,7 +1079,9 @@ static ide_driver_t idedisk_driver = {
1066 .end_request = ide_end_request, 1079 .end_request = ide_end_request,
1067 .error = __ide_error, 1080 .error = __ide_error,
1068 .abort = __ide_abort, 1081 .abort = __ide_abort,
1082#ifdef CONFIG_IDE_PROC_FS
1069 .proc = idedisk_proc, 1083 .proc = idedisk_proc,
1084#endif
1070}; 1085};
1071 1086
1072static int idedisk_open(struct inode *inode, struct file *filp) 1087static int idedisk_open(struct inode *inode, struct file *filp)
@@ -1140,9 +1155,49 @@ static int idedisk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
1140static int idedisk_ioctl(struct inode *inode, struct file *file, 1155static int idedisk_ioctl(struct inode *inode, struct file *file,
1141 unsigned int cmd, unsigned long arg) 1156 unsigned int cmd, unsigned long arg)
1142{ 1157{
1158 unsigned long flags;
1143 struct block_device *bdev = inode->i_bdev; 1159 struct block_device *bdev = inode->i_bdev;
1144 struct ide_disk_obj *idkp = ide_disk_g(bdev->bd_disk); 1160 struct ide_disk_obj *idkp = ide_disk_g(bdev->bd_disk);
1145 return generic_ide_ioctl(idkp->drive, file, bdev, cmd, arg); 1161 ide_drive_t *drive = idkp->drive;
1162 int err, (*setfunc)(ide_drive_t *, int);
1163 u8 *val;
1164
1165 switch (cmd) {
1166 case HDIO_GET_ADDRESS: val = &drive->addressing; goto read_val;
1167 case HDIO_GET_MULTCOUNT: val = &drive->mult_count; goto read_val;
1168 case HDIO_GET_NOWERR: val = &drive->nowerr; goto read_val;
1169 case HDIO_GET_WCACHE: val = &drive->wcache; goto read_val;
1170 case HDIO_GET_ACOUSTIC: val = &drive->acoustic; goto read_val;
1171 case HDIO_SET_ADDRESS: setfunc = set_lba_addressing; goto set_val;
1172 case HDIO_SET_MULTCOUNT: setfunc = set_multcount; goto set_val;
1173 case HDIO_SET_NOWERR: setfunc = set_nowerr; goto set_val;
1174 case HDIO_SET_WCACHE: setfunc = write_cache; goto set_val;
1175 case HDIO_SET_ACOUSTIC: setfunc = set_acoustic; goto set_val;
1176 }
1177
1178 return generic_ide_ioctl(drive, file, bdev, cmd, arg);
1179
1180read_val:
1181 down(&ide_setting_sem);
1182 spin_lock_irqsave(&ide_lock, flags);
1183 err = *val;
1184 spin_unlock_irqrestore(&ide_lock, flags);
1185 up(&ide_setting_sem);
1186 return err >= 0 ? put_user(err, (long __user *)arg) : err;
1187
1188set_val:
1189 if (bdev != bdev->bd_contains)
1190 err = -EINVAL;
1191 else {
1192 if (!capable(CAP_SYS_ADMIN))
1193 err = -EACCES;
1194 else {
1195 down(&ide_setting_sem);
1196 err = setfunc(drive, arg);
1197 up(&ide_setting_sem);
1198 }
1199 }
1200 return err;
1146} 1201}
1147 1202
1148static int idedisk_media_changed(struct gendisk *disk) 1203static int idedisk_media_changed(struct gendisk *disk)
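With the ioctl hooks gone from ide_add_setting(), idedisk_ioctl() above re-implements the HDIO_GET_*/HDIO_SET_* pairs directly. From userspace nothing changes; a minimal sketch of a caller (the device path is an assumption):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/hdreg.h>

int main(void)
{
	long val;
	int fd = open("/dev/hda", O_RDONLY);	/* illustrative device */

	if (fd < 0)
		return 1;
	/* routed through the new read_val path in idedisk_ioctl() */
	if (ioctl(fd, HDIO_GET_MULTCOUNT, &val) == 0)
		printf("multcount: %ld\n", val);
	return 0;
}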
@@ -1202,7 +1257,7 @@ static int ide_disk_probe(ide_drive_t *drive)
1202 1257
1203 ide_init_disk(g, drive); 1258 ide_init_disk(g, drive);
1204 1259
1205 ide_register_subdriver(drive, &idedisk_driver); 1260 ide_proc_register_driver(drive, &idedisk_driver);
1206 1261
1207 kref_init(&idkp->kref); 1262 kref_init(&idkp->kref);
1208 1263
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index fd213088b06b..5fe85191d49c 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -705,6 +705,100 @@ int ide_use_dma(ide_drive_t *drive)
705 705
706EXPORT_SYMBOL_GPL(ide_use_dma); 706EXPORT_SYMBOL_GPL(ide_use_dma);
707 707
708static const u8 xfer_mode_bases[] = {
709 XFER_UDMA_0,
710 XFER_MW_DMA_0,
711 XFER_SW_DMA_0,
712};
713
714static unsigned int ide_get_mode_mask(ide_drive_t *drive, u8 base)
715{
716 struct hd_driveid *id = drive->id;
717 ide_hwif_t *hwif = drive->hwif;
718 unsigned int mask = 0;
719
720 switch(base) {
721 case XFER_UDMA_0:
722 if ((id->field_valid & 4) == 0)
723 break;
724
725 mask = id->dma_ultra & hwif->ultra_mask;
726
727 if (hwif->udma_filter)
728 mask &= hwif->udma_filter(drive);
729
730 if ((mask & 0x78) && (eighty_ninty_three(drive) == 0))
731 mask &= 0x07;
732 break;
733 case XFER_MW_DMA_0:
734 mask = id->dma_mword & hwif->mwdma_mask;
735 break;
736 case XFER_SW_DMA_0:
737 mask = id->dma_1word & hwif->swdma_mask;
738 break;
739 default:
740 BUG();
741 break;
742 }
743
744 return mask;
745}
746
747/**
748 * ide_max_dma_mode - compute DMA speed
749 * @drive: IDE device
750 *
751 * Checks the drive capabilities and returns the speed to use
752 * for the DMA transfer. Returns 0 if the drive is incapable
753 * of DMA transfers.
754 */
755
756u8 ide_max_dma_mode(ide_drive_t *drive)
757{
758 ide_hwif_t *hwif = drive->hwif;
759 unsigned int mask;
760 int x, i;
761 u8 mode = 0;
762
763 if (drive->media != ide_disk && hwif->atapi_dma == 0)
764 return 0;
765
766 for (i = 0; i < ARRAY_SIZE(xfer_mode_bases); i++) {
767 mask = ide_get_mode_mask(drive, xfer_mode_bases[i]);
768 x = fls(mask) - 1;
769 if (x >= 0) {
770 mode = xfer_mode_bases[i] + x;
771 break;
772 }
773 }
774
775 printk(KERN_DEBUG "%s: selected mode 0x%x\n", drive->name, mode);
776
777 return mode;
778}
779
780EXPORT_SYMBOL_GPL(ide_max_dma_mode);
781
782int ide_tune_dma(ide_drive_t *drive)
783{
784 u8 speed;
785
786 /* TODO: use only ide_max_dma_mode() */
787 if (!ide_use_dma(drive))
788 return 0;
789
790 speed = ide_max_dma_mode(drive);
791
792 if (!speed)
793 return 0;
794
795 drive->hwif->speedproc(drive, speed);
796
797 return ide_dma_enable(drive);
798}
799
800EXPORT_SYMBOL_GPL(ide_tune_dma);
801
708void ide_dma_verbose(ide_drive_t *drive) 802void ide_dma_verbose(ide_drive_t *drive)
709{ 803{
710 struct hd_driveid *id = drive->id; 804 struct hd_driveid *id = drive->id;
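ide_max_dma_mode() walks UDMA, then MWDMA, then SWDMA, and converts the first non-empty capability mask into a mode number with fls(). A worked example with illustrative values:

/* Suppose id->dma_ultra & hwif->ultra_mask == 0x3f (UDMA0..UDMA5):
 *
 *   fls(0x3f) - 1 == 5
 *   mode = XFER_UDMA_0 + 5 == XFER_UDMA_5
 *
 * If eighty_ninty_three() reports a 40-wire cable, the mask is
 * clipped with "mask &= 0x07" first, capping the result at UDMA2.
 */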
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index 57cd21c5b2c1..f429be88c4f9 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -1811,18 +1811,22 @@ static int idefloppy_identify_device (ide_drive_t *drive,struct hd_driveid *id)
1811 return 0; 1811 return 0;
1812} 1812}
1813 1813
1814#ifdef CONFIG_IDE_PROC_FS
1814static void idefloppy_add_settings(ide_drive_t *drive) 1815static void idefloppy_add_settings(ide_drive_t *drive)
1815{ 1816{
1816 idefloppy_floppy_t *floppy = drive->driver_data; 1817 idefloppy_floppy_t *floppy = drive->driver_data;
1817 1818
1818/* 1819/*
1819 * drive setting name read/write ioctl ioctl data type min max mul_factor div_factor data pointer set function 1820 * drive setting name read/write data type min max mul_factor div_factor data pointer set function
1820 */ 1821 */
1821 ide_add_setting(drive, "bios_cyl", SETTING_RW, -1, -1, TYPE_INT, 0, 1023, 1, 1, &drive->bios_cyl, NULL); 1822 ide_add_setting(drive, "bios_cyl", SETTING_RW, TYPE_INT, 0, 1023, 1, 1, &drive->bios_cyl, NULL);
1822 ide_add_setting(drive, "bios_head", SETTING_RW, -1, -1, TYPE_BYTE, 0, 255, 1, 1, &drive->bios_head, NULL); 1823 ide_add_setting(drive, "bios_head", SETTING_RW, TYPE_BYTE, 0, 255, 1, 1, &drive->bios_head, NULL);
1823 ide_add_setting(drive, "bios_sect", SETTING_RW, -1, -1, TYPE_BYTE, 0, 63, 1, 1, &drive->bios_sect, NULL); 1824 ide_add_setting(drive, "bios_sect", SETTING_RW, TYPE_BYTE, 0, 63, 1, 1, &drive->bios_sect, NULL);
1824 ide_add_setting(drive, "ticks", SETTING_RW, -1, -1, TYPE_BYTE, 0, 255, 1, 1, &floppy->ticks, NULL); 1825 ide_add_setting(drive, "ticks", SETTING_RW, TYPE_BYTE, 0, 255, 1, 1, &floppy->ticks, NULL);
1825} 1826}
1827#else
1828static inline void idefloppy_add_settings(ide_drive_t *drive) { ; }
1829#endif
1826 1830
1827/* 1831/*
1828 * Driver initialization. 1832 * Driver initialization.
@@ -1873,7 +1877,7 @@ static void ide_floppy_remove(ide_drive_t *drive)
1873 idefloppy_floppy_t *floppy = drive->driver_data; 1877 idefloppy_floppy_t *floppy = drive->driver_data;
1874 struct gendisk *g = floppy->disk; 1878 struct gendisk *g = floppy->disk;
1875 1879
1876 ide_unregister_subdriver(drive, floppy->driver); 1880 ide_proc_unregister_driver(drive, floppy->driver);
1877 1881
1878 del_gendisk(g); 1882 del_gendisk(g);
1879 1883
@@ -1892,8 +1896,7 @@ static void ide_floppy_release(struct kref *kref)
1892 kfree(floppy); 1896 kfree(floppy);
1893} 1897}
1894 1898
1895#ifdef CONFIG_PROC_FS 1899#ifdef CONFIG_IDE_PROC_FS
1896
1897static int proc_idefloppy_read_capacity 1900static int proc_idefloppy_read_capacity
1898 (char *page, char **start, off_t off, int count, int *eof, void *data) 1901 (char *page, char **start, off_t off, int count, int *eof, void *data)
1899{ 1902{
@@ -1909,12 +1912,7 @@ static ide_proc_entry_t idefloppy_proc[] = {
1909 { "geometry", S_IFREG|S_IRUGO, proc_ide_read_geometry, NULL }, 1912 { "geometry", S_IFREG|S_IRUGO, proc_ide_read_geometry, NULL },
1910 { NULL, 0, NULL, NULL } 1913 { NULL, 0, NULL, NULL }
1911}; 1914};
1912 1915#endif /* CONFIG_IDE_PROC_FS */
1913#else
1914
1915#define idefloppy_proc NULL
1916
1917#endif /* CONFIG_PROC_FS */
1918 1916
1919static int ide_floppy_probe(ide_drive_t *); 1917static int ide_floppy_probe(ide_drive_t *);
1920 1918
@@ -1933,7 +1931,9 @@ static ide_driver_t idefloppy_driver = {
1933 .end_request = idefloppy_do_end_request, 1931 .end_request = idefloppy_do_end_request,
1934 .error = __ide_error, 1932 .error = __ide_error,
1935 .abort = __ide_abort, 1933 .abort = __ide_abort,
1934#ifdef CONFIG_IDE_PROC_FS
1936 .proc = idefloppy_proc, 1935 .proc = idefloppy_proc,
1936#endif
1937}; 1937};
1938 1938
1939static int idefloppy_open(struct inode *inode, struct file *filp) 1939static int idefloppy_open(struct inode *inode, struct file *filp)
@@ -2159,7 +2159,7 @@ static int ide_floppy_probe(ide_drive_t *drive)
2159 2159
2160 ide_init_disk(g, drive); 2160 ide_init_disk(g, drive);
2161 2161
2162 ide_register_subdriver(drive, &idefloppy_driver); 2162 ide_proc_register_driver(drive, &idefloppy_driver);
2163 2163
2164 kref_init(&floppy->kref); 2164 kref_init(&floppy->kref);
2165 2165
diff --git a/drivers/ide/ide-generic.c b/drivers/ide/ide-generic.c
index 99fd56151131..0f72b98d727f 100644
--- a/drivers/ide/ide-generic.c
+++ b/drivers/ide/ide-generic.c
@@ -22,8 +22,6 @@ static int __init ide_generic_init(void)
22 if (ide_hwifs[0].io_ports[IDE_DATA_OFFSET]) 22 if (ide_hwifs[0].io_ports[IDE_DATA_OFFSET])
23 ide_release_lock(); /* for atari only */ 23 ide_release_lock(); /* for atari only */
24 24
25 create_proc_ide_interfaces();
26
27 return 0; 25 return 0;
28} 26}
29 27
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 8670112f1d39..8e568143d90d 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -172,15 +172,6 @@ static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *
172 172
173 memset(args, 0, sizeof(*args)); 173 memset(args, 0, sizeof(*args));
174 174
175 if (drive->media != ide_disk) {
176 /*
177 * skip idedisk_pm_restore_pio and idedisk_pm_idle for ATAPI
178 * devices
179 */
180 if (pm->pm_step == idedisk_pm_restore_pio)
181 pm->pm_step = ide_pm_restore_dma;
182 }
183
184 switch (pm->pm_step) { 175 switch (pm->pm_step) {
185 case ide_pm_flush_cache: /* Suspend step 1 (flush cache) */ 176 case ide_pm_flush_cache: /* Suspend step 1 (flush cache) */
186 if (drive->media != ide_disk) 177 if (drive->media != ide_disk)
@@ -207,7 +198,13 @@ static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *
207 case idedisk_pm_restore_pio: /* Resume step 1 (restore PIO) */ 198 case idedisk_pm_restore_pio: /* Resume step 1 (restore PIO) */
208 if (drive->hwif->tuneproc != NULL) 199 if (drive->hwif->tuneproc != NULL)
209 drive->hwif->tuneproc(drive, 255); 200 drive->hwif->tuneproc(drive, 255);
210 ide_complete_power_step(drive, rq, 0, 0); 201 /*
202 * skip idedisk_pm_idle for ATAPI devices
203 */
204 if (drive->media != ide_disk)
205 pm->pm_step = ide_pm_restore_dma;
206 else
207 ide_complete_power_step(drive, rq, 0, 0);
211 return ide_stopped; 208 return ide_stopped;
212 209
213 case idedisk_pm_idle: /* Resume step 2 (idle) */ 210 case idedisk_pm_idle: /* Resume step 2 (idle) */
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index 3caa176b3155..f0be5f665a0e 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -571,51 +571,54 @@ EXPORT_SYMBOL(ide_wait_stat);
571 */ 571 */
572u8 eighty_ninty_three (ide_drive_t *drive) 572u8 eighty_ninty_three (ide_drive_t *drive)
573{ 573{
574 if(HWIF(drive)->udma_four == 0) 574 ide_hwif_t *hwif = drive->hwif;
575 return 0; 575 struct hd_driveid *id = drive->id;
576
577 if (hwif->udma_four == 0)
578 goto no_80w;
576 579
577 /* Check for SATA but only if we are ATA5 or higher */ 580 /* Check for SATA but only if we are ATA5 or higher */
578 if (drive->id->hw_config == 0 && (drive->id->major_rev_num & 0x7FE0)) 581 if (id->hw_config == 0 && (id->major_rev_num & 0x7FE0))
579 return 1; 582 return 1;
580 if (!(drive->id->hw_config & 0x6000)) 583
581 return 0;
582#ifndef CONFIG_IDEDMA_IVB
583 if(!(drive->id->hw_config & 0x4000))
584 return 0;
585#endif /* CONFIG_IDEDMA_IVB */
586 /* 584 /*
587 * FIXME: 585 * FIXME:
588 * - change master/slave IDENTIFY order 586 * - change master/slave IDENTIFY order
589 * - force bit13 (80c cable present) check 587 * - force bit13 (80c cable present) check
590 * (unless the slave device is pre-ATA3) 588 * (unless the slave device is pre-ATA3)
591 */ 589 */
592 return 1; 590#ifndef CONFIG_IDEDMA_IVB
593} 591 if (id->hw_config & 0x4000)
592#else
593 if (id->hw_config & 0x6000)
594#endif
595 return 1;
596
597no_80w:
598 if (drive->udma33_warned == 1)
599 return 0;
594 600
595EXPORT_SYMBOL(eighty_ninty_three); 601 printk(KERN_WARNING "%s: %s side 80-wire cable detection failed, "
602 "limiting max speed to UDMA33\n",
603 drive->name, hwif->udma_four ? "drive" : "host");
604
605 drive->udma33_warned = 1;
606
607 return 0;
608}
596 609
597int ide_ata66_check (ide_drive_t *drive, ide_task_t *args) 610int ide_ata66_check (ide_drive_t *drive, ide_task_t *args)
598{ 611{
599 if ((args->tfRegister[IDE_COMMAND_OFFSET] == WIN_SETFEATURES) && 612 if ((args->tfRegister[IDE_COMMAND_OFFSET] == WIN_SETFEATURES) &&
600 (args->tfRegister[IDE_SECTOR_OFFSET] > XFER_UDMA_2) && 613 (args->tfRegister[IDE_SECTOR_OFFSET] > XFER_UDMA_2) &&
601 (args->tfRegister[IDE_FEATURE_OFFSET] == SETFEATURES_XFER)) { 614 (args->tfRegister[IDE_FEATURE_OFFSET] == SETFEATURES_XFER)) {
602#ifndef CONFIG_IDEDMA_IVB 615 if (eighty_ninty_three(drive) == 0) {
603 if ((drive->id->hw_config & 0x6000) == 0) { 616 printk(KERN_WARNING "%s: UDMA speeds >UDMA33 cannot "
604#else /* !CONFIG_IDEDMA_IVB */ 617 "be set\n", drive->name);
605 if (((drive->id->hw_config & 0x2000) == 0) ||
606 ((drive->id->hw_config & 0x4000) == 0)) {
607#endif /* CONFIG_IDEDMA_IVB */
608 printk("%s: Speed warnings UDMA 3/4/5 is not "
609 "functional.\n", drive->name);
610 return 1;
611 }
612 if (!HWIF(drive)->udma_four) {
613 printk("%s: Speed warnings UDMA 3/4/5 is not "
614 "functional.\n",
615 HWIF(drive)->name);
616 return 1; 618 return 1;
617 } 619 }
618 } 620 }
621
619 return 0; 622 return 0;
620} 623}
621 624
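The rewritten eighty_ninty_three() centralizes cable detection and warns only once per drive. Its decision order, restated as a sketch (the word-93 bit handling simply mirrors the #ifdefs above, not independent spec claims):

/*
 * hwif->udma_four == 0         -> no_80w: warn once, limit to UDMA33
 * hw_config == 0 on ATA5+      -> SATA, cable question is moot: ok
 * hw_config & 0x4000 set       -> drive-side detection passed: ok
 *   (0x6000 instead when CONFIG_IDEDMA_IVB is enabled)
 * otherwise                    -> no_80w: warn once, limit to UDMA33
 */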
diff --git a/drivers/ide/ide-lib.c b/drivers/ide/ide-lib.c
index 68719314df3f..3be3c69383f2 100644
--- a/drivers/ide/ide-lib.c
+++ b/drivers/ide/ide-lib.c
@@ -69,123 +69,41 @@ char *ide_xfer_verbose (u8 xfer_rate)
69EXPORT_SYMBOL(ide_xfer_verbose); 69EXPORT_SYMBOL(ide_xfer_verbose);
70 70
71/** 71/**
72 * ide_dma_speed - compute DMA speed 72 * ide_rate_filter - filter transfer mode
73 * @drive: drive 73 * @drive: IDE device
74 * @mode: modes available
75 *
76 * Checks the drive capabilities and returns the speed to use
77 * for the DMA transfer. Returns 0 if the drive is incapable
78 * of DMA transfers.
79 */
80
81u8 ide_dma_speed(ide_drive_t *drive, u8 mode)
82{
83 struct hd_driveid *id = drive->id;
84 ide_hwif_t *hwif = HWIF(drive);
85 u8 ultra_mask, mwdma_mask, swdma_mask;
86 u8 speed = 0;
87
88 if (drive->media != ide_disk && hwif->atapi_dma == 0)
89 return 0;
90
91 /* Capable of UltraDMA modes? */
92 ultra_mask = id->dma_ultra & hwif->ultra_mask;
93
94 if (!(id->field_valid & 4))
95 mode = 0; /* fallback to MW/SW DMA if no UltraDMA */
96
97 switch (mode) {
98 case 4:
99 if (ultra_mask & 0x40) {
100 speed = XFER_UDMA_6;
101 break;
102 }
103 case 3:
104 if (ultra_mask & 0x20) {
105 speed = XFER_UDMA_5;
106 break;
107 }
108 case 2:
109 if (ultra_mask & 0x10) {
110 speed = XFER_UDMA_4;
111 break;
112 }
113 if (ultra_mask & 0x08) {
114 speed = XFER_UDMA_3;
115 break;
116 }
117 case 1:
118 if (ultra_mask & 0x04) {
119 speed = XFER_UDMA_2;
120 break;
121 }
122 if (ultra_mask & 0x02) {
123 speed = XFER_UDMA_1;
124 break;
125 }
126 if (ultra_mask & 0x01) {
127 speed = XFER_UDMA_0;
128 break;
129 }
130 case 0:
131 mwdma_mask = id->dma_mword & hwif->mwdma_mask;
132
133 if (mwdma_mask & 0x04) {
134 speed = XFER_MW_DMA_2;
135 break;
136 }
137 if (mwdma_mask & 0x02) {
138 speed = XFER_MW_DMA_1;
139 break;
140 }
141 if (mwdma_mask & 0x01) {
142 speed = XFER_MW_DMA_0;
143 break;
144 }
145
146 swdma_mask = id->dma_1word & hwif->swdma_mask;
147
148 if (swdma_mask & 0x04) {
149 speed = XFER_SW_DMA_2;
150 break;
151 }
152 if (swdma_mask & 0x02) {
153 speed = XFER_SW_DMA_1;
154 break;
155 }
156 if (swdma_mask & 0x01) {
157 speed = XFER_SW_DMA_0;
158 break;
159 }
160 }
161
162 return speed;
163}
164EXPORT_SYMBOL(ide_dma_speed);
165
166
167/**
168 * ide_rate_filter - return best speed for mode
169 * @mode: modes available
170 * @speed: desired speed 74 * @speed: desired speed
171 * 75 *
172 * Given the available DMA/UDMA mode this function returns 76 * Given the available transfer modes this function returns
173 * the best available speed at or below the speed requested. 77 * the best available speed at or below the speed requested.
78 *
79 * FIXME: filter also PIO/SWDMA/MWDMA modes
174 */ 80 */
175 81
176u8 ide_rate_filter (u8 mode, u8 speed) 82u8 ide_rate_filter(ide_drive_t *drive, u8 speed)
177{ 83{
178#ifdef CONFIG_BLK_DEV_IDEDMA 84#ifdef CONFIG_BLK_DEV_IDEDMA
179 static u8 speed_max[] = { 85 ide_hwif_t *hwif = drive->hwif;
180 XFER_MW_DMA_2, XFER_UDMA_2, XFER_UDMA_4, 86 u8 mask = hwif->ultra_mask, mode = XFER_MW_DMA_2;
181 XFER_UDMA_5, XFER_UDMA_6 87
182 }; 88 if (hwif->udma_filter)
89 mask = hwif->udma_filter(drive);
90
91 /*
92 * TODO: speed > XFER_UDMA_2 extra check is needed to avoid false
93 * cable warning from eighty_ninty_three(), moving ide_rate_filter()
94 * calls from ->speedproc to core code will make this hack go away
95 */
96 if (speed > XFER_UDMA_2) {
97 if ((mask & 0x78) && (eighty_ninty_three(drive) == 0))
98 mask &= 0x07;
99 }
100
101 if (mask)
102 mode = fls(mask) - 1 + XFER_UDMA_0;
183 103
184// printk("%s: mode 0x%02x, speed 0x%02x\n", __FUNCTION__, mode, speed); 104// printk("%s: mode 0x%02x, speed 0x%02x\n", __FUNCTION__, mode, speed);
185 105
186 /* So that we remember to update this if new modes appear */ 106 return min(speed, mode);
187 BUG_ON(mode > 4);
188 return min(speed, speed_max[mode]);
189#else /* !CONFIG_BLK_DEV_IDEDMA */ 107#else /* !CONFIG_BLK_DEV_IDEDMA */
190 return min(speed, (u8)XFER_PIO_4); 108 return min(speed, (u8)XFER_PIO_4);
191#endif /* CONFIG_BLK_DEV_IDEDMA */ 109#endif /* CONFIG_BLK_DEV_IDEDMA */
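ide_rate_filter() now needs the drive rather than a mode count, so it can apply hwif->udma_filter and the cable check itself. A sketch of how a ->speedproc might use it (the example_ name and chipset step are illustrative):

static int example_tune_chipset(ide_drive_t *drive, u8 speed)
{
	/* clamp the request to what cable, host and filter allow */
	speed = ide_rate_filter(drive, speed);

	/* ... program chipset timing registers for 'speed' ... */

	return ide_config_drive_speed(drive, speed);
}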
diff --git a/drivers/ide/ide-pnp.c b/drivers/ide/ide-pnp.c
index 98410ca044cf..2b8009c50e91 100644
--- a/drivers/ide/ide-pnp.c
+++ b/drivers/ide/ide-pnp.c
@@ -42,7 +42,7 @@ static int idepnp_probe(struct pnp_dev * dev, const struct pnp_device_id *dev_id
42 hw.irq = pnp_irq(dev, 0); 42 hw.irq = pnp_irq(dev, 0);
43 hw.dma = NO_DMA; 43 hw.dma = NO_DMA;
44 44
45 index = ide_register_hw(&hw, &hwif); 45 index = ide_register_hw(&hw, 1, &hwif);
46 46
47 if (index != -1) { 47 if (index != -1) {
48 printk(KERN_INFO "ide%d: generic PnP IDE interface\n", index); 48 printk(KERN_INFO "ide%d: generic PnP IDE interface\n", index);
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 8f15c23aa70d..3cebed77f55d 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -1427,6 +1427,9 @@ int ideprobe_init (void)
1427 } 1427 }
1428 } 1428 }
1429 } 1429 }
1430 for (index = 0; index < MAX_HWIFS; ++index)
1431 if (probe[index])
1432 ide_proc_register_port(&ide_hwifs[index]);
1430 return 0; 1433 return 0;
1431} 1434}
1432 1435
diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c
index a9e0b30fb1f2..d50bd996ff22 100644
--- a/drivers/ide/ide-proc.c
+++ b/drivers/ide/ide-proc.c
@@ -3,6 +3,8 @@
3 * 3 *
4 * Copyright (C) 1997-1998 Mark Lord 4 * Copyright (C) 1997-1998 Mark Lord
5 * Copyright (C) 2003 Red Hat <alan@redhat.com> 5 * Copyright (C) 2003 Red Hat <alan@redhat.com>
6 *
7 * Some code was moved here from ide.c, see it for original copyrights.
6 */ 8 */
7 9
8/* 10/*
@@ -37,6 +39,8 @@
37 39
38#include <asm/io.h> 40#include <asm/io.h>
39 41
42static struct proc_dir_entry *proc_ide_root;
43
40static int proc_ide_read_imodel 44static int proc_ide_read_imodel
41 (char *page, char **start, off_t off, int count, int *eof, void *data) 45 (char *page, char **start, off_t off, int count, int *eof, void *data)
42{ 46{
@@ -121,6 +125,265 @@ static int proc_ide_read_identify
121 PROC_IDE_READ_RETURN(page,start,off,count,eof,len); 125 PROC_IDE_READ_RETURN(page,start,off,count,eof,len);
122} 126}
123 127
128/**
129 * __ide_add_setting - add an ide setting option
130 * @drive: drive to use
131 * @name: setting name
 132 * @rw: SETTING_READ, SETTING_WRITE or SETTING_RW access flags
 133 * @data_type: type of data
 134 * @min: range minimum
 135 * @max: range maximum
 136 * @mul_factor: multiplication scale
 137 * @div_factor: division scale
 138 * @data: private data field
 139 * @set: setting change handler, may be NULL
 140 * @auto_remove: setting auto removal flag
141 *
 142 * Adds the named setting to the drive's sorted settings list.
 143 * The function takes ide_setting_sem to protect against
 144 * parallel changes. This function must not be called from IRQ
 145 * context. Returns 0 on success or -1 on failure.
146 *
147 * BUGS: This code is seriously over-engineered. There is also
148 * magic about how the driver specific features are setup. If
149 * a driver is attached we assume the driver settings are auto
150 * remove.
151 */
152
153static int __ide_add_setting(ide_drive_t *drive, const char *name, int rw, int data_type, int min, int max, int mul_factor, int div_factor, void *data, ide_procset_t *set, int auto_remove)
154{
155 ide_settings_t **p = (ide_settings_t **) &drive->settings, *setting = NULL;
156
157 down(&ide_setting_sem);
158 while ((*p) && strcmp((*p)->name, name) < 0)
159 p = &((*p)->next);
160 if ((setting = kzalloc(sizeof(*setting), GFP_KERNEL)) == NULL)
161 goto abort;
162 if ((setting->name = kmalloc(strlen(name) + 1, GFP_KERNEL)) == NULL)
163 goto abort;
164 strcpy(setting->name, name);
165 setting->rw = rw;
166 setting->data_type = data_type;
167 setting->min = min;
168 setting->max = max;
169 setting->mul_factor = mul_factor;
170 setting->div_factor = div_factor;
171 setting->data = data;
172 setting->set = set;
173
174 setting->next = *p;
175 if (auto_remove)
176 setting->auto_remove = 1;
177 *p = setting;
178 up(&ide_setting_sem);
179 return 0;
180abort:
181 up(&ide_setting_sem);
182 kfree(setting);
183 return -1;
184}
185
186int ide_add_setting(ide_drive_t *drive, const char *name, int rw, int data_type, int min, int max, int mul_factor, int div_factor, void *data, ide_procset_t *set)
187{
188 return __ide_add_setting(drive, name, rw, data_type, min, max, mul_factor, div_factor, data, set, 1);
189}
190
191EXPORT_SYMBOL(ide_add_setting);
192
193/**
194 * __ide_remove_setting - remove an ide setting option
195 * @drive: drive to use
196 * @name: setting name
197 *
198 * Removes the setting named from the device if it is present.
199 * The caller must hold the setting semaphore.
200 */
201
202static void __ide_remove_setting (ide_drive_t *drive, char *name)
203{
204 ide_settings_t **p, *setting;
205
206 p = (ide_settings_t **) &drive->settings;
207
208 while ((*p) && strcmp((*p)->name, name))
209 p = &((*p)->next);
210 if ((setting = (*p)) == NULL)
211 return;
212
213 (*p) = setting->next;
214
215 kfree(setting->name);
216 kfree(setting);
217}
218
219/**
220 * auto_remove_settings - remove driver specific settings
221 * @drive: drive
222 *
223 * Automatically remove all the driver specific settings for this
224 * drive. This function may not be called from IRQ context. The
225 * caller must hold ide_setting_sem.
226 */
227
228static void auto_remove_settings (ide_drive_t *drive)
229{
230 ide_settings_t *setting;
231repeat:
232 setting = drive->settings;
233 while (setting) {
234 if (setting->auto_remove) {
235 __ide_remove_setting(drive, setting->name);
236 goto repeat;
237 }
238 setting = setting->next;
239 }
240}
241
242/**
243 * ide_find_setting_by_name - find a drive specific setting
244 * @drive: drive to scan
245 * @name: setting name
246 *
 247 * Scans the device setting table for a matching entry and returns
 248 * it, or NULL if no entry is found. The caller must hold the
 249 * setting semaphore.
250 */
251
252static ide_settings_t *ide_find_setting_by_name(ide_drive_t *drive, char *name)
253{
254 ide_settings_t *setting = drive->settings;
255
256 while (setting) {
257 if (strcmp(setting->name, name) == 0)
258 break;
259 setting = setting->next;
260 }
261 return setting;
262}
263
264/**
265 * ide_read_setting - read an IDE setting
266 * @drive: drive to read from
267 * @setting: drive setting
268 *
269 * Read a drive setting and return the value. The caller
270 * must hold the ide_setting_sem when making this call.
271 *
 272 * BUGS: the data and the error share the same return value,
 273 * so an -EINVAL error cannot be told apart from a setting
 274 * whose value really is -EINVAL
275 */
276
277static int ide_read_setting(ide_drive_t *drive, ide_settings_t *setting)
278{
279 int val = -EINVAL;
280 unsigned long flags;
281
282 if ((setting->rw & SETTING_READ)) {
283 spin_lock_irqsave(&ide_lock, flags);
284 switch(setting->data_type) {
285 case TYPE_BYTE:
286 val = *((u8 *) setting->data);
287 break;
288 case TYPE_SHORT:
289 val = *((u16 *) setting->data);
290 break;
291 case TYPE_INT:
292 val = *((u32 *) setting->data);
293 break;
294 }
295 spin_unlock_irqrestore(&ide_lock, flags);
296 }
297 return val;
298}
299
300/**
 301 * ide_write_setting - write an IDE setting
 302 * @drive: drive to write to
303 * @setting: drive setting
304 * @val: value
305 *
306 * Write a drive setting if it is possible. The caller
307 * must hold the ide_setting_sem when making this call.
308 *
 309 * BUGS: if a ->set handler is present it is called before the
 310 * SETTING_WRITE and min/max checks, so handlers must validate
 311 * their own arguments
312 *
313 * FIXME: This should be changed to enqueue a special request
314 * to the driver to change settings, and then wait on a sema for completion.
315 * The current scheme of polling is kludgy, though safe enough.
316 */
317
318static int ide_write_setting(ide_drive_t *drive, ide_settings_t *setting, int val)
319{
320 if (!capable(CAP_SYS_ADMIN))
321 return -EACCES;
322 if (setting->set)
323 return setting->set(drive, val);
324 if (!(setting->rw & SETTING_WRITE))
325 return -EPERM;
326 if (val < setting->min || val > setting->max)
327 return -EINVAL;
328 if (ide_spin_wait_hwgroup(drive))
329 return -EBUSY;
330 switch (setting->data_type) {
331 case TYPE_BYTE:
332 *((u8 *) setting->data) = val;
333 break;
334 case TYPE_SHORT:
335 *((u16 *) setting->data) = val;
336 break;
337 case TYPE_INT:
338 *((u32 *) setting->data) = val;
339 break;
340 }
341 spin_unlock_irq(&ide_lock);
342 return 0;
343}
344
345static int set_xfer_rate (ide_drive_t *drive, int arg)
346{
347 int err;
348
349 if (arg < 0 || arg > 70)
350 return -EINVAL;
351
352 err = ide_wait_cmd(drive,
353 WIN_SETFEATURES, (u8) arg,
354 SETFEATURES_XFER, 0, NULL);
355
356 if (!err && arg) {
357 ide_set_xfer_rate(drive, (u8) arg);
358 ide_driveid_update(drive);
359 }
360 return err;
361}
362
363/**
364 * ide_add_generic_settings - generic ide settings
365 * @drive: drive being configured
366 *
367 * Add the generic parts of the system settings to the /proc files.
368 * The caller must not be holding the ide_setting_sem.
369 */
370
371void ide_add_generic_settings (ide_drive_t *drive)
372{
373/*
374 * drive setting name read/write access data type min max mul_factor div_factor data pointer set function
375 */
376 __ide_add_setting(drive, "io_32bit", drive->no_io_32bit ? SETTING_READ : SETTING_RW, TYPE_BYTE, 0, 1 + (SUPPORT_VLB_SYNC << 1), 1, 1, &drive->io_32bit, set_io_32bit, 0);
377 __ide_add_setting(drive, "keepsettings", SETTING_RW, TYPE_BYTE, 0, 1, 1, 1, &drive->keep_settings, NULL, 0);
378 __ide_add_setting(drive, "nice1", SETTING_RW, TYPE_BYTE, 0, 1, 1, 1, &drive->nice1, NULL, 0);
379 __ide_add_setting(drive, "pio_mode", SETTING_WRITE, TYPE_BYTE, 0, 255, 1, 1, NULL, set_pio_mode, 0);
380 __ide_add_setting(drive, "unmaskirq", drive->no_unmask ? SETTING_READ : SETTING_RW, TYPE_BYTE, 0, 1, 1, 1, &drive->unmask, NULL, 0);
381 __ide_add_setting(drive, "using_dma", SETTING_RW, TYPE_BYTE, 0, 1, 1, 1, &drive->using_dma, set_using_dma, 0);
382 __ide_add_setting(drive, "init_speed", SETTING_RW, TYPE_BYTE, 0, 70, 1, 1, &drive->init_speed, NULL, 0);
383 __ide_add_setting(drive, "current_speed", SETTING_RW, TYPE_BYTE, 0, 70, 1, 1, &drive->current_speed, set_xfer_rate, 0);
384 __ide_add_setting(drive, "number", SETTING_RW, TYPE_BYTE, 0, 3, 1, 1, &drive->dn, NULL, 0);
385}
386
124static void proc_ide_settings_warn(void) 387static void proc_ide_settings_warn(void)
125{ 388{
126 static int warned = 0; 389 static int warned = 0;
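ide_add_setting() loses the two ioctl columns, leaving ten arguments; drivers register purely /proc-visible settings and handle the HDIO ioctls themselves. A sketch of the new form (it mirrors the idedisk table earlier in this series):

ide_add_setting(drive, "failures", SETTING_RW, TYPE_INT,
		0, 65535, 1, 1, &drive->failures, NULL);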
@@ -399,7 +662,7 @@ static ide_proc_entry_t generic_drive_entries[] = {
399 { NULL, 0, NULL, NULL } 662 { NULL, 0, NULL, NULL }
400}; 663};
401 664
402void ide_add_proc_entries(struct proc_dir_entry *dir, ide_proc_entry_t *p, void *data) 665static void ide_add_proc_entries(struct proc_dir_entry *dir, ide_proc_entry_t *p, void *data)
403{ 666{
404 struct proc_dir_entry *ent; 667 struct proc_dir_entry *ent;
405 668
@@ -415,7 +678,7 @@ void ide_add_proc_entries(struct proc_dir_entry *dir, ide_proc_entry_t *p, void
415 } 678 }
416} 679}
417 680
418void ide_remove_proc_entries(struct proc_dir_entry *dir, ide_proc_entry_t *p) 681static void ide_remove_proc_entries(struct proc_dir_entry *dir, ide_proc_entry_t *p)
419{ 682{
420 if (!dir || !p) 683 if (!dir || !p)
421 return; 684 return;
@@ -425,6 +688,51 @@ void ide_remove_proc_entries(struct proc_dir_entry *dir, ide_proc_entry_t *p)
425 } 688 }
426} 689}
427 690
691void ide_proc_register_driver(ide_drive_t *drive, ide_driver_t *driver)
692{
693 ide_add_proc_entries(drive->proc, driver->proc, drive);
694}
695
696EXPORT_SYMBOL(ide_proc_register_driver);
697
698/**
699 * ide_proc_unregister_driver - remove driver specific data
700 * @drive: drive
701 * @driver: driver
702 *
703 * Clean up the driver specific /proc files and IDE settings
704 * for a given drive.
705 *
706 * Takes ide_setting_sem and ide_lock.
707 * Caller must hold none of the locks.
708 */
709
710void ide_proc_unregister_driver(ide_drive_t *drive, ide_driver_t *driver)
711{
712 unsigned long flags;
713
714 ide_remove_proc_entries(drive->proc, driver->proc);
715
716 down(&ide_setting_sem);
717 spin_lock_irqsave(&ide_lock, flags);
718 /*
719 * ide_setting_sem protects the settings list
720 * ide_lock protects the use of settings
721 *
 722 * so we need to hold both, ide_setting_sem because we want to
723 * modify the settings list, and ide_lock because we cannot take
724 * a setting out that is being used.
725 *
726 * OTOH both ide_{read,write}_setting are only ever used under
727 * ide_setting_sem.
728 */
729 auto_remove_settings(drive);
730 spin_unlock_irqrestore(&ide_lock, flags);
731 up(&ide_setting_sem);
732}
733
734EXPORT_SYMBOL(ide_proc_unregister_driver);
735
428static void create_proc_ide_drives(ide_hwif_t *hwif) 736static void create_proc_ide_drives(ide_hwif_t *hwif)
429{ 737{
430 int d; 738 int d;
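ide_proc_unregister_driver() takes both ide_setting_sem and ide_lock itself, so it must be called with none of them held. A subdriver remove path therefore looks like the converted drivers above (the example_ names are illustrative):

static void example_remove(ide_drive_t *drive)
{
	struct example_obj *obj = drive->driver_data;

	/* locks are taken inside; caller must hold none of them */
	ide_proc_unregister_driver(drive, obj->driver);

	del_gendisk(obj->disk);
	/* ... drop the last reference to obj ... */
}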
@@ -477,26 +785,24 @@ static ide_proc_entry_t hwif_entries[] = {
477 { NULL, 0, NULL, NULL } 785 { NULL, 0, NULL, NULL }
478}; 786};
479 787
480void create_proc_ide_interfaces(void) 788void ide_proc_register_port(ide_hwif_t *hwif)
481{ 789{
482 int h; 790 if (!hwif->present)
791 return;
483 792
484 for (h = 0; h < MAX_HWIFS; h++) { 793 if (!hwif->proc) {
485 ide_hwif_t *hwif = &ide_hwifs[h]; 794 hwif->proc = proc_mkdir(hwif->name, proc_ide_root);
486 795
487 if (!hwif->present) 796 if (!hwif->proc)
488 continue; 797 return;
489 if (!hwif->proc) { 798
490 hwif->proc = proc_mkdir(hwif->name, proc_ide_root); 799 ide_add_proc_entries(hwif->proc, hwif_entries, hwif);
491 if (!hwif->proc)
492 return;
493 ide_add_proc_entries(hwif->proc, hwif_entries, hwif);
494 }
495 create_proc_ide_drives(hwif);
496 } 800 }
801
802 create_proc_ide_drives(hwif);
497} 803}
498 804
499EXPORT_SYMBOL(create_proc_ide_interfaces); 805EXPORT_SYMBOL_GPL(ide_proc_register_port);
500 806
501#ifdef CONFIG_BLK_DEV_IDEPCI 807#ifdef CONFIG_BLK_DEV_IDEPCI
502void ide_pci_create_host_proc(const char *name, get_info_t *get_info) 808void ide_pci_create_host_proc(const char *name, get_info_t *get_info)
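Port /proc registration is now explicit and per-hwif instead of a global rescan; host drivers call it right after probing, as in the icside and rapide conversions earlier in this series:

probe_hwif_init(hwif);
ide_proc_register_port(hwif);	/* quietly returns unless hwif->present */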
@@ -507,7 +813,7 @@ void ide_pci_create_host_proc(const char *name, get_info_t *get_info)
507EXPORT_SYMBOL_GPL(ide_pci_create_host_proc); 813EXPORT_SYMBOL_GPL(ide_pci_create_host_proc);
508#endif 814#endif
509 815
510void destroy_proc_ide_interface(ide_hwif_t *hwif) 816void ide_proc_unregister_port(ide_hwif_t *hwif)
511{ 817{
512 if (hwif->proc) { 818 if (hwif->proc) {
513 destroy_proc_ide_drives(hwif); 819 destroy_proc_ide_drives(hwif);
@@ -554,11 +860,11 @@ void proc_ide_create(void)
554{ 860{
555 struct proc_dir_entry *entry; 861 struct proc_dir_entry *entry;
556 862
863 proc_ide_root = proc_mkdir("ide", NULL);
864
557 if (!proc_ide_root) 865 if (!proc_ide_root)
558 return; 866 return;
559 867
560 create_proc_ide_interfaces();
561
562 entry = create_proc_entry("drivers", 0, proc_ide_root); 868 entry = create_proc_entry("drivers", 0, proc_ide_root);
563 if (entry) 869 if (entry)
564 entry->proc_fops = &ide_drivers_operations; 870 entry->proc_fops = &ide_drivers_operations;
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index 4e59239fef75..e82bfa5e0ab8 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -4561,28 +4561,33 @@ static void idetape_get_blocksize_from_block_descriptor(ide_drive_t *drive)
4561 printk(KERN_INFO "ide-tape: Adjusted block size - %d\n", tape->tape_block_size); 4561 printk(KERN_INFO "ide-tape: Adjusted block size - %d\n", tape->tape_block_size);
4562#endif /* IDETAPE_DEBUG_INFO */ 4562#endif /* IDETAPE_DEBUG_INFO */
4563} 4563}
4564
4565#ifdef CONFIG_IDE_PROC_FS
4564static void idetape_add_settings (ide_drive_t *drive) 4566static void idetape_add_settings (ide_drive_t *drive)
4565{ 4567{
4566 idetape_tape_t *tape = drive->driver_data; 4568 idetape_tape_t *tape = drive->driver_data;
4567 4569
4568/* 4570/*
4569 * drive setting name read/write ioctl ioctl data type min max mul_factor div_factor data pointer set function 4571 * drive setting name read/write data type min max mul_factor div_factor data pointer set function
4570 */ 4572 */
4571 ide_add_setting(drive, "buffer", SETTING_READ, -1, -1, TYPE_SHORT, 0, 0xffff, 1, 2, &tape->capabilities.buffer_size, NULL); 4573 ide_add_setting(drive, "buffer", SETTING_READ, TYPE_SHORT, 0, 0xffff, 1, 2, &tape->capabilities.buffer_size, NULL);
4572 ide_add_setting(drive, "pipeline_min", SETTING_RW, -1, -1, TYPE_INT, 1, 0xffff, tape->stage_size / 1024, 1, &tape->min_pipeline, NULL); 4574 ide_add_setting(drive, "pipeline_min", SETTING_RW, TYPE_INT, 1, 0xffff, tape->stage_size / 1024, 1, &tape->min_pipeline, NULL);
4573 ide_add_setting(drive, "pipeline", SETTING_RW, -1, -1, TYPE_INT, 1, 0xffff, tape->stage_size / 1024, 1, &tape->max_stages, NULL); 4575 ide_add_setting(drive, "pipeline", SETTING_RW, TYPE_INT, 1, 0xffff, tape->stage_size / 1024, 1, &tape->max_stages, NULL);
4574 ide_add_setting(drive, "pipeline_max", SETTING_RW, -1, -1, TYPE_INT, 1, 0xffff, tape->stage_size / 1024, 1, &tape->max_pipeline, NULL); 4576 ide_add_setting(drive, "pipeline_max", SETTING_RW, TYPE_INT, 1, 0xffff, tape->stage_size / 1024, 1, &tape->max_pipeline, NULL);
4575 ide_add_setting(drive, "pipeline_used",SETTING_READ, -1, -1, TYPE_INT, 0, 0xffff, tape->stage_size / 1024, 1, &tape->nr_stages, NULL); 4577 ide_add_setting(drive, "pipeline_used", SETTING_READ, TYPE_INT, 0, 0xffff, tape->stage_size / 1024, 1, &tape->nr_stages, NULL);
4576 ide_add_setting(drive, "pipeline_pending",SETTING_READ,-1, -1, TYPE_INT, 0, 0xffff, tape->stage_size / 1024, 1, &tape->nr_pending_stages, NULL); 4578 ide_add_setting(drive, "pipeline_pending", SETTING_READ, TYPE_INT, 0, 0xffff, tape->stage_size / 1024, 1, &tape->nr_pending_stages, NULL);
4577 ide_add_setting(drive, "speed", SETTING_READ, -1, -1, TYPE_SHORT, 0, 0xffff, 1, 1, &tape->capabilities.speed, NULL); 4579 ide_add_setting(drive, "speed", SETTING_READ, TYPE_SHORT, 0, 0xffff, 1, 1, &tape->capabilities.speed, NULL);
4578 ide_add_setting(drive, "stage", SETTING_READ, -1, -1, TYPE_INT, 0, 0xffff, 1, 1024, &tape->stage_size, NULL); 4580 ide_add_setting(drive, "stage", SETTING_READ, TYPE_INT, 0, 0xffff, 1, 1024, &tape->stage_size, NULL);
4579 ide_add_setting(drive, "tdsc", SETTING_RW, -1, -1, TYPE_INT, IDETAPE_DSC_RW_MIN, IDETAPE_DSC_RW_MAX, 1000, HZ, &tape->best_dsc_rw_frequency, NULL); 4581 ide_add_setting(drive, "tdsc", SETTING_RW, TYPE_INT, IDETAPE_DSC_RW_MIN, IDETAPE_DSC_RW_MAX, 1000, HZ, &tape->best_dsc_rw_frequency, NULL);
4580 ide_add_setting(drive, "dsc_overlap", SETTING_RW, -1, -1, TYPE_BYTE, 0, 1, 1, 1, &drive->dsc_overlap, NULL); 4582 ide_add_setting(drive, "dsc_overlap", SETTING_RW, TYPE_BYTE, 0, 1, 1, 1, &drive->dsc_overlap, NULL);
4581 ide_add_setting(drive, "pipeline_head_speed_c",SETTING_READ, -1, -1, TYPE_INT, 0, 0xffff, 1, 1, &tape->controlled_pipeline_head_speed, NULL); 4583 ide_add_setting(drive, "pipeline_head_speed_c",SETTING_READ, TYPE_INT, 0, 0xffff, 1, 1, &tape->controlled_pipeline_head_speed, NULL);
4582 ide_add_setting(drive, "pipeline_head_speed_u",SETTING_READ, -1, -1, TYPE_INT, 0, 0xffff, 1, 1, &tape->uncontrolled_pipeline_head_speed, NULL); 4584 ide_add_setting(drive, "pipeline_head_speed_u",SETTING_READ, TYPE_INT, 0, 0xffff, 1, 1, &tape->uncontrolled_pipeline_head_speed,NULL);
4583 ide_add_setting(drive, "avg_speed", SETTING_READ, -1, -1, TYPE_INT, 0, 0xffff, 1, 1, &tape->avg_speed, NULL); 4585 ide_add_setting(drive, "avg_speed", SETTING_READ, TYPE_INT, 0, 0xffff, 1, 1, &tape->avg_speed, NULL);
4584 ide_add_setting(drive, "debug_level",SETTING_RW, -1, -1, TYPE_INT, 0, 0xffff, 1, 1, &tape->debug_level, NULL); 4586 ide_add_setting(drive, "debug_level", SETTING_RW, TYPE_INT, 0, 0xffff, 1, 1, &tape->debug_level, NULL);
4585} 4587}
4588#else
4589static inline void idetape_add_settings(ide_drive_t *drive) { ; }
4590#endif
4586 4591
4587/* 4592/*
4588 * ide_setup is called to: 4593 * ide_setup is called to:
@@ -4703,7 +4708,7 @@ static void ide_tape_remove(ide_drive_t *drive)
4703{ 4708{
4704 idetape_tape_t *tape = drive->driver_data; 4709 idetape_tape_t *tape = drive->driver_data;
4705 4710
4706 ide_unregister_subdriver(drive, tape->driver); 4711 ide_proc_unregister_driver(drive, tape->driver);
4707 4712
4708 ide_unregister_region(tape->disk); 4713 ide_unregister_region(tape->disk);
4709 4714
@@ -4730,8 +4735,7 @@ static void ide_tape_release(struct kref *kref)
4730 kfree(tape); 4735 kfree(tape);
4731} 4736}
4732 4737
4733#ifdef CONFIG_PROC_FS 4738#ifdef CONFIG_IDE_PROC_FS
4734
4735static int proc_idetape_read_name 4739static int proc_idetape_read_name
4736 (char *page, char **start, off_t off, int count, int *eof, void *data) 4740 (char *page, char **start, off_t off, int count, int *eof, void *data)
4737{ 4741{
@@ -4749,11 +4753,6 @@ static ide_proc_entry_t idetape_proc[] = {
4749 { "name", S_IFREG|S_IRUGO, proc_idetape_read_name, NULL }, 4753 { "name", S_IFREG|S_IRUGO, proc_idetape_read_name, NULL },
4750 { NULL, 0, NULL, NULL } 4754 { NULL, 0, NULL, NULL }
4751}; 4755};
4752
4753#else
4754
4755#define idetape_proc NULL
4756
4757#endif 4756#endif
4758 4757
4759static int ide_tape_probe(ide_drive_t *); 4758static int ide_tape_probe(ide_drive_t *);
@@ -4773,7 +4772,9 @@ static ide_driver_t idetape_driver = {
4773 .end_request = idetape_end_request, 4772 .end_request = idetape_end_request,
4774 .error = __ide_error, 4773 .error = __ide_error,
4775 .abort = __ide_abort, 4774 .abort = __ide_abort,
4775#ifdef CONFIG_IDE_PROC_FS
4776 .proc = idetape_proc, 4776 .proc = idetape_proc,
4777#endif
4777}; 4778};
4778 4779
4779/* 4780/*
@@ -4864,7 +4865,7 @@ static int ide_tape_probe(ide_drive_t *drive)
4864 4865
4865 ide_init_disk(g, drive); 4866 ide_init_disk(g, drive);
4866 4867
4867 ide_register_subdriver(drive, &idetape_driver); 4868 ide_proc_register_driver(drive, &idetape_driver);
4868 4869
4869 kref_init(&tape->kref); 4870 kref_init(&tape->kref);
4870 4871
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index ae5bf2be6f52..f2b547ff7722 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -168,12 +168,11 @@ static const u8 ide_hwif_to_major[] = { IDE0_MAJOR, IDE1_MAJOR,
168 168
169static int idebus_parameter; /* holds the "idebus=" parameter */ 169static int idebus_parameter; /* holds the "idebus=" parameter */
170static int system_bus_speed; /* holds what we think is VESA/PCI bus speed */ 170static int system_bus_speed; /* holds what we think is VESA/PCI bus speed */
171static int initializing; /* set while initializing built-in drivers */
172 171
173DECLARE_MUTEX(ide_cfg_sem); 172DECLARE_MUTEX(ide_cfg_sem);
174 __cacheline_aligned_in_smp DEFINE_SPINLOCK(ide_lock); 173 __cacheline_aligned_in_smp DEFINE_SPINLOCK(ide_lock);
175 174
176#ifdef CONFIG_BLK_DEV_IDEPCI 175#ifdef CONFIG_IDEPCI_PCIBUS_ORDER
177static int ide_scan_direction; /* THIS was formerly 2.2.x pci=reverse */ 176static int ide_scan_direction; /* THIS was formerly 2.2.x pci=reverse */
178#endif 177#endif
179 178
@@ -216,9 +215,6 @@ static void init_hwif_data(ide_hwif_t *hwif, unsigned int index)
216 hwif->bus_state = BUSSTATE_ON; 215 hwif->bus_state = BUSSTATE_ON;
217 216
218 hwif->atapi_dma = 0; /* disable all atapi dma */ 217 hwif->atapi_dma = 0; /* disable all atapi dma */
219 hwif->ultra_mask = 0x80; /* disable all ultra */
220 hwif->mwdma_mask = 0x80; /* disable all mwdma */
221 hwif->swdma_mask = 0x80; /* disable all swdma */
222 218
223 init_completion(&hwif->gendev_rel_comp); 219 init_completion(&hwif->gendev_rel_comp);
224 220
@@ -305,9 +301,7 @@ static void __init init_ide_data (void)
305#endif 301#endif
306 } 302 }
307#ifdef CONFIG_IDE_ARM 303#ifdef CONFIG_IDE_ARM
308 initializing = 1;
309 ide_arm_init(); 304 ide_arm_init();
310 initializing = 0;
311#endif 305#endif
312} 306}
313 307
@@ -353,10 +347,6 @@ static int ide_system_bus_speed(void)
353 return system_bus_speed; 347 return system_bus_speed;
354} 348}
355 349
356#ifdef CONFIG_PROC_FS
357struct proc_dir_entry *proc_ide_root;
358#endif
359
360static struct resource* hwif_request_region(ide_hwif_t *hwif, 350static struct resource* hwif_request_region(ide_hwif_t *hwif,
361 unsigned long addr, int num) 351 unsigned long addr, int num)
362{ 352{
@@ -480,6 +470,7 @@ static void ide_hwif_restore(ide_hwif_t *hwif, ide_hwif_t *tmp_hwif)
480 470
481 hwif->tuneproc = tmp_hwif->tuneproc; 471 hwif->tuneproc = tmp_hwif->tuneproc;
482 hwif->speedproc = tmp_hwif->speedproc; 472 hwif->speedproc = tmp_hwif->speedproc;
473 hwif->udma_filter = tmp_hwif->udma_filter;
483 hwif->selectproc = tmp_hwif->selectproc; 474 hwif->selectproc = tmp_hwif->selectproc;
484 hwif->reset_poll = tmp_hwif->reset_poll; 475 hwif->reset_poll = tmp_hwif->reset_poll;
485 hwif->pre_reset = tmp_hwif->pre_reset; 476 hwif->pre_reset = tmp_hwif->pre_reset;
@@ -599,7 +590,7 @@ void ide_unregister(unsigned int index)
599 590
600 spin_unlock_irq(&ide_lock); 591 spin_unlock_irq(&ide_lock);
601 592
602 destroy_proc_ide_interface(hwif); 593 ide_proc_unregister_port(hwif);
603 594
604 hwgroup = hwif->hwgroup; 595 hwgroup = hwif->hwgroup;
605 /* 596 /*
@@ -751,6 +742,7 @@ void ide_setup_ports ( hw_regs_t *hw,
751/** 742/**
752 * ide_register_hw_with_fixup - register IDE interface 743 * ide_register_hw_with_fixup - register IDE interface
753 * @hw: hardware registers 744 * @hw: hardware registers
745 * @initializing: set while initializing built-in drivers
754 * @hwifp: pointer to returned hwif 746 * @hwifp: pointer to returned hwif
755 * @fixup: fixup function 747 * @fixup: fixup function
756 * 748 *
@@ -760,7 +752,9 @@ void ide_setup_ports ( hw_regs_t *hw,
760 * Returns -1 on error. 752 * Returns -1 on error.
761 */ 753 */
762 754
763int ide_register_hw_with_fixup(hw_regs_t *hw, ide_hwif_t **hwifp, void(*fixup)(ide_hwif_t *hwif)) 755int ide_register_hw_with_fixup(hw_regs_t *hw, int initializing,
756 ide_hwif_t **hwifp,
757 void(*fixup)(ide_hwif_t *hwif))
764{ 758{
765 int index, retry = 1; 759 int index, retry = 1;
766 ide_hwif_t *hwif; 760 ide_hwif_t *hwif;
@@ -801,7 +795,7 @@ found:
801 795
802 if (!initializing) { 796 if (!initializing) {
803 probe_hwif_init_with_fixup(hwif, fixup); 797 probe_hwif_init_with_fixup(hwif, fixup);
804 create_proc_ide_interfaces(); 798 ide_proc_register_port(hwif);
805 } 799 }
806 800
807 if (hwifp) 801 if (hwifp)
@@ -812,9 +806,9 @@ found:
812 806
813EXPORT_SYMBOL(ide_register_hw_with_fixup); 807EXPORT_SYMBOL(ide_register_hw_with_fixup);
814 808
815int ide_register_hw(hw_regs_t *hw, ide_hwif_t **hwifp) 809int ide_register_hw(hw_regs_t *hw, int initializing, ide_hwif_t **hwifp)
816{ 810{
817 return ide_register_hw_with_fixup(hw, hwifp, NULL); 811 return ide_register_hw_with_fixup(hw, initializing, hwifp, NULL);
818} 812}
819 813
820EXPORT_SYMBOL(ide_register_hw); 814EXPORT_SYMBOL(ide_register_hw);
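
ide_register_hw() and ide_register_hw_with_fixup() now take the initializing flag as an explicit parameter instead of reading a file-static variable. Built-in board probes in the legacy drivers further down pass 1; hotplug paths such as ide-cs pass 0. A minimal call-site sketch (fragment, kernel context assumed, hw setup elided):

	hw_regs_t hw;		/* filled in by ide_setup_ports() beforehand */
	ide_hwif_t *hwif;
	int index;

	index = ide_register_hw(&hw, 1, &hwif);	/* built-in probe at init time */
	index = ide_register_hw(&hw, 0, &hwif);	/* hotplug registration */
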
@@ -825,205 +819,7 @@ EXPORT_SYMBOL(ide_register_hw);
825 819
826DECLARE_MUTEX(ide_setting_sem); 820DECLARE_MUTEX(ide_setting_sem);
827 821
828/** 822EXPORT_SYMBOL_GPL(ide_setting_sem);
829 * __ide_add_setting - add an ide setting option
830 * @drive: drive to use
831 * @name: setting name
832 * @rw: true if the function is read write
833 * @read_ioctl: function to call on read
834 * @write_ioctl: function to call on write
835 * @data_type: type of data
836 * @min: range minimum
837 * @max: range maximum
838 * @mul_factor: multiplication scale
 839 * @div_factor: division scale
840 * @data: private data field
841 * @set: setting
842 * @auto_remove: setting auto removal flag
843 *
 844 * Adds the named setting to the device's sorted settings list.
 845 * The function takes ide_setting_sem to protect against
846 * parallel changes. This function must not be called from IRQ
847 * context. Returns 0 on success or -1 on failure.
848 *
849 * BUGS: This code is seriously over-engineered. There is also
 850 * magic about how the driver-specific features are set up. If
 851 * a driver is attached we assume the driver's settings are
 852 * auto-remove.
853 */
854
855static int __ide_add_setting(ide_drive_t *drive, const char *name, int rw, int read_ioctl, int write_ioctl, int data_type, int min, int max, int mul_factor, int div_factor, void *data, ide_procset_t *set, int auto_remove)
856{
857 ide_settings_t **p = (ide_settings_t **) &drive->settings, *setting = NULL;
858
859 down(&ide_setting_sem);
860 while ((*p) && strcmp((*p)->name, name) < 0)
861 p = &((*p)->next);
862 if ((setting = kzalloc(sizeof(*setting), GFP_KERNEL)) == NULL)
863 goto abort;
864 if ((setting->name = kmalloc(strlen(name) + 1, GFP_KERNEL)) == NULL)
865 goto abort;
866 strcpy(setting->name, name);
867 setting->rw = rw;
868 setting->read_ioctl = read_ioctl;
869 setting->write_ioctl = write_ioctl;
870 setting->data_type = data_type;
871 setting->min = min;
872 setting->max = max;
873 setting->mul_factor = mul_factor;
874 setting->div_factor = div_factor;
875 setting->data = data;
876 setting->set = set;
877
878 setting->next = *p;
879 if (auto_remove)
880 setting->auto_remove = 1;
881 *p = setting;
882 up(&ide_setting_sem);
883 return 0;
884abort:
885 up(&ide_setting_sem);
886 kfree(setting);
887 return -1;
888}
889
890int ide_add_setting(ide_drive_t *drive, const char *name, int rw, int read_ioctl, int write_ioctl, int data_type, int min, int max, int mul_factor, int div_factor, void *data, ide_procset_t *set)
891{
892 return __ide_add_setting(drive, name, rw, read_ioctl, write_ioctl, data_type, min, max, mul_factor, div_factor, data, set, 1);
893}
894
895EXPORT_SYMBOL(ide_add_setting);
896
897/**
898 * __ide_remove_setting - remove an ide setting option
899 * @drive: drive to use
900 * @name: setting name
901 *
902 * Removes the setting named from the device if it is present.
903 * The caller must hold the setting semaphore.
904 */
905
906static void __ide_remove_setting (ide_drive_t *drive, char *name)
907{
908 ide_settings_t **p, *setting;
909
910 p = (ide_settings_t **) &drive->settings;
911
912 while ((*p) && strcmp((*p)->name, name))
913 p = &((*p)->next);
914 if ((setting = (*p)) == NULL)
915 return;
916
917 (*p) = setting->next;
918
919 kfree(setting->name);
920 kfree(setting);
921}
922
923/**
924 * ide_find_setting_by_ioctl - find a drive specific ioctl
925 * @drive: drive to scan
926 * @cmd: ioctl command to handle
927 *
 928 * Scans the device setting table for a matching entry and returns
 929 * this or NULL if no entry is found. The caller must hold the
 930 * setting semaphore.
931 */
932
933static ide_settings_t *ide_find_setting_by_ioctl (ide_drive_t *drive, int cmd)
934{
935 ide_settings_t *setting = drive->settings;
936
937 while (setting) {
938 if (setting->read_ioctl == cmd || setting->write_ioctl == cmd)
939 break;
940 setting = setting->next;
941 }
942
943 return setting;
944}
945
946/**
947 * ide_find_setting_by_name - find a drive specific setting
948 * @drive: drive to scan
949 * @name: setting name
950 *
 951 * Scans the device setting table for a matching entry and returns
 952 * this or NULL if no entry is found. The caller must hold the
 953 * setting semaphore.
954 */
955
956ide_settings_t *ide_find_setting_by_name (ide_drive_t *drive, char *name)
957{
958 ide_settings_t *setting = drive->settings;
959
960 while (setting) {
961 if (strcmp(setting->name, name) == 0)
962 break;
963 setting = setting->next;
964 }
965 return setting;
966}
967
968/**
969 * auto_remove_settings - remove driver specific settings
970 * @drive: drive
971 *
972 * Automatically remove all the driver specific settings for this
973 * drive. This function may not be called from IRQ context. The
974 * caller must hold ide_setting_sem.
975 */
976
977static void auto_remove_settings (ide_drive_t *drive)
978{
979 ide_settings_t *setting;
980repeat:
981 setting = drive->settings;
982 while (setting) {
983 if (setting->auto_remove) {
984 __ide_remove_setting(drive, setting->name);
985 goto repeat;
986 }
987 setting = setting->next;
988 }
989}
990
991/**
992 * ide_read_setting - read an IDE setting
993 * @drive: drive to read from
994 * @setting: drive setting
995 *
996 * Read a drive setting and return the value. The caller
997 * must hold the ide_setting_sem when making this call.
998 *
 999 * BUGS: the data value and the error share the same return
 1000 * value, so an -EINVAL error and a genuine reading of -EINVAL
 1001 * cannot be told apart.
1002 */
1003
1004int ide_read_setting (ide_drive_t *drive, ide_settings_t *setting)
1005{
1006 int val = -EINVAL;
1007 unsigned long flags;
1008
1009 if ((setting->rw & SETTING_READ)) {
1010 spin_lock_irqsave(&ide_lock, flags);
1011 switch(setting->data_type) {
1012 case TYPE_BYTE:
1013 val = *((u8 *) setting->data);
1014 break;
1015 case TYPE_SHORT:
1016 val = *((u16 *) setting->data);
1017 break;
1018 case TYPE_INT:
1019 case TYPE_INTA:
1020 val = *((u32 *) setting->data);
1021 break;
1022 }
1023 spin_unlock_irqrestore(&ide_lock, flags);
1024 }
1025 return val;
1026}
1027 823
1028/** 824/**
1029 * ide_spin_wait_hwgroup - wait for group 825 * ide_spin_wait_hwgroup - wait for group
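
The deleted __ide_add_setting() keeps drive->settings as a singly linked list sorted by name, inserting with a pointer-to-pointer walk. A self-contained userspace sketch of that insertion pattern (simplified struct, libc allocators standing in for kzalloc):

	#include <stdlib.h>
	#include <string.h>

	struct setting {
		char *name;
		struct setting *next;
	};

	/* Insert while keeping the list sorted by name; returns 0 on success,
	 * -1 on allocation failure -- mirroring the deleted kernel helper. */
	static int setting_insert(struct setting **head, const char *name)
	{
		struct setting **p = head, *s;

		while (*p && strcmp((*p)->name, name) < 0)
			p = &(*p)->next;

		s = calloc(1, sizeof(*s));
		if (!s)
			return -1;
		s->name = strdup(name);
		if (!s->name) {
			free(s);
			return -1;
		}
		s->next = *p;
		*p = s;
		return 0;
	}
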
@@ -1058,61 +854,14 @@ int ide_spin_wait_hwgroup (ide_drive_t *drive)
1058 854
1059EXPORT_SYMBOL(ide_spin_wait_hwgroup); 855EXPORT_SYMBOL(ide_spin_wait_hwgroup);
1060 856
1061/** 857int set_io_32bit(ide_drive_t *drive, int arg)
 1062 * ide_write_setting - write an IDE setting
 1063 * @drive: drive to write to
1064 * @setting: drive setting
1065 * @val: value
1066 *
1067 * Write a drive setting if it is possible. The caller
1068 * must hold the ide_setting_sem when making this call.
1069 *
 1070 * BUGS: the data value and the error share the same return
 1071 * value, so an -EINVAL error and a genuine value of -EINVAL
 1072 * cannot be told apart.
1073 *
1074 * FIXME: This should be changed to enqueue a special request
1075 * to the driver to change settings, and then wait on a sema for completion.
1076 * The current scheme of polling is kludgy, though safe enough.
1077 */
1078
1079int ide_write_setting (ide_drive_t *drive, ide_settings_t *setting, int val)
1080{ 858{
1081 int i; 859 if (drive->no_io_32bit)
1082 u32 *p;
1083
1084 if (!capable(CAP_SYS_ADMIN))
1085 return -EACCES;
1086 if (!(setting->rw & SETTING_WRITE))
1087 return -EPERM; 860 return -EPERM;
1088 if (val < setting->min || val > setting->max) 861
862 if (arg < 0 || arg > 1 + (SUPPORT_VLB_SYNC << 1))
1089 return -EINVAL; 863 return -EINVAL;
1090 if (setting->set)
1091 return setting->set(drive, val);
1092 if (ide_spin_wait_hwgroup(drive))
1093 return -EBUSY;
1094 switch (setting->data_type) {
1095 case TYPE_BYTE:
1096 *((u8 *) setting->data) = val;
1097 break;
1098 case TYPE_SHORT:
1099 *((u16 *) setting->data) = val;
1100 break;
1101 case TYPE_INT:
1102 *((u32 *) setting->data) = val;
1103 break;
1104 case TYPE_INTA:
1105 p = (u32 *) setting->data;
1106 for (i = 0; i < 1 << PARTN_BITS; i++, p++)
1107 *p = val;
1108 break;
1109 }
1110 spin_unlock_irq(&ide_lock);
1111 return 0;
1112}
1113 864
1114static int set_io_32bit(ide_drive_t *drive, int arg)
1115{
1116 drive->io_32bit = arg; 865 drive->io_32bit = arg;
1117#ifdef CONFIG_BLK_DEV_DTC2278 866#ifdef CONFIG_BLK_DEV_DTC2278
1118 if (HWIF(drive)->chipset == ide_dtc2278) 867 if (HWIF(drive)->chipset == ide_dtc2278)
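
With the generic min/max check in ide_write_setting() gone, each exported setter now validates its own argument. For set_io_32bit() the upper bound is 1 + (SUPPORT_VLB_SYNC << 1): 0-1 when VLB sync support is not compiled in, 0-3 when it is. A small standalone sketch of the bound arithmetic (SUPPORT_VLB_SYNC hardwired to 1 here for illustration; in the real driver it is a 0/1 compile-time constant):

	#define SUPPORT_VLB_SYNC 1

	/* Mirrors the range check added to set_io_32bit() above. */
	static int io_32bit_arg_ok(int arg)
	{
		return arg >= 0 && arg <= 1 + (SUPPORT_VLB_SYNC << 1); /* 0..3 */
	}
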
@@ -1121,12 +870,28 @@ static int set_io_32bit(ide_drive_t *drive, int arg)
1121 return 0; 870 return 0;
1122} 871}
1123 872
1124static int set_using_dma (ide_drive_t *drive, int arg) 873static int set_ksettings(ide_drive_t *drive, int arg)
874{
875 if (arg < 0 || arg > 1)
876 return -EINVAL;
877
878 if (ide_spin_wait_hwgroup(drive))
879 return -EBUSY;
880 drive->keep_settings = arg;
881 spin_unlock_irq(&ide_lock);
882
883 return 0;
884}
885
886int set_using_dma(ide_drive_t *drive, int arg)
1125{ 887{
1126#ifdef CONFIG_BLK_DEV_IDEDMA 888#ifdef CONFIG_BLK_DEV_IDEDMA
1127 ide_hwif_t *hwif = drive->hwif; 889 ide_hwif_t *hwif = drive->hwif;
1128 int err = -EPERM; 890 int err = -EPERM;
1129 891
892 if (arg < 0 || arg > 1)
893 return -EINVAL;
894
1130 if (!drive->id || !(drive->id->capability & 1)) 895 if (!drive->id || !(drive->id->capability & 1))
1131 goto out; 896 goto out;
1132 897
@@ -1159,14 +924,20 @@ static int set_using_dma (ide_drive_t *drive, int arg)
1159out: 924out:
1160 return err; 925 return err;
1161#else 926#else
927 if (arg < 0 || arg > 1)
928 return -EINVAL;
929
1162 return -EPERM; 930 return -EPERM;
1163#endif 931#endif
1164} 932}
1165 933
1166static int set_pio_mode (ide_drive_t *drive, int arg) 934int set_pio_mode(ide_drive_t *drive, int arg)
1167{ 935{
1168 struct request rq; 936 struct request rq;
1169 937
938 if (arg < 0 || arg > 255)
939 return -EINVAL;
940
1170 if (!HWIF(drive)->tuneproc) 941 if (!HWIF(drive)->tuneproc)
1171 return -ENOSYS; 942 return -ENOSYS;
1172 if (drive->special.b.set_tune) 943 if (drive->special.b.set_tune)
@@ -1178,42 +949,20 @@ static int set_pio_mode (ide_drive_t *drive, int arg)
1178 return 0; 949 return 0;
1179} 950}
1180 951
1181static int set_xfer_rate (ide_drive_t *drive, int arg) 952static int set_unmaskirq(ide_drive_t *drive, int arg)
1182{ 953{
1183 int err = ide_wait_cmd(drive, 954 if (drive->no_unmask)
1184 WIN_SETFEATURES, (u8) arg, 955 return -EPERM;
1185 SETFEATURES_XFER, 0, NULL);
1186 956
1187 if (!err && arg) { 957 if (arg < 0 || arg > 1)
1188 ide_set_xfer_rate(drive, (u8) arg); 958 return -EINVAL;
1189 ide_driveid_update(drive);
1190 }
1191 return err;
1192}
1193 959
1194/** 960 if (ide_spin_wait_hwgroup(drive))
1195 * ide_add_generic_settings - generic ide settings 961 return -EBUSY;
1196 * @drive: drive being configured 962 drive->unmask = arg;
1197 * 963 spin_unlock_irq(&ide_lock);
1198 * Add the generic parts of the system settings to the /proc files and
1199 * ioctls for this IDE device. The caller must not be holding the
1200 * ide_setting_sem.
1201 */
1202 964
1203void ide_add_generic_settings (ide_drive_t *drive) 965 return 0;
1204{
1205/*
1206 * drive setting name read/write access read ioctl write ioctl data type min max mul_factor div_factor data pointer set function
1207 */
1208 __ide_add_setting(drive, "io_32bit", drive->no_io_32bit ? SETTING_READ : SETTING_RW, HDIO_GET_32BIT, HDIO_SET_32BIT, TYPE_BYTE, 0, 1 + (SUPPORT_VLB_SYNC << 1), 1, 1, &drive->io_32bit, set_io_32bit, 0);
1209 __ide_add_setting(drive, "keepsettings", SETTING_RW, HDIO_GET_KEEPSETTINGS, HDIO_SET_KEEPSETTINGS, TYPE_BYTE, 0, 1, 1, 1, &drive->keep_settings, NULL, 0);
1210 __ide_add_setting(drive, "nice1", SETTING_RW, -1, -1, TYPE_BYTE, 0, 1, 1, 1, &drive->nice1, NULL, 0);
1211 __ide_add_setting(drive, "pio_mode", SETTING_WRITE, -1, HDIO_SET_PIO_MODE, TYPE_BYTE, 0, 255, 1, 1, NULL, set_pio_mode, 0);
1212 __ide_add_setting(drive, "unmaskirq", drive->no_unmask ? SETTING_READ : SETTING_RW, HDIO_GET_UNMASKINTR, HDIO_SET_UNMASKINTR, TYPE_BYTE, 0, 1, 1, 1, &drive->unmask, NULL, 0);
1213 __ide_add_setting(drive, "using_dma", SETTING_RW, HDIO_GET_DMA, HDIO_SET_DMA, TYPE_BYTE, 0, 1, 1, 1, &drive->using_dma, set_using_dma, 0);
1214 __ide_add_setting(drive, "init_speed", SETTING_RW, -1, -1, TYPE_BYTE, 0, 70, 1, 1, &drive->init_speed, NULL, 0);
1215 __ide_add_setting(drive, "current_speed", SETTING_RW, -1, -1, TYPE_BYTE, 0, 70, 1, 1, &drive->current_speed, set_xfer_rate, 0);
1216 __ide_add_setting(drive, "number", SETTING_RW, -1, -1, TYPE_BYTE, 0, 3, 1, 1, &drive->dn, NULL, 0);
1217} 966}
1218 967
1219/** 968/**
@@ -1285,27 +1034,23 @@ static int generic_ide_resume(struct device *dev)
1285int generic_ide_ioctl(ide_drive_t *drive, struct file *file, struct block_device *bdev, 1034int generic_ide_ioctl(ide_drive_t *drive, struct file *file, struct block_device *bdev,
1286 unsigned int cmd, unsigned long arg) 1035 unsigned int cmd, unsigned long arg)
1287{ 1036{
1288 ide_settings_t *setting; 1037 unsigned long flags;
1289 ide_driver_t *drv; 1038 ide_driver_t *drv;
1290 int err = 0;
1291 void __user *p = (void __user *)arg; 1039 void __user *p = (void __user *)arg;
1040 int err = 0, (*setfunc)(ide_drive_t *, int);
1041 u8 *val;
1292 1042
1293 down(&ide_setting_sem); 1043 switch (cmd) {
1294 if ((setting = ide_find_setting_by_ioctl(drive, cmd)) != NULL) { 1044 case HDIO_GET_32BIT: val = &drive->io_32bit; goto read_val;
1295 if (cmd == setting->read_ioctl) { 1045 case HDIO_GET_KEEPSETTINGS: val = &drive->keep_settings; goto read_val;
1296 err = ide_read_setting(drive, setting); 1046 case HDIO_GET_UNMASKINTR: val = &drive->unmask; goto read_val;
1297 up(&ide_setting_sem); 1047 case HDIO_GET_DMA: val = &drive->using_dma; goto read_val;
1298 return err >= 0 ? put_user(err, (long __user *)arg) : err; 1048 case HDIO_SET_32BIT: setfunc = set_io_32bit; goto set_val;
1299 } else { 1049 case HDIO_SET_KEEPSETTINGS: setfunc = set_ksettings; goto set_val;
1300 if (bdev != bdev->bd_contains) 1050 case HDIO_SET_PIO_MODE: setfunc = set_pio_mode; goto set_val;
1301 err = -EINVAL; 1051 case HDIO_SET_UNMASKINTR: setfunc = set_unmaskirq; goto set_val;
1302 else 1052 case HDIO_SET_DMA: setfunc = set_using_dma; goto set_val;
1303 err = ide_write_setting(drive, setting, arg);
1304 up(&ide_setting_sem);
1305 return err;
1306 }
1307 } 1053 }
1308 up(&ide_setting_sem);
1309 1054
1310 switch (cmd) { 1055 switch (cmd) {
1311 case HDIO_OBSOLETE_IDENTITY: 1056 case HDIO_OBSOLETE_IDENTITY:
@@ -1359,7 +1104,7 @@ int generic_ide_ioctl(ide_drive_t *drive, struct file *file, struct block_device
1359 ide_init_hwif_ports(&hw, (unsigned long) args[0], 1104 ide_init_hwif_ports(&hw, (unsigned long) args[0],
1360 (unsigned long) args[1], NULL); 1105 (unsigned long) args[1], NULL);
1361 hw.irq = args[2]; 1106 hw.irq = args[2];
1362 if (ide_register_hw(&hw, NULL) == -1) 1107 if (ide_register_hw(&hw, 0, NULL) == -1)
1363 return -EIO; 1108 return -EIO;
1364 return 0; 1109 return 0;
1365 } 1110 }
@@ -1434,6 +1179,28 @@ int generic_ide_ioctl(ide_drive_t *drive, struct file *file, struct block_device
1434 default: 1179 default:
1435 return -EINVAL; 1180 return -EINVAL;
1436 } 1181 }
1182
1183read_val:
1184 down(&ide_setting_sem);
1185 spin_lock_irqsave(&ide_lock, flags);
1186 err = *val;
1187 spin_unlock_irqrestore(&ide_lock, flags);
1188 up(&ide_setting_sem);
1189 return err >= 0 ? put_user(err, (long __user *)arg) : err;
1190
1191set_val:
1192 if (bdev != bdev->bd_contains)
1193 err = -EINVAL;
1194 else {
1195 if (!capable(CAP_SYS_ADMIN))
1196 err = -EACCES;
1197 else {
1198 down(&ide_setting_sem);
1199 err = setfunc(drive, arg);
1200 up(&ide_setting_sem);
1201 }
1202 }
1203 return err;
1437} 1204}
1438 1205
1439EXPORT_SYMBOL(generic_ide_ioctl); 1206EXPORT_SYMBOL(generic_ide_ioctl);
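
The read_val path above copies the byte out through a long, matching how the HDIO_GET_* ioctls have always been consumed from userspace. A hedged usage sketch (the device path is illustrative, not taken from this patch):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/hdreg.h>

	int main(void)
	{
		long val;
		int fd = open("/dev/hda", O_RDONLY | O_NONBLOCK);

		if (fd < 0)
			return 1;
		/* HDIO_GET_DMA fills a long with the drive's using_dma flag */
		if (ioctl(fd, HDIO_GET_DMA, &val) == 0)
			printf("using_dma = %ld\n", val);
		close(fd);
		return 0;
	}
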
@@ -1566,13 +1333,13 @@ static int __init ide_setup(char *s)
1566 return 1; 1333 return 1;
1567 } 1334 }
1568 1335
1569#ifdef CONFIG_BLK_DEV_IDEPCI 1336#ifdef CONFIG_IDEPCI_PCIBUS_ORDER
1570 if (!strcmp(s, "ide=reverse")) { 1337 if (!strcmp(s, "ide=reverse")) {
1571 ide_scan_direction = 1; 1338 ide_scan_direction = 1;
1572 printk(" : Enabled support for IDE inverse scan order.\n"); 1339 printk(" : Enabled support for IDE inverse scan order.\n");
1573 return 1; 1340 return 1;
1574 } 1341 }
1575#endif /* CONFIG_BLK_DEV_IDEPCI */ 1342#endif
1576 1343
1577#ifdef CONFIG_BLK_DEV_IDEACPI 1344#ifdef CONFIG_BLK_DEV_IDEACPI
1578 if (!strcmp(s, "ide=noacpi")) { 1345 if (!strcmp(s, "ide=noacpi")) {
@@ -1832,9 +1599,9 @@ extern void __init h8300_ide_init(void);
1832 */ 1599 */
1833static void __init probe_for_hwifs (void) 1600static void __init probe_for_hwifs (void)
1834{ 1601{
1835#ifdef CONFIG_BLK_DEV_IDEPCI 1602#ifdef CONFIG_IDEPCI_PCIBUS_ORDER
1836 ide_scan_pcibus(ide_scan_direction); 1603 ide_scan_pcibus(ide_scan_direction);
1837#endif /* CONFIG_BLK_DEV_IDEPCI */ 1604#endif
1838 1605
1839#ifdef CONFIG_ETRAX_IDE 1606#ifdef CONFIG_ETRAX_IDE
1840 { 1607 {
@@ -1892,54 +1659,6 @@ static void __init probe_for_hwifs (void)
1892#endif 1659#endif
1893} 1660}
1894 1661
1895void ide_register_subdriver(ide_drive_t *drive, ide_driver_t *driver)
1896{
1897#ifdef CONFIG_PROC_FS
1898 ide_add_proc_entries(drive->proc, driver->proc, drive);
1899#endif
1900}
1901
1902EXPORT_SYMBOL(ide_register_subdriver);
1903
1904/**
1905 * ide_unregister_subdriver - disconnect drive from driver
1906 * @drive: drive to unplug
1907 * @driver: driver
1908 *
1909 * Disconnect a drive from the driver it was attached to and then
1910 * clean up the various proc files and other objects attached to it.
1911 *
1912 * Takes ide_setting_sem and ide_lock.
1913 * Caller must hold none of the locks.
1914 */
1915
1916void ide_unregister_subdriver(ide_drive_t *drive, ide_driver_t *driver)
1917{
1918 unsigned long flags;
1919
1920#ifdef CONFIG_PROC_FS
1921 ide_remove_proc_entries(drive->proc, driver->proc);
1922#endif
1923 down(&ide_setting_sem);
1924 spin_lock_irqsave(&ide_lock, flags);
1925 /*
1926 * ide_setting_sem protects the settings list
1927 * ide_lock protects the use of settings
1928 *
 1929 * so we need to hold both: ide_setting_sem because we want to
1930 * modify the settings list, and ide_lock because we cannot take
1931 * a setting out that is being used.
1932 *
1933 * OTOH both ide_{read,write}_setting are only ever used under
1934 * ide_setting_sem.
1935 */
1936 auto_remove_settings(drive);
1937 spin_unlock_irqrestore(&ide_lock, flags);
1938 up(&ide_setting_sem);
1939}
1940
1941EXPORT_SYMBOL(ide_unregister_subdriver);
1942
1943/* 1662/*
1944 * Probe module 1663 * Probe module
1945 */ 1664 */
@@ -2071,9 +1790,7 @@ static int __init ide_init(void)
2071 1790
2072 init_ide_data(); 1791 init_ide_data();
2073 1792
2074#ifdef CONFIG_PROC_FS 1793 proc_ide_create();
2075 proc_ide_root = proc_mkdir("ide", NULL);
2076#endif
2077 1794
2078#ifdef CONFIG_BLK_DEV_ALI14XX 1795#ifdef CONFIG_BLK_DEV_ALI14XX
2079 if (probe_ali14xx) 1796 if (probe_ali14xx)
@@ -2096,14 +1813,9 @@ static int __init ide_init(void)
2096 (void)qd65xx_init(); 1813 (void)qd65xx_init();
2097#endif 1814#endif
2098 1815
2099 initializing = 1;
2100 /* Probe for special PCI and other "known" interface chipsets. */ 1816 /* Probe for special PCI and other "known" interface chipsets. */
2101 probe_for_hwifs(); 1817 probe_for_hwifs();
2102 initializing = 0;
2103 1818
2104#ifdef CONFIG_PROC_FS
2105 proc_ide_create();
2106#endif
2107 return 0; 1819 return 0;
2108} 1820}
2109 1821
@@ -2143,9 +1855,7 @@ void __exit cleanup_module (void)
2143 pnpide_exit(); 1855 pnpide_exit();
2144#endif 1856#endif
2145 1857
2146#ifdef CONFIG_PROC_FS
2147 proc_ide_destroy(); 1858 proc_ide_destroy();
2148#endif
2149 1859
2150 bus_unregister(&ide_bus_type); 1860 bus_unregister(&ide_bus_type);
2151} 1861}
diff --git a/drivers/ide/legacy/ali14xx.c b/drivers/ide/legacy/ali14xx.c
index 91961aa03047..df17ed68c0bc 100644
--- a/drivers/ide/legacy/ali14xx.c
+++ b/drivers/ide/legacy/ali14xx.c
@@ -223,7 +223,8 @@ static int __init ali14xx_probe(void)
223 probe_hwif_init(hwif); 223 probe_hwif_init(hwif);
224 probe_hwif_init(mate); 224 probe_hwif_init(mate);
225 225
226 create_proc_ide_interfaces(); 226 ide_proc_register_port(hwif);
227 ide_proc_register_port(mate);
227 228
228 return 0; 229 return 0;
229} 230}
diff --git a/drivers/ide/legacy/buddha.c b/drivers/ide/legacy/buddha.c
index 1ed224a01f79..101aee1711c4 100644
--- a/drivers/ide/legacy/buddha.c
+++ b/drivers/ide/legacy/buddha.c
@@ -213,7 +213,7 @@ fail_base2:
213 IRQ_AMIGA_PORTS); 213 IRQ_AMIGA_PORTS);
214 } 214 }
215 215
216 index = ide_register_hw(&hw, &hwif); 216 index = ide_register_hw(&hw, 1, &hwif);
217 if (index != -1) { 217 if (index != -1) {
218 hwif->mmio = 1; 218 hwif->mmio = 1;
219 printk("ide%d: ", index); 219 printk("ide%d: ", index);
diff --git a/drivers/ide/legacy/dtc2278.c b/drivers/ide/legacy/dtc2278.c
index 0219ffa64db6..36a3f0ac6162 100644
--- a/drivers/ide/legacy/dtc2278.c
+++ b/drivers/ide/legacy/dtc2278.c
@@ -138,7 +138,8 @@ static int __init dtc2278_probe(void)
138 probe_hwif_init(hwif); 138 probe_hwif_init(hwif);
139 probe_hwif_init(mate); 139 probe_hwif_init(mate);
140 140
141 create_proc_ide_interfaces(); 141 ide_proc_register_port(hwif);
142 ide_proc_register_port(mate);
142 143
143 return 0; 144 return 0;
144} 145}
diff --git a/drivers/ide/legacy/falconide.c b/drivers/ide/legacy/falconide.c
index a9f2cd5bb81e..e1e9d9d6893f 100644
--- a/drivers/ide/legacy/falconide.c
+++ b/drivers/ide/legacy/falconide.c
@@ -70,7 +70,7 @@ void __init falconide_init(void)
70 0, 0, NULL, 70 0, 0, NULL,
71// falconide_iops, 71// falconide_iops,
72 IRQ_MFP_IDE); 72 IRQ_MFP_IDE);
73 index = ide_register_hw(&hw, NULL); 73 index = ide_register_hw(&hw, 1, NULL);
74 74
75 if (index != -1) 75 if (index != -1)
76 printk("ide%d: Falcon IDE interface\n", index); 76 printk("ide%d: Falcon IDE interface\n", index);
diff --git a/drivers/ide/legacy/gayle.c b/drivers/ide/legacy/gayle.c
index dcfadbbf55d8..0830a021bbb6 100644
--- a/drivers/ide/legacy/gayle.c
+++ b/drivers/ide/legacy/gayle.c
@@ -165,7 +165,7 @@ found:
165// &gayle_iops, 165// &gayle_iops,
166 IRQ_AMIGA_PORTS); 166 IRQ_AMIGA_PORTS);
167 167
168 index = ide_register_hw(&hw, &hwif); 168 index = ide_register_hw(&hw, 1, &hwif);
169 if (index != -1) { 169 if (index != -1) {
170 hwif->mmio = 1; 170 hwif->mmio = 1;
171 switch (i) { 171 switch (i) {
diff --git a/drivers/ide/legacy/ht6560b.c b/drivers/ide/legacy/ht6560b.c
index a2832643c522..c8f353b1296f 100644
--- a/drivers/ide/legacy/ht6560b.c
+++ b/drivers/ide/legacy/ht6560b.c
@@ -357,7 +357,8 @@ int __init ht6560b_init(void)
357 probe_hwif_init(hwif); 357 probe_hwif_init(hwif);
358 probe_hwif_init(mate); 358 probe_hwif_init(mate);
359 359
360 create_proc_ide_interfaces(); 360 ide_proc_register_port(hwif);
361 ide_proc_register_port(mate);
361 362
362 return 0; 363 return 0;
363 364
diff --git a/drivers/ide/legacy/ide-cs.c b/drivers/ide/legacy/ide-cs.c
index c6522a64d7ec..2f3977f195b7 100644
--- a/drivers/ide/legacy/ide-cs.c
+++ b/drivers/ide/legacy/ide-cs.c
@@ -153,7 +153,7 @@ static int idecs_register(unsigned long io, unsigned long ctl, unsigned long irq
153 hw.irq = irq; 153 hw.irq = irq;
154 hw.chipset = ide_pci; 154 hw.chipset = ide_pci;
155 hw.dev = &handle->dev; 155 hw.dev = &handle->dev;
156 return ide_register_hw_with_fixup(&hw, NULL, ide_undecoded_slave); 156 return ide_register_hw_with_fixup(&hw, 0, NULL, ide_undecoded_slave);
157} 157}
158 158
159/*====================================================================== 159/*======================================================================
diff --git a/drivers/ide/legacy/macide.c b/drivers/ide/legacy/macide.c
index 4c0079ad52ac..c211fc78345d 100644
--- a/drivers/ide/legacy/macide.c
+++ b/drivers/ide/legacy/macide.c
@@ -102,21 +102,21 @@ void macide_init(void)
102 0, 0, macide_ack_intr, 102 0, 0, macide_ack_intr,
103// quadra_ide_iops, 103// quadra_ide_iops,
104 IRQ_NUBUS_F); 104 IRQ_NUBUS_F);
105 index = ide_register_hw(&hw, &hwif); 105 index = ide_register_hw(&hw, 1, &hwif);
106 break; 106 break;
107 case MAC_IDE_PB: 107 case MAC_IDE_PB:
108 ide_setup_ports(&hw, IDE_BASE, macide_offsets, 108 ide_setup_ports(&hw, IDE_BASE, macide_offsets,
109 0, 0, macide_ack_intr, 109 0, 0, macide_ack_intr,
110// macide_pb_iops, 110// macide_pb_iops,
111 IRQ_NUBUS_C); 111 IRQ_NUBUS_C);
112 index = ide_register_hw(&hw, &hwif); 112 index = ide_register_hw(&hw, 1, &hwif);
113 break; 113 break;
114 case MAC_IDE_BABOON: 114 case MAC_IDE_BABOON:
115 ide_setup_ports(&hw, BABOON_BASE, macide_offsets, 115 ide_setup_ports(&hw, BABOON_BASE, macide_offsets,
116 0, 0, NULL, 116 0, 0, NULL,
117// macide_baboon_iops, 117// macide_baboon_iops,
118 IRQ_BABOON_1); 118 IRQ_BABOON_1);
119 index = ide_register_hw(&hw, &hwif); 119 index = ide_register_hw(&hw, 1, &hwif);
120 if (index == -1) break; 120 if (index == -1) break;
121 if (macintosh_config->ident == MAC_MODEL_PB190) { 121 if (macintosh_config->ident == MAC_MODEL_PB190) {
122 122
diff --git a/drivers/ide/legacy/q40ide.c b/drivers/ide/legacy/q40ide.c
index 74f08124eabb..e628a983ce33 100644
--- a/drivers/ide/legacy/q40ide.c
+++ b/drivers/ide/legacy/q40ide.c
@@ -142,7 +142,7 @@ void q40ide_init(void)
142 0, NULL, 142 0, NULL,
143// m68kide_iops, 143// m68kide_iops,
144 q40ide_default_irq(pcide_bases[i])); 144 q40ide_default_irq(pcide_bases[i]));
145 index = ide_register_hw(&hw, &hwif); 145 index = ide_register_hw(&hw, 1, &hwif);
146 // **FIXME** 146 // **FIXME**
147 if (index != -1) 147 if (index != -1)
148 hwif->mmio = 1; 148 hwif->mmio = 1;
diff --git a/drivers/ide/legacy/qd65xx.c b/drivers/ide/legacy/qd65xx.c
index 2fb8f50f1293..d1414a75b523 100644
--- a/drivers/ide/legacy/qd65xx.c
+++ b/drivers/ide/legacy/qd65xx.c
@@ -427,7 +427,7 @@ static int __init qd_probe(int base)
427 qd_setup(hwif, base, config, QD6500_DEF_DATA, QD6500_DEF_DATA, 427 qd_setup(hwif, base, config, QD6500_DEF_DATA, QD6500_DEF_DATA,
428 &qd6500_tune_drive); 428 &qd6500_tune_drive);
429 429
430 create_proc_ide_interfaces(); 430 ide_proc_register_port(hwif);
431 431
432 return 1; 432 return 1;
433 } 433 }
@@ -459,7 +459,7 @@ static int __init qd_probe(int base)
459 &qd6580_tune_drive); 459 &qd6580_tune_drive);
460 qd_write_reg(QD_DEF_CONTR,QD_CONTROL_PORT); 460 qd_write_reg(QD_DEF_CONTR,QD_CONTROL_PORT);
461 461
462 create_proc_ide_interfaces(); 462 ide_proc_register_port(hwif);
463 463
464 return 1; 464 return 1;
465 } else { 465 } else {
@@ -479,7 +479,8 @@ static int __init qd_probe(int base)
479 &qd6580_tune_drive); 479 &qd6580_tune_drive);
480 qd_write_reg(QD_DEF_CONTR,QD_CONTROL_PORT); 480 qd_write_reg(QD_DEF_CONTR,QD_CONTROL_PORT);
481 481
482 create_proc_ide_interfaces(); 482 ide_proc_register_port(hwif);
483 ide_proc_register_port(mate);
483 484
484 return 0; /* no other qd65xx possible */ 485 return 0; /* no other qd65xx possible */
485 } 486 }
diff --git a/drivers/ide/legacy/umc8672.c b/drivers/ide/legacy/umc8672.c
index ca7974455578..ddc403a0bd82 100644
--- a/drivers/ide/legacy/umc8672.c
+++ b/drivers/ide/legacy/umc8672.c
@@ -160,7 +160,8 @@ static int __init umc8672_probe(void)
160 probe_hwif_init(hwif); 160 probe_hwif_init(hwif);
161 probe_hwif_init(mate); 161 probe_hwif_init(mate);
162 162
163 create_proc_ide_interfaces(); 163 ide_proc_register_port(hwif);
164 ide_proc_register_port(mate);
164 165
165 return 0; 166 return 0;
166} 167}
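
Each legacy probe now pairs probe_hwif_init() with an explicit ide_proc_register_port() per interface. A plausible sketch of the ide.h glue this relies on (assumption: the proc helpers compile to empty inlines when CONFIG_IDE_PROC_FS is off, so the extra calls cost nothing in minimal builds):

	#ifdef CONFIG_IDE_PROC_FS
	void ide_proc_register_port(ide_hwif_t *hwif);
	void ide_proc_unregister_port(ide_hwif_t *hwif);
	#else
	static inline void ide_proc_register_port(ide_hwif_t *hwif) { }
	static inline void ide_proc_unregister_port(ide_hwif_t *hwif) { }
	#endif
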
diff --git a/drivers/ide/mips/au1xxx-ide.c b/drivers/ide/mips/au1xxx-ide.c
index d54d9fe92a7d..ca95e990862e 100644
--- a/drivers/ide/mips/au1xxx-ide.c
+++ b/drivers/ide/mips/au1xxx-ide.c
@@ -760,6 +760,9 @@ static int au_ide_probe(struct device *dev)
760#endif 760#endif
761 761
762 probe_hwif_init(hwif); 762 probe_hwif_init(hwif);
763
764 ide_proc_register_port(hwif);
765
763 dev_set_drvdata(dev, hwif); 766 dev_set_drvdata(dev, hwif);
764 767
765 printk(KERN_INFO "Au1xxx IDE(builtin) configured for %s\n", mode ); 768 printk(KERN_INFO "Au1xxx IDE(builtin) configured for %s\n", mode );
diff --git a/drivers/ide/mips/swarm.c b/drivers/ide/mips/swarm.c
index 81fa06851b27..6e935d7c63fd 100644
--- a/drivers/ide/mips/swarm.c
+++ b/drivers/ide/mips/swarm.c
@@ -129,6 +129,9 @@ static int __devinit swarm_ide_probe(struct device *dev)
129 hwif->irq = hwif->hw.irq; 129 hwif->irq = hwif->hw.irq;
130 130
131 probe_hwif_init(hwif); 131 probe_hwif_init(hwif);
132
133 ide_proc_register_port(hwif);
134
132 dev_set_drvdata(dev, hwif); 135 dev_set_drvdata(dev, hwif);
133 136
134 return 0; 137 return 0;
diff --git a/drivers/ide/pci/aec62xx.c b/drivers/ide/pci/aec62xx.c
index 73bdf64dbbfc..b173bc66ce1e 100644
--- a/drivers/ide/pci/aec62xx.c
+++ b/drivers/ide/pci/aec62xx.c
@@ -87,38 +87,12 @@ static u8 pci_bus_clock_list_ultra (u8 speed, struct chipset_bus_clock_list_entr
87 return chipset_table->ultra_settings; 87 return chipset_table->ultra_settings;
88} 88}
89 89
90static u8 aec62xx_ratemask (ide_drive_t *drive)
91{
92 ide_hwif_t *hwif = HWIF(drive);
93 u8 mode;
94
95 switch(hwif->pci_dev->device) {
96 case PCI_DEVICE_ID_ARTOP_ATP865:
97 case PCI_DEVICE_ID_ARTOP_ATP865R:
98 mode = (inb(hwif->channel ?
99 hwif->mate->dma_status :
100 hwif->dma_status) & 0x10) ? 4 : 3;
101 break;
102 case PCI_DEVICE_ID_ARTOP_ATP860:
103 case PCI_DEVICE_ID_ARTOP_ATP860R:
104 mode = 2;
105 break;
106 case PCI_DEVICE_ID_ARTOP_ATP850UF:
107 default:
108 return 1;
109 }
110
111 if (!eighty_ninty_three(drive))
112 mode = min(mode, (u8)1);
113 return mode;
114}
115
116static int aec6210_tune_chipset (ide_drive_t *drive, u8 xferspeed) 90static int aec6210_tune_chipset (ide_drive_t *drive, u8 xferspeed)
117{ 91{
118 ide_hwif_t *hwif = HWIF(drive); 92 ide_hwif_t *hwif = HWIF(drive);
119 struct pci_dev *dev = hwif->pci_dev; 93 struct pci_dev *dev = hwif->pci_dev;
120 u16 d_conf = 0; 94 u16 d_conf = 0;
121 u8 speed = ide_rate_filter(aec62xx_ratemask(drive), xferspeed); 95 u8 speed = ide_rate_filter(drive, xferspeed);
122 u8 ultra = 0, ultra_conf = 0; 96 u8 ultra = 0, ultra_conf = 0;
123 u8 tmp0 = 0, tmp1 = 0, tmp2 = 0; 97 u8 tmp0 = 0, tmp1 = 0, tmp2 = 0;
124 unsigned long flags; 98 unsigned long flags;
@@ -145,7 +119,7 @@ static int aec6260_tune_chipset (ide_drive_t *drive, u8 xferspeed)
145{ 119{
146 ide_hwif_t *hwif = HWIF(drive); 120 ide_hwif_t *hwif = HWIF(drive);
147 struct pci_dev *dev = hwif->pci_dev; 121 struct pci_dev *dev = hwif->pci_dev;
148 u8 speed = ide_rate_filter(aec62xx_ratemask(drive), xferspeed); 122 u8 speed = ide_rate_filter(drive, xferspeed);
149 u8 unit = (drive->select.b.unit & 0x01); 123 u8 unit = (drive->select.b.unit & 0x01);
150 u8 tmp1 = 0, tmp2 = 0; 124 u8 tmp1 = 0, tmp2 = 0;
151 u8 ultra = 0, drive_conf = 0, ultra_conf = 0; 125 u8 ultra = 0, drive_conf = 0, ultra_conf = 0;
@@ -181,17 +155,6 @@ static int aec62xx_tune_chipset (ide_drive_t *drive, u8 speed)
181 } 155 }
182} 156}
183 157
184static int config_chipset_for_dma (ide_drive_t *drive)
185{
186 u8 speed = ide_dma_speed(drive, aec62xx_ratemask(drive));
187
188 if (!(speed))
189 return 0;
190
191 (void) aec62xx_tune_chipset(drive, speed);
192 return ide_dma_enable(drive);
193}
194
195static void aec62xx_tune_drive (ide_drive_t *drive, u8 pio) 158static void aec62xx_tune_drive (ide_drive_t *drive, u8 pio)
196{ 159{
197 pio = ide_get_best_pio_mode(drive, pio, 4, NULL); 160 pio = ide_get_best_pio_mode(drive, pio, 4, NULL);
@@ -200,7 +163,7 @@ static void aec62xx_tune_drive (ide_drive_t *drive, u8 pio)
200 163
201static int aec62xx_config_drive_xfer_rate (ide_drive_t *drive) 164static int aec62xx_config_drive_xfer_rate (ide_drive_t *drive)
202{ 165{
203 if (ide_use_dma(drive) && config_chipset_for_dma(drive)) 166 if (ide_tune_dma(drive))
204 return 0; 167 return 0;
205 168
206 if (ide_use_fast_pio(drive)) 169 if (ide_use_fast_pio(drive))
@@ -261,11 +224,13 @@ static unsigned int __devinit init_chipset_aec62xx(struct pci_dev *dev, const ch
261 224
262static void __devinit init_hwif_aec62xx(ide_hwif_t *hwif) 225static void __devinit init_hwif_aec62xx(ide_hwif_t *hwif)
263{ 226{
227 struct pci_dev *dev = hwif->pci_dev;
228
264 hwif->autodma = 0; 229 hwif->autodma = 0;
265 hwif->tuneproc = &aec62xx_tune_drive; 230 hwif->tuneproc = &aec62xx_tune_drive;
266 hwif->speedproc = &aec62xx_tune_chipset; 231 hwif->speedproc = &aec62xx_tune_chipset;
267 232
268 if (hwif->pci_dev->device == PCI_DEVICE_ID_ARTOP_ATP850UF) 233 if (dev->device == PCI_DEVICE_ID_ARTOP_ATP850UF)
269 hwif->serialized = hwif->channel; 234 hwif->serialized = hwif->channel;
270 235
271 if (hwif->mate) 236 if (hwif->mate)
@@ -277,7 +242,15 @@ static void __devinit init_hwif_aec62xx(ide_hwif_t *hwif)
277 return; 242 return;
278 } 243 }
279 244
280 hwif->ultra_mask = 0x7f; 245 hwif->ultra_mask = hwif->cds->udma_mask;
246
247 /* atp865 and atp865r */
248 if (hwif->ultra_mask == 0x3f) {
249 /* check bit 0x10 of DMA status register */
250 if (inb(pci_resource_start(dev, 4) + 2) & 0x10)
251 hwif->ultra_mask = 0x7f; /* udma0-6 */
252 }
253
281 hwif->mwdma_mask = 0x07; 254 hwif->mwdma_mask = 0x07;
282 255
283 hwif->ide_dma_check = &aec62xx_config_drive_xfer_rate; 256 hwif->ide_dma_check = &aec62xx_config_drive_xfer_rate;
@@ -344,6 +317,7 @@ static ide_pci_device_t aec62xx_chipsets[] __devinitdata = {
344 .autodma = AUTODMA, 317 .autodma = AUTODMA,
345 .enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}}, 318 .enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}},
346 .bootable = OFF_BOARD, 319 .bootable = OFF_BOARD,
320 .udma_mask = 0x07, /* udma0-2 */
347 },{ /* 1 */ 321 },{ /* 1 */
348 .name = "AEC6260", 322 .name = "AEC6260",
349 .init_setup = init_setup_aec62xx, 323 .init_setup = init_setup_aec62xx,
@@ -353,6 +327,7 @@ static ide_pci_device_t aec62xx_chipsets[] __devinitdata = {
353 .channels = 2, 327 .channels = 2,
354 .autodma = NOAUTODMA, 328 .autodma = NOAUTODMA,
355 .bootable = OFF_BOARD, 329 .bootable = OFF_BOARD,
330 .udma_mask = 0x1f, /* udma0-4 */
356 },{ /* 2 */ 331 },{ /* 2 */
357 .name = "AEC6260R", 332 .name = "AEC6260R",
358 .init_setup = init_setup_aec62xx, 333 .init_setup = init_setup_aec62xx,
@@ -363,6 +338,7 @@ static ide_pci_device_t aec62xx_chipsets[] __devinitdata = {
363 .autodma = AUTODMA, 338 .autodma = AUTODMA,
364 .enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}}, 339 .enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}},
365 .bootable = NEVER_BOARD, 340 .bootable = NEVER_BOARD,
341 .udma_mask = 0x1f, /* udma0-4 */
366 },{ /* 3 */ 342 },{ /* 3 */
367 .name = "AEC6X80", 343 .name = "AEC6X80",
368 .init_setup = init_setup_aec6x80, 344 .init_setup = init_setup_aec6x80,
@@ -372,6 +348,7 @@ static ide_pci_device_t aec62xx_chipsets[] __devinitdata = {
372 .channels = 2, 348 .channels = 2,
373 .autodma = AUTODMA, 349 .autodma = AUTODMA,
374 .bootable = OFF_BOARD, 350 .bootable = OFF_BOARD,
351 .udma_mask = 0x3f, /* udma0-5 */
375 },{ /* 4 */ 352 },{ /* 4 */
376 .name = "AEC6X80R", 353 .name = "AEC6X80R",
377 .init_setup = init_setup_aec6x80, 354 .init_setup = init_setup_aec6x80,
@@ -382,6 +359,7 @@ static ide_pci_device_t aec62xx_chipsets[] __devinitdata = {
382 .autodma = AUTODMA, 359 .autodma = AUTODMA,
383 .enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}}, 360 .enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}},
384 .bootable = OFF_BOARD, 361 .bootable = OFF_BOARD,
362 .udma_mask = 0x3f, /* udma0-5 */
385 } 363 }
386}; 364};
387 365
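
The table-driven udma_mask fields use one bit per UDMA mode, as the inline comments spell out: 0x07 covers UDMA0-2, 0x1f UDMA0-4, 0x3f UDMA0-5, 0x7f UDMA0-6 (and the old 0x80 "disable all" sentinels disappear from init_hwif_data() in the ide.c hunk above). A self-contained sketch of decoding the highest mode from such a mask:

	#include <stdio.h>

	/* Highest UDMA mode encoded in a mask, or -1 if the mask is empty.
	 * Bit i set means UDMA mode i is supported. */
	static int highest_udma_mode(unsigned char mask)
	{
		int mode = -1;

		while (mask) {
			mask >>= 1;
			mode++;
		}
		return mode;
	}

	int main(void)
	{
		printf("0x3f -> UDMA%d\n", highest_udma_mode(0x3f)); /* UDMA5 */
		printf("0x07 -> UDMA%d\n", highest_udma_mode(0x07)); /* UDMA2 */
		return 0;
	}
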
diff --git a/drivers/ide/pci/alim15x3.c b/drivers/ide/pci/alim15x3.c
index 946a12746cb5..428efdae0c7b 100644
--- a/drivers/ide/pci/alim15x3.c
+++ b/drivers/ide/pci/alim15x3.c
@@ -50,7 +50,7 @@ static u8 m5229_revision;
50static u8 chip_is_1543c_e; 50static u8 chip_is_1543c_e;
51static struct pci_dev *isa_dev; 51static struct pci_dev *isa_dev;
52 52
53#if defined(DISPLAY_ALI_TIMINGS) && defined(CONFIG_PROC_FS) 53#if defined(DISPLAY_ALI_TIMINGS) && defined(CONFIG_IDE_PROC_FS)
54#include <linux/stat.h> 54#include <linux/stat.h>
55#include <linux/proc_fs.h> 55#include <linux/proc_fs.h>
56 56
@@ -278,7 +278,7 @@ static int ali_get_info (char *buffer, char **addr, off_t offset, int count)
278 278
279 return p-buffer; /* => must be less than 4k! */ 279 return p-buffer; /* => must be less than 4k! */
280} 280}
281#endif /* defined(DISPLAY_ALI_TIMINGS) && defined(CONFIG_PROC_FS) */ 281#endif /* defined(DISPLAY_ALI_TIMINGS) && defined(CONFIG_IDE_PROC_FS) */
282 282
283/** 283/**
284 * ali15x3_tune_pio - set up chipset for PIO mode 284 * ali15x3_tune_pio - set up chipset for PIO mode
@@ -378,74 +378,31 @@ static void ali15x3_tune_drive (ide_drive_t *drive, u8 pio)
378} 378}
379 379
380/** 380/**
381 * ali15x3_can_ultra - check for ultra DMA support 381 * ali_udma_filter - compute UDMA mask
382 * @drive: drive to do the check 382 * @drive: IDE device
383 * 383 *
384 * Check the drive and controller revisions. Return 0 if UDMA is 384 * Return available UDMA modes.
385 * not available, or 1 if UDMA can be used. The actual rules for 385 *
386 * the ALi are 386 * The actual rules for the ALi are:
387 * No UDMA on revisions <= 0x20 387 * No UDMA on revisions <= 0x20
388 * Disk only for revisions < 0xC2 388 * Disk only for revisions < 0xC2
389 * Not WDC drives for revisions < 0xC2 389 * Not WDC drives for revisions < 0xC2
390 * 390 *
391 * FIXME: WDC ifdef needs to die 391 * FIXME: WDC ifdef needs to die
392 */ 392 */
393
394static u8 ali15x3_can_ultra (ide_drive_t *drive)
395{
396#ifndef CONFIG_WDC_ALI15X3
397 struct hd_driveid *id = drive->id;
398#endif /* CONFIG_WDC_ALI15X3 */
399 393
400 if (m5229_revision <= 0x20) { 394static u8 ali_udma_filter(ide_drive_t *drive)
401 return 0;
402 } else if ((m5229_revision < 0xC2) &&
403#ifndef CONFIG_WDC_ALI15X3
404 ((chip_is_1543c_e && strstr(id->model, "WDC ")) ||
405 (drive->media!=ide_disk))) {
406#else /* CONFIG_WDC_ALI15X3 */
407 (drive->media!=ide_disk)) {
408#endif /* CONFIG_WDC_ALI15X3 */
409 return 0;
410 } else {
411 return 1;
412 }
413}
414
415/**
416 * ali15x3_ratemask - generate DMA mode list
417 * @drive: drive to compute against
418 *
419 * Generate a list of the available DMA modes for the drive.
420 * FIXME: this function contains lots of bogus masking we can dump
421 *
422 * Return the highest available mode (UDMA33, UDMA66, UDMA100,..)
423 */
424
425static u8 ali15x3_ratemask (ide_drive_t *drive)
426{ 395{
427 u8 mode = 0, can_ultra = ali15x3_can_ultra(drive); 396 if (m5229_revision > 0x20 && m5229_revision < 0xC2) {
428 397 if (drive->media != ide_disk)
429 if (m5229_revision > 0xC4 && can_ultra) { 398 return 0;
430 mode = 4; 399#ifndef CONFIG_WDC_ALI15X3
431 } else if (m5229_revision == 0xC4 && can_ultra) { 400 if (chip_is_1543c_e && strstr(drive->id->model, "WDC "))
432 mode = 3; 401 return 0;
433 } else if (m5229_revision >= 0xC2 && can_ultra) { 402#endif
434 mode = 2;
435 } else if (can_ultra) {
436 return 1;
437 } else {
438 return 0;
439 } 403 }
440 404
441 /* 405 return drive->hwif->ultra_mask;
442 * If the drive sees no suitable cable then UDMA 33
443 * is the highest permitted mode
444 */
445
446 if (!eighty_ninty_three(drive))
447 mode = min(mode, (u8)1);
448 return mode;
449} 406}
450 407
451/** 408/**
@@ -461,7 +418,7 @@ static int ali15x3_tune_chipset (ide_drive_t *drive, u8 xferspeed)
461{ 418{
462 ide_hwif_t *hwif = HWIF(drive); 419 ide_hwif_t *hwif = HWIF(drive);
463 struct pci_dev *dev = hwif->pci_dev; 420 struct pci_dev *dev = hwif->pci_dev;
464 u8 speed = ide_rate_filter(ali15x3_ratemask(drive), xferspeed); 421 u8 speed = ide_rate_filter(drive, xferspeed);
465 u8 speed1 = speed; 422 u8 speed1 = speed;
466 u8 unit = (drive->select.b.unit & 0x01); 423 u8 unit = (drive->select.b.unit & 0x01);
467 u8 tmpbyte = 0x00; 424 u8 tmpbyte = 0x00;
@@ -511,7 +468,7 @@ static int ali15x3_tune_chipset (ide_drive_t *drive, u8 xferspeed)
511 468
512static int config_chipset_for_dma (ide_drive_t *drive) 469static int config_chipset_for_dma (ide_drive_t *drive)
513{ 470{
514 u8 speed = ide_dma_speed(drive, ali15x3_ratemask(drive)); 471 u8 speed = ide_max_dma_mode(drive);
515 472
516 if (!(speed)) 473 if (!(speed))
517 return 0; 474 return 0;
@@ -609,13 +566,13 @@ static unsigned int __devinit init_chipset_ali15x3 (struct pci_dev *dev, const c
609 566
610 isa_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL); 567 isa_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
611 568
612#if defined(DISPLAY_ALI_TIMINGS) && defined(CONFIG_PROC_FS) 569#if defined(DISPLAY_ALI_TIMINGS) && defined(CONFIG_IDE_PROC_FS)
613 if (!ali_proc) { 570 if (!ali_proc) {
614 ali_proc = 1; 571 ali_proc = 1;
615 bmide_dev = dev; 572 bmide_dev = dev;
616 ide_pci_create_host_proc("ali", ali_get_info); 573 ide_pci_create_host_proc("ali", ali_get_info);
617 } 574 }
618#endif /* defined(DISPLAY_ALI_TIMINGS) && defined(CONFIG_PROC_FS) */ 575#endif /* defined(DISPLAY_ALI_TIMINGS) && defined(CONFIG_IDE_PROC_FS) */
619 576
620 local_irq_save(flags); 577 local_irq_save(flags);
621 578
@@ -771,6 +728,7 @@ static void __devinit init_hwif_common_ali15x3 (ide_hwif_t *hwif)
771 hwif->autodma = 0; 728 hwif->autodma = 0;
772 hwif->tuneproc = &ali15x3_tune_drive; 729 hwif->tuneproc = &ali15x3_tune_drive;
773 hwif->speedproc = &ali15x3_tune_chipset; 730 hwif->speedproc = &ali15x3_tune_chipset;
731 hwif->udma_filter = &ali_udma_filter;
774 732
775 /* don't use LBA48 DMA on ALi devices before rev 0xC5 */ 733 /* don't use LBA48 DMA on ALi devices before rev 0xC5 */
776 hwif->no_lba48_dma = (m5229_revision <= 0xC4) ? 1 : 0; 734 hwif->no_lba48_dma = (m5229_revision <= 0xC4) ? 1 : 0;
@@ -783,8 +741,17 @@ static void __devinit init_hwif_common_ali15x3 (ide_hwif_t *hwif)
783 741
784 hwif->atapi_dma = 1; 742 hwif->atapi_dma = 1;
785 743
786 if (m5229_revision > 0x20) 744 if (m5229_revision <= 0x20)
787 hwif->ultra_mask = 0x7f; 745 hwif->ultra_mask = 0x00; /* no udma */
746 else if (m5229_revision < 0xC2)
747 hwif->ultra_mask = 0x07; /* udma0-2 */
748 else if (m5229_revision == 0xC2 || m5229_revision == 0xC3)
749 hwif->ultra_mask = 0x1f; /* udma0-4 */
750 else if (m5229_revision == 0xC4)
751 hwif->ultra_mask = 0x3f; /* udma0-5 */
752 else
753 hwif->ultra_mask = 0x7f; /* udma0-6 */
754
788 hwif->mwdma_mask = 0x07; 755 hwif->mwdma_mask = 0x07;
789 hwif->swdma_mask = 0x07; 756 hwif->swdma_mask = 0x07;
790 757
diff --git a/drivers/ide/pci/amd74xx.c b/drivers/ide/pci/amd74xx.c
index 7989bdd842a2..becb1a5648b0 100644
--- a/drivers/ide/pci/amd74xx.c
+++ b/drivers/ide/pci/amd74xx.c
@@ -92,7 +92,7 @@ static unsigned char amd_cyc2udma[] = { 6, 6, 5, 4, 0, 1, 1, 2, 2, 3, 3, 3, 3, 3
92 * AMD /proc entry. 92 * AMD /proc entry.
93 */ 93 */
94 94
95#ifdef CONFIG_PROC_FS 95#ifdef CONFIG_IDE_PROC_FS
96 96
97#include <linux/stat.h> 97#include <linux/stat.h>
98#include <linux/proc_fs.h> 98#include <linux/proc_fs.h>
@@ -402,14 +402,14 @@ static unsigned int __devinit init_chipset_amd74xx(struct pci_dev *dev, const ch
402 * Register /proc/ide/amd74xx entry 402 * Register /proc/ide/amd74xx entry
403 */ 403 */
404 404
405#if defined(DISPLAY_AMD_TIMINGS) && defined(CONFIG_PROC_FS) 405#if defined(DISPLAY_AMD_TIMINGS) && defined(CONFIG_IDE_PROC_FS)
406 if (!amd74xx_proc) { 406 if (!amd74xx_proc) {
407 amd_base = pci_resource_start(dev, 4); 407 amd_base = pci_resource_start(dev, 4);
408 bmide_dev = dev; 408 bmide_dev = dev;
409 ide_pci_create_host_proc("amd74xx", amd74xx_get_info); 409 ide_pci_create_host_proc("amd74xx", amd74xx_get_info);
410 amd74xx_proc = 1; 410 amd74xx_proc = 1;
411 } 411 }
412#endif /* DISPLAY_AMD_TIMINGS && CONFIG_PROC_FS */ 412#endif /* DISPLAY_AMD_TIMINGS && CONFIG_IDE_PROC_FS */
413 413
414 return dev->irq; 414 return dev->irq;
415} 415}
diff --git a/drivers/ide/pci/atiixp.c b/drivers/ide/pci/atiixp.c
index 2d48af32e3f4..0e52ad722a72 100644
--- a/drivers/ide/pci/atiixp.c
+++ b/drivers/ide/pci/atiixp.c
@@ -49,22 +49,6 @@ static int save_mdma_mode[4];
49static DEFINE_SPINLOCK(atiixp_lock); 49static DEFINE_SPINLOCK(atiixp_lock);
50 50
51/** 51/**
52 * atiixp_ratemask - compute rate mask for ATIIXP IDE
53 * @drive: IDE drive to compute for
54 *
55 * Returns the available modes for the ATIIXP IDE controller.
56 */
57
58static u8 atiixp_ratemask(ide_drive_t *drive)
59{
60 u8 mode = 3;
61
62 if (!eighty_ninty_three(drive))
63 mode = min(mode, (u8)1);
64 return mode;
65}
66
67/**
68 * atiixp_dma_2_pio - return the PIO mode matching DMA 52 * atiixp_dma_2_pio - return the PIO mode matching DMA
69 * @xfer_rate: transfer speed 53 * @xfer_rate: transfer speed
70 * 54 *
@@ -189,7 +173,7 @@ static int atiixp_speedproc(ide_drive_t *drive, u8 xferspeed)
189 u16 tmp16; 173 u16 tmp16;
190 u8 speed, pio; 174 u8 speed, pio;
191 175
192 speed = ide_rate_filter(atiixp_ratemask(drive), xferspeed); 176 speed = ide_rate_filter(drive, xferspeed);
193 177
194 spin_lock_irqsave(&atiixp_lock, flags); 178 spin_lock_irqsave(&atiixp_lock, flags);
195 179
@@ -223,26 +207,6 @@ static int atiixp_speedproc(ide_drive_t *drive, u8 xferspeed)
223} 207}
224 208
225/** 209/**
226 * atiixp_config_drive_for_dma - configure drive for DMA
227 * @drive: IDE drive to configure
228 *
229 * Set up a ATIIXP interface channel for the best available speed.
230 * We prefer UDMA if it is available and then MWDMA. If DMA is
231 * not available we switch to PIO and return 0.
232 */
233
234static int atiixp_config_drive_for_dma(ide_drive_t *drive)
235{
236 u8 speed = ide_dma_speed(drive, atiixp_ratemask(drive));
237
238 if (!speed)
239 return 0;
240
241 (void) atiixp_speedproc(drive, speed);
242 return ide_dma_enable(drive);
243}
244
245/**
246 * atiixp_dma_check - set up an IDE device 210 * atiixp_dma_check - set up an IDE device
247 * @drive: IDE drive to configure 211 * @drive: IDE drive to configure
248 * 212 *
@@ -256,7 +220,7 @@ static int atiixp_dma_check(ide_drive_t *drive)
256 220
257 drive->init_speed = 0; 221 drive->init_speed = 0;
258 222
259 if (ide_use_dma(drive) && atiixp_config_drive_for_dma(drive)) 223 if (ide_tune_dma(drive))
260 return 0; 224 return 0;
261 225
262 if (ide_use_fast_pio(drive)) { 226 if (ide_use_fast_pio(drive)) {
diff --git a/drivers/ide/pci/cmd64x.c b/drivers/ide/pci/cmd64x.c
index 77f51ab6d439..61ea96b5555c 100644
--- a/drivers/ide/pci/cmd64x.c
+++ b/drivers/ide/pci/cmd64x.c
@@ -74,7 +74,7 @@
74#define UDIDETCR1 0x7B 74#define UDIDETCR1 0x7B
75#define DTPR1 0x7C 75#define DTPR1 0x7C
76 76
77#if defined(DISPLAY_CMD64X_TIMINGS) && defined(CONFIG_PROC_FS) 77#if defined(DISPLAY_CMD64X_TIMINGS) && defined(CONFIG_IDE_PROC_FS)
78#include <linux/stat.h> 78#include <linux/stat.h>
79#include <linux/proc_fs.h> 79#include <linux/proc_fs.h>
80 80
@@ -165,7 +165,7 @@ static int cmd64x_get_info (char *buffer, char **addr, off_t offset, int count)
165 return p-buffer; /* => must be less than 4k! */ 165 return p-buffer; /* => must be less than 4k! */
166} 166}
167 167
168#endif /* defined(DISPLAY_CMD64X_TIMINGS) && defined(CONFIG_PROC_FS) */ 168#endif /* defined(DISPLAY_CMD64X_TIMINGS) && defined(CONFIG_IDE_PROC_FS) */
169 169
170static u8 quantize_timing(int timing, int quant) 170static u8 quantize_timing(int timing, int quant)
171{ 171{
@@ -292,55 +292,6 @@ static void cmd64x_tune_drive (ide_drive_t *drive, u8 pio)
292 (void) ide_config_drive_speed(drive, XFER_PIO_0 + pio); 292 (void) ide_config_drive_speed(drive, XFER_PIO_0 + pio);
293} 293}
294 294
295static u8 cmd64x_ratemask (ide_drive_t *drive)
296{
297 struct pci_dev *dev = HWIF(drive)->pci_dev;
298 u8 mode = 0;
299
300 switch(dev->device) {
301 case PCI_DEVICE_ID_CMD_649:
302 mode = 3;
303 break;
304 case PCI_DEVICE_ID_CMD_648:
305 mode = 2;
306 break;
307 case PCI_DEVICE_ID_CMD_643:
308 return 0;
309
310 case PCI_DEVICE_ID_CMD_646:
311 {
312 unsigned int class_rev = 0;
313 pci_read_config_dword(dev,
314 PCI_CLASS_REVISION, &class_rev);
315 class_rev &= 0xff;
316 /*
317 * UltraDMA only supported on PCI646U and PCI646U2, which
318 * correspond to revisions 0x03, 0x05 and 0x07 respectively.
319 * Actually, although the CMD tech support people won't
320 * tell me the details, the 0x03 revision cannot support
321 * UDMA correctly without hardware modifications, and even
322 * then it only works with Quantum disks due to some
323 * hold time assumptions in the 646U part which are fixed
324 * in the 646U2.
325 *
326 * So we only do UltraDMA on revision 0x05 and 0x07 chipsets.
327 */
328 switch(class_rev) {
329 case 0x07:
330 case 0x05:
331 return 1;
332 case 0x03:
333 case 0x01:
334 default:
335 return 0;
336 }
337 }
338 }
339 if (!eighty_ninty_three(drive))
340 mode = min(mode, (u8)1);
341 return mode;
342}
343
344static int cmd64x_tune_chipset (ide_drive_t *drive, u8 speed) 295static int cmd64x_tune_chipset (ide_drive_t *drive, u8 speed)
345{ 296{
346 ide_hwif_t *hwif = HWIF(drive); 297 ide_hwif_t *hwif = HWIF(drive);
@@ -348,7 +299,7 @@ static int cmd64x_tune_chipset (ide_drive_t *drive, u8 speed)
348 u8 unit = drive->dn & 0x01; 299 u8 unit = drive->dn & 0x01;
349 u8 regU = 0, pciU = hwif->channel ? UDIDETCR1 : UDIDETCR0; 300 u8 regU = 0, pciU = hwif->channel ? UDIDETCR1 : UDIDETCR0;
350 301
351 speed = ide_rate_filter(cmd64x_ratemask(drive), speed); 302 speed = ide_rate_filter(drive, speed);
352 303
353 if (speed >= XFER_SW_DMA_0) { 304 if (speed >= XFER_SW_DMA_0) {
354 (void) pci_read_config_byte(dev, pciU, &regU); 305 (void) pci_read_config_byte(dev, pciU, &regU);
@@ -403,7 +354,7 @@ static int cmd64x_tune_chipset (ide_drive_t *drive, u8 speed)
403 354
404static int config_chipset_for_dma (ide_drive_t *drive) 355static int config_chipset_for_dma (ide_drive_t *drive)
405{ 356{
406 u8 speed = ide_dma_speed(drive, cmd64x_ratemask(drive)); 357 u8 speed = ide_max_dma_mode(drive);
407 358
408 if (!speed) 359 if (!speed)
409 return 0; 360 return 0;
@@ -597,7 +548,7 @@ static unsigned int __devinit init_chipset_cmd64x(struct pci_dev *dev, const cha
597 (void) pci_write_config_byte(dev, UDIDETCR0, 0xf0); 548 (void) pci_write_config_byte(dev, UDIDETCR0, 0xf0);
598#endif /* CONFIG_PPC */ 549#endif /* CONFIG_PPC */
599 550
600#if defined(DISPLAY_CMD64X_TIMINGS) && defined(CONFIG_PROC_FS) 551#if defined(DISPLAY_CMD64X_TIMINGS) && defined(CONFIG_IDE_PROC_FS)
601 552
602 cmd_devs[n_cmd_devs++] = dev; 553 cmd_devs[n_cmd_devs++] = dev;
603 554
@@ -605,7 +556,7 @@ static unsigned int __devinit init_chipset_cmd64x(struct pci_dev *dev, const cha
605 cmd64x_proc = 1; 556 cmd64x_proc = 1;
606 ide_pci_create_host_proc("cmd64x", cmd64x_get_info); 557 ide_pci_create_host_proc("cmd64x", cmd64x_get_info);
607 } 558 }
608#endif /* DISPLAY_CMD64X_TIMINGS && CONFIG_PROC_FS */ 559#endif /* DISPLAY_CMD64X_TIMINGS && CONFIG_IDE_PROC_FS */
609 560
610 return 0; 561 return 0;
611} 562}
@@ -644,15 +595,24 @@ static void __devinit init_hwif_cmd64x(ide_hwif_t *hwif)
644 595
645 hwif->atapi_dma = 1; 596 hwif->atapi_dma = 1;
646 597
647 hwif->ultra_mask = 0x3f; 598 hwif->ultra_mask = hwif->cds->udma_mask;
648 hwif->mwdma_mask = 0x07; 599
600 /*
601 * UltraDMA only supported on PCI646U and PCI646U2, which
602 * correspond to revisions 0x03, 0x05 and 0x07 respectively.
603 * Actually, although the CMD tech support people won't
604 * tell me the details, the 0x03 revision cannot support
605 * UDMA correctly without hardware modifications, and even
606 * then it only works with Quantum disks due to some
607 * hold time assumptions in the 646U part which are fixed
608 * in the 646U2.
609 *
610 * So we only do UltraDMA on revision 0x05 and 0x07 chipsets.
611 */
612 if (dev->device == PCI_DEVICE_ID_CMD_646 && class_rev < 5)
613 hwif->ultra_mask = 0x00;
649 614
650 if (dev->device == PCI_DEVICE_ID_CMD_643) 615 hwif->mwdma_mask = 0x07;
651 hwif->ultra_mask = 0x80;
652 if (dev->device == PCI_DEVICE_ID_CMD_646)
653 hwif->ultra_mask = (class_rev > 0x04) ? 0x07 : 0x80;
654 if (dev->device == PCI_DEVICE_ID_CMD_648)
655 hwif->ultra_mask = 0x1f;
656 616
657 hwif->ide_dma_check = &cmd64x_config_drive_for_dma; 617 hwif->ide_dma_check = &cmd64x_config_drive_for_dma;
658 if (!(hwif->udma_four)) 618 if (!(hwif->udma_four))
@@ -716,6 +676,7 @@ static ide_pci_device_t cmd64x_chipsets[] __devinitdata = {
716 .autodma = AUTODMA, 676 .autodma = AUTODMA,
717 .enablebits = {{0x00,0x00,0x00}, {0x51,0x08,0x08}}, 677 .enablebits = {{0x00,0x00,0x00}, {0x51,0x08,0x08}},
718 .bootable = ON_BOARD, 678 .bootable = ON_BOARD,
679 .udma_mask = 0x00, /* no udma */
719 },{ /* 1 */ 680 },{ /* 1 */
720 .name = "CMD646", 681 .name = "CMD646",
721 .init_setup = init_setup_cmd646, 682 .init_setup = init_setup_cmd646,
@@ -725,6 +686,7 @@ static ide_pci_device_t cmd64x_chipsets[] __devinitdata = {
725 .autodma = AUTODMA, 686 .autodma = AUTODMA,
726 .enablebits = {{0x51,0x04,0x04}, {0x51,0x08,0x08}}, 687 .enablebits = {{0x51,0x04,0x04}, {0x51,0x08,0x08}},
727 .bootable = ON_BOARD, 688 .bootable = ON_BOARD,
689 .udma_mask = 0x07, /* udma0-2 */
728 },{ /* 2 */ 690 },{ /* 2 */
729 .name = "CMD648", 691 .name = "CMD648",
730 .init_setup = init_setup_cmd64x, 692 .init_setup = init_setup_cmd64x,
@@ -734,6 +696,7 @@ static ide_pci_device_t cmd64x_chipsets[] __devinitdata = {
734 .autodma = AUTODMA, 696 .autodma = AUTODMA,
735 .enablebits = {{0x51,0x04,0x04}, {0x51,0x08,0x08}}, 697 .enablebits = {{0x51,0x04,0x04}, {0x51,0x08,0x08}},
736 .bootable = ON_BOARD, 698 .bootable = ON_BOARD,
699 .udma_mask = 0x1f, /* udma0-4 */
737 },{ /* 3 */ 700 },{ /* 3 */
738 .name = "CMD649", 701 .name = "CMD649",
739 .init_setup = init_setup_cmd64x, 702 .init_setup = init_setup_cmd64x,
@@ -743,6 +706,7 @@ static ide_pci_device_t cmd64x_chipsets[] __devinitdata = {
743 .autodma = AUTODMA, 706 .autodma = AUTODMA,
744 .enablebits = {{0x51,0x04,0x04}, {0x51,0x08,0x08}}, 707 .enablebits = {{0x51,0x04,0x04}, {0x51,0x08,0x08}},
745 .bootable = ON_BOARD, 708 .bootable = ON_BOARD,
709 .udma_mask = 0x3f, /* udma0-5 */
746 } 710 }
747}; 711};
748 712
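The cmd64x conversion above sets the pattern for the whole series: the per-driver ratemask helpers and hard-coded hwif->ultra_mask values give way to a per-chipset .udma_mask, a plain bitmap in which bit n advertises UDMA mode n (0x07 = UDMA0-2, 0x1f = UDMA0-4, 0x3f = UDMA0-5). Note that the CMD646 quirk is still applied at init time: even with .udma_mask = 0x07, revisions below 0x05 have the mask zeroed. As a minimal standalone sketch (helper and variable names here are ours, not the kernel's), such a mask resolves to the fastest usable mode like this:

    #include <stdio.h>

    /* Sketch only: bit n of udma_mask advertises UDMA mode n. */
    static int best_udma_mode(unsigned char udma_mask)
    {
            int mode, best = -1;

            for (mode = 0; mode < 8; mode++)
                    if (udma_mask & (1 << mode))
                            best = mode;    /* remember the highest set bit */
            return best;                    /* -1: no UDMA at all */
    }

    int main(void)
    {
            printf("0x07 -> UDMA%d\n", best_udma_mode(0x07)); /* UDMA2 */
            printf("0x3f -> UDMA%d\n", best_udma_mode(0x3f)); /* UDMA5 */
            return 0;
    }
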
diff --git a/drivers/ide/pci/cs5520.c b/drivers/ide/pci/cs5520.c
index 400859a839f7..3b88a3a56116 100644
--- a/drivers/ide/pci/cs5520.c
+++ b/drivers/ide/pci/cs5520.c
@@ -213,6 +213,7 @@ static ide_pci_device_t cyrix_chipsets[] __devinitdata = {
213 213
214static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_device_id *id) 214static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_device_id *id)
215{ 215{
216 ide_hwif_t *hwif = NULL, *mate = NULL;
216 ata_index_t index; 217 ata_index_t index;
217 ide_pci_device_t *d = &cyrix_chipsets[id->driver_data]; 218 ide_pci_device_t *d = &cyrix_chipsets[id->driver_data];
218 219
@@ -239,10 +240,21 @@ static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_devic
239 240
240 ide_pci_setup_ports(dev, d, 14, &index); 241 ide_pci_setup_ports(dev, d, 14, &index);
241 242
242 if((index.b.low & 0xf0) != 0xf0) 243 if ((index.b.low & 0xf0) != 0xf0)
243 probe_hwif_init(&ide_hwifs[index.b.low]); 244 hwif = &ide_hwifs[index.b.low];
244 if((index.b.high & 0xf0) != 0xf0) 245 if ((index.b.high & 0xf0) != 0xf0)
245 probe_hwif_init(&ide_hwifs[index.b.high]); 246 mate = &ide_hwifs[index.b.high];
247
248 if (hwif)
249 probe_hwif_init(hwif);
250 if (mate)
251 probe_hwif_init(mate);
252
253 if (hwif)
254 ide_proc_register_port(hwif);
255 if (mate)
256 ide_proc_register_port(mate);
257
246 return 0; 258 return 0;
247} 259}
248 260
diff --git a/drivers/ide/pci/cs5535.c b/drivers/ide/pci/cs5535.c
index 45f43efbf92c..41925c47ef05 100644
--- a/drivers/ide/pci/cs5535.c
+++ b/drivers/ide/pci/cs5535.c
@@ -127,20 +127,6 @@ static void cs5535_set_speed(ide_drive_t *drive, u8 speed)
127 } 127 }
128} 128}
129 129
130static u8 cs5535_ratemask(ide_drive_t *drive)
131{
132 /* eighty93 will return 1 if it's 80core and capable of
133 exceeding udma2, 0 otherwise. we need ratemask to set
134 the max speed and if we can > udma2 then we return 2
135 which selects speed_max as udma4 which is the 5535's max
136 speed, and 1 selects udma2 which is the max for 40c */
137 if (!eighty_ninty_three(drive))
138 return 1;
139
140 return 2;
141}
142
143
144/**** 130/****
145 * cs5535_set_drive - Configure the drive to the new speed 131 * cs5535_set_drive - Configure the drive to the new speed
146 * @drive: Drive to set up 132 * @drive: Drive to set up
@@ -151,7 +137,7 @@ static u8 cs5535_ratemask(ide_drive_t *drive)
151 */ 137 */
152static int cs5535_set_drive(ide_drive_t *drive, u8 speed) 138static int cs5535_set_drive(ide_drive_t *drive, u8 speed)
153{ 139{
154 speed = ide_rate_filter(cs5535_ratemask(drive), speed); 140 speed = ide_rate_filter(drive, speed);
155 ide_config_drive_speed(drive, speed); 141 ide_config_drive_speed(drive, speed);
156 cs5535_set_speed(drive, speed); 142 cs5535_set_speed(drive, speed);
157 143
@@ -178,28 +164,13 @@ static void cs5535_tuneproc(ide_drive_t *drive, u8 xferspeed)
178 cs5535_set_speed(drive, xferspeed); 164 cs5535_set_speed(drive, xferspeed);
179} 165}
180 166
181static int cs5535_config_drive_for_dma(ide_drive_t *drive)
182{
183 u8 speed;
184
185 speed = ide_dma_speed(drive, cs5535_ratemask(drive));
186
187 /* If no DMA speed was available then let dma_check hit pio */
188 if (!speed) {
189 return 0;
190 }
191
192 cs5535_set_drive(drive, speed);
193 return ide_dma_enable(drive);
194}
195
196static int cs5535_dma_check(ide_drive_t *drive) 167static int cs5535_dma_check(ide_drive_t *drive)
197{ 168{
198 u8 speed; 169 u8 speed;
199 170
200 drive->init_speed = 0; 171 drive->init_speed = 0;
201 172
202 if (ide_use_dma(drive) && cs5535_config_drive_for_dma(drive)) 173 if (ide_tune_dma(drive))
203 return 0; 174 return 0;
204 175
205 if (ide_use_fast_pio(drive)) { 176 if (ide_use_fast_pio(drive)) {
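cs5535 shows the second recurring change: the open-coded ide_use_dma(drive) && config_chipset_for_dma(drive) pair collapses into a single ide_tune_dma() call. Every removed config_chipset_for_dma() performed the same three steps, which is why they can live in the IDE core once. A hedged, self-contained sketch of those steps follows; the stub types and helpers are ours, and the real ide_tune_dma() may differ in detail:

    /* Stub types and helpers so the sketch is self-contained. */
    struct drive { int id; };

    static unsigned char max_dma_mode(struct drive *d) { (void)d; return 0x45; }
    static void set_speed(struct drive *d, unsigned char s) { (void)d; (void)s; }
    static int enable_dma(struct drive *d) { (void)d; return 1; }

    /* The three steps every removed config_chipset_for_dma() performed. */
    static int tune_dma_sketch(struct drive *drive)
    {
            unsigned char speed = max_dma_mode(drive);  /* 0 = none fits */

            if (!speed)
                    return 0;               /* caller falls back to PIO */
            set_speed(drive, speed);        /* program controller + drive */
            return enable_dma(drive);       /* nonzero = DMA is on */
    }
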
diff --git a/drivers/ide/pci/delkin_cb.c b/drivers/ide/pci/delkin_cb.c
index dd7ec37fdeab..46f4a888c037 100644
--- a/drivers/ide/pci/delkin_cb.c
+++ b/drivers/ide/pci/delkin_cb.c
@@ -80,7 +80,7 @@ delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id)
80 hw.irq = dev->irq; 80 hw.irq = dev->irq;
81 hw.chipset = ide_pci; /* this enables IRQ sharing */ 81 hw.chipset = ide_pci; /* this enables IRQ sharing */
82 82
83 rc = ide_register_hw_with_fixup(&hw, &hwif, ide_undecoded_slave); 83 rc = ide_register_hw_with_fixup(&hw, 0, &hwif, ide_undecoded_slave);
84 if (rc < 0) { 84 if (rc < 0) {
85 printk(KERN_ERR "delkin_cb: ide_register_hw failed (%d)\n", rc); 85 printk(KERN_ERR "delkin_cb: ide_register_hw failed (%d)\n", rc);
86 pci_disable_device(dev); 86 pci_disable_device(dev);
diff --git a/drivers/ide/pci/hpt34x.c b/drivers/ide/pci/hpt34x.c
index 924eaa3a5708..2c24c3de8846 100644
--- a/drivers/ide/pci/hpt34x.c
+++ b/drivers/ide/pci/hpt34x.c
@@ -43,15 +43,10 @@
43 43
44#define HPT343_DEBUG_DRIVE_INFO 0 44#define HPT343_DEBUG_DRIVE_INFO 0
45 45
46static u8 hpt34x_ratemask (ide_drive_t *drive)
47{
48 return 1;
49}
50
51static int hpt34x_tune_chipset (ide_drive_t *drive, u8 xferspeed) 46static int hpt34x_tune_chipset (ide_drive_t *drive, u8 xferspeed)
52{ 47{
53 struct pci_dev *dev = HWIF(drive)->pci_dev; 48 struct pci_dev *dev = HWIF(drive)->pci_dev;
54 u8 speed = ide_rate_filter(hpt34x_ratemask(drive), xferspeed); 49 u8 speed = ide_rate_filter(drive, xferspeed);
55 u32 reg1= 0, tmp1 = 0, reg2 = 0, tmp2 = 0; 50 u32 reg1= 0, tmp1 = 0, reg2 = 0, tmp2 = 0;
56 u8 hi_speed, lo_speed; 51 u8 hi_speed, lo_speed;
57 52
@@ -89,29 +84,11 @@ static void hpt34x_tune_drive (ide_drive_t *drive, u8 pio)
89 (void) hpt34x_tune_chipset(drive, (XFER_PIO_0 + pio)); 84 (void) hpt34x_tune_chipset(drive, (XFER_PIO_0 + pio));
90} 85}
91 86
92/*
93 * This allows the configuration of ide_pci chipset registers
94 * for cards that learn about the drive's UDMA, DMA, PIO capabilities
 95 * after the drive is reported by the OS. Initially designed for
96 * HPT343 UDMA chipset by HighPoint|Triones Technologies, Inc.
97 */
98
99static int config_chipset_for_dma (ide_drive_t *drive)
100{
101 u8 speed = ide_dma_speed(drive, hpt34x_ratemask(drive));
102
103 if (!(speed))
104 return 0;
105
106 (void) hpt34x_tune_chipset(drive, speed);
107 return ide_dma_enable(drive);
108}
109
110static int hpt34x_config_drive_xfer_rate (ide_drive_t *drive) 87static int hpt34x_config_drive_xfer_rate (ide_drive_t *drive)
111{ 88{
112 drive->init_speed = 0; 89 drive->init_speed = 0;
113 90
114 if (ide_use_dma(drive) && config_chipset_for_dma(drive)) 91 if (ide_tune_dma(drive))
115#ifndef CONFIG_HPT34X_AUTODMA 92#ifndef CONFIG_HPT34X_AUTODMA
116 return -1; 93 return -1;
117#else 94#else
diff --git a/drivers/ide/pci/hpt366.c b/drivers/ide/pci/hpt366.c
index cf9d344d19f8..fcbc5605b38e 100644
--- a/drivers/ide/pci/hpt366.c
+++ b/drivers/ide/pci/hpt366.c
@@ -514,43 +514,31 @@ static int check_in_drive_list(ide_drive_t *drive, const char **list)
514 return 0; 514 return 0;
515} 515}
516 516
517static u8 hpt3xx_ratemask(ide_drive_t *drive)
518{
519 struct hpt_info *info = pci_get_drvdata(HWIF(drive)->pci_dev);
520 u8 mode = info->max_mode;
521
522 if (!eighty_ninty_three(drive) && mode)
523 mode = min(mode, (u8)1);
524 return mode;
525}
526
527/* 517/*
528 * Note for the future; the SATA hpt37x we must set 518 * Note for the future; the SATA hpt37x we must set
529 * either PIO or UDMA modes 0,4,5 519 * either PIO or UDMA modes 0,4,5
530 */ 520 */
531 521
532static u8 hpt3xx_ratefilter(ide_drive_t *drive, u8 speed) 522static u8 hpt3xx_udma_filter(ide_drive_t *drive)
533{ 523{
534 struct hpt_info *info = pci_get_drvdata(HWIF(drive)->pci_dev); 524 struct hpt_info *info = pci_get_drvdata(HWIF(drive)->pci_dev);
535 u8 chip_type = info->chip_type; 525 u8 chip_type = info->chip_type;
536 u8 mode = hpt3xx_ratemask(drive); 526 u8 mode = info->max_mode;
537 527 u8 mask;
538 if (drive->media != ide_disk)
539 return min(speed, (u8)XFER_PIO_4);
540 528
541 switch (mode) { 529 switch (mode) {
542 case 0x04: 530 case 0x04:
543 speed = min_t(u8, speed, XFER_UDMA_6); 531 mask = 0x7f;
544 break; 532 break;
545 case 0x03: 533 case 0x03:
546 speed = min_t(u8, speed, XFER_UDMA_5); 534 mask = 0x3f;
547 if (chip_type >= HPT374) 535 if (chip_type >= HPT374)
548 break; 536 break;
549 if (!check_in_drive_list(drive, bad_ata100_5)) 537 if (!check_in_drive_list(drive, bad_ata100_5))
550 goto check_bad_ata33; 538 goto check_bad_ata33;
551 /* fall thru */ 539 /* fall thru */
552 case 0x02: 540 case 0x02:
553 speed = min_t(u8, speed, XFER_UDMA_4); 541 mask = 0x1f;
554 542
555 /* 543 /*
556 * CHECK ME, Does this need to be changed to HPT374 ?? 544 * CHECK ME, Does this need to be changed to HPT374 ??
@@ -561,13 +549,13 @@ static u8 hpt3xx_ratefilter(ide_drive_t *drive, u8 speed)
561 !check_in_drive_list(drive, bad_ata66_4)) 549 !check_in_drive_list(drive, bad_ata66_4))
562 goto check_bad_ata33; 550 goto check_bad_ata33;
563 551
564 speed = min_t(u8, speed, XFER_UDMA_3); 552 mask = 0x0f;
565 if (HPT366_ALLOW_ATA66_3 && 553 if (HPT366_ALLOW_ATA66_3 &&
566 !check_in_drive_list(drive, bad_ata66_3)) 554 !check_in_drive_list(drive, bad_ata66_3))
567 goto check_bad_ata33; 555 goto check_bad_ata33;
568 /* fall thru */ 556 /* fall thru */
569 case 0x01: 557 case 0x01:
570 speed = min_t(u8, speed, XFER_UDMA_2); 558 mask = 0x07;
571 559
572 check_bad_ata33: 560 check_bad_ata33:
573 if (chip_type >= HPT370A) 561 if (chip_type >= HPT370A)
@@ -577,10 +565,10 @@ static u8 hpt3xx_ratefilter(ide_drive_t *drive, u8 speed)
577 /* fall thru */ 565 /* fall thru */
578 case 0x00: 566 case 0x00:
579 default: 567 default:
580 speed = min_t(u8, speed, XFER_MW_DMA_2); 568 mask = 0x00;
581 break; 569 break;
582 } 570 }
583 return speed; 571 return mask;
584} 572}
585 573
586static u32 get_speed_setting(u8 speed, struct hpt_info *info) 574static u32 get_speed_setting(u8 speed, struct hpt_info *info)
@@ -608,12 +596,19 @@ static int hpt36x_tune_chipset(ide_drive_t *drive, u8 xferspeed)
608 ide_hwif_t *hwif = HWIF(drive); 596 ide_hwif_t *hwif = HWIF(drive);
609 struct pci_dev *dev = hwif->pci_dev; 597 struct pci_dev *dev = hwif->pci_dev;
610 struct hpt_info *info = pci_get_drvdata(dev); 598 struct hpt_info *info = pci_get_drvdata(dev);
611 u8 speed = hpt3xx_ratefilter(drive, xferspeed); 599 u8 speed = ide_rate_filter(drive, xferspeed);
612 u8 itr_addr = drive->dn ? 0x44 : 0x40; 600 u8 itr_addr = drive->dn ? 0x44 : 0x40;
613 u32 itr_mask = speed < XFER_MW_DMA_0 ? 0x30070000 :
614 (speed < XFER_UDMA_0 ? 0xc0070000 : 0xc03800ff);
615 u32 new_itr = get_speed_setting(speed, info);
616 u32 old_itr = 0; 601 u32 old_itr = 0;
602 u32 itr_mask, new_itr;
603
604 /* TODO: move this to ide_rate_filter() [ check ->atapi_dma ] */
605 if (drive->media != ide_disk)
606 speed = min_t(u8, speed, XFER_PIO_4);
607
608 itr_mask = speed < XFER_MW_DMA_0 ? 0x30070000 :
609 (speed < XFER_UDMA_0 ? 0xc0070000 : 0xc03800ff);
610
611 new_itr = get_speed_setting(speed, info);
617 612
618 /* 613 /*
619 * Disable on-chip PIO FIFO/buffer (and PIO MST mode as well) 614 * Disable on-chip PIO FIFO/buffer (and PIO MST mode as well)
@@ -633,12 +628,19 @@ static int hpt37x_tune_chipset(ide_drive_t *drive, u8 xferspeed)
633 ide_hwif_t *hwif = HWIF(drive); 628 ide_hwif_t *hwif = HWIF(drive);
634 struct pci_dev *dev = hwif->pci_dev; 629 struct pci_dev *dev = hwif->pci_dev;
635 struct hpt_info *info = pci_get_drvdata(dev); 630 struct hpt_info *info = pci_get_drvdata(dev);
636 u8 speed = hpt3xx_ratefilter(drive, xferspeed); 631 u8 speed = ide_rate_filter(drive, xferspeed);
637 u8 itr_addr = 0x40 + (drive->dn * 4); 632 u8 itr_addr = 0x40 + (drive->dn * 4);
638 u32 itr_mask = speed < XFER_MW_DMA_0 ? 0x303c0000 :
639 (speed < XFER_UDMA_0 ? 0xc03c0000 : 0xc1c001ff);
640 u32 new_itr = get_speed_setting(speed, info);
641 u32 old_itr = 0; 633 u32 old_itr = 0;
634 u32 itr_mask, new_itr;
635
636 /* TODO: move this to ide_rate_filter() [ check ->atapi_dma ] */
637 if (drive->media != ide_disk)
638 speed = min_t(u8, speed, XFER_PIO_4);
639
640 itr_mask = speed < XFER_MW_DMA_0 ? 0x303c0000 :
641 (speed < XFER_UDMA_0 ? 0xc03c0000 : 0xc1c001ff);
642
643 new_itr = get_speed_setting(speed, info);
642 644
643 pci_read_config_dword(dev, itr_addr, &old_itr); 645 pci_read_config_dword(dev, itr_addr, &old_itr);
644 new_itr = (new_itr & ~itr_mask) | (old_itr & itr_mask); 646 new_itr = (new_itr & ~itr_mask) | (old_itr & itr_mask);
@@ -667,24 +669,6 @@ static void hpt3xx_tune_drive(ide_drive_t *drive, u8 pio)
667 (void) hpt3xx_tune_chipset (drive, XFER_PIO_0 + pio); 669 (void) hpt3xx_tune_chipset (drive, XFER_PIO_0 + pio);
668} 670}
669 671
670/*
671 * This allows the configuration of ide_pci chipset registers
672 * for cards that learn about the drive's UDMA, DMA, PIO capabilities
673 * after the drive is reported by the OS. Initially designed for
674 * HPT366 UDMA chipset by HighPoint|Triones Technologies, Inc.
675 *
676 */
677static int config_chipset_for_dma(ide_drive_t *drive)
678{
679 u8 speed = ide_dma_speed(drive, hpt3xx_ratemask(drive));
680
681 if (!speed)
682 return 0;
683
684 (void) hpt3xx_tune_chipset(drive, speed);
685 return ide_dma_enable(drive);
686}
687
688static int hpt3xx_quirkproc(ide_drive_t *drive) 672static int hpt3xx_quirkproc(ide_drive_t *drive)
689{ 673{
690 struct hd_driveid *id = drive->id; 674 struct hd_driveid *id = drive->id;
@@ -739,7 +723,7 @@ static int hpt366_config_drive_xfer_rate(ide_drive_t *drive)
739{ 723{
740 drive->init_speed = 0; 724 drive->init_speed = 0;
741 725
742 if (ide_use_dma(drive) && config_chipset_for_dma(drive)) 726 if (ide_tune_dma(drive))
743 return 0; 727 return 0;
744 728
745 if (ide_use_fast_pio(drive)) 729 if (ide_use_fast_pio(drive))
@@ -1271,6 +1255,7 @@ static void __devinit init_hwif_hpt366(ide_hwif_t *hwif)
1271 hwif->intrproc = &hpt3xx_intrproc; 1255 hwif->intrproc = &hpt3xx_intrproc;
1272 hwif->maskproc = &hpt3xx_maskproc; 1256 hwif->maskproc = &hpt3xx_maskproc;
1273 hwif->busproc = &hpt3xx_busproc; 1257 hwif->busproc = &hpt3xx_busproc;
1258 hwif->udma_filter = &hpt3xx_udma_filter;
1274 1259
1275 /* 1260 /*
1276 * HPT3xxN chips have some complications: 1261 * HPT3xxN chips have some complications:
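hpt366 is the most instructive conversion: hpt3xx_ratefilter() used to clip the requested speed with min_t(), while the new hpt3xx_udma_filter() returns a mask and leaves the clipping to ide_rate_filter(drive, speed) through the hwif->udma_filter hook registered at the end of the hunk. The two forms are equivalent. A sketch of clipping against a mask, assuming XFER_UDMA_0 is 0x40 as in linux/hdreg.h; the helper name is ours:

    #define XFER_UDMA_0 0x40    /* transfer-mode IDs as in linux/hdreg.h */

    /* Clip a requested speed against a UDMA mask: what the old
     * min_t(u8, speed, XFER_UDMA_5) and the new mask 0x3f both express.
     * (A real filter would fall back to MWDMA when the mask is empty.) */
    static unsigned char clip_udma(unsigned char speed, unsigned char mask)
    {
            int i, top = -1;

            for (i = 0; i < 8; i++)
                    if (mask & (1 << i))
                            top = i;        /* highest allowed UDMA mode */

            if (speed >= XFER_UDMA_0 && top >= 0 && speed > XFER_UDMA_0 + top)
                    speed = (unsigned char)(XFER_UDMA_0 + top);
            return speed;
    }
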
diff --git a/drivers/ide/pci/it8213.c b/drivers/ide/pci/it8213.c
index 424f00bb160d..c04a02687b95 100644
--- a/drivers/ide/pci/it8213.c
+++ b/drivers/ide/pci/it8213.c
@@ -17,22 +17,6 @@
17 17
18#include <asm/io.h> 18#include <asm/io.h>
19 19
20/*
21 * it8213_ratemask - Compute available modes
22 * @drive: IDE drive
23 *
24 * Compute the available speeds for the devices on the interface. This
25 * is all modes to ATA133 clipped by drive cable setup.
26 */
27
28static u8 it8213_ratemask (ide_drive_t *drive)
29{
30 u8 mode = 4;
31 if (!eighty_ninty_three(drive))
32 mode = min_t(u8, mode, 1);
33 return mode;
34}
35
36/** 20/**
37 * it8213_dma_2_pio - return the PIO mode matching DMA 21 * it8213_dma_2_pio - return the PIO mode matching DMA
38 * @xfer_rate: transfer speed 22 * @xfer_rate: transfer speed
@@ -145,7 +129,7 @@ static int it8213_tune_chipset (ide_drive_t *drive, u8 xferspeed)
145 ide_hwif_t *hwif = HWIF(drive); 129 ide_hwif_t *hwif = HWIF(drive);
146 struct pci_dev *dev = hwif->pci_dev; 130 struct pci_dev *dev = hwif->pci_dev;
147 u8 maslave = 0x40; 131 u8 maslave = 0x40;
148 u8 speed = ide_rate_filter(it8213_ratemask(drive), xferspeed); 132 u8 speed = ide_rate_filter(drive, xferspeed);
149 int a_speed = 3 << (drive->dn * 4); 133 int a_speed = 3 << (drive->dn * 4);
150 int u_flag = 1 << drive->dn; 134 int u_flag = 1 << drive->dn;
151 int v_flag = 0x01 << drive->dn; 135 int v_flag = 0x01 << drive->dn;
@@ -213,25 +197,6 @@ static int it8213_tune_chipset (ide_drive_t *drive, u8 xferspeed)
213 return ide_config_drive_speed(drive, speed); 197 return ide_config_drive_speed(drive, speed);
214} 198}
215 199
216/*
217 * config_chipset_for_dma - configure for DMA
218 * @drive: drive to configure
219 *
220 * Called by the IDE layer when it wants the timings set up.
221 */
222
223static int config_chipset_for_dma (ide_drive_t *drive)
224{
225 u8 speed = ide_dma_speed(drive, it8213_ratemask(drive));
226
227 if (!speed)
228 return 0;
229
230 it8213_tune_chipset(drive, speed);
231
232 return ide_dma_enable(drive);
233}
234
235/** 200/**
236 * it8213_configure_drive_for_dma - set up for DMA transfers 201 * it8213_configure_drive_for_dma - set up for DMA transfers
237 * @drive: drive we are going to set up 202 * @drive: drive we are going to set up
@@ -246,7 +211,7 @@ static int it8213_config_drive_for_dma (ide_drive_t *drive)
246{ 211{
247 u8 pio; 212 u8 pio;
248 213
249 if (ide_use_dma(drive) && config_chipset_for_dma(drive)) 214 if (ide_tune_dma(drive))
250 return 0; 215 return 0;
251 216
252 pio = ide_get_best_pio_mode(drive, 255, 4, NULL); 217 pio = ide_get_best_pio_mode(drive, 255, 4, NULL);
diff --git a/drivers/ide/pci/it821x.c b/drivers/ide/pci/it821x.c
index 4e1254813ee0..442f658c6ae7 100644
--- a/drivers/ide/pci/it821x.c
+++ b/drivers/ide/pci/it821x.c
@@ -229,22 +229,6 @@ static void it821x_clock_strategy(ide_drive_t *drive)
229} 229}
230 230
231/** 231/**
232 * it821x_ratemask - Compute available modes
233 * @drive: IDE drive
234 *
235 * Compute the available speeds for the devices on the interface. This
236 * is all modes to ATA133 clipped by drive cable setup.
237 */
238
239static u8 it821x_ratemask (ide_drive_t *drive)
240{
241 u8 mode = 4;
242 if (!eighty_ninty_three(drive))
243 mode = min(mode, (u8)1);
244 return mode;
245}
246
247/**
248 * it821x_tunepio - tune a drive 232 * it821x_tunepio - tune a drive
249 * @drive: drive to tune 233 * @drive: drive to tune
250 * @pio: the desired PIO mode 234 * @pio: the desired PIO mode
@@ -438,7 +422,7 @@ static int it821x_tune_chipset (ide_drive_t *drive, byte xferspeed)
438 422
439 ide_hwif_t *hwif = drive->hwif; 423 ide_hwif_t *hwif = drive->hwif;
440 struct it821x_dev *itdev = ide_get_hwifdata(hwif); 424 struct it821x_dev *itdev = ide_get_hwifdata(hwif);
441 u8 speed = ide_rate_filter(it821x_ratemask(drive), xferspeed); 425 u8 speed = ide_rate_filter(drive, xferspeed);
442 426
443 switch (speed) { 427 switch (speed) {
444 case XFER_PIO_4: 428 case XFER_PIO_4:
@@ -488,7 +472,7 @@ static int it821x_tune_chipset (ide_drive_t *drive, byte xferspeed)
488 472
489static int config_chipset_for_dma (ide_drive_t *drive) 473static int config_chipset_for_dma (ide_drive_t *drive)
490{ 474{
491 u8 speed = ide_dma_speed(drive, it821x_ratemask(drive)); 475 u8 speed = ide_max_dma_mode(drive);
492 476
493 if (speed == 0) 477 if (speed == 0)
494 return 0; 478 return 0;
diff --git a/drivers/ide/pci/jmicron.c b/drivers/ide/pci/jmicron.c
index be4fc96c29e0..76ed25147229 100644
--- a/drivers/ide/pci/jmicron.c
+++ b/drivers/ide/pci/jmicron.c
@@ -22,22 +22,6 @@ typedef enum {
22} port_type; 22} port_type;
23 23
24/** 24/**
25 * jmicron_ratemask - Compute available modes
26 * @drive: IDE drive
27 *
28 * Compute the available speeds for the devices on the interface. This
29 * is all modes to ATA133 clipped by drive cable setup.
30 */
31
32static u8 jmicron_ratemask(ide_drive_t *drive)
33{
34 u8 mode = 4;
35 if (!eighty_ninty_three(drive))
36 mode = min(mode, (u8)1);
37 return mode;
38}
39
40/**
41 * ata66_jmicron - Cable check 25 * ata66_jmicron - Cable check
42 * @hwif: IDE port 26 * @hwif: IDE port
43 * 27 *
@@ -129,32 +113,12 @@ static void config_jmicron_chipset_for_pio (ide_drive_t *drive, byte set_speed)
129 113
130static int jmicron_tune_chipset (ide_drive_t *drive, byte xferspeed) 114static int jmicron_tune_chipset (ide_drive_t *drive, byte xferspeed)
131{ 115{
132 116 u8 speed = ide_rate_filter(drive, xferspeed);
133 u8 speed = ide_rate_filter(jmicron_ratemask(drive), xferspeed);
134 117
135 return ide_config_drive_speed(drive, speed); 118 return ide_config_drive_speed(drive, speed);
136} 119}
137 120
138/** 121/**
139 * config_chipset_for_dma - configure for DMA
140 * @drive: drive to configure
141 *
142 * As the JMicron snoops for timings all we actually need to do is
143 * make sure we don't set an invalid mode.
144 */
145
146static int config_chipset_for_dma (ide_drive_t *drive)
147{
148 u8 speed = ide_dma_speed(drive, jmicron_ratemask(drive));
149
150 if (!speed)
151 return 0;
152
153 jmicron_tune_chipset(drive, speed);
154 return ide_dma_enable(drive);
155}
156
157/**
158 * jmicron_configure_drive_for_dma - set up for DMA transfers 122 * jmicron_configure_drive_for_dma - set up for DMA transfers
159 * @drive: drive we are going to set up 123 * @drive: drive we are going to set up
160 * 124 *
@@ -164,7 +128,7 @@ static int config_chipset_for_dma (ide_drive_t *drive)
164 128
165static int jmicron_config_drive_for_dma (ide_drive_t *drive) 129static int jmicron_config_drive_for_dma (ide_drive_t *drive)
166{ 130{
167 if (ide_use_dma(drive) && config_chipset_for_dma(drive)) 131 if (ide_tune_dma(drive))
168 return 0; 132 return 0;
169 133
170 config_jmicron_chipset_for_pio(drive, 1); 134 config_jmicron_chipset_for_pio(drive, 1);
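The jmicron hunk shows, in its smallest form, what every removed ratemask helper shared: an eighty_ninty_three() cable check that drops the limit to UDMA2 on a 40-wire cable. Expressed over masks instead of mode codes, the whole idiom is a single AND, as in this sketch (0x07 meaning UDMA0-2; the function name is ours):

    /* Sketch: a 40-wire cable caps transfers at UDMA2 (ATA/33), so the
     * old "mode = min(mode, 1)" idiom becomes one AND over the mask. */
    static unsigned char clip_for_cable(unsigned char udma_mask, int has_80w)
    {
            return has_80w ? udma_mask : (udma_mask & 0x07);  /* UDMA0-2 */
    }
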
diff --git a/drivers/ide/pci/pdc202xx_new.c b/drivers/ide/pci/pdc202xx_new.c
index 2cdd629c653d..65b1e124edf7 100644
--- a/drivers/ide/pci/pdc202xx_new.c
+++ b/drivers/ide/pci/pdc202xx_new.c
@@ -37,8 +37,6 @@
37#include <asm/pci-bridge.h> 37#include <asm/pci-bridge.h>
38#endif 38#endif
39 39
40#define PDC202_DEBUG_CABLE 0
41
42#undef DEBUG 40#undef DEBUG
43 41
44#ifdef DEBUG 42#ifdef DEBUG
@@ -82,16 +80,6 @@ static u8 max_dma_rate(struct pci_dev *pdev)
82 return mode; 80 return mode;
83} 81}
84 82
85static u8 pdcnew_ratemask(ide_drive_t *drive)
86{
87 u8 mode = max_dma_rate(HWIF(drive)->pci_dev);
88
89 if (!eighty_ninty_three(drive))
90 mode = min_t(u8, mode, 1);
91
92 return mode;
93}
94
95/** 83/**
96 * get_indexed_reg - Get indexed register 84 * get_indexed_reg - Get indexed register
97 * @hwif: for the port address 85 * @hwif: for the port address
@@ -164,7 +152,7 @@ static int pdcnew_tune_chipset(ide_drive_t *drive, u8 speed)
164 u8 adj = (drive->dn & 1) ? 0x08 : 0x00; 152 u8 adj = (drive->dn & 1) ? 0x08 : 0x00;
165 int err; 153 int err;
166 154
167 speed = ide_rate_filter(pdcnew_ratemask(drive), speed); 155 speed = ide_rate_filter(drive, speed);
168 156
169 /* 157 /*
170 * Issue SETFEATURES_XFER to the drive first. PDC202xx hardware will 158 * Issue SETFEATURES_XFER to the drive first. PDC202xx hardware will
@@ -244,17 +232,8 @@ static int config_chipset_for_dma(ide_drive_t *drive)
244{ 232{
245 struct hd_driveid *id = drive->id; 233 struct hd_driveid *id = drive->id;
246 ide_hwif_t *hwif = HWIF(drive); 234 ide_hwif_t *hwif = HWIF(drive);
247 u8 ultra_66 = (id->dma_ultra & 0x0078) ? 1 : 0;
248 u8 cable = pdcnew_cable_detect(hwif);
249 u8 speed; 235 u8 speed;
250 236
251 if (ultra_66 && cable) {
252 printk(KERN_WARNING "Warning: %s channel "
253 "requires an 80-pin cable for operation.\n",
254 hwif->channel ? "Secondary" : "Primary");
255 printk(KERN_WARNING "%s reduced to Ultra33 mode.\n", drive->name);
256 }
257
258 if (id->capability & 4) { 237 if (id->capability & 4) {
259 /* 238 /*
260 * Set IORDY_EN & PREFETCH_EN (this seems to have 239 * Set IORDY_EN & PREFETCH_EN (this seems to have
@@ -267,7 +246,7 @@ static int config_chipset_for_dma(ide_drive_t *drive)
267 set_indexed_reg(hwif, 0x13 + adj, tmp | 0x03); 246 set_indexed_reg(hwif, 0x13 + adj, tmp | 0x03);
268 } 247 }
269 248
270 speed = ide_dma_speed(drive, pdcnew_ratemask(drive)); 249 speed = ide_max_dma_mode(drive);
271 250
272 if (!speed) 251 if (!speed)
273 return 0; 252 return 0;
@@ -543,7 +522,8 @@ static void __devinit init_hwif_pdc202new(ide_hwif_t *hwif)
543 hwif->drives[0].autotune = hwif->drives[1].autotune = 1; 522 hwif->drives[0].autotune = hwif->drives[1].autotune = 1;
544 523
545 hwif->atapi_dma = 1; 524 hwif->atapi_dma = 1;
546 hwif->ultra_mask = 0x7f; 525
526 hwif->ultra_mask = hwif->cds->udma_mask;
547 hwif->mwdma_mask = 0x07; 527 hwif->mwdma_mask = 0x07;
548 528
549 hwif->err_stops_fifo = 1; 529 hwif->err_stops_fifo = 1;
@@ -556,11 +536,6 @@ static void __devinit init_hwif_pdc202new(ide_hwif_t *hwif)
556 if (!noautodma) 536 if (!noautodma)
557 hwif->autodma = 1; 537 hwif->autodma = 1;
558 hwif->drives[0].autodma = hwif->drives[1].autodma = hwif->autodma; 538 hwif->drives[0].autodma = hwif->drives[1].autodma = hwif->autodma;
559
560#if PDC202_DEBUG_CABLE
561 printk(KERN_DEBUG "%s: %s-pin cable\n",
562 hwif->name, hwif->udma_four ? "80" : "40");
563#endif /* PDC202_DEBUG_CABLE */
564} 539}
565 540
566static int __devinit init_setup_pdcnew(struct pci_dev *dev, ide_pci_device_t *d) 541static int __devinit init_setup_pdcnew(struct pci_dev *dev, ide_pci_device_t *d)
@@ -619,6 +594,7 @@ static ide_pci_device_t pdcnew_chipsets[] __devinitdata = {
619 .channels = 2, 594 .channels = 2,
620 .autodma = AUTODMA, 595 .autodma = AUTODMA,
621 .bootable = OFF_BOARD, 596 .bootable = OFF_BOARD,
597 .udma_mask = 0x3f, /* udma0-5 */
622 },{ /* 1 */ 598 },{ /* 1 */
623 .name = "PDC20269", 599 .name = "PDC20269",
624 .init_setup = init_setup_pdcnew, 600 .init_setup = init_setup_pdcnew,
@@ -627,6 +603,7 @@ static ide_pci_device_t pdcnew_chipsets[] __devinitdata = {
627 .channels = 2, 603 .channels = 2,
628 .autodma = AUTODMA, 604 .autodma = AUTODMA,
629 .bootable = OFF_BOARD, 605 .bootable = OFF_BOARD,
606 .udma_mask = 0x7f, /* udma0-6 */
630 },{ /* 2 */ 607 },{ /* 2 */
631 .name = "PDC20270", 608 .name = "PDC20270",
632 .init_setup = init_setup_pdc20270, 609 .init_setup = init_setup_pdc20270,
@@ -635,6 +612,7 @@ static ide_pci_device_t pdcnew_chipsets[] __devinitdata = {
635 .channels = 2, 612 .channels = 2,
636 .autodma = AUTODMA, 613 .autodma = AUTODMA,
637 .bootable = OFF_BOARD, 614 .bootable = OFF_BOARD,
615 .udma_mask = 0x3f, /* udma0-5 */
638 },{ /* 3 */ 616 },{ /* 3 */
639 .name = "PDC20271", 617 .name = "PDC20271",
640 .init_setup = init_setup_pdcnew, 618 .init_setup = init_setup_pdcnew,
@@ -643,6 +621,7 @@ static ide_pci_device_t pdcnew_chipsets[] __devinitdata = {
643 .channels = 2, 621 .channels = 2,
644 .autodma = AUTODMA, 622 .autodma = AUTODMA,
645 .bootable = OFF_BOARD, 623 .bootable = OFF_BOARD,
624 .udma_mask = 0x7f, /* udma0-6 */
646 },{ /* 4 */ 625 },{ /* 4 */
647 .name = "PDC20275", 626 .name = "PDC20275",
648 .init_setup = init_setup_pdcnew, 627 .init_setup = init_setup_pdcnew,
@@ -651,6 +630,7 @@ static ide_pci_device_t pdcnew_chipsets[] __devinitdata = {
651 .channels = 2, 630 .channels = 2,
652 .autodma = AUTODMA, 631 .autodma = AUTODMA,
653 .bootable = OFF_BOARD, 632 .bootable = OFF_BOARD,
633 .udma_mask = 0x7f, /* udma0-6 */
654 },{ /* 5 */ 634 },{ /* 5 */
655 .name = "PDC20276", 635 .name = "PDC20276",
656 .init_setup = init_setup_pdc20276, 636 .init_setup = init_setup_pdc20276,
@@ -659,6 +639,7 @@ static ide_pci_device_t pdcnew_chipsets[] __devinitdata = {
659 .channels = 2, 639 .channels = 2,
660 .autodma = AUTODMA, 640 .autodma = AUTODMA,
661 .bootable = OFF_BOARD, 641 .bootable = OFF_BOARD,
642 .udma_mask = 0x7f, /* udma0-6 */
662 },{ /* 6 */ 643 },{ /* 6 */
663 .name = "PDC20277", 644 .name = "PDC20277",
664 .init_setup = init_setup_pdcnew, 645 .init_setup = init_setup_pdcnew,
@@ -667,6 +648,7 @@ static ide_pci_device_t pdcnew_chipsets[] __devinitdata = {
667 .channels = 2, 648 .channels = 2,
668 .autodma = AUTODMA, 649 .autodma = AUTODMA,
669 .bootable = OFF_BOARD, 650 .bootable = OFF_BOARD,
651 .udma_mask = 0x7f, /* udma0-6 */
670 } 652 }
671}; 653};
672 654
diff --git a/drivers/ide/pci/pdc202xx_old.c b/drivers/ide/pci/pdc202xx_old.c
index a7a639fe1eaf..7146fe3f6ba7 100644
--- a/drivers/ide/pci/pdc202xx_old.c
+++ b/drivers/ide/pci/pdc202xx_old.c
@@ -46,7 +46,6 @@
46#include <asm/io.h> 46#include <asm/io.h>
47#include <asm/irq.h> 47#include <asm/irq.h>
48 48
49#define PDC202_DEBUG_CABLE 0
50#define PDC202XX_DEBUG_DRIVE_INFO 0 49#define PDC202XX_DEBUG_DRIVE_INFO 0
51 50
52static const char *pdc_quirk_drives[] = { 51static const char *pdc_quirk_drives[] = {
@@ -101,35 +100,12 @@ static const char *pdc_quirk_drives[] = {
101#define MC1 0x02 /* DMA"C" timing */ 100#define MC1 0x02 /* DMA"C" timing */
102#define MC0 0x01 /* DMA"C" timing */ 101#define MC0 0x01 /* DMA"C" timing */
103 102
104static u8 pdc202xx_ratemask (ide_drive_t *drive)
105{
106 u8 mode;
107
108 switch(HWIF(drive)->pci_dev->device) {
109 case PCI_DEVICE_ID_PROMISE_20267:
110 case PCI_DEVICE_ID_PROMISE_20265:
111 mode = 3;
112 break;
113 case PCI_DEVICE_ID_PROMISE_20263:
114 case PCI_DEVICE_ID_PROMISE_20262:
115 mode = 2;
116 break;
117 case PCI_DEVICE_ID_PROMISE_20246:
118 return 1;
119 default:
120 return 0;
121 }
122 if (!eighty_ninty_three(drive))
123 mode = min(mode, (u8)1);
124 return mode;
125}
126
127static int pdc202xx_tune_chipset (ide_drive_t *drive, u8 xferspeed) 103static int pdc202xx_tune_chipset (ide_drive_t *drive, u8 xferspeed)
128{ 104{
129 ide_hwif_t *hwif = HWIF(drive); 105 ide_hwif_t *hwif = HWIF(drive);
130 struct pci_dev *dev = hwif->pci_dev; 106 struct pci_dev *dev = hwif->pci_dev;
131 u8 drive_pci = 0x60 + (drive->dn << 2); 107 u8 drive_pci = 0x60 + (drive->dn << 2);
132 u8 speed = ide_rate_filter(pdc202xx_ratemask(drive), xferspeed); 108 u8 speed = ide_rate_filter(drive, xferspeed);
133 109
134 u32 drive_conf; 110 u32 drive_conf;
135 u8 AP, BP, CP, DP; 111 u8 AP, BP, CP, DP;
@@ -261,20 +237,7 @@ static int config_chipset_for_dma (ide_drive_t *drive)
261 u32 drive_conf = 0; 237 u32 drive_conf = 0;
262 u8 drive_pci = 0x60 + (drive->dn << 2); 238 u8 drive_pci = 0x60 + (drive->dn << 2);
263 u8 test1 = 0, test2 = 0, speed = -1; 239 u8 test1 = 0, test2 = 0, speed = -1;
264 u8 AP = 0, cable = 0; 240 u8 AP = 0;
265
266 u8 ultra_66 = ((id->dma_ultra & 0x0010) ||
267 (id->dma_ultra & 0x0008)) ? 1 : 0;
268
269 if (dev->device != PCI_DEVICE_ID_PROMISE_20246)
270 cable = pdc202xx_old_cable_detect(hwif);
271 else
272 ultra_66 = 0;
273
274 if (ultra_66 && cable) {
275 printk(KERN_WARNING "Warning: %s channel requires an 80-pin cable for operation.\n", hwif->channel ? "Secondary":"Primary");
276 printk(KERN_WARNING "%s reduced to Ultra33 mode.\n", drive->name);
277 }
278 241
279 if (dev->device != PCI_DEVICE_ID_PROMISE_20246) 242 if (dev->device != PCI_DEVICE_ID_PROMISE_20246)
280 pdc_old_disable_66MHz_clock(drive->hwif); 243 pdc_old_disable_66MHz_clock(drive->hwif);
@@ -308,7 +271,7 @@ chipset_is_set:
308 if (drive->media == ide_disk) /* PREFETCH_EN */ 271 if (drive->media == ide_disk) /* PREFETCH_EN */
309 pci_write_config_byte(dev, (drive_pci), AP|PREFETCH_EN); 272 pci_write_config_byte(dev, (drive_pci), AP|PREFETCH_EN);
310 273
311 speed = ide_dma_speed(drive, pdc202xx_ratemask(drive)); 274 speed = ide_max_dma_mode(drive);
312 275
313 if (!(speed)) { 276 if (!(speed)) {
314 /* restore original pci-config space */ 277 /* restore original pci-config space */
@@ -478,7 +441,7 @@ static void __devinit init_hwif_pdc202xx(ide_hwif_t *hwif)
478 441
479 hwif->drives[0].autotune = hwif->drives[1].autotune = 1; 442 hwif->drives[0].autotune = hwif->drives[1].autotune = 1;
480 443
481 hwif->ultra_mask = 0x3f; 444 hwif->ultra_mask = hwif->cds->udma_mask;
482 hwif->mwdma_mask = 0x07; 445 hwif->mwdma_mask = 0x07;
483 hwif->swdma_mask = 0x07; 446 hwif->swdma_mask = 0x07;
484 hwif->atapi_dma = 1; 447 hwif->atapi_dma = 1;
@@ -500,10 +463,6 @@ static void __devinit init_hwif_pdc202xx(ide_hwif_t *hwif)
500 if (!noautodma) 463 if (!noautodma)
501 hwif->autodma = 1; 464 hwif->autodma = 1;
502 hwif->drives[0].autodma = hwif->drives[1].autodma = hwif->autodma; 465 hwif->drives[0].autodma = hwif->drives[1].autodma = hwif->autodma;
503#if PDC202_DEBUG_CABLE
504 printk(KERN_DEBUG "%s: %s-pin cable\n",
505 hwif->name, hwif->udma_four ? "80" : "40");
506#endif /* PDC202_DEBUG_CABLE */
507} 466}
508 467
509static void __devinit init_dma_pdc202xx(ide_hwif_t *hwif, unsigned long dmabase) 468static void __devinit init_dma_pdc202xx(ide_hwif_t *hwif, unsigned long dmabase)
@@ -587,6 +546,7 @@ static ide_pci_device_t pdc202xx_chipsets[] __devinitdata = {
587 .autodma = AUTODMA, 546 .autodma = AUTODMA,
588 .bootable = OFF_BOARD, 547 .bootable = OFF_BOARD,
589 .extra = 16, 548 .extra = 16,
549 .udma_mask = 0x07, /* udma0-2 */
590 },{ /* 1 */ 550 },{ /* 1 */
591 .name = "PDC20262", 551 .name = "PDC20262",
592 .init_setup = init_setup_pdc202ata4, 552 .init_setup = init_setup_pdc202ata4,
@@ -597,6 +557,7 @@ static ide_pci_device_t pdc202xx_chipsets[] __devinitdata = {
597 .autodma = AUTODMA, 557 .autodma = AUTODMA,
598 .bootable = OFF_BOARD, 558 .bootable = OFF_BOARD,
599 .extra = 48, 559 .extra = 48,
560 .udma_mask = 0x1f, /* udma0-4 */
600 },{ /* 2 */ 561 },{ /* 2 */
601 .name = "PDC20263", 562 .name = "PDC20263",
602 .init_setup = init_setup_pdc202ata4, 563 .init_setup = init_setup_pdc202ata4,
@@ -607,6 +568,7 @@ static ide_pci_device_t pdc202xx_chipsets[] __devinitdata = {
607 .autodma = AUTODMA, 568 .autodma = AUTODMA,
608 .bootable = OFF_BOARD, 569 .bootable = OFF_BOARD,
609 .extra = 48, 570 .extra = 48,
571 .udma_mask = 0x1f, /* udma0-4 */
610 },{ /* 3 */ 572 },{ /* 3 */
611 .name = "PDC20265", 573 .name = "PDC20265",
612 .init_setup = init_setup_pdc20265, 574 .init_setup = init_setup_pdc20265,
@@ -617,6 +579,7 @@ static ide_pci_device_t pdc202xx_chipsets[] __devinitdata = {
617 .autodma = AUTODMA, 579 .autodma = AUTODMA,
618 .bootable = OFF_BOARD, 580 .bootable = OFF_BOARD,
619 .extra = 48, 581 .extra = 48,
582 .udma_mask = 0x3f, /* udma0-5 */
620 },{ /* 4 */ 583 },{ /* 4 */
621 .name = "PDC20267", 584 .name = "PDC20267",
622 .init_setup = init_setup_pdc202xx, 585 .init_setup = init_setup_pdc202xx,
@@ -627,6 +590,7 @@ static ide_pci_device_t pdc202xx_chipsets[] __devinitdata = {
627 .autodma = AUTODMA, 590 .autodma = AUTODMA,
628 .bootable = OFF_BOARD, 591 .bootable = OFF_BOARD,
629 .extra = 48, 592 .extra = 48,
593 .udma_mask = 0x3f, /* udma0-5 */
630 } 594 }
631}; 595};
632 596
diff --git a/drivers/ide/pci/piix.c b/drivers/ide/pci/piix.c
index 061d300ab8be..8b219dd63024 100644
--- a/drivers/ide/pci/piix.c
+++ b/drivers/ide/pci/piix.c
@@ -106,68 +106,6 @@
106static int no_piix_dma; 106static int no_piix_dma;
107 107
108/** 108/**
109 * piix_ratemask - compute rate mask for PIIX IDE
110 * @drive: IDE drive to compute for
111 *
112 * Returns the available modes for the PIIX IDE controller.
113 */
114
115static u8 piix_ratemask (ide_drive_t *drive)
116{
117 struct pci_dev *dev = HWIF(drive)->pci_dev;
118 u8 mode;
119
120 switch(dev->device) {
121 case PCI_DEVICE_ID_INTEL_82801EB_1:
122 mode = 3;
123 break;
124 /* UDMA 100 capable */
125 case PCI_DEVICE_ID_INTEL_82801BA_8:
126 case PCI_DEVICE_ID_INTEL_82801BA_9:
127 case PCI_DEVICE_ID_INTEL_82801CA_10:
128 case PCI_DEVICE_ID_INTEL_82801CA_11:
129 case PCI_DEVICE_ID_INTEL_82801E_11:
130 case PCI_DEVICE_ID_INTEL_82801DB_1:
131 case PCI_DEVICE_ID_INTEL_82801DB_10:
132 case PCI_DEVICE_ID_INTEL_82801DB_11:
133 case PCI_DEVICE_ID_INTEL_82801EB_11:
134 case PCI_DEVICE_ID_INTEL_ESB_2:
135 case PCI_DEVICE_ID_INTEL_ICH6_19:
136 case PCI_DEVICE_ID_INTEL_ICH7_21:
137 case PCI_DEVICE_ID_INTEL_ESB2_18:
138 case PCI_DEVICE_ID_INTEL_ICH8_6:
139 mode = 3;
140 break;
141 /* UDMA 66 capable */
142 case PCI_DEVICE_ID_INTEL_82801AA_1:
143 case PCI_DEVICE_ID_INTEL_82372FB_1:
144 mode = 2;
145 break;
146 /* UDMA 33 capable */
147 case PCI_DEVICE_ID_INTEL_82371AB:
148 case PCI_DEVICE_ID_INTEL_82443MX_1:
149 case PCI_DEVICE_ID_INTEL_82451NX:
150 case PCI_DEVICE_ID_INTEL_82801AB_1:
151 return 1;
152 /* Non UDMA capable (MWDMA2) */
153 case PCI_DEVICE_ID_INTEL_82371SB_1:
154 case PCI_DEVICE_ID_INTEL_82371FB_1:
155 case PCI_DEVICE_ID_INTEL_82371FB_0:
156 case PCI_DEVICE_ID_INTEL_82371MX:
157 default:
158 return 0;
159 }
160
161 /*
162 * If we are UDMA66 capable fall back to UDMA33
163 * if the drive cannot see an 80pin cable.
164 */
165 if (!eighty_ninty_three(drive))
166 mode = min_t(u8, mode, 1);
167 return mode;
168}
169
170/**
171 * piix_dma_2_pio - return the PIO mode matching DMA 109 * piix_dma_2_pio - return the PIO mode matching DMA
172 * @xfer_rate: transfer speed 110 * @xfer_rate: transfer speed
173 * 111 *
@@ -301,7 +239,7 @@ static int piix_tune_chipset (ide_drive_t *drive, u8 xferspeed)
301 ide_hwif_t *hwif = HWIF(drive); 239 ide_hwif_t *hwif = HWIF(drive);
302 struct pci_dev *dev = hwif->pci_dev; 240 struct pci_dev *dev = hwif->pci_dev;
303 u8 maslave = hwif->channel ? 0x42 : 0x40; 241 u8 maslave = hwif->channel ? 0x42 : 0x40;
304 u8 speed = ide_rate_filter(piix_ratemask(drive), xferspeed); 242 u8 speed = ide_rate_filter(drive, xferspeed);
305 int a_speed = 3 << (drive->dn * 4); 243 int a_speed = 3 << (drive->dn * 4);
306 int u_flag = 1 << drive->dn; 244 int u_flag = 1 << drive->dn;
307 int v_flag = 0x01 << drive->dn; 245 int v_flag = 0x01 << drive->dn;
@@ -366,30 +304,6 @@ static int piix_tune_chipset (ide_drive_t *drive, u8 xferspeed)
366} 304}
367 305
368/** 306/**
369 * piix_config_drive_for_dma - configure drive for DMA
370 * @drive: IDE drive to configure
371 *
372 * Set up a PIIX interface channel for the best available speed.
373 * We prefer UDMA if it is available and then MWDMA. If DMA is
374 * not available we switch to PIO and return 0.
375 */
376
377static int piix_config_drive_for_dma (ide_drive_t *drive)
378{
379 u8 speed = ide_dma_speed(drive, piix_ratemask(drive));
380
381 /*
382 * If no DMA speed was available or the chipset has DMA bugs
383 * then disable DMA and use PIO
384 */
385 if (!speed)
386 return 0;
387
388 (void) piix_tune_chipset(drive, speed);
389 return ide_dma_enable(drive);
390}
391
392/**
393 * piix_config_drive_xfer_rate - set up an IDE device 307 * piix_config_drive_xfer_rate - set up an IDE device
394 * @drive: IDE drive to configure 308 * @drive: IDE drive to configure
395 * 309 *
@@ -401,7 +315,7 @@ static int piix_config_drive_xfer_rate (ide_drive_t *drive)
401{ 315{
402 drive->init_speed = 0; 316 drive->init_speed = 0;
403 317
404 if (ide_use_dma(drive) && piix_config_drive_for_dma(drive)) 318 if (ide_tune_dma(drive))
405 return 0; 319 return 0;
406 320
407 if (ide_use_fast_pio(drive)) 321 if (ide_use_fast_pio(drive))
@@ -524,26 +438,14 @@ static void __devinit init_hwif_piix(ide_hwif_t *hwif)
524 hwif->ide_dma_clear_irq = &piix_dma_clear_irq; 438 hwif->ide_dma_clear_irq = &piix_dma_clear_irq;
525 439
526 hwif->atapi_dma = 1; 440 hwif->atapi_dma = 1;
527 hwif->ultra_mask = 0x3f; 441
442 hwif->ultra_mask = hwif->cds->udma_mask;
528 hwif->mwdma_mask = 0x06; 443 hwif->mwdma_mask = 0x06;
529 hwif->swdma_mask = 0x04; 444 hwif->swdma_mask = 0x04;
530 445
531 switch(hwif->pci_dev->device) { 446 if (hwif->ultra_mask & 0x78) {
532 case PCI_DEVICE_ID_INTEL_82371FB_0: 447 if (!hwif->udma_four)
533 case PCI_DEVICE_ID_INTEL_82371FB_1: 448 hwif->udma_four = piix_cable_detect(hwif);
534 case PCI_DEVICE_ID_INTEL_82371SB_1:
535 hwif->ultra_mask = 0x80;
536 break;
537 case PCI_DEVICE_ID_INTEL_82371AB:
538 case PCI_DEVICE_ID_INTEL_82443MX_1:
539 case PCI_DEVICE_ID_INTEL_82451NX:
540 case PCI_DEVICE_ID_INTEL_82801AB_1:
541 hwif->ultra_mask = 0x07;
542 break;
543 default:
544 if (!hwif->udma_four)
545 hwif->udma_four = piix_cable_detect(hwif);
546 break;
547 } 449 }
548 450
549 if (no_piix_dma) 451 if (no_piix_dma)
@@ -557,7 +459,7 @@ static void __devinit init_hwif_piix(ide_hwif_t *hwif)
557 hwif->drives[0].autodma = hwif->autodma; 459 hwif->drives[0].autodma = hwif->autodma;
558} 460}
559 461
560#define DECLARE_PIIX_DEV(name_str) \ 462#define DECLARE_PIIX_DEV(name_str, udma) \
561 { \ 463 { \
562 .name = name_str, \ 464 .name = name_str, \
563 .init_chipset = init_chipset_piix, \ 465 .init_chipset = init_chipset_piix, \
@@ -566,11 +468,12 @@ static void __devinit init_hwif_piix(ide_hwif_t *hwif)
566 .autodma = AUTODMA, \ 468 .autodma = AUTODMA, \
567 .enablebits = {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, \ 469 .enablebits = {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, \
568 .bootable = ON_BOARD, \ 470 .bootable = ON_BOARD, \
471 .udma_mask = udma, \
569 } 472 }
570 473
571static ide_pci_device_t piix_pci_info[] __devinitdata = { 474static ide_pci_device_t piix_pci_info[] __devinitdata = {
572 /* 0 */ DECLARE_PIIX_DEV("PIIXa"), 475 /* 0 */ DECLARE_PIIX_DEV("PIIXa", 0x00), /* no udma */
573 /* 1 */ DECLARE_PIIX_DEV("PIIXb"), 476 /* 1 */ DECLARE_PIIX_DEV("PIIXb", 0x00), /* no udma */
574 477
575 /* 2 */ 478 /* 2 */
576 { /* 479 { /*
@@ -587,28 +490,28 @@ static ide_pci_device_t piix_pci_info[] __devinitdata = {
587 .flags = IDEPCI_FLAG_ISA_PORTS 490 .flags = IDEPCI_FLAG_ISA_PORTS
588 }, 491 },
589 492
590 /* 3 */ DECLARE_PIIX_DEV("PIIX3"), 493 /* 3 */ DECLARE_PIIX_DEV("PIIX3", 0x00), /* no udma */
591 /* 4 */ DECLARE_PIIX_DEV("PIIX4"), 494 /* 4 */ DECLARE_PIIX_DEV("PIIX4", 0x07), /* udma0-2 */
592 /* 5 */ DECLARE_PIIX_DEV("ICH0"), 495 /* 5 */ DECLARE_PIIX_DEV("ICH0", 0x07), /* udma0-2 */
593 /* 6 */ DECLARE_PIIX_DEV("PIIX4"), 496 /* 6 */ DECLARE_PIIX_DEV("PIIX4", 0x07), /* udma0-2 */
594 /* 7 */ DECLARE_PIIX_DEV("ICH"), 497 /* 7 */ DECLARE_PIIX_DEV("ICH", 0x1f), /* udma0-4 */
595 /* 8 */ DECLARE_PIIX_DEV("PIIX4"), 498 /* 8 */ DECLARE_PIIX_DEV("PIIX4", 0x1f), /* udma0-4 */
596 /* 9 */ DECLARE_PIIX_DEV("PIIX4"), 499 /* 9 */ DECLARE_PIIX_DEV("PIIX4", 0x07), /* udma0-2 */
597 /* 10 */ DECLARE_PIIX_DEV("ICH2"), 500 /* 10 */ DECLARE_PIIX_DEV("ICH2", 0x3f), /* udma0-5 */
598 /* 11 */ DECLARE_PIIX_DEV("ICH2M"), 501 /* 11 */ DECLARE_PIIX_DEV("ICH2M", 0x3f), /* udma0-5 */
599 /* 12 */ DECLARE_PIIX_DEV("ICH3M"), 502 /* 12 */ DECLARE_PIIX_DEV("ICH3M", 0x3f), /* udma0-5 */
600 /* 13 */ DECLARE_PIIX_DEV("ICH3"), 503 /* 13 */ DECLARE_PIIX_DEV("ICH3", 0x3f), /* udma0-5 */
601 /* 14 */ DECLARE_PIIX_DEV("ICH4"), 504 /* 14 */ DECLARE_PIIX_DEV("ICH4", 0x3f), /* udma0-5 */
602 /* 15 */ DECLARE_PIIX_DEV("ICH5"), 505 /* 15 */ DECLARE_PIIX_DEV("ICH5", 0x3f), /* udma0-5 */
603 /* 16 */ DECLARE_PIIX_DEV("C-ICH"), 506 /* 16 */ DECLARE_PIIX_DEV("C-ICH", 0x3f), /* udma0-5 */
604 /* 17 */ DECLARE_PIIX_DEV("ICH4"), 507 /* 17 */ DECLARE_PIIX_DEV("ICH4", 0x3f), /* udma0-5 */
605 /* 18 */ DECLARE_PIIX_DEV("ICH5-SATA"), 508 /* 18 */ DECLARE_PIIX_DEV("ICH5-SATA", 0x3f), /* udma0-5 */
606 /* 19 */ DECLARE_PIIX_DEV("ICH5"), 509 /* 19 */ DECLARE_PIIX_DEV("ICH5", 0x3f), /* udma0-5 */
607 /* 20 */ DECLARE_PIIX_DEV("ICH6"), 510 /* 20 */ DECLARE_PIIX_DEV("ICH6", 0x3f), /* udma0-5 */
608 /* 21 */ DECLARE_PIIX_DEV("ICH7"), 511 /* 21 */ DECLARE_PIIX_DEV("ICH7", 0x3f), /* udma0-5 */
609 /* 22 */ DECLARE_PIIX_DEV("ICH4"), 512 /* 22 */ DECLARE_PIIX_DEV("ICH4", 0x3f), /* udma0-5 */
610 /* 23 */ DECLARE_PIIX_DEV("ESB2"), 513 /* 23 */ DECLARE_PIIX_DEV("ESB2", 0x3f), /* udma0-5 */
611 /* 24 */ DECLARE_PIIX_DEV("ICH8M"), 514 /* 24 */ DECLARE_PIIX_DEV("ICH8M", 0x3f), /* udma0-5 */
612}; 515};
613 516
614/** 517/**
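With the PIIX device-ID switch gone, the only runtime question left is whether cable detection is worth running, hence the new test on (hwif->ultra_mask & 0x78): 0x78 selects bits 3 to 6, that is UDMA3 through UDMA6, the modes which need an 80-wire cable. A tiny self-contained illustration (constant name is ours):

    #include <stdio.h>

    #define UDMA3_AND_UP 0x78   /* 0111 1000: bits 3-6, UDMA3..UDMA6 */

    int main(void)
    {
            unsigned char piix4 = 0x07, ich2 = 0x3f;

            printf("PIIX4 cable check: %s\n",
                   (piix4 & UDMA3_AND_UP) ? "yes" : "no"); /* no  */
            printf("ICH2  cable check: %s\n",
                   (ich2 & UDMA3_AND_UP) ? "yes" : "no");  /* yes */
            return 0;
    }
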
diff --git a/drivers/ide/pci/scc_pata.c b/drivers/ide/pci/scc_pata.c
index f84bf791f72e..cbf936325355 100644
--- a/drivers/ide/pci/scc_pata.c
+++ b/drivers/ide/pci/scc_pata.c
@@ -190,23 +190,6 @@ scc_ide_outsl(unsigned long port, void *addr, u32 count)
190} 190}
191 191
192/** 192/**
193 * scc_ratemask - Compute available modes
194 * @drive: IDE drive
195 *
196 * Compute the available speeds for the devices on the interface.
197 * Enforce UDMA33 as a limit if there is no 80pin cable present.
198 */
199
200static u8 scc_ratemask(ide_drive_t *drive)
201{
202 u8 mode = 4;
203
204 if (!eighty_ninty_three(drive))
205 mode = min(mode, (u8)1);
206 return mode;
207}
208
209/**
210 * scc_tuneproc - tune a drive PIO mode 193 * scc_tuneproc - tune a drive PIO mode
211 * @drive: drive to tune 194 * @drive: drive to tune
212 * @mode_wanted: the target operating mode 195 * @mode_wanted: the target operating mode
@@ -273,7 +256,7 @@ static void scc_tuneproc(ide_drive_t *drive, byte mode_wanted)
273static int scc_tune_chipset(ide_drive_t *drive, byte xferspeed) 256static int scc_tune_chipset(ide_drive_t *drive, byte xferspeed)
274{ 257{
275 ide_hwif_t *hwif = HWIF(drive); 258 ide_hwif_t *hwif = HWIF(drive);
276 u8 speed = ide_rate_filter(scc_ratemask(drive), xferspeed); 259 u8 speed = ide_rate_filter(drive, xferspeed);
277 struct scc_ports *ports = ide_get_hwifdata(hwif); 260 struct scc_ports *ports = ide_get_hwifdata(hwif);
278 unsigned long ctl_base = ports->ctl; 261 unsigned long ctl_base = ports->ctl;
279 unsigned long cckctrl_port = ctl_base + 0xff0; 262 unsigned long cckctrl_port = ctl_base + 0xff0;
@@ -347,7 +330,7 @@ static int scc_tune_chipset(ide_drive_t *drive, byte xferspeed)
347 330
348static int scc_config_chipset_for_dma(ide_drive_t *drive) 331static int scc_config_chipset_for_dma(ide_drive_t *drive)
349{ 332{
350 u8 speed = ide_dma_speed(drive, scc_ratemask(drive)); 333 u8 speed = ide_max_dma_mode(drive);
351 334
352 if (!speed) 335 if (!speed)
353 return 0; 336 return 0;
diff --git a/drivers/ide/pci/serverworks.c b/drivers/ide/pci/serverworks.c
index dbcd37a0c652..2fa6d92d16cc 100644
--- a/drivers/ide/pci/serverworks.c
+++ b/drivers/ide/pci/serverworks.c
@@ -65,16 +65,16 @@ static int check_in_drive_lists (ide_drive_t *drive, const char **list)
65 return 0; 65 return 0;
66} 66}
67 67
68static u8 svwks_ratemask (ide_drive_t *drive) 68static u8 svwks_udma_filter(ide_drive_t *drive)
69{ 69{
70 struct pci_dev *dev = HWIF(drive)->pci_dev; 70 struct pci_dev *dev = HWIF(drive)->pci_dev;
71 u8 mode = 0; 71 u8 mask = 0;
72 72
73 if (!svwks_revision) 73 if (!svwks_revision)
74 pci_read_config_byte(dev, PCI_REVISION_ID, &svwks_revision); 74 pci_read_config_byte(dev, PCI_REVISION_ID, &svwks_revision);
75 75
76 if (dev->device == PCI_DEVICE_ID_SERVERWORKS_HT1000IDE) 76 if (dev->device == PCI_DEVICE_ID_SERVERWORKS_HT1000IDE)
77 return 2; 77 return 0x1f;
78 if (dev->device == PCI_DEVICE_ID_SERVERWORKS_OSB4IDE) { 78 if (dev->device == PCI_DEVICE_ID_SERVERWORKS_OSB4IDE) {
79 u32 reg = 0; 79 u32 reg = 0;
80 if (isa_dev) 80 if (isa_dev)
@@ -86,25 +86,31 @@ static u8 svwks_ratemask (ide_drive_t *drive)
86 if(drive->media == ide_disk) 86 if(drive->media == ide_disk)
87 return 0; 87 return 0;
88 /* Check the OSB4 DMA33 enable bit */ 88 /* Check the OSB4 DMA33 enable bit */
89 return ((reg & 0x00004000) == 0x00004000) ? 1 : 0; 89 return ((reg & 0x00004000) == 0x00004000) ? 0x07 : 0;
90 } else if (svwks_revision < SVWKS_CSB5_REVISION_NEW) { 90 } else if (svwks_revision < SVWKS_CSB5_REVISION_NEW) {
91 return 1; 91 return 0x07;
92 } else if (svwks_revision >= SVWKS_CSB5_REVISION_NEW) { 92 } else if (svwks_revision >= SVWKS_CSB5_REVISION_NEW) {
93 u8 btr = 0; 93 u8 btr = 0, mode;
94 pci_read_config_byte(dev, 0x5A, &btr); 94 pci_read_config_byte(dev, 0x5A, &btr);
95 mode = btr & 0x3; 95 mode = btr & 0x3;
96 if (!eighty_ninty_three(drive)) 96
97 mode = min(mode, (u8)1);
98 /* If someone decides to do UDMA133 on CSB5 the same 97 /* If someone decides to do UDMA133 on CSB5 the same
99 issue will bite so be inclusive */ 98 issue will bite so be inclusive */
100 if (mode > 2 && check_in_drive_lists(drive, svwks_bad_ata100)) 99 if (mode > 2 && check_in_drive_lists(drive, svwks_bad_ata100))
101 mode = 2; 100 mode = 2;
101
102 switch(mode) {
103 case 2: mask = 0x1f; break;
104 case 1: mask = 0x07; break;
105 default: mask = 0x00; break;
106 }
102 } 107 }
103 if (((dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE) || 108 if (((dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE) ||
104 (dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2)) && 109 (dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2)) &&
105 (!(PCI_FUNC(dev->devfn) & 1))) 110 (!(PCI_FUNC(dev->devfn) & 1)))
106 mode = 2; 111 mask = 0x1f;
107 return mode; 112
113 return mask;
108} 114}
109 115
110static u8 svwks_csb_check (struct pci_dev *dev) 116static u8 svwks_csb_check (struct pci_dev *dev)
@@ -141,7 +147,7 @@ static int svwks_tune_chipset (ide_drive_t *drive, u8 xferspeed)
141 if (xferspeed == 255) /* PIO auto-tuning */ 147 if (xferspeed == 255) /* PIO auto-tuning */
142 speed = XFER_PIO_0 + pio; 148 speed = XFER_PIO_0 + pio;
143 else 149 else
144 speed = ide_rate_filter(svwks_ratemask(drive), xferspeed); 150 speed = ide_rate_filter(drive, xferspeed);
145 151
146 /* If we are about to put a disk into UDMA mode we screwed up. 152 /* If we are about to put a disk into UDMA mode we screwed up.
147 Our code assumes we never _ever_ do this on an OSB4 */ 153 Our code assumes we never _ever_ do this on an OSB4 */
@@ -304,7 +310,7 @@ static void svwks_tune_drive (ide_drive_t *drive, u8 pio)
304 310
305static int config_chipset_for_dma (ide_drive_t *drive) 311static int config_chipset_for_dma (ide_drive_t *drive)
306{ 312{
307 u8 speed = ide_dma_speed(drive, svwks_ratemask(drive)); 313 u8 speed = ide_max_dma_mode(drive);
308 314
309 if (!(speed)) 315 if (!(speed))
310 speed = XFER_PIO_0 + ide_get_best_pio_mode(drive, 255, 5, NULL); 316 speed = XFER_PIO_0 + ide_get_best_pio_mode(drive, 255, 5, NULL);
@@ -500,6 +506,7 @@ static void __devinit init_hwif_svwks (ide_hwif_t *hwif)
500 506
501 hwif->tuneproc = &svwks_tune_drive; 507 hwif->tuneproc = &svwks_tune_drive;
502 hwif->speedproc = &svwks_tune_chipset; 508 hwif->speedproc = &svwks_tune_chipset;
509 hwif->udma_filter = &svwks_udma_filter;
503 510
504 hwif->atapi_dma = 1; 511 hwif->atapi_dma = 1;
505 512
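serverworks needs per-drive logic, so instead of a static .udma_mask it installs a ->udma_filter hook, and the added switch maps the CSB5 BTR mode field (0 to 2) onto masks. The same mapping can be written as a lookup table, the approach sis5513 takes below; a sketch, with the caveat that mode 3 would need a bounds check, which the switch above handles via its default arm:

    /* Sketch: CSB5 BTR mode field -> UDMA mask, table form. */
    static const unsigned char csb5_mode_to_mask[] = {
            0x00,   /* mode 0: no UDMA */
            0x07,   /* mode 1: UDMA0-2 */
            0x1f,   /* mode 2: UDMA0-4 */
    };
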
diff --git a/drivers/ide/pci/sgiioc4.c b/drivers/ide/pci/sgiioc4.c
index fd09b295a69d..d3185e29a38e 100644
--- a/drivers/ide/pci/sgiioc4.c
+++ b/drivers/ide/pci/sgiioc4.c
@@ -692,7 +692,7 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev, ide_pci_device_t * d)
692 return -EIO; 692 return -EIO;
693 693
694 /* Create /proc/ide entries */ 694 /* Create /proc/ide entries */
695 create_proc_ide_interfaces(); 695 ide_proc_register_port(hwif);
696 696
697 return 0; 697 return 0;
698} 698}
diff --git a/drivers/ide/pci/siimage.c b/drivers/ide/pci/siimage.c
index c0188de3cc66..d09e74c2996e 100644
--- a/drivers/ide/pci/siimage.c
+++ b/drivers/ide/pci/siimage.c
@@ -122,45 +122,41 @@ static inline unsigned long siimage_seldev(ide_drive_t *drive, int r)
122} 122}
123 123
124/** 124/**
125 * siimage_ratemask - Compute available modes 125 * sil_udma_filter - compute UDMA mask
126 * @drive: IDE drive 126 * @drive: IDE device
127 *
128 * Compute the available UDMA speeds for the device on the interface.
127 * 129 *
128 * Compute the available speeds for the devices on the interface.
129 * For the CMD680 this depends on the clocking mode (scsc), for the 130 * For the CMD680 this depends on the clocking mode (scsc), for the
130 * SI3312 SATA controller life is a bit simpler. Enforce UDMA33 131 * SI3112 SATA controller life is a bit simpler.
131 * as a limit if there is no 80pin cable present.
132 */ 132 */
133 133
134static byte siimage_ratemask (ide_drive_t *drive) 134static u8 sil_udma_filter(ide_drive_t *drive)
135{ 135{
136 ide_hwif_t *hwif = HWIF(drive); 136 ide_hwif_t *hwif = drive->hwif;
137 u8 mode = 0, scsc = 0;
138 unsigned long base = (unsigned long) hwif->hwif_data; 137 unsigned long base = (unsigned long) hwif->hwif_data;
138 u8 mask = 0, scsc = 0;
139 139
140 if (hwif->mmio) 140 if (hwif->mmio)
141 scsc = hwif->INB(base + 0x4A); 141 scsc = hwif->INB(base + 0x4A);
142 else 142 else
143 pci_read_config_byte(hwif->pci_dev, 0x8A, &scsc); 143 pci_read_config_byte(hwif->pci_dev, 0x8A, &scsc);
144 144
145 if(is_sata(hwif)) 145 if (is_sata(hwif)) {
146 { 146 mask = strstr(drive->id->model, "Maxtor") ? 0x3f : 0x7f;
147 if(strstr(drive->id->model, "Maxtor")) 147 goto out;
148 return 3;
149 return 4;
150 } 148 }
151 149
152 if ((scsc & 0x30) == 0x10) /* 133 */ 150 if ((scsc & 0x30) == 0x10) /* 133 */
153 mode = 4; 151 mask = 0x7f;
154 else if ((scsc & 0x30) == 0x20) /* 2xPCI */ 152 else if ((scsc & 0x30) == 0x20) /* 2xPCI */
155 mode = 4; 153 mask = 0x7f;
156 else if ((scsc & 0x30) == 0x00) /* 100 */ 154 else if ((scsc & 0x30) == 0x00) /* 100 */
157 mode = 3; 155 mask = 0x3f;
158 else /* Disabled ? */ 156 else /* Disabled ? */
159 BUG(); 157 BUG();
160 158out:
161 if (!eighty_ninty_three(drive)) 159 return mask;
162 mode = min(mode, (u8)1);
163 return mode;
164} 160}
165 161
166/** 162/**
@@ -306,7 +302,7 @@ static int siimage_tune_chipset (ide_drive_t *drive, byte xferspeed)
306 ide_hwif_t *hwif = HWIF(drive); 302 ide_hwif_t *hwif = HWIF(drive);
307 u16 ultra = 0, multi = 0; 303 u16 ultra = 0, multi = 0;
308 u8 mode = 0, unit = drive->select.b.unit; 304 u8 mode = 0, unit = drive->select.b.unit;
309 u8 speed = ide_rate_filter(siimage_ratemask(drive), xferspeed); 305 u8 speed = ide_rate_filter(drive, xferspeed);
310 unsigned long base = (unsigned long)hwif->hwif_data; 306 unsigned long base = (unsigned long)hwif->hwif_data;
311 u8 scsc = 0, addr_mask = ((hwif->channel) ? 307 u8 scsc = 0, addr_mask = ((hwif->channel) ?
312 ((hwif->mmio) ? 0xF4 : 0x84) : 308 ((hwif->mmio) ? 0xF4 : 0x84) :
@@ -389,7 +385,7 @@ static int siimage_tune_chipset (ide_drive_t *drive, byte xferspeed)
389 385
390static int config_chipset_for_dma (ide_drive_t *drive) 386static int config_chipset_for_dma (ide_drive_t *drive)
391{ 387{
392 u8 speed = ide_dma_speed(drive, siimage_ratemask(drive)); 388 u8 speed = ide_max_dma_mode(drive);
393 389
394 if (!speed) 390 if (!speed)
395 return 0; 391 return 0;
@@ -831,7 +827,7 @@ static void __devinit init_mmio_iops_siimage(ide_hwif_t *hwif)
831 827
832 /* 828 /*
833 * Now set up the hw. We have to do this ourselves as 829 * Now set up the hw. We have to do this ourselves as
834 * the MMIO layout isnt the same as the the standard port 830 * the MMIO layout isnt the same as the standard port
835 * based I/O 831 * based I/O
836 */ 832 */
837 833
@@ -989,6 +985,7 @@ static void __devinit init_hwif_siimage(ide_hwif_t *hwif)
989 hwif->tuneproc = &siimage_tuneproc; 985 hwif->tuneproc = &siimage_tuneproc;
990 hwif->reset_poll = &siimage_reset_poll; 986 hwif->reset_poll = &siimage_reset_poll;
991 hwif->pre_reset = &siimage_pre_reset; 987 hwif->pre_reset = &siimage_pre_reset;
988 hwif->udma_filter = &sil_udma_filter;
992 989
993 if(is_sata(hwif)) { 990 if(is_sata(hwif)) {
994 static int first = 1; 991 static int first = 1;
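sil_udma_filter() above also folds in a drive quirk: on SATA, drives whose model string contains "Maxtor" are capped at UDMA5 (0x3f) while everything else may advertise UDMA6 (0x7f). That branch, as a self-contained sketch:

    #include <string.h>

    /* Sketch of the SATA branch above: cap Maxtor drives at UDMA5. */
    static unsigned char sil_sata_mask(const char *model)
    {
            return strstr(model, "Maxtor") ? 0x3f : 0x7f;
    }
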
diff --git a/drivers/ide/pci/sis5513.c b/drivers/ide/pci/sis5513.c
index 2ba0669f36a1..2bde1b92784a 100644
--- a/drivers/ide/pci/sis5513.c
+++ b/drivers/ide/pci/sis5513.c
@@ -191,7 +191,7 @@ static char* chipset_capability[] = {
191 "ATA 133 (1st gen)", "ATA 133 (2nd gen)" 191 "ATA 133 (1st gen)", "ATA 133 (2nd gen)"
192}; 192};
193 193
194#if defined(DISPLAY_SIS_TIMINGS) && defined(CONFIG_PROC_FS) 194#if defined(DISPLAY_SIS_TIMINGS) && defined(CONFIG_IDE_PROC_FS)
195#include <linux/stat.h> 195#include <linux/stat.h>
196#include <linux/proc_fs.h> 196#include <linux/proc_fs.h>
197 197
@@ -426,17 +426,7 @@ static int sis_get_info (char *buffer, char **addr, off_t offset, int count)
426 426
427 return len > count ? count : len; 427 return len > count ? count : len;
428} 428}
429#endif /* defined(DISPLAY_SIS_TIMINGS) && defined(CONFIG_PROC_FS) */ 429#endif /* defined(DISPLAY_SIS_TIMINGS) && defined(CONFIG_IDE_PROC_FS) */
430
431static u8 sis5513_ratemask (ide_drive_t *drive)
432{
433 u8 rates[] = { 0, 0, 1, 2, 3, 3, 4, 4 };
434 u8 mode = rates[chipset_family];
435
436 if (!eighty_ninty_three(drive))
437 mode = min(mode, (u8)1);
438 return mode;
439}
440 430
441/* 431/*
442 * Configuration functions 432 * Configuration functions
@@ -563,7 +553,7 @@ static int sis5513_tune_chipset (ide_drive_t *drive, u8 xferspeed)
563 u8 drive_pci, reg, speed; 553 u8 drive_pci, reg, speed;
564 u32 regdw; 554 u32 regdw;
565 555
566 speed = ide_rate_filter(sis5513_ratemask(drive), xferspeed); 556 speed = ide_rate_filter(drive, xferspeed);
567 557
568 /* See config_art_rwp_pio for drive pci config registers */ 558 /* See config_art_rwp_pio for drive pci config registers */
569 drive_pci = 0x40; 559 drive_pci = 0x40;
@@ -648,32 +638,13 @@ static void sis5513_tune_drive (ide_drive_t *drive, u8 pio)
648 (void) config_chipset_for_pio(drive, pio); 638 (void) config_chipset_for_pio(drive, pio);
649} 639}
650 640
651/*
652 * ((id->hw_config & 0x4000|0x2000) && (HWIF(drive)->udma_four))
653 */
654static int config_chipset_for_dma (ide_drive_t *drive)
655{
656 u8 speed = ide_dma_speed(drive, sis5513_ratemask(drive));
657
658#ifdef DEBUG
659 printk("SIS5513: config_chipset_for_dma, drive %d, ultra %x\n",
660 drive->dn, drive->id->dma_ultra);
661#endif
662
663 if (!(speed))
664 return 0;
665
666 sis5513_tune_chipset(drive, speed);
667 return ide_dma_enable(drive);
668}
669
670static int sis5513_config_xfer_rate(ide_drive_t *drive) 641static int sis5513_config_xfer_rate(ide_drive_t *drive)
671{ 642{
672 config_art_rwp_pio(drive, 5); 643 config_art_rwp_pio(drive, 5);
673 644
674 drive->init_speed = 0; 645 drive->init_speed = 0;
675 646
676 if (ide_use_dma(drive) && config_chipset_for_dma(drive)) 647 if (ide_tune_dma(drive))
677 return 0; 648 return 0;
678 649
679 if (ide_use_fast_pio(drive)) 650 if (ide_use_fast_pio(drive))
@@ -826,7 +797,7 @@ static unsigned int __devinit init_chipset_sis5513 (struct pci_dev *dev, const c
826 break; 797 break;
827 } 798 }
828 799
829#if defined(DISPLAY_SIS_TIMINGS) && defined(CONFIG_PROC_FS) 800#if defined(DISPLAY_SIS_TIMINGS) && defined(CONFIG_IDE_PROC_FS)
830 if (!sis_proc) { 801 if (!sis_proc) {
831 sis_proc = 1; 802 sis_proc = 1;
832 bmide_dev = dev; 803 bmide_dev = dev;
@@ -858,6 +829,8 @@ static unsigned int __devinit ata66_sis5513 (ide_hwif_t *hwif)
858 829
859static void __devinit init_hwif_sis5513 (ide_hwif_t *hwif) 830static void __devinit init_hwif_sis5513 (ide_hwif_t *hwif)
860{ 831{
832 u8 udma_rates[] = { 0x00, 0x00, 0x07, 0x1f, 0x3f, 0x3f, 0x7f, 0x7f };
833
861 hwif->autodma = 0; 834 hwif->autodma = 0;
862 835
863 if (!hwif->irq) 836 if (!hwif->irq)
@@ -873,7 +846,8 @@ static void __devinit init_hwif_sis5513 (ide_hwif_t *hwif)
873 } 846 }
874 847
875 hwif->atapi_dma = 1; 848 hwif->atapi_dma = 1;
876 hwif->ultra_mask = 0x7f; 849
850 hwif->ultra_mask = udma_rates[chipset_family];
877 hwif->mwdma_mask = 0x07; 851 hwif->mwdma_mask = 0x07;
878 hwif->swdma_mask = 0x07; 852 hwif->swdma_mask = 0x07;
879 853
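
Here the removed sis5513_ratemask() logic reappears as data: init_hwif_sis5513() now seeds hwif->ultra_mask from udma_rates[chipset_family], so the core's rate filtering sees the correct ceiling without a driver callback. Each mask has one bit per UDMA mode; a quick illustrative helper (not from the patch) makes the encoding explicit:

    /*
     * Illustrative only: decode a UDMA bitmask like the udma_rates[]
     * entries above.  0x07 -> UDMA2 (ATA33), 0x1f -> UDMA4 (ATA66),
     * 0x3f -> UDMA5 (ATA100), 0x7f -> UDMA6 (ATA133).
     */
    static int highest_udma_mode(u8 mask)
    {
        return mask ? fls(mask) - 1 : -1;   /* index of top set bit */
    }
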
diff --git a/drivers/ide/pci/slc90e66.c b/drivers/ide/pci/slc90e66.c
index 852ccb36da1d..c40f291f91e0 100644
--- a/drivers/ide/pci/slc90e66.c
+++ b/drivers/ide/pci/slc90e66.c
@@ -21,15 +21,6 @@
21 21
22#include <asm/io.h> 22#include <asm/io.h>
23 23
24static u8 slc90e66_ratemask (ide_drive_t *drive)
25{
26 u8 mode = 2;
27
28 if (!eighty_ninty_three(drive))
29 mode = min_t(u8, mode, 1);
30 return mode;
31}
32
33static u8 slc90e66_dma_2_pio (u8 xfer_rate) { 24static u8 slc90e66_dma_2_pio (u8 xfer_rate) {
34 switch(xfer_rate) { 25 switch(xfer_rate) {
35 case XFER_UDMA_4: 26 case XFER_UDMA_4:
@@ -122,7 +113,7 @@ static int slc90e66_tune_chipset (ide_drive_t *drive, u8 xferspeed)
122 ide_hwif_t *hwif = HWIF(drive); 113 ide_hwif_t *hwif = HWIF(drive);
123 struct pci_dev *dev = hwif->pci_dev; 114 struct pci_dev *dev = hwif->pci_dev;
124 u8 maslave = hwif->channel ? 0x42 : 0x40; 115 u8 maslave = hwif->channel ? 0x42 : 0x40;
125 u8 speed = ide_rate_filter(slc90e66_ratemask(drive), xferspeed); 116 u8 speed = ide_rate_filter(drive, xferspeed);
126 int sitre = 0, a_speed = 7 << (drive->dn * 4); 117 int sitre = 0, a_speed = 7 << (drive->dn * 4);
127 int u_speed = 0, u_flag = 1 << drive->dn; 118 int u_speed = 0, u_flag = 1 << drive->dn;
128 u16 reg4042, reg44, reg48, reg4a; 119 u16 reg4042, reg44, reg48, reg4a;
@@ -169,22 +160,11 @@ static int slc90e66_tune_chipset (ide_drive_t *drive, u8 xferspeed)
169 return ide_config_drive_speed(drive, speed); 160 return ide_config_drive_speed(drive, speed);
170} 161}
171 162
172static int slc90e66_config_drive_for_dma (ide_drive_t *drive)
173{
174 u8 speed = ide_dma_speed(drive, slc90e66_ratemask(drive));
175
176 if (!speed)
177 return 0;
178
179 (void) slc90e66_tune_chipset(drive, speed);
180 return ide_dma_enable(drive);
181}
182
183static int slc90e66_config_drive_xfer_rate (ide_drive_t *drive) 163static int slc90e66_config_drive_xfer_rate (ide_drive_t *drive)
184{ 164{
185 drive->init_speed = 0; 165 drive->init_speed = 0;
186 166
187 if (ide_use_dma(drive) && slc90e66_config_drive_for_dma(drive)) 167 if (ide_tune_dma(drive))
188 return 0; 168 return 0;
189 169
190 if (ide_use_fast_pio(drive)) 170 if (ide_use_fast_pio(drive))
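
The recurring two-line conversion — ide_use_dma(drive) && <driver>_config_drive_for_dma(drive) becoming a single ide_tune_dma(drive) — works because the deleted wrappers were all the same boilerplate. The real helper lives in the IDE core and is not part of this diff; a rough sketch of its shape, under that assumption:

    /*
     * Rough shape of ide_tune_dma() (assumption: the real body lives
     * in the IDE core, outside this diff).  It bundles the probe and
     * the mode programming the drivers used to open-code.
     */
    static int ide_tune_dma_sketch(ide_drive_t *drive)
    {
        u8 speed;

        if (!ide_use_dma(drive))
            return 0;

        speed = ide_max_dma_mode(drive);    /* 0: no usable DMA mode */
        if (!speed)
            return 0;

        /* program the chipset for `speed`, then report success so
         * callers skip their PIO fallback */
        return 1;
    }
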
diff --git a/drivers/ide/pci/tc86c001.c b/drivers/ide/pci/tc86c001.c
index 0b6d81d6ce48..cee619bb2eaf 100644
--- a/drivers/ide/pci/tc86c001.c
+++ b/drivers/ide/pci/tc86c001.c
@@ -13,18 +13,13 @@
13#include <linux/pci.h> 13#include <linux/pci.h>
14#include <linux/ide.h> 14#include <linux/ide.h>
15 15
16static inline u8 tc86c001_ratemask(ide_drive_t *drive)
17{
18 return eighty_ninty_three(drive) ? 2 : 1;
19}
20
21static int tc86c001_tune_chipset(ide_drive_t *drive, u8 speed) 16static int tc86c001_tune_chipset(ide_drive_t *drive, u8 speed)
22{ 17{
23 ide_hwif_t *hwif = HWIF(drive); 18 ide_hwif_t *hwif = HWIF(drive);
24 unsigned long scr_port = hwif->config_data + (drive->dn ? 0x02 : 0x00); 19 unsigned long scr_port = hwif->config_data + (drive->dn ? 0x02 : 0x00);
25 u16 mode, scr = hwif->INW(scr_port); 20 u16 mode, scr = hwif->INW(scr_port);
26 21
27 speed = ide_rate_filter(tc86c001_ratemask(drive), speed); 22 speed = ide_rate_filter(drive, speed);
28 23
29 switch (speed) { 24 switch (speed) {
30 case XFER_UDMA_4: mode = 0x00c0; break; 25 case XFER_UDMA_4: mode = 0x00c0; break;
@@ -172,20 +167,9 @@ static int tc86c001_busproc(ide_drive_t *drive, int state)
172 return 0; 167 return 0;
173} 168}
174 169
175static int config_chipset_for_dma(ide_drive_t *drive)
176{
177 u8 speed = ide_dma_speed(drive, tc86c001_ratemask(drive));
178
179 if (!speed)
180 return 0;
181
182 (void) tc86c001_tune_chipset(drive, speed);
183 return ide_dma_enable(drive);
184}
185
186static int tc86c001_config_drive_xfer_rate(ide_drive_t *drive) 170static int tc86c001_config_drive_xfer_rate(ide_drive_t *drive)
187{ 171{
188 if (ide_use_dma(drive) && config_chipset_for_dma(drive)) 172 if (ide_tune_dma(drive))
189 return 0; 173 return 0;
190 174
191 if (ide_use_fast_pio(drive)) 175 if (ide_use_fast_pio(drive))
diff --git a/drivers/ide/pci/triflex.c b/drivers/ide/pci/triflex.c
index 5e06179c3469..35e8c612638f 100644
--- a/drivers/ide/pci/triflex.c
+++ b/drivers/ide/pci/triflex.c
@@ -48,7 +48,7 @@ static int triflex_tune_chipset(ide_drive_t *drive, u8 xferspeed)
48 u16 timing = 0; 48 u16 timing = 0;
49 u32 triflex_timings = 0; 49 u32 triflex_timings = 0;
50 u8 unit = (drive->select.b.unit & 0x01); 50 u8 unit = (drive->select.b.unit & 0x01);
51 u8 speed = ide_rate_filter(0, xferspeed); 51 u8 speed = ide_rate_filter(drive, xferspeed);
52 52
53 pci_read_config_dword(dev, channel_offset, &triflex_timings); 53 pci_read_config_dword(dev, channel_offset, &triflex_timings);
54 54
@@ -100,20 +100,9 @@ static void triflex_tune_drive(ide_drive_t *drive, u8 pio)
100 (void) triflex_tune_chipset(drive, (XFER_PIO_0 + use_pio)); 100 (void) triflex_tune_chipset(drive, (XFER_PIO_0 + use_pio));
101} 101}
102 102
103static int triflex_config_drive_for_dma(ide_drive_t *drive)
104{
105 int speed = ide_dma_speed(drive, 0); /* No ultra speeds */
106
107 if (!speed)
108 return 0;
109
110 (void) triflex_tune_chipset(drive, speed);
111 return ide_dma_enable(drive);
112}
113
114static int triflex_config_drive_xfer_rate(ide_drive_t *drive) 103static int triflex_config_drive_xfer_rate(ide_drive_t *drive)
115{ 104{
116 if (ide_use_dma(drive) && triflex_config_drive_for_dma(drive)) 105 if (ide_tune_dma(drive))
117 return 0; 106 return 0;
118 107
119 triflex_tune_drive(drive, 255); 108 triflex_tune_drive(drive, 255);
diff --git a/drivers/ide/ppc/pmac.c b/drivers/ide/ppc/pmac.c
index a49ebe44babd..45fc36f0f219 100644
--- a/drivers/ide/ppc/pmac.c
+++ b/drivers/ide/ppc/pmac.c
@@ -1276,6 +1276,8 @@ pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif)
1276 /* We probe the hwif now */ 1276 /* We probe the hwif now */
1277 probe_hwif_init(hwif); 1277 probe_hwif_init(hwif);
1278 1278
1279 ide_proc_register_port(hwif);
1280
1279 return 0; 1281 return 0;
1280} 1282}
1281 1283
diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
index 118fb3205ca8..67035ba4bf5e 100644
--- a/drivers/ide/setup-pci.c
+++ b/drivers/ide/setup-pci.c
@@ -702,6 +702,7 @@ out:
702 702
703int ide_setup_pci_device(struct pci_dev *dev, ide_pci_device_t *d) 703int ide_setup_pci_device(struct pci_dev *dev, ide_pci_device_t *d)
704{ 704{
705 ide_hwif_t *hwif = NULL, *mate = NULL;
705 ata_index_t index_list; 706 ata_index_t index_list;
706 int ret; 707 int ret;
707 708
@@ -710,11 +711,19 @@ int ide_setup_pci_device(struct pci_dev *dev, ide_pci_device_t *d)
710 goto out; 711 goto out;
711 712
712 if ((index_list.b.low & 0xf0) != 0xf0) 713 if ((index_list.b.low & 0xf0) != 0xf0)
713 probe_hwif_init_with_fixup(&ide_hwifs[index_list.b.low], d->fixup); 714 hwif = &ide_hwifs[index_list.b.low];
714 if ((index_list.b.high & 0xf0) != 0xf0) 715 if ((index_list.b.high & 0xf0) != 0xf0)
715 probe_hwif_init_with_fixup(&ide_hwifs[index_list.b.high], d->fixup); 716 mate = &ide_hwifs[index_list.b.high];
716 717
717 create_proc_ide_interfaces(); 718 if (hwif)
719 probe_hwif_init_with_fixup(hwif, d->fixup);
720 if (mate)
721 probe_hwif_init_with_fixup(mate, d->fixup);
722
723 if (hwif)
724 ide_proc_register_port(hwif);
725 if (mate)
726 ide_proc_register_port(mate);
718out: 727out:
719 return ret; 728 return ret;
720} 729}
@@ -748,13 +757,22 @@ int ide_setup_pci_devices(struct pci_dev *dev1, struct pci_dev *dev2,
748 } 757 }
749 } 758 }
750 759
751 create_proc_ide_interfaces(); 760 for (i = 0; i < 2; i++) {
761 u8 idx[2] = { index_list[i].b.low, index_list[i].b.high };
762 int j;
763
764 for (j = 0; j < 2; j++) {
765 if ((idx[j] & 0xf0) != 0xf0)
766 ide_proc_register_port(ide_hwifs + idx[j]);
767 }
768 }
752out: 769out:
753 return ret; 770 return ret;
754} 771}
755 772
756EXPORT_SYMBOL_GPL(ide_setup_pci_devices); 773EXPORT_SYMBOL_GPL(ide_setup_pci_devices);
757 774
775#ifdef CONFIG_IDEPCI_PCIBUS_ORDER
758/* 776/*
759 * Module interfaces 777 * Module interfaces
760 */ 778 */
@@ -861,3 +879,4 @@ void __init ide_scan_pcibus (int scan_direction)
861 __pci_register_driver(d, d->driver.owner, d->driver.mod_name); 879 __pci_register_driver(d, d->driver.owner, d->driver.mod_name);
862 } 880 }
863} 881}
882#endif
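
Both PCI setup paths now split hwif discovery from /proc registration: the indexes come out of index_list, anything with 0xf in the high nibble means "no interface", and each surviving port is handed to ide_proc_register_port() instead of a blanket create_proc_ide_interfaces() rescan. The validity test, restated as an illustrative helper:

    /*
     * Illustrative restatement of the (idx & 0xf0) != 0xf0 checks in
     * the hunks above: a high nibble of 0xf marks an unused slot in
     * ata_index_t.
     */
    static inline int ide_index_valid(u8 idx)
    {
        return (idx & 0xf0) != 0xf0;
    }
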
diff --git a/drivers/ieee1394/Kconfig b/drivers/ieee1394/Kconfig
index 61d7809a5a26..8012b3b0ce75 100644
--- a/drivers/ieee1394/Kconfig
+++ b/drivers/ieee1394/Kconfig
@@ -1,4 +1,7 @@
1menu "IEEE 1394 (FireWire) support" 1menu "IEEE 1394 (FireWire) support"
2 depends on PCI || BROKEN
3
4source "drivers/firewire/Kconfig"
2 5
3config IEEE1394 6config IEEE1394
4 tristate "IEEE 1394 (FireWire) support" 7 tristate "IEEE 1394 (FireWire) support"
diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c
index 6a1a0572275e..835937e38529 100644
--- a/drivers/ieee1394/nodemgr.c
+++ b/drivers/ieee1394/nodemgr.c
@@ -1702,7 +1702,7 @@ static int nodemgr_host_thread(void *__hi)
1702 generation = get_hpsb_generation(host); 1702 generation = get_hpsb_generation(host);
1703 1703
1704 /* If we get a reset before we are done waiting, then 1704 /* If we get a reset before we are done waiting, then
1705 * start the the waiting over again */ 1705 * start the waiting over again */
1706 if (generation != g) 1706 if (generation != g)
1707 g = generation, i = 0; 1707 g = generation, i = 0;
1708 } 1708 }
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 66b36de9fa6f..994decc7bcf2 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -1,4 +1,5 @@
1menu "InfiniBand support" 1menu "InfiniBand support"
2 depends on HAS_IOMEM
2 3
3config INFINIBAND 4config INFINIBAND
4 depends on PCI || BROKEN 5 depends on PCI || BROKEN
@@ -29,6 +30,11 @@ config INFINIBAND_USER_ACCESS
29 libibverbs, libibcm and a hardware driver library from 30 libibverbs, libibcm and a hardware driver library from
30 <http://www.openib.org>. 31 <http://www.openib.org>.
31 32
33config INFINIBAND_USER_MEM
34 bool
35 depends on INFINIBAND_USER_ACCESS != n
36 default y
37
32config INFINIBAND_ADDR_TRANS 38config INFINIBAND_ADDR_TRANS
33 bool 39 bool
34 depends on INFINIBAND && INET 40 depends on INFINIBAND && INET
@@ -40,6 +46,8 @@ source "drivers/infiniband/hw/ehca/Kconfig"
40source "drivers/infiniband/hw/amso1100/Kconfig" 46source "drivers/infiniband/hw/amso1100/Kconfig"
41source "drivers/infiniband/hw/cxgb3/Kconfig" 47source "drivers/infiniband/hw/cxgb3/Kconfig"
42 48
49source "drivers/infiniband/hw/mlx4/Kconfig"
50
43source "drivers/infiniband/ulp/ipoib/Kconfig" 51source "drivers/infiniband/ulp/ipoib/Kconfig"
44 52
45source "drivers/infiniband/ulp/srp/Kconfig" 53source "drivers/infiniband/ulp/srp/Kconfig"
diff --git a/drivers/infiniband/Makefile b/drivers/infiniband/Makefile
index da2066c4f22c..75f325e40b54 100644
--- a/drivers/infiniband/Makefile
+++ b/drivers/infiniband/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_INFINIBAND_IPATH) += hw/ipath/
4obj-$(CONFIG_INFINIBAND_EHCA) += hw/ehca/ 4obj-$(CONFIG_INFINIBAND_EHCA) += hw/ehca/
5obj-$(CONFIG_INFINIBAND_AMSO1100) += hw/amso1100/ 5obj-$(CONFIG_INFINIBAND_AMSO1100) += hw/amso1100/
6obj-$(CONFIG_INFINIBAND_CXGB3) += hw/cxgb3/ 6obj-$(CONFIG_INFINIBAND_CXGB3) += hw/cxgb3/
7obj-$(CONFIG_MLX4_INFINIBAND) += hw/mlx4/
7obj-$(CONFIG_INFINIBAND_IPOIB) += ulp/ipoib/ 8obj-$(CONFIG_INFINIBAND_IPOIB) += ulp/ipoib/
8obj-$(CONFIG_INFINIBAND_SRP) += ulp/srp/ 9obj-$(CONFIG_INFINIBAND_SRP) += ulp/srp/
9obj-$(CONFIG_INFINIBAND_ISER) += ulp/iser/ 10obj-$(CONFIG_INFINIBAND_ISER) += ulp/iser/
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index 189e5d4b9b17..cb1ab3ea4998 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o \
9 9
10ib_core-y := packer.o ud_header.o verbs.o sysfs.o \ 10ib_core-y := packer.o ud_header.o verbs.o sysfs.o \
11 device.o fmr_pool.o cache.o 11 device.o fmr_pool.o cache.o
12ib_core-$(CONFIG_INFINIBAND_USER_MEM) += umem.o
12 13
13ib_mad-y := mad.o smi.o agent.o mad_rmpp.o 14ib_mad-y := mad.o smi.o agent.o mad_rmpp.o
14 15
@@ -28,5 +29,4 @@ ib_umad-y := user_mad.o
28 29
29ib_ucm-y := ucm.o 30ib_ucm-y := ucm.o
30 31
31ib_uverbs-y := uverbs_main.o uverbs_cmd.o uverbs_mem.o \ 32ib_uverbs-y := uverbs_main.o uverbs_cmd.o uverbs_marshall.o
32 uverbs_marshall.o
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 7fabb425b033..592c90aa3183 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -613,6 +613,8 @@ static void __exit ib_core_cleanup(void)
613{ 613{
614 ib_cache_cleanup(); 614 ib_cache_cleanup();
615 ib_sysfs_cleanup(); 615 ib_sysfs_cleanup();
616 /* Make sure that any pending umem accounting work is done. */
617 flush_scheduled_work();
616} 618}
617 619
618module_init(ib_core_init); 620module_init(ib_core_init);
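
flush_scheduled_work() moves into ib_core's own cleanup (and out of ib_uverbs_cleanup(), further down) because the deferred umem accounting introduced below is scheduled from ib_core. The ordering this enforces, as a minimal sketch:

    /*
     * Minimal sketch: work deferred with schedule_work() must be
     * flushed before the module that owns the work handler unloads,
     * or a pending ib_umem_account() could run against freed text.
     */
    static void __exit example_cleanup(void)
    {
        flush_scheduled_work();   /* drain pending accounting work */
        /* ... now safe to tear down module state ... */
    }
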
diff --git a/drivers/infiniband/core/uverbs_mem.c b/drivers/infiniband/core/umem.c
index c95fe952abd5..f32ca5fbb26b 100644
--- a/drivers/infiniband/core/uverbs_mem.c
+++ b/drivers/infiniband/core/umem.c
@@ -39,13 +39,6 @@
39 39
40#include "uverbs.h" 40#include "uverbs.h"
41 41
42struct ib_umem_account_work {
43 struct work_struct work;
44 struct mm_struct *mm;
45 unsigned long diff;
46};
47
48
49static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty) 42static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)
50{ 43{
51 struct ib_umem_chunk *chunk, *tmp; 44 struct ib_umem_chunk *chunk, *tmp;
@@ -64,35 +57,56 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
64 } 57 }
65} 58}
66 59
67int ib_umem_get(struct ib_device *dev, struct ib_umem *mem, 60/**
68 void *addr, size_t size, int write) 61 * ib_umem_get - Pin and DMA map userspace memory.
62 * @context: userspace context to pin memory for
63 * @addr: userspace virtual address to start at
64 * @size: length of region to pin
65 * @access: IB_ACCESS_xxx flags for memory being pinned
66 */
67struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
68 size_t size, int access)
69{ 69{
70 struct ib_umem *umem;
70 struct page **page_list; 71 struct page **page_list;
71 struct ib_umem_chunk *chunk; 72 struct ib_umem_chunk *chunk;
72 unsigned long locked; 73 unsigned long locked;
73 unsigned long lock_limit; 74 unsigned long lock_limit;
74 unsigned long cur_base; 75 unsigned long cur_base;
75 unsigned long npages; 76 unsigned long npages;
76 int ret = 0; 77 int ret;
77 int off; 78 int off;
78 int i; 79 int i;
79 80
80 if (!can_do_mlock()) 81 if (!can_do_mlock())
81 return -EPERM; 82 return ERR_PTR(-EPERM);
82 83
83 page_list = (struct page **) __get_free_page(GFP_KERNEL); 84 umem = kmalloc(sizeof *umem, GFP_KERNEL);
84 if (!page_list) 85 if (!umem)
85 return -ENOMEM; 86 return ERR_PTR(-ENOMEM);
87
88 umem->context = context;
89 umem->length = size;
90 umem->offset = addr & ~PAGE_MASK;
91 umem->page_size = PAGE_SIZE;
92 /*
93 * We ask for writable memory if any access flags other than
94 * "remote read" are set. "Local write" and "remote write"
95 * obviously require write access. "Remote atomic" can do
96 * things like fetch and add, which will modify memory, and
97 * "MW bind" can change permissions by binding a window.
98 */
99 umem->writable = !!(access & ~IB_ACCESS_REMOTE_READ);
86 100
87 mem->user_base = (unsigned long) addr; 101 INIT_LIST_HEAD(&umem->chunk_list);
88 mem->length = size;
89 mem->offset = (unsigned long) addr & ~PAGE_MASK;
90 mem->page_size = PAGE_SIZE;
91 mem->writable = write;
92 102
93 INIT_LIST_HEAD(&mem->chunk_list); 103 page_list = (struct page **) __get_free_page(GFP_KERNEL);
104 if (!page_list) {
105 kfree(umem);
106 return ERR_PTR(-ENOMEM);
107 }
94 108
95 npages = PAGE_ALIGN(size + mem->offset) >> PAGE_SHIFT; 109 npages = PAGE_ALIGN(size + umem->offset) >> PAGE_SHIFT;
96 110
97 down_write(&current->mm->mmap_sem); 111 down_write(&current->mm->mmap_sem);
98 112
@@ -104,13 +118,13 @@ int ib_umem_get(struct ib_device *dev, struct ib_umem *mem,
104 goto out; 118 goto out;
105 } 119 }
106 120
107 cur_base = (unsigned long) addr & PAGE_MASK; 121 cur_base = addr & PAGE_MASK;
108 122
109 while (npages) { 123 while (npages) {
110 ret = get_user_pages(current, current->mm, cur_base, 124 ret = get_user_pages(current, current->mm, cur_base,
111 min_t(int, npages, 125 min_t(int, npages,
112 PAGE_SIZE / sizeof (struct page *)), 126 PAGE_SIZE / sizeof (struct page *)),
113 1, !write, page_list, NULL); 127 1, !umem->writable, page_list, NULL);
114 128
115 if (ret < 0) 129 if (ret < 0)
116 goto out; 130 goto out;
@@ -136,7 +150,7 @@ int ib_umem_get(struct ib_device *dev, struct ib_umem *mem,
136 chunk->page_list[i].length = PAGE_SIZE; 150 chunk->page_list[i].length = PAGE_SIZE;
137 } 151 }
138 152
139 chunk->nmap = ib_dma_map_sg(dev, 153 chunk->nmap = ib_dma_map_sg(context->device,
140 &chunk->page_list[0], 154 &chunk->page_list[0],
141 chunk->nents, 155 chunk->nents,
142 DMA_BIDIRECTIONAL); 156 DMA_BIDIRECTIONAL);
@@ -151,75 +165,94 @@ int ib_umem_get(struct ib_device *dev, struct ib_umem *mem,
151 165
152 ret -= chunk->nents; 166 ret -= chunk->nents;
153 off += chunk->nents; 167 off += chunk->nents;
154 list_add_tail(&chunk->list, &mem->chunk_list); 168 list_add_tail(&chunk->list, &umem->chunk_list);
155 } 169 }
156 170
157 ret = 0; 171 ret = 0;
158 } 172 }
159 173
160out: 174out:
161 if (ret < 0) 175 if (ret < 0) {
162 __ib_umem_release(dev, mem, 0); 176 __ib_umem_release(context->device, umem, 0);
163 else 177 kfree(umem);
178 } else
164 current->mm->locked_vm = locked; 179 current->mm->locked_vm = locked;
165 180
166 up_write(&current->mm->mmap_sem); 181 up_write(&current->mm->mmap_sem);
167 free_page((unsigned long) page_list); 182 free_page((unsigned long) page_list);
168 183
169 return ret; 184 return ret < 0 ? ERR_PTR(ret) : umem;
170} 185}
186EXPORT_SYMBOL(ib_umem_get);
171 187
172void ib_umem_release(struct ib_device *dev, struct ib_umem *umem) 188static void ib_umem_account(struct work_struct *work)
173{ 189{
174 __ib_umem_release(dev, umem, 1); 190 struct ib_umem *umem = container_of(work, struct ib_umem, work);
175
176 down_write(&current->mm->mmap_sem);
177 current->mm->locked_vm -=
178 PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT;
179 up_write(&current->mm->mmap_sem);
180}
181 191
182static void ib_umem_account(struct work_struct *_work) 192 down_write(&umem->mm->mmap_sem);
183{ 193 umem->mm->locked_vm -= umem->diff;
184 struct ib_umem_account_work *work = 194 up_write(&umem->mm->mmap_sem);
185 container_of(_work, struct ib_umem_account_work, work); 195 mmput(umem->mm);
186 196 kfree(umem);
187 down_write(&work->mm->mmap_sem);
188 work->mm->locked_vm -= work->diff;
189 up_write(&work->mm->mmap_sem);
190 mmput(work->mm);
191 kfree(work);
192} 197}
193 198
194void ib_umem_release_on_close(struct ib_device *dev, struct ib_umem *umem) 199/**
200 * ib_umem_release - release memory pinned with ib_umem_get
201 * @umem: umem struct to release
202 */
203void ib_umem_release(struct ib_umem *umem)
195{ 204{
196 struct ib_umem_account_work *work; 205 struct ib_ucontext *context = umem->context;
197 struct mm_struct *mm; 206 struct mm_struct *mm;
207 unsigned long diff;
198 208
199 __ib_umem_release(dev, umem, 1); 209 __ib_umem_release(umem->context->device, umem, 1);
200 210
201 mm = get_task_mm(current); 211 mm = get_task_mm(current);
202 if (!mm) 212 if (!mm)
203 return; 213 return;
204 214
215 diff = PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT;
216
205 /* 217 /*
206 * We may be called with the mm's mmap_sem already held. This 218 * We may be called with the mm's mmap_sem already held. This
207 * can happen when a userspace munmap() is the call that drops 219 * can happen when a userspace munmap() is the call that drops
208 * the last reference to our file and calls our release 220 * the last reference to our file and calls our release
209 * method. If there are memory regions to destroy, we'll end 221 * method. If there are memory regions to destroy, we'll end
210 * up here and not be able to take the mmap_sem. Therefore we 222 * up here and not be able to take the mmap_sem. In that case
211 * defer the vm_locked accounting to the system workqueue. 223 * we defer the vm_locked accounting to the system workqueue.
212 */ 224 */
225 if (context->closing && !down_write_trylock(&mm->mmap_sem)) {
226 INIT_WORK(&umem->work, ib_umem_account);
227 umem->mm = mm;
228 umem->diff = diff;
213 229
214 work = kmalloc(sizeof *work, GFP_KERNEL); 230 schedule_work(&umem->work);
215 if (!work) {
216 mmput(mm);
217 return; 231 return;
218 } 232 } else
233 down_write(&mm->mmap_sem);
234
235 current->mm->locked_vm -= diff;
236 up_write(&mm->mmap_sem);
237 mmput(mm);
238 kfree(umem);
239}
240EXPORT_SYMBOL(ib_umem_release);
241
242int ib_umem_page_count(struct ib_umem *umem)
243{
244 struct ib_umem_chunk *chunk;
245 int shift;
246 int i;
247 int n;
248
249 shift = ilog2(umem->page_size);
219 250
220 INIT_WORK(&work->work, ib_umem_account); 251 n = 0;
221 work->mm = mm; 252 list_for_each_entry(chunk, &umem->chunk_list, list)
222 work->diff = PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT; 253 for (i = 0; i < chunk->nmap; ++i)
254 n += sg_dma_len(&chunk->page_list[i]) >> shift;
223 255
224 schedule_work(&work->work); 256 return n;
225} 257}
258EXPORT_SYMBOL(ib_umem_page_count);
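
This is the core of the umem rework: ib_umem_get() now allocates and returns the struct ib_umem itself (ERR_PTR on failure) instead of filling caller-owned storage, writability is derived from the access flags rather than passed separately, and ib_umem_release() takes only the umem. A hedged sketch of the calling convention from a provider's point of view — the function and comment placeholders here are invented; the real callers are in the driver hunks below:

    /*
     * Hypothetical provider (names invented for the sketch).  Real
     * conversions to this pattern appear in the c2, cxgb3, ehca and
     * ipath hunks below.
     */
    static struct ib_mr *example_reg_user_mr(struct ib_pd *pd, u64 start,
                                             u64 length, u64 virt,
                                             int access,
                                             struct ib_udata *udata)
    {
        struct ib_umem *umem;

        umem = ib_umem_get(pd->uobject->context, start, length, access);
        if (IS_ERR(umem))
            return (void *) umem;    /* propagate the ERR_PTR as-is */

        /* ... a real driver walks umem->chunk_list to program its
         * HCA and stashes `umem` so dereg can release it ... */

        ib_umem_release(umem);       /* undo the pin on failure */
        return ERR_PTR(-ENOSYS);     /* placeholder for the sketch */
    }
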
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index 102a59c033ff..c33546f9e961 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -45,6 +45,7 @@
45#include <linux/completion.h> 45#include <linux/completion.h>
46 46
47#include <rdma/ib_verbs.h> 47#include <rdma/ib_verbs.h>
48#include <rdma/ib_umem.h>
48#include <rdma/ib_user_verbs.h> 49#include <rdma/ib_user_verbs.h>
49 50
50/* 51/*
@@ -163,11 +164,6 @@ void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr);
163void ib_uverbs_event_handler(struct ib_event_handler *handler, 164void ib_uverbs_event_handler(struct ib_event_handler *handler,
164 struct ib_event *event); 165 struct ib_event *event);
165 166
166int ib_umem_get(struct ib_device *dev, struct ib_umem *mem,
167 void *addr, size_t size, int write);
168void ib_umem_release(struct ib_device *dev, struct ib_umem *umem);
169void ib_umem_release_on_close(struct ib_device *dev, struct ib_umem *umem);
170
171#define IB_UVERBS_DECLARE_CMD(name) \ 167#define IB_UVERBS_DECLARE_CMD(name) \
172 ssize_t ib_uverbs_##name(struct ib_uverbs_file *file, \ 168 ssize_t ib_uverbs_##name(struct ib_uverbs_file *file, \
173 const char __user *buf, int in_len, \ 169 const char __user *buf, int in_len, \
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index bab66769be14..01d70084aebe 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (c) 2005 Topspin Communications. All rights reserved. 2 * Copyright (c) 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved. 3 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.
4 * Copyright (c) 2005 PathScale, Inc. All rights reserved. 4 * Copyright (c) 2005 PathScale, Inc. All rights reserved.
5 * Copyright (c) 2006 Mellanox Technologies. All rights reserved. 5 * Copyright (c) 2006 Mellanox Technologies. All rights reserved.
6 * 6 *
@@ -295,6 +295,7 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
295 INIT_LIST_HEAD(&ucontext->qp_list); 295 INIT_LIST_HEAD(&ucontext->qp_list);
296 INIT_LIST_HEAD(&ucontext->srq_list); 296 INIT_LIST_HEAD(&ucontext->srq_list);
297 INIT_LIST_HEAD(&ucontext->ah_list); 297 INIT_LIST_HEAD(&ucontext->ah_list);
298 ucontext->closing = 0;
298 299
299 resp.num_comp_vectors = file->device->num_comp_vectors; 300 resp.num_comp_vectors = file->device->num_comp_vectors;
300 301
@@ -573,7 +574,7 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
573 struct ib_uverbs_reg_mr cmd; 574 struct ib_uverbs_reg_mr cmd;
574 struct ib_uverbs_reg_mr_resp resp; 575 struct ib_uverbs_reg_mr_resp resp;
575 struct ib_udata udata; 576 struct ib_udata udata;
576 struct ib_umem_object *obj; 577 struct ib_uobject *uobj;
577 struct ib_pd *pd; 578 struct ib_pd *pd;
578 struct ib_mr *mr; 579 struct ib_mr *mr;
579 int ret; 580 int ret;
@@ -599,35 +600,21 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
599 !(cmd.access_flags & IB_ACCESS_LOCAL_WRITE)) 600 !(cmd.access_flags & IB_ACCESS_LOCAL_WRITE))
600 return -EINVAL; 601 return -EINVAL;
601 602
602 obj = kmalloc(sizeof *obj, GFP_KERNEL); 603 uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
603 if (!obj) 604 if (!uobj)
604 return -ENOMEM; 605 return -ENOMEM;
605 606
606 init_uobj(&obj->uobject, 0, file->ucontext, &mr_lock_key); 607 init_uobj(uobj, 0, file->ucontext, &mr_lock_key);
607 down_write(&obj->uobject.mutex); 608 down_write(&uobj->mutex);
608
609 /*
610 * We ask for writable memory if any access flags other than
611 * "remote read" are set. "Local write" and "remote write"
612 * obviously require write access. "Remote atomic" can do
613 * things like fetch and add, which will modify memory, and
614 * "MW bind" can change permissions by binding a window.
615 */
616 ret = ib_umem_get(file->device->ib_dev, &obj->umem,
617 (void *) (unsigned long) cmd.start, cmd.length,
618 !!(cmd.access_flags & ~IB_ACCESS_REMOTE_READ));
619 if (ret)
620 goto err_free;
621
622 obj->umem.virt_base = cmd.hca_va;
623 609
624 pd = idr_read_pd(cmd.pd_handle, file->ucontext); 610 pd = idr_read_pd(cmd.pd_handle, file->ucontext);
625 if (!pd) { 611 if (!pd) {
626 ret = -EINVAL; 612 ret = -EINVAL;
627 goto err_release; 613 goto err_free;
628 } 614 }
629 615
630 mr = pd->device->reg_user_mr(pd, &obj->umem, cmd.access_flags, &udata); 616 mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
617 cmd.access_flags, &udata);
631 if (IS_ERR(mr)) { 618 if (IS_ERR(mr)) {
632 ret = PTR_ERR(mr); 619 ret = PTR_ERR(mr);
633 goto err_put; 620 goto err_put;
@@ -635,19 +622,19 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
635 622
636 mr->device = pd->device; 623 mr->device = pd->device;
637 mr->pd = pd; 624 mr->pd = pd;
638 mr->uobject = &obj->uobject; 625 mr->uobject = uobj;
639 atomic_inc(&pd->usecnt); 626 atomic_inc(&pd->usecnt);
640 atomic_set(&mr->usecnt, 0); 627 atomic_set(&mr->usecnt, 0);
641 628
642 obj->uobject.object = mr; 629 uobj->object = mr;
643 ret = idr_add_uobj(&ib_uverbs_mr_idr, &obj->uobject); 630 ret = idr_add_uobj(&ib_uverbs_mr_idr, uobj);
644 if (ret) 631 if (ret)
645 goto err_unreg; 632 goto err_unreg;
646 633
647 memset(&resp, 0, sizeof resp); 634 memset(&resp, 0, sizeof resp);
648 resp.lkey = mr->lkey; 635 resp.lkey = mr->lkey;
649 resp.rkey = mr->rkey; 636 resp.rkey = mr->rkey;
650 resp.mr_handle = obj->uobject.id; 637 resp.mr_handle = uobj->id;
651 638
652 if (copy_to_user((void __user *) (unsigned long) cmd.response, 639 if (copy_to_user((void __user *) (unsigned long) cmd.response,
653 &resp, sizeof resp)) { 640 &resp, sizeof resp)) {
@@ -658,17 +645,17 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
658 put_pd_read(pd); 645 put_pd_read(pd);
659 646
660 mutex_lock(&file->mutex); 647 mutex_lock(&file->mutex);
661 list_add_tail(&obj->uobject.list, &file->ucontext->mr_list); 648 list_add_tail(&uobj->list, &file->ucontext->mr_list);
662 mutex_unlock(&file->mutex); 649 mutex_unlock(&file->mutex);
663 650
664 obj->uobject.live = 1; 651 uobj->live = 1;
665 652
666 up_write(&obj->uobject.mutex); 653 up_write(&uobj->mutex);
667 654
668 return in_len; 655 return in_len;
669 656
670err_copy: 657err_copy:
671 idr_remove_uobj(&ib_uverbs_mr_idr, &obj->uobject); 658 idr_remove_uobj(&ib_uverbs_mr_idr, uobj);
672 659
673err_unreg: 660err_unreg:
674 ib_dereg_mr(mr); 661 ib_dereg_mr(mr);
@@ -676,11 +663,8 @@ err_unreg:
676err_put: 663err_put:
677 put_pd_read(pd); 664 put_pd_read(pd);
678 665
679err_release:
680 ib_umem_release(file->device->ib_dev, &obj->umem);
681
682err_free: 666err_free:
683 put_uobj_write(&obj->uobject); 667 put_uobj_write(uobj);
684 return ret; 668 return ret;
685} 669}
686 670
@@ -691,7 +675,6 @@ ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
691 struct ib_uverbs_dereg_mr cmd; 675 struct ib_uverbs_dereg_mr cmd;
692 struct ib_mr *mr; 676 struct ib_mr *mr;
693 struct ib_uobject *uobj; 677 struct ib_uobject *uobj;
694 struct ib_umem_object *memobj;
695 int ret = -EINVAL; 678 int ret = -EINVAL;
696 679
697 if (copy_from_user(&cmd, buf, sizeof cmd)) 680 if (copy_from_user(&cmd, buf, sizeof cmd))
@@ -701,8 +684,7 @@ ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
701 if (!uobj) 684 if (!uobj)
702 return -EINVAL; 685 return -EINVAL;
703 686
704 memobj = container_of(uobj, struct ib_umem_object, uobject); 687 mr = uobj->object;
705 mr = uobj->object;
706 688
707 ret = ib_dereg_mr(mr); 689 ret = ib_dereg_mr(mr);
708 if (!ret) 690 if (!ret)
@@ -719,8 +701,6 @@ ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
719 list_del(&uobj->list); 701 list_del(&uobj->list);
720 mutex_unlock(&file->mutex); 702 mutex_unlock(&file->mutex);
721 703
722 ib_umem_release(file->device->ib_dev, &memobj->umem);
723
724 put_uobj(uobj); 704 put_uobj(uobj);
725 705
726 return in_len; 706 return in_len;
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index d44e54799651..14d7ccd89195 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -183,6 +183,8 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
183 if (!context) 183 if (!context)
184 return 0; 184 return 0;
185 185
186 context->closing = 1;
187
186 list_for_each_entry_safe(uobj, tmp, &context->ah_list, list) { 188 list_for_each_entry_safe(uobj, tmp, &context->ah_list, list) {
187 struct ib_ah *ah = uobj->object; 189 struct ib_ah *ah = uobj->object;
188 190
@@ -230,16 +232,10 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
230 232
231 list_for_each_entry_safe(uobj, tmp, &context->mr_list, list) { 233 list_for_each_entry_safe(uobj, tmp, &context->mr_list, list) {
232 struct ib_mr *mr = uobj->object; 234 struct ib_mr *mr = uobj->object;
233 struct ib_device *mrdev = mr->device;
234 struct ib_umem_object *memobj;
235 235
236 idr_remove_uobj(&ib_uverbs_mr_idr, uobj); 236 idr_remove_uobj(&ib_uverbs_mr_idr, uobj);
237 ib_dereg_mr(mr); 237 ib_dereg_mr(mr);
238 238 kfree(uobj);
239 memobj = container_of(uobj, struct ib_umem_object, uobject);
240 ib_umem_release_on_close(mrdev, &memobj->umem);
241
242 kfree(memobj);
243 } 239 }
244 240
245 list_for_each_entry_safe(uobj, tmp, &context->pd_list, list) { 241 list_for_each_entry_safe(uobj, tmp, &context->pd_list, list) {
@@ -906,7 +902,6 @@ static void __exit ib_uverbs_cleanup(void)
906 unregister_filesystem(&uverbs_event_fs); 902 unregister_filesystem(&uverbs_event_fs);
907 class_destroy(uverbs_class); 903 class_destroy(uverbs_class);
908 unregister_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES); 904 unregister_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES);
909 flush_scheduled_work();
910 idr_destroy(&ib_uverbs_pd_idr); 905 idr_destroy(&ib_uverbs_pd_idr);
911 idr_destroy(&ib_uverbs_mr_idr); 906 idr_destroy(&ib_uverbs_mr_idr);
912 idr_destroy(&ib_uverbs_mw_idr); 907 idr_destroy(&ib_uverbs_mw_idr);
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c
index 109166223c09..997cf1530762 100644
--- a/drivers/infiniband/hw/amso1100/c2_provider.c
+++ b/drivers/infiniband/hw/amso1100/c2_provider.c
@@ -56,6 +56,7 @@
56#include <asm/byteorder.h> 56#include <asm/byteorder.h>
57 57
58#include <rdma/ib_smi.h> 58#include <rdma/ib_smi.h>
59#include <rdma/ib_umem.h>
59#include <rdma/ib_user_verbs.h> 60#include <rdma/ib_user_verbs.h>
60#include "c2.h" 61#include "c2.h"
61#include "c2_provider.h" 62#include "c2_provider.h"
@@ -396,6 +397,7 @@ static struct ib_mr *c2_reg_phys_mr(struct ib_pd *ib_pd,
396 } 397 }
397 398
398 mr->pd = to_c2pd(ib_pd); 399 mr->pd = to_c2pd(ib_pd);
400 mr->umem = NULL;
399 pr_debug("%s - page shift %d, pbl_depth %d, total_len %u, " 401 pr_debug("%s - page shift %d, pbl_depth %d, total_len %u, "
400 "*iova_start %llx, first pa %llx, last pa %llx\n", 402 "*iova_start %llx, first pa %llx, last pa %llx\n",
401 __FUNCTION__, page_shift, pbl_depth, total_len, 403 __FUNCTION__, page_shift, pbl_depth, total_len,
@@ -428,8 +430,8 @@ static struct ib_mr *c2_get_dma_mr(struct ib_pd *pd, int acc)
428 return c2_reg_phys_mr(pd, &bl, 1, acc, &kva); 430 return c2_reg_phys_mr(pd, &bl, 1, acc, &kva);
429} 431}
430 432
431static struct ib_mr *c2_reg_user_mr(struct ib_pd *pd, struct ib_umem *region, 433static struct ib_mr *c2_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
432 int acc, struct ib_udata *udata) 434 u64 virt, int acc, struct ib_udata *udata)
433{ 435{
434 u64 *pages; 436 u64 *pages;
435 u64 kva = 0; 437 u64 kva = 0;
@@ -441,15 +443,23 @@ static struct ib_mr *c2_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
441 struct c2_mr *c2mr; 443 struct c2_mr *c2mr;
442 444
443 pr_debug("%s:%u\n", __FUNCTION__, __LINE__); 445 pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
444 shift = ffs(region->page_size) - 1;
445 446
446 c2mr = kmalloc(sizeof(*c2mr), GFP_KERNEL); 447 c2mr = kmalloc(sizeof(*c2mr), GFP_KERNEL);
447 if (!c2mr) 448 if (!c2mr)
448 return ERR_PTR(-ENOMEM); 449 return ERR_PTR(-ENOMEM);
449 c2mr->pd = c2pd; 450 c2mr->pd = c2pd;
450 451
452 c2mr->umem = ib_umem_get(pd->uobject->context, start, length, acc);
453 if (IS_ERR(c2mr->umem)) {
454 err = PTR_ERR(c2mr->umem);
455 kfree(c2mr);
456 return ERR_PTR(err);
457 }
458
459 shift = ffs(c2mr->umem->page_size) - 1;
460
451 n = 0; 461 n = 0;
452 list_for_each_entry(chunk, &region->chunk_list, list) 462 list_for_each_entry(chunk, &c2mr->umem->chunk_list, list)
453 n += chunk->nents; 463 n += chunk->nents;
454 464
455 pages = kmalloc(n * sizeof(u64), GFP_KERNEL); 465 pages = kmalloc(n * sizeof(u64), GFP_KERNEL);
@@ -459,35 +469,34 @@ static struct ib_mr *c2_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
459 } 469 }
460 470
461 i = 0; 471 i = 0;
462 list_for_each_entry(chunk, &region->chunk_list, list) { 472 list_for_each_entry(chunk, &c2mr->umem->chunk_list, list) {
463 for (j = 0; j < chunk->nmap; ++j) { 473 for (j = 0; j < chunk->nmap; ++j) {
464 len = sg_dma_len(&chunk->page_list[j]) >> shift; 474 len = sg_dma_len(&chunk->page_list[j]) >> shift;
465 for (k = 0; k < len; ++k) { 475 for (k = 0; k < len; ++k) {
466 pages[i++] = 476 pages[i++] =
467 sg_dma_address(&chunk->page_list[j]) + 477 sg_dma_address(&chunk->page_list[j]) +
468 (region->page_size * k); 478 (c2mr->umem->page_size * k);
469 } 479 }
470 } 480 }
471 } 481 }
472 482
473 kva = (u64)region->virt_base; 483 kva = virt;
474 err = c2_nsmr_register_phys_kern(to_c2dev(pd->device), 484 err = c2_nsmr_register_phys_kern(to_c2dev(pd->device),
475 pages, 485 pages,
476 region->page_size, 486 c2mr->umem->page_size,
477 i, 487 i,
478 region->length, 488 length,
479 region->offset, 489 c2mr->umem->offset,
480 &kva, 490 &kva,
481 c2_convert_access(acc), 491 c2_convert_access(acc),
482 c2mr); 492 c2mr);
483 kfree(pages); 493 kfree(pages);
484 if (err) { 494 if (err)
485 kfree(c2mr); 495 goto err;
486 return ERR_PTR(err);
487 }
488 return &c2mr->ibmr; 496 return &c2mr->ibmr;
489 497
490err: 498err:
499 ib_umem_release(c2mr->umem);
491 kfree(c2mr); 500 kfree(c2mr);
492 return ERR_PTR(err); 501 return ERR_PTR(err);
493} 502}
@@ -502,8 +511,11 @@ static int c2_dereg_mr(struct ib_mr *ib_mr)
502 err = c2_stag_dealloc(to_c2dev(ib_mr->device), ib_mr->lkey); 511 err = c2_stag_dealloc(to_c2dev(ib_mr->device), ib_mr->lkey);
503 if (err) 512 if (err)
504 pr_debug("c2_stag_dealloc failed: %d\n", err); 513 pr_debug("c2_stag_dealloc failed: %d\n", err);
505 else 514 else {
515 if (mr->umem)
516 ib_umem_release(mr->umem);
506 kfree(mr); 517 kfree(mr);
518 }
507 519
508 return err; 520 return err;
509} 521}
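
With the umem fetched inside the driver, every provider repeats the same flattening loop: walk umem->chunk_list and expand each DMA-mapped scatterlist entry into per-page bus addresses. The c2 hunks above and the cxgb3/ehca/ipath hunks below all follow this idiom, condensed here for reference (illustrative; allocation and error handling omitted):

    /*
     * Condensed chunk-walk (illustrative).  page_size is a power of
     * two, so each mapped segment contributes sg_dma_len >> shift
     * pages starting at its bus address.
     */
    static int fill_page_array(struct ib_umem *umem, u64 *pages)
    {
        struct ib_umem_chunk *chunk;
        int shift = ffs(umem->page_size) - 1;
        int i = 0, j, k, len;

        list_for_each_entry(chunk, &umem->chunk_list, list)
            for (j = 0; j < chunk->nmap; ++j) {
                len = sg_dma_len(&chunk->page_list[j]) >> shift;
                for (k = 0; k < len; ++k)
                    pages[i++] = sg_dma_address(&chunk->page_list[j]) +
                                 umem->page_size * k;
            }
        return i;    /* pages written */
    }
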
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.h b/drivers/infiniband/hw/amso1100/c2_provider.h
index fc906223220f..1076df2ee96a 100644
--- a/drivers/infiniband/hw/amso1100/c2_provider.h
+++ b/drivers/infiniband/hw/amso1100/c2_provider.h
@@ -73,6 +73,7 @@ struct c2_pd {
73struct c2_mr { 73struct c2_mr {
74 struct ib_mr ibmr; 74 struct ib_mr ibmr;
75 struct c2_pd *pd; 75 struct c2_pd *pd;
76 struct ib_umem *umem;
76}; 77};
77 78
78struct c2_av; 79struct c2_av;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index a891493fd340..e7c2c3948037 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -47,6 +47,7 @@
47#include <rdma/iw_cm.h> 47#include <rdma/iw_cm.h>
48#include <rdma/ib_verbs.h> 48#include <rdma/ib_verbs.h>
49#include <rdma/ib_smi.h> 49#include <rdma/ib_smi.h>
50#include <rdma/ib_umem.h>
50#include <rdma/ib_user_verbs.h> 51#include <rdma/ib_user_verbs.h>
51 52
52#include "cxio_hal.h" 53#include "cxio_hal.h"
@@ -443,6 +444,8 @@ static int iwch_dereg_mr(struct ib_mr *ib_mr)
443 remove_handle(rhp, &rhp->mmidr, mmid); 444 remove_handle(rhp, &rhp->mmidr, mmid);
444 if (mhp->kva) 445 if (mhp->kva)
445 kfree((void *) (unsigned long) mhp->kva); 446 kfree((void *) (unsigned long) mhp->kva);
447 if (mhp->umem)
448 ib_umem_release(mhp->umem);
446 PDBG("%s mmid 0x%x ptr %p\n", __FUNCTION__, mmid, mhp); 449 PDBG("%s mmid 0x%x ptr %p\n", __FUNCTION__, mmid, mhp);
447 kfree(mhp); 450 kfree(mhp);
448 return 0; 451 return 0;
@@ -577,8 +580,8 @@ static int iwch_reregister_phys_mem(struct ib_mr *mr,
577} 580}
578 581
579 582
580static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, struct ib_umem *region, 583static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
581 int acc, struct ib_udata *udata) 584 u64 virt, int acc, struct ib_udata *udata)
582{ 585{
583 __be64 *pages; 586 __be64 *pages;
584 int shift, n, len; 587 int shift, n, len;
@@ -591,7 +594,6 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
591 struct iwch_reg_user_mr_resp uresp; 594 struct iwch_reg_user_mr_resp uresp;
592 595
593 PDBG("%s ib_pd %p\n", __FUNCTION__, pd); 596 PDBG("%s ib_pd %p\n", __FUNCTION__, pd);
594 shift = ffs(region->page_size) - 1;
595 597
596 php = to_iwch_pd(pd); 598 php = to_iwch_pd(pd);
597 rhp = php->rhp; 599 rhp = php->rhp;
@@ -599,8 +601,17 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
599 if (!mhp) 601 if (!mhp)
600 return ERR_PTR(-ENOMEM); 602 return ERR_PTR(-ENOMEM);
601 603
604 mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc);
605 if (IS_ERR(mhp->umem)) {
606 err = PTR_ERR(mhp->umem);
607 kfree(mhp);
608 return ERR_PTR(err);
609 }
610
611 shift = ffs(mhp->umem->page_size) - 1;
612
602 n = 0; 613 n = 0;
603 list_for_each_entry(chunk, &region->chunk_list, list) 614 list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
604 n += chunk->nents; 615 n += chunk->nents;
605 616
606 pages = kmalloc(n * sizeof(u64), GFP_KERNEL); 617 pages = kmalloc(n * sizeof(u64), GFP_KERNEL);
@@ -611,13 +622,13 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
611 622
612 i = n = 0; 623 i = n = 0;
613 624
614 list_for_each_entry(chunk, &region->chunk_list, list) 625 list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
615 for (j = 0; j < chunk->nmap; ++j) { 626 for (j = 0; j < chunk->nmap; ++j) {
616 len = sg_dma_len(&chunk->page_list[j]) >> shift; 627 len = sg_dma_len(&chunk->page_list[j]) >> shift;
617 for (k = 0; k < len; ++k) { 628 for (k = 0; k < len; ++k) {
618 pages[i++] = cpu_to_be64(sg_dma_address( 629 pages[i++] = cpu_to_be64(sg_dma_address(
619 &chunk->page_list[j]) + 630 &chunk->page_list[j]) +
620 region->page_size * k); 631 mhp->umem->page_size * k);
621 } 632 }
622 } 633 }
623 634
@@ -625,9 +636,9 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
625 mhp->attr.pdid = php->pdid; 636 mhp->attr.pdid = php->pdid;
626 mhp->attr.zbva = 0; 637 mhp->attr.zbva = 0;
627 mhp->attr.perms = iwch_ib_to_tpt_access(acc); 638 mhp->attr.perms = iwch_ib_to_tpt_access(acc);
628 mhp->attr.va_fbo = region->virt_base; 639 mhp->attr.va_fbo = virt;
629 mhp->attr.page_size = shift - 12; 640 mhp->attr.page_size = shift - 12;
630 mhp->attr.len = (u32) region->length; 641 mhp->attr.len = (u32) length;
631 mhp->attr.pbl_size = i; 642 mhp->attr.pbl_size = i;
632 err = iwch_register_mem(rhp, php, mhp, shift, pages); 643 err = iwch_register_mem(rhp, php, mhp, shift, pages);
633 kfree(pages); 644 kfree(pages);
@@ -650,6 +661,7 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
650 return &mhp->ibmr; 661 return &mhp->ibmr;
651 662
652err: 663err:
664 ib_umem_release(mhp->umem);
653 kfree(mhp); 665 kfree(mhp);
654 return ERR_PTR(err); 666 return ERR_PTR(err);
655} 667}
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.h b/drivers/infiniband/hw/cxgb3/iwch_provider.h
index 93bcc56756bd..48833f3f3bd0 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.h
@@ -73,6 +73,7 @@ struct tpt_attributes {
73 73
74struct iwch_mr { 74struct iwch_mr {
75 struct ib_mr ibmr; 75 struct ib_mr ibmr;
76 struct ib_umem *umem;
76 struct iwch_dev *rhp; 77 struct iwch_dev *rhp;
77 u64 kva; 78 u64 kva;
78 struct tpt_attributes attr; 79 struct tpt_attributes attr;
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 10fb8fbafa0c..f64d42b08674 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -176,6 +176,7 @@ struct ehca_mr {
176 struct ib_mr ib_mr; /* must always be first in ehca_mr */ 176 struct ib_mr ib_mr; /* must always be first in ehca_mr */
177 struct ib_fmr ib_fmr; /* must always be first in ehca_mr */ 177 struct ib_fmr ib_fmr; /* must always be first in ehca_mr */
178 } ib; 178 } ib;
179 struct ib_umem *umem;
179 spinlock_t mrlock; 180 spinlock_t mrlock;
180 181
181 enum ehca_mr_flag flags; 182 enum ehca_mr_flag flags;
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index f284be1c9166..82dda2faf4d0 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -745,6 +745,7 @@ static int comp_pool_callback(struct notifier_block *nfb,
745 745
746 switch (action) { 746 switch (action) {
747 case CPU_UP_PREPARE: 747 case CPU_UP_PREPARE:
748 case CPU_UP_PREPARE_FROZEN:
748 ehca_gen_dbg("CPU: %x (CPU_PREPARE)", cpu); 749 ehca_gen_dbg("CPU: %x (CPU_PREPARE)", cpu);
749 if(!create_comp_task(pool, cpu)) { 750 if(!create_comp_task(pool, cpu)) {
750 ehca_gen_err("Can't create comp_task for cpu: %x", cpu); 751 ehca_gen_err("Can't create comp_task for cpu: %x", cpu);
@@ -752,24 +753,29 @@ static int comp_pool_callback(struct notifier_block *nfb,
752 } 753 }
753 break; 754 break;
754 case CPU_UP_CANCELED: 755 case CPU_UP_CANCELED:
756 case CPU_UP_CANCELED_FROZEN:
755 ehca_gen_dbg("CPU: %x (CPU_CANCELED)", cpu); 757 ehca_gen_dbg("CPU: %x (CPU_CANCELED)", cpu);
756 cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu); 758 cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
757 kthread_bind(cct->task, any_online_cpu(cpu_online_map)); 759 kthread_bind(cct->task, any_online_cpu(cpu_online_map));
758 destroy_comp_task(pool, cpu); 760 destroy_comp_task(pool, cpu);
759 break; 761 break;
760 case CPU_ONLINE: 762 case CPU_ONLINE:
763 case CPU_ONLINE_FROZEN:
761 ehca_gen_dbg("CPU: %x (CPU_ONLINE)", cpu); 764 ehca_gen_dbg("CPU: %x (CPU_ONLINE)", cpu);
762 cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu); 765 cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
763 kthread_bind(cct->task, cpu); 766 kthread_bind(cct->task, cpu);
764 wake_up_process(cct->task); 767 wake_up_process(cct->task);
765 break; 768 break;
766 case CPU_DOWN_PREPARE: 769 case CPU_DOWN_PREPARE:
770 case CPU_DOWN_PREPARE_FROZEN:
767 ehca_gen_dbg("CPU: %x (CPU_DOWN_PREPARE)", cpu); 771 ehca_gen_dbg("CPU: %x (CPU_DOWN_PREPARE)", cpu);
768 break; 772 break;
769 case CPU_DOWN_FAILED: 773 case CPU_DOWN_FAILED:
774 case CPU_DOWN_FAILED_FROZEN:
770 ehca_gen_dbg("CPU: %x (CPU_DOWN_FAILED)", cpu); 775 ehca_gen_dbg("CPU: %x (CPU_DOWN_FAILED)", cpu);
771 break; 776 break;
772 case CPU_DEAD: 777 case CPU_DEAD:
778 case CPU_DEAD_FROZEN:
773 ehca_gen_dbg("CPU: %x (CPU_DEAD)", cpu); 779 ehca_gen_dbg("CPU: %x (CPU_DEAD)", cpu);
774 destroy_comp_task(pool, cpu); 780 destroy_comp_task(pool, cpu);
775 take_over_work(pool, cpu); 781 take_over_work(pool, cpu);
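
The ehca_irq.c change is independent of the umem work: the CPU hotplug notifier gains the *_FROZEN event variants delivered during suspend/resume, each stacked onto the case label of its plain counterpart so the comp-task pool is managed identically in both paths. Schematically:

    /*
     * Schematic notifier (illustrative): every hotplug event now has
     * a *_FROZEN twin, handled by stacking case labels.
     */
    static int example_cpu_callback(struct notifier_block *nfb,
                                    unsigned long action, void *hcpu)
    {
        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:   /* same action during a freeze */
            /* create the per-cpu worker */
            break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
            /* tear the per-cpu worker down */
            break;
        }
        return NOTIFY_OK;
    }
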
diff --git a/drivers/infiniband/hw/ehca/ehca_iverbs.h b/drivers/infiniband/hw/ehca/ehca_iverbs.h
index e14b029332c8..37e7fe0908cf 100644
--- a/drivers/infiniband/hw/ehca/ehca_iverbs.h
+++ b/drivers/infiniband/hw/ehca/ehca_iverbs.h
@@ -78,8 +78,7 @@ struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd,
78 int num_phys_buf, 78 int num_phys_buf,
79 int mr_access_flags, u64 *iova_start); 79 int mr_access_flags, u64 *iova_start);
80 80
81struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, 81struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt,
82 struct ib_umem *region,
83 int mr_access_flags, struct ib_udata *udata); 82 int mr_access_flags, struct ib_udata *udata);
84 83
85int ehca_rereg_phys_mr(struct ib_mr *mr, 84int ehca_rereg_phys_mr(struct ib_mr *mr,
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
index d22ab563633f..84c5bb498563 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.c
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c
@@ -39,6 +39,8 @@
39 * POSSIBILITY OF SUCH DAMAGE. 39 * POSSIBILITY OF SUCH DAMAGE.
40 */ 40 */
41 41
42#include <rdma/ib_umem.h>
43
42#include <asm/current.h> 44#include <asm/current.h>
43 45
44#include "ehca_iverbs.h" 46#include "ehca_iverbs.h"
@@ -238,10 +240,8 @@ reg_phys_mr_exit0:
238 240
239/*----------------------------------------------------------------------*/ 241/*----------------------------------------------------------------------*/
240 242
241struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, 243struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt,
242 struct ib_umem *region, 244 int mr_access_flags, struct ib_udata *udata)
243 int mr_access_flags,
244 struct ib_udata *udata)
245{ 245{
246 struct ib_mr *ib_mr; 246 struct ib_mr *ib_mr;
247 struct ehca_mr *e_mr; 247 struct ehca_mr *e_mr;
@@ -257,11 +257,7 @@ struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd,
257 ehca_gen_err("bad pd=%p", pd); 257 ehca_gen_err("bad pd=%p", pd);
258 return ERR_PTR(-EFAULT); 258 return ERR_PTR(-EFAULT);
259 } 259 }
260 if (!region) { 260
261 ehca_err(pd->device, "bad input values: region=%p", region);
262 ib_mr = ERR_PTR(-EINVAL);
263 goto reg_user_mr_exit0;
264 }
265 if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) && 261 if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
266 !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) || 262 !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
267 ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) && 263 ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
@@ -275,17 +271,10 @@ struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd,
275 ib_mr = ERR_PTR(-EINVAL); 271 ib_mr = ERR_PTR(-EINVAL);
276 goto reg_user_mr_exit0; 272 goto reg_user_mr_exit0;
277 } 273 }
278 if (region->page_size != PAGE_SIZE) {
279 ehca_err(pd->device, "page size not supported, "
280 "region->page_size=%x", region->page_size);
281 ib_mr = ERR_PTR(-EINVAL);
282 goto reg_user_mr_exit0;
283 }
284 274
285 if ((region->length == 0) || 275 if (length == 0 || virt + length < virt) {
286 ((region->virt_base + region->length) < region->virt_base)) {
287 ehca_err(pd->device, "bad input values: length=%lx " 276 ehca_err(pd->device, "bad input values: length=%lx "
288 "virt_base=%lx", region->length, region->virt_base); 277 "virt_base=%lx", length, virt);
289 ib_mr = ERR_PTR(-EINVAL); 278 ib_mr = ERR_PTR(-EINVAL);
290 goto reg_user_mr_exit0; 279 goto reg_user_mr_exit0;
291 } 280 }
@@ -297,40 +286,55 @@ struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd,
297 goto reg_user_mr_exit0; 286 goto reg_user_mr_exit0;
298 } 287 }
299 288
289 e_mr->umem = ib_umem_get(pd->uobject->context, start, length,
290 mr_access_flags);
291 if (IS_ERR(e_mr->umem)) {
292 ib_mr = (void *) e_mr->umem;
293 goto reg_user_mr_exit1;
294 }
295
296 if (e_mr->umem->page_size != PAGE_SIZE) {
297 ehca_err(pd->device, "page size not supported, "
298 "e_mr->umem->page_size=%x", e_mr->umem->page_size);
299 ib_mr = ERR_PTR(-EINVAL);
300 goto reg_user_mr_exit2;
301 }
302
300 /* determine number of MR pages */ 303 /* determine number of MR pages */
301 num_pages_mr = (((region->virt_base % PAGE_SIZE) + region->length + 304 num_pages_mr = (((virt % PAGE_SIZE) + length + PAGE_SIZE - 1) /
302 PAGE_SIZE - 1) / PAGE_SIZE); 305 PAGE_SIZE);
303 num_pages_4k = (((region->virt_base % EHCA_PAGESIZE) + region->length + 306 num_pages_4k = (((virt % EHCA_PAGESIZE) + length + EHCA_PAGESIZE - 1) /
304 EHCA_PAGESIZE - 1) / EHCA_PAGESIZE); 307 EHCA_PAGESIZE);
305 308
306 /* register MR on HCA */ 309 /* register MR on HCA */
307 pginfo.type = EHCA_MR_PGI_USER; 310 pginfo.type = EHCA_MR_PGI_USER;
308 pginfo.num_pages = num_pages_mr; 311 pginfo.num_pages = num_pages_mr;
309 pginfo.num_4k = num_pages_4k; 312 pginfo.num_4k = num_pages_4k;
310 pginfo.region = region; 313 pginfo.region = e_mr->umem;
311 pginfo.next_4k = region->offset / EHCA_PAGESIZE; 314 pginfo.next_4k = e_mr->umem->offset / EHCA_PAGESIZE;
312 pginfo.next_chunk = list_prepare_entry(pginfo.next_chunk, 315 pginfo.next_chunk = list_prepare_entry(pginfo.next_chunk,
313 (&region->chunk_list), 316 (&e_mr->umem->chunk_list),
314 list); 317 list);
315 318
316 ret = ehca_reg_mr(shca, e_mr, (u64*)region->virt_base, 319 ret = ehca_reg_mr(shca, e_mr, (u64*) virt, length, mr_access_flags, e_pd,
317 region->length, mr_access_flags, e_pd, &pginfo, 320 &pginfo, &e_mr->ib.ib_mr.lkey, &e_mr->ib.ib_mr.rkey);
318 &e_mr->ib.ib_mr.lkey, &e_mr->ib.ib_mr.rkey);
319 if (ret) { 321 if (ret) {
320 ib_mr = ERR_PTR(ret); 322 ib_mr = ERR_PTR(ret);
321 goto reg_user_mr_exit1; 323 goto reg_user_mr_exit2;
322 } 324 }
323 325
324 /* successful registration of all pages */ 326 /* successful registration of all pages */
325 return &e_mr->ib.ib_mr; 327 return &e_mr->ib.ib_mr;
326 328
329reg_user_mr_exit2:
330 ib_umem_release(e_mr->umem);
327reg_user_mr_exit1: 331reg_user_mr_exit1:
328 ehca_mr_delete(e_mr); 332 ehca_mr_delete(e_mr);
329reg_user_mr_exit0: 333reg_user_mr_exit0:
330 if (IS_ERR(ib_mr)) 334 if (IS_ERR(ib_mr))
331 ehca_err(pd->device, "rc=%lx pd=%p region=%p mr_access_flags=%x" 335 ehca_err(pd->device, "rc=%lx pd=%p mr_access_flags=%x"
332 " udata=%p", 336 " udata=%p",
333 PTR_ERR(ib_mr), pd, region, mr_access_flags, udata); 337 PTR_ERR(ib_mr), pd, mr_access_flags, udata);
334 return ib_mr; 338 return ib_mr;
335} /* end ehca_reg_user_mr() */ 339} /* end ehca_reg_user_mr() */
336 340
@@ -596,6 +600,9 @@ int ehca_dereg_mr(struct ib_mr *mr)
596 goto dereg_mr_exit0; 600 goto dereg_mr_exit0;
597 } 601 }
598 602
603 if (e_mr->umem)
604 ib_umem_release(e_mr->umem);
605
599 /* successful deregistration */ 606 /* successful deregistration */
600 ehca_mr_delete(e_mr); 607 ehca_mr_delete(e_mr);
601 608
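
Note the replacement sanity check in ehca_reg_user_mr(): with raw start/length/virt arguments the driver validates the range itself, and (virt + length < virt) is the standard unsigned wraparound test. Restated as an illustrative helper:

    /*
     * Illustrative: for u64 arithmetic, base + len wraps exactly when
     * the sum is smaller than base, so this rejects ranges that
     * overflow the address space (zero length is checked separately).
     */
    static inline int range_wraps(u64 base, u64 len)
    {
        return base + len < base;
    }
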
diff --git a/drivers/infiniband/hw/ipath/ipath_mr.c b/drivers/infiniband/hw/ipath/ipath_mr.c
index 31e70732e369..bdeef8d4f279 100644
--- a/drivers/infiniband/hw/ipath/ipath_mr.c
+++ b/drivers/infiniband/hw/ipath/ipath_mr.c
@@ -31,6 +31,7 @@
31 * SOFTWARE. 31 * SOFTWARE.
32 */ 32 */
33 33
34#include <rdma/ib_umem.h>
34#include <rdma/ib_pack.h> 35#include <rdma/ib_pack.h>
35#include <rdma/ib_smi.h> 36#include <rdma/ib_smi.h>
36 37
@@ -147,6 +148,7 @@ struct ib_mr *ipath_reg_phys_mr(struct ib_pd *pd,
147 mr->mr.offset = 0; 148 mr->mr.offset = 0;
148 mr->mr.access_flags = acc; 149 mr->mr.access_flags = acc;
149 mr->mr.max_segs = num_phys_buf; 150 mr->mr.max_segs = num_phys_buf;
151 mr->umem = NULL;
150 152
151 m = 0; 153 m = 0;
152 n = 0; 154 n = 0;
@@ -170,46 +172,56 @@ bail:
170/** 172/**
171 * ipath_reg_user_mr - register a userspace memory region 173 * ipath_reg_user_mr - register a userspace memory region
172 * @pd: protection domain for this memory region 174 * @pd: protection domain for this memory region
173 * @region: the user memory region 175 * @start: starting userspace address
176 * @length: length of region to register
177 * @virt_addr: virtual address to use (from HCA's point of view)
174 * @mr_access_flags: access flags for this memory region 178 * @mr_access_flags: access flags for this memory region
175 * @udata: unused by the InfiniPath driver 179 * @udata: unused by the InfiniPath driver
176 * 180 *
177 * Returns the memory region on success, otherwise returns an errno. 181 * Returns the memory region on success, otherwise returns an errno.
178 */ 182 */
179struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, struct ib_umem *region, 183struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
180 int mr_access_flags, struct ib_udata *udata) 184 u64 virt_addr, int mr_access_flags,
185 struct ib_udata *udata)
181{ 186{
182 struct ipath_mr *mr; 187 struct ipath_mr *mr;
188 struct ib_umem *umem;
183 struct ib_umem_chunk *chunk; 189 struct ib_umem_chunk *chunk;
184 int n, m, i; 190 int n, m, i;
185 struct ib_mr *ret; 191 struct ib_mr *ret;
186 192
187 if (region->length == 0) { 193 if (length == 0) {
188 ret = ERR_PTR(-EINVAL); 194 ret = ERR_PTR(-EINVAL);
189 goto bail; 195 goto bail;
190 } 196 }
191 197
198 umem = ib_umem_get(pd->uobject->context, start, length, mr_access_flags);
199 if (IS_ERR(umem))
200 return (void *) umem;
201
192 n = 0; 202 n = 0;
193 list_for_each_entry(chunk, &region->chunk_list, list) 203 list_for_each_entry(chunk, &umem->chunk_list, list)
194 n += chunk->nents; 204 n += chunk->nents;
195 205
196 mr = alloc_mr(n, &to_idev(pd->device)->lk_table); 206 mr = alloc_mr(n, &to_idev(pd->device)->lk_table);
197 if (!mr) { 207 if (!mr) {
198 ret = ERR_PTR(-ENOMEM); 208 ret = ERR_PTR(-ENOMEM);
209 ib_umem_release(umem);
199 goto bail; 210 goto bail;
200 } 211 }
201 212
202 mr->mr.pd = pd; 213 mr->mr.pd = pd;
203 mr->mr.user_base = region->user_base; 214 mr->mr.user_base = start;
204 mr->mr.iova = region->virt_base; 215 mr->mr.iova = virt_addr;
205 mr->mr.length = region->length; 216 mr->mr.length = length;
206 mr->mr.offset = region->offset; 217 mr->mr.offset = umem->offset;
207 mr->mr.access_flags = mr_access_flags; 218 mr->mr.access_flags = mr_access_flags;
208 mr->mr.max_segs = n; 219 mr->mr.max_segs = n;
220 mr->umem = umem;
209 221
210 m = 0; 222 m = 0;
211 n = 0; 223 n = 0;
212 list_for_each_entry(chunk, &region->chunk_list, list) { 224 list_for_each_entry(chunk, &umem->chunk_list, list) {
213 for (i = 0; i < chunk->nents; i++) { 225 for (i = 0; i < chunk->nents; i++) {
214 void *vaddr; 226 void *vaddr;
215 227
@@ -219,7 +231,7 @@ struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
219 goto bail; 231 goto bail;
220 } 232 }
221 mr->mr.map[m]->segs[n].vaddr = vaddr; 233 mr->mr.map[m]->segs[n].vaddr = vaddr;
222 mr->mr.map[m]->segs[n].length = region->page_size; 234 mr->mr.map[m]->segs[n].length = umem->page_size;
223 n++; 235 n++;
224 if (n == IPATH_SEGSZ) { 236 if (n == IPATH_SEGSZ) {
225 m++; 237 m++;
@@ -253,6 +265,10 @@ int ipath_dereg_mr(struct ib_mr *ibmr)
253 i--; 265 i--;
254 kfree(mr->mr.map[i]); 266 kfree(mr->mr.map[i]);
255 } 267 }
268
269 if (mr->umem)
270 ib_umem_release(mr->umem);
271
256 kfree(mr); 272 kfree(mr);
257 return 0; 273 return 0;
258} 274}
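
The ipath hunks above track an ib_umem API change: rather than being
handed a pre-pinned struct ib_umem by the uverbs core, the driver now
pins the pages itself with ib_umem_get() and owns the release on every
failure path and again in ipath_dereg_mr(). A minimal sketch of the
pattern, assuming this era's chunk-list layout in <rdma/ib_umem.h>
(the function name is illustrative, not part of the patch):

	#include <linux/list.h>
	#include <rdma/ib_umem.h>

	/* Count scatter/gather entries in a pinned region, as the patch
	 * does before sizing the region with alloc_mr(). */
	static int example_umem_sg_count(struct ib_umem *umem)
	{
		struct ib_umem_chunk *chunk;
		int n = 0;

		list_for_each_entry(chunk, &umem->chunk_list, list)
			n += chunk->nents;

		return n;
	}

Note the error convention: ib_umem_get() returns an ERR_PTR on
failure, which ipath_reg_user_mr() propagates directly via a cast
instead of wrapping it in a fresh errno.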
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h
index 7064fc222727..088b837ebea8 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.h
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.h
@@ -251,6 +251,7 @@ struct ipath_sge {
251/* Memory region */ 251/* Memory region */
252struct ipath_mr { 252struct ipath_mr {
253 struct ib_mr ibmr; 253 struct ib_mr ibmr;
254 struct ib_umem *umem;
254 struct ipath_mregion mr; /* must be last */ 255 struct ipath_mregion mr; /* must be last */
255}; 256};
256 257
@@ -751,8 +752,8 @@ struct ib_mr *ipath_reg_phys_mr(struct ib_pd *pd,
751 struct ib_phys_buf *buffer_list, 752 struct ib_phys_buf *buffer_list,
752 int num_phys_buf, int acc, u64 *iova_start); 753 int num_phys_buf, int acc, u64 *iova_start);
753 754
754struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, struct ib_umem *region, 755struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
755 int mr_access_flags, 756 u64 virt_addr, int mr_access_flags,
756 struct ib_udata *udata); 757 struct ib_udata *udata);
757 758
758int ipath_dereg_mr(struct ib_mr *ibmr); 759int ipath_dereg_mr(struct ib_mr *ibmr);
diff --git a/drivers/infiniband/hw/mlx4/Kconfig b/drivers/infiniband/hw/mlx4/Kconfig
new file mode 100644
index 000000000000..b8912cdb9663
--- /dev/null
+++ b/drivers/infiniband/hw/mlx4/Kconfig
@@ -0,0 +1,9 @@
1config MLX4_INFINIBAND
2 tristate "Mellanox ConnectX HCA support"
3 depends on INFINIBAND
4 select MLX4_CORE
5 ---help---
6 This driver provides low-level InfiniBand support for
7 Mellanox ConnectX PCI Express host channel adapters (HCAs).
8 This is required to use InfiniBand protocols such as
9 IP-over-IB or SRP with these devices.
diff --git a/drivers/infiniband/hw/mlx4/Makefile b/drivers/infiniband/hw/mlx4/Makefile
new file mode 100644
index 000000000000..70f09c7826da
--- /dev/null
+++ b/drivers/infiniband/hw/mlx4/Makefile
@@ -0,0 +1,3 @@
1obj-$(CONFIG_MLX4_INFINIBAND) += mlx4_ib.o
2
3mlx4_ib-y := ah.o cq.o doorbell.o mad.o main.o mr.o qp.o srq.o
diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
new file mode 100644
index 000000000000..c75ac9463e20
--- /dev/null
+++ b/drivers/infiniband/hw/mlx4/ah.c
@@ -0,0 +1,100 @@
1/*
2 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include "mlx4_ib.h"
34
35struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
36{
37 struct mlx4_dev *dev = to_mdev(pd->device)->dev;
38 struct mlx4_ib_ah *ah;
39
40 ah = kmalloc(sizeof *ah, GFP_ATOMIC);
41 if (!ah)
42 return ERR_PTR(-ENOMEM);
43
44 memset(&ah->av, 0, sizeof ah->av);
45
46 ah->av.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
47 ah->av.g_slid = ah_attr->src_path_bits;
48 ah->av.dlid = cpu_to_be16(ah_attr->dlid);
49 if (ah_attr->static_rate) {
50 ah->av.stat_rate = ah_attr->static_rate + MLX4_STAT_RATE_OFFSET;
51 while (ah->av.stat_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET &&
52 !(1 << ah->av.stat_rate & dev->caps.stat_rate_support))
53 --ah->av.stat_rate;
54 }
55 ah->av.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
56 if (ah_attr->ah_flags & IB_AH_GRH) {
57 ah->av.g_slid |= 0x80;
58 ah->av.gid_index = ah_attr->grh.sgid_index;
59 ah->av.hop_limit = ah_attr->grh.hop_limit;
60 ah->av.sl_tclass_flowlabel |=
61 cpu_to_be32((ah_attr->grh.traffic_class << 20) |
62 ah_attr->grh.flow_label);
63 memcpy(ah->av.dgid, ah_attr->grh.dgid.raw, 16);
64 }
65
66 return &ah->ibah;
67}
68
69int mlx4_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
70{
71 struct mlx4_ib_ah *ah = to_mah(ibah);
72
73 memset(ah_attr, 0, sizeof *ah_attr);
74 ah_attr->dlid = be16_to_cpu(ah->av.dlid);
75 ah_attr->sl = be32_to_cpu(ah->av.sl_tclass_flowlabel) >> 28;
76 ah_attr->port_num = be32_to_cpu(ah->av.port_pd) >> 24;
77 if (ah->av.stat_rate)
78 ah_attr->static_rate = ah->av.stat_rate - MLX4_STAT_RATE_OFFSET;
79 ah_attr->src_path_bits = ah->av.g_slid & 0x7F;
80
81 if (mlx4_ib_ah_grh_present(ah)) {
82 ah_attr->ah_flags = IB_AH_GRH;
83
84 ah_attr->grh.traffic_class =
85 be32_to_cpu(ah->av.sl_tclass_flowlabel) >> 20;
86 ah_attr->grh.flow_label =
87 be32_to_cpu(ah->av.sl_tclass_flowlabel) & 0xfffff;
88 ah_attr->grh.hop_limit = ah->av.hop_limit;
89 ah_attr->grh.sgid_index = ah->av.gid_index;
90 memcpy(ah_attr->grh.dgid.raw, ah->av.dgid, 16);
91 }
92
93 return 0;
94}
95
96int mlx4_ib_destroy_ah(struct ib_ah *ah)
97{
98 kfree(to_mah(ah));
99 return 0;
100}
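
ah.c packs most of struct ib_ah_attr into a 32-bit big-endian word:
the SL lands in bits 31:28, the GRH traffic class in bits 27:20, and
the flow label in bits 19:0 of av.sl_tclass_flowlabel, with
mlx4_ib_query_ah() reversing the layout. A self-contained round-trip
check of that packing, with field widths taken from the code above
(the values are arbitrary test inputs):

	#include <assert.h>
	#include <stdint.h>

	static uint32_t pack_sl_tclass_flowlabel(uint8_t sl, uint8_t tclass,
						 uint32_t flow_label)
	{
		return ((uint32_t) sl << 28) |
		       ((uint32_t) tclass << 20) |
		       (flow_label & 0xfffff);
	}

	int main(void)
	{
		uint32_t v = pack_sl_tclass_flowlabel(5, 0x3c, 0x12345);

		assert(v >> 28 == 5);			/* sl, as unpacked by query_ah */
		assert(((v >> 20) & 0xff) == 0x3c);	/* grh.traffic_class */
		assert((v & 0xfffff) == 0x12345);	/* grh.flow_label */
		return 0;
	}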
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
new file mode 100644
index 000000000000..b2a290c6703a
--- /dev/null
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -0,0 +1,525 @@
1/*
2 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/mlx4/cq.h>
34#include <linux/mlx4/qp.h>
35
36#include "mlx4_ib.h"
37#include "user.h"
38
39static void mlx4_ib_cq_comp(struct mlx4_cq *cq)
40{
41 struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;
42 ibcq->comp_handler(ibcq, ibcq->cq_context);
43}
44
45static void mlx4_ib_cq_event(struct mlx4_cq *cq, enum mlx4_event type)
46{
47 struct ib_event event;
48 struct ib_cq *ibcq;
49
50 if (type != MLX4_EVENT_TYPE_CQ_ERROR) {
51 printk(KERN_WARNING "mlx4_ib: Unexpected event type %d "
52 "on CQ %06x\n", type, cq->cqn);
53 return;
54 }
55
56 ibcq = &to_mibcq(cq)->ibcq;
57 if (ibcq->event_handler) {
58 event.device = ibcq->device;
59 event.event = IB_EVENT_CQ_ERR;
60 event.element.cq = ibcq;
61 ibcq->event_handler(&event, ibcq->cq_context);
62 }
63}
64
65static void *get_cqe_from_buf(struct mlx4_ib_cq_buf *buf, int n)
66{
67 int offset = n * sizeof (struct mlx4_cqe);
68
69 if (buf->buf.nbufs == 1)
70 return buf->buf.u.direct.buf + offset;
71 else
72 return buf->buf.u.page_list[offset >> PAGE_SHIFT].buf +
73 (offset & (PAGE_SIZE - 1));
74}
75
76static void *get_cqe(struct mlx4_ib_cq *cq, int n)
77{
78 return get_cqe_from_buf(&cq->buf, n);
79}
80
81static void *get_sw_cqe(struct mlx4_ib_cq *cq, int n)
82{
83 struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe);
84
85 return (!!(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
86 !!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;
87}
88
89static struct mlx4_cqe *next_cqe_sw(struct mlx4_ib_cq *cq)
90{
91 return get_sw_cqe(cq, cq->mcq.cons_index);
92}
93
94struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector,
95 struct ib_ucontext *context,
96 struct ib_udata *udata)
97{
98 struct mlx4_ib_dev *dev = to_mdev(ibdev);
99 struct mlx4_ib_cq *cq;
100 struct mlx4_uar *uar;
101 int buf_size;
102 int err;
103
104 if (entries < 1 || entries > dev->dev->caps.max_cqes)
105 return ERR_PTR(-EINVAL);
106
107 cq = kmalloc(sizeof *cq, GFP_KERNEL);
108 if (!cq)
109 return ERR_PTR(-ENOMEM);
110
111 entries = roundup_pow_of_two(entries + 1);
112 cq->ibcq.cqe = entries - 1;
113 buf_size = entries * sizeof (struct mlx4_cqe);
114 spin_lock_init(&cq->lock);
115
116 if (context) {
117 struct mlx4_ib_create_cq ucmd;
118
119 if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
120 err = -EFAULT;
121 goto err_cq;
122 }
123
124 cq->umem = ib_umem_get(context, ucmd.buf_addr, buf_size,
125 IB_ACCESS_LOCAL_WRITE);
126 if (IS_ERR(cq->umem)) {
127 err = PTR_ERR(cq->umem);
128 goto err_cq;
129 }
130
131 err = mlx4_mtt_init(dev->dev, ib_umem_page_count(cq->umem),
132 ilog2(cq->umem->page_size), &cq->buf.mtt);
133 if (err)
134 goto err_buf;
135
136 err = mlx4_ib_umem_write_mtt(dev, &cq->buf.mtt, cq->umem);
137 if (err)
138 goto err_mtt;
139
140 err = mlx4_ib_db_map_user(to_mucontext(context), ucmd.db_addr,
141 &cq->db);
142 if (err)
143 goto err_mtt;
144
145 uar = &to_mucontext(context)->uar;
146 } else {
147 err = mlx4_ib_db_alloc(dev, &cq->db, 1);
148 if (err)
149 goto err_cq;
150
151 cq->mcq.set_ci_db = cq->db.db;
152 cq->mcq.arm_db = cq->db.db + 1;
153 *cq->mcq.set_ci_db = 0;
154 *cq->mcq.arm_db = 0;
155
156 if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2, &cq->buf.buf)) {
157 err = -ENOMEM;
158 goto err_db;
159 }
160
161 err = mlx4_mtt_init(dev->dev, cq->buf.buf.npages, cq->buf.buf.page_shift,
162 &cq->buf.mtt);
163 if (err)
164 goto err_buf;
165
166 err = mlx4_buf_write_mtt(dev->dev, &cq->buf.mtt, &cq->buf.buf);
167 if (err)
168 goto err_mtt;
169
170 uar = &dev->priv_uar;
171 }
172
173 err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar,
174 cq->db.dma, &cq->mcq);
175 if (err)
176 goto err_dbmap;
177
178 cq->mcq.comp = mlx4_ib_cq_comp;
179 cq->mcq.event = mlx4_ib_cq_event;
180
181 if (context)
182 if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) {
183 err = -EFAULT;
184 goto err_dbmap;
185 }
186
187 return &cq->ibcq;
188
189err_dbmap:
190 if (context)
191 mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db);
192
193err_mtt:
194 mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt);
195
196err_buf:
197 if (context)
198 ib_umem_release(cq->umem);
199 else
200 mlx4_buf_free(dev->dev, entries * sizeof (struct mlx4_cqe),
201 &cq->buf.buf);
202
203err_db:
204 if (!context)
205 mlx4_ib_db_free(dev, &cq->db);
206
207err_cq:
208 kfree(cq);
209
210 return ERR_PTR(err);
211}
212
213int mlx4_ib_destroy_cq(struct ib_cq *cq)
214{
215 struct mlx4_ib_dev *dev = to_mdev(cq->device);
216 struct mlx4_ib_cq *mcq = to_mcq(cq);
217
218 mlx4_cq_free(dev->dev, &mcq->mcq);
219 mlx4_mtt_cleanup(dev->dev, &mcq->buf.mtt);
220
221 if (cq->uobject) {
222 mlx4_ib_db_unmap_user(to_mucontext(cq->uobject->context), &mcq->db);
223 ib_umem_release(mcq->umem);
224 } else {
225 mlx4_buf_free(dev->dev, (cq->cqe + 1) * sizeof (struct mlx4_cqe),
226 &mcq->buf.buf);
227 mlx4_ib_db_free(dev, &mcq->db);
228 }
229
230 kfree(mcq);
231
232 return 0;
233}
234
235static void dump_cqe(void *cqe)
236{
237 __be32 *buf = cqe;
238
239 printk(KERN_DEBUG "CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n",
240 be32_to_cpu(buf[0]), be32_to_cpu(buf[1]), be32_to_cpu(buf[2]),
241 be32_to_cpu(buf[3]), be32_to_cpu(buf[4]), be32_to_cpu(buf[5]),
242 be32_to_cpu(buf[6]), be32_to_cpu(buf[7]));
243}
244
245static void mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe,
246 struct ib_wc *wc)
247{
248 if (cqe->syndrome == MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR) {
249 printk(KERN_DEBUG "local QP operation err "
250 "(QPN %06x, WQE index %x, vendor syndrome %02x, "
251 "opcode = %02x)\n",
252 be32_to_cpu(cqe->my_qpn), be16_to_cpu(cqe->wqe_index),
253 cqe->vendor_err_syndrome,
254 cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
255 dump_cqe(cqe);
256 }
257
258 switch (cqe->syndrome) {
259 case MLX4_CQE_SYNDROME_LOCAL_LENGTH_ERR:
260 wc->status = IB_WC_LOC_LEN_ERR;
261 break;
262 case MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR:
263 wc->status = IB_WC_LOC_QP_OP_ERR;
264 break;
265 case MLX4_CQE_SYNDROME_LOCAL_PROT_ERR:
266 wc->status = IB_WC_LOC_PROT_ERR;
267 break;
268 case MLX4_CQE_SYNDROME_WR_FLUSH_ERR:
269 wc->status = IB_WC_WR_FLUSH_ERR;
270 break;
271 case MLX4_CQE_SYNDROME_MW_BIND_ERR:
272 wc->status = IB_WC_MW_BIND_ERR;
273 break;
274 case MLX4_CQE_SYNDROME_BAD_RESP_ERR:
275 wc->status = IB_WC_BAD_RESP_ERR;
276 break;
277 case MLX4_CQE_SYNDROME_LOCAL_ACCESS_ERR:
278 wc->status = IB_WC_LOC_ACCESS_ERR;
279 break;
280 case MLX4_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
281 wc->status = IB_WC_REM_INV_REQ_ERR;
282 break;
283 case MLX4_CQE_SYNDROME_REMOTE_ACCESS_ERR:
284 wc->status = IB_WC_REM_ACCESS_ERR;
285 break;
286 case MLX4_CQE_SYNDROME_REMOTE_OP_ERR:
287 wc->status = IB_WC_REM_OP_ERR;
288 break;
289 case MLX4_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
290 wc->status = IB_WC_RETRY_EXC_ERR;
291 break;
292 case MLX4_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
293 wc->status = IB_WC_RNR_RETRY_EXC_ERR;
294 break;
295 case MLX4_CQE_SYNDROME_REMOTE_ABORTED_ERR:
296 wc->status = IB_WC_REM_ABORT_ERR;
297 break;
298 default:
299 wc->status = IB_WC_GENERAL_ERR;
300 break;
301 }
302
303 wc->vendor_err = cqe->vendor_err_syndrome;
304}
305
306static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
307 struct mlx4_ib_qp **cur_qp,
308 struct ib_wc *wc)
309{
310 struct mlx4_cqe *cqe;
311 struct mlx4_qp *mqp;
312 struct mlx4_ib_wq *wq;
313 struct mlx4_ib_srq *srq;
314 int is_send;
315 int is_error;
316 u16 wqe_ctr;
317
318 cqe = next_cqe_sw(cq);
319 if (!cqe)
320 return -EAGAIN;
321
322 ++cq->mcq.cons_index;
323
324 /*
325 * Make sure we read CQ entry contents after we've checked the
326 * ownership bit.
327 */
328 rmb();
329
330 is_send = cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK;
331 is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
332 MLX4_CQE_OPCODE_ERROR;
333
334 if (!*cur_qp ||
335 (be32_to_cpu(cqe->my_qpn) & 0xffffff) != (*cur_qp)->mqp.qpn) {
336 /*
337 * We do not have to take the QP table lock here,
338 * because CQs will be locked while QPs are removed
339 * from the table.
340 */
341 mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,
342 be32_to_cpu(cqe->my_qpn));
343 if (unlikely(!mqp)) {
344 printk(KERN_WARNING "CQ %06x with entry for unknown QPN %06x\n",
345 cq->mcq.cqn, be32_to_cpu(cqe->my_qpn) & 0xffffff);
346 return -EINVAL;
347 }
348
349 *cur_qp = to_mibqp(mqp);
350 }
351
352 wc->qp = &(*cur_qp)->ibqp;
353
354 if (is_send) {
355 wq = &(*cur_qp)->sq;
356 wqe_ctr = be16_to_cpu(cqe->wqe_index);
357 wq->tail += (u16) (wqe_ctr - (u16) wq->tail);
358 wc->wr_id = wq->wrid[wq->tail & (wq->max - 1)];
359 ++wq->tail;
360 } else if ((*cur_qp)->ibqp.srq) {
361 srq = to_msrq((*cur_qp)->ibqp.srq);
362 wqe_ctr = be16_to_cpu(cqe->wqe_index);
363 wc->wr_id = srq->wrid[wqe_ctr];
364 mlx4_ib_free_srq_wqe(srq, wqe_ctr);
365 } else {
366 wq = &(*cur_qp)->rq;
367 wc->wr_id = wq->wrid[wq->tail & (wq->max - 1)];
368 ++wq->tail;
369 }
370
371 if (unlikely(is_error)) {
372 mlx4_ib_handle_error_cqe((struct mlx4_err_cqe *) cqe, wc);
373 return 0;
374 }
375
376 wc->status = IB_WC_SUCCESS;
377
378 if (is_send) {
379 wc->wc_flags = 0;
380 switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
381 case MLX4_OPCODE_RDMA_WRITE_IMM:
382 wc->wc_flags |= IB_WC_WITH_IMM;
383 case MLX4_OPCODE_RDMA_WRITE:
384 wc->opcode = IB_WC_RDMA_WRITE;
385 break;
386 case MLX4_OPCODE_SEND_IMM:
387 wc->wc_flags |= IB_WC_WITH_IMM;
388 case MLX4_OPCODE_SEND:
389 wc->opcode = IB_WC_SEND;
390 break;
391 case MLX4_OPCODE_RDMA_READ:
392 wc->opcode = IB_WC_RDMA_READ;
393 wc->byte_len = be32_to_cpu(cqe->byte_cnt);
394 break;
395 case MLX4_OPCODE_ATOMIC_CS:
396 wc->opcode = IB_WC_COMP_SWAP;
397 wc->byte_len = 8;
398 break;
399 case MLX4_OPCODE_ATOMIC_FA:
400 wc->opcode = IB_WC_FETCH_ADD;
401 wc->byte_len = 8;
402 break;
403 case MLX4_OPCODE_BIND_MW:
404 wc->opcode = IB_WC_BIND_MW;
405 break;
406 }
407 } else {
408 wc->byte_len = be32_to_cpu(cqe->byte_cnt);
409
410 switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
411 case MLX4_RECV_OPCODE_RDMA_WRITE_IMM:
412 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
413 wc->wc_flags = IB_WC_WITH_IMM;
414 wc->imm_data = cqe->immed_rss_invalid;
415 break;
416 case MLX4_RECV_OPCODE_SEND:
417 wc->opcode = IB_WC_RECV;
418 wc->wc_flags = 0;
419 break;
420 case MLX4_RECV_OPCODE_SEND_IMM:
421 wc->opcode = IB_WC_RECV;
422 wc->wc_flags = IB_WC_WITH_IMM;
423 wc->imm_data = cqe->immed_rss_invalid;
424 break;
425 }
426
427 wc->slid = be16_to_cpu(cqe->rlid);
428 wc->sl = cqe->sl >> 4;
429 wc->src_qp = be32_to_cpu(cqe->g_mlpath_rqpn) & 0xffffff;
430 wc->dlid_path_bits = (be32_to_cpu(cqe->g_mlpath_rqpn) >> 24) & 0x7f;
431 wc->wc_flags |= be32_to_cpu(cqe->g_mlpath_rqpn) & 0x80000000 ?
432 IB_WC_GRH : 0;
433 wc->pkey_index = be32_to_cpu(cqe->immed_rss_invalid) >> 16;
434 }
435
436 return 0;
437}
438
439int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
440{
441 struct mlx4_ib_cq *cq = to_mcq(ibcq);
442 struct mlx4_ib_qp *cur_qp = NULL;
443 unsigned long flags;
444 int npolled;
445 int err = 0;
446
447 spin_lock_irqsave(&cq->lock, flags);
448
449 for (npolled = 0; npolled < num_entries; ++npolled) {
450 err = mlx4_ib_poll_one(cq, &cur_qp, wc + npolled);
451 if (err)
452 break;
453 }
454
455 if (npolled)
456 mlx4_cq_set_ci(&cq->mcq);
457
458 spin_unlock_irqrestore(&cq->lock, flags);
459
460 if (err == 0 || err == -EAGAIN)
461 return npolled;
462 else
463 return err;
464}
465
466int mlx4_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
467{
468 mlx4_cq_arm(&to_mcq(ibcq)->mcq,
469 (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
470 MLX4_CQ_DB_REQ_NOT_SOL : MLX4_CQ_DB_REQ_NOT,
471 to_mdev(ibcq->device)->uar_map,
472 MLX4_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->uar_lock));
473
474 return 0;
475}
476
477void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
478{
479 u32 prod_index;
480 int nfreed = 0;
481 struct mlx4_cqe *cqe;
482
483 /*
484 * First we need to find the current producer index, so we
485 * know where to start cleaning from. It doesn't matter if HW
486 * adds new entries after this loop -- the QP we're worried
487 * about is already in RESET, so the new entries won't come
488 * from our QP and therefore don't need to be checked.
489 */
490 for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); ++prod_index)
491 if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
492 break;
493
494 /*
495 * Now sweep backwards through the CQ, removing CQ entries
496 * that match our QP by copying older entries on top of them.
497 */
498 while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
499 cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
500 if ((be32_to_cpu(cqe->my_qpn) & 0xffffff) == qpn) {
501 if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK))
502 mlx4_ib_free_srq_wqe(srq, be16_to_cpu(cqe->wqe_index));
503 ++nfreed;
504 } else if (nfreed)
505 memcpy(get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe),
506 cqe, sizeof *cqe);
507 }
508
509 if (nfreed) {
510 cq->mcq.cons_index += nfreed;
511 /*
512 * Make sure update of buffer contents is done before
513 * updating consumer index.
514 */
515 wmb();
516 mlx4_cq_set_ci(&cq->mcq);
517 }
518}
519
520void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
521{
522 spin_lock_irq(&cq->lock);
523 __mlx4_ib_cq_clean(cq, qpn, srq);
524 spin_unlock_irq(&cq->lock);
525}
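
The heart of the poll path is the ownership test in get_sw_cqe():
hardware toggles MLX4_CQE_OWNER_MASK on each pass through the ring, so
a CQE is valid for software exactly when its owner bit equals the pass
parity of the index, i.e. bit log2(ring size) of n. A standalone model
of that test (constants are illustrative, not the driver's):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define OWNER_BIT 0x80	/* stands in for MLX4_CQE_OWNER_MASK */

	/* cqe_mask plays the role of ibcq.cqe: ring size minus one,
	 * where the ring size is a power of two. */
	static bool cqe_is_valid(uint8_t owner_sr_opcode, uint32_t n,
				 uint32_t cqe_mask)
	{
		bool hw_bit = owner_sr_opcode & OWNER_BIT;
		bool odd_pass = n & (cqe_mask + 1);

		return hw_bit == odd_pass;	/* the XOR test in get_sw_cqe() */
	}

	int main(void)
	{
		/* first pass through a 256-entry ring: bit clear = valid */
		printf("%d\n", cqe_is_valid(0, 3, 255));		/* 1 */
		/* second pass: the bit must now be set to be valid */
		printf("%d\n", cqe_is_valid(OWNER_BIT, 259, 255));	/* 1 */
		return 0;
	}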
diff --git a/drivers/infiniband/hw/mlx4/doorbell.c b/drivers/infiniband/hw/mlx4/doorbell.c
new file mode 100644
index 000000000000..1c36087aef14
--- /dev/null
+++ b/drivers/infiniband/hw/mlx4/doorbell.c
@@ -0,0 +1,216 @@
1/*
2 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/slab.h>
34
35#include "mlx4_ib.h"
36
37struct mlx4_ib_db_pgdir {
38 struct list_head list;
39 DECLARE_BITMAP(order0, MLX4_IB_DB_PER_PAGE);
40 DECLARE_BITMAP(order1, MLX4_IB_DB_PER_PAGE / 2);
41 unsigned long *bits[2];
42 __be32 *db_page;
43 dma_addr_t db_dma;
44};
45
46static struct mlx4_ib_db_pgdir *mlx4_ib_alloc_db_pgdir(struct mlx4_ib_dev *dev)
47{
48 struct mlx4_ib_db_pgdir *pgdir;
49
50 pgdir = kzalloc(sizeof *pgdir, GFP_KERNEL);
51 if (!pgdir)
52 return NULL;
53
54 bitmap_fill(pgdir->order1, MLX4_IB_DB_PER_PAGE / 2);
55 pgdir->bits[0] = pgdir->order0;
56 pgdir->bits[1] = pgdir->order1;
57 pgdir->db_page = dma_alloc_coherent(dev->ib_dev.dma_device,
58 PAGE_SIZE, &pgdir->db_dma,
59 GFP_KERNEL);
60 if (!pgdir->db_page) {
61 kfree(pgdir);
62 return NULL;
63 }
64
65 return pgdir;
66}
67
68static int mlx4_ib_alloc_db_from_pgdir(struct mlx4_ib_db_pgdir *pgdir,
69 struct mlx4_ib_db *db, int order)
70{
71 int o;
72 int i;
73
74 for (o = order; o <= 1; ++o) {
75 i = find_first_bit(pgdir->bits[o], MLX4_IB_DB_PER_PAGE >> o);
76 if (i < MLX4_IB_DB_PER_PAGE >> o)
77 goto found;
78 }
79
80 return -ENOMEM;
81
82found:
83 clear_bit(i, pgdir->bits[o]);
84
85 i <<= o;
86
87 if (o > order)
88 set_bit(i ^ 1, pgdir->bits[order]);
89
90 db->u.pgdir = pgdir;
91 db->index = i;
92 db->db = pgdir->db_page + db->index;
93 db->dma = pgdir->db_dma + db->index * 4;
94 db->order = order;
95
96 return 0;
97}
98
99int mlx4_ib_db_alloc(struct mlx4_ib_dev *dev, struct mlx4_ib_db *db, int order)
100{
101 struct mlx4_ib_db_pgdir *pgdir;
102 int ret = 0;
103
104 mutex_lock(&dev->pgdir_mutex);
105
106 list_for_each_entry(pgdir, &dev->pgdir_list, list)
107 if (!mlx4_ib_alloc_db_from_pgdir(pgdir, db, order))
108 goto out;
109
110 pgdir = mlx4_ib_alloc_db_pgdir(dev);
111 if (!pgdir) {
112 ret = -ENOMEM;
113 goto out;
114 }
115
116 list_add(&pgdir->list, &dev->pgdir_list);
117
118 /* This should never fail -- we just allocated an empty page: */
119 WARN_ON(mlx4_ib_alloc_db_from_pgdir(pgdir, db, order));
120
121out:
122 mutex_unlock(&dev->pgdir_mutex);
123
124 return ret;
125}
126
127void mlx4_ib_db_free(struct mlx4_ib_dev *dev, struct mlx4_ib_db *db)
128{
129 int o;
130 int i;
131
132 mutex_lock(&dev->pgdir_mutex);
133
134 o = db->order;
135 i = db->index;
136
137 if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) {
138 clear_bit(i ^ 1, db->u.pgdir->order0);
139 ++o;
140 }
141
142 i >>= o;
143 set_bit(i, db->u.pgdir->bits[o]);
144
145 if (bitmap_full(db->u.pgdir->order1, MLX4_IB_DB_PER_PAGE / 2)) {
146 dma_free_coherent(dev->ib_dev.dma_device, PAGE_SIZE,
147 db->u.pgdir->db_page, db->u.pgdir->db_dma);
148 list_del(&db->u.pgdir->list);
149 kfree(db->u.pgdir);
150 }
151
152 mutex_unlock(&dev->pgdir_mutex);
153}
154
155struct mlx4_ib_user_db_page {
156 struct list_head list;
157 struct ib_umem *umem;
158 unsigned long user_virt;
159 int refcnt;
160};
161
162int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt,
163 struct mlx4_ib_db *db)
164{
165 struct mlx4_ib_user_db_page *page;
166 struct ib_umem_chunk *chunk;
167 int err = 0;
168
169 mutex_lock(&context->db_page_mutex);
170
171 list_for_each_entry(page, &context->db_page_list, list)
172 if (page->user_virt == (virt & PAGE_MASK))
173 goto found;
174
175 page = kmalloc(sizeof *page, GFP_KERNEL);
176 if (!page) {
177 err = -ENOMEM;
178 goto out;
179 }
180
181 page->user_virt = (virt & PAGE_MASK);
182 page->refcnt = 0;
183 page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK,
184 PAGE_SIZE, 0);
185 if (IS_ERR(page->umem)) {
186 err = PTR_ERR(page->umem);
187 kfree(page);
188 goto out;
189 }
190
191 list_add(&page->list, &context->db_page_list);
192
193found:
194 chunk = list_entry(page->umem->chunk_list.next, struct ib_umem_chunk, list);
195 db->dma = sg_dma_address(chunk->page_list) + (virt & ~PAGE_MASK);
196 db->u.user_page = page;
197 ++page->refcnt;
198
199out:
200 mutex_unlock(&context->db_page_mutex);
201
202 return err;
203}
204
205void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_ib_db *db)
206{
207 mutex_lock(&context->db_page_mutex);
208
209 if (!--db->u.user_page->refcnt) {
210 list_del(&db->u.user_page->list);
211 ib_umem_release(db->u.user_page->umem);
212 kfree(db->u.user_page);
213 }
214
215 mutex_unlock(&context->db_page_mutex);
216}
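
doorbell.c implements a small two-level buddy allocator: each page
holds MLX4_IB_DB_PER_PAGE doorbell records, tracked by an order-0
bitmap of single records and an order-1 bitmap of aligned pairs. An
order-0 request that splits a pair hands the buddy (index i ^ 1) back
to order 0, and mlx4_ib_db_free() re-merges buddies the same way. A
standalone model with an illustrative page size:

	#include <stdbool.h>
	#include <stdio.h>

	#define DB_PER_PAGE 8			/* illustrative, not the driver's */

	static bool order0[DB_PER_PAGE];	/* true = free single record */
	static bool order1[DB_PER_PAGE / 2];	/* true = free aligned pair */

	/* Mirrors mlx4_ib_alloc_db_from_pgdir(): try the requested
	 * order, then fall back to splitting a higher one. */
	static int db_alloc(int order)
	{
		int o, i;

		for (o = order; o <= 1; ++o)
			for (i = 0; i < DB_PER_PAGE >> o; ++i)
				if ((o ? order1 : order0)[i]) {
					(o ? order1 : order0)[i] = false;
					i <<= o;
					if (o > order)
						order0[i ^ 1] = true;	/* free the buddy */
					return i;
				}
		return -1;
	}

	int main(void)
	{
		int i;

		for (i = 0; i < DB_PER_PAGE / 2; ++i)
			order1[i] = true;	/* fresh page: every pair free */

		/* Splitting pair 0 frees record 1; the second call takes
		 * that buddy, and the third takes the next whole pair. */
		int a = db_alloc(0);
		int b = db_alloc(0);
		int c = db_alloc(1);

		printf("%d %d %d\n", a, b, c);	/* prints "0 1 2" */
		return 0;
	}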
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
new file mode 100644
index 000000000000..333091787c5f
--- /dev/null
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -0,0 +1,339 @@
1/*
2 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <rdma/ib_mad.h>
34#include <rdma/ib_smi.h>
35
36#include <linux/mlx4/cmd.h>
37
38#include "mlx4_ib.h"
39
40enum {
41 MLX4_IB_VENDOR_CLASS1 = 0x9,
42 MLX4_IB_VENDOR_CLASS2 = 0xa
43};
44
45int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int ignore_mkey, int ignore_bkey,
46 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
47 void *in_mad, void *response_mad)
48{
49 struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
50 void *inbox;
51 int err;
52 u32 in_modifier = port;
53 u8 op_modifier = 0;
54
55 inmailbox = mlx4_alloc_cmd_mailbox(dev->dev);
56 if (IS_ERR(inmailbox))
57 return PTR_ERR(inmailbox);
58 inbox = inmailbox->buf;
59
60 outmailbox = mlx4_alloc_cmd_mailbox(dev->dev);
61 if (IS_ERR(outmailbox)) {
62 mlx4_free_cmd_mailbox(dev->dev, inmailbox);
63 return PTR_ERR(outmailbox);
64 }
65
66 memcpy(inbox, in_mad, 256);
67
68 /*
69 * Key check traps can't be generated unless we have in_wc to
70 * tell us where to send the trap.
71 */
72 if (ignore_mkey || !in_wc)
73 op_modifier |= 0x1;
74 if (ignore_bkey || !in_wc)
75 op_modifier |= 0x2;
76
77 if (in_wc) {
78 struct {
79 __be32 my_qpn;
80 u32 reserved1;
81 __be32 rqpn;
82 u8 sl;
83 u8 g_path;
84 u16 reserved2[2];
85 __be16 pkey;
86 u32 reserved3[11];
87 u8 grh[40];
88 } *ext_info;
89
90 memset(inbox + 256, 0, 256);
91 ext_info = inbox + 256;
92
93 ext_info->my_qpn = cpu_to_be32(in_wc->qp->qp_num);
94 ext_info->rqpn = cpu_to_be32(in_wc->src_qp);
95 ext_info->sl = in_wc->sl << 4;
96 ext_info->g_path = in_wc->dlid_path_bits |
97 (in_wc->wc_flags & IB_WC_GRH ? 0x80 : 0);
98 ext_info->pkey = cpu_to_be16(in_wc->pkey_index);
99
100 if (in_grh)
101 memcpy(ext_info->grh, in_grh, 40);
102
103 op_modifier |= 0x4;
104
105 in_modifier |= in_wc->slid << 16;
106 }
107
108 err = mlx4_cmd_box(dev->dev, inmailbox->dma, outmailbox->dma,
109 in_modifier, op_modifier,
110 MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C);
111
112 if (!err)
113 memcpy(response_mad, outmailbox->buf, 256);
114
115 mlx4_free_cmd_mailbox(dev->dev, inmailbox);
116 mlx4_free_cmd_mailbox(dev->dev, outmailbox);
117
118 return err;
119}
120
121static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
122{
123 struct ib_ah *new_ah;
124 struct ib_ah_attr ah_attr;
125
126 if (!dev->send_agent[port_num - 1][0])
127 return;
128
129 memset(&ah_attr, 0, sizeof ah_attr);
130 ah_attr.dlid = lid;
131 ah_attr.sl = sl;
132 ah_attr.port_num = port_num;
133
134 new_ah = ib_create_ah(dev->send_agent[port_num - 1][0]->qp->pd,
135 &ah_attr);
136 if (IS_ERR(new_ah))
137 return;
138
139 spin_lock(&dev->sm_lock);
140 if (dev->sm_ah[port_num - 1])
141 ib_destroy_ah(dev->sm_ah[port_num - 1]);
142 dev->sm_ah[port_num - 1] = new_ah;
143 spin_unlock(&dev->sm_lock);
144}
145
146/*
147 * Snoop SM MADs for port info and P_Key table sets, so we can
148 * synthesize LID change and P_Key change events.
149 */
150static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad)
151{
152 struct ib_event event;
153
154 if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
155 mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
156 mad->mad_hdr.method == IB_MGMT_METHOD_SET) {
157 if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO) {
158 struct ib_port_info *pinfo =
159 (struct ib_port_info *) ((struct ib_smp *) mad)->data;
160
161 update_sm_ah(to_mdev(ibdev), port_num,
162 be16_to_cpu(pinfo->sm_lid),
163 pinfo->neighbormtu_mastersmsl & 0xf);
164
165 event.device = ibdev;
166 event.element.port_num = port_num;
167
168 if (pinfo->clientrereg_resv_subnetto & 0x80)
169 event.event = IB_EVENT_CLIENT_REREGISTER;
170 else
171 event.event = IB_EVENT_LID_CHANGE;
172
173 ib_dispatch_event(&event);
174 }
175
176 if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PKEY_TABLE) {
177 event.device = ibdev;
178 event.event = IB_EVENT_PKEY_CHANGE;
179 event.element.port_num = port_num;
180 ib_dispatch_event(&event);
181 }
182 }
183}
184
185static void node_desc_override(struct ib_device *dev,
186 struct ib_mad *mad)
187{
188 if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
189 mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
190 mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP &&
191 mad->mad_hdr.attr_id == IB_SMP_ATTR_NODE_DESC) {
192 spin_lock(&to_mdev(dev)->sm_lock);
193 memcpy(((struct ib_smp *) mad)->data, dev->node_desc, 64);
194 spin_unlock(&to_mdev(dev)->sm_lock);
195 }
196}
197
198static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *mad)
199{
200 int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED;
201 struct ib_mad_send_buf *send_buf;
202 struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn];
203 int ret;
204
205 if (agent) {
206 send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
207 IB_MGMT_MAD_DATA, GFP_ATOMIC);
208 /*
209 * We rely here on the fact that MLX QPs don't use the
210 * address handle after the send is posted (this is
211 * wrong following the IB spec strictly, but we know
212 * it's OK for our devices).
213 */
214 spin_lock(&dev->sm_lock);
215 memcpy(send_buf->mad, mad, sizeof *mad);
216 if ((send_buf->ah = dev->sm_ah[port_num - 1]))
217 ret = ib_post_send_mad(send_buf, NULL);
218 else
219 ret = -EINVAL;
220 spin_unlock(&dev->sm_lock);
221
222 if (ret)
223 ib_free_send_mad(send_buf);
224 }
225}
226
227int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
228 struct ib_wc *in_wc, struct ib_grh *in_grh,
229 struct ib_mad *in_mad, struct ib_mad *out_mad)
230{
231 u16 slid;
232 int err;
233
234 slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);
235
236 if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0) {
237 forward_trap(to_mdev(ibdev), port_num, in_mad);
238 return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
239 }
240
241 if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
242 in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
243 if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
244 in_mad->mad_hdr.method != IB_MGMT_METHOD_SET &&
245 in_mad->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS)
246 return IB_MAD_RESULT_SUCCESS;
247
248 /*
249 * Don't process SMInfo queries or vendor-specific
250 * MADs -- the SMA can't handle them.
251 */
252 if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO ||
253 ((in_mad->mad_hdr.attr_id & IB_SMP_ATTR_VENDOR_MASK) ==
254 IB_SMP_ATTR_VENDOR_MASK))
255 return IB_MAD_RESULT_SUCCESS;
256 } else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
257 in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS1 ||
258 in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS2) {
259 if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
260 in_mad->mad_hdr.method != IB_MGMT_METHOD_SET)
261 return IB_MAD_RESULT_SUCCESS;
262 } else
263 return IB_MAD_RESULT_SUCCESS;
264
265 err = mlx4_MAD_IFC(to_mdev(ibdev),
266 mad_flags & IB_MAD_IGNORE_MKEY,
267 mad_flags & IB_MAD_IGNORE_BKEY,
268 port_num, in_wc, in_grh, in_mad, out_mad);
269 if (err)
270 return IB_MAD_RESULT_FAILURE;
271
272 if (!out_mad->mad_hdr.status) {
273 smp_snoop(ibdev, port_num, in_mad);
274 node_desc_override(ibdev, out_mad);
275 }
276
277 /* set return bit in status of directed route responses */
278 if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
279 out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);
280
281 if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
282 /* no response for trap repress */
283 return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
284
285 return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
286}
287
288static void send_handler(struct ib_mad_agent *agent,
289 struct ib_mad_send_wc *mad_send_wc)
290{
291 ib_free_send_mad(mad_send_wc->send_buf);
292}
293
294int mlx4_ib_mad_init(struct mlx4_ib_dev *dev)
295{
296 struct ib_mad_agent *agent;
297 int p, q;
298 int ret;
299
300 for (p = 0; p < dev->dev->caps.num_ports; ++p)
301 for (q = 0; q <= 1; ++q) {
302 agent = ib_register_mad_agent(&dev->ib_dev, p + 1,
303 q ? IB_QPT_GSI : IB_QPT_SMI,
304 NULL, 0, send_handler,
305 NULL, NULL);
306 if (IS_ERR(agent)) {
307 ret = PTR_ERR(agent);
308 goto err;
309 }
310 dev->send_agent[p][q] = agent;
311 }
312
313 return 0;
314
315err:
316 for (p = 0; p < dev->dev->caps.num_ports; ++p)
317 for (q = 0; q <= 1; ++q)
318 if (dev->send_agent[p][q])
319 ib_unregister_mad_agent(dev->send_agent[p][q]);
320
321 return ret;
322}
323
324void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev)
325{
326 struct ib_mad_agent *agent;
327 int p, q;
328
329 for (p = 0; p < dev->dev->caps.num_ports; ++p) {
330 for (q = 0; q <= 1; ++q) {
331 agent = dev->send_agent[p][q];
332 dev->send_agent[p][q] = NULL;
333 ib_unregister_mad_agent(agent);
334 }
335
336 if (dev->sm_ah[p])
337 ib_destroy_ah(dev->sm_ah[p]);
338 }
339}
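
mlx4_ib_process_mad() ends by setting the direction bit in directed
route responses: per the IB spec, the status field of a response to a
DR SMP must carry the D bit, bit 15, and the field is big-endian on
the wire. A tiny userspace illustration of that fixup (htons stands
in for the kernel's cpu_to_be16):

	#include <arpa/inet.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint16_t status = 0;		/* big-endian MAD header field */

		status |= htons(1 << 15);	/* the D-bit fixup above */
		printf("0x%04x\n", ntohs(status));	/* prints 0x8000 */
		return 0;
	}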
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
new file mode 100644
index 000000000000..688ecb4c39f3
--- /dev/null
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -0,0 +1,651 @@
1/*
2 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/module.h>
34#include <linux/init.h>
35#include <linux/errno.h>
36
37#include <rdma/ib_smi.h>
38#include <rdma/ib_user_verbs.h>
39
40#include <linux/mlx4/driver.h>
41#include <linux/mlx4/cmd.h>
42
43#include "mlx4_ib.h"
44#include "user.h"
45
46#define DRV_NAME "mlx4_ib"
47#define DRV_VERSION "0.01"
48#define DRV_RELDATE "May 1, 2006"
49
50MODULE_AUTHOR("Roland Dreier");
51MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
52MODULE_LICENSE("Dual BSD/GPL");
53MODULE_VERSION(DRV_VERSION);
54
55static const char mlx4_ib_version[] __devinitdata =
56 DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
57 DRV_VERSION " (" DRV_RELDATE ")\n";
58
59static void init_query_mad(struct ib_smp *mad)
60{
61 mad->base_version = 1;
62 mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
63 mad->class_version = 1;
64 mad->method = IB_MGMT_METHOD_GET;
65}
66
67static int mlx4_ib_query_device(struct ib_device *ibdev,
68 struct ib_device_attr *props)
69{
70 struct mlx4_ib_dev *dev = to_mdev(ibdev);
71 struct ib_smp *in_mad = NULL;
72 struct ib_smp *out_mad = NULL;
73 int err = -ENOMEM;
74
75 in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
76 out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
77 if (!in_mad || !out_mad)
78 goto out;
79
80 init_query_mad(in_mad);
81 in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
82
83 err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad, out_mad);
84 if (err)
85 goto out;
86
87 memset(props, 0, sizeof *props);
88
89 props->fw_ver = dev->dev->caps.fw_ver;
90 props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
91 IB_DEVICE_PORT_ACTIVE_EVENT |
92 IB_DEVICE_SYS_IMAGE_GUID |
93 IB_DEVICE_RC_RNR_NAK_GEN;
94 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR)
95 props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
96 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
97 props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
98 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM)
99 props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
100 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
101 props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
102
103 props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
104 0xffffff;
105 props->vendor_part_id = be16_to_cpup((__be16 *) (out_mad->data + 30));
106 props->hw_ver = be32_to_cpup((__be32 *) (out_mad->data + 32));
107 memcpy(&props->sys_image_guid, out_mad->data + 4, 8);
108
109 props->max_mr_size = ~0ull;
110 props->page_size_cap = dev->dev->caps.page_size_cap;
111 props->max_qp = dev->dev->caps.num_qps - dev->dev->caps.reserved_qps;
112 props->max_qp_wr = dev->dev->caps.max_wqes;
113 props->max_sge = min(dev->dev->caps.max_sq_sg,
114 dev->dev->caps.max_rq_sg);
115 props->max_cq = dev->dev->caps.num_cqs - dev->dev->caps.reserved_cqs;
116 props->max_cqe = dev->dev->caps.max_cqes;
117 props->max_mr = dev->dev->caps.num_mpts - dev->dev->caps.reserved_mrws;
118 props->max_pd = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds;
119 props->max_qp_rd_atom = dev->dev->caps.max_qp_dest_rdma;
120 props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
121 props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
122 props->max_srq = dev->dev->caps.num_srqs - dev->dev->caps.reserved_srqs;
123 props->max_srq_wr = dev->dev->caps.max_srq_wqes;
124 props->max_srq_sge = dev->dev->caps.max_srq_sge;
125 props->local_ca_ack_delay = dev->dev->caps.local_ca_ack_delay;
126 props->atomic_cap = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
127 IB_ATOMIC_HCA : IB_ATOMIC_NONE;
128 props->max_pkeys = dev->dev->caps.pkey_table_len;
129 props->max_mcast_grp = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
130 props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
131 props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
132 props->max_mcast_grp;
133 props->max_map_per_fmr = (1 << (32 - ilog2(dev->dev->caps.num_mpts))) - 1;
134
135out:
136 kfree(in_mad);
137 kfree(out_mad);
138
139 return err;
140}
141
142static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
143 struct ib_port_attr *props)
144{
145 struct ib_smp *in_mad = NULL;
146 struct ib_smp *out_mad = NULL;
147 int err = -ENOMEM;
148
149 in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
150 out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
151 if (!in_mad || !out_mad)
152 goto out;
153
154 memset(props, 0, sizeof *props);
155
156 init_query_mad(in_mad);
157 in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
158 in_mad->attr_mod = cpu_to_be32(port);
159
160 err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
161 if (err)
162 goto out;
163
164 props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16));
165 props->lmc = out_mad->data[34] & 0x7;
166 props->sm_lid = be16_to_cpup((__be16 *) (out_mad->data + 18));
167 props->sm_sl = out_mad->data[36] & 0xf;
168 props->state = out_mad->data[32] & 0xf;
169 props->phys_state = out_mad->data[33] >> 4;
170 props->port_cap_flags = be32_to_cpup((__be32 *) (out_mad->data + 20));
171 props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len;
172 props->max_msg_sz = 0x80000000;
173 props->pkey_tbl_len = to_mdev(ibdev)->dev->caps.pkey_table_len;
174 props->bad_pkey_cntr = be16_to_cpup((__be16 *) (out_mad->data + 46));
175 props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48));
176 props->active_width = out_mad->data[31] & 0xf;
177 props->active_speed = out_mad->data[35] >> 4;
178 props->max_mtu = out_mad->data[41] & 0xf;
179 props->active_mtu = out_mad->data[36] >> 4;
180 props->subnet_timeout = out_mad->data[51] & 0x1f;
181 props->max_vl_num = out_mad->data[37] >> 4;
182 props->init_type_reply = out_mad->data[41] >> 4;
183
184out:
185 kfree(in_mad);
186 kfree(out_mad);
187
188 return err;
189}
190
191static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
192 union ib_gid *gid)
193{
194 struct ib_smp *in_mad = NULL;
195 struct ib_smp *out_mad = NULL;
196 int err = -ENOMEM;
197
198 in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
199 out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
200 if (!in_mad || !out_mad)
201 goto out;
202
203 init_query_mad(in_mad);
204 in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
205 in_mad->attr_mod = cpu_to_be32(port);
206
207 err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
208 if (err)
209 goto out;
210
211 memcpy(gid->raw, out_mad->data + 8, 8);
212
213 init_query_mad(in_mad);
214 in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
215 in_mad->attr_mod = cpu_to_be32(index / 8);
216
217 err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
218 if (err)
219 goto out;
220
221 memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);
222
223out:
224 kfree(in_mad);
225 kfree(out_mad);
226 return err;
227}
228
229static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
230 u16 *pkey)
231{
232 struct ib_smp *in_mad = NULL;
233 struct ib_smp *out_mad = NULL;
234 int err = -ENOMEM;
235
236 in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
237 out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
238 if (!in_mad || !out_mad)
239 goto out;
240
241 init_query_mad(in_mad);
242 in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
243 in_mad->attr_mod = cpu_to_be32(index / 32);
244
245 err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
246 if (err)
247 goto out;
248
249 *pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);
250
251out:
252 kfree(in_mad);
253 kfree(out_mad);
254 return err;
255}
256
257static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
258 struct ib_device_modify *props)
259{
260 if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
261 return -EOPNOTSUPP;
262
263 if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
264 spin_lock(&to_mdev(ibdev)->sm_lock);
265 memcpy(ibdev->node_desc, props->node_desc, 64);
266 spin_unlock(&to_mdev(ibdev)->sm_lock);
267 }
268
269 return 0;
270}
271
272static int mlx4_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
273 u32 cap_mask)
274{
275 struct mlx4_cmd_mailbox *mailbox;
276 int err;
277
278 mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
279 if (IS_ERR(mailbox))
280 return PTR_ERR(mailbox);
281
282 memset(mailbox->buf, 0, 256);
283 *(u8 *) mailbox->buf = !!reset_qkey_viols << 6;
284 ((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
285
286 err = mlx4_cmd(dev->dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,
287 MLX4_CMD_TIME_CLASS_B);
288
289 mlx4_free_cmd_mailbox(dev->dev, mailbox);
290 return err;
291}
292
293static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
294 struct ib_port_modify *props)
295{
296 struct ib_port_attr attr;
297 u32 cap_mask;
298 int err;
299
300 mutex_lock(&to_mdev(ibdev)->cap_mask_mutex);
301
302 err = mlx4_ib_query_port(ibdev, port, &attr);
303 if (err)
304 goto out;
305
306 cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
307 ~props->clr_port_cap_mask;
308
309 err = mlx4_SET_PORT(to_mdev(ibdev), port,
310 !!(mask & IB_PORT_RESET_QKEY_CNTR),
311 cap_mask);
312
313out:
314 mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
315 return err;
316}
317
318static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
319 struct ib_udata *udata)
320{
321 struct mlx4_ib_dev *dev = to_mdev(ibdev);
322 struct mlx4_ib_ucontext *context;
323 struct mlx4_ib_alloc_ucontext_resp resp;
324 int err;
325
326 resp.qp_tab_size = dev->dev->caps.num_qps;
327 resp.bf_reg_size = dev->dev->caps.bf_reg_size;
328 resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
329
330 context = kmalloc(sizeof *context, GFP_KERNEL);
331 if (!context)
332 return ERR_PTR(-ENOMEM);
333
334 err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
335 if (err) {
336 kfree(context);
337 return ERR_PTR(err);
338 }
339
340 INIT_LIST_HEAD(&context->db_page_list);
341 mutex_init(&context->db_page_mutex);
342
343 err = ib_copy_to_udata(udata, &resp, sizeof resp);
344 if (err) {
345 mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
346 kfree(context);
347 return ERR_PTR(-EFAULT);
348 }
349
350 return &context->ibucontext;
351}
352
353static int mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
354{
355 struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);
356
357 mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar);
358 kfree(context);
359
360 return 0;
361}
362
363static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
364{
365 struct mlx4_ib_dev *dev = to_mdev(context->device);
366
367 if (vma->vm_end - vma->vm_start != PAGE_SIZE)
368 return -EINVAL;
369
370 if (vma->vm_pgoff == 0) {
371 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
372
373 if (io_remap_pfn_range(vma, vma->vm_start,
374 to_mucontext(context)->uar.pfn,
375 PAGE_SIZE, vma->vm_page_prot))
376 return -EAGAIN;
377 } else if (vma->vm_pgoff == 1 && dev->dev->caps.bf_reg_size != 0) {
378 /* FIXME want pgprot_writecombine() for BlueFlame pages */
379 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
380
381 if (io_remap_pfn_range(vma, vma->vm_start,
382 to_mucontext(context)->uar.pfn +
383 dev->dev->caps.num_uars,
384 PAGE_SIZE, vma->vm_page_prot))
385 return -EAGAIN;
386 } else
387 return -EINVAL;
388
389 return 0;
390}
391
392static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev,
393 struct ib_ucontext *context,
394 struct ib_udata *udata)
395{
396 struct mlx4_ib_pd *pd;
397 int err;
398
399 pd = kmalloc(sizeof *pd, GFP_KERNEL);
400 if (!pd)
401 return ERR_PTR(-ENOMEM);
402
403 err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
404 if (err) {
405 kfree(pd);
406 return ERR_PTR(err);
407 }
408
409 if (context)
410 if (ib_copy_to_udata(udata, &pd->pdn, sizeof (__u32))) {
411 mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
412 kfree(pd);
413 return ERR_PTR(-EFAULT);
414 }
415
416 return &pd->ibpd;
417}
418
419static int mlx4_ib_dealloc_pd(struct ib_pd *pd)
420{
421 mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
422 kfree(pd);
423
424 return 0;
425}
426
427static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
428{
429 return mlx4_multicast_attach(to_mdev(ibqp->device)->dev,
430 &to_mqp(ibqp)->mqp, gid->raw);
431}
432
433static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
434{
435 return mlx4_multicast_detach(to_mdev(ibqp->device)->dev,
436 &to_mqp(ibqp)->mqp, gid->raw);
437}
438
439static int init_node_data(struct mlx4_ib_dev *dev)
440{
441 struct ib_smp *in_mad = NULL;
442 struct ib_smp *out_mad = NULL;
443 int err = -ENOMEM;
444
445 in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
446 out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
447 if (!in_mad || !out_mad)
448 goto out;
449
450 init_query_mad(in_mad);
451 in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
452
453 err = mlx4_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
454 if (err)
455 goto out;
456
457 memcpy(dev->ib_dev.node_desc, out_mad->data, 64);
458
459 in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
460
461 err = mlx4_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
462 if (err)
463 goto out;
464
465 memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
466
467out:
468 kfree(in_mad);
469 kfree(out_mad);
470 return err;
471}
472
473static void *mlx4_ib_add(struct mlx4_dev *dev)
474{
475 struct mlx4_ib_dev *ibdev;
476
477 ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
478 if (!ibdev) {
479 dev_err(&dev->pdev->dev, "Device struct alloc failed\n");
480 return NULL;
481 }
482
483 if (mlx4_pd_alloc(dev, &ibdev->priv_pdn))
484 goto err_dealloc;
485
486 if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
487 goto err_pd;
488
489 ibdev->uar_map = ioremap(ibdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
490 if (!ibdev->uar_map)
491 goto err_uar;
492
493 INIT_LIST_HEAD(&ibdev->pgdir_list);
494 mutex_init(&ibdev->pgdir_mutex);
495
496 ibdev->dev = dev;
497
498 strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
499 ibdev->ib_dev.owner = THIS_MODULE;
500 ibdev->ib_dev.node_type = RDMA_NODE_IB_CA;
501 ibdev->ib_dev.phys_port_cnt = dev->caps.num_ports;
502 ibdev->ib_dev.num_comp_vectors = 1;
503 ibdev->ib_dev.dma_device = &dev->pdev->dev;
504
505 ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION;
506 ibdev->ib_dev.uverbs_cmd_mask =
507 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
508 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
509 (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
510 (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
511 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
512 (1ull << IB_USER_VERBS_CMD_REG_MR) |
513 (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
514 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
515 (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
516 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
517 (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
518 (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
519 (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
520 (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
521 (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
522 (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
523 (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
524 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);
525
526 ibdev->ib_dev.query_device = mlx4_ib_query_device;
527 ibdev->ib_dev.query_port = mlx4_ib_query_port;
528 ibdev->ib_dev.query_gid = mlx4_ib_query_gid;
529 ibdev->ib_dev.query_pkey = mlx4_ib_query_pkey;
530 ibdev->ib_dev.modify_device = mlx4_ib_modify_device;
531 ibdev->ib_dev.modify_port = mlx4_ib_modify_port;
532 ibdev->ib_dev.alloc_ucontext = mlx4_ib_alloc_ucontext;
533 ibdev->ib_dev.dealloc_ucontext = mlx4_ib_dealloc_ucontext;
534 ibdev->ib_dev.mmap = mlx4_ib_mmap;
535 ibdev->ib_dev.alloc_pd = mlx4_ib_alloc_pd;
536 ibdev->ib_dev.dealloc_pd = mlx4_ib_dealloc_pd;
537 ibdev->ib_dev.create_ah = mlx4_ib_create_ah;
538 ibdev->ib_dev.query_ah = mlx4_ib_query_ah;
539 ibdev->ib_dev.destroy_ah = mlx4_ib_destroy_ah;
540 ibdev->ib_dev.create_srq = mlx4_ib_create_srq;
541 ibdev->ib_dev.modify_srq = mlx4_ib_modify_srq;
542 ibdev->ib_dev.destroy_srq = mlx4_ib_destroy_srq;
543 ibdev->ib_dev.post_srq_recv = mlx4_ib_post_srq_recv;
544 ibdev->ib_dev.create_qp = mlx4_ib_create_qp;
545 ibdev->ib_dev.modify_qp = mlx4_ib_modify_qp;
546 ibdev->ib_dev.destroy_qp = mlx4_ib_destroy_qp;
547 ibdev->ib_dev.post_send = mlx4_ib_post_send;
548 ibdev->ib_dev.post_recv = mlx4_ib_post_recv;
549 ibdev->ib_dev.create_cq = mlx4_ib_create_cq;
550 ibdev->ib_dev.destroy_cq = mlx4_ib_destroy_cq;
551 ibdev->ib_dev.poll_cq = mlx4_ib_poll_cq;
552 ibdev->ib_dev.req_notify_cq = mlx4_ib_arm_cq;
553 ibdev->ib_dev.get_dma_mr = mlx4_ib_get_dma_mr;
554 ibdev->ib_dev.reg_user_mr = mlx4_ib_reg_user_mr;
555 ibdev->ib_dev.dereg_mr = mlx4_ib_dereg_mr;
556 ibdev->ib_dev.attach_mcast = mlx4_ib_mcg_attach;
557 ibdev->ib_dev.detach_mcast = mlx4_ib_mcg_detach;
558 ibdev->ib_dev.process_mad = mlx4_ib_process_mad;
559
560 if (init_node_data(ibdev))
561 goto err_map;
562
563 spin_lock_init(&ibdev->sm_lock);
564 mutex_init(&ibdev->cap_mask_mutex);
565
566 if (ib_register_device(&ibdev->ib_dev))
567 goto err_map;
568
569 if (mlx4_ib_mad_init(ibdev))
570 goto err_reg;
571
572 return ibdev;
573
574err_reg:
575 ib_unregister_device(&ibdev->ib_dev);
576
577err_map:
578 iounmap(ibdev->uar_map);
579
580err_uar:
581 mlx4_uar_free(dev, &ibdev->priv_uar);
582
583err_pd:
584 mlx4_pd_free(dev, ibdev->priv_pdn);
585
586err_dealloc:
587 ib_dealloc_device(&ibdev->ib_dev);
588
589 return NULL;
590}
591
592static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
593{
594 struct mlx4_ib_dev *ibdev = ibdev_ptr;
595 int p;
596
597 for (p = 1; p <= dev->caps.num_ports; ++p)
598 mlx4_CLOSE_PORT(dev, p);
599
600 mlx4_ib_mad_cleanup(ibdev);
601 ib_unregister_device(&ibdev->ib_dev);
602 iounmap(ibdev->uar_map);
603 mlx4_uar_free(dev, &ibdev->priv_uar);
604 mlx4_pd_free(dev, ibdev->priv_pdn);
605 ib_dealloc_device(&ibdev->ib_dev);
606}
607
608static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
609 enum mlx4_dev_event event, int subtype,
610 int port)
611{
612 struct ib_event ibev;
613
614 switch (event) {
615 case MLX4_EVENT_TYPE_PORT_CHANGE:
616 ibev.event = subtype == MLX4_PORT_CHANGE_SUBTYPE_ACTIVE ?
617 IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
618 break;
619
620 case MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR:
621 ibev.event = IB_EVENT_DEVICE_FATAL;
622 break;
623
624 default:
625 return;
626 }
627
628 ibev.device = ibdev_ptr;
629 ibev.element.port_num = port;
630
631 ib_dispatch_event(&ibev);
632}
633
634static struct mlx4_interface mlx4_ib_interface = {
635 .add = mlx4_ib_add,
636 .remove = mlx4_ib_remove,
637 .event = mlx4_ib_event
638};
639
640static int __init mlx4_ib_init(void)
641{
642 return mlx4_register_interface(&mlx4_ib_interface);
643}
644
645static void __exit mlx4_ib_cleanup(void)
646{
647 mlx4_unregister_interface(&mlx4_ib_interface);
648}
649
650module_init(mlx4_ib_init);
651module_exit(mlx4_ib_cleanup);
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
new file mode 100644
index 000000000000..93dac71f3230
--- /dev/null
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -0,0 +1,285 @@
1/*
2 * Copyright (c) 2006, 2007 Cisco Systems. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#ifndef MLX4_IB_H
34#define MLX4_IB_H
35
36#include <linux/compiler.h>
37#include <linux/list.h>
38
39#include <rdma/ib_verbs.h>
40#include <rdma/ib_umem.h>
41
42#include <linux/mlx4/device.h>
43#include <linux/mlx4/doorbell.h>
44
45enum {
46 MLX4_IB_DB_PER_PAGE = PAGE_SIZE / 4
47};
48
49struct mlx4_ib_db_pgdir;
50struct mlx4_ib_user_db_page;
51
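/*
 * Note: a doorbell record is a __be32 counter kept in DMA-coherent
 * memory that the HCA polls instead of requiring an MMIO write per
 * receive.  Kernel consumers carve records out of shared "pgdir"
 * pages; userspace consumers register a page of their own address
 * space, hence the union of backing types below.
 */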
52struct mlx4_ib_db {
53 __be32 *db;
54 union {
55 struct mlx4_ib_db_pgdir *pgdir;
56 struct mlx4_ib_user_db_page *user_page;
57 } u;
58 dma_addr_t dma;
59 int index;
60 int order;
61};
62
63struct mlx4_ib_ucontext {
64 struct ib_ucontext ibucontext;
65 struct mlx4_uar uar;
66 struct list_head db_page_list;
67 struct mutex db_page_mutex;
68};
69
70struct mlx4_ib_pd {
71 struct ib_pd ibpd;
72 u32 pdn;
73};
74
75struct mlx4_ib_cq_buf {
76 struct mlx4_buf buf;
77 struct mlx4_mtt mtt;
78};
79
80struct mlx4_ib_cq {
81 struct ib_cq ibcq;
82 struct mlx4_cq mcq;
83 struct mlx4_ib_cq_buf buf;
84 struct mlx4_ib_db db;
85 spinlock_t lock;
86 struct ib_umem *umem;
87};
88
89struct mlx4_ib_mr {
90 struct ib_mr ibmr;
91 struct mlx4_mr mmr;
92 struct ib_umem *umem;
93};
94
95struct mlx4_ib_wq {
96 u64 *wrid;
97 spinlock_t lock;
98 int max;
99 int max_gs;
100 int offset;
101 int wqe_shift;
102 unsigned head;
103 unsigned tail;
104};
105
106struct mlx4_ib_qp {
107 struct ib_qp ibqp;
108 struct mlx4_qp mqp;
109 struct mlx4_buf buf;
110
111 struct mlx4_ib_db db;
112 struct mlx4_ib_wq rq;
113
114 u32 doorbell_qpn;
115 __be32 sq_signal_bits;
116 struct mlx4_ib_wq sq;
117
118 struct ib_umem *umem;
119 struct mlx4_mtt mtt;
120 int buf_size;
121 struct mutex mutex;
122 u8 port;
123 u8 alt_port;
124 u8 atomic_rd_en;
125 u8 resp_depth;
126 u8 state;
127};
128
129struct mlx4_ib_srq {
130 struct ib_srq ibsrq;
131 struct mlx4_srq msrq;
132 struct mlx4_buf buf;
133 struct mlx4_ib_db db;
134 u64 *wrid;
135 spinlock_t lock;
136 int head;
137 int tail;
138 u16 wqe_ctr;
139 struct ib_umem *umem;
140 struct mlx4_mtt mtt;
141 struct mutex mutex;
142};
143
144struct mlx4_ib_ah {
145 struct ib_ah ibah;
146 struct mlx4_av av;
147};
148
149struct mlx4_ib_dev {
150 struct ib_device ib_dev;
151 struct mlx4_dev *dev;
152 void __iomem *uar_map;
153
154 struct list_head pgdir_list;
155 struct mutex pgdir_mutex;
156
157 struct mlx4_uar priv_uar;
158 u32 priv_pdn;
159 MLX4_DECLARE_DOORBELL_LOCK(uar_lock);
160
161 struct ib_mad_agent *send_agent[MLX4_MAX_PORTS][2];
162 struct ib_ah *sm_ah[MLX4_MAX_PORTS];
163 spinlock_t sm_lock;
164
165 struct mutex cap_mask_mutex;
166};
167
168static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev)
169{
170 return container_of(ibdev, struct mlx4_ib_dev, ib_dev);
171}
172
173static inline struct mlx4_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
174{
175 return container_of(ibucontext, struct mlx4_ib_ucontext, ibucontext);
176}
177
178static inline struct mlx4_ib_pd *to_mpd(struct ib_pd *ibpd)
179{
180 return container_of(ibpd, struct mlx4_ib_pd, ibpd);
181}
182
183static inline struct mlx4_ib_cq *to_mcq(struct ib_cq *ibcq)
184{
185 return container_of(ibcq, struct mlx4_ib_cq, ibcq);
186}
187
188static inline struct mlx4_ib_cq *to_mibcq(struct mlx4_cq *mcq)
189{
190 return container_of(mcq, struct mlx4_ib_cq, mcq);
191}
192
193static inline struct mlx4_ib_mr *to_mmr(struct ib_mr *ibmr)
194{
195 return container_of(ibmr, struct mlx4_ib_mr, ibmr);
196}
197
198static inline struct mlx4_ib_qp *to_mqp(struct ib_qp *ibqp)
199{
200 return container_of(ibqp, struct mlx4_ib_qp, ibqp);
201}
202
203static inline struct mlx4_ib_qp *to_mibqp(struct mlx4_qp *mqp)
204{
205 return container_of(mqp, struct mlx4_ib_qp, mqp);
206}
207
208static inline struct mlx4_ib_srq *to_msrq(struct ib_srq *ibsrq)
209{
210 return container_of(ibsrq, struct mlx4_ib_srq, ibsrq);
211}
212
213static inline struct mlx4_ib_srq *to_mibsrq(struct mlx4_srq *msrq)
214{
215 return container_of(msrq, struct mlx4_ib_srq, msrq);
216}
217
218static inline struct mlx4_ib_ah *to_mah(struct ib_ah *ibah)
219{
220 return container_of(ibah, struct mlx4_ib_ah, ibah);
221}
222
223int mlx4_ib_db_alloc(struct mlx4_ib_dev *dev, struct mlx4_ib_db *db, int order);
224void mlx4_ib_db_free(struct mlx4_ib_dev *dev, struct mlx4_ib_db *db);
225int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt,
226 struct mlx4_ib_db *db);
227void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_ib_db *db);
228
229struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc);
230int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
231 struct ib_umem *umem);
232struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
233 u64 virt_addr, int access_flags,
234 struct ib_udata *udata);
235int mlx4_ib_dereg_mr(struct ib_mr *mr);
236
237struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector,
238 struct ib_ucontext *context,
239 struct ib_udata *udata);
240int mlx4_ib_destroy_cq(struct ib_cq *cq);
241int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
242int mlx4_ib_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
243void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
244void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
245
246struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
247int mlx4_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
248int mlx4_ib_destroy_ah(struct ib_ah *ah);
249
250struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
251 struct ib_srq_init_attr *init_attr,
252 struct ib_udata *udata);
253int mlx4_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
254 enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
255int mlx4_ib_destroy_srq(struct ib_srq *srq);
256void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index);
257int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
258 struct ib_recv_wr **bad_wr);
259
260struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
261 struct ib_qp_init_attr *init_attr,
262 struct ib_udata *udata);
263int mlx4_ib_destroy_qp(struct ib_qp *qp);
264int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
265 int attr_mask, struct ib_udata *udata);
266int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
267 struct ib_send_wr **bad_wr);
268int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
269 struct ib_recv_wr **bad_wr);
270
271int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int ignore_mkey, int ignore_bkey,
272 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
273 void *in_mad, void *response_mad);
274int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
275 struct ib_wc *in_wc, struct ib_grh *in_grh,
276 struct ib_mad *in_mad, struct ib_mad *out_mad);
277int mlx4_ib_mad_init(struct mlx4_ib_dev *dev);
278void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev);
279
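/*
 * Note: in the hardware address vector, bit 7 of g_slid flags the
 * presence of a GRH; the low 7 bits carry the source LID path bits
 * (compare the "& 0x7f" masking when the UD header is built).
 */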
280static inline int mlx4_ib_ah_grh_present(struct mlx4_ib_ah *ah)
281{
282 return !!(ah->av.g_slid & 0x80);
283}
284
285#endif /* MLX4_IB_H */
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
new file mode 100644
index 000000000000..85ae906f1d12
--- /dev/null
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -0,0 +1,184 @@
1/*
2 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include "mlx4_ib.h"
34
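/*
 * Note: translate IB verbs access flags into MLX4 MPT permission
 * bits.  Local read access is always granted, so e.g. an MR
 * registered with IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE
 * maps to MLX4_PERM_LOCAL_WRITE | MLX4_PERM_REMOTE_WRITE |
 * MLX4_PERM_LOCAL_READ.
 */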
35static u32 convert_access(int acc)
36{
37 return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX4_PERM_ATOMIC : 0) |
38 (acc & IB_ACCESS_REMOTE_WRITE ? MLX4_PERM_REMOTE_WRITE : 0) |
39 (acc & IB_ACCESS_REMOTE_READ ? MLX4_PERM_REMOTE_READ : 0) |
40 (acc & IB_ACCESS_LOCAL_WRITE ? MLX4_PERM_LOCAL_WRITE : 0) |
41 MLX4_PERM_LOCAL_READ;
42}
43
44struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc)
45{
46 struct mlx4_ib_mr *mr;
47 int err;
48
49 mr = kmalloc(sizeof *mr, GFP_KERNEL);
50 if (!mr)
51 return ERR_PTR(-ENOMEM);
52
53 err = mlx4_mr_alloc(to_mdev(pd->device)->dev, to_mpd(pd)->pdn, 0,
54 ~0ull, convert_access(acc), 0, 0, &mr->mmr);
55 if (err)
56 goto err_free;
57
58 err = mlx4_mr_enable(to_mdev(pd->device)->dev, &mr->mmr);
59 if (err)
60 goto err_mr;
61
62 mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
63 mr->umem = NULL;
64
65 return &mr->ibmr;
66
67err_mr:
68 mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);
69
70err_free:
71 kfree(mr);
72
73 return ERR_PTR(err);
74}
75
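/*
 * Note: DMA addresses harvested from the umem scatterlist are staged
 * in a single scratch page and flushed to the WRITE_MTT firmware
 * command in batches.  The batch limit of PAGE_SIZE / sizeof(u64) - 2
 * entries presumably leaves room for the two leading 64-bit fields of
 * the command mailbox, keeping each command within one mailbox page.
 */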
76int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
77 struct ib_umem *umem)
78{
79 u64 *pages;
80 struct ib_umem_chunk *chunk;
81 int i, j, k;
82 int n;
83 int len;
84 int err = 0;
85
86 pages = (u64 *) __get_free_page(GFP_KERNEL);
87 if (!pages)
88 return -ENOMEM;
89
90 i = n = 0;
91
92 list_for_each_entry(chunk, &umem->chunk_list, list)
93 for (j = 0; j < chunk->nmap; ++j) {
94 len = sg_dma_len(&chunk->page_list[j]) >> mtt->page_shift;
95 for (k = 0; k < len; ++k) {
96 pages[i++] = sg_dma_address(&chunk->page_list[j]) +
97 umem->page_size * k;
98 /*
99 * Be friendly to WRITE_MTT firmware
100 * command, and pass it chunks of
101 * appropriate size.
102 */
103 if (i == PAGE_SIZE / sizeof (u64) - 2) {
104 err = mlx4_write_mtt(dev->dev, mtt, n,
105 i, pages);
106 if (err)
107 goto out;
108 n += i;
109 i = 0;
110 }
111 }
112 }
113
114 if (i)
115 err = mlx4_write_mtt(dev->dev, mtt, n, i, pages);
116
117out:
118 free_page((unsigned long) pages);
119 return err;
120}
121
122struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
123 u64 virt_addr, int access_flags,
124 struct ib_udata *udata)
125{
126 struct mlx4_ib_dev *dev = to_mdev(pd->device);
127 struct mlx4_ib_mr *mr;
128 int shift;
129 int err;
130 int n;
131
132 mr = kmalloc(sizeof *mr, GFP_KERNEL);
133 if (!mr)
134 return ERR_PTR(-ENOMEM);
135
136 mr->umem = ib_umem_get(pd->uobject->context, start, length, access_flags);
137 if (IS_ERR(mr->umem)) {
138 err = PTR_ERR(mr->umem);
139 goto err_free;
140 }
141
142 n = ib_umem_page_count(mr->umem);
143 shift = ilog2(mr->umem->page_size);
144
145 err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, virt_addr, length,
146 convert_access(access_flags), n, shift, &mr->mmr);
147 if (err)
148 goto err_umem;
149
150 err = mlx4_ib_umem_write_mtt(dev, &mr->mmr.mtt, mr->umem);
151 if (err)
152 goto err_mr;
153
154 err = mlx4_mr_enable(dev->dev, &mr->mmr);
155 if (err)
156 goto err_mr;
157
158 mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
159
160 return &mr->ibmr;
161
162err_mr:
163 mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);
164
165err_umem:
166 ib_umem_release(mr->umem);
167
168err_free:
169 kfree(mr);
170
171 return ERR_PTR(err);
172}
173
174int mlx4_ib_dereg_mr(struct ib_mr *ibmr)
175{
176 struct mlx4_ib_mr *mr = to_mmr(ibmr);
177
178 mlx4_mr_free(to_mdev(ibmr->device)->dev, &mr->mmr);
179 if (mr->umem)
180 ib_umem_release(mr->umem);
181 kfree(mr);
182
183 return 0;
184}
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
new file mode 100644
index 000000000000..5cd706908450
--- /dev/null
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -0,0 +1,1294 @@
1/*
2 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <rdma/ib_cache.h>
34#include <rdma/ib_pack.h>
35
36#include <linux/mlx4/qp.h>
37
38#include "mlx4_ib.h"
39#include "user.h"
40
41enum {
42 MLX4_IB_ACK_REQ_FREQ = 8,
43};
44
45enum {
46 MLX4_IB_DEFAULT_SCHED_QUEUE = 0x83,
47 MLX4_IB_DEFAULT_QP0_SCHED_QUEUE = 0x3f
48};
49
50enum {
51 /*
52 * Largest possible UD header: send with GRH and immediate data.
53 */
54 MLX4_IB_UD_HEADER_SIZE = 72
55};
56
57struct mlx4_ib_sqp {
58 struct mlx4_ib_qp qp;
59 int pkey_index;
60 u32 qkey;
61 u32 send_psn;
62 struct ib_ud_header ud_header;
63 u8 header_buf[MLX4_IB_UD_HEADER_SIZE];
64};
65
66static const __be32 mlx4_ib_opcode[] = {
67 [IB_WR_SEND] = __constant_cpu_to_be32(MLX4_OPCODE_SEND),
68 [IB_WR_SEND_WITH_IMM] = __constant_cpu_to_be32(MLX4_OPCODE_SEND_IMM),
69 [IB_WR_RDMA_WRITE] = __constant_cpu_to_be32(MLX4_OPCODE_RDMA_WRITE),
70 [IB_WR_RDMA_WRITE_WITH_IMM] = __constant_cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM),
71 [IB_WR_RDMA_READ] = __constant_cpu_to_be32(MLX4_OPCODE_RDMA_READ),
72 [IB_WR_ATOMIC_CMP_AND_SWP] = __constant_cpu_to_be32(MLX4_OPCODE_ATOMIC_CS),
73 [IB_WR_ATOMIC_FETCH_AND_ADD] = __constant_cpu_to_be32(MLX4_OPCODE_ATOMIC_FA),
74};
75
76static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp)
77{
78 return container_of(mqp, struct mlx4_ib_sqp, qp);
79}
80
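/*
 * Note: firmware reserves four consecutive QPNs starting at
 * sqp_start for the special QPs: QP0 for ports 1 and 2, then QP1
 * for ports 1 and 2 (see the sqpn computation in
 * mlx4_ib_create_qp()).  is_qp0() matches the first pair, is_sqp()
 * all four.
 */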
81static int is_sqp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
82{
83 return qp->mqp.qpn >= dev->dev->caps.sqp_start &&
84 qp->mqp.qpn <= dev->dev->caps.sqp_start + 3;
85}
86
87static int is_qp0(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
88{
89 return qp->mqp.qpn >= dev->dev->caps.sqp_start &&
90 qp->mqp.qpn <= dev->dev->caps.sqp_start + 1;
91}
92
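/*
 * Note: mlx4_buf_alloc() returns one contiguous buffer when the
 * allocation fits (nbufs == 1) and a list of pages otherwise;
 * get_wqe() hides the difference when mapping a WQE offset to a
 * kernel address.
 */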
93static void *get_wqe(struct mlx4_ib_qp *qp, int offset)
94{
95 if (qp->buf.nbufs == 1)
96 return qp->buf.u.direct.buf + offset;
97 else
98 return qp->buf.u.page_list[offset >> PAGE_SHIFT].buf +
99 (offset & (PAGE_SIZE - 1));
100}
101
102static void *get_recv_wqe(struct mlx4_ib_qp *qp, int n)
103{
104 return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
105}
106
107static void *get_send_wqe(struct mlx4_ib_qp *qp, int n)
108{
109 return get_wqe(qp, qp->sq.offset + (n << qp->sq.wqe_shift));
110}
111
112static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type)
113{
114 struct ib_event event;
115 struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
116
117 if (type == MLX4_EVENT_TYPE_PATH_MIG)
118 to_mibqp(qp)->port = to_mibqp(qp)->alt_port;
119
120 if (ibqp->event_handler) {
121 event.device = ibqp->device;
122 event.element.qp = ibqp;
123 switch (type) {
124 case MLX4_EVENT_TYPE_PATH_MIG:
125 event.event = IB_EVENT_PATH_MIG;
126 break;
127 case MLX4_EVENT_TYPE_COMM_EST:
128 event.event = IB_EVENT_COMM_EST;
129 break;
130 case MLX4_EVENT_TYPE_SQ_DRAINED:
131 event.event = IB_EVENT_SQ_DRAINED;
132 break;
133 case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
134 event.event = IB_EVENT_QP_LAST_WQE_REACHED;
135 break;
136 case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
137 event.event = IB_EVENT_QP_FATAL;
138 break;
139 case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
140 event.event = IB_EVENT_PATH_MIG_ERR;
141 break;
142 case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
143 event.event = IB_EVENT_QP_REQ_ERR;
144 break;
145 case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
146 event.event = IB_EVENT_QP_ACCESS_ERR;
147 break;
148 default:
149 printk(KERN_WARNING "mlx4_ib: Unexpected event type %d "
150 "on QP %06x\n", type, qp->qpn);
151 return;
152 }
153
154 ibqp->event_handler(&event, ibqp->qp_context);
155 }
156}
157
158static int send_wqe_overhead(enum ib_qp_type type)
159{
160 /*
161 * UD WQEs must have a datagram segment.
162 * RC and UC WQEs might have a remote address segment.
163 * MLX WQEs need two extra inline data segments (for the UD
164 * header and space for the ICRC).
165 */
166 switch (type) {
167 case IB_QPT_UD:
168 return sizeof (struct mlx4_wqe_ctrl_seg) +
169 sizeof (struct mlx4_wqe_datagram_seg);
170 case IB_QPT_UC:
171 return sizeof (struct mlx4_wqe_ctrl_seg) +
172 sizeof (struct mlx4_wqe_raddr_seg);
173 case IB_QPT_RC:
174 return sizeof (struct mlx4_wqe_ctrl_seg) +
175 sizeof (struct mlx4_wqe_atomic_seg) +
176 sizeof (struct mlx4_wqe_raddr_seg);
177 case IB_QPT_SMI:
178 case IB_QPT_GSI:
179 return sizeof (struct mlx4_wqe_ctrl_seg) +
180 ALIGN(MLX4_IB_UD_HEADER_SIZE +
181 sizeof (struct mlx4_wqe_inline_seg),
182 sizeof (struct mlx4_wqe_data_seg)) +
183 ALIGN(4 +
184 sizeof (struct mlx4_wqe_inline_seg),
185 sizeof (struct mlx4_wqe_data_seg));
186 default:
187 return sizeof (struct mlx4_wqe_ctrl_seg);
188 }
189}
190
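/*
 * Illustrative sizing (assuming 16-byte control and data segments
 * and a 48-byte datagram segment): a UD QP with max_send_sge = 4 and
 * no inline data needs max(4 * 16, 0 + 4) + 64 = 128 bytes per send
 * WQE, so sq.wqe_shift = 7 and sq.max_gs = (128 - 64) / 16 = 4.
 */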
191static int set_qp_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
192 enum ib_qp_type type, struct mlx4_ib_qp *qp)
193{
194 /* Sanity check QP size before proceeding */
195 if (cap->max_send_wr > dev->dev->caps.max_wqes ||
196 cap->max_recv_wr > dev->dev->caps.max_wqes ||
197 cap->max_send_sge > dev->dev->caps.max_sq_sg ||
198 cap->max_recv_sge > dev->dev->caps.max_rq_sg ||
199 cap->max_inline_data + send_wqe_overhead(type) +
200 sizeof (struct mlx4_wqe_inline_seg) > dev->dev->caps.max_sq_desc_sz)
201 return -EINVAL;
202
203 /*
204 * For MLX transport we need 2 extra S/G entries:
205 * one for the header and one for the checksum at the end
206 */
207 if ((type == IB_QPT_SMI || type == IB_QPT_GSI) &&
208 cap->max_send_sge + 2 > dev->dev->caps.max_sq_sg)
209 return -EINVAL;
210
211 qp->rq.max = cap->max_recv_wr ? roundup_pow_of_two(cap->max_recv_wr) : 0;
212 qp->sq.max = cap->max_send_wr ? roundup_pow_of_two(cap->max_send_wr) : 0;
213
214 qp->rq.wqe_shift = ilog2(roundup_pow_of_two(cap->max_recv_sge *
215 sizeof (struct mlx4_wqe_data_seg)));
216 qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof (struct mlx4_wqe_data_seg);
217
218 qp->sq.wqe_shift = ilog2(roundup_pow_of_two(max(cap->max_send_sge *
219 sizeof (struct mlx4_wqe_data_seg),
220 cap->max_inline_data +
221 sizeof (struct mlx4_wqe_inline_seg)) +
222 send_wqe_overhead(type)));
223 qp->sq.max_gs = ((1 << qp->sq.wqe_shift) - send_wqe_overhead(type)) /
224 sizeof (struct mlx4_wqe_data_seg);
225
226 qp->buf_size = (qp->rq.max << qp->rq.wqe_shift) +
227 (qp->sq.max << qp->sq.wqe_shift);
228 if (qp->rq.wqe_shift > qp->sq.wqe_shift) {
229 qp->rq.offset = 0;
230 qp->sq.offset = qp->rq.max << qp->rq.wqe_shift;
231 } else {
232 qp->rq.offset = qp->sq.max << qp->sq.wqe_shift;
233 qp->sq.offset = 0;
234 }
235
236 cap->max_send_wr = qp->sq.max;
237 cap->max_recv_wr = qp->rq.max;
238 cap->max_send_sge = qp->sq.max_gs;
239 cap->max_recv_sge = qp->rq.max_gs;
240 cap->max_inline_data = (1 << qp->sq.wqe_shift) - send_wqe_overhead(type) -
241 sizeof (struct mlx4_wqe_inline_seg);
242
243 return 0;
244}
245
246static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
247 struct ib_qp_init_attr *init_attr,
248 struct ib_udata *udata, int sqpn, struct mlx4_ib_qp *qp)
249{
250 struct mlx4_wqe_ctrl_seg *ctrl;
251 int err;
252 int i;
253
254 mutex_init(&qp->mutex);
255 spin_lock_init(&qp->sq.lock);
256 spin_lock_init(&qp->rq.lock);
257
258 qp->state = IB_QPS_RESET;
259 qp->atomic_rd_en = 0;
260 qp->resp_depth = 0;
261
262 qp->rq.head = 0;
263 qp->rq.tail = 0;
264 qp->sq.head = 0;
265 qp->sq.tail = 0;
266
267 err = set_qp_size(dev, &init_attr->cap, init_attr->qp_type, qp);
268 if (err)
269 goto err;
270
271 if (pd->uobject) {
272 struct mlx4_ib_create_qp ucmd;
273
274 if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
275 err = -EFAULT;
276 goto err;
277 }
278
279 qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
280 qp->buf_size, 0);
281 if (IS_ERR(qp->umem)) {
282 err = PTR_ERR(qp->umem);
283 goto err;
284 }
285
286 err = mlx4_mtt_init(dev->dev, ib_umem_page_count(qp->umem),
287 ilog2(qp->umem->page_size), &qp->mtt);
288 if (err)
289 goto err_buf;
290
291 err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem);
292 if (err)
293 goto err_mtt;
294
295 err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context),
296 ucmd.db_addr, &qp->db);
297 if (err)
298 goto err_mtt;
299 } else {
300 err = mlx4_ib_db_alloc(dev, &qp->db, 0);
301 if (err)
302 goto err;
303
304 *qp->db.db = 0;
305
306 if (mlx4_buf_alloc(dev->dev, qp->buf_size, PAGE_SIZE * 2, &qp->buf)) {
307 err = -ENOMEM;
308 goto err_db;
309 }
310
311 err = mlx4_mtt_init(dev->dev, qp->buf.npages, qp->buf.page_shift,
312 &qp->mtt);
313 if (err)
314 goto err_buf;
315
316 err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf);
317 if (err)
318 goto err_mtt;
319
320 for (i = 0; i < qp->sq.max; ++i) {
321 ctrl = get_send_wqe(qp, i);
322 ctrl->owner_opcode = cpu_to_be32(1 << 31);
323 }
324
325 qp->sq.wrid = kmalloc(qp->sq.max * sizeof (u64), GFP_KERNEL);
326 qp->rq.wrid = kmalloc(qp->rq.max * sizeof (u64), GFP_KERNEL);
327
328 if (!qp->sq.wrid || !qp->rq.wrid) {
329 err = -ENOMEM;
330 goto err_wrid;
331 }
332
333 /* We don't support inline sends for kernel QPs (yet) */
334 init_attr->cap.max_inline_data = 0;
335 }
336
337 err = mlx4_qp_alloc(dev->dev, sqpn, &qp->mqp);
338 if (err)
339 goto err_wrid;
340
341 /*
342 * Hardware wants QPN written in big-endian order (after
343 * shifting) for send doorbell. Precompute this value to save
344 * a little bit when posting sends.
345 */
346 qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);
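/*
 * For example (illustrative): qpn 0x000123 shifts to 0x00012300 and
 * swab32() stores 0x00230100, so a little-endian writel() of this
 * value puts the big-endian byte sequence 00 01 23 00 on the bus.
 */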
347
348 if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
349 qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);
350 else
351 qp->sq_signal_bits = 0;
352
353 qp->mqp.event = mlx4_ib_qp_event;
354
355 return 0;
356
357err_wrid:
358 if (pd->uobject)
359 mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &qp->db);
360 else {
361 kfree(qp->sq.wrid);
362 kfree(qp->rq.wrid);
363 }
364
365err_mtt:
366 mlx4_mtt_cleanup(dev->dev, &qp->mtt);
367
368err_buf:
369 if (pd->uobject)
370 ib_umem_release(qp->umem);
371 else
372 mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
373
374err_db:
375 if (!pd->uobject)
376 mlx4_ib_db_free(dev, &qp->db);
377
378err:
379 return err;
380}
381
382static enum mlx4_qp_state to_mlx4_state(enum ib_qp_state state)
383{
384 switch (state) {
385 case IB_QPS_RESET: return MLX4_QP_STATE_RST;
386 case IB_QPS_INIT: return MLX4_QP_STATE_INIT;
387 case IB_QPS_RTR: return MLX4_QP_STATE_RTR;
388 case IB_QPS_RTS: return MLX4_QP_STATE_RTS;
389 case IB_QPS_SQD: return MLX4_QP_STATE_SQD;
390 case IB_QPS_SQE: return MLX4_QP_STATE_SQER;
391 case IB_QPS_ERR: return MLX4_QP_STATE_ERR;
392 default: return -1;
393 }
394}
395
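/*
 * Note: when a QP uses two distinct CQs, both must be held while the
 * QP is removed.  Always taking the lower-numbered CQN first gives a
 * global lock order and avoids an AB-BA deadlock between concurrent
 * destroyers locking the same pair of CQs.
 */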
396static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
397{
398 if (send_cq == recv_cq)
399 spin_lock_irq(&send_cq->lock);
400 else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
401 spin_lock_irq(&send_cq->lock);
402 spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
403 } else {
404 spin_lock_irq(&recv_cq->lock);
405 spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
406 }
407}
408
409static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
410{
411 if (send_cq == recv_cq)
412 spin_unlock_irq(&send_cq->lock);
413 else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
414 spin_unlock(&recv_cq->lock);
415 spin_unlock_irq(&send_cq->lock);
416 } else {
417 spin_unlock(&send_cq->lock);
418 spin_unlock_irq(&recv_cq->lock);
419 }
420}
421
422static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
423 int is_user)
424{
425 struct mlx4_ib_cq *send_cq, *recv_cq;
426
427 if (qp->state != IB_QPS_RESET)
428 if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state),
429 MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp))
430 printk(KERN_WARNING "mlx4_ib: modify QP %06x to RESET failed.\n",
431 qp->mqp.qpn);
432
433 send_cq = to_mcq(qp->ibqp.send_cq);
434 recv_cq = to_mcq(qp->ibqp.recv_cq);
435
436 mlx4_ib_lock_cqs(send_cq, recv_cq);
437
438 if (!is_user) {
439 __mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
440 qp->ibqp.srq ? to_msrq(qp->ibqp.srq): NULL);
441 if (send_cq != recv_cq)
442 __mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
443 }
444
445 mlx4_qp_remove(dev->dev, &qp->mqp);
446
447 mlx4_ib_unlock_cqs(send_cq, recv_cq);
448
449 mlx4_qp_free(dev->dev, &qp->mqp);
450 mlx4_mtt_cleanup(dev->dev, &qp->mtt);
451
452 if (is_user) {
453 mlx4_ib_db_unmap_user(to_mucontext(qp->ibqp.uobject->context),
454 &qp->db);
455 ib_umem_release(qp->umem);
456 } else {
457 kfree(qp->sq.wrid);
458 kfree(qp->rq.wrid);
459 mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
460 mlx4_ib_db_free(dev, &qp->db);
461 }
462}
463
464struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
465 struct ib_qp_init_attr *init_attr,
466 struct ib_udata *udata)
467{
468 struct mlx4_ib_dev *dev = to_mdev(pd->device);
469 struct mlx4_ib_sqp *sqp;
470 struct mlx4_ib_qp *qp;
471 int err;
472
473 switch (init_attr->qp_type) {
474 case IB_QPT_RC:
475 case IB_QPT_UC:
476 case IB_QPT_UD:
477 {
478 qp = kmalloc(sizeof *qp, GFP_KERNEL);
479 if (!qp)
480 return ERR_PTR(-ENOMEM);
481
482 err = create_qp_common(dev, pd, init_attr, udata, 0, qp);
483 if (err) {
484 kfree(qp);
485 return ERR_PTR(err);
486 }
487
488 qp->ibqp.qp_num = qp->mqp.qpn;
489
490 break;
491 }
492 case IB_QPT_SMI:
493 case IB_QPT_GSI:
494 {
495 /* Userspace is not allowed to create special QPs: */
496 if (pd->uobject)
497 return ERR_PTR(-EINVAL);
498
499 sqp = kmalloc(sizeof *sqp, GFP_KERNEL);
500 if (!sqp)
501 return ERR_PTR(-ENOMEM);
502
503 qp = &sqp->qp;
504
505 err = create_qp_common(dev, pd, init_attr, udata,
506 dev->dev->caps.sqp_start +
507 (init_attr->qp_type == IB_QPT_SMI ? 0 : 2) +
508 init_attr->port_num - 1,
509 qp);
510 if (err) {
511 kfree(sqp);
512 return ERR_PTR(err);
513 }
514
515 qp->port = init_attr->port_num;
516 qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1;
517
518 break;
519 }
520 default:
521 /* Don't support raw QPs */
522 return ERR_PTR(-EINVAL);
523 }
524
525 return &qp->ibqp;
526}
527
528int mlx4_ib_destroy_qp(struct ib_qp *qp)
529{
530 struct mlx4_ib_dev *dev = to_mdev(qp->device);
531 struct mlx4_ib_qp *mqp = to_mqp(qp);
532
533 if (is_qp0(dev, mqp))
534 mlx4_CLOSE_PORT(dev->dev, mqp->port);
535
536 destroy_qp_common(dev, mqp, !!qp->pd->uobject);
537
538 if (is_sqp(dev, mqp))
539 kfree(to_msqp(mqp));
540 else
541 kfree(mqp);
542
543 return 0;
544}
545
546static void init_port(struct mlx4_ib_dev *dev, int port)
547{
548 struct mlx4_init_port_param param;
549 int err;
550
551 memset(&param, 0, sizeof param);
552
553 param.port_width_cap = dev->dev->caps.port_width_cap;
554 param.vl_cap = dev->dev->caps.vl_cap;
555 param.mtu = ib_mtu_enum_to_int(dev->dev->caps.mtu_cap);
556 param.max_gid = dev->dev->caps.gid_table_len;
557 param.max_pkey = dev->dev->caps.pkey_table_len;
558
559 err = mlx4_INIT_PORT(dev->dev, &param, port);
560 if (err)
561 printk(KERN_WARNING "INIT_PORT failed, return code %d.\n", err);
562}
563
564static int to_mlx4_st(enum ib_qp_type type)
565{
566 switch (type) {
567 case IB_QPT_RC: return MLX4_QP_ST_RC;
568 case IB_QPT_UC: return MLX4_QP_ST_UC;
569 case IB_QPT_UD: return MLX4_QP_ST_UD;
570 case IB_QPT_SMI:
571 case IB_QPT_GSI: return MLX4_QP_ST_MLX;
572 default: return -1;
573 }
574}
575
576static __be32 to_mlx4_access_flags(struct mlx4_ib_qp *qp, struct ib_qp_attr *attr,
577 int attr_mask)
578{
579 u8 dest_rd_atomic;
580 u32 access_flags;
581 u32 hw_access_flags = 0;
582
583 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
584 dest_rd_atomic = attr->max_dest_rd_atomic;
585 else
586 dest_rd_atomic = qp->resp_depth;
587
588 if (attr_mask & IB_QP_ACCESS_FLAGS)
589 access_flags = attr->qp_access_flags;
590 else
591 access_flags = qp->atomic_rd_en;
592
593 if (!dest_rd_atomic)
594 access_flags &= IB_ACCESS_REMOTE_WRITE;
595
596 if (access_flags & IB_ACCESS_REMOTE_READ)
597 hw_access_flags |= MLX4_QP_BIT_RRE;
598 if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
599 hw_access_flags |= MLX4_QP_BIT_RAE;
600 if (access_flags & IB_ACCESS_REMOTE_WRITE)
601 hw_access_flags |= MLX4_QP_BIT_RWE;
602
603 return cpu_to_be32(hw_access_flags);
604}
605
606static void store_sqp_attrs(struct mlx4_ib_sqp *sqp, struct ib_qp_attr *attr,
607 int attr_mask)
608{
609 if (attr_mask & IB_QP_PKEY_INDEX)
610 sqp->pkey_index = attr->pkey_index;
611 if (attr_mask & IB_QP_QKEY)
612 sqp->qkey = attr->qkey;
613 if (attr_mask & IB_QP_SQ_PSN)
614 sqp->send_psn = attr->sq_psn;
615}
616
617static void mlx4_set_sched(struct mlx4_qp_path *path, u8 port)
618{
619 path->sched_queue = (path->sched_queue & 0xbf) | ((port - 1) << 6);
620}
621
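/*
 * Note: ah->static_rate carries an IB rate enum; the hardware
 * encoding adds MLX4_STAT_RATE_OFFSET.  The loop below steps the
 * rate down until it is advertised in caps.stat_rate_support (or
 * bottoms out at 2.5 Gb/sec), so an unsupported rate degrades
 * gracefully instead of failing the modify.
 */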
622static int mlx4_set_path(struct mlx4_ib_dev *dev, struct ib_ah_attr *ah,
623 struct mlx4_qp_path *path, u8 port)
624{
625 path->grh_mylmc = ah->src_path_bits & 0x7f;
626 path->rlid = cpu_to_be16(ah->dlid);
627 if (ah->static_rate) {
628 path->static_rate = ah->static_rate + MLX4_STAT_RATE_OFFSET;
629 while (path->static_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET &&
630 !(1 << path->static_rate & dev->dev->caps.stat_rate_support))
631 --path->static_rate;
632 } else
633 path->static_rate = 0;
634 path->counter_index = 0xff;
635
636 if (ah->ah_flags & IB_AH_GRH) {
637 if (ah->grh.sgid_index >= dev->dev->caps.gid_table_len) {
638 printk(KERN_ERR "sgid_index (%u) too large. max is %d\n",
639 ah->grh.sgid_index, dev->dev->caps.gid_table_len - 1);
640 return -1;
641 }
642
643 path->grh_mylmc |= 1 << 7;
644 path->mgid_index = ah->grh.sgid_index;
645 path->hop_limit = ah->grh.hop_limit;
646 path->tclass_flowlabel =
647 cpu_to_be32((ah->grh.traffic_class << 20) |
648 (ah->grh.flow_label));
649 memcpy(path->rgid, ah->grh.dgid.raw, 16);
650 }
651
652 path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
653 ((port - 1) << 6) | ((ah->sl & 0xf) << 2);
654
655 return 0;
656}
657
658int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
659 int attr_mask, struct ib_udata *udata)
660{
661 struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
662 struct mlx4_ib_qp *qp = to_mqp(ibqp);
663 struct mlx4_qp_context *context;
664 enum mlx4_qp_optpar optpar = 0;
665 enum ib_qp_state cur_state, new_state;
666 int sqd_event;
667 int err = -EINVAL;
668
669 context = kzalloc(sizeof *context, GFP_KERNEL);
670 if (!context)
671 return -ENOMEM;
672
673 mutex_lock(&qp->mutex);
674
675 cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
676 new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
677
678 if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask))
679 goto out;
680
681 if ((attr_mask & IB_QP_PKEY_INDEX) &&
682 attr->pkey_index >= dev->dev->caps.pkey_table_len) {
683 goto out;
684 }
685
686 if ((attr_mask & IB_QP_PORT) &&
687 (attr->port_num == 0 || attr->port_num > dev->dev->caps.num_ports)) {
688 goto out;
689 }
690
691 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
692 attr->max_rd_atomic > dev->dev->caps.max_qp_init_rdma) {
693 goto out;
694 }
695
696 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
697 attr->max_dest_rd_atomic > 1 << dev->dev->caps.max_qp_dest_rdma) {
698 goto out;
699 }
700
701 context->flags = cpu_to_be32((to_mlx4_state(new_state) << 28) |
702 (to_mlx4_st(ibqp->qp_type) << 16));
703 context->flags |= cpu_to_be32(1 << 8); /* DE? */
704
705 if (!(attr_mask & IB_QP_PATH_MIG_STATE))
706 context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11);
707 else {
708 optpar |= MLX4_QP_OPTPAR_PM_STATE;
709 switch (attr->path_mig_state) {
710 case IB_MIG_MIGRATED:
711 context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11);
712 break;
713 case IB_MIG_REARM:
714 context->flags |= cpu_to_be32(MLX4_QP_PM_REARM << 11);
715 break;
716 case IB_MIG_ARMED:
717 context->flags |= cpu_to_be32(MLX4_QP_PM_ARMED << 11);
718 break;
719 }
720 }
721
722 if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI ||
723 ibqp->qp_type == IB_QPT_UD)
724 context->mtu_msgmax = (IB_MTU_4096 << 5) | 11;
725 else if (attr_mask & IB_QP_PATH_MTU) {
726 if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) {
727 printk(KERN_ERR "path MTU (%u) is invalid\n",
728 attr->path_mtu);
729 goto out; /* don't return here: qp->mutex is held and context must be freed */
730 }
731 context->mtu_msgmax = (attr->path_mtu << 5) | 31;
732 }
733
734 if (qp->rq.max)
735 context->rq_size_stride = ilog2(qp->rq.max) << 3;
736 context->rq_size_stride |= qp->rq.wqe_shift - 4;
737
738 if (qp->sq.max)
739 context->sq_size_stride = ilog2(qp->sq.max) << 3;
740 context->sq_size_stride |= qp->sq.wqe_shift - 4;
741
742 if (qp->ibqp.uobject)
743 context->usr_page = cpu_to_be32(to_mucontext(ibqp->uobject->context)->uar.index);
744 else
745 context->usr_page = cpu_to_be32(dev->priv_uar.index);
746
747 if (attr_mask & IB_QP_DEST_QPN)
748 context->remote_qpn = cpu_to_be32(attr->dest_qp_num);
749
750 if (attr_mask & IB_QP_PORT) {
751 if (cur_state == IB_QPS_SQD && new_state == IB_QPS_SQD &&
752 !(attr_mask & IB_QP_AV)) {
753 mlx4_set_sched(&context->pri_path, attr->port_num);
754 optpar |= MLX4_QP_OPTPAR_SCHED_QUEUE;
755 }
756 }
757
758 if (attr_mask & IB_QP_PKEY_INDEX) {
759 context->pri_path.pkey_index = attr->pkey_index;
760 optpar |= MLX4_QP_OPTPAR_PKEY_INDEX;
761 }
762
763 if (attr_mask & IB_QP_RNR_RETRY) {
764 context->params1 |= cpu_to_be32(attr->rnr_retry << 13);
765 optpar |= MLX4_QP_OPTPAR_RNR_RETRY;
766 }
767
768 if (attr_mask & IB_QP_AV) {
769 if (mlx4_set_path(dev, &attr->ah_attr, &context->pri_path,
770 attr_mask & IB_QP_PORT ? attr->port_num : qp->port)) {
771 err = -EINVAL;
772 goto out;
773 }
774
775 optpar |= (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH |
776 MLX4_QP_OPTPAR_SCHED_QUEUE);
777 }
778
779 if (attr_mask & IB_QP_TIMEOUT) {
780 context->pri_path.ackto = attr->timeout << 3;
781 optpar |= MLX4_QP_OPTPAR_ACK_TIMEOUT;
782 }
783
784 if (attr_mask & IB_QP_ALT_PATH) {
785 if (attr->alt_pkey_index >= dev->dev->caps.pkey_table_len)
786 goto out;
787
788 if (attr->alt_port_num == 0 ||
789 attr->alt_port_num > dev->dev->caps.num_ports)
790 goto out;
791
792 if (mlx4_set_path(dev, &attr->alt_ah_attr, &context->alt_path,
793 attr->alt_port_num))
794 goto out;
795
796 context->alt_path.pkey_index = attr->alt_pkey_index;
797 context->alt_path.ackto = attr->alt_timeout << 3;
798 optpar |= MLX4_QP_OPTPAR_ALT_ADDR_PATH;
799 }
800
801 context->pd = cpu_to_be32(to_mpd(ibqp->pd)->pdn);
802 context->params1 |= cpu_to_be32(MLX4_IB_ACK_REQ_FREQ << 28); /* "|=" so the RNR retry bits set above survive */
803 if (attr_mask & IB_QP_RETRY_CNT) {
804 context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
805 optpar |= MLX4_QP_OPTPAR_RETRY_COUNT;
806 }
807
808 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
809 if (attr->max_rd_atomic)
810 context->params1 |=
811 cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
812 optpar |= MLX4_QP_OPTPAR_SRA_MAX;
813 }
814
815 if (attr_mask & IB_QP_SQ_PSN)
816 context->next_send_psn = cpu_to_be32(attr->sq_psn);
817
818 context->cqn_send = cpu_to_be32(to_mcq(ibqp->send_cq)->mcq.cqn);
819
820 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
821 if (attr->max_dest_rd_atomic)
822 context->params2 |=
823 cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);
824 optpar |= MLX4_QP_OPTPAR_RRA_MAX;
825 }
826
827 if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
828 context->params2 |= to_mlx4_access_flags(qp, attr, attr_mask);
829 optpar |= MLX4_QP_OPTPAR_RWE | MLX4_QP_OPTPAR_RRE | MLX4_QP_OPTPAR_RAE;
830 }
831
832 if (ibqp->srq)
833 context->params2 |= cpu_to_be32(MLX4_QP_BIT_RIC);
834
835 if (attr_mask & IB_QP_MIN_RNR_TIMER) {
836 context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
837 optpar |= MLX4_QP_OPTPAR_RNR_TIMEOUT;
838 }
839 if (attr_mask & IB_QP_RQ_PSN)
840 context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);
841
842 context->cqn_recv = cpu_to_be32(to_mcq(ibqp->recv_cq)->mcq.cqn);
843
844 if (attr_mask & IB_QP_QKEY) {
845 context->qkey = cpu_to_be32(attr->qkey);
846 optpar |= MLX4_QP_OPTPAR_Q_KEY;
847 }
848
849 if (ibqp->srq)
850 context->srqn = cpu_to_be32(1 << 24 | to_msrq(ibqp->srq)->msrq.srqn);
851
852 if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
853 context->db_rec_addr = cpu_to_be64(qp->db.dma);
854
855 if (cur_state == IB_QPS_INIT &&
856 new_state == IB_QPS_RTR &&
857 (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI ||
858 ibqp->qp_type == IB_QPT_UD)) {
859 context->pri_path.sched_queue = (qp->port - 1) << 6;
860 if (is_qp0(dev, qp))
861 context->pri_path.sched_queue |= MLX4_IB_DEFAULT_QP0_SCHED_QUEUE;
862 else
863 context->pri_path.sched_queue |= MLX4_IB_DEFAULT_SCHED_QUEUE;
864 }
865
866 if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
867 attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
868 sqd_event = 1;
869 else
870 sqd_event = 0;
871
872 err = mlx4_qp_modify(dev->dev, &qp->mtt, to_mlx4_state(cur_state),
873 to_mlx4_state(new_state), context, optpar,
874 sqd_event, &qp->mqp);
875 if (err)
876 goto out;
877
878 qp->state = new_state;
879
880 if (attr_mask & IB_QP_ACCESS_FLAGS)
881 qp->atomic_rd_en = attr->qp_access_flags;
882 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
883 qp->resp_depth = attr->max_dest_rd_atomic;
884 if (attr_mask & IB_QP_PORT)
885 qp->port = attr->port_num;
886 if (attr_mask & IB_QP_ALT_PATH)
887 qp->alt_port = attr->alt_port_num;
888
889 if (is_sqp(dev, qp))
890 store_sqp_attrs(to_msqp(qp), attr, attr_mask);
891
892 /*
893 * If we moved QP0 to RTR, bring the IB link up; if we moved
894 * QP0 to RESET or ERROR, bring the link back down.
895 */
896 if (is_qp0(dev, qp)) {
897 if (cur_state != IB_QPS_RTR && new_state == IB_QPS_RTR)
898 init_port(dev, qp->port);
899
900 if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR &&
901 (new_state == IB_QPS_RESET || new_state == IB_QPS_ERR))
902 mlx4_CLOSE_PORT(dev->dev, qp->port);
903 }
904
905 /*
906 * If we moved a kernel QP to RESET, clean up all old CQ
907 * entries and reinitialize the QP.
908 */
909 if (new_state == IB_QPS_RESET && !ibqp->uobject) {
910 mlx4_ib_cq_clean(to_mcq(ibqp->recv_cq), qp->mqp.qpn,
911 ibqp->srq ? to_msrq(ibqp->srq): NULL);
912 if (ibqp->send_cq != ibqp->recv_cq)
913 mlx4_ib_cq_clean(to_mcq(ibqp->send_cq), qp->mqp.qpn, NULL);
914
915 qp->rq.head = 0;
916 qp->rq.tail = 0;
917 qp->sq.head = 0;
918 qp->sq.tail = 0;
919 *qp->db.db = 0;
920 }
921
922out:
923 mutex_unlock(&qp->mutex);
924 kfree(context);
925 return err;
926}
927
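/*
 * Note: on the MLX (special QP) transport the HCA does not generate
 * IB headers, so the LRH/GRH/BTH/DETH chain is built in software
 * with ib_ud_header_pack() and carried as inline data.  The return
 * value is the inline segment size in bytes, aligned to 16, so the
 * caller can advance its WQE write pointer.
 */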
928static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
929 void *wqe)
930{
931 struct ib_device *ib_dev = &to_mdev(sqp->qp.ibqp.device)->ib_dev;
932 struct mlx4_wqe_mlx_seg *mlx = wqe;
933 struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
934 struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah);
935 u16 pkey;
936 int send_size;
937 int header_size;
938 int i;
939
940 send_size = 0;
941 for (i = 0; i < wr->num_sge; ++i)
942 send_size += wr->sg_list[i].length;
943
944 ib_ud_header_init(send_size, mlx4_ib_ah_grh_present(ah), &sqp->ud_header);
945
946 sqp->ud_header.lrh.service_level =
947 be32_to_cpu(ah->av.sl_tclass_flowlabel) >> 28;
948 sqp->ud_header.lrh.destination_lid = ah->av.dlid;
949 sqp->ud_header.lrh.source_lid = cpu_to_be16(ah->av.g_slid & 0x7f);
950 if (mlx4_ib_ah_grh_present(ah)) {
951 sqp->ud_header.grh.traffic_class =
952 (be32_to_cpu(ah->av.sl_tclass_flowlabel) >> 20) & 0xff;
953 sqp->ud_header.grh.flow_label =
954 ah->av.sl_tclass_flowlabel & cpu_to_be32(0xfffff);
955 ib_get_cached_gid(ib_dev, be32_to_cpu(ah->av.port_pd) >> 24,
956 ah->av.gid_index, &sqp->ud_header.grh.source_gid);
957 memcpy(sqp->ud_header.grh.destination_gid.raw,
958 ah->av.dgid, 16);
959 }
960
961 mlx->flags &= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);
962 mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MLX4_WQE_MLX_VL15 : 0) |
963 (sqp->ud_header.lrh.destination_lid ==
964 IB_LID_PERMISSIVE ? MLX4_WQE_MLX_SLR : 0) |
965 (sqp->ud_header.lrh.service_level << 8));
966 mlx->rlid = sqp->ud_header.lrh.destination_lid;
967
968 switch (wr->opcode) {
969 case IB_WR_SEND:
970 sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
971 sqp->ud_header.immediate_present = 0;
972 break;
973 case IB_WR_SEND_WITH_IMM:
974 sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
975 sqp->ud_header.immediate_present = 1;
976 sqp->ud_header.immediate_data = wr->imm_data;
977 break;
978 default:
979 return -EINVAL;
980 }
981
982 sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0;
983 if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
984 sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
985 sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
986 if (!sqp->qp.ibqp.qp_num)
987 ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, &pkey);
988 else
989 ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->wr.ud.pkey_index, &pkey);
990 sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
991 sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
992 sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
993 sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ?
994 sqp->qkey : wr->wr.ud.remote_qkey);
995 sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);
996
997 header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf);
998
999 if (0) {
1000 printk(KERN_ERR "built UD header of size %d:\n", header_size);
1001 for (i = 0; i < header_size / 4; ++i) {
1002 if (i % 8 == 0)
1003 printk(" [%02x] ", i * 4);
1004 printk(" %08x",
1005 be32_to_cpu(((__be32 *) sqp->header_buf)[i]));
1006 if ((i + 1) % 8 == 0)
1007 printk("\n");
1008 }
1009 printk("\n");
1010 }
1011
1012 inl->byte_count = cpu_to_be32(1 << 31 | header_size);
1013 memcpy(inl + 1, sqp->header_buf, header_size);
1014
1015 return ALIGN(sizeof (struct mlx4_wqe_inline_seg) + header_size, 16);
1016}
1017
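/*
 * Note: wq->tail only advances from CQ poll context with cq->lock
 * held, so the lockless head/tail check can at worst be stale.  On
 * apparent overflow the count is retaken under cq->lock to observe
 * any completions that raced with this post.
 */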
1018static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
1019{
1020 unsigned cur;
1021 struct mlx4_ib_cq *cq;
1022
1023 cur = wq->head - wq->tail;
1024 if (likely(cur + nreq < wq->max))
1025 return 0;
1026
1027 cq = to_mcq(ib_cq);
1028 spin_lock(&cq->lock);
1029 cur = wq->head - wq->tail;
1030 spin_unlock(&cq->lock);
1031
1032 return cur + nreq >= wq->max;
1033}
1034
1035int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1036 struct ib_send_wr **bad_wr)
1037{
1038 struct mlx4_ib_qp *qp = to_mqp(ibqp);
1039 void *wqe;
1040 struct mlx4_wqe_ctrl_seg *ctrl;
1041 unsigned long flags;
1042 int nreq;
1043 int err = 0;
1044 int ind;
1045 int size;
1046 int i;
1047
1048 spin_lock_irqsave(&qp->sq.lock, flags);
1049
1050 ind = qp->sq.head;
1051
1052 for (nreq = 0; wr; ++nreq, wr = wr->next) {
1053 if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
1054 err = -ENOMEM;
1055 *bad_wr = wr;
1056 goto out;
1057 }
1058
1059 if (unlikely(wr->num_sge > qp->sq.max_gs)) {
1060 err = -EINVAL;
1061 *bad_wr = wr;
1062 goto out;
1063 }
1064
1065 ctrl = wqe = get_send_wqe(qp, ind & (qp->sq.max - 1));
1066 qp->sq.wrid[ind & (qp->sq.max - 1)] = wr->wr_id;
1067
1068 ctrl->srcrb_flags =
1069 (wr->send_flags & IB_SEND_SIGNALED ?
1070 cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) : 0) |
1071 (wr->send_flags & IB_SEND_SOLICITED ?
1072 cpu_to_be32(MLX4_WQE_CTRL_SOLICITED) : 0) |
1073 qp->sq_signal_bits;
1074
1075 if (wr->opcode == IB_WR_SEND_WITH_IMM ||
1076 wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
1077 ctrl->imm = wr->imm_data;
1078 else
1079 ctrl->imm = 0;
1080
1081 wqe += sizeof *ctrl;
1082 size = sizeof *ctrl / 16;
1083
1084 switch (ibqp->qp_type) {
1085 case IB_QPT_RC:
1086 case IB_QPT_UC:
1087 switch (wr->opcode) {
1088 case IB_WR_ATOMIC_CMP_AND_SWP:
1089 case IB_WR_ATOMIC_FETCH_AND_ADD:
1090 ((struct mlx4_wqe_raddr_seg *) wqe)->raddr =
1091 cpu_to_be64(wr->wr.atomic.remote_addr);
1092 ((struct mlx4_wqe_raddr_seg *) wqe)->rkey =
1093 cpu_to_be32(wr->wr.atomic.rkey);
1094 ((struct mlx4_wqe_raddr_seg *) wqe)->reserved = 0;
1095
1096 wqe += sizeof (struct mlx4_wqe_raddr_seg);
1097
1098 if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
1099 ((struct mlx4_wqe_atomic_seg *) wqe)->swap_add =
1100 cpu_to_be64(wr->wr.atomic.swap);
1101 ((struct mlx4_wqe_atomic_seg *) wqe)->compare =
1102 cpu_to_be64(wr->wr.atomic.compare_add);
1103 } else {
1104 ((struct mlx4_wqe_atomic_seg *) wqe)->swap_add =
1105 cpu_to_be64(wr->wr.atomic.compare_add);
1106 ((struct mlx4_wqe_atomic_seg *) wqe)->compare = 0;
1107 }
1108
1109 wqe += sizeof (struct mlx4_wqe_atomic_seg);
1110 size += (sizeof (struct mlx4_wqe_raddr_seg) +
1111 sizeof (struct mlx4_wqe_atomic_seg)) / 16;
1112
1113 break;
1114
1115 case IB_WR_RDMA_READ:
1116 case IB_WR_RDMA_WRITE:
1117 case IB_WR_RDMA_WRITE_WITH_IMM:
1118 ((struct mlx4_wqe_raddr_seg *) wqe)->raddr =
1119 cpu_to_be64(wr->wr.rdma.remote_addr);
1120 ((struct mlx4_wqe_raddr_seg *) wqe)->rkey =
1121 cpu_to_be32(wr->wr.rdma.rkey);
1122 ((struct mlx4_wqe_raddr_seg *) wqe)->reserved = 0;
1123
1124 wqe += sizeof (struct mlx4_wqe_raddr_seg);
1125 size += sizeof (struct mlx4_wqe_raddr_seg) / 16;
1126
1127 break;
1128
1129 default:
1130 /* No extra segments required for sends */
1131 break;
1132 }
1133 break;
1134
1135 case IB_QPT_UD:
1136 memcpy(((struct mlx4_wqe_datagram_seg *) wqe)->av,
1137 &to_mah(wr->wr.ud.ah)->av, sizeof (struct mlx4_av));
1138 ((struct mlx4_wqe_datagram_seg *) wqe)->dqpn =
1139 cpu_to_be32(wr->wr.ud.remote_qpn);
1140 ((struct mlx4_wqe_datagram_seg *) wqe)->qkey =
1141 cpu_to_be32(wr->wr.ud.remote_qkey);
1142
1143 wqe += sizeof (struct mlx4_wqe_datagram_seg);
1144 size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
1145 break;
1146
1147 case IB_QPT_SMI:
1148 case IB_QPT_GSI:
1149 err = build_mlx_header(to_msqp(qp), wr, ctrl);
1150 if (err < 0) {
1151 *bad_wr = wr;
1152 goto out;
1153 }
1154 wqe += err;
1155 size += err / 16;
1156
1157 err = 0;
1158 break;
1159
1160 default:
1161 break;
1162 }
1163
1164 for (i = 0; i < wr->num_sge; ++i) {
1165 ((struct mlx4_wqe_data_seg *) wqe)->byte_count =
1166 cpu_to_be32(wr->sg_list[i].length);
1167 ((struct mlx4_wqe_data_seg *) wqe)->lkey =
1168 cpu_to_be32(wr->sg_list[i].lkey);
1169 ((struct mlx4_wqe_data_seg *) wqe)->addr =
1170 cpu_to_be64(wr->sg_list[i].addr);
1171
1172 wqe += sizeof (struct mlx4_wqe_data_seg);
1173 size += sizeof (struct mlx4_wqe_data_seg) / 16;
1174 }
1175
1176 /* Add one more inline data segment for ICRC for MLX sends */
1177 if (qp->ibqp.qp_type == IB_QPT_SMI || qp->ibqp.qp_type == IB_QPT_GSI) {
1178 ((struct mlx4_wqe_inline_seg *) wqe)->byte_count =
1179 cpu_to_be32((1 << 31) | 4);
1180 ((u32 *) wqe)[1] = 0;
1181 wqe += sizeof (struct mlx4_wqe_data_seg);
1182 size += sizeof (struct mlx4_wqe_data_seg) / 16;
1183 }
1184
1185 ctrl->fence_size = (wr->send_flags & IB_SEND_FENCE ?
1186 MLX4_WQE_CTRL_FENCE : 0) | size;
1187
1188 /*
1189 * Make sure descriptor is fully written before
1190 * setting ownership bit (because HW can start
1191 * executing as soon as we do).
1192 */
1193 wmb();
1194
1195 if (wr->opcode < 0 || wr->opcode >= ARRAY_SIZE(mlx4_ib_opcode)) {
1196 err = -EINVAL;
1197 goto out;
1198 }
1199
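/*
 * The high bit of owner_opcode is the ownership (valid) bit; since
 * sq.max is a power of two, "ind & qp->sq.max" flips it on every
 * pass around the ring, letting hardware distinguish freshly posted
 * WQEs from stale ones without re-stamping the queue.
 */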
1200 ctrl->owner_opcode = mlx4_ib_opcode[wr->opcode] |
1201 (ind & qp->sq.max ? cpu_to_be32(1 << 31) : 0);
1202
1203 ++ind;
1204 }
1205
1206out:
1207 if (likely(nreq)) {
1208 qp->sq.head += nreq;
1209
1210 /*
1211 * Make sure that descriptors are written before
1212 * doorbell record.
1213 */
1214 wmb();
1215
1216 writel(qp->doorbell_qpn,
1217 to_mdev(ibqp->device)->uar_map + MLX4_SEND_DOORBELL);
1218
1219 /*
1220 * Make sure doorbells don't leak out of SQ spinlock
1221 * and reach the HCA out of order.
1222 */
1223 mmiowb();
1224 }
1225
1226 spin_unlock_irqrestore(&qp->sq.lock, flags);
1227
1228 return err;
1229}
1230
1231int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
1232 struct ib_recv_wr **bad_wr)
1233{
1234 struct mlx4_ib_qp *qp = to_mqp(ibqp);
1235 struct mlx4_wqe_data_seg *scat;
1236 unsigned long flags;
1237 int err = 0;
1238 int nreq;
1239 int ind;
1240 int i;
1241
1242 spin_lock_irqsave(&qp->rq.lock, flags);
1243
1244 ind = qp->rq.head & (qp->rq.max - 1);
1245
1246 for (nreq = 0; wr; ++nreq, wr = wr->next) {
1247 if (mlx4_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
1248 err = -ENOMEM;
1249 *bad_wr = wr;
1250 goto out;
1251 }
1252
1253 if (unlikely(wr->num_sge > qp->rq.max_gs)) {
1254 err = -EINVAL;
1255 *bad_wr = wr;
1256 goto out;
1257 }
1258
1259 scat = get_recv_wqe(qp, ind);
1260
1261 for (i = 0; i < wr->num_sge; ++i) {
1262 scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length);
1263 scat[i].lkey = cpu_to_be32(wr->sg_list[i].lkey);
1264 scat[i].addr = cpu_to_be64(wr->sg_list[i].addr);
1265 }
1266
1267 if (i < qp->rq.max_gs) {
1268 scat[i].byte_count = 0;
1269 scat[i].lkey = cpu_to_be32(MLX4_INVALID_LKEY);
1270 scat[i].addr = 0;
1271 }
1272
1273 qp->rq.wrid[ind] = wr->wr_id;
1274
1275 ind = (ind + 1) & (qp->rq.max - 1);
1276 }
1277
1278out:
1279 if (likely(nreq)) {
1280 qp->rq.head += nreq;
1281
1282 /*
1283 * Make sure that descriptors are written before
1284 * doorbell record.
1285 */
1286 wmb();
1287
1288 *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
1289 }
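/*
 * Note: the store above updates the QP's doorbell record (handed to
 * hardware via db_rec_addr at the RESET->INIT transition), so
 * posting receives needs no MMIO doorbell at all.
 */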
1290
1291 spin_unlock_irqrestore(&qp->rq.lock, flags);
1292
1293 return err;
1294}
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
new file mode 100644
index 000000000000..42ab4a801d6a
--- /dev/null
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -0,0 +1,334 @@
1/*
2 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/mlx4/qp.h>
34#include <linux/mlx4/srq.h>
35
36#include "mlx4_ib.h"
37#include "user.h"
38
39static void *get_wqe(struct mlx4_ib_srq *srq, int n)
40{
41 int offset = n << srq->msrq.wqe_shift;
42
43 if (srq->buf.nbufs == 1)
44 return srq->buf.u.direct.buf + offset;
45 else
46 return srq->buf.u.page_list[offset >> PAGE_SHIFT].buf +
47 (offset & (PAGE_SIZE - 1));
48}
49
50static void mlx4_ib_srq_event(struct mlx4_srq *srq, enum mlx4_event type)
51{
52 struct ib_event event;
53 struct ib_srq *ibsrq = &to_mibsrq(srq)->ibsrq;
54
55 if (ibsrq->event_handler) {
56 event.device = ibsrq->device;
57 event.element.srq = ibsrq;
58 switch (type) {
59 case MLX4_EVENT_TYPE_SRQ_LIMIT:
60 event.event = IB_EVENT_SRQ_LIMIT_REACHED;
61 break;
62 case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
63 event.event = IB_EVENT_SRQ_ERR;
64 break;
65 default:
66 printk(KERN_WARNING "mlx4_ib: Unexpected event type %d "
67 "on SRQ %06x\n", type, srq->srqn);
68 return;
69 }
70
71 ibsrq->event_handler(&event, ibsrq->srq_context);
72 }
73}
74
75struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
76 struct ib_srq_init_attr *init_attr,
77 struct ib_udata *udata)
78{
79 struct mlx4_ib_dev *dev = to_mdev(pd->device);
80 struct mlx4_ib_srq *srq;
81 struct mlx4_wqe_srq_next_seg *next;
82 int desc_size;
83 int buf_size;
84 int err;
85 int i;
86
87 /* Sanity check SRQ size before proceeding */
88 if (init_attr->attr.max_wr >= dev->dev->caps.max_srq_wqes ||
89 init_attr->attr.max_sge > dev->dev->caps.max_srq_sge)
90 return ERR_PTR(-EINVAL);
91
92 srq = kmalloc(sizeof *srq, GFP_KERNEL);
93 if (!srq)
94 return ERR_PTR(-ENOMEM);
95
96 mutex_init(&srq->mutex);
97 spin_lock_init(&srq->lock);
98 srq->msrq.max = roundup_pow_of_two(init_attr->attr.max_wr + 1);
99 srq->msrq.max_gs = init_attr->attr.max_sge;
100
101 desc_size = max(32UL,
102 roundup_pow_of_two(sizeof (struct mlx4_wqe_srq_next_seg) +
103 srq->msrq.max_gs *
104 sizeof (struct mlx4_wqe_data_seg)));
105 srq->msrq.wqe_shift = ilog2(desc_size);
106
107 buf_size = srq->msrq.max * desc_size;
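/*
 * Illustrative sizing (assuming a 16-byte next segment and 16-byte
 * data segments): max_wr = 100, max_sge = 3 gives msrq.max = 128,
 * desc_size = max(32, roundup_pow_of_two(16 + 3 * 16)) = 64,
 * wqe_shift = 6 and buf_size = 128 * 64 = 8192 bytes.
 */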
108
109 if (pd->uobject) {
110 struct mlx4_ib_create_srq ucmd;
111
112 if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
113 err = -EFAULT;
114 goto err_srq;
115 }
116
117 srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
118 buf_size, 0);
119 if (IS_ERR(srq->umem)) {
120 err = PTR_ERR(srq->umem);
121 goto err_srq;
122 }
123
124 err = mlx4_mtt_init(dev->dev, ib_umem_page_count(srq->umem),
125 ilog2(srq->umem->page_size), &srq->mtt);
126 if (err)
127 goto err_buf;
128
129 err = mlx4_ib_umem_write_mtt(dev, &srq->mtt, srq->umem);
130 if (err)
131 goto err_mtt;
132
133 err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context),
134 ucmd.db_addr, &srq->db);
135 if (err)
136 goto err_mtt;
137 } else {
138 err = mlx4_ib_db_alloc(dev, &srq->db, 0);
139 if (err)
140 goto err_srq;
141
142 *srq->db.db = 0;
143
144 if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2, &srq->buf)) {
145 err = -ENOMEM;
146 goto err_db;
147 }
148
149 srq->head = 0;
150 srq->tail = srq->msrq.max - 1;
151 srq->wqe_ctr = 0;
152
153 for (i = 0; i < srq->msrq.max; ++i) {
154 next = get_wqe(srq, i);
155 next->next_wqe_index =
156 cpu_to_be16((i + 1) & (srq->msrq.max - 1));
157 }
158
159 err = mlx4_mtt_init(dev->dev, srq->buf.npages, srq->buf.page_shift,
160 &srq->mtt);
161 if (err)
162 goto err_buf;
163
164 err = mlx4_buf_write_mtt(dev->dev, &srq->mtt, &srq->buf);
165 if (err)
166 goto err_mtt;
167
168 srq->wrid = kmalloc(srq->msrq.max * sizeof (u64), GFP_KERNEL);
169 if (!srq->wrid) {
170 err = -ENOMEM;
171 goto err_mtt;
172 }
173 }
174
175 err = mlx4_srq_alloc(dev->dev, to_mpd(pd)->pdn, &srq->mtt,
176 srq->db.dma, &srq->msrq);
177 if (err)
178 goto err_wrid;
179
180 srq->msrq.event = mlx4_ib_srq_event;
181
182 if (pd->uobject)
183 if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof (__u32))) {
184 err = -EFAULT;
185 goto err_wrid;
186 }
187
188 init_attr->attr.max_wr = srq->msrq.max - 1;
189
190 return &srq->ibsrq;
191
192err_wrid:
193 if (pd->uobject)
194 mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &srq->db);
195 else
196 kfree(srq->wrid);
197
198err_mtt:
199 mlx4_mtt_cleanup(dev->dev, &srq->mtt);
200
201err_buf:
202 if (pd->uobject)
203 ib_umem_release(srq->umem);
204 else
205 mlx4_buf_free(dev->dev, buf_size, &srq->buf);
206
207err_db:
208 if (!pd->uobject)
209 mlx4_ib_db_free(dev, &srq->db);
210
211err_srq:
212 kfree(srq);
213
214 return ERR_PTR(err);
215}
216
217int mlx4_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
218 enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
219{
220 struct mlx4_ib_dev *dev = to_mdev(ibsrq->device);
221 struct mlx4_ib_srq *srq = to_msrq(ibsrq);
222 int ret;
223
224 /* We don't support resizing SRQs (yet?) */
225 if (attr_mask & IB_SRQ_MAX_WR)
226 return -EINVAL;
227
228 if (attr_mask & IB_SRQ_LIMIT) {
229 if (attr->srq_limit >= srq->msrq.max)
230 return -EINVAL;
231
232 mutex_lock(&srq->mutex);
233 ret = mlx4_srq_arm(dev->dev, &srq->msrq, attr->srq_limit);
234 mutex_unlock(&srq->mutex);
235
236 if (ret)
237 return ret;
238 }
239
240 return 0;
241}
242
243int mlx4_ib_destroy_srq(struct ib_srq *srq)
244{
245 struct mlx4_ib_dev *dev = to_mdev(srq->device);
246 struct mlx4_ib_srq *msrq = to_msrq(srq);
247
248 mlx4_srq_free(dev->dev, &msrq->msrq);
249 mlx4_mtt_cleanup(dev->dev, &msrq->mtt);
250
251 if (srq->uobject) {
252 mlx4_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db);
253 ib_umem_release(msrq->umem);
254 } else {
255 kfree(msrq->wrid);
256 mlx4_buf_free(dev->dev, msrq->msrq.max << msrq->msrq.wqe_shift,
257 &msrq->buf);
258 mlx4_ib_db_free(dev, &msrq->db);
259 }
260
261 kfree(msrq);
262
263 return 0;
264}
265
266void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index)
267{
268 struct mlx4_wqe_srq_next_seg *next;
269
270 /* always called with interrupts disabled. */
271 spin_lock(&srq->lock);
272
273 next = get_wqe(srq, srq->tail);
274 next->next_wqe_index = cpu_to_be16(wqe_index);
275 srq->tail = wqe_index;
276
277 spin_unlock(&srq->lock);
278}
279
280int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
281 struct ib_recv_wr **bad_wr)
282{
283 struct mlx4_ib_srq *srq = to_msrq(ibsrq);
284 struct mlx4_wqe_srq_next_seg *next;
285 struct mlx4_wqe_data_seg *scat;
286 unsigned long flags;
287 int err = 0;
288 int nreq;
289 int i;
290
291 spin_lock_irqsave(&srq->lock, flags);
292
293 for (nreq = 0; wr; ++nreq, wr = wr->next) {
294 if (unlikely(wr->num_sge > srq->msrq.max_gs)) {
295 err = -EINVAL;
296 *bad_wr = wr;
297 break;
298 }
299
300 srq->wrid[srq->head] = wr->wr_id;
301
302 next = get_wqe(srq, srq->head);
303 srq->head = be16_to_cpu(next->next_wqe_index);
304 scat = (struct mlx4_wqe_data_seg *) (next + 1);
305
306 for (i = 0; i < wr->num_sge; ++i) {
307 scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length);
308 scat[i].lkey = cpu_to_be32(wr->sg_list[i].lkey);
309 scat[i].addr = cpu_to_be64(wr->sg_list[i].addr);
310 }
311
312 if (i < srq->msrq.max_gs) {
313 scat[i].byte_count = 0;
314 scat[i].lkey = cpu_to_be32(MLX4_INVALID_LKEY);
315 scat[i].addr = 0;
316 }
317 }
318
319 if (likely(nreq)) {
320 srq->wqe_ctr += nreq;
321
322 /*
323 * Make sure that descriptors are written before
324 * doorbell record.
325 */
326 wmb();
327
328 *srq->db.db = cpu_to_be32(srq->wqe_ctr);
329 }
330
331 spin_unlock_irqrestore(&srq->lock, flags);
332
333 return err;
334}
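
The SRQ above keeps its unused receive WQEs on a singly linked free list
threaded through each WQE's next_wqe_index field: mlx4_ib_create_srq()
chains entry i to i + 1, mlx4_ib_post_srq_recv() pops from srq->head, and
mlx4_ib_free_srq_wqe() appends the completed index at srq->tail (which is
also why max_wr is reported back as max - 1: one entry always stays on the
list). A minimal userspace sketch of that list discipline, with invented
names and sizes (model code, not driver code):

	/* next[] plays the role of next_wqe_index. */
	#include <stdio.h>

	#define MAX_WQES 8			/* power of two, like srq->msrq.max */

	static unsigned short next[MAX_WQES];
	static int head, tail;

	static void srq_init(void)
	{
		for (int i = 0; i < MAX_WQES; ++i)
			next[i] = (i + 1) & (MAX_WQES - 1);	/* i -> i+1, wrapping */
		head = 0;
		tail = MAX_WQES - 1;
	}

	static int srq_post(void)		/* pop, as mlx4_ib_post_srq_recv() does */
	{
		int idx = head;

		head = next[head];
		return idx;
	}

	static void srq_free(int idx)		/* push back, as mlx4_ib_free_srq_wqe() does */
	{
		next[tail] = idx;
		tail = idx;
	}

	int main(void)
	{
		srq_init();
		int a = srq_post(), b = srq_post();

		printf("%d %d\n", a, b);	/* 0 1 */
		srq_free(b);			/* completion order is arbitrary */
		srq_free(a);
		return 0;
	}

Note the ordering in the real post path: descriptors are written first,
then wmb(), and only then the doorbell record, so the HCA never observes
a doorbell count ahead of the WQE contents.
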
diff --git a/drivers/infiniband/hw/mlx4/user.h b/drivers/infiniband/hw/mlx4/user.h
new file mode 100644
index 000000000000..5b8eddc9fa83
--- /dev/null
+++ b/drivers/infiniband/hw/mlx4/user.h
@@ -0,0 +1,92 @@
1/*
2 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#ifndef MLX4_IB_USER_H
34#define MLX4_IB_USER_H
35
36#include <linux/types.h>
37
38/*
39 * Increment this value if any changes that break userspace ABI
40 * compatibility are made.
41 */
42#define MLX4_IB_UVERBS_ABI_VERSION 1
43
44/*
45 * Make sure that all structs defined in this file remain laid out so
46 * that they pack the same way on 32-bit and 64-bit architectures (to
47 * avoid incompatibility between 32-bit userspace and 64-bit kernels).
48 * In particular do not use pointer types -- pass pointers in __u64
49 * instead.
50 */
51
52struct mlx4_ib_alloc_ucontext_resp {
53 __u32 qp_tab_size;
54 __u16 bf_reg_size;
55 __u16 bf_regs_per_page;
56};
57
58struct mlx4_ib_alloc_pd_resp {
59 __u32 pdn;
60 __u32 reserved;
61};
62
63struct mlx4_ib_create_cq {
64 __u64 buf_addr;
65 __u64 db_addr;
66};
67
68struct mlx4_ib_create_cq_resp {
69 __u32 cqn;
70 __u32 reserved;
71};
72
73struct mlx4_ib_resize_cq {
74 __u64 buf_addr;
75};
76
77struct mlx4_ib_create_srq {
78 __u64 buf_addr;
79 __u64 db_addr;
80};
81
82struct mlx4_ib_create_srq_resp {
83 __u32 srqn;
84 __u32 reserved;
85};
86
87struct mlx4_ib_create_qp {
88 __u64 buf_addr;
89 __u64 db_addr;
90};
91
92#endif /* MLX4_IB_USER_H */
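
The layout comment above is the whole point of this header: every field is
a fixed-width __uXX type, structs are padded out to 64-bit multiples with
explicit reserved members, and userspace addresses travel as __u64 so a
32-bit process and a 64-bit kernel agree on the layout. A small userspace
illustration of the convention (hypothetical test code, not part of the
patch):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	struct create_srq_cmd {			/* same shape as struct mlx4_ib_create_srq */
		uint64_t buf_addr;
		uint64_t db_addr;
	};

	int main(void)
	{
		char buf[64], db[8];
		struct create_srq_cmd cmd;

		memset(&cmd, 0, sizeof(cmd));
		cmd.buf_addr = (uintptr_t)buf;	/* pointer widened into a __u64 slot */
		cmd.db_addr  = (uintptr_t)db;

		printf("%zu\n", sizeof(cmd));	/* 16 on both 32- and 64-bit builds */
		return 0;
	}
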
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 1c05486c3c68..6bcde1cb9688 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -37,6 +37,7 @@
37 */ 37 */
38 38
39#include <rdma/ib_smi.h> 39#include <rdma/ib_smi.h>
40#include <rdma/ib_umem.h>
40#include <rdma/ib_user_verbs.h> 41#include <rdma/ib_user_verbs.h>
41#include <linux/mm.h> 42#include <linux/mm.h>
42 43
@@ -908,6 +909,8 @@ static struct ib_mr *mthca_get_dma_mr(struct ib_pd *pd, int acc)
908 return ERR_PTR(err); 909 return ERR_PTR(err);
909 } 910 }
910 911
912 mr->umem = NULL;
913
911 return &mr->ibmr; 914 return &mr->ibmr;
912} 915}
913 916
@@ -1003,11 +1006,13 @@ static struct ib_mr *mthca_reg_phys_mr(struct ib_pd *pd,
1003 } 1006 }
1004 1007
1005 kfree(page_list); 1008 kfree(page_list);
1009 mr->umem = NULL;
1010
1006 return &mr->ibmr; 1011 return &mr->ibmr;
1007} 1012}
1008 1013
1009static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, struct ib_umem *region, 1014static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
1010 int acc, struct ib_udata *udata) 1015 u64 virt, int acc, struct ib_udata *udata)
1011{ 1016{
1012 struct mthca_dev *dev = to_mdev(pd->device); 1017 struct mthca_dev *dev = to_mdev(pd->device);
1013 struct ib_umem_chunk *chunk; 1018 struct ib_umem_chunk *chunk;
@@ -1018,20 +1023,26 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
1018 int err = 0; 1023 int err = 0;
1019 int write_mtt_size; 1024 int write_mtt_size;
1020 1025
1021 shift = ffs(region->page_size) - 1;
1022
1023 mr = kmalloc(sizeof *mr, GFP_KERNEL); 1026 mr = kmalloc(sizeof *mr, GFP_KERNEL);
1024 if (!mr) 1027 if (!mr)
1025 return ERR_PTR(-ENOMEM); 1028 return ERR_PTR(-ENOMEM);
1026 1029
1030 mr->umem = ib_umem_get(pd->uobject->context, start, length, acc);
1031 if (IS_ERR(mr->umem)) {
1032 err = PTR_ERR(mr->umem);
1033 goto err;
1034 }
1035
1036 shift = ffs(mr->umem->page_size) - 1;
1037
1027 n = 0; 1038 n = 0;
1028 list_for_each_entry(chunk, &region->chunk_list, list) 1039 list_for_each_entry(chunk, &mr->umem->chunk_list, list)
1029 n += chunk->nents; 1040 n += chunk->nents;
1030 1041
1031 mr->mtt = mthca_alloc_mtt(dev, n); 1042 mr->mtt = mthca_alloc_mtt(dev, n);
1032 if (IS_ERR(mr->mtt)) { 1043 if (IS_ERR(mr->mtt)) {
1033 err = PTR_ERR(mr->mtt); 1044 err = PTR_ERR(mr->mtt);
1034 goto err; 1045 goto err_umem;
1035 } 1046 }
1036 1047
1037 pages = (u64 *) __get_free_page(GFP_KERNEL); 1048 pages = (u64 *) __get_free_page(GFP_KERNEL);
@@ -1044,12 +1055,12 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
1044 1055
1045 write_mtt_size = min(mthca_write_mtt_size(dev), (int) (PAGE_SIZE / sizeof *pages)); 1056 write_mtt_size = min(mthca_write_mtt_size(dev), (int) (PAGE_SIZE / sizeof *pages));
1046 1057
1047 list_for_each_entry(chunk, &region->chunk_list, list) 1058 list_for_each_entry(chunk, &mr->umem->chunk_list, list)
1048 for (j = 0; j < chunk->nmap; ++j) { 1059 for (j = 0; j < chunk->nmap; ++j) {
1049 len = sg_dma_len(&chunk->page_list[j]) >> shift; 1060 len = sg_dma_len(&chunk->page_list[j]) >> shift;
1050 for (k = 0; k < len; ++k) { 1061 for (k = 0; k < len; ++k) {
1051 pages[i++] = sg_dma_address(&chunk->page_list[j]) + 1062 pages[i++] = sg_dma_address(&chunk->page_list[j]) +
1052 region->page_size * k; 1063 mr->umem->page_size * k;
1053 /* 1064 /*
1054 * Be friendly to write_mtt and pass it chunks 1065 * Be friendly to write_mtt and pass it chunks
1055 * of appropriate size. 1066 * of appropriate size.
@@ -1071,8 +1082,8 @@ mtt_done:
1071 if (err) 1082 if (err)
1072 goto err_mtt; 1083 goto err_mtt;
1073 1084
1074 err = mthca_mr_alloc(dev, to_mpd(pd)->pd_num, shift, region->virt_base, 1085 err = mthca_mr_alloc(dev, to_mpd(pd)->pd_num, shift, virt, length,
1075 region->length, convert_access(acc), mr); 1086 convert_access(acc), mr);
1076 1087
1077 if (err) 1088 if (err)
1078 goto err_mtt; 1089 goto err_mtt;
@@ -1082,6 +1093,9 @@ mtt_done:
1082err_mtt: 1093err_mtt:
1083 mthca_free_mtt(dev, mr->mtt); 1094 mthca_free_mtt(dev, mr->mtt);
1084 1095
1096err_umem:
1097 ib_umem_release(mr->umem);
1098
1085err: 1099err:
1086 kfree(mr); 1100 kfree(mr);
1087 return ERR_PTR(err); 1101 return ERR_PTR(err);
@@ -1090,8 +1104,12 @@ err:
1090static int mthca_dereg_mr(struct ib_mr *mr) 1104static int mthca_dereg_mr(struct ib_mr *mr)
1091{ 1105{
1092 struct mthca_mr *mmr = to_mmr(mr); 1106 struct mthca_mr *mmr = to_mmr(mr);
1107
1093 mthca_free_mr(to_mdev(mr->device), mmr); 1108 mthca_free_mr(to_mdev(mr->device), mmr);
1109 if (mmr->umem)
1110 ib_umem_release(mmr->umem);
1094 kfree(mmr); 1111 kfree(mmr);
1112
1095 return 0; 1113 return 0;
1096} 1114}
1097 1115
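
With ib_umem_get() moved into the driver, mthca now derives the page shift
from the umem it pinned itself: shift = ffs(mr->umem->page_size) - 1.
ffs() returns the 1-based index of the lowest set bit, so for a
power-of-two page size this is exactly log2 (a quick standalone check,
values illustrative):

	#include <stdio.h>
	#include <strings.h>	/* ffs() */

	int main(void)
	{
		int page_size = 4096;		/* 1 << 12 */
		int shift = ffs(page_size) - 1;

		printf("shift = %d\n", shift);	/* 12 */
		return 0;
	}

The same move is why the new err_umem label exists: once the driver owns
the pin, every failure after ib_umem_get() must release the umem before
freeing the MR, and mthca_dereg_mr() must release it on teardown as well.
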
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h
index 1d266ac2e094..262616c8ebb6 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.h
+++ b/drivers/infiniband/hw/mthca/mthca_provider.h
@@ -73,6 +73,7 @@ struct mthca_mtt;
73 73
74struct mthca_mr { 74struct mthca_mr {
75 struct ib_mr ibmr; 75 struct ib_mr ibmr;
76 struct ib_umem *umem;
76 struct mthca_mtt *mtt; 77 struct mthca_mtt *mtt;
77}; 78};
78 79
diff --git a/drivers/input/Kconfig b/drivers/input/Kconfig
index 0e9b69535ad6..f814fb3a469d 100644
--- a/drivers/input/Kconfig
+++ b/drivers/input/Kconfig
@@ -3,6 +3,7 @@
3# 3#
4 4
5menu "Input device support" 5menu "Input device support"
6 depends on !S390
6 7
7config INPUT 8config INPUT
8 tristate "Generic input layer (needed for keyboard, mouse, ...)" if EMBEDDED 9 tristate "Generic input layer (needed for keyboard, mouse, ...)" if EMBEDDED
diff --git a/drivers/isdn/Kconfig b/drivers/isdn/Kconfig
index c90afeea54aa..d42fe89cddf6 100644
--- a/drivers/isdn/Kconfig
+++ b/drivers/isdn/Kconfig
@@ -3,6 +3,7 @@
3# 3#
4 4
5menu "ISDN subsystem" 5menu "ISDN subsystem"
6 depends on !S390
6 7
7config ISDN 8config ISDN
8 tristate "ISDN support" 9 tristate "ISDN support"
diff --git a/drivers/isdn/capi/Kconfig b/drivers/isdn/capi/Kconfig
index c921d6c522f5..c92f9d764fce 100644
--- a/drivers/isdn/capi/Kconfig
+++ b/drivers/isdn/capi/Kconfig
@@ -17,7 +17,7 @@ config CAPI_TRACE
17 help 17 help
18 If you say Y here, the kernelcapi driver can make verbose traces 18 If you say Y here, the kernelcapi driver can make verbose traces
19 of CAPI messages. This feature can be enabled/disabled via IOCTL for 19 of CAPI messages. This feature can be enabled/disabled via IOCTL for
20 every controler (default disabled). 20 every controller (default disabled).
21 This will increase the size of the kernelcapi module by 20 KB. 21 This will increase the size of the kernelcapi module by 20 KB.
22 If unsure, say Y. 22 If unsure, say Y.
23 23
diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
index af3eb9e795b5..85784a7ffb25 100644
--- a/drivers/isdn/hardware/eicon/divasync.h
+++ b/drivers/isdn/hardware/eicon/divasync.h
@@ -216,7 +216,7 @@ typedef struct
216#define SERIAL_HOOK_RING 0x85 216#define SERIAL_HOOK_RING 0x85
217#define SERIAL_HOOK_DETACH 0x8f 217#define SERIAL_HOOK_DETACH 0x8f
218 unsigned char Flags; /* function refinements */ 218 unsigned char Flags; /* function refinements */
219 /* parameters passed by the the ATTACH request */ 219 /* parameters passed by the ATTACH request */
220 SERIAL_INT_CB InterruptHandler; /* called on each interrupt */ 220 SERIAL_INT_CB InterruptHandler; /* called on each interrupt */
221 SERIAL_DPC_CB DeferredHandler; /* called on hook state changes */ 221 SERIAL_DPC_CB DeferredHandler; /* called on hook state changes */
222 void *HandlerContext; /* context for both handlers */ 222 void *HandlerContext; /* context for both handlers */
diff --git a/drivers/isdn/hisax/hfc_usb.c b/drivers/isdn/hisax/hfc_usb.c
index 99e70d4103b6..1f18f1993387 100644
--- a/drivers/isdn/hisax/hfc_usb.c
+++ b/drivers/isdn/hisax/hfc_usb.c
@@ -1217,11 +1217,11 @@ usb_init(hfcusb_data * hfc)
1217 /* aux = output, reset off */ 1217 /* aux = output, reset off */
1218 write_usb(hfc, HFCUSB_CIRM, 0x10); 1218 write_usb(hfc, HFCUSB_CIRM, 0x10);
1219 1219
1220 /* set USB_SIZE to match the the wMaxPacketSize for INT or BULK transfers */ 1220 /* set USB_SIZE to match the wMaxPacketSize for INT or BULK transfers */
1221 write_usb(hfc, HFCUSB_USB_SIZE, 1221 write_usb(hfc, HFCUSB_USB_SIZE,
1222 (hfc->packet_size / 8) | ((hfc->packet_size / 8) << 4)); 1222 (hfc->packet_size / 8) | ((hfc->packet_size / 8) << 4));
1223 1223
1224 /* set USB_SIZE_I to match the the wMaxPacketSize for ISO transfers */ 1224 /* set USB_SIZE_I to match the wMaxPacketSize for ISO transfers */
1225 write_usb(hfc, HFCUSB_USB_SIZE_I, hfc->iso_packet_size); 1225 write_usb(hfc, HFCUSB_USB_SIZE_I, hfc->iso_packet_size);
1226 1226
1227 /* enable PCM/GCI master mode */ 1227 /* enable PCM/GCI master mode */
diff --git a/drivers/kvm/Kconfig b/drivers/kvm/Kconfig
index 703cc88d1ef9..e8e37d826478 100644
--- a/drivers/kvm/Kconfig
+++ b/drivers/kvm/Kconfig
@@ -2,6 +2,7 @@
2# KVM configuration 2# KVM configuration
3# 3#
4menu "Virtualization" 4menu "Virtualization"
5 depends on X86
5 6
6config KVM 7config KVM
7 tristate "Kernel-based Virtual Machine (KVM) support" 8 tristate "Kernel-based Virtual Machine (KVM) support"
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index c8b8cfa332bb..0d892600ff00 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -2889,7 +2889,9 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
2889 2889
2890 switch (val) { 2890 switch (val) {
2891 case CPU_DOWN_PREPARE: 2891 case CPU_DOWN_PREPARE:
2892 case CPU_DOWN_PREPARE_FROZEN:
2892 case CPU_UP_CANCELED: 2893 case CPU_UP_CANCELED:
2894 case CPU_UP_CANCELED_FROZEN:
2893 printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n", 2895 printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
2894 cpu); 2896 cpu);
2895 decache_vcpus_on_cpu(cpu); 2897 decache_vcpus_on_cpu(cpu);
@@ -2897,6 +2899,7 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
2897 NULL, 0, 1); 2899 NULL, 0, 1);
2898 break; 2900 break;
2899 case CPU_ONLINE: 2901 case CPU_ONLINE:
2902 case CPU_ONLINE_FROZEN:
2900 printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n", 2903 printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
2901 cpu); 2904 cpu);
2902 smp_call_function_single(cpu, kvm_arch_ops->hardware_enable, 2905 smp_call_function_single(cpu, kvm_arch_ops->hardware_enable,
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 80acd08f0e97..87d2046f866c 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -1,5 +1,6 @@
1 1
2menu "LED devices" 2menu "LED devices"
3 depends on HAS_IOMEM
3 4
4config NEW_LEDS 5config NEW_LEDS
5 bool "LED Support" 6 bool "LED Support"
diff --git a/drivers/leds/leds-h1940.c b/drivers/leds/leds-h1940.c
index 1d49d2ade557..677c99325be5 100644
--- a/drivers/leds/leds-h1940.c
+++ b/drivers/leds/leds-h1940.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * drivers/leds/h1940-leds.c 2 * drivers/leds/leds-h1940.c
3 * Copyright (c) Arnaud Patard <arnaud.patard@rtp-net.org> 3 * Copyright (c) Arnaud Patard <arnaud.patard@rtp-net.org>
4 * 4 *
5 * This file is subject to the terms and conditions of the GNU General Public 5 * This file is subject to the terms and conditions of the GNU General Public
diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig
index a32c91e27b3c..58926da0ae18 100644
--- a/drivers/macintosh/Kconfig
+++ b/drivers/macintosh/Kconfig
@@ -237,7 +237,7 @@ config PMAC_RACKMETER
237 tristate "Support for Apple XServe front panel LEDs" 237 tristate "Support for Apple XServe front panel LEDs"
238 depends on PPC_PMAC 238 depends on PPC_PMAC
239 help 239 help
240 This driver procides some support to control the front panel 240 This driver provides some support to control the front panel
241 blue LEDs "vu-meter" of the XServer macs. 241 blue LEDs "vu-meter" of the XServer macs.
242 242
243endif # MACINTOSH_DRIVERS 243endif # MACINTOSH_DRIVERS
diff --git a/drivers/macintosh/mediabay.c b/drivers/macintosh/mediabay.c
index 0acf2f7fd9d7..c803d2bba65d 100644
--- a/drivers/macintosh/mediabay.c
+++ b/drivers/macintosh/mediabay.c
@@ -563,7 +563,7 @@ static void media_bay_step(int i)
563 ide_init_hwif_ports(&hw, (unsigned long) bay->cd_base, (unsigned long) 0, NULL); 563 ide_init_hwif_ports(&hw, (unsigned long) bay->cd_base, (unsigned long) 0, NULL);
564 hw.irq = bay->cd_irq; 564 hw.irq = bay->cd_irq;
565 hw.chipset = ide_pmac; 565 hw.chipset = ide_pmac;
566 bay->cd_index = ide_register_hw(&hw, NULL); 566 bay->cd_index = ide_register_hw(&hw, 0, NULL);
567 pmu_resume(); 567 pmu_resume();
568 } 568 }
569 if (bay->cd_index == -1) { 569 if (bay->cd_index == -1) {
diff --git a/drivers/mca/mca-bus.c b/drivers/mca/mca-bus.c
index da862e4632dd..67b8e9453b19 100644
--- a/drivers/mca/mca-bus.c
+++ b/drivers/mca/mca-bus.c
@@ -47,19 +47,25 @@ static int mca_bus_match (struct device *dev, struct device_driver *drv)
47{ 47{
48 struct mca_device *mca_dev = to_mca_device (dev); 48 struct mca_device *mca_dev = to_mca_device (dev);
49 struct mca_driver *mca_drv = to_mca_driver (drv); 49 struct mca_driver *mca_drv = to_mca_driver (drv);
50 const short *mca_ids = mca_drv->id_table; 50 const unsigned short *mca_ids = mca_drv->id_table;
51 int i; 51 int i = 0;
52 52
53 if (!mca_ids) 53 if (mca_ids) {
54 return 0; 54 for(i = 0; mca_ids[i]; i++) {
55 55 if (mca_ids[i] == mca_dev->pos_id) {
56 for(i = 0; mca_ids[i]; i++) { 56 mca_dev->index = i;
57 if (mca_ids[i] == mca_dev->pos_id) { 57 return 1;
58 mca_dev->index = i; 58 }
59 return 1;
60 } 59 }
61 } 60 }
62 61 /* If the integrated id is present, treat it as though it were an
 62 * additional id in the id_table (it can't be, because by definition
 63 * integrated ids overflow a short). */
64 if (mca_drv->integrated_id && mca_dev->pos_id ==
65 mca_drv->integrated_id) {
66 mca_dev->index = i;
67 return 1;
68 }
63 return 0; 69 return 0;
64} 70}
65 71
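
The rewritten match routine tries the driver's zero-terminated id_table
first and only then the integrated id; as the comment notes, an integrated
id can never appear in the table because table entries are shorts. A
userspace model of the flow (types simplified, values hypothetical):

	#include <stdio.h>

	static int match(const unsigned short *ids, unsigned int integrated_id,
			 unsigned int pos_id, int *index)
	{
		int i = 0;

		if (ids)
			for (i = 0; ids[i]; i++)	/* table is 0-terminated */
				if (ids[i] == pos_id) {
					*index = i;
					return 1;
				}

		/* integrated ids overflow a short, so only this test can match one */
		if (integrated_id && pos_id == integrated_id) {
			*index = i;		/* one past the table, as in the patch */
			return 1;
		}
		return 0;
	}

	int main(void)
	{
		static const unsigned short ids[] = { 0x6042, 0 };
		int idx;

		printf("%d\n", match(ids, 0x10000, 0x10000, &idx));	/* 1 */
		return 0;
	}
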
diff --git a/drivers/mca/mca-driver.c b/drivers/mca/mca-driver.c
index 2223466b3d8a..32cd39bcc715 100644
--- a/drivers/mca/mca-driver.c
+++ b/drivers/mca/mca-driver.c
@@ -36,12 +36,25 @@ int mca_register_driver(struct mca_driver *mca_drv)
36 mca_drv->driver.bus = &mca_bus_type; 36 mca_drv->driver.bus = &mca_bus_type;
37 if ((r = driver_register(&mca_drv->driver)) < 0) 37 if ((r = driver_register(&mca_drv->driver)) < 0)
38 return r; 38 return r;
39 mca_drv->integrated_id = 0;
39 } 40 }
40 41
41 return 0; 42 return 0;
42} 43}
43EXPORT_SYMBOL(mca_register_driver); 44EXPORT_SYMBOL(mca_register_driver);
44 45
46int mca_register_driver_integrated(struct mca_driver *mca_driver,
47 int integrated_id)
48{
49 int r = mca_register_driver(mca_driver);
50
51 if (!r)
52 mca_driver->integrated_id = integrated_id;
53
54 return r;
55}
56EXPORT_SYMBOL(mca_register_driver_integrated);
57
45void mca_unregister_driver(struct mca_driver *mca_drv) 58void mca_unregister_driver(struct mca_driver *mca_drv)
46{ 59{
47 if (MCA_bus) 60 if (MCA_bus)
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 4540ade6b6b5..7df934d69134 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -262,6 +262,15 @@ config DM_MULTIPATH_EMC
262 ---help--- 262 ---help---
263 Multipath support for EMC CX/AX series hardware. 263 Multipath support for EMC CX/AX series hardware.
264 264
265config DM_DELAY
266 tristate "I/O delaying target (EXPERIMENTAL)"
267 depends on BLK_DEV_DM && EXPERIMENTAL
268 ---help---
269 A target that delays reads and/or writes and can send
270 them to different devices. Useful for testing.
271
272 If unsure, say N.
273
265endmenu 274endmenu
266 275
267endif 276endif
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 34957a68d921..38754084eac7 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_MD_FAULTY) += faulty.o
31obj-$(CONFIG_BLK_DEV_MD) += md-mod.o 31obj-$(CONFIG_BLK_DEV_MD) += md-mod.o
32obj-$(CONFIG_BLK_DEV_DM) += dm-mod.o 32obj-$(CONFIG_BLK_DEV_DM) += dm-mod.o
33obj-$(CONFIG_DM_CRYPT) += dm-crypt.o 33obj-$(CONFIG_DM_CRYPT) += dm-crypt.o
34obj-$(CONFIG_DM_DELAY) += dm-delay.o
34obj-$(CONFIG_DM_MULTIPATH) += dm-multipath.o dm-round-robin.o 35obj-$(CONFIG_DM_MULTIPATH) += dm-multipath.o dm-round-robin.o
35obj-$(CONFIG_DM_MULTIPATH_EMC) += dm-emc.o 36obj-$(CONFIG_DM_MULTIPATH_EMC) += dm-emc.o
36obj-$(CONFIG_DM_SNAPSHOT) += dm-snapshot.o 37obj-$(CONFIG_DM_SNAPSHOT) += dm-snapshot.o
diff --git a/drivers/md/dm-bio-list.h b/drivers/md/dm-bio-list.h
index da4349649f7f..c6be88826fae 100644
--- a/drivers/md/dm-bio-list.h
+++ b/drivers/md/dm-bio-list.h
@@ -8,17 +8,43 @@
8#define DM_BIO_LIST_H 8#define DM_BIO_LIST_H
9 9
10#include <linux/bio.h> 10#include <linux/bio.h>
11#include <linux/prefetch.h>
11 12
12struct bio_list { 13struct bio_list {
13 struct bio *head; 14 struct bio *head;
14 struct bio *tail; 15 struct bio *tail;
15}; 16};
16 17
18static inline int bio_list_empty(const struct bio_list *bl)
19{
20 return bl->head == NULL;
21}
22
23#define BIO_LIST_INIT { .head = NULL, .tail = NULL }
24
25#define BIO_LIST(bl) \
26 struct bio_list bl = BIO_LIST_INIT
27
17static inline void bio_list_init(struct bio_list *bl) 28static inline void bio_list_init(struct bio_list *bl)
18{ 29{
19 bl->head = bl->tail = NULL; 30 bl->head = bl->tail = NULL;
20} 31}
21 32
33#define bio_list_for_each(bio, bl) \
34 for (bio = (bl)->head; bio && ({ prefetch(bio->bi_next); 1; }); \
35 bio = bio->bi_next)
36
37static inline unsigned bio_list_size(const struct bio_list *bl)
38{
39 unsigned sz = 0;
40 struct bio *bio;
41
42 bio_list_for_each(bio, bl)
43 sz++;
44
45 return sz;
46}
47
22static inline void bio_list_add(struct bio_list *bl, struct bio *bio) 48static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
23{ 49{
24 bio->bi_next = NULL; 50 bio->bi_next = NULL;
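
These helpers give callers a cheap on-stack bio queue: BIO_LIST(bl)
declares an initialized list, bio_list_for_each() walks head to NULL via
bi_next, and bio_list_size() is a plain O(n) count (dm-delay below is the
first user). A userspace model, with struct bio reduced to its link field;
the bl->tail update is an assumption, since only the start of
bio_list_add() appears above:

	#include <stddef.h>
	#include <stdio.h>

	struct bio { struct bio *bi_next; };
	struct bio_list { struct bio *head, *tail; };

	#define BIO_LIST(bl) struct bio_list bl = { NULL, NULL }

	static void bio_list_add(struct bio_list *bl, struct bio *bio)
	{
		bio->bi_next = NULL;
		if (bl->tail)
			bl->tail->bi_next = bio;
		else
			bl->head = bio;
		bl->tail = bio;
	}

	static unsigned bio_list_size(const struct bio_list *bl)
	{
		unsigned sz = 0;

		for (struct bio *b = bl->head; b; b = b->bi_next)
			sz++;			/* O(n), as in the header */
		return sz;
	}

	int main(void)
	{
		BIO_LIST(bl);
		struct bio a = { NULL }, b = { NULL };

		bio_list_add(&bl, &a);
		bio_list_add(&bl, &b);
		printf("%u\n", bio_list_size(&bl));	/* 2 */
		return 0;
	}
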
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index d8121234c347..7b0fcfc9eaa5 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -33,7 +33,6 @@
33struct crypt_io { 33struct crypt_io {
34 struct dm_target *target; 34 struct dm_target *target;
35 struct bio *base_bio; 35 struct bio *base_bio;
36 struct bio *first_clone;
37 struct work_struct work; 36 struct work_struct work;
38 atomic_t pending; 37 atomic_t pending;
39 int error; 38 int error;
@@ -107,6 +106,8 @@ struct crypt_config {
107 106
108static struct kmem_cache *_crypt_io_pool; 107static struct kmem_cache *_crypt_io_pool;
109 108
109static void clone_init(struct crypt_io *, struct bio *);
110
110/* 111/*
111 * Different IV generation algorithms: 112 * Different IV generation algorithms:
112 * 113 *
@@ -120,6 +121,9 @@ static struct kmem_cache *_crypt_io_pool;
120 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1 121 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
121 * (needed for LRW-32-AES and possible other narrow block modes) 122 * (needed for LRW-32-AES and possible other narrow block modes)
122 * 123 *
124 * null: the initial vector is always zero. Provides compatibility with
125 * obsolete loop_fish2 devices. Do not use for new devices.
126 *
123 * plumb: unimplemented, see: 127 * plumb: unimplemented, see:
124 * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454 128 * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
125 */ 129 */
@@ -256,6 +260,13 @@ static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
256 return 0; 260 return 0;
257} 261}
258 262
263static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
264{
265 memset(iv, 0, cc->iv_size);
266
267 return 0;
268}
269
259static struct crypt_iv_operations crypt_iv_plain_ops = { 270static struct crypt_iv_operations crypt_iv_plain_ops = {
260 .generator = crypt_iv_plain_gen 271 .generator = crypt_iv_plain_gen
261}; 272};
@@ -272,6 +283,10 @@ static struct crypt_iv_operations crypt_iv_benbi_ops = {
272 .generator = crypt_iv_benbi_gen 283 .generator = crypt_iv_benbi_gen
273}; 284};
274 285
286static struct crypt_iv_operations crypt_iv_null_ops = {
287 .generator = crypt_iv_null_gen
288};
289
275static int 290static int
276crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out, 291crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
277 struct scatterlist *in, unsigned int length, 292 struct scatterlist *in, unsigned int length,
@@ -378,36 +393,21 @@ static int crypt_convert(struct crypt_config *cc,
378 * This should never violate the device limitations 393 * This should never violate the device limitations
379 * May return a smaller bio when running out of pages 394 * May return a smaller bio when running out of pages
380 */ 395 */
381static struct bio * 396static struct bio *crypt_alloc_buffer(struct crypt_io *io, unsigned int size)
382crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
383 struct bio *base_bio, unsigned int *bio_vec_idx)
384{ 397{
398 struct crypt_config *cc = io->target->private;
385 struct bio *clone; 399 struct bio *clone;
386 unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; 400 unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
387 gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM; 401 gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
388 unsigned int i; 402 unsigned int i;
389 403
390 if (base_bio) { 404 clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
391 clone = bio_alloc_bioset(GFP_NOIO, base_bio->bi_max_vecs, cc->bs);
392 __bio_clone(clone, base_bio);
393 } else
394 clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
395
396 if (!clone) 405 if (!clone)
397 return NULL; 406 return NULL;
398 407
399 clone->bi_destructor = dm_crypt_bio_destructor; 408 clone_init(io, clone);
400
401 /* if the last bio was not complete, continue where that one ended */
402 clone->bi_idx = *bio_vec_idx;
403 clone->bi_vcnt = *bio_vec_idx;
404 clone->bi_size = 0;
405 clone->bi_flags &= ~(1 << BIO_SEG_VALID);
406
407 /* clone->bi_idx pages have already been allocated */
408 size -= clone->bi_idx * PAGE_SIZE;
409 409
410 for (i = clone->bi_idx; i < nr_iovecs; i++) { 410 for (i = 0; i < nr_iovecs; i++) {
411 struct bio_vec *bv = bio_iovec_idx(clone, i); 411 struct bio_vec *bv = bio_iovec_idx(clone, i);
412 412
413 bv->bv_page = mempool_alloc(cc->page_pool, gfp_mask); 413 bv->bv_page = mempool_alloc(cc->page_pool, gfp_mask);
@@ -419,7 +419,7 @@ crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
419 * return a partially allocated bio, the caller will then try 419 * return a partially allocated bio, the caller will then try
420 * to allocate additional bios while submitting this partial bio 420 * to allocate additional bios while submitting this partial bio
421 */ 421 */
422 if ((i - clone->bi_idx) == (MIN_BIO_PAGES - 1)) 422 if (i == (MIN_BIO_PAGES - 1))
423 gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT; 423 gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;
424 424
425 bv->bv_offset = 0; 425 bv->bv_offset = 0;
@@ -438,12 +438,6 @@ crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
438 return NULL; 438 return NULL;
439 } 439 }
440 440
441 /*
442 * Remember the last bio_vec allocated to be able
443 * to correctly continue after the splitting.
444 */
445 *bio_vec_idx = clone->bi_vcnt;
446
447 return clone; 441 return clone;
448} 442}
449 443
@@ -495,9 +489,6 @@ static void dec_pending(struct crypt_io *io, int error)
495 if (!atomic_dec_and_test(&io->pending)) 489 if (!atomic_dec_and_test(&io->pending))
496 return; 490 return;
497 491
498 if (io->first_clone)
499 bio_put(io->first_clone);
500
501 bio_endio(io->base_bio, io->base_bio->bi_size, io->error); 492 bio_endio(io->base_bio, io->base_bio->bi_size, io->error);
502 493
503 mempool_free(io, cc->io_pool); 494 mempool_free(io, cc->io_pool);
@@ -562,6 +553,7 @@ static void clone_init(struct crypt_io *io, struct bio *clone)
562 clone->bi_end_io = crypt_endio; 553 clone->bi_end_io = crypt_endio;
563 clone->bi_bdev = cc->dev->bdev; 554 clone->bi_bdev = cc->dev->bdev;
564 clone->bi_rw = io->base_bio->bi_rw; 555 clone->bi_rw = io->base_bio->bi_rw;
556 clone->bi_destructor = dm_crypt_bio_destructor;
565} 557}
566 558
567static void process_read(struct crypt_io *io) 559static void process_read(struct crypt_io *io)
@@ -585,7 +577,6 @@ static void process_read(struct crypt_io *io)
585 } 577 }
586 578
587 clone_init(io, clone); 579 clone_init(io, clone);
588 clone->bi_destructor = dm_crypt_bio_destructor;
589 clone->bi_idx = 0; 580 clone->bi_idx = 0;
590 clone->bi_vcnt = bio_segments(base_bio); 581 clone->bi_vcnt = bio_segments(base_bio);
591 clone->bi_size = base_bio->bi_size; 582 clone->bi_size = base_bio->bi_size;
@@ -604,7 +595,6 @@ static void process_write(struct crypt_io *io)
604 struct convert_context ctx; 595 struct convert_context ctx;
605 unsigned remaining = base_bio->bi_size; 596 unsigned remaining = base_bio->bi_size;
606 sector_t sector = base_bio->bi_sector - io->target->begin; 597 sector_t sector = base_bio->bi_sector - io->target->begin;
607 unsigned bvec_idx = 0;
608 598
609 atomic_inc(&io->pending); 599 atomic_inc(&io->pending);
610 600
@@ -615,14 +605,14 @@ static void process_write(struct crypt_io *io)
615 * so repeat the whole process until all the data can be handled. 605 * so repeat the whole process until all the data can be handled.
616 */ 606 */
617 while (remaining) { 607 while (remaining) {
618 clone = crypt_alloc_buffer(cc, base_bio->bi_size, 608 clone = crypt_alloc_buffer(io, remaining);
619 io->first_clone, &bvec_idx);
620 if (unlikely(!clone)) { 609 if (unlikely(!clone)) {
621 dec_pending(io, -ENOMEM); 610 dec_pending(io, -ENOMEM);
622 return; 611 return;
623 } 612 }
624 613
625 ctx.bio_out = clone; 614 ctx.bio_out = clone;
615 ctx.idx_out = 0;
626 616
627 if (unlikely(crypt_convert(cc, &ctx) < 0)) { 617 if (unlikely(crypt_convert(cc, &ctx) < 0)) {
628 crypt_free_buffer_pages(cc, clone, clone->bi_size); 618 crypt_free_buffer_pages(cc, clone, clone->bi_size);
@@ -631,31 +621,26 @@ static void process_write(struct crypt_io *io)
631 return; 621 return;
632 } 622 }
633 623
634 clone_init(io, clone); 624 /* crypt_convert should have filled the clone bio */
635 clone->bi_sector = cc->start + sector; 625 BUG_ON(ctx.idx_out < clone->bi_vcnt);
636
637 if (!io->first_clone) {
638 /*
639 * hold a reference to the first clone, because it
640 * holds the bio_vec array and that can't be freed
641 * before all other clones are released
642 */
643 bio_get(clone);
644 io->first_clone = clone;
645 }
646 626
627 clone->bi_sector = cc->start + sector;
647 remaining -= clone->bi_size; 628 remaining -= clone->bi_size;
648 sector += bio_sectors(clone); 629 sector += bio_sectors(clone);
649 630
650 /* prevent bio_put of first_clone */ 631 /* Grab another reference to the io struct
632 * before we kick off the request */
651 if (remaining) 633 if (remaining)
652 atomic_inc(&io->pending); 634 atomic_inc(&io->pending);
653 635
654 generic_make_request(clone); 636 generic_make_request(clone);
655 637
638 /* Do not reference clone after this - it
639 * may be gone already. */
640
656 /* out of memory -> run queues */ 641 /* out of memory -> run queues */
657 if (remaining) 642 if (remaining)
658 congestion_wait(bio_data_dir(clone), HZ/100); 643 congestion_wait(WRITE, HZ/100);
659 } 644 }
660} 645}
661 646
@@ -832,6 +817,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
832 cc->iv_gen_ops = &crypt_iv_essiv_ops; 817 cc->iv_gen_ops = &crypt_iv_essiv_ops;
833 else if (strcmp(ivmode, "benbi") == 0) 818 else if (strcmp(ivmode, "benbi") == 0)
834 cc->iv_gen_ops = &crypt_iv_benbi_ops; 819 cc->iv_gen_ops = &crypt_iv_benbi_ops;
820 else if (strcmp(ivmode, "null") == 0)
821 cc->iv_gen_ops = &crypt_iv_null_ops;
835 else { 822 else {
836 ti->error = "Invalid IV mode"; 823 ti->error = "Invalid IV mode";
837 goto bad2; 824 goto bad2;
@@ -954,10 +941,12 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
954 struct crypt_config *cc = ti->private; 941 struct crypt_config *cc = ti->private;
955 struct crypt_io *io; 942 struct crypt_io *io;
956 943
944 if (bio_barrier(bio))
945 return -EOPNOTSUPP;
946
957 io = mempool_alloc(cc->io_pool, GFP_NOIO); 947 io = mempool_alloc(cc->io_pool, GFP_NOIO);
958 io->target = ti; 948 io->target = ti;
959 io->base_bio = bio; 949 io->base_bio = bio;
960 io->first_clone = NULL;
961 io->error = io->post_process = 0; 950 io->error = io->post_process = 0;
962 atomic_set(&io->pending, 0); 951 atomic_set(&io->pending, 0);
963 kcryptd_queue_io(io); 952 kcryptd_queue_io(io);
@@ -1057,7 +1046,7 @@ error:
1057 1046
1058static struct target_type crypt_target = { 1047static struct target_type crypt_target = {
1059 .name = "crypt", 1048 .name = "crypt",
1060 .version= {1, 3, 0}, 1049 .version= {1, 5, 0},
1061 .module = THIS_MODULE, 1050 .module = THIS_MODULE,
1062 .ctr = crypt_ctr, 1051 .ctr = crypt_ctr,
1063 .dtr = crypt_dtr, 1052 .dtr = crypt_dtr,
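
The process_write() rework replaces the first_clone bookkeeping with a
simpler shape: each pass allocates a clone sized to the bytes still
remaining (crypt_alloc_buffer() may return less than asked), submits it,
and subtracts what it covered. A userspace model of that carve-and-submit
loop (alloc_up_to() is an invented stand-in):

	#include <stdio.h>

	static unsigned alloc_up_to(unsigned want, unsigned limit)
	{
		return want < limit ? want : limit;	/* partial allocation */
	}

	int main(void)
	{
		unsigned remaining = 10 * 4096;		/* bytes left to encrypt */

		while (remaining) {
			unsigned chunk = alloc_up_to(remaining, 3 * 4096);

			/* ...convert and generic_make_request(clone) here... */
			remaining -= chunk;
			printf("submitted %u, %u left\n", chunk, remaining);
		}
		return 0;
	}

Because each clone now owns its own pages, the extra io->pending reference
taken before submission is the only thing keeping the io alive across the
loop iterations, hence the warning not to touch the clone after
generic_make_request().
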
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
new file mode 100644
index 000000000000..52c7cf9e5803
--- /dev/null
+++ b/drivers/md/dm-delay.c
@@ -0,0 +1,383 @@
1/*
2 * Copyright (C) 2005-2007 Red Hat GmbH
3 *
4 * A target that delays reads and/or writes and can send
5 * them to different devices.
6 *
7 * This file is released under the GPL.
8 */
9
10#include <linux/module.h>
11#include <linux/init.h>
12#include <linux/blkdev.h>
13#include <linux/bio.h>
14#include <linux/slab.h>
15
16#include "dm.h"
17#include "dm-bio-list.h"
18
19#define DM_MSG_PREFIX "delay"
20
21struct delay_c {
22 struct timer_list delay_timer;
23 struct semaphore timer_lock;
24 struct work_struct flush_expired_bios;
25 struct list_head delayed_bios;
26 atomic_t may_delay;
27 mempool_t *delayed_pool;
28
29 struct dm_dev *dev_read;
30 sector_t start_read;
31 unsigned read_delay;
32 unsigned reads;
33
34 struct dm_dev *dev_write;
35 sector_t start_write;
36 unsigned write_delay;
37 unsigned writes;
38};
39
40struct delay_info {
41 struct delay_c *context;
42 struct list_head list;
43 struct bio *bio;
44 unsigned long expires;
45};
46
47static DEFINE_MUTEX(delayed_bios_lock);
48
49static struct workqueue_struct *kdelayd_wq;
50static struct kmem_cache *delayed_cache;
51
52static void handle_delayed_timer(unsigned long data)
53{
54 struct delay_c *dc = (struct delay_c *)data;
55
56 queue_work(kdelayd_wq, &dc->flush_expired_bios);
57}
58
59static void queue_timeout(struct delay_c *dc, unsigned long expires)
60{
61 down(&dc->timer_lock);
62
63 if (!timer_pending(&dc->delay_timer) || expires < dc->delay_timer.expires)
64 mod_timer(&dc->delay_timer, expires);
65
66 up(&dc->timer_lock);
67}
68
69static void flush_bios(struct bio *bio)
70{
71 struct bio *n;
72
73 while (bio) {
74 n = bio->bi_next;
75 bio->bi_next = NULL;
76 generic_make_request(bio);
77 bio = n;
78 }
79}
80
81static struct bio *flush_delayed_bios(struct delay_c *dc, int flush_all)
82{
83 struct delay_info *delayed, *next;
84 unsigned long next_expires = 0;
85 int start_timer = 0;
86 BIO_LIST(flush_bios);
87
88 mutex_lock(&delayed_bios_lock);
89 list_for_each_entry_safe(delayed, next, &dc->delayed_bios, list) {
90 if (flush_all || time_after_eq(jiffies, delayed->expires)) {
91 list_del(&delayed->list);
92 bio_list_add(&flush_bios, delayed->bio);
93 if ((bio_data_dir(delayed->bio) == WRITE))
94 delayed->context->writes--;
95 else
96 delayed->context->reads--;
97 mempool_free(delayed, dc->delayed_pool);
98 continue;
99 }
100
101 if (!start_timer) {
102 start_timer = 1;
103 next_expires = delayed->expires;
104 } else
105 next_expires = min(next_expires, delayed->expires);
106 }
107
108 mutex_unlock(&delayed_bios_lock);
109
110 if (start_timer)
111 queue_timeout(dc, next_expires);
112
113 return bio_list_get(&flush_bios);
114}
115
116static void flush_expired_bios(struct work_struct *work)
117{
118 struct delay_c *dc;
119
120 dc = container_of(work, struct delay_c, flush_expired_bios);
121 flush_bios(flush_delayed_bios(dc, 0));
122}
123
124/*
125 * Mapping parameters:
126 * <device> <offset> <delay> [<write_device> <write_offset> <write_delay>]
127 *
128 * With separate write parameters, the first set is only used for reads.
129 * Delays are specified in milliseconds.
130 */
131static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
132{
133 struct delay_c *dc;
134 unsigned long long tmpll;
135
136 if (argc != 3 && argc != 6) {
137 ti->error = "requires exactly 3 or 6 arguments";
138 return -EINVAL;
139 }
140
141 dc = kmalloc(sizeof(*dc), GFP_KERNEL);
142 if (!dc) {
143 ti->error = "Cannot allocate context";
144 return -ENOMEM;
145 }
146
147 dc->reads = dc->writes = 0;
148
149 if (sscanf(argv[1], "%llu", &tmpll) != 1) {
150 ti->error = "Invalid device sector";
151 goto bad;
152 }
153 dc->start_read = tmpll;
154
155 if (sscanf(argv[2], "%u", &dc->read_delay) != 1) {
156 ti->error = "Invalid delay";
157 goto bad;
158 }
159
160 if (dm_get_device(ti, argv[0], dc->start_read, ti->len,
161 dm_table_get_mode(ti->table), &dc->dev_read)) {
162 ti->error = "Device lookup failed";
163 goto bad;
164 }
165
166 if (argc == 3) {
167 dc->dev_write = NULL;
168 goto out;
169 }
170
171 if (sscanf(argv[4], "%llu", &tmpll) != 1) {
172 ti->error = "Invalid write device sector";
173 goto bad;
174 }
175 dc->start_write = tmpll;
176
177 if (sscanf(argv[5], "%u", &dc->write_delay) != 1) {
178 ti->error = "Invalid write delay";
179 goto bad;
180 }
181
182 if (dm_get_device(ti, argv[3], dc->start_write, ti->len,
183 dm_table_get_mode(ti->table), &dc->dev_write)) {
184 ti->error = "Write device lookup failed";
185 dm_put_device(ti, dc->dev_read);
186 goto bad;
187 }
188
189out:
190 dc->delayed_pool = mempool_create_slab_pool(128, delayed_cache);
191 if (!dc->delayed_pool) {
192 DMERR("Couldn't create delayed bio pool.");
193 goto bad;
194 }
195
196 init_timer(&dc->delay_timer);
197 dc->delay_timer.function = handle_delayed_timer;
198 dc->delay_timer.data = (unsigned long)dc;
199
200 INIT_WORK(&dc->flush_expired_bios, flush_expired_bios);
201 INIT_LIST_HEAD(&dc->delayed_bios);
202 init_MUTEX(&dc->timer_lock);
203 atomic_set(&dc->may_delay, 1);
204
205 ti->private = dc;
206 return 0;
207
208bad:
209 kfree(dc);
210 return -EINVAL;
211}
212
213static void delay_dtr(struct dm_target *ti)
214{
215 struct delay_c *dc = ti->private;
216
217 flush_workqueue(kdelayd_wq);
218
219 dm_put_device(ti, dc->dev_read);
220
221 if (dc->dev_write)
222 dm_put_device(ti, dc->dev_write);
223
224 mempool_destroy(dc->delayed_pool);
225 kfree(dc);
226}
227
228static int delay_bio(struct delay_c *dc, int delay, struct bio *bio)
229{
230 struct delay_info *delayed;
231 unsigned long expires = 0;
232
233 if (!delay || !atomic_read(&dc->may_delay))
234 return 1;
235
236 delayed = mempool_alloc(dc->delayed_pool, GFP_NOIO);
237
238 delayed->context = dc;
239 delayed->bio = bio;
240 delayed->expires = expires = jiffies + (delay * HZ / 1000);
241
242 mutex_lock(&delayed_bios_lock);
243
244 if (bio_data_dir(bio) == WRITE)
245 dc->writes++;
246 else
247 dc->reads++;
248
249 list_add_tail(&delayed->list, &dc->delayed_bios);
250
251 mutex_unlock(&delayed_bios_lock);
252
253 queue_timeout(dc, expires);
254
255 return 0;
256}
257
258static void delay_presuspend(struct dm_target *ti)
259{
260 struct delay_c *dc = ti->private;
261
262 atomic_set(&dc->may_delay, 0);
263 del_timer_sync(&dc->delay_timer);
264 flush_bios(flush_delayed_bios(dc, 1));
265}
266
267static void delay_resume(struct dm_target *ti)
268{
269 struct delay_c *dc = ti->private;
270
271 atomic_set(&dc->may_delay, 1);
272}
273
274static int delay_map(struct dm_target *ti, struct bio *bio,
275 union map_info *map_context)
276{
277 struct delay_c *dc = ti->private;
278
279 if ((bio_data_dir(bio) == WRITE) && (dc->dev_write)) {
280 bio->bi_bdev = dc->dev_write->bdev;
281 bio->bi_sector = dc->start_write +
282 (bio->bi_sector - ti->begin);
283
284 return delay_bio(dc, dc->write_delay, bio);
285 }
286
287 bio->bi_bdev = dc->dev_read->bdev;
288 bio->bi_sector = dc->start_read +
289 (bio->bi_sector - ti->begin);
290
291 return delay_bio(dc, dc->read_delay, bio);
292}
293
294static int delay_status(struct dm_target *ti, status_type_t type,
295 char *result, unsigned maxlen)
296{
297 struct delay_c *dc = ti->private;
298 int sz = 0;
299
300 switch (type) {
301 case STATUSTYPE_INFO:
302 DMEMIT("%u %u", dc->reads, dc->writes);
303 break;
304
305 case STATUSTYPE_TABLE:
306 DMEMIT("%s %llu %u", dc->dev_read->name,
307 (unsigned long long) dc->start_read,
308 dc->read_delay);
309 if (dc->dev_write)
310 DMEMIT("%s %llu %u", dc->dev_write->name,
311 (unsigned long long) dc->start_write,
312 dc->write_delay);
313 break;
314 }
315
316 return 0;
317}
318
319static struct target_type delay_target = {
320 .name = "delay",
321 .version = {1, 0, 2},
322 .module = THIS_MODULE,
323 .ctr = delay_ctr,
324 .dtr = delay_dtr,
325 .map = delay_map,
326 .presuspend = delay_presuspend,
327 .resume = delay_resume,
328 .status = delay_status,
329};
330
331static int __init dm_delay_init(void)
332{
333 int r = -ENOMEM;
334
335 kdelayd_wq = create_workqueue("kdelayd");
336 if (!kdelayd_wq) {
337 DMERR("Couldn't start kdelayd");
338 goto bad_queue;
339 }
340
341 delayed_cache = kmem_cache_create("dm-delay",
342 sizeof(struct delay_info),
343 __alignof__(struct delay_info),
344 0, NULL, NULL);
345 if (!delayed_cache) {
346 DMERR("Couldn't create delayed bio cache.");
347 goto bad_memcache;
348 }
349
350 r = dm_register_target(&delay_target);
351 if (r < 0) {
352 DMERR("register failed %d", r);
353 goto bad_register;
354 }
355
356 return 0;
357
358bad_register:
359 kmem_cache_destroy(delayed_cache);
360bad_memcache:
361 destroy_workqueue(kdelayd_wq);
362bad_queue:
363 return r;
364}
365
366static void __exit dm_delay_exit(void)
367{
368 int r = dm_unregister_target(&delay_target);
369
370 if (r < 0)
371 DMERR("unregister failed %d", r);
372
373 kmem_cache_destroy(delayed_cache);
374 destroy_workqueue(kdelayd_wq);
375}
376
377/* Module hooks */
378module_init(dm_delay_init);
379module_exit(dm_delay_exit);
380
381MODULE_DESCRIPTION(DM_NAME " delay target");
382MODULE_AUTHOR("Heinz Mauelshagen <mauelshagen@redhat.com>");
383MODULE_LICENSE("GPL");
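
A delay table line takes <device> <offset> <delay_ms>, optionally followed
by a separate write triple, e.g. "0 409600 delay /dev/sdb 0 500" (device
and numbers hypothetical). Internally delay_bio() turns the millisecond
argument into a jiffies deadline with jiffies + delay * HZ / 1000, and
queue_timeout() only ever pulls a pending timer earlier. The conversion,
checked standalone (HZ value illustrative):

	#include <stdio.h>

	#define HZ 250		/* illustrative; kernels use 100/250/1000 */

	static unsigned long deadline(unsigned long jiffies_now, unsigned delay_ms)
	{
		return jiffies_now + (unsigned long)delay_ms * HZ / 1000;
	}

	int main(void)
	{
		/* 500 ms at HZ=250 is 125 ticks */
		printf("%lu\n", deadline(1000, 500));	/* 1125 */
		return 0;
	}
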
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index 99cdffa7fbfe..07e0a0c84f6e 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -1,7 +1,8 @@
1/* 1/*
2 * dm-snapshot.c 2 * dm-exception-store.c
3 * 3 *
4 * Copyright (C) 2001-2002 Sistina Software (UK) Limited. 4 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
5 * Copyright (C) 2006 Red Hat GmbH
5 * 6 *
6 * This file is released under the GPL. 7 * This file is released under the GPL.
7 */ 8 */
@@ -123,6 +124,7 @@ struct pstore {
123 atomic_t pending_count; 124 atomic_t pending_count;
124 uint32_t callback_count; 125 uint32_t callback_count;
125 struct commit_callback *callbacks; 126 struct commit_callback *callbacks;
127 struct dm_io_client *io_client;
126}; 128};
127 129
128static inline unsigned int sectors_to_pages(unsigned int sectors) 130static inline unsigned int sectors_to_pages(unsigned int sectors)
@@ -159,14 +161,20 @@ static void free_area(struct pstore *ps)
159 */ 161 */
160static int chunk_io(struct pstore *ps, uint32_t chunk, int rw) 162static int chunk_io(struct pstore *ps, uint32_t chunk, int rw)
161{ 163{
162 struct io_region where; 164 struct io_region where = {
163 unsigned long bits; 165 .bdev = ps->snap->cow->bdev,
164 166 .sector = ps->snap->chunk_size * chunk,
165 where.bdev = ps->snap->cow->bdev; 167 .count = ps->snap->chunk_size,
166 where.sector = ps->snap->chunk_size * chunk; 168 };
167 where.count = ps->snap->chunk_size; 169 struct dm_io_request io_req = {
168 170 .bi_rw = rw,
169 return dm_io_sync_vm(1, &where, rw, ps->area, &bits); 171 .mem.type = DM_IO_VMA,
172 .mem.ptr.vma = ps->area,
173 .client = ps->io_client,
174 .notify.fn = NULL,
175 };
176
177 return dm_io(&io_req, 1, &where, NULL);
170} 178}
171 179
172/* 180/*
@@ -213,17 +221,18 @@ static int read_header(struct pstore *ps, int *new_snapshot)
213 chunk_size_supplied = 0; 221 chunk_size_supplied = 0;
214 } 222 }
215 223
216 r = dm_io_get(sectors_to_pages(ps->snap->chunk_size)); 224 ps->io_client = dm_io_client_create(sectors_to_pages(ps->snap->
217 if (r) 225 chunk_size));
218 return r; 226 if (IS_ERR(ps->io_client))
227 return PTR_ERR(ps->io_client);
219 228
220 r = alloc_area(ps); 229 r = alloc_area(ps);
221 if (r) 230 if (r)
222 goto bad1; 231 return r;
223 232
224 r = chunk_io(ps, 0, READ); 233 r = chunk_io(ps, 0, READ);
225 if (r) 234 if (r)
226 goto bad2; 235 goto bad;
227 236
228 dh = (struct disk_header *) ps->area; 237 dh = (struct disk_header *) ps->area;
229 238
@@ -235,7 +244,7 @@ static int read_header(struct pstore *ps, int *new_snapshot)
235 if (le32_to_cpu(dh->magic) != SNAP_MAGIC) { 244 if (le32_to_cpu(dh->magic) != SNAP_MAGIC) {
236 DMWARN("Invalid or corrupt snapshot"); 245 DMWARN("Invalid or corrupt snapshot");
237 r = -ENXIO; 246 r = -ENXIO;
238 goto bad2; 247 goto bad;
239 } 248 }
240 249
241 *new_snapshot = 0; 250 *new_snapshot = 0;
@@ -252,27 +261,22 @@ static int read_header(struct pstore *ps, int *new_snapshot)
252 (unsigned long long)ps->snap->chunk_size); 261 (unsigned long long)ps->snap->chunk_size);
253 262
254 /* We had a bogus chunk_size. Fix stuff up. */ 263 /* We had a bogus chunk_size. Fix stuff up. */
255 dm_io_put(sectors_to_pages(ps->snap->chunk_size));
256 free_area(ps); 264 free_area(ps);
257 265
258 ps->snap->chunk_size = chunk_size; 266 ps->snap->chunk_size = chunk_size;
259 ps->snap->chunk_mask = chunk_size - 1; 267 ps->snap->chunk_mask = chunk_size - 1;
260 ps->snap->chunk_shift = ffs(chunk_size) - 1; 268 ps->snap->chunk_shift = ffs(chunk_size) - 1;
261 269
262 r = dm_io_get(sectors_to_pages(chunk_size)); 270 r = dm_io_client_resize(sectors_to_pages(ps->snap->chunk_size),
271 ps->io_client);
263 if (r) 272 if (r)
264 return r; 273 return r;
265 274
266 r = alloc_area(ps); 275 r = alloc_area(ps);
267 if (r) 276 return r;
268 goto bad1;
269
270 return 0;
271 277
272bad2: 278bad:
273 free_area(ps); 279 free_area(ps);
274bad1:
275 dm_io_put(sectors_to_pages(ps->snap->chunk_size));
276 return r; 280 return r;
277} 281}
278 282
@@ -405,7 +409,7 @@ static void persistent_destroy(struct exception_store *store)
405{ 409{
406 struct pstore *ps = get_info(store); 410 struct pstore *ps = get_info(store);
407 411
408 dm_io_put(sectors_to_pages(ps->snap->chunk_size)); 412 dm_io_client_destroy(ps->io_client);
409 vfree(ps->callbacks); 413 vfree(ps->callbacks);
410 free_area(ps); 414 free_area(ps);
411 kfree(ps); 415 kfree(ps);
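
This is the first conversion to the new client-based dm-io API added later
in this patch: read_header() creates a dm_io_client sized for the chunk,
the bogus-chunk_size path resizes it instead of the old
dm_io_put()/dm_io_get() dance, and persistent_destroy() tears it down.
Pool sizing follows pages_to_ios() (4 ios per page). A userspace model of
the client lifecycle (pool handling simulated, names invented):

	#include <stdio.h>
	#include <stdlib.h>

	struct io_client { unsigned ios; };

	static unsigned pages_to_ios(unsigned pages)
	{
		return 4 * pages;	/* same heuristic as dm-io.c */
	}

	static struct io_client *client_create(unsigned num_pages)
	{
		struct io_client *c = malloc(sizeof(*c));

		if (c)
			c->ios = pages_to_ios(num_pages);
		return c;
	}

	static void client_resize(struct io_client *c, unsigned num_pages)
	{
		c->ios = pages_to_ios(num_pages);	/* mempool_resize() in the patch */
	}

	int main(void)
	{
		struct io_client *c = client_create(8);

		if (!c)
			return 1;
		printf("%u\n", c->ios);		/* 32 */
		client_resize(c, 16);
		printf("%u\n", c->ios);		/* 64 */
		free(c);
		return 0;
	}
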
diff --git a/drivers/md/dm-hw-handler.h b/drivers/md/dm-hw-handler.h
index 32eff28e4adc..e0832e6fcf36 100644
--- a/drivers/md/dm-hw-handler.h
+++ b/drivers/md/dm-hw-handler.h
@@ -16,6 +16,7 @@
16struct hw_handler_type; 16struct hw_handler_type;
17struct hw_handler { 17struct hw_handler {
18 struct hw_handler_type *type; 18 struct hw_handler_type *type;
19 struct mapped_device *md;
19 void *context; 20 void *context;
20}; 21};
21 22
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 8bdc8a87b249..352c6fbeac53 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (C) 2003 Sistina Software 2 * Copyright (C) 2003 Sistina Software
3 * Copyright (C) 2006 Red Hat GmbH
3 * 4 *
4 * This file is released under the GPL. 5 * This file is released under the GPL.
5 */ 6 */
@@ -12,13 +13,17 @@
12#include <linux/sched.h> 13#include <linux/sched.h>
13#include <linux/slab.h> 14#include <linux/slab.h>
14 15
15static struct bio_set *_bios; 16struct dm_io_client {
17 mempool_t *pool;
18 struct bio_set *bios;
19};
16 20
17/* FIXME: can we shrink this ? */ 21/* FIXME: can we shrink this ? */
18struct io { 22struct io {
19 unsigned long error; 23 unsigned long error;
20 atomic_t count; 24 atomic_t count;
21 struct task_struct *sleeper; 25 struct task_struct *sleeper;
26 struct dm_io_client *client;
22 io_notify_fn callback; 27 io_notify_fn callback;
23 void *context; 28 void *context;
24}; 29};
@@ -26,63 +31,58 @@ struct io {
26/* 31/*
27 * io contexts are only dynamically allocated for asynchronous 32 * io contexts are only dynamically allocated for asynchronous
28 * io. Since async io is likely to be the majority of io we'll 33 * io. Since async io is likely to be the majority of io we'll
29 * have the same number of io contexts as buffer heads ! (FIXME: 34 * have the same number of io contexts as bios! (FIXME: must reduce this).
30 * must reduce this).
31 */ 35 */
32static unsigned _num_ios;
33static mempool_t *_io_pool;
34 36
35static unsigned int pages_to_ios(unsigned int pages) 37static unsigned int pages_to_ios(unsigned int pages)
36{ 38{
37 return 4 * pages; /* too many ? */ 39 return 4 * pages; /* too many ? */
38} 40}
39 41
40static int resize_pool(unsigned int new_ios) 42/*
43 * Create a client with mempool and bioset.
44 */
45struct dm_io_client *dm_io_client_create(unsigned num_pages)
41{ 46{
42 int r = 0; 47 unsigned ios = pages_to_ios(num_pages);
43 48 struct dm_io_client *client;
44 if (_io_pool) {
45 if (new_ios == 0) {
46 /* free off the pool */
47 mempool_destroy(_io_pool);
48 _io_pool = NULL;
49 bioset_free(_bios);
50
51 } else {
52 /* resize the pool */
53 r = mempool_resize(_io_pool, new_ios, GFP_KERNEL);
54 }
55 49
56 } else { 50 client = kmalloc(sizeof(*client), GFP_KERNEL);
57 /* create new pool */ 51 if (!client)
58 _io_pool = mempool_create_kmalloc_pool(new_ios, 52 return ERR_PTR(-ENOMEM);
59 sizeof(struct io)); 53
60 if (!_io_pool) 54 client->pool = mempool_create_kmalloc_pool(ios, sizeof(struct io));
61 return -ENOMEM; 55 if (!client->pool)
62 56 goto bad;
63 _bios = bioset_create(16, 16);
64 if (!_bios) {
65 mempool_destroy(_io_pool);
66 _io_pool = NULL;
67 return -ENOMEM;
68 }
69 }
70 57
71 if (!r) 58 client->bios = bioset_create(16, 16);
72 _num_ios = new_ios; 59 if (!client->bios)
60 goto bad;
73 61
74 return r; 62 return client;
63
64 bad:
65 if (client->pool)
66 mempool_destroy(client->pool);
67 kfree(client);
68 return ERR_PTR(-ENOMEM);
75} 69}
70EXPORT_SYMBOL(dm_io_client_create);
76 71
77int dm_io_get(unsigned int num_pages) 72int dm_io_client_resize(unsigned num_pages, struct dm_io_client *client)
78{ 73{
79 return resize_pool(_num_ios + pages_to_ios(num_pages)); 74 return mempool_resize(client->pool, pages_to_ios(num_pages),
75 GFP_KERNEL);
80} 76}
77EXPORT_SYMBOL(dm_io_client_resize);
81 78
82void dm_io_put(unsigned int num_pages) 79void dm_io_client_destroy(struct dm_io_client *client)
83{ 80{
84 resize_pool(_num_ios - pages_to_ios(num_pages)); 81 mempool_destroy(client->pool);
82 bioset_free(client->bios);
83 kfree(client);
85} 84}
85EXPORT_SYMBOL(dm_io_client_destroy);
86 86
87/*----------------------------------------------------------------- 87/*-----------------------------------------------------------------
88 * We need to keep track of which region a bio is doing io for. 88 * We need to keep track of which region a bio is doing io for.
@@ -118,7 +118,7 @@ static void dec_count(struct io *io, unsigned int region, int error)
118 io_notify_fn fn = io->callback; 118 io_notify_fn fn = io->callback;
119 void *context = io->context; 119 void *context = io->context;
120 120
121 mempool_free(io, _io_pool); 121 mempool_free(io, io->client->pool);
122 fn(r, context); 122 fn(r, context);
123 } 123 }
124 } 124 }
@@ -126,7 +126,8 @@ static void dec_count(struct io *io, unsigned int region, int error)
126 126
127static int endio(struct bio *bio, unsigned int done, int error) 127static int endio(struct bio *bio, unsigned int done, int error)
128{ 128{
129 struct io *io = (struct io *) bio->bi_private; 129 struct io *io;
130 unsigned region;
130 131
131 /* keep going until we've finished */ 132 /* keep going until we've finished */
132 if (bio->bi_size) 133 if (bio->bi_size)
@@ -135,10 +136,17 @@ static int endio(struct bio *bio, unsigned int done, int error)
135 if (error && bio_data_dir(bio) == READ) 136 if (error && bio_data_dir(bio) == READ)
136 zero_fill_bio(bio); 137 zero_fill_bio(bio);
137 138
138 dec_count(io, bio_get_region(bio), error); 139 /*
140 * The bio destructor in bio_put() may use the io object.
141 */
142 io = bio->bi_private;
143 region = bio_get_region(bio);
144
139 bio->bi_max_vecs++; 145 bio->bi_max_vecs++;
140 bio_put(bio); 146 bio_put(bio);
141 147
148 dec_count(io, region, error);
149
142 return 0; 150 return 0;
143} 151}
144 152
@@ -209,6 +217,9 @@ static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec)
209 dp->context_ptr = bvec; 217 dp->context_ptr = bvec;
210} 218}
211 219
220/*
221 * Functions for getting the pages from a VMA.
222 */
212static void vm_get_page(struct dpages *dp, 223static void vm_get_page(struct dpages *dp,
213 struct page **p, unsigned long *len, unsigned *offset) 224 struct page **p, unsigned long *len, unsigned *offset)
214{ 225{
@@ -233,7 +244,34 @@ static void vm_dp_init(struct dpages *dp, void *data)
233 244
234static void dm_bio_destructor(struct bio *bio) 245static void dm_bio_destructor(struct bio *bio)
235{ 246{
236 bio_free(bio, _bios); 247 struct io *io = bio->bi_private;
248
249 bio_free(bio, io->client->bios);
250}
251
252/*
253 * Functions for getting the pages from kernel memory.
254 */
255static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
256 unsigned *offset)
257{
258 *p = virt_to_page(dp->context_ptr);
259 *offset = dp->context_u;
260 *len = PAGE_SIZE - dp->context_u;
261}
262
263static void km_next_page(struct dpages *dp)
264{
265 dp->context_ptr += PAGE_SIZE - dp->context_u;
266 dp->context_u = 0;
267}
268
269static void km_dp_init(struct dpages *dp, void *data)
270{
271 dp->get_page = km_get_page;
272 dp->next_page = km_next_page;
273 dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
274 dp->context_ptr = data;
237} 275}
238 276
239/*----------------------------------------------------------------- 277/*-----------------------------------------------------------------
@@ -256,7 +294,7 @@ static void do_region(int rw, unsigned int region, struct io_region *where,
256 * to hide it from bio_add_page(). 294 * to hide it from bio_add_page().
257 */ 295 */
258 num_bvecs = (remaining / (PAGE_SIZE >> SECTOR_SHIFT)) + 2; 296 num_bvecs = (remaining / (PAGE_SIZE >> SECTOR_SHIFT)) + 2;
259 bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, _bios); 297 bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
260 bio->bi_sector = where->sector + (where->count - remaining); 298 bio->bi_sector = where->sector + (where->count - remaining);
261 bio->bi_bdev = where->bdev; 299 bio->bi_bdev = where->bdev;
262 bio->bi_end_io = endio; 300 bio->bi_end_io = endio;
@@ -311,8 +349,9 @@ static void dispatch_io(int rw, unsigned int num_regions,
311 dec_count(io, 0, 0); 349 dec_count(io, 0, 0);
312} 350}
313 351
314static int sync_io(unsigned int num_regions, struct io_region *where, 352static int sync_io(struct dm_io_client *client, unsigned int num_regions,
315 int rw, struct dpages *dp, unsigned long *error_bits) 353 struct io_region *where, int rw, struct dpages *dp,
354 unsigned long *error_bits)
316{ 355{
317 struct io io; 356 struct io io;
318 357
@@ -324,6 +363,7 @@ static int sync_io(unsigned int num_regions, struct io_region *where,
324 io.error = 0; 363 io.error = 0;
325 atomic_set(&io.count, 1); /* see dispatch_io() */ 364 atomic_set(&io.count, 1); /* see dispatch_io() */
326 io.sleeper = current; 365 io.sleeper = current;
366 io.client = client;
327 367
328 dispatch_io(rw, num_regions, where, dp, &io, 1); 368 dispatch_io(rw, num_regions, where, dp, &io, 1);
329 369
@@ -340,12 +380,15 @@ static int sync_io(unsigned int num_regions, struct io_region *where,
340 if (atomic_read(&io.count)) 380 if (atomic_read(&io.count))
341 return -EINTR; 381 return -EINTR;
342 382
343 *error_bits = io.error; 383 if (error_bits)
384 *error_bits = io.error;
385
344 return io.error ? -EIO : 0; 386 return io.error ? -EIO : 0;
345} 387}
346 388
347static int async_io(unsigned int num_regions, struct io_region *where, int rw, 389static int async_io(struct dm_io_client *client, unsigned int num_regions,
348 struct dpages *dp, io_notify_fn fn, void *context) 390 struct io_region *where, int rw, struct dpages *dp,
391 io_notify_fn fn, void *context)
349{ 392{
350 struct io *io; 393 struct io *io;
351 394
@@ -355,10 +398,11 @@ static int async_io(unsigned int num_regions, struct io_region *where, int rw,
355 return -EIO; 398 return -EIO;
356 } 399 }
357 400
358 io = mempool_alloc(_io_pool, GFP_NOIO); 401 io = mempool_alloc(client->pool, GFP_NOIO);
359 io->error = 0; 402 io->error = 0;
360 atomic_set(&io->count, 1); /* see dispatch_io() */ 403 atomic_set(&io->count, 1); /* see dispatch_io() */
361 io->sleeper = NULL; 404 io->sleeper = NULL;
405 io->client = client;
362 io->callback = fn; 406 io->callback = fn;
363 io->context = context; 407 io->context = context;
364 408
@@ -366,61 +410,51 @@ static int async_io(unsigned int num_regions, struct io_region *where, int rw,
366 return 0; 410 return 0;
367} 411}
368 412
369int dm_io_sync(unsigned int num_regions, struct io_region *where, int rw, 413static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
370 struct page_list *pl, unsigned int offset,
371 unsigned long *error_bits)
372{ 414{
373 struct dpages dp; 415 /* Set up dpages based on memory type */
374 list_dp_init(&dp, pl, offset); 416 switch (io_req->mem.type) {
375 return sync_io(num_regions, where, rw, &dp, error_bits); 417 case DM_IO_PAGE_LIST:
376} 418 list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
419 break;
420
421 case DM_IO_BVEC:
422 bvec_dp_init(dp, io_req->mem.ptr.bvec);
423 break;
424
425 case DM_IO_VMA:
426 vm_dp_init(dp, io_req->mem.ptr.vma);
427 break;
428
429 case DM_IO_KMEM:
430 km_dp_init(dp, io_req->mem.ptr.addr);
431 break;
432
433 default:
434 return -EINVAL;
435 }
377 436
378int dm_io_sync_bvec(unsigned int num_regions, struct io_region *where, int rw, 437 return 0;
379 struct bio_vec *bvec, unsigned long *error_bits)
380{
381 struct dpages dp;
382 bvec_dp_init(&dp, bvec);
383 return sync_io(num_regions, where, rw, &dp, error_bits);
384} 438}
385 439
386int dm_io_sync_vm(unsigned int num_regions, struct io_region *where, int rw, 440/*
387 void *data, unsigned long *error_bits) 441 * New collapsed (a)synchronous interface
442 */
443int dm_io(struct dm_io_request *io_req, unsigned num_regions,
444 struct io_region *where, unsigned long *sync_error_bits)
388{ 445{
446 int r;
389 struct dpages dp; 447 struct dpages dp;
390 vm_dp_init(&dp, data);
391 return sync_io(num_regions, where, rw, &dp, error_bits);
392}
393 448
394int dm_io_async(unsigned int num_regions, struct io_region *where, int rw, 449 r = dp_init(io_req, &dp);
395 struct page_list *pl, unsigned int offset, 450 if (r)
396 io_notify_fn fn, void *context) 451 return r;
397{
398 struct dpages dp;
399 list_dp_init(&dp, pl, offset);
400 return async_io(num_regions, where, rw, &dp, fn, context);
401}
402 452
403int dm_io_async_bvec(unsigned int num_regions, struct io_region *where, int rw, 453 if (!io_req->notify.fn)
404 struct bio_vec *bvec, io_notify_fn fn, void *context) 454 return sync_io(io_req->client, num_regions, where,
405{ 455 io_req->bi_rw, &dp, sync_error_bits);
406 struct dpages dp;
407 bvec_dp_init(&dp, bvec);
408 return async_io(num_regions, where, rw, &dp, fn, context);
409}
410 456
411int dm_io_async_vm(unsigned int num_regions, struct io_region *where, int rw, 457 return async_io(io_req->client, num_regions, where, io_req->bi_rw,
412 void *data, io_notify_fn fn, void *context) 458 &dp, io_req->notify.fn, io_req->notify.context);
413{
414 struct dpages dp;
415 vm_dp_init(&dp, data);
416 return async_io(num_regions, where, rw, &dp, fn, context);
417} 459}
418 460EXPORT_SYMBOL(dm_io);
419EXPORT_SYMBOL(dm_io_get);
420EXPORT_SYMBOL(dm_io_put);
421EXPORT_SYMBOL(dm_io_sync);
422EXPORT_SYMBOL(dm_io_async);
423EXPORT_SYMBOL(dm_io_sync_bvec);
424EXPORT_SYMBOL(dm_io_async_bvec);
425EXPORT_SYMBOL(dm_io_sync_vm);
426EXPORT_SYMBOL(dm_io_async_vm);
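
The DM_IO_KMEM helpers above keep the sub-page offset of the caller's buffer
in context_u, so only the first chunk handed out is unaligned and every
subsequent chunk starts on a page boundary. A worked example, assuming
PAGE_SIZE == 4096 and a hypothetical kernel address:

    /* km_dp_init(dp, (void *)0xffff81000001f123):                        */
    /*     context_ptr = ...f123, context_u = 0x123                       */
    /* 1st km_get_page(): offset = 0x123, len = 4096 - 0x123 = 0xedd      */
    /* km_next_page():    context_ptr += 0xedd -> ...20000, context_u = 0 */
    /* 2nd km_get_page(): offset = 0, len = 4096, i.e. a full page        */
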
diff --git a/drivers/md/dm-io.h b/drivers/md/dm-io.h
index f9035bfd1a9f..f647e2cceaa6 100644
--- a/drivers/md/dm-io.h
+++ b/drivers/md/dm-io.h
@@ -12,7 +12,7 @@
12struct io_region { 12struct io_region {
13 struct block_device *bdev; 13 struct block_device *bdev;
14 sector_t sector; 14 sector_t sector;
15 sector_t count; 15 sector_t count; /* If this is zero the region is ignored. */
16}; 16};
17 17
18struct page_list { 18struct page_list {
@@ -20,55 +20,60 @@ struct page_list {
20 struct page *page; 20 struct page *page;
21}; 21};
22 22
23
24/*
25 * 'error' is a bitset, with each bit indicating whether an error
26 * occurred doing io to the corresponding region.
27 */
28typedef void (*io_notify_fn)(unsigned long error, void *context); 23typedef void (*io_notify_fn)(unsigned long error, void *context);
29 24
25enum dm_io_mem_type {
26 DM_IO_PAGE_LIST,/* Page list */
27 DM_IO_BVEC, /* Bio vector */
28 DM_IO_VMA, /* Virtual memory area */
29 DM_IO_KMEM, /* Kernel memory */
30};
31
32struct dm_io_memory {
33 enum dm_io_mem_type type;
34
35 union {
36 struct page_list *pl;
37 struct bio_vec *bvec;
38 void *vma;
39 void *addr;
40 } ptr;
41
42 unsigned offset;
43};
44
45struct dm_io_notify {
46 io_notify_fn fn; /* Callback for asynchronous requests */
47 void *context; /* Passed to callback */
48};
30 49
31/* 50/*
32 * Before anyone uses the IO interface they should call 51 * IO request structure
33 * dm_io_get(), specifying roughly how many pages they are
34 * expecting to perform io on concurrently.
35 *
36 * This function may block.
37 */ 52 */
38int dm_io_get(unsigned int num_pages); 53struct dm_io_client;
39void dm_io_put(unsigned int num_pages); 54struct dm_io_request {
55 int bi_rw; /* READ|WRITE - not READA */
56 struct dm_io_memory mem; /* Memory to use for io */
57 struct dm_io_notify notify; /* Synchronous if notify.fn is NULL */
58 struct dm_io_client *client; /* Client memory handler */
59};
40 60
41/* 61/*
42 * Synchronous IO. 62 * For async io calls, users can alternatively use the dm_io() function below
63 * and dm_io_client_create() to create private mempools for the client.
43 * 64 *
44 * Please ensure that the rw flag in the next two functions is 65 * Create/destroy may block.
45 * either READ or WRITE, ie. we don't take READA. Any
46 * regions with a zero count field will be ignored.
47 */ 66 */
48int dm_io_sync(unsigned int num_regions, struct io_region *where, int rw, 67struct dm_io_client *dm_io_client_create(unsigned num_pages);
49 struct page_list *pl, unsigned int offset, 68int dm_io_client_resize(unsigned num_pages, struct dm_io_client *client);
50 unsigned long *error_bits); 69void dm_io_client_destroy(struct dm_io_client *client);
51
52int dm_io_sync_bvec(unsigned int num_regions, struct io_region *where, int rw,
53 struct bio_vec *bvec, unsigned long *error_bits);
54
55int dm_io_sync_vm(unsigned int num_regions, struct io_region *where, int rw,
56 void *data, unsigned long *error_bits);
57 70
58/* 71/*
59 * Aynchronous IO. 72 * IO interface using private per-client pools.
60 * 73 * Each bit in the optional 'sync_error_bits' bitset indicates whether an
61 * The 'where' array may be safely allocated on the stack since 74 * error occurred doing io to the corresponding region.
62 * the function takes a copy.
63 */ 75 */
64int dm_io_async(unsigned int num_regions, struct io_region *where, int rw, 76int dm_io(struct dm_io_request *io_req, unsigned num_regions,
65 struct page_list *pl, unsigned int offset, 77 struct io_region *region, unsigned long *sync_error_bits);
66 io_notify_fn fn, void *context);
67
68int dm_io_async_bvec(unsigned int num_regions, struct io_region *where, int rw,
69 struct bio_vec *bvec, io_notify_fn fn, void *context);
70
71int dm_io_async_vm(unsigned int num_regions, struct io_region *where, int rw,
72 void *data, io_notify_fn fn, void *context);
73 78
74#endif 79#endif
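
To make the collapsed interface above concrete, a caller that previously used
dm_io_sync_vm() now builds a dm_io_request instead. A minimal synchronous
read sketch; the device, buffer and sizes are hypothetical and error handling
is trimmed, but the types and calls are the ones declared in this header:

    struct io_region where = {
        .bdev   = dev->bdev,   /* some struct dm_dev the caller holds */
        .sector = 0,
        .count  = 8,           /* 8 sectors = 4KiB; zero would be ignored */
    };
    struct dm_io_request io_req;
    struct dm_io_client *client;
    unsigned long error_bits;
    int r;

    client = dm_io_client_create(1);   /* pool sized for one page of io */
    if (IS_ERR(client))
        return PTR_ERR(client);

    io_req.bi_rw = READ;               /* READ or WRITE, never READA */
    io_req.mem.type = DM_IO_VMA;
    io_req.mem.ptr.vma = buffer;       /* vmalloc'ed, at least 4KiB */
    io_req.notify.fn = NULL;           /* NULL callback => synchronous */
    io_req.client = client;

    r = dm_io(&io_req, 1, &where, &error_bits);   /* blocks until done */
    dm_io_client_destroy(client);
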
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index 6a9261351848..a66428d860fe 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -149,9 +149,12 @@ struct log_c {
149 FORCESYNC, /* Force a sync to happen */ 149 FORCESYNC, /* Force a sync to happen */
150 } sync; 150 } sync;
151 151
152 struct dm_io_request io_req;
153
152 /* 154 /*
153 * Disk log fields 155 * Disk log fields
154 */ 156 */
157 int log_dev_failed;
155 struct dm_dev *log_dev; 158 struct dm_dev *log_dev;
156 struct log_header header; 159 struct log_header header;
157 160
@@ -199,13 +202,20 @@ static void header_from_disk(struct log_header *core, struct log_header *disk)
199 core->nr_regions = le64_to_cpu(disk->nr_regions); 202 core->nr_regions = le64_to_cpu(disk->nr_regions);
200} 203}
201 204
205static int rw_header(struct log_c *lc, int rw)
206{
207 lc->io_req.bi_rw = rw;
208 lc->io_req.mem.ptr.vma = lc->disk_header;
209 lc->io_req.notify.fn = NULL;
210
211 return dm_io(&lc->io_req, 1, &lc->header_location, NULL);
212}
213
202static int read_header(struct log_c *log) 214static int read_header(struct log_c *log)
203{ 215{
204 int r; 216 int r;
205 unsigned long ebits;
206 217
207 r = dm_io_sync_vm(1, &log->header_location, READ, 218 r = rw_header(log, READ);
208 log->disk_header, &ebits);
209 if (r) 219 if (r)
210 return r; 220 return r;
211 221
@@ -233,11 +243,8 @@ static int read_header(struct log_c *log)
233 243
234static inline int write_header(struct log_c *log) 244static inline int write_header(struct log_c *log)
235{ 245{
236 unsigned long ebits;
237
238 header_to_disk(&log->header, log->disk_header); 246 header_to_disk(&log->header, log->disk_header);
239 return dm_io_sync_vm(1, &log->header_location, WRITE, 247 return rw_header(log, WRITE);
240 log->disk_header, &ebits);
241} 248}
242 249
243/*---------------------------------------------------------------- 250/*----------------------------------------------------------------
@@ -256,6 +263,7 @@ static int create_log_context(struct dirty_log *log, struct dm_target *ti,
256 uint32_t region_size; 263 uint32_t region_size;
257 unsigned int region_count; 264 unsigned int region_count;
258 size_t bitset_size, buf_size; 265 size_t bitset_size, buf_size;
266 int r;
259 267
260 if (argc < 1 || argc > 2) { 268 if (argc < 1 || argc > 2) {
261 DMWARN("wrong number of arguments to mirror log"); 269 DMWARN("wrong number of arguments to mirror log");
@@ -315,6 +323,7 @@ static int create_log_context(struct dirty_log *log, struct dm_target *ti,
315 lc->disk_header = NULL; 323 lc->disk_header = NULL;
316 } else { 324 } else {
317 lc->log_dev = dev; 325 lc->log_dev = dev;
326 lc->log_dev_failed = 0;
318 lc->header_location.bdev = lc->log_dev->bdev; 327 lc->header_location.bdev = lc->log_dev->bdev;
319 lc->header_location.sector = 0; 328 lc->header_location.sector = 0;
320 329
@@ -324,6 +333,15 @@ static int create_log_context(struct dirty_log *log, struct dm_target *ti,
324 buf_size = dm_round_up((LOG_OFFSET << SECTOR_SHIFT) + 333 buf_size = dm_round_up((LOG_OFFSET << SECTOR_SHIFT) +
325 bitset_size, ti->limits.hardsect_size); 334 bitset_size, ti->limits.hardsect_size);
326 lc->header_location.count = buf_size >> SECTOR_SHIFT; 335 lc->header_location.count = buf_size >> SECTOR_SHIFT;
336 lc->io_req.mem.type = DM_IO_VMA;
337 lc->io_req.client = dm_io_client_create(dm_div_up(buf_size,
338 PAGE_SIZE));
339 if (IS_ERR(lc->io_req.client)) {
340 r = PTR_ERR(lc->io_req.client);
341 DMWARN("couldn't allocate disk io client");
342 kfree(lc);
343 return -ENOMEM;
344 }
327 345
328 lc->disk_header = vmalloc(buf_size); 346 lc->disk_header = vmalloc(buf_size);
329 if (!lc->disk_header) { 347 if (!lc->disk_header) {
@@ -424,6 +442,7 @@ static void disk_dtr(struct dirty_log *log)
424 442
425 dm_put_device(lc->ti, lc->log_dev); 443 dm_put_device(lc->ti, lc->log_dev);
426 vfree(lc->disk_header); 444 vfree(lc->disk_header);
445 dm_io_client_destroy(lc->io_req.client);
427 destroy_log_context(lc); 446 destroy_log_context(lc);
428} 447}
429 448
@@ -437,6 +456,15 @@ static int count_bits32(uint32_t *addr, unsigned size)
437 return count; 456 return count;
438} 457}
439 458
459static void fail_log_device(struct log_c *lc)
460{
461 if (lc->log_dev_failed)
462 return;
463
464 lc->log_dev_failed = 1;
465 dm_table_event(lc->ti->table);
466}
467
440static int disk_resume(struct dirty_log *log) 468static int disk_resume(struct dirty_log *log)
441{ 469{
442 int r; 470 int r;
@@ -446,8 +474,19 @@ static int disk_resume(struct dirty_log *log)
446 474
447 /* read the disk header */ 475 /* read the disk header */
448 r = read_header(lc); 476 r = read_header(lc);
449 if (r) 477 if (r) {
450 return r; 478 DMWARN("%s: Failed to read header on mirror log device",
479 lc->log_dev->name);
480 fail_log_device(lc);
481 /*
482 * If the log device cannot be read, we must assume
483 * all regions are out-of-sync. If we simply return
484 * here, the state will be uninitialized and could
485 * lead us to return 'in-sync' status for regions
486 * that are actually 'out-of-sync'.
487 */
488 lc->header.nr_regions = 0;
489 }
451 490
452 /* set or clear any new bits -- device has grown */ 491 /* set or clear any new bits -- device has grown */
453 if (lc->sync == NOSYNC) 492 if (lc->sync == NOSYNC)
@@ -472,7 +511,14 @@ static int disk_resume(struct dirty_log *log)
472 lc->header.nr_regions = lc->region_count; 511 lc->header.nr_regions = lc->region_count;
473 512
474 /* write the new header */ 513 /* write the new header */
475 return write_header(lc); 514 r = write_header(lc);
515 if (r) {
516 DMWARN("%s: Failed to write header on mirror log device",
517 lc->log_dev->name);
518 fail_log_device(lc);
519 }
520
521 return r;
476} 522}
477 523
478static uint32_t core_get_region_size(struct dirty_log *log) 524static uint32_t core_get_region_size(struct dirty_log *log)
@@ -516,7 +562,9 @@ static int disk_flush(struct dirty_log *log)
516 return 0; 562 return 0;
517 563
518 r = write_header(lc); 564 r = write_header(lc);
519 if (!r) 565 if (r)
566 fail_log_device(lc);
567 else
520 lc->touched = 0; 568 lc->touched = 0;
521 569
522 return r; 570 return r;
@@ -591,6 +639,7 @@ static int core_status(struct dirty_log *log, status_type_t status,
591 639
592 switch(status) { 640 switch(status) {
593 case STATUSTYPE_INFO: 641 case STATUSTYPE_INFO:
642 DMEMIT("1 %s", log->type->name);
594 break; 643 break;
595 644
596 case STATUSTYPE_TABLE: 645 case STATUSTYPE_TABLE:
@@ -606,17 +655,17 @@ static int disk_status(struct dirty_log *log, status_type_t status,
606 char *result, unsigned int maxlen) 655 char *result, unsigned int maxlen)
607{ 656{
608 int sz = 0; 657 int sz = 0;
609 char buffer[16];
610 struct log_c *lc = log->context; 658 struct log_c *lc = log->context;
611 659
612 switch(status) { 660 switch(status) {
613 case STATUSTYPE_INFO: 661 case STATUSTYPE_INFO:
662 DMEMIT("3 %s %s %c", log->type->name, lc->log_dev->name,
663 lc->log_dev_failed ? 'D' : 'A');
614 break; 664 break;
615 665
616 case STATUSTYPE_TABLE: 666 case STATUSTYPE_TABLE:
617 format_dev_t(buffer, lc->log_dev->bdev->bd_dev);
618 DMEMIT("%s %u %s %u ", log->type->name, 667 DMEMIT("%s %u %s %u ", log->type->name,
619 lc->sync == DEFAULTSYNC ? 2 : 3, buffer, 668 lc->sync == DEFAULTSYNC ? 2 : 3, lc->log_dev->name,
620 lc->region_size); 669 lc->region_size);
621 DMEMIT_SYNC; 670 DMEMIT_SYNC;
622 } 671 }
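
With log_dev_failed wired in above, the disk log's STATUSTYPE_INFO output now
reports the log device and its health. For a disk log on a hypothetical
device 253:4, disk_status() emits the fragment

    3 disk 253:4 A    ('A' = alive; 'D' once fail_log_device() has run)

while core_status() emits "1 core", so userspace can tell a failed log device
apart from a healthy one from the status line alone.
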
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 3aa013506967..de54b39e6ffe 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -668,6 +668,9 @@ static int parse_hw_handler(struct arg_set *as, struct multipath *m)
668 return -EINVAL; 668 return -EINVAL;
669 } 669 }
670 670
671 m->hw_handler.md = dm_table_get_md(ti->table);
672 dm_put(m->hw_handler.md);
673
671 r = hwht->create(&m->hw_handler, hw_argc - 1, as->argv); 674 r = hwht->create(&m->hw_handler, hw_argc - 1, as->argv);
672 if (r) { 675 if (r) {
673 dm_put_hw_handler(hwht); 676 dm_put_hw_handler(hwht);
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 23a642619bed..ef124b71ccc8 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -21,15 +21,11 @@
21#include <linux/workqueue.h> 21#include <linux/workqueue.h>
22 22
23#define DM_MSG_PREFIX "raid1" 23#define DM_MSG_PREFIX "raid1"
24#define DM_IO_PAGES 64
24 25
25static struct workqueue_struct *_kmirrord_wq; 26#define DM_RAID1_HANDLE_ERRORS 0x01
26static struct work_struct _kmirrord_work;
27static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);
28 27
29static inline void wake(void) 28static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);
30{
31 queue_work(_kmirrord_wq, &_kmirrord_work);
32}
33 29
34/*----------------------------------------------------------------- 30/*-----------------------------------------------------------------
35 * Region hash 31 * Region hash
@@ -125,17 +121,23 @@ struct mirror_set {
125 struct list_head list; 121 struct list_head list;
126 struct region_hash rh; 122 struct region_hash rh;
127 struct kcopyd_client *kcopyd_client; 123 struct kcopyd_client *kcopyd_client;
124 uint64_t features;
128 125
129 spinlock_t lock; /* protects the next two lists */ 126 spinlock_t lock; /* protects the next two lists */
130 struct bio_list reads; 127 struct bio_list reads;
131 struct bio_list writes; 128 struct bio_list writes;
132 129
130 struct dm_io_client *io_client;
131
133 /* recovery */ 132 /* recovery */
134 region_t nr_regions; 133 region_t nr_regions;
135 int in_sync; 134 int in_sync;
136 135
137 struct mirror *default_mirror; /* Default mirror */ 136 struct mirror *default_mirror; /* Default mirror */
138 137
138 struct workqueue_struct *kmirrord_wq;
139 struct work_struct kmirrord_work;
140
139 unsigned int nr_mirrors; 141 unsigned int nr_mirrors;
140 struct mirror mirror[0]; 142 struct mirror mirror[0];
141}; 143};
@@ -153,6 +155,11 @@ static inline sector_t region_to_sector(struct region_hash *rh, region_t region)
153 return region << rh->region_shift; 155 return region << rh->region_shift;
154} 156}
155 157
158static void wake(struct mirror_set *ms)
159{
160 queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
161}
162
156/* FIXME move this */ 163/* FIXME move this */
157static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw); 164static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw);
158 165
@@ -398,8 +405,7 @@ static void rh_update_states(struct region_hash *rh)
398 mempool_free(reg, rh->region_pool); 405 mempool_free(reg, rh->region_pool);
399 } 406 }
400 407
401 if (!list_empty(&recovered)) 408 rh->log->type->flush(rh->log);
402 rh->log->type->flush(rh->log);
403 409
404 list_for_each_entry_safe (reg, next, &clean, list) 410 list_for_each_entry_safe (reg, next, &clean, list)
405 mempool_free(reg, rh->region_pool); 411 mempool_free(reg, rh->region_pool);
@@ -471,7 +477,7 @@ static void rh_dec(struct region_hash *rh, region_t region)
471 spin_unlock_irqrestore(&rh->region_lock, flags); 477 spin_unlock_irqrestore(&rh->region_lock, flags);
472 478
473 if (should_wake) 479 if (should_wake)
474 wake(); 480 wake(rh->ms);
475} 481}
476 482
477/* 483/*
@@ -558,7 +564,7 @@ static void rh_recovery_end(struct region *reg, int success)
558 list_add(&reg->list, &reg->rh->recovered_regions); 564 list_add(&reg->list, &reg->rh->recovered_regions);
559 spin_unlock_irq(&rh->region_lock); 565 spin_unlock_irq(&rh->region_lock);
560 566
561 wake(); 567 wake(rh->ms);
562} 568}
563 569
564static void rh_flush(struct region_hash *rh) 570static void rh_flush(struct region_hash *rh)
@@ -592,7 +598,7 @@ static void rh_start_recovery(struct region_hash *rh)
592 for (i = 0; i < MAX_RECOVERY; i++) 598 for (i = 0; i < MAX_RECOVERY; i++)
593 up(&rh->recovery_count); 599 up(&rh->recovery_count);
594 600
595 wake(); 601 wake(rh->ms);
596} 602}
597 603
598/* 604/*
@@ -735,7 +741,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
735 /* 741 /*
736 * We can only read balance if the region is in sync. 742 * We can only read balance if the region is in sync.
737 */ 743 */
738 if (rh_in_sync(&ms->rh, region, 0)) 744 if (rh_in_sync(&ms->rh, region, 1))
739 m = choose_mirror(ms, bio->bi_sector); 745 m = choose_mirror(ms, bio->bi_sector);
740 else 746 else
741 m = ms->default_mirror; 747 m = ms->default_mirror;
@@ -792,6 +798,14 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
792 unsigned int i; 798 unsigned int i;
793 struct io_region io[KCOPYD_MAX_REGIONS+1]; 799 struct io_region io[KCOPYD_MAX_REGIONS+1];
794 struct mirror *m; 800 struct mirror *m;
801 struct dm_io_request io_req = {
802 .bi_rw = WRITE,
803 .mem.type = DM_IO_BVEC,
804 .mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
805 .notify.fn = write_callback,
806 .notify.context = bio,
807 .client = ms->io_client,
808 };
795 809
796 for (i = 0; i < ms->nr_mirrors; i++) { 810 for (i = 0; i < ms->nr_mirrors; i++) {
797 m = ms->mirror + i; 811 m = ms->mirror + i;
@@ -802,9 +816,8 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
802 } 816 }
803 817
804 bio_set_ms(bio, ms); 818 bio_set_ms(bio, ms);
805 dm_io_async_bvec(ms->nr_mirrors, io, WRITE, 819
806 bio->bi_io_vec + bio->bi_idx, 820 (void) dm_io(&io_req, ms->nr_mirrors, io, NULL);
807 write_callback, bio);
808} 821}
809 822
810static void do_writes(struct mirror_set *ms, struct bio_list *writes) 823static void do_writes(struct mirror_set *ms, struct bio_list *writes)
@@ -870,11 +883,10 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
870/*----------------------------------------------------------------- 883/*-----------------------------------------------------------------
871 * kmirrord 884 * kmirrord
872 *---------------------------------------------------------------*/ 885 *---------------------------------------------------------------*/
873static LIST_HEAD(_mirror_sets); 886static void do_mirror(struct work_struct *work)
874static DECLARE_RWSEM(_mirror_sets_lock);
875
876static void do_mirror(struct mirror_set *ms)
877{ 887{
888 struct mirror_set *ms = container_of(work, struct mirror_set,
889 kmirrord_work);
878 struct bio_list reads, writes; 890 struct bio_list reads, writes;
879 891
880 spin_lock(&ms->lock); 892 spin_lock(&ms->lock);
@@ -890,16 +902,6 @@ static void do_mirror(struct mirror_set *ms)
890 do_writes(ms, &writes); 902 do_writes(ms, &writes);
891} 903}
892 904
893static void do_work(struct work_struct *ignored)
894{
895 struct mirror_set *ms;
896
897 down_read(&_mirror_sets_lock);
898 list_for_each_entry (ms, &_mirror_sets, list)
899 do_mirror(ms);
900 up_read(&_mirror_sets_lock);
901}
902
903/*----------------------------------------------------------------- 905/*-----------------------------------------------------------------
904 * Target functions 906 * Target functions
905 *---------------------------------------------------------------*/ 907 *---------------------------------------------------------------*/
@@ -931,6 +933,13 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors,
931 ms->in_sync = 0; 933 ms->in_sync = 0;
932 ms->default_mirror = &ms->mirror[DEFAULT_MIRROR]; 934 ms->default_mirror = &ms->mirror[DEFAULT_MIRROR];
933 935
936 ms->io_client = dm_io_client_create(DM_IO_PAGES);
937 if (IS_ERR(ms->io_client)) {
938 ti->error = "Error creating dm_io client";
939 kfree(ms);
940 return NULL;
941 }
942
934 if (rh_init(&ms->rh, ms, dl, region_size, ms->nr_regions)) { 943 if (rh_init(&ms->rh, ms, dl, region_size, ms->nr_regions)) {
935 ti->error = "Error creating dirty region hash"; 944 ti->error = "Error creating dirty region hash";
936 kfree(ms); 945 kfree(ms);
@@ -946,6 +955,7 @@ static void free_context(struct mirror_set *ms, struct dm_target *ti,
946 while (m--) 955 while (m--)
947 dm_put_device(ti, ms->mirror[m].dev); 956 dm_put_device(ti, ms->mirror[m].dev);
948 957
958 dm_io_client_destroy(ms->io_client);
949 rh_exit(&ms->rh); 959 rh_exit(&ms->rh);
950 kfree(ms); 960 kfree(ms);
951} 961}
@@ -978,23 +988,6 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
978 return 0; 988 return 0;
979} 989}
980 990
981static int add_mirror_set(struct mirror_set *ms)
982{
983 down_write(&_mirror_sets_lock);
984 list_add_tail(&ms->list, &_mirror_sets);
985 up_write(&_mirror_sets_lock);
986 wake();
987
988 return 0;
989}
990
991static void del_mirror_set(struct mirror_set *ms)
992{
993 down_write(&_mirror_sets_lock);
994 list_del(&ms->list);
995 up_write(&_mirror_sets_lock);
996}
997
998/* 991/*
999 * Create dirty log: log_type #log_params <log_params> 992 * Create dirty log: log_type #log_params <log_params>
1000 */ 993 */
@@ -1037,16 +1030,55 @@ static struct dirty_log *create_dirty_log(struct dm_target *ti,
1037 return dl; 1030 return dl;
1038} 1031}
1039 1032
1033static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
1034 unsigned *args_used)
1035{
1036 unsigned num_features;
1037 struct dm_target *ti = ms->ti;
1038
1039 *args_used = 0;
1040
1041 if (!argc)
1042 return 0;
1043
1044 if (sscanf(argv[0], "%u", &num_features) != 1) {
1045 ti->error = "Invalid number of features";
1046 return -EINVAL;
1047 }
1048
1049 argc--;
1050 argv++;
1051 (*args_used)++;
1052
1053 if (num_features > argc) {
1054 ti->error = "Not enough arguments to support feature count";
1055 return -EINVAL;
1056 }
1057
1058 if (!strcmp("handle_errors", argv[0]))
1059 ms->features |= DM_RAID1_HANDLE_ERRORS;
1060 else {
1061 ti->error = "Unrecognised feature requested";
1062 return -EINVAL;
1063 }
1064
1065 (*args_used)++;
1066
1067 return 0;
1068}
1069
1040/* 1070/*
1041 * Construct a mirror mapping: 1071 * Construct a mirror mapping:
1042 * 1072 *
1043 * log_type #log_params <log_params> 1073 * log_type #log_params <log_params>
1044 * #mirrors [mirror_path offset]{2,} 1074 * #mirrors [mirror_path offset]{2,}
1075 * [#features <features>]
1045 * 1076 *
1046 * log_type is "core" or "disk" 1077 * log_type is "core" or "disk"
1047 * #log_params is between 1 and 3 1078 * #log_params is between 1 and 3
1079 *
1080 * If present, features must be "handle_errors".
1048 */ 1081 */
1049#define DM_IO_PAGES 64
1050static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv) 1082static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1051{ 1083{
1052 int r; 1084 int r;
@@ -1070,8 +1102,8 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1070 1102
1071 argv++, argc--; 1103 argv++, argc--;
1072 1104
1073 if (argc != nr_mirrors * 2) { 1105 if (argc < nr_mirrors * 2) {
1074 ti->error = "Wrong number of mirror arguments"; 1106 ti->error = "Too few mirror arguments";
1075 dm_destroy_dirty_log(dl); 1107 dm_destroy_dirty_log(dl);
1076 return -EINVAL; 1108 return -EINVAL;
1077 } 1109 }
@@ -1096,13 +1128,37 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1096 ti->private = ms; 1128 ti->private = ms;
1097 ti->split_io = ms->rh.region_size; 1129 ti->split_io = ms->rh.region_size;
1098 1130
1131 ms->kmirrord_wq = create_singlethread_workqueue("kmirrord");
1132 if (!ms->kmirrord_wq) {
1133 DMERR("couldn't start kmirrord");
1134 free_context(ms, ti, m);
1135 return -ENOMEM;
1136 }
1137 INIT_WORK(&ms->kmirrord_work, do_mirror);
1138
1139 r = parse_features(ms, argc, argv, &args_used);
1140 if (r) {
1141 free_context(ms, ti, ms->nr_mirrors);
1142 return r;
1143 }
1144
1145 argv += args_used;
1146 argc -= args_used;
1147
1148 if (argc) {
1149 ti->error = "Too many mirror arguments";
1150 free_context(ms, ti, ms->nr_mirrors);
1151 return -EINVAL;
1152 }
1153
1099 r = kcopyd_client_create(DM_IO_PAGES, &ms->kcopyd_client); 1154 r = kcopyd_client_create(DM_IO_PAGES, &ms->kcopyd_client);
1100 if (r) { 1155 if (r) {
1156 destroy_workqueue(ms->kmirrord_wq);
1101 free_context(ms, ti, ms->nr_mirrors); 1157 free_context(ms, ti, ms->nr_mirrors);
1102 return r; 1158 return r;
1103 } 1159 }
1104 1160
1105 add_mirror_set(ms); 1161 wake(ms);
1106 return 0; 1162 return 0;
1107} 1163}
1108 1164
@@ -1110,8 +1166,9 @@ static void mirror_dtr(struct dm_target *ti)
1110{ 1166{
1111 struct mirror_set *ms = (struct mirror_set *) ti->private; 1167 struct mirror_set *ms = (struct mirror_set *) ti->private;
1112 1168
1113 del_mirror_set(ms); 1169 flush_workqueue(ms->kmirrord_wq);
1114 kcopyd_client_destroy(ms->kcopyd_client); 1170 kcopyd_client_destroy(ms->kcopyd_client);
1171 destroy_workqueue(ms->kmirrord_wq);
1115 free_context(ms, ti, ms->nr_mirrors); 1172 free_context(ms, ti, ms->nr_mirrors);
1116} 1173}
1117 1174
@@ -1127,7 +1184,7 @@ static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
1127 spin_unlock(&ms->lock); 1184 spin_unlock(&ms->lock);
1128 1185
1129 if (should_wake) 1186 if (should_wake)
1130 wake(); 1187 wake(ms);
1131} 1188}
1132 1189
1133/* 1190/*
@@ -1222,11 +1279,9 @@ static void mirror_resume(struct dm_target *ti)
1222static int mirror_status(struct dm_target *ti, status_type_t type, 1279static int mirror_status(struct dm_target *ti, status_type_t type,
1223 char *result, unsigned int maxlen) 1280 char *result, unsigned int maxlen)
1224{ 1281{
1225 unsigned int m, sz; 1282 unsigned int m, sz = 0;
1226 struct mirror_set *ms = (struct mirror_set *) ti->private; 1283 struct mirror_set *ms = (struct mirror_set *) ti->private;
1227 1284
1228 sz = ms->rh.log->type->status(ms->rh.log, type, result, maxlen);
1229
1230 switch (type) { 1285 switch (type) {
1231 case STATUSTYPE_INFO: 1286 case STATUSTYPE_INFO:
1232 DMEMIT("%d ", ms->nr_mirrors); 1287 DMEMIT("%d ", ms->nr_mirrors);
@@ -1237,13 +1292,21 @@ static int mirror_status(struct dm_target *ti, status_type_t type,
1237 (unsigned long long)ms->rh.log->type-> 1292 (unsigned long long)ms->rh.log->type->
1238 get_sync_count(ms->rh.log), 1293 get_sync_count(ms->rh.log),
1239 (unsigned long long)ms->nr_regions); 1294 (unsigned long long)ms->nr_regions);
1295
1296 sz = ms->rh.log->type->status(ms->rh.log, type, result, maxlen);
1297
1240 break; 1298 break;
1241 1299
1242 case STATUSTYPE_TABLE: 1300 case STATUSTYPE_TABLE:
1301 sz = ms->rh.log->type->status(ms->rh.log, type, result, maxlen);
1302
1243 DMEMIT("%d", ms->nr_mirrors); 1303 DMEMIT("%d", ms->nr_mirrors);
1244 for (m = 0; m < ms->nr_mirrors; m++) 1304 for (m = 0; m < ms->nr_mirrors; m++)
1245 DMEMIT(" %s %llu", ms->mirror[m].dev->name, 1305 DMEMIT(" %s %llu", ms->mirror[m].dev->name,
1246 (unsigned long long)ms->mirror[m].offset); 1306 (unsigned long long)ms->mirror[m].offset);
1307
1308 if (ms->features & DM_RAID1_HANDLE_ERRORS)
1309 DMEMIT(" 1 handle_errors");
1247 } 1310 }
1248 1311
1249 return 0; 1312 return 0;
@@ -1251,7 +1314,7 @@ static int mirror_status(struct dm_target *ti, status_type_t type,
1251 1314
1252static struct target_type mirror_target = { 1315static struct target_type mirror_target = {
1253 .name = "mirror", 1316 .name = "mirror",
1254 .version = {1, 0, 2}, 1317 .version = {1, 0, 3},
1255 .module = THIS_MODULE, 1318 .module = THIS_MODULE,
1256 .ctr = mirror_ctr, 1319 .ctr = mirror_ctr,
1257 .dtr = mirror_dtr, 1320 .dtr = mirror_dtr,
@@ -1270,20 +1333,11 @@ static int __init dm_mirror_init(void)
1270 if (r) 1333 if (r)
1271 return r; 1334 return r;
1272 1335
1273 _kmirrord_wq = create_singlethread_workqueue("kmirrord");
1274 if (!_kmirrord_wq) {
1275 DMERR("couldn't start kmirrord");
1276 dm_dirty_log_exit();
1277 return r;
1278 }
1279 INIT_WORK(&_kmirrord_work, do_work);
1280
1281 r = dm_register_target(&mirror_target); 1336 r = dm_register_target(&mirror_target);
1282 if (r < 0) { 1337 if (r < 0) {
1283 DMERR("%s: Failed to register mirror target", 1338 DMERR("%s: Failed to register mirror target",
1284 mirror_target.name); 1339 mirror_target.name);
1285 dm_dirty_log_exit(); 1340 dm_dirty_log_exit();
1286 destroy_workqueue(_kmirrord_wq);
1287 } 1341 }
1288 1342
1289 return r; 1343 return r;
@@ -1297,7 +1351,6 @@ static void __exit dm_mirror_exit(void)
1297 if (r < 0) 1351 if (r < 0)
1298 DMERR("%s: unregister failed %d", mirror_target.name, r); 1352 DMERR("%s: unregister failed %d", mirror_target.name, r);
1299 1353
1300 destroy_workqueue(_kmirrord_wq);
1301 dm_dirty_log_exit(); 1354 dm_dirty_log_exit();
1302} 1355}
1303 1356
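
Putting the extended constructor syntax together, a mirror table line with
the new optional feature block might look like this (devices, offsets and
lengths are hypothetical; all sizes are in sectors):

    0 819200 mirror core 1 1024 2 /dev/sda1 0 /dev/sdb1 0 1 handle_errors

That is: a core log with one parameter (region size 1024), two mirror legs,
and a one-entry feature list. Dropping the trailing "1 handle_errors" leaves
argc at zero when parse_features() runs, so existing tables parse unchanged.
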
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 05befa91807a..2fc199b0016b 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -425,13 +425,15 @@ static void close_dev(struct dm_dev *d, struct mapped_device *md)
425} 425}
426 426
427/* 427/*
428 * If possible (ie. blk_size[major] is set), this checks an area 428 * If possible, this checks an area of a destination device is valid.
429 * of a destination device is valid.
430 */ 429 */
431static int check_device_area(struct dm_dev *dd, sector_t start, sector_t len) 430static int check_device_area(struct dm_dev *dd, sector_t start, sector_t len)
432{ 431{
433 sector_t dev_size; 432 sector_t dev_size = dd->bdev->bd_inode->i_size >> SECTOR_SHIFT;
434 dev_size = dd->bdev->bd_inode->i_size >> SECTOR_SHIFT; 433
434 if (!dev_size)
435 return 1;
436
435 return ((start < dev_size) && (len <= (dev_size - start))); 437 return ((start < dev_size) && (len <= (dev_size - start)));
436} 438}
437 439
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 11a98df298ec..2717a355dc5b 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1236,6 +1236,7 @@ void dm_put(struct mapped_device *md)
1236 free_dev(md); 1236 free_dev(md);
1237 } 1237 }
1238} 1238}
1239EXPORT_SYMBOL_GPL(dm_put);
1239 1240
1240/* 1241/*
1241 * Process the deferred bios 1242 * Process the deferred bios
diff --git a/drivers/md/kcopyd.c b/drivers/md/kcopyd.c
index b46f6c575f7e..dbc234e3c69f 100644
--- a/drivers/md/kcopyd.c
+++ b/drivers/md/kcopyd.c
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (C) 2002 Sistina Software (UK) Limited. 2 * Copyright (C) 2002 Sistina Software (UK) Limited.
3 * Copyright (C) 2006 Red Hat GmbH
3 * 4 *
4 * This file is released under the GPL. 5 * This file is released under the GPL.
5 * 6 *
@@ -45,6 +46,8 @@ struct kcopyd_client {
45 unsigned int nr_pages; 46 unsigned int nr_pages;
46 unsigned int nr_free_pages; 47 unsigned int nr_free_pages;
47 48
49 struct dm_io_client *io_client;
50
48 wait_queue_head_t destroyq; 51 wait_queue_head_t destroyq;
49 atomic_t nr_jobs; 52 atomic_t nr_jobs;
50}; 53};
@@ -342,16 +345,20 @@ static void complete_io(unsigned long error, void *context)
342static int run_io_job(struct kcopyd_job *job) 345static int run_io_job(struct kcopyd_job *job)
343{ 346{
344 int r; 347 int r;
348 struct dm_io_request io_req = {
349 .bi_rw = job->rw,
350 .mem.type = DM_IO_PAGE_LIST,
351 .mem.ptr.pl = job->pages,
352 .mem.offset = job->offset,
353 .notify.fn = complete_io,
354 .notify.context = job,
355 .client = job->kc->io_client,
356 };
345 357
346 if (job->rw == READ) 358 if (job->rw == READ)
347 r = dm_io_async(1, &job->source, job->rw, 359 r = dm_io(&io_req, 1, &job->source, NULL);
348 job->pages,
349 job->offset, complete_io, job);
350
351 else 360 else
352 r = dm_io_async(job->num_dests, job->dests, job->rw, 361 r = dm_io(&io_req, job->num_dests, job->dests, NULL);
353 job->pages,
354 job->offset, complete_io, job);
355 362
356 return r; 363 return r;
357} 364}
@@ -670,8 +677,9 @@ int kcopyd_client_create(unsigned int nr_pages, struct kcopyd_client **result)
670 return r; 677 return r;
671 } 678 }
672 679
673 r = dm_io_get(nr_pages); 680 kc->io_client = dm_io_client_create(nr_pages);
674 if (r) { 681 if (IS_ERR(kc->io_client)) {
682 r = PTR_ERR(kc->io_client);
675 client_free_pages(kc); 683 client_free_pages(kc);
676 kfree(kc); 684 kfree(kc);
677 kcopyd_exit(); 685 kcopyd_exit();
@@ -691,7 +699,7 @@ void kcopyd_client_destroy(struct kcopyd_client *kc)
691 /* Wait for completion of all jobs submitted by this client. */ 699 /* Wait for completion of all jobs submitted by this client. */
692 wait_event(kc->destroyq, !atomic_read(&kc->nr_jobs)); 700 wait_event(kc->destroyq, !atomic_read(&kc->nr_jobs));
693 701
694 dm_io_put(kc->nr_pages); 702 dm_io_client_destroy(kc->io_client);
695 client_free_pages(kc); 703 client_free_pages(kc);
696 client_del(kc); 704 client_del(kc);
697 kfree(kc); 705 kfree(kc);
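
run_io_job() above is the asynchronous flavour of the same interface: with
notify.fn set, dm_io() dispatches the io and returns immediately, and the
final sync_error_bits argument is passed as NULL because errors arrive via
the callback. A minimal callback sketch (my_job and handle_* are hypothetical):

    static void my_complete(unsigned long error, void *context)
    {
        struct my_job *job = context;  /* whatever was in notify.context */

        /* bit n set in 'error' => io to region n failed */
        if (test_bit(0, &error))
            handle_failure(job);
        else
            handle_success(job);
    }
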
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 2b4315d7e5d6..65814b0340cb 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -33,6 +33,7 @@
33*/ 33*/
34 34
35#include <linux/module.h> 35#include <linux/module.h>
36#include <linux/kernel.h>
36#include <linux/kthread.h> 37#include <linux/kthread.h>
37#include <linux/linkage.h> 38#include <linux/linkage.h>
38#include <linux/raid/md.h> 39#include <linux/raid/md.h>
@@ -273,6 +274,7 @@ static mddev_t * mddev_find(dev_t unit)
273 atomic_set(&new->active, 1); 274 atomic_set(&new->active, 1);
274 spin_lock_init(&new->write_lock); 275 spin_lock_init(&new->write_lock);
275 init_waitqueue_head(&new->sb_wait); 276 init_waitqueue_head(&new->sb_wait);
277 new->reshape_position = MaxSector;
276 278
277 new->queue = blk_alloc_queue(GFP_KERNEL); 279 new->queue = blk_alloc_queue(GFP_KERNEL);
278 if (!new->queue) { 280 if (!new->queue) {
@@ -589,14 +591,41 @@ abort:
589 return ret; 591 return ret;
590} 592}
591 593
594
595static u32 md_csum_fold(u32 csum)
596{
597 csum = (csum & 0xffff) + (csum >> 16);
598 return (csum & 0xffff) + (csum >> 16);
599}
600
592static unsigned int calc_sb_csum(mdp_super_t * sb) 601static unsigned int calc_sb_csum(mdp_super_t * sb)
593{ 602{
603 u64 newcsum = 0;
604 u32 *sb32 = (u32*)sb;
605 int i;
594 unsigned int disk_csum, csum; 606 unsigned int disk_csum, csum;
595 607
596 disk_csum = sb->sb_csum; 608 disk_csum = sb->sb_csum;
597 sb->sb_csum = 0; 609 sb->sb_csum = 0;
598 csum = csum_partial((void *)sb, MD_SB_BYTES, 0); 610
611 for (i = 0; i < MD_SB_BYTES/4 ; i++)
612 newcsum += sb32[i];
613 csum = (newcsum & 0xffffffff) + (newcsum>>32);
614
615
616#ifdef CONFIG_ALPHA
617 /* This used to use csum_partial, which was wrong for several
618 * reasons including that different results are returned on
619 * different architectures. It isn't critical that we get exactly
620 * the same return value as before (we always csum_fold before
621 * testing, and that removes any differences). However as we
622 * know that csum_partial always returned a 16bit value on
623 * alphas, do a fold to maximise conformity to previous behaviour.
624 */
625 sb->sb_csum = md_csum_fold(disk_csum);
626#else
599 sb->sb_csum = disk_csum; 627 sb->sb_csum = disk_csum;
628#endif
600 return csum; 629 return csum;
601} 630}
602 631
@@ -684,7 +713,7 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
684 if (sb->raid_disks <= 0) 713 if (sb->raid_disks <= 0)
685 goto abort; 714 goto abort;
686 715
687 if (csum_fold(calc_sb_csum(sb)) != csum_fold(sb->sb_csum)) { 716 if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
688 printk(KERN_WARNING "md: invalid superblock checksum on %s\n", 717 printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
689 b); 718 b);
690 goto abort; 719 goto abort;
@@ -694,6 +723,17 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
694 rdev->data_offset = 0; 723 rdev->data_offset = 0;
695 rdev->sb_size = MD_SB_BYTES; 724 rdev->sb_size = MD_SB_BYTES;
696 725
726 if (sb->state & (1<<MD_SB_BITMAP_PRESENT)) {
727 if (sb->level != 1 && sb->level != 4
728 && sb->level != 5 && sb->level != 6
729 && sb->level != 10) {
730 /* FIXME use a better test */
731 printk(KERN_WARNING
732 "md: bitmaps not supported for this level.\n");
733 goto abort;
734 }
735 }
736
697 if (sb->level == LEVEL_MULTIPATH) 737 if (sb->level == LEVEL_MULTIPATH)
698 rdev->desc_nr = -1; 738 rdev->desc_nr = -1;
699 else 739 else
@@ -792,16 +832,8 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
792 mddev->max_disks = MD_SB_DISKS; 832 mddev->max_disks = MD_SB_DISKS;
793 833
794 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) && 834 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
795 mddev->bitmap_file == NULL) { 835 mddev->bitmap_file == NULL)
796 if (mddev->level != 1 && mddev->level != 4
797 && mddev->level != 5 && mddev->level != 6
798 && mddev->level != 10) {
799 /* FIXME use a better test */
800 printk(KERN_WARNING "md: bitmaps not supported for this level.\n");
801 return -EINVAL;
802 }
803 mddev->bitmap_offset = mddev->default_bitmap_offset; 836 mddev->bitmap_offset = mddev->default_bitmap_offset;
804 }
805 837
806 } else if (mddev->pers == NULL) { 838 } else if (mddev->pers == NULL) {
807 /* Insist on good event counter while assembling */ 839 /* Insist on good event counter while assembling */
@@ -1058,6 +1090,18 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
1058 bdevname(rdev->bdev,b)); 1090 bdevname(rdev->bdev,b));
1059 return -EINVAL; 1091 return -EINVAL;
1060 } 1092 }
1093 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET)) {
1094 if (sb->level != cpu_to_le32(1) &&
1095 sb->level != cpu_to_le32(4) &&
1096 sb->level != cpu_to_le32(5) &&
1097 sb->level != cpu_to_le32(6) &&
1098 sb->level != cpu_to_le32(10)) {
1099 printk(KERN_WARNING
1100 "md: bitmaps not supported for this level.\n");
1101 return -EINVAL;
1102 }
1103 }
1104
1061 rdev->preferred_minor = 0xffff; 1105 rdev->preferred_minor = 0xffff;
1062 rdev->data_offset = le64_to_cpu(sb->data_offset); 1106 rdev->data_offset = le64_to_cpu(sb->data_offset);
1063 atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read)); 1107 atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
@@ -1141,14 +1185,9 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
1141 mddev->max_disks = (4096-256)/2; 1185 mddev->max_disks = (4096-256)/2;
1142 1186
1143 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) && 1187 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
1144 mddev->bitmap_file == NULL ) { 1188 mddev->bitmap_file == NULL )
1145 if (mddev->level != 1 && mddev->level != 5 && mddev->level != 6
1146 && mddev->level != 10) {
1147 printk(KERN_WARNING "md: bitmaps not supported for this level.\n");
1148 return -EINVAL;
1149 }
1150 mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset); 1189 mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset);
1151 } 1190
1152 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) { 1191 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
1153 mddev->reshape_position = le64_to_cpu(sb->reshape_position); 1192 mddev->reshape_position = le64_to_cpu(sb->reshape_position);
1154 mddev->delta_disks = le32_to_cpu(sb->delta_disks); 1193 mddev->delta_disks = le32_to_cpu(sb->delta_disks);
@@ -2204,6 +2243,10 @@ static ssize_t
2204layout_show(mddev_t *mddev, char *page) 2243layout_show(mddev_t *mddev, char *page)
2205{ 2244{
2206 /* just a number, not meaningful for all levels */ 2245 /* just a number, not meaningful for all levels */
2246 if (mddev->reshape_position != MaxSector &&
2247 mddev->layout != mddev->new_layout)
2248 return sprintf(page, "%d (%d)\n",
2249 mddev->new_layout, mddev->layout);
2207 return sprintf(page, "%d\n", mddev->layout); 2250 return sprintf(page, "%d\n", mddev->layout);
2208} 2251}
2209 2252
@@ -2212,13 +2255,16 @@ layout_store(mddev_t *mddev, const char *buf, size_t len)
2212{ 2255{
2213 char *e; 2256 char *e;
2214 unsigned long n = simple_strtoul(buf, &e, 10); 2257 unsigned long n = simple_strtoul(buf, &e, 10);
2215 if (mddev->pers)
2216 return -EBUSY;
2217 2258
2218 if (!*buf || (*e && *e != '\n')) 2259 if (!*buf || (*e && *e != '\n'))
2219 return -EINVAL; 2260 return -EINVAL;
2220 2261
2221 mddev->layout = n; 2262 if (mddev->pers)
2263 return -EBUSY;
2264 if (mddev->reshape_position != MaxSector)
2265 mddev->new_layout = n;
2266 else
2267 mddev->layout = n;
2222 return len; 2268 return len;
2223} 2269}
2224static struct md_sysfs_entry md_layout = 2270static struct md_sysfs_entry md_layout =
@@ -2230,6 +2276,10 @@ raid_disks_show(mddev_t *mddev, char *page)
2230{ 2276{
2231 if (mddev->raid_disks == 0) 2277 if (mddev->raid_disks == 0)
2232 return 0; 2278 return 0;
2279 if (mddev->reshape_position != MaxSector &&
2280 mddev->delta_disks != 0)
2281 return sprintf(page, "%d (%d)\n", mddev->raid_disks,
2282 mddev->raid_disks - mddev->delta_disks);
2233 return sprintf(page, "%d\n", mddev->raid_disks); 2283 return sprintf(page, "%d\n", mddev->raid_disks);
2234} 2284}
2235 2285
@@ -2247,7 +2297,11 @@ raid_disks_store(mddev_t *mddev, const char *buf, size_t len)
2247 2297
2248 if (mddev->pers) 2298 if (mddev->pers)
2249 rv = update_raid_disks(mddev, n); 2299 rv = update_raid_disks(mddev, n);
2250 else 2300 else if (mddev->reshape_position != MaxSector) {
2301 int olddisks = mddev->raid_disks - mddev->delta_disks;
2302 mddev->delta_disks = n - olddisks;
2303 mddev->raid_disks = n;
2304 } else
2251 mddev->raid_disks = n; 2305 mddev->raid_disks = n;
2252 return rv ? rv : len; 2306 return rv ? rv : len;
2253} 2307}
@@ -2257,6 +2311,10 @@ __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
2257static ssize_t 2311static ssize_t
2258chunk_size_show(mddev_t *mddev, char *page) 2312chunk_size_show(mddev_t *mddev, char *page)
2259{ 2313{
2314 if (mddev->reshape_position != MaxSector &&
2315 mddev->chunk_size != mddev->new_chunk)
2316 return sprintf(page, "%d (%d)\n", mddev->new_chunk,
2317 mddev->chunk_size);
2260 return sprintf(page, "%d\n", mddev->chunk_size); 2318 return sprintf(page, "%d\n", mddev->chunk_size);
2261} 2319}
2262 2320
@@ -2267,12 +2325,15 @@ chunk_size_store(mddev_t *mddev, const char *buf, size_t len)
2267 char *e; 2325 char *e;
2268 unsigned long n = simple_strtoul(buf, &e, 10); 2326 unsigned long n = simple_strtoul(buf, &e, 10);
2269 2327
2270 if (mddev->pers)
2271 return -EBUSY;
2272 if (!*buf || (*e && *e != '\n')) 2328 if (!*buf || (*e && *e != '\n'))
2273 return -EINVAL; 2329 return -EINVAL;
2274 2330
2275 mddev->chunk_size = n; 2331 if (mddev->pers)
2332 return -EBUSY;
2333 else if (mddev->reshape_position != MaxSector)
2334 mddev->new_chunk = n;
2335 else
2336 mddev->chunk_size = n;
2276 return len; 2337 return len;
2277} 2338}
2278static struct md_sysfs_entry md_chunk_size = 2339static struct md_sysfs_entry md_chunk_size =
@@ -2637,8 +2698,7 @@ metadata_store(mddev_t *mddev, const char *buf, size_t len)
2637 minor = simple_strtoul(buf, &e, 10); 2698 minor = simple_strtoul(buf, &e, 10);
2638 if (e==buf || (*e && *e != '\n') ) 2699 if (e==buf || (*e && *e != '\n') )
2639 return -EINVAL; 2700 return -EINVAL;
2640 if (major >= sizeof(super_types)/sizeof(super_types[0]) || 2701 if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
2641 super_types[major].name == NULL)
2642 return -ENOENT; 2702 return -ENOENT;
2643 mddev->major_version = major; 2703 mddev->major_version = major;
2644 mddev->minor_version = minor; 2704 mddev->minor_version = minor;
@@ -2859,6 +2919,37 @@ suspend_hi_store(mddev_t *mddev, const char *buf, size_t len)
2859static struct md_sysfs_entry md_suspend_hi = 2919static struct md_sysfs_entry md_suspend_hi =
2860__ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store); 2920__ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
2861 2921
2922static ssize_t
2923reshape_position_show(mddev_t *mddev, char *page)
2924{
2925 if (mddev->reshape_position != MaxSector)
2926 return sprintf(page, "%llu\n",
2927 (unsigned long long)mddev->reshape_position);
2928 strcpy(page, "none\n");
2929 return 5;
2930}
2931
2932static ssize_t
2933reshape_position_store(mddev_t *mddev, const char *buf, size_t len)
2934{
2935 char *e;
2936 unsigned long long new = simple_strtoull(buf, &e, 10);
2937 if (mddev->pers)
2938 return -EBUSY;
2939 if (buf == e || (*e && *e != '\n'))
2940 return -EINVAL;
2941 mddev->reshape_position = new;
2942 mddev->delta_disks = 0;
2943 mddev->new_level = mddev->level;
2944 mddev->new_layout = mddev->layout;
2945 mddev->new_chunk = mddev->chunk_size;
2946 return len;
2947}
2948
2949static struct md_sysfs_entry md_reshape_position =
2950__ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
2951 reshape_position_store);
2952
2862 2953
2863static struct attribute *md_default_attrs[] = { 2954static struct attribute *md_default_attrs[] = {
2864 &md_level.attr, 2955 &md_level.attr,
@@ -2871,6 +2962,7 @@ static struct attribute *md_default_attrs[] = {
2871 &md_new_device.attr, 2962 &md_new_device.attr,
2872 &md_safe_delay.attr, 2963 &md_safe_delay.attr,
2873 &md_array_state.attr, 2964 &md_array_state.attr,
2965 &md_reshape_position.attr,
2874 NULL, 2966 NULL,
2875}; 2967};
2876 2968
@@ -3409,6 +3501,7 @@ static int do_md_stop(mddev_t * mddev, int mode)
3409 mddev->size = 0; 3501 mddev->size = 0;
3410 mddev->raid_disks = 0; 3502 mddev->raid_disks = 0;
3411 mddev->recovery_cp = 0; 3503 mddev->recovery_cp = 0;
3504 mddev->reshape_position = MaxSector;
3412 3505
3413 } else if (mddev->pers) 3506 } else if (mddev->pers)
3414 printk(KERN_INFO "md: %s switched to read-only mode.\n", 3507 printk(KERN_INFO "md: %s switched to read-only mode.\n",
@@ -4019,7 +4112,7 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
4019 if (info->raid_disks == 0) { 4112 if (info->raid_disks == 0) {
4020 /* just setting version number for superblock loading */ 4113 /* just setting version number for superblock loading */
4021 if (info->major_version < 0 || 4114 if (info->major_version < 0 ||
4022 info->major_version >= sizeof(super_types)/sizeof(super_types[0]) || 4115 info->major_version >= ARRAY_SIZE(super_types) ||
4023 super_types[info->major_version].name == NULL) { 4116 super_types[info->major_version].name == NULL) {
4024 /* maybe try to auto-load a module? */ 4117 /* maybe try to auto-load a module? */
4025 printk(KERN_INFO 4118 printk(KERN_INFO
@@ -4941,15 +5034,6 @@ static int md_seq_open(struct inode *inode, struct file *file)
4941 return error; 5034 return error;
4942} 5035}
4943 5036
4944static int md_seq_release(struct inode *inode, struct file *file)
4945{
4946 struct seq_file *m = file->private_data;
4947 struct mdstat_info *mi = m->private;
4948 m->private = NULL;
4949 kfree(mi);
4950 return seq_release(inode, file);
4951}
4952
4953static unsigned int mdstat_poll(struct file *filp, poll_table *wait) 5037static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
4954{ 5038{
4955 struct seq_file *m = filp->private_data; 5039 struct seq_file *m = filp->private_data;
@@ -4971,7 +5055,7 @@ static const struct file_operations md_seq_fops = {
4971 .open = md_seq_open, 5055 .open = md_seq_open,
4972 .read = seq_read, 5056 .read = seq_read,
4973 .llseek = seq_lseek, 5057 .llseek = seq_lseek,
4974 .release = md_seq_release, 5058 .release = seq_release_private,
4975 .poll = mdstat_poll, 5059 .poll = mdstat_poll,
4976}; 5060};
4977 5061
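
md_csum_fold() above is plain end-around-carry folding: add the two 16-bit
halves, then fold any carry back in; two passes suffice because the first
sum is at most 0xffff + 0xffff = 0x1fffe. Unlike csum_partial(), this gives
the same result on every architecture. A self-contained sketch with two
worked values (compiles as ordinary userspace C):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t md_csum_fold(uint32_t csum)
    {
        csum = (csum & 0xffff) + (csum >> 16);  /* sum halves; may carry */
        return (csum & 0xffff) + (csum >> 16);  /* fold the carry back in */
    }

    int main(void)
    {
        /* 0xffff + 0x1234 = 0x11233; then 0x1233 + 1 = 0x1234 */
        assert(md_csum_fold(0x1234ffff) == 0x1234);
        /* 0xffff + 0xffff = 0x1fffe; then 0xfffe + 1 = 0xffff */
        assert(md_csum_fold(0xffffffff) == 0xffff);
        return 0;
    }
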
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 97ee870b265d..3a95cc5e029c 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -271,21 +271,25 @@ static int raid1_end_read_request(struct bio *bio, unsigned int bytes_done, int
271 */ 271 */
272 update_head_pos(mirror, r1_bio); 272 update_head_pos(mirror, r1_bio);
273 273
274 if (uptodate || (conf->raid_disks - conf->mddev->degraded) <= 1) { 274 if (uptodate)
275 /* 275 set_bit(R1BIO_Uptodate, &r1_bio->state);
276 * Set R1BIO_Uptodate in our master bio, so that 276 else {
277 * we will return a good error code for to the higher 277 /* If all other devices have failed, we want to return
278 * levels even if IO on some other mirrored buffer fails. 278 * the error upwards rather than fail the last device.
279 * 279 * Here we redefine "uptodate" to mean "Don't want to retry"
280 * The 'master' represents the composite IO operation to
281 * user-side. So if something waits for IO, then it will
282 * wait for the 'master' bio.
283 */ 280 */
284 if (uptodate) 281 unsigned long flags;
285 set_bit(R1BIO_Uptodate, &r1_bio->state); 282 spin_lock_irqsave(&conf->device_lock, flags);
283 if (r1_bio->mddev->degraded == conf->raid_disks ||
284 (r1_bio->mddev->degraded == conf->raid_disks-1 &&
285 !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags)))
286 uptodate = 1;
287 spin_unlock_irqrestore(&conf->device_lock, flags);
288 }
286 289
290 if (uptodate)
287 raid_end_bio_io(r1_bio); 291 raid_end_bio_io(r1_bio);
288 } else { 292 else {
289 /* 293 /*
290 * oops, read error: 294 * oops, read error:
291 */ 295 */
@@ -992,13 +996,14 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
992 unsigned long flags; 996 unsigned long flags;
993 spin_lock_irqsave(&conf->device_lock, flags); 997 spin_lock_irqsave(&conf->device_lock, flags);
994 mddev->degraded++; 998 mddev->degraded++;
999 set_bit(Faulty, &rdev->flags);
995 spin_unlock_irqrestore(&conf->device_lock, flags); 1000 spin_unlock_irqrestore(&conf->device_lock, flags);
996 /* 1001 /*
997 * if recovery is running, make sure it aborts. 1002 * if recovery is running, make sure it aborts.
998 */ 1003 */
999 set_bit(MD_RECOVERY_ERR, &mddev->recovery); 1004 set_bit(MD_RECOVERY_ERR, &mddev->recovery);
1000 } 1005 } else
1001 set_bit(Faulty, &rdev->flags); 1006 set_bit(Faulty, &rdev->flags);
1002 set_bit(MD_CHANGE_DEVS, &mddev->flags); 1007 set_bit(MD_CHANGE_DEVS, &mddev->flags);
1003 printk(KERN_ALERT "raid1: Disk failure on %s, disabling device. \n" 1008 printk(KERN_ALERT "raid1: Disk failure on %s, disabling device. \n"
1004 " Operation continuing on %d devices\n", 1009 " Operation continuing on %d devices\n",
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 8d59914f2057..061375ee6592 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -353,8 +353,8 @@ static int grow_stripes(raid5_conf_t *conf, int num)
353 struct kmem_cache *sc; 353 struct kmem_cache *sc;
354 int devs = conf->raid_disks; 354 int devs = conf->raid_disks;
355 355
356 sprintf(conf->cache_name[0], "raid5/%s", mdname(conf->mddev)); 356 sprintf(conf->cache_name[0], "raid5-%s", mdname(conf->mddev));
357 sprintf(conf->cache_name[1], "raid5/%s-alt", mdname(conf->mddev)); 357 sprintf(conf->cache_name[1], "raid5-%s-alt", mdname(conf->mddev));
358 conf->active_name = 0; 358 conf->active_name = 0;
359 sc = kmem_cache_create(conf->cache_name[conf->active_name], 359 sc = kmem_cache_create(conf->cache_name[conf->active_name],
360 sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev), 360 sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index 91d25798ae4a..3a80e0cc7369 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -3,6 +3,7 @@
3# 3#
4 4
5menu "Multimedia devices" 5menu "Multimedia devices"
6 depends on HAS_IOMEM
6 7
7config VIDEO_DEV 8config VIDEO_DEV
8 tristate "Video For Linux" 9 tristate "Video For Linux"
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-remote.c b/drivers/media/dvb/dvb-usb/dvb-usb-remote.c
index 68ed3a788083..9200a30dd1b9 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-remote.c
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-remote.c
@@ -3,7 +3,7 @@
3 * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@desy.de) 3 * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@desy.de)
4 * see dvb-usb-init.c for copyright information. 4 * see dvb-usb-init.c for copyright information.
5 * 5 *
6 * This file contains functions for initializing the the input-device and for handling remote-control-queries. 6 * This file contains functions for initializing the input-device and for handling remote-control-queries.
7 */ 7 */
8#include "dvb-usb-common.h" 8#include "dvb-usb-common.h"
9#include <linux/usb/input.h> 9#include <linux/usb/input.h>
diff --git a/drivers/media/dvb/frontends/dib7000m.c b/drivers/media/dvb/frontends/dib7000m.c
index f5d40aa3d27f..f64546c6aeb5 100644
--- a/drivers/media/dvb/frontends/dib7000m.c
+++ b/drivers/media/dvb/frontends/dib7000m.c
@@ -266,7 +266,7 @@ static int dib7000m_sad_calib(struct dib7000m_state *state)
266{ 266{
267 267
268/* internal */ 268/* internal */
269// dib7000m_write_word(state, 928, (3 << 14) | (1 << 12) | (524 << 0)); // sampling clock of the SAD is writting in set_bandwidth 269// dib7000m_write_word(state, 928, (3 << 14) | (1 << 12) | (524 << 0)); // sampling clock of the SAD is written in set_bandwidth
270 dib7000m_write_word(state, 929, (0 << 1) | (0 << 0)); 270 dib7000m_write_word(state, 929, (0 << 1) | (0 << 0));
271 dib7000m_write_word(state, 930, 776); // 0.625*3.3 / 4096 271 dib7000m_write_word(state, 930, 776); // 0.625*3.3 / 4096
272 272
diff --git a/drivers/media/dvb/frontends/dib7000p.c b/drivers/media/dvb/frontends/dib7000p.c
index 0349a4b5da3f..aece458cfe12 100644
--- a/drivers/media/dvb/frontends/dib7000p.c
+++ b/drivers/media/dvb/frontends/dib7000p.c
@@ -223,7 +223,7 @@ static int dib7000p_set_bandwidth(struct dvb_frontend *demod, u8 BW_Idx)
223static int dib7000p_sad_calib(struct dib7000p_state *state) 223static int dib7000p_sad_calib(struct dib7000p_state *state)
224{ 224{
225/* internal */ 225/* internal */
226// dib7000p_write_word(state, 72, (3 << 14) | (1 << 12) | (524 << 0)); // sampling clock of the SAD is writting in set_bandwidth 226// dib7000p_write_word(state, 72, (3 << 14) | (1 << 12) | (524 << 0)); // sampling clock of the SAD is written in set_bandwidth
227 dib7000p_write_word(state, 73, (0 << 1) | (0 << 0)); 227 dib7000p_write_word(state, 73, (0 << 1) | (0 << 0));
228 dib7000p_write_word(state, 74, 776); // 0.625*3.3 / 4096 228 dib7000p_write_word(state, 74, 776); // 0.625*3.3 / 4096
229 229
diff --git a/drivers/media/dvb/frontends/tda10021.c b/drivers/media/dvb/frontends/tda10021.c
index 110536843e8e..e725f612a6b7 100644
--- a/drivers/media/dvb/frontends/tda10021.c
+++ b/drivers/media/dvb/frontends/tda10021.c
@@ -1,6 +1,6 @@
1/* 1/*
2 TDA10021 - Single Chip Cable Channel Receiver driver module 2 TDA10021 - Single Chip Cable Channel Receiver driver module
3 used on the the Siemens DVB-C cards 3 used on the Siemens DVB-C cards
4 4
5 Copyright (C) 1999 Convergence Integrated Media GmbH <ralph@convergence.de> 5 Copyright (C) 1999 Convergence Integrated Media GmbH <ralph@convergence.de>
6 Copyright (C) 2004 Markus Schulz <msc@antzsystem.de> 6 Copyright (C) 2004 Markus Schulz <msc@antzsystem.de>
diff --git a/drivers/media/dvb/frontends/ves1x93.c b/drivers/media/dvb/frontends/ves1x93.c
index 54d7b07571b8..23fd0303c91b 100644
--- a/drivers/media/dvb/frontends/ves1x93.c
+++ b/drivers/media/dvb/frontends/ves1x93.c
@@ -306,7 +306,7 @@ static int ves1x93_read_status(struct dvb_frontend* fe, fe_status_t* status)
306 * The ves1893 sometimes returns sync values that make no sense, 306 * The ves1893 sometimes returns sync values that make no sense,
307 * because, e.g., the SIGNAL bit is 0, while some of the higher 307 * because, e.g., the SIGNAL bit is 0, while some of the higher
308 * bits are 1 (and how can there be a CARRIER w/o a SIGNAL?). 308 * bits are 1 (and how can there be a CARRIER w/o a SIGNAL?).
309 * Tests showed that the the VITERBI and SYNC bits are returned 309 * Tests showed that the VITERBI and SYNC bits are returned
 310 * reliably, while the SIGNAL and CARRIER bits are sometimes wrong. 310 * reliably, while the SIGNAL and CARRIER bits are sometimes wrong.
311 * If such a case occurs, we read the value again, until we get a 311 * If such a case occurs, we read the value again, until we get a
312 * valid value. 312 * valid value.
diff --git a/drivers/media/video/em28xx/em28xx-i2c.c b/drivers/media/video/em28xx/em28xx-i2c.c
index 563a8319e608..54ccc6e1f92e 100644
--- a/drivers/media/video/em28xx/em28xx-i2c.c
+++ b/drivers/media/video/em28xx/em28xx-i2c.c
@@ -70,7 +70,7 @@ static int em2800_i2c_send_max4(struct em28xx *dev, unsigned char addr,
70 70
71 ret = dev->em28xx_write_regs(dev, 4 - len, &b2[4 - len], 2 + len); 71 ret = dev->em28xx_write_regs(dev, 4 - len, &b2[4 - len], 2 + len);
72 if (ret != 2 + len) { 72 if (ret != 2 + len) {
73 em28xx_warn("writting to i2c device failed (error=%i)\n", ret); 73 em28xx_warn("writing to i2c device failed (error=%i)\n", ret);
74 return -EIO; 74 return -EIO;
75 } 75 }
76 for (write_timeout = EM2800_I2C_WRITE_TIMEOUT; write_timeout > 0; 76 for (write_timeout = EM2800_I2C_WRITE_TIMEOUT; write_timeout > 0;
diff --git a/drivers/media/video/em28xx/em28xx-video.c b/drivers/media/video/em28xx/em28xx-video.c
index bec67609500f..2c7b158ce7e1 100644
--- a/drivers/media/video/em28xx/em28xx-video.c
+++ b/drivers/media/video/em28xx/em28xx-video.c
@@ -1729,7 +1729,7 @@ static int em28xx_usb_probe(struct usb_interface *interface,
1729 1729
1730 endpoint = &interface->cur_altsetting->endpoint[1].desc; 1730 endpoint = &interface->cur_altsetting->endpoint[1].desc;
1731 1731
1732 /* check if the the device has the iso in endpoint at the correct place */ 1732 /* check if the device has the iso in endpoint at the correct place */
1733 if ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) != 1733 if ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) !=
1734 USB_ENDPOINT_XFER_ISOC) { 1734 USB_ENDPOINT_XFER_ISOC) {
1735 em28xx_err(DRIVER_NAME " probing error: endpoint is non-ISO endpoint!\n"); 1735 em28xx_err(DRIVER_NAME " probing error: endpoint is non-ISO endpoint!\n");
diff --git a/drivers/media/video/pwc/philips.txt b/drivers/media/video/pwc/philips.txt
index f5e848410311..f9f3584281d8 100644
--- a/drivers/media/video/pwc/philips.txt
+++ b/drivers/media/video/pwc/philips.txt
@@ -54,9 +54,9 @@ fps
54 Specifies the desired framerate. Is an integer in the range of 4-30. 54 Specifies the desired framerate. Is an integer in the range of 4-30.
55 55
56fbufs 56fbufs
57 This paramter specifies the number of internal buffers to use for storing 57 This parameter specifies the number of internal buffers to use for storing
58 frames from the cam. This will help if the process that reads images from 58 frames from the cam. This will help if the process that reads images from
59 the cam is a bit slow or momentarely busy. However, on slow machines it 59 the cam is a bit slow or momentarily busy. However, on slow machines it
60 only introduces lag, so choose carefully. The default is 3, which is 60 only introduces lag, so choose carefully. The default is 3, which is
61 reasonable. You can set it between 2 and 5. 61 reasonable. You can set it between 2 and 5.
62 62
@@ -209,7 +209,7 @@ trace
209 209
210 128 0x80 PWCX debugging Off 210 128 0x80 PWCX debugging Off
211 211
212 For example, to trace the open() & read() fuctions, sum 8 + 4 = 12, 212 For example, to trace the open() & read() functions, sum 8 + 4 = 12,
213 so you would supply trace=12 during insmod or modprobe. If 213 so you would supply trace=12 during insmod or modprobe. If
214 you want to turn the initialization and probing tracing off, set trace=0. 214 you want to turn the initialization and probing tracing off, set trace=0.
215 The default value for trace is 35 (0x23). 215 The default value for trace is 35 (0x23).
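
Because trace is a plain bitmask, combining classes is just the sum (equivalently, the bitwise OR) of the flag values in the table; a tiny sketch, with the constant names invented here for illustration:

    /* Values from the trace table above; the names are made up. */
    enum { PWC_TRACE_READ = 0x04, PWC_TRACE_OPEN = 0x08, PWC_TRACE_PWCX = 0x80 };

    int trace = PWC_TRACE_OPEN | PWC_TRACE_READ;    /* 8 + 4 = 12, as in the text */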
diff --git a/drivers/media/video/usbvideo/vicam.c b/drivers/media/video/usbvideo/vicam.c
index 876fd2768242..982b115193f8 100644
--- a/drivers/media/video/usbvideo/vicam.c
+++ b/drivers/media/video/usbvideo/vicam.c
@@ -28,7 +28,7 @@
28 * 28 *
29 * Portions of this code were also copied from usbvideo.c 29 * Portions of this code were also copied from usbvideo.c
30 * 30 *
31 * Special thanks to the the whole team at Sourceforge for help making 31 * Special thanks to the whole team at Sourceforge for help making
32 * this driver become a reality. Notably: 32 * this driver become a reality. Notably:
33 * Andy Armstrong who reverse engineered the color encoding and 33 * Andy Armstrong who reverse engineered the color encoding and
34 * Pavel Machek and Chris Cheney who worked on reverse engineering the 34 * Pavel Machek and Chris Cheney who worked on reverse engineering the
diff --git a/drivers/message/fusion/Kconfig b/drivers/message/fusion/Kconfig
index 71037f91c222..c88cc75ab49b 100644
--- a/drivers/message/fusion/Kconfig
+++ b/drivers/message/fusion/Kconfig
@@ -1,5 +1,6 @@
1 1
2menu "Fusion MPT device support" 2menu "Fusion MPT device support"
3 depends on PCI
3 4
4config FUSION 5config FUSION
5 bool 6 bool
diff --git a/drivers/message/fusion/lsi/mpi_history.txt b/drivers/message/fusion/lsi/mpi_history.txt
index d6b4c607453b..ddc7ae029dd3 100644
--- a/drivers/message/fusion/lsi/mpi_history.txt
+++ b/drivers/message/fusion/lsi/mpi_history.txt
@@ -571,7 +571,7 @@ mpi_fc.h
571 * 11-02-00 01.01.01 Original release for post 1.0 work 571 * 11-02-00 01.01.01 Original release for post 1.0 work
572 * 12-04-00 01.01.02 Added messages for Common Transport Send and 572 * 12-04-00 01.01.02 Added messages for Common Transport Send and
573 * Primitive Send. 573 * Primitive Send.
574 * 01-09-01 01.01.03 Modifed some of the new flags to have an MPI prefix 574 * 01-09-01 01.01.03 Modified some of the new flags to have an MPI prefix
575 * and modified the FcPrimitiveSend flags. 575 * and modified the FcPrimitiveSend flags.
576 * 01-25-01 01.01.04 Move InitiatorIndex in LinkServiceRsp reply to a larger 576 * 01-25-01 01.01.04 Move InitiatorIndex in LinkServiceRsp reply to a larger
577 * field. 577 * field.
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 97471af4309c..5021d1a2a1d4 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -3585,7 +3585,7 @@ initChainBuffers(MPT_ADAPTER *ioc)
3585 * index = chain_idx 3585 * index = chain_idx
3586 * 3586 *
3587 * Calculate the number of chain buffers needed(plus 1) per I/O 3587 * Calculate the number of chain buffers needed(plus 1) per I/O
3588 * then multiply the the maximum number of simultaneous cmds 3588 * then multiply the maximum number of simultaneous cmds
3589 * 3589 *
3590 * num_sge = num sge in request frame + last chain buffer 3590 * num_sge = num sge in request frame + last chain buffer
3591 * scale = num sge per chain buffer if no chain element 3591 * scale = num sge per chain buffer if no chain element
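
Put as arithmetic, the sizing comment above works out to roughly the following; the numbers are illustrative only, and the exact rounding lives in initChainBuffers() itself:

    /* Illustrative values: 10 SGEs fit in the request frame (num_sge),
     * each chain buffer holds 15 (scale), the IOC accepts 64 commands. */
    int num_sge = 10, scale = 15, max_cmds = 64;
    int sges_per_io = 128;                  /* worst-case SGEs in one I/O */

    /* Chain buffers for one I/O, rounded up, plus one, then multiplied
     * by the maximum number of simultaneous commands. */
    int per_io = (sges_per_io - num_sge + scale - 1) / scale + 1;
    int total = per_io * max_cmds;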
diff --git a/drivers/message/i2o/Kconfig b/drivers/message/i2o/Kconfig
index 6443392bffff..f4ac21e5771e 100644
--- a/drivers/message/i2o/Kconfig
+++ b/drivers/message/i2o/Kconfig
@@ -1,5 +1,6 @@
1 1
2menu "I2O device support" 2menu "I2O device support"
3 depends on PCI
3 4
4config I2O 5config I2O
5 tristate "I2O support" 6 tristate "I2O support"
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index ab6e985275b2..a20a51efe118 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -3,6 +3,7 @@
3# 3#
4 4
5menu "Multifunction device drivers" 5menu "Multifunction device drivers"
6 depends on HAS_IOMEM
6 7
7config MFD_SM501 8config MFD_SM501
8 tristate "Support for Silicon Motion SM501" 9 tristate "Support for Silicon Motion SM501"
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 877e7909a0e5..2f2fbffafbe0 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -130,7 +130,7 @@ config SONY_LAPTOP
130 130
131 Read <file:Documentation/sony-laptop.txt> for more information. 131 Read <file:Documentation/sony-laptop.txt> for more information.
132 132
133config SONY_LAPTOP_OLD 133config SONYPI_COMPAT
134 bool "Sonypi compatibility" 134 bool "Sonypi compatibility"
135 depends on SONY_LAPTOP 135 depends on SONY_LAPTOP
136 ---help--- 136 ---help---
diff --git a/drivers/misc/asus-laptop.c b/drivers/misc/asus-laptop.c
index 65c32a95e121..4f9060a2a2f2 100644
--- a/drivers/misc/asus-laptop.c
+++ b/drivers/misc/asus-laptop.c
@@ -30,7 +30,7 @@
30 * Eric Burghard - LED display support for W1N 30 * Eric Burghard - LED display support for W1N
31 * Josh Green - Light Sens support 31 * Josh Green - Light Sens support
 32 * Thomas Tuttle - His first patch for led support was very helpful 32 * Thomas Tuttle - His first patch for led support was very helpful
33 * 33 * Sam Lin - GPS support
34 */ 34 */
35 35
36#include <linux/autoconf.h> 36#include <linux/autoconf.h>
@@ -48,7 +48,7 @@
48#include <acpi/acpi_bus.h> 48#include <acpi/acpi_bus.h>
49#include <asm/uaccess.h> 49#include <asm/uaccess.h>
50 50
51#define ASUS_LAPTOP_VERSION "0.41" 51#define ASUS_LAPTOP_VERSION "0.42"
52 52
53#define ASUS_HOTK_NAME "Asus Laptop Support" 53#define ASUS_HOTK_NAME "Asus Laptop Support"
54#define ASUS_HOTK_CLASS "hotkey" 54#define ASUS_HOTK_CLASS "hotkey"
@@ -83,6 +83,7 @@
83#define PLED_ON 0x20 //Phone LED 83#define PLED_ON 0x20 //Phone LED
84#define GLED_ON 0x40 //Gaming LED 84#define GLED_ON 0x40 //Gaming LED
85#define LCD_ON 0x80 //LCD backlight 85#define LCD_ON 0x80 //LCD backlight
86#define GPS_ON 0x100 //GPS
86 87
87#define ASUS_LOG ASUS_HOTK_FILE ": " 88#define ASUS_LOG ASUS_HOTK_FILE ": "
88#define ASUS_ERR KERN_ERR ASUS_LOG 89#define ASUS_ERR KERN_ERR ASUS_LOG
@@ -148,7 +149,7 @@ ASUS_HANDLE(display_set, ASUS_HOTK_PREFIX "SDSP");
148ASUS_HANDLE(display_get, "\\_SB.PCI0.P0P1.VGA.GETD", /* A6B, A6K A6R A7D F3JM L4R M6R A3G 149ASUS_HANDLE(display_get, "\\_SB.PCI0.P0P1.VGA.GETD", /* A6B, A6K A6R A7D F3JM L4R M6R A3G
149 M6A M6V VX-1 V6J V6V W3Z */ 150 M6A M6V VX-1 V6J V6V W3Z */
150 "\\_SB.PCI0.P0P2.VGA.GETD", /* A3E A4K, A4D A4L A6J A7J A8J Z71V M9V 151 "\\_SB.PCI0.P0P2.VGA.GETD", /* A3E A4K, A4D A4L A6J A7J A8J Z71V M9V
151 S5A M5A z33A W1Jc W2V */ 152 S5A M5A z33A W1Jc W2V G1 */
152 "\\_SB.PCI0.P0P3.VGA.GETD", /* A6V A6Q */ 153 "\\_SB.PCI0.P0P3.VGA.GETD", /* A6V A6Q */
153 "\\_SB.PCI0.P0PA.VGA.GETD", /* A6T, A6M */ 154 "\\_SB.PCI0.P0PA.VGA.GETD", /* A6T, A6M */
154 "\\_SB.PCI0.PCI1.VGAC.NMAP", /* L3C */ 155 "\\_SB.PCI0.PCI1.VGAC.NMAP", /* L3C */
@@ -162,6 +163,12 @@ ASUS_HANDLE(display_get, "\\_SB.PCI0.P0P1.VGA.GETD", /* A6B, A6K A6R A7D F3JM L
162ASUS_HANDLE(ls_switch, ASUS_HOTK_PREFIX "ALSC"); /* Z71A Z71V */ 163ASUS_HANDLE(ls_switch, ASUS_HOTK_PREFIX "ALSC"); /* Z71A Z71V */
163ASUS_HANDLE(ls_level, ASUS_HOTK_PREFIX "ALSL"); /* Z71A Z71V */ 164ASUS_HANDLE(ls_level, ASUS_HOTK_PREFIX "ALSL"); /* Z71A Z71V */
164 165
166/* GPS */
167/* R2H use different handle for GPS on/off */
168ASUS_HANDLE(gps_on, ASUS_HOTK_PREFIX "SDON"); /* R2H */
169ASUS_HANDLE(gps_off, ASUS_HOTK_PREFIX "SDOF"); /* R2H */
170ASUS_HANDLE(gps_status, ASUS_HOTK_PREFIX "GPST");
171
165/* 172/*
166 * This is the main structure, we can use it to store anything interesting 173 * This is the main structure, we can use it to store anything interesting
167 * about the hotk device 174 * about the hotk device
@@ -278,12 +285,28 @@ static int read_wireless_status(int mask)
278 return (hotk->status & mask) ? 1 : 0; 285 return (hotk->status & mask) ? 1 : 0;
279} 286}
280 287
288static int read_gps_status(void)
289{
290 ulong status;
291 acpi_status rv = AE_OK;
292
293 rv = acpi_evaluate_integer(gps_status_handle, NULL, NULL, &status);
294 if (ACPI_FAILURE(rv))
295 printk(ASUS_WARNING "Error reading GPS status\n");
296 else
297 return status ? 1 : 0;
298
299 return (hotk->status & GPS_ON) ? 1 : 0;
300}
301
281/* Generic LED functions */ 302/* Generic LED functions */
282static int read_status(int mask) 303static int read_status(int mask)
283{ 304{
284 /* There is a special method for both wireless devices */ 305 /* There is a special method for both wireless devices */
285 if (mask == BT_ON || mask == WL_ON) 306 if (mask == BT_ON || mask == WL_ON)
286 return read_wireless_status(mask); 307 return read_wireless_status(mask);
308 else if (mask == GPS_ON)
309 return read_gps_status();
287 310
288 return (hotk->status & mask) ? 1 : 0; 311 return (hotk->status & mask) ? 1 : 0;
289} 312}
@@ -299,6 +322,10 @@ static void write_status(acpi_handle handle, int out, int mask)
299 case GLED_ON: 322 case GLED_ON:
300 out = (out & 0x1) + 1; 323 out = (out & 0x1) + 1;
301 break; 324 break;
325 case GPS_ON:
326 handle = (out) ? gps_on_handle : gps_off_handle;
327 out = 0x02;
328 break;
302 default: 329 default:
303 out &= 0x1; 330 out &= 0x1;
304 break; 331 break;
@@ -667,6 +694,21 @@ static ssize_t store_lslvl(struct device *dev, struct device_attribute *attr,
667 return rv; 694 return rv;
668} 695}
669 696
697/*
698 * GPS
699 */
700static ssize_t show_gps(struct device *dev,
701 struct device_attribute *attr, char *buf)
702{
703 return sprintf(buf, "%d\n", read_status(GPS_ON));
704}
705
706static ssize_t store_gps(struct device *dev, struct device_attribute *attr,
707 const char *buf, size_t count)
708{
709 return store_status(buf, count, NULL, GPS_ON);
710}
711
670static void asus_hotk_notify(acpi_handle handle, u32 event, void *data) 712static void asus_hotk_notify(acpi_handle handle, u32 event, void *data)
671{ 713{
672 /* TODO Find a better way to handle events count. */ 714 /* TODO Find a better way to handle events count. */
@@ -715,6 +757,7 @@ static ASUS_CREATE_DEVICE_ATTR(display);
715static ASUS_CREATE_DEVICE_ATTR(ledd); 757static ASUS_CREATE_DEVICE_ATTR(ledd);
716static ASUS_CREATE_DEVICE_ATTR(ls_switch); 758static ASUS_CREATE_DEVICE_ATTR(ls_switch);
717static ASUS_CREATE_DEVICE_ATTR(ls_level); 759static ASUS_CREATE_DEVICE_ATTR(ls_level);
760static ASUS_CREATE_DEVICE_ATTR(gps);
718 761
719static struct attribute *asuspf_attributes[] = { 762static struct attribute *asuspf_attributes[] = {
720 &dev_attr_infos.attr, 763 &dev_attr_infos.attr,
@@ -724,6 +767,7 @@ static struct attribute *asuspf_attributes[] = {
724 &dev_attr_ledd.attr, 767 &dev_attr_ledd.attr,
725 &dev_attr_ls_switch.attr, 768 &dev_attr_ls_switch.attr,
726 &dev_attr_ls_level.attr, 769 &dev_attr_ls_level.attr,
770 &dev_attr_gps.attr,
727 NULL 771 NULL
728}; 772};
729 773
@@ -763,6 +807,9 @@ static void asus_hotk_add_fs(void)
763 ASUS_SET_DEVICE_ATTR(ls_level, 0644, show_lslvl, store_lslvl); 807 ASUS_SET_DEVICE_ATTR(ls_level, 0644, show_lslvl, store_lslvl);
764 ASUS_SET_DEVICE_ATTR(ls_switch, 0644, show_lssw, store_lssw); 808 ASUS_SET_DEVICE_ATTR(ls_switch, 0644, show_lssw, store_lssw);
765 } 809 }
810
811 if (gps_status_handle && gps_on_handle && gps_off_handle)
812 ASUS_SET_DEVICE_ATTR(gps, 0644, show_gps, store_gps);
766} 813}
767 814
768static int asus_handle_init(char *name, acpi_handle * handle, 815static int asus_handle_init(char *name, acpi_handle * handle,
@@ -890,9 +937,13 @@ static int asus_hotk_get_info(void)
890 937
 891 /* There are a lot of models with "ALSL", but a few get 938 /* There are a lot of models with "ALSL", but a few get
892 a real light sens, so we need to check it. */ 939 a real light sens, so we need to check it. */
893 if (ASUS_HANDLE_INIT(ls_switch)) 940 if (!ASUS_HANDLE_INIT(ls_switch))
894 ASUS_HANDLE_INIT(ls_level); 941 ASUS_HANDLE_INIT(ls_level);
895 942
943 ASUS_HANDLE_INIT(gps_on);
944 ASUS_HANDLE_INIT(gps_off);
945 ASUS_HANDLE_INIT(gps_status);
946
896 kfree(model); 947 kfree(model);
897 948
898 return AE_OK; 949 return AE_OK;
@@ -950,7 +1001,7 @@ static int asus_hotk_add(struct acpi_device *device)
950 * We install the handler, it will receive the hotk in parameter, so, we 1001 * We install the handler, it will receive the hotk in parameter, so, we
951 * could add other data to the hotk struct 1002 * could add other data to the hotk struct
952 */ 1003 */
953 status = acpi_install_notify_handler(hotk->handle, ACPI_SYSTEM_NOTIFY, 1004 status = acpi_install_notify_handler(hotk->handle, ACPI_ALL_NOTIFY,
954 asus_hotk_notify, hotk); 1005 asus_hotk_notify, hotk);
955 if (ACPI_FAILURE(status)) 1006 if (ACPI_FAILURE(status))
956 printk(ASUS_ERR "Error installing notify handler\n"); 1007 printk(ASUS_ERR "Error installing notify handler\n");
@@ -981,6 +1032,9 @@ static int asus_hotk_add(struct acpi_device *device)
981 if (ls_level_handle) 1032 if (ls_level_handle)
982 set_light_sens_level(hotk->light_level); 1033 set_light_sens_level(hotk->light_level);
983 1034
1035 /* GPS is on by default */
1036 write_status(NULL, 1, GPS_ON);
1037
984 end: 1038 end:
985 if (result) { 1039 if (result) {
986 kfree(hotk->name); 1040 kfree(hotk->name);
@@ -997,7 +1051,7 @@ static int asus_hotk_remove(struct acpi_device *device, int type)
997 if (!device || !acpi_driver_data(device)) 1051 if (!device || !acpi_driver_data(device))
998 return -EINVAL; 1052 return -EINVAL;
999 1053
1000 status = acpi_remove_notify_handler(hotk->handle, ACPI_SYSTEM_NOTIFY, 1054 status = acpi_remove_notify_handler(hotk->handle, ACPI_ALL_NOTIFY,
1001 asus_hotk_notify); 1055 asus_hotk_notify);
1002 if (ACPI_FAILURE(status)) 1056 if (ACPI_FAILURE(status))
1003 printk(ASUS_ERR "Error removing notify handler\n"); 1057 printk(ASUS_ERR "Error removing notify handler\n");
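
The new gps attribute is an ordinary 0644 sysfs file, so the R2H GPS can be toggled from userspace with a single write. A sketch, assuming the device ends up under /sys/devices/platform/asus-laptop (the actual path depends on how the platform device is named):

    #include <stdio.h>

    int main(void)
    {
            /* Path is an assumption; "1" powers the GPS up and "0" down,
             * matching show_gps()/store_gps() above. */
            FILE *f = fopen("/sys/devices/platform/asus-laptop/gps", "w");
            if (!f)
                    return 1;
            fputs("0\n", f);
            fclose(f);
            return 0;
    }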
diff --git a/drivers/misc/msi-laptop.c b/drivers/misc/msi-laptop.c
index 68c4b58525ba..41e901f53e7c 100644
--- a/drivers/misc/msi-laptop.c
+++ b/drivers/misc/msi-laptop.c
@@ -85,7 +85,7 @@ static int set_lcd_level(int level)
85 buf[0] = 0x80; 85 buf[0] = 0x80;
86 buf[1] = (u8) (level*31); 86 buf[1] = (u8) (level*31);
87 87
88 return ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, buf, sizeof(buf), NULL, 0); 88 return ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, buf, sizeof(buf), NULL, 0, 1);
89} 89}
90 90
91static int get_lcd_level(void) 91static int get_lcd_level(void)
@@ -93,7 +93,7 @@ static int get_lcd_level(void)
93 u8 wdata = 0, rdata; 93 u8 wdata = 0, rdata;
94 int result; 94 int result;
95 95
96 result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, &wdata, 1, &rdata, 1); 96 result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, &wdata, 1, &rdata, 1, 1);
97 if (result < 0) 97 if (result < 0)
98 return result; 98 return result;
99 99
@@ -105,7 +105,7 @@ static int get_auto_brightness(void)
105 u8 wdata = 4, rdata; 105 u8 wdata = 4, rdata;
106 int result; 106 int result;
107 107
108 result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, &wdata, 1, &rdata, 1); 108 result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, &wdata, 1, &rdata, 1, 1);
109 if (result < 0) 109 if (result < 0)
110 return result; 110 return result;
111 111
@@ -119,14 +119,14 @@ static int set_auto_brightness(int enable)
119 119
120 wdata[0] = 4; 120 wdata[0] = 4;
121 121
122 result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, wdata, 1, &rdata, 1); 122 result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, wdata, 1, &rdata, 1, 1);
123 if (result < 0) 123 if (result < 0)
124 return result; 124 return result;
125 125
126 wdata[0] = 0x84; 126 wdata[0] = 0x84;
127 wdata[1] = (rdata & 0xF7) | (enable ? 8 : 0); 127 wdata[1] = (rdata & 0xF7) | (enable ? 8 : 0);
128 128
129 return ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, wdata, 2, NULL, 0); 129 return ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, wdata, 2, NULL, 0, 1);
130} 130}
131 131
132static int get_wireless_state(int *wlan, int *bluetooth) 132static int get_wireless_state(int *wlan, int *bluetooth)
@@ -134,7 +134,7 @@ static int get_wireless_state(int *wlan, int *bluetooth)
134 u8 wdata = 0, rdata; 134 u8 wdata = 0, rdata;
135 int result; 135 int result;
136 136
137 result = ec_transaction(MSI_EC_COMMAND_WIRELESS, &wdata, 1, &rdata, 1); 137 result = ec_transaction(MSI_EC_COMMAND_WIRELESS, &wdata, 1, &rdata, 1, 1);
138 if (result < 0) 138 if (result < 0)
139 return -1; 139 return -1;
140 140
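
Every msi-laptop call site above gains the same trailing 1: ec_transaction() grew one more parameter in this series, and the driver opts in everywhere. A sketch of the widened prototype as these calls assume it; the name of the final parameter (force_poll) is an assumption here:

    int ec_transaction(u8 command, const u8 *wdata, unsigned wdata_len,
                       u8 *rdata, unsigned rdata_len, int force_poll);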
diff --git a/drivers/misc/sony-laptop.c b/drivers/misc/sony-laptop.c
index c15c1f61bd1b..8ee0321ef1c8 100644
--- a/drivers/misc/sony-laptop.c
+++ b/drivers/misc/sony-laptop.c
@@ -63,7 +63,7 @@
63#include <asm/uaccess.h> 63#include <asm/uaccess.h>
64#include <linux/sonypi.h> 64#include <linux/sonypi.h>
65#include <linux/sony-laptop.h> 65#include <linux/sony-laptop.h>
66#ifdef CONFIG_SONY_LAPTOP_OLD 66#ifdef CONFIG_SONYPI_COMPAT
67#include <linux/poll.h> 67#include <linux/poll.h>
68#include <linux/miscdevice.h> 68#include <linux/miscdevice.h>
69#endif 69#endif
@@ -114,7 +114,7 @@ MODULE_PARM_DESC(camera,
114 "set this to 1 to enable Motion Eye camera controls " 114 "set this to 1 to enable Motion Eye camera controls "
115 "(only use it if you have a C1VE or C1VN model)"); 115 "(only use it if you have a C1VE or C1VN model)");
116 116
117#ifdef CONFIG_SONY_LAPTOP_OLD 117#ifdef CONFIG_SONYPI_COMPAT
118static int minor = -1; 118static int minor = -1;
119module_param(minor, int, 0); 119module_param(minor, int, 0);
120MODULE_PARM_DESC(minor, 120MODULE_PARM_DESC(minor,
@@ -1504,7 +1504,7 @@ static struct attribute_group spic_attribute_group = {
1504}; 1504};
1505 1505
1506/******** SONYPI compatibility **********/ 1506/******** SONYPI compatibility **********/
1507#ifdef CONFIG_SONY_LAPTOP_OLD 1507#ifdef CONFIG_SONYPI_COMPAT
1508 1508
1509/* battery / brightness / temperature addresses */ 1509/* battery / brightness / temperature addresses */
1510#define SONYPI_BAT_FLAGS 0x81 1510#define SONYPI_BAT_FLAGS 0x81
@@ -1798,7 +1798,7 @@ static void sonypi_compat_exit(void)
1798static int sonypi_compat_init(void) { return 0; } 1798static int sonypi_compat_init(void) { return 0; }
1799static void sonypi_compat_exit(void) { } 1799static void sonypi_compat_exit(void) { }
1800static void sonypi_compat_report_event(u8 event) { } 1800static void sonypi_compat_report_event(u8 event) { }
1801#endif /* CONFIG_SONY_LAPTOP_OLD */ 1801#endif /* CONFIG_SONYPI_COMPAT */
1802 1802
1803/* 1803/*
1804 * ACPI callbacks 1804 * ACPI callbacks
diff --git a/drivers/mmc/Kconfig b/drivers/mmc/Kconfig
index 45b7d53b949c..c0b41e8bcd9d 100644
--- a/drivers/mmc/Kconfig
+++ b/drivers/mmc/Kconfig
@@ -4,6 +4,7 @@
4 4
5menuconfig MMC 5menuconfig MMC
6 tristate "MMC/SD card support" 6 tristate "MMC/SD card support"
7 depends on HAS_IOMEM
7 help 8 help
8 MMC is the "multi-media card" bus protocol. 9 MMC is the "multi-media card" bus protocol.
9 10
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index b6c16704aaab..7385acfa1dd9 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -501,9 +501,9 @@ void mmc_detect_change(struct mmc_host *host, unsigned long delay)
501{ 501{
502#ifdef CONFIG_MMC_DEBUG 502#ifdef CONFIG_MMC_DEBUG
503 unsigned long flags; 503 unsigned long flags;
504 spin_lock_irqsave(host->lock, flags); 504 spin_lock_irqsave(&host->lock, flags);
505 BUG_ON(host->removed); 505 BUG_ON(host->removed);
506 spin_unlock_irqrestore(host->lock, flags); 506 spin_unlock_irqrestore(&host->lock, flags);
507#endif 507#endif
508 508
509 mmc_schedule_delayed_work(&host->detect, delay); 509 mmc_schedule_delayed_work(&host->detect, delay);
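
The mmc_detect_change() fix is small but real: host->lock is a spinlock_t embedded in struct mmc_host, and spin_lock_irqsave() takes a pointer, so the member must be passed by address. A minimal sketch of the corrected pattern:

    spinlock_t lock;                /* embedded, as in struct mmc_host */
    unsigned long flags;

    spin_lock_irqsave(&lock, flags);        /* note the '&' */
    /* ... inspect state that the lock guards ... */
    spin_unlock_irqrestore(&lock, flags);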
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index c1b47db29bd2..fbec8cd55e38 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -2,6 +2,7 @@
2 2
3menuconfig MTD 3menuconfig MTD
4 tristate "Memory Technology Device (MTD) support" 4 tristate "Memory Technology Device (MTD) support"
5 depends on HAS_IOMEM
5 help 6 help
6 Memory Technology Devices are flash, RAM and similar chips, often 7 Memory Technology Devices are flash, RAM and similar chips, often
7 used for solid state file systems on embedded devices. This option 8 used for solid state file systems on embedded devices. This option
diff --git a/drivers/mtd/chips/Kconfig b/drivers/mtd/chips/Kconfig
index d28e0fc85e12..479d32b57a1e 100644
--- a/drivers/mtd/chips/Kconfig
+++ b/drivers/mtd/chips/Kconfig
@@ -1,5 +1,4 @@
1# drivers/mtd/chips/Kconfig 1# drivers/mtd/chips/Kconfig
2# $Id: Kconfig,v 1.18 2005/11/07 11:14:22 gleixner Exp $
3 2
4menu "RAM/ROM/Flash chip drivers" 3menu "RAM/ROM/Flash chip drivers"
5 depends on MTD!=n 4 depends on MTD!=n
@@ -231,45 +230,6 @@ config MTD_ABSENT
231 the system regardless of media presence. Device nodes created 230 the system regardless of media presence. Device nodes created
232 with this driver will return -ENODEV upon access. 231 with this driver will return -ENODEV upon access.
233 232
234config MTD_OBSOLETE_CHIPS
235 bool "Older (theoretically obsoleted now) drivers for non-CFI chips"
236 help
237 This option does not enable any code directly, but will allow you to
238 select some other chip drivers which are now considered obsolete,
239 because the generic CONFIG_JEDECPROBE code above should now detect
240 the chips which are supported by these drivers, and allow the generic
241 CFI-compatible drivers to drive the chips. Say 'N' here unless you have
242 already tried the CONFIG_JEDECPROBE method and reported its failure
243 to the MTD mailing list at <linux-mtd@lists.infradead.org>
244
245config MTD_AMDSTD
246 tristate "AMD compatible flash chip support (non-CFI)"
247 depends on MTD_OBSOLETE_CHIPS && BROKEN
248 help
249 This option enables support for flash chips using AMD-compatible
250 commands, including some which are not CFI-compatible and hence
251 cannot be used with the CONFIG_MTD_CFI_AMDSTD option.
252
253 It also works on AMD compatible chips that do conform to CFI.
254
255config MTD_SHARP
256 tristate "pre-CFI Sharp chip support"
257 depends on MTD_OBSOLETE_CHIPS
258 help
259 This option enables support for flash chips using Sharp-compatible
260 commands, including some which are not CFI-compatible and hence
261 cannot be used with the CONFIG_MTD_CFI_INTELxxx options.
262
263config MTD_JEDEC
264 tristate "JEDEC device support"
265 depends on MTD_OBSOLETE_CHIPS && BROKEN
266 help
267 Enable older JEDEC flash interface devices for self
268 programming flash. It is commonly used in older AMD chips. It is
269 only called JEDEC because the JEDEC association
270 <http://www.jedec.org/> distributes the identification codes for the
271 chips.
272
273config MTD_XIP 233config MTD_XIP
274 bool "XIP aware MTD support" 234 bool "XIP aware MTD support"
275 depends on !SMP && (MTD_CFI_INTELEXT || MTD_CFI_AMDSTD) && EXPERIMENTAL && ARCH_MTD_XIP 235 depends on !SMP && (MTD_CFI_INTELEXT || MTD_CFI_AMDSTD) && EXPERIMENTAL && ARCH_MTD_XIP
diff --git a/drivers/mtd/chips/Makefile b/drivers/mtd/chips/Makefile
index 75bc1c2a0f43..36582412ccda 100644
--- a/drivers/mtd/chips/Makefile
+++ b/drivers/mtd/chips/Makefile
@@ -1,19 +1,15 @@
1# 1#
2# linux/drivers/chips/Makefile 2# linux/drivers/chips/Makefile
3# 3#
4# $Id: Makefile.common,v 1.5 2005/11/07 11:14:22 gleixner Exp $
5 4
6obj-$(CONFIG_MTD) += chipreg.o 5obj-$(CONFIG_MTD) += chipreg.o
7obj-$(CONFIG_MTD_AMDSTD) += amd_flash.o
8obj-$(CONFIG_MTD_CFI) += cfi_probe.o 6obj-$(CONFIG_MTD_CFI) += cfi_probe.o
9obj-$(CONFIG_MTD_CFI_UTIL) += cfi_util.o 7obj-$(CONFIG_MTD_CFI_UTIL) += cfi_util.o
10obj-$(CONFIG_MTD_CFI_STAA) += cfi_cmdset_0020.o 8obj-$(CONFIG_MTD_CFI_STAA) += cfi_cmdset_0020.o
11obj-$(CONFIG_MTD_CFI_AMDSTD) += cfi_cmdset_0002.o 9obj-$(CONFIG_MTD_CFI_AMDSTD) += cfi_cmdset_0002.o
12obj-$(CONFIG_MTD_CFI_INTELEXT) += cfi_cmdset_0001.o 10obj-$(CONFIG_MTD_CFI_INTELEXT) += cfi_cmdset_0001.o
13obj-$(CONFIG_MTD_GEN_PROBE) += gen_probe.o 11obj-$(CONFIG_MTD_GEN_PROBE) += gen_probe.o
14obj-$(CONFIG_MTD_JEDEC) += jedec.o
15obj-$(CONFIG_MTD_JEDECPROBE) += jedec_probe.o 12obj-$(CONFIG_MTD_JEDECPROBE) += jedec_probe.o
16obj-$(CONFIG_MTD_RAM) += map_ram.o 13obj-$(CONFIG_MTD_RAM) += map_ram.o
17obj-$(CONFIG_MTD_ROM) += map_rom.o 14obj-$(CONFIG_MTD_ROM) += map_rom.o
18obj-$(CONFIG_MTD_SHARP) += sharp.o
19obj-$(CONFIG_MTD_ABSENT) += map_absent.o 15obj-$(CONFIG_MTD_ABSENT) += map_absent.o
diff --git a/drivers/mtd/chips/amd_flash.c b/drivers/mtd/chips/amd_flash.c
deleted file mode 100644
index e7999f15d85a..000000000000
--- a/drivers/mtd/chips/amd_flash.c
+++ /dev/null
@@ -1,1396 +0,0 @@
1/*
2 * MTD map driver for AMD compatible flash chips (non-CFI)
3 *
4 * Author: Jonas Holmberg <jonas.holmberg@axis.com>
5 *
6 * $Id: amd_flash.c,v 1.28 2005/11/07 11:14:22 gleixner Exp $
7 *
8 * Copyright (c) 2001 Axis Communications AB
9 *
10 * This file is under GPL.
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/types.h>
16#include <linux/kernel.h>
17#include <linux/sched.h>
18#include <linux/errno.h>
19#include <linux/slab.h>
20#include <linux/delay.h>
21#include <linux/interrupt.h>
22#include <linux/init.h>
23#include <linux/mtd/map.h>
24#include <linux/mtd/mtd.h>
25#include <linux/mtd/flashchip.h>
26
27/* There's no limit. It exists only to avoid realloc. */
28#define MAX_AMD_CHIPS 8
29
30#define DEVICE_TYPE_X8 (8 / 8)
31#define DEVICE_TYPE_X16 (16 / 8)
32#define DEVICE_TYPE_X32 (32 / 8)
33
34/* Addresses */
35#define ADDR_MANUFACTURER 0x0000
36#define ADDR_DEVICE_ID 0x0001
37#define ADDR_SECTOR_LOCK 0x0002
38#define ADDR_HANDSHAKE 0x0003
39#define ADDR_UNLOCK_1 0x0555
40#define ADDR_UNLOCK_2 0x02AA
41
42/* Commands */
43#define CMD_UNLOCK_DATA_1 0x00AA
44#define CMD_UNLOCK_DATA_2 0x0055
45#define CMD_MANUFACTURER_UNLOCK_DATA 0x0090
46#define CMD_UNLOCK_BYPASS_MODE 0x0020
47#define CMD_PROGRAM_UNLOCK_DATA 0x00A0
48#define CMD_RESET_DATA 0x00F0
49#define CMD_SECTOR_ERASE_UNLOCK_DATA 0x0080
50#define CMD_SECTOR_ERASE_UNLOCK_DATA_2 0x0030
51
52#define CMD_UNLOCK_SECTOR 0x0060
53
54/* Manufacturers */
55#define MANUFACTURER_AMD 0x0001
56#define MANUFACTURER_ATMEL 0x001F
57#define MANUFACTURER_FUJITSU 0x0004
58#define MANUFACTURER_ST 0x0020
59#define MANUFACTURER_SST 0x00BF
60#define MANUFACTURER_TOSHIBA 0x0098
61
62/* AMD */
63#define AM29F800BB 0x2258
64#define AM29F800BT 0x22D6
65#define AM29LV800BB 0x225B
66#define AM29LV800BT 0x22DA
67#define AM29LV160DT 0x22C4
68#define AM29LV160DB 0x2249
69#define AM29BDS323D 0x22D1
70
71/* Atmel */
72#define AT49xV16x 0x00C0
73#define AT49xV16xT 0x00C2
74
75/* Fujitsu */
76#define MBM29LV160TE 0x22C4
77#define MBM29LV160BE 0x2249
78#define MBM29LV800BB 0x225B
79
80/* ST - www.st.com */
81#define M29W800T 0x00D7
82#define M29W160DT 0x22C4
83#define M29W160DB 0x2249
84
85/* SST */
86#define SST39LF800 0x2781
87#define SST39LF160 0x2782
88
89/* Toshiba */
90#define TC58FVT160 0x00C2
91#define TC58FVB160 0x0043
92
93#define D6_MASK 0x40
94
95struct amd_flash_private {
96 int device_type;
97 int interleave;
98 int numchips;
99 unsigned long chipshift;
100 struct flchip chips[0];
101};
102
103struct amd_flash_info {
104 const __u16 mfr_id;
105 const __u16 dev_id;
106 const char *name;
107 const u_long size;
108 const int numeraseregions;
109 const struct mtd_erase_region_info regions[4];
110};
111
112
113
114static int amd_flash_read(struct mtd_info *, loff_t, size_t, size_t *,
115 u_char *);
116static int amd_flash_write(struct mtd_info *, loff_t, size_t, size_t *,
117 const u_char *);
118static int amd_flash_erase(struct mtd_info *, struct erase_info *);
119static void amd_flash_sync(struct mtd_info *);
120static int amd_flash_suspend(struct mtd_info *);
121static void amd_flash_resume(struct mtd_info *);
122static void amd_flash_destroy(struct mtd_info *);
123static struct mtd_info *amd_flash_probe(struct map_info *map);
124
125
126static struct mtd_chip_driver amd_flash_chipdrv = {
127 .probe = amd_flash_probe,
128 .destroy = amd_flash_destroy,
129 .name = "amd_flash",
130 .module = THIS_MODULE
131};
132
133static inline __u32 wide_read(struct map_info *map, __u32 addr)
134{
135 if (map->buswidth == 1) {
136 return map_read8(map, addr);
137 } else if (map->buswidth == 2) {
138 return map_read16(map, addr);
139 } else if (map->buswidth == 4) {
140 return map_read32(map, addr);
141 }
142
143 return 0;
144}
145
146static inline void wide_write(struct map_info *map, __u32 val, __u32 addr)
147{
148 if (map->buswidth == 1) {
149 map_write8(map, val, addr);
150 } else if (map->buswidth == 2) {
151 map_write16(map, val, addr);
152 } else if (map->buswidth == 4) {
153 map_write32(map, val, addr);
154 }
155}
156
157static inline __u32 make_cmd(struct map_info *map, __u32 cmd)
158{
159 const struct amd_flash_private *private = map->fldrv_priv;
160 if ((private->interleave == 2) &&
161 (private->device_type == DEVICE_TYPE_X16)) {
162 cmd |= (cmd << 16);
163 }
164
165 return cmd;
166}
167
168static inline void send_unlock(struct map_info *map, unsigned long base)
169{
170 wide_write(map, (CMD_UNLOCK_DATA_1 << 16) | CMD_UNLOCK_DATA_1,
171 base + (map->buswidth * ADDR_UNLOCK_1));
172 wide_write(map, (CMD_UNLOCK_DATA_2 << 16) | CMD_UNLOCK_DATA_2,
173 base + (map->buswidth * ADDR_UNLOCK_2));
174}
175
176static inline void send_cmd(struct map_info *map, unsigned long base, __u32 cmd)
177{
178 send_unlock(map, base);
179 wide_write(map, make_cmd(map, cmd),
180 base + (map->buswidth * ADDR_UNLOCK_1));
181}
182
183static inline void send_cmd_to_addr(struct map_info *map, unsigned long base,
184 __u32 cmd, unsigned long addr)
185{
186 send_unlock(map, base);
187 wide_write(map, make_cmd(map, cmd), addr);
188}
189
190static inline int flash_is_busy(struct map_info *map, unsigned long addr,
191 int interleave)
192{
193
194 if ((interleave == 2) && (map->buswidth == 4)) {
195 __u32 read1, read2;
196
197 read1 = wide_read(map, addr);
198 read2 = wide_read(map, addr);
199
200 return (((read1 >> 16) & D6_MASK) !=
201 ((read2 >> 16) & D6_MASK)) ||
202 (((read1 & 0xffff) & D6_MASK) !=
203 ((read2 & 0xffff) & D6_MASK));
204 }
205
206 return ((wide_read(map, addr) & D6_MASK) !=
207 (wide_read(map, addr) & D6_MASK));
208}
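
flash_is_busy() above relies on the AMD DQ6 toggle bit: while a program or erase is in progress the chip inverts data bit 6 on every read, so two back-to-back reads disagree; once they match, the operation is done. A conceptual sketch (the read path here is hypothetical; the real driver goes through wide_read() to honour the bus width):

    /* Two successive reads of the same address: bit 6 (D6_MASK) toggles
     * while the operation runs and holds steady once it completes. */
    static inline int dq6_busy(volatile const unsigned char *addr)
    {
            unsigned char a = *addr;
            unsigned char b = *addr;
            return ((a ^ b) & 0x40) != 0;
    }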
209
210static inline void unlock_sector(struct map_info *map, unsigned long sect_addr,
211 int unlock)
212{
213 /* Sector lock address. A6 = 1 for unlock, A6 = 0 for lock */
214 int SLA = unlock ?
215 (sect_addr | (0x40 * map->buswidth)) :
216 (sect_addr & ~(0x40 * map->buswidth)) ;
217
218 __u32 cmd = make_cmd(map, CMD_UNLOCK_SECTOR);
219
220 wide_write(map, make_cmd(map, CMD_RESET_DATA), 0);
221 wide_write(map, cmd, SLA); /* 1st cycle: write cmd to any address */
222 wide_write(map, cmd, SLA); /* 2nd cycle: write cmd to any address */
223 wide_write(map, cmd, SLA); /* 3rd cycle: write cmd to SLA */
224}
225
226static inline int is_sector_locked(struct map_info *map,
227 unsigned long sect_addr)
228{
229 int status;
230
231 wide_write(map, CMD_RESET_DATA, 0);
232 send_cmd(map, sect_addr, CMD_MANUFACTURER_UNLOCK_DATA);
233
234 /* status is 0x0000 for unlocked and 0x0001 for locked */
235 status = wide_read(map, sect_addr + (map->buswidth * ADDR_SECTOR_LOCK));
236 wide_write(map, CMD_RESET_DATA, 0);
237 return status;
238}
239
240static int amd_flash_do_unlock(struct mtd_info *mtd, loff_t ofs, size_t len,
241 int is_unlock)
242{
243 struct map_info *map;
244 struct mtd_erase_region_info *merip;
245 int eraseoffset, erasesize, eraseblocks;
246 int i;
247 int retval = 0;
248 int lock_status;
249
250 map = mtd->priv;
251
252 /* Pass the whole chip through sector by sector and check for each
253 sector if the sector and the given interval overlap */
254 for(i = 0; i < mtd->numeraseregions; i++) {
255 merip = &mtd->eraseregions[i];
256
257 eraseoffset = merip->offset;
258 erasesize = merip->erasesize;
259 eraseblocks = merip->numblocks;
260
261 if (ofs > eraseoffset + erasesize)
262 continue;
263
264 while (eraseblocks > 0) {
265 if (ofs < eraseoffset + erasesize && ofs + len > eraseoffset) {
266 unlock_sector(map, eraseoffset, is_unlock);
267
268 lock_status = is_sector_locked(map, eraseoffset);
269
270 if (is_unlock && lock_status) {
271 printk("Cannot unlock sector at address %x length %xx\n",
272 eraseoffset, merip->erasesize);
273 retval = -1;
274 } else if (!is_unlock && !lock_status) {
275 printk("Cannot lock sector at address %x length %x\n",
276 eraseoffset, merip->erasesize);
277 retval = -1;
278 }
279 }
280 eraseoffset += erasesize;
281 eraseblocks --;
282 }
283 }
284 return retval;
285}
286
287static int amd_flash_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
288{
289 return amd_flash_do_unlock(mtd, ofs, len, 1);
290}
291
292static int amd_flash_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
293{
294 return amd_flash_do_unlock(mtd, ofs, len, 0);
295}
296
297
298/*
299 * Reads JEDEC manufacturer ID and device ID and returns the index of the first
300 * matching table entry (-1 if not found or alias for already found chip).
301 */
302static int probe_new_chip(struct mtd_info *mtd, __u32 base,
303 struct flchip *chips,
304 struct amd_flash_private *private,
305 const struct amd_flash_info *table, int table_size)
306{
307 __u32 mfr_id;
308 __u32 dev_id;
309 struct map_info *map = mtd->priv;
310 struct amd_flash_private temp;
311 int i;
312
313 temp.device_type = DEVICE_TYPE_X16; // Assume X16 (FIXME)
314 temp.interleave = 2;
315 map->fldrv_priv = &temp;
316
317 /* Enter autoselect mode. */
318 send_cmd(map, base, CMD_RESET_DATA);
319 send_cmd(map, base, CMD_MANUFACTURER_UNLOCK_DATA);
320
321 mfr_id = wide_read(map, base + (map->buswidth * ADDR_MANUFACTURER));
322 dev_id = wide_read(map, base + (map->buswidth * ADDR_DEVICE_ID));
323
324 if ((map->buswidth == 4) && ((mfr_id >> 16) == (mfr_id & 0xffff)) &&
325 ((dev_id >> 16) == (dev_id & 0xffff))) {
326 mfr_id &= 0xffff;
327 dev_id &= 0xffff;
328 } else {
329 temp.interleave = 1;
330 }
331
332 for (i = 0; i < table_size; i++) {
333 if ((mfr_id == table[i].mfr_id) &&
334 (dev_id == table[i].dev_id)) {
335 if (chips) {
336 int j;
337
338 /* Is this an alias for an already found chip?
339 * In that case that chip should be in
340 * autoselect mode now.
341 */
342 for (j = 0; j < private->numchips; j++) {
343 __u32 mfr_id_other;
344 __u32 dev_id_other;
345
346 mfr_id_other =
347 wide_read(map, chips[j].start +
348 (map->buswidth *
349 ADDR_MANUFACTURER
350 ));
351 dev_id_other =
352 wide_read(map, chips[j].start +
353 (map->buswidth *
354 ADDR_DEVICE_ID));
355 if (temp.interleave == 2) {
356 mfr_id_other &= 0xffff;
357 dev_id_other &= 0xffff;
358 }
359 if ((mfr_id_other == mfr_id) &&
360 (dev_id_other == dev_id)) {
361
362 /* Exit autoselect mode. */
363 send_cmd(map, base,
364 CMD_RESET_DATA);
365
366 return -1;
367 }
368 }
369
370 if (private->numchips == MAX_AMD_CHIPS) {
371 printk(KERN_WARNING
372 "%s: Too many flash chips "
373 "detected. Increase "
374 "MAX_AMD_CHIPS from %d.\n",
375 map->name, MAX_AMD_CHIPS);
376
377 return -1;
378 }
379
380 chips[private->numchips].start = base;
381 chips[private->numchips].state = FL_READY;
382 chips[private->numchips].mutex =
383 &chips[private->numchips]._spinlock;
384 private->numchips++;
385 }
386
387 printk("%s: Found %d x %ldMiB %s at 0x%x\n", map->name,
388 temp.interleave, (table[i].size)/(1024*1024),
389 table[i].name, base);
390
391 mtd->size += table[i].size * temp.interleave;
392 mtd->numeraseregions += table[i].numeraseregions;
393
394 break;
395 }
396 }
397
398 /* Exit autoselect mode. */
399 send_cmd(map, base, CMD_RESET_DATA);
400
401 if (i == table_size) {
402 printk(KERN_DEBUG "%s: unknown flash device at 0x%x, "
403 "mfr id 0x%x, dev id 0x%x\n", map->name,
404 base, mfr_id, dev_id);
405 map->fldrv_priv = NULL;
406
407 return -1;
408 }
409
410 private->device_type = temp.device_type;
411 private->interleave = temp.interleave;
412
413 return i;
414}
415
416
417
418static struct mtd_info *amd_flash_probe(struct map_info *map)
419{
420 static const struct amd_flash_info table[] = {
421 {
422 .mfr_id = MANUFACTURER_AMD,
423 .dev_id = AM29LV160DT,
424 .name = "AMD AM29LV160DT",
425 .size = 0x00200000,
426 .numeraseregions = 4,
427 .regions = {
428 { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 },
429 { .offset = 0x1F0000, .erasesize = 0x08000, .numblocks = 1 },
430 { .offset = 0x1F8000, .erasesize = 0x02000, .numblocks = 2 },
431 { .offset = 0x1FC000, .erasesize = 0x04000, .numblocks = 1 }
432 }
433 }, {
434 .mfr_id = MANUFACTURER_AMD,
435 .dev_id = AM29LV160DB,
436 .name = "AMD AM29LV160DB",
437 .size = 0x00200000,
438 .numeraseregions = 4,
439 .regions = {
440 { .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 },
441 { .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 },
442 { .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 },
443 { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 31 }
444 }
445 }, {
446 .mfr_id = MANUFACTURER_TOSHIBA,
447 .dev_id = TC58FVT160,
448 .name = "Toshiba TC58FVT160",
449 .size = 0x00200000,
450 .numeraseregions = 4,
451 .regions = {
452 { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 },
453 { .offset = 0x1F0000, .erasesize = 0x08000, .numblocks = 1 },
454 { .offset = 0x1F8000, .erasesize = 0x02000, .numblocks = 2 },
455 { .offset = 0x1FC000, .erasesize = 0x04000, .numblocks = 1 }
456 }
457 }, {
458 .mfr_id = MANUFACTURER_FUJITSU,
459 .dev_id = MBM29LV160TE,
460 .name = "Fujitsu MBM29LV160TE",
461 .size = 0x00200000,
462 .numeraseregions = 4,
463 .regions = {
464 { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 },
465 { .offset = 0x1F0000, .erasesize = 0x08000, .numblocks = 1 },
466 { .offset = 0x1F8000, .erasesize = 0x02000, .numblocks = 2 },
467 { .offset = 0x1FC000, .erasesize = 0x04000, .numblocks = 1 }
468 }
469 }, {
470 .mfr_id = MANUFACTURER_TOSHIBA,
471 .dev_id = TC58FVB160,
472 .name = "Toshiba TC58FVB160",
473 .size = 0x00200000,
474 .numeraseregions = 4,
475 .regions = {
476 { .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 },
477 { .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 },
478 { .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 },
479 { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 31 }
480 }
481 }, {
482 .mfr_id = MANUFACTURER_FUJITSU,
483 .dev_id = MBM29LV160BE,
484 .name = "Fujitsu MBM29LV160BE",
485 .size = 0x00200000,
486 .numeraseregions = 4,
487 .regions = {
488 { .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 },
489 { .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 },
490 { .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 },
491 { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 31 }
492 }
493 }, {
494 .mfr_id = MANUFACTURER_AMD,
495 .dev_id = AM29LV800BB,
496 .name = "AMD AM29LV800BB",
497 .size = 0x00100000,
498 .numeraseregions = 4,
499 .regions = {
500 { .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 },
501 { .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 },
502 { .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 },
503 { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 15 }
504 }
505 }, {
506 .mfr_id = MANUFACTURER_AMD,
507 .dev_id = AM29F800BB,
508 .name = "AMD AM29F800BB",
509 .size = 0x00100000,
510 .numeraseregions = 4,
511 .regions = {
512 { .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 },
513 { .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 },
514 { .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 },
515 { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 15 }
516 }
517 }, {
518 .mfr_id = MANUFACTURER_AMD,
519 .dev_id = AM29LV800BT,
520 .name = "AMD AM29LV800BT",
521 .size = 0x00100000,
522 .numeraseregions = 4,
523 .regions = {
524 { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 15 },
525 { .offset = 0x0F0000, .erasesize = 0x08000, .numblocks = 1 },
526 { .offset = 0x0F8000, .erasesize = 0x02000, .numblocks = 2 },
527 { .offset = 0x0FC000, .erasesize = 0x04000, .numblocks = 1 }
528 }
529 }, {
530 .mfr_id = MANUFACTURER_AMD,
531 .dev_id = AM29F800BT,
532 .name = "AMD AM29F800BT",
533 .size = 0x00100000,
534 .numeraseregions = 4,
535 .regions = {
536 { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 15 },
537 { .offset = 0x0F0000, .erasesize = 0x08000, .numblocks = 1 },
538 { .offset = 0x0F8000, .erasesize = 0x02000, .numblocks = 2 },
539 { .offset = 0x0FC000, .erasesize = 0x04000, .numblocks = 1 }
540 }
541 }, {
542 .mfr_id = MANUFACTURER_AMD,
543 .dev_id = AM29LV800BB,
544 .name = "AMD AM29LV800BB",
545 .size = 0x00100000,
546 .numeraseregions = 4,
547 .regions = {
548 { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 15 },
549 { .offset = 0x0F0000, .erasesize = 0x08000, .numblocks = 1 },
550 { .offset = 0x0F8000, .erasesize = 0x02000, .numblocks = 2 },
551 { .offset = 0x0FC000, .erasesize = 0x04000, .numblocks = 1 }
552 }
553 }, {
554 .mfr_id = MANUFACTURER_FUJITSU,
555 .dev_id = MBM29LV800BB,
556 .name = "Fujitsu MBM29LV800BB",
557 .size = 0x00100000,
558 .numeraseregions = 4,
559 .regions = {
560 { .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 },
561 { .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 },
562 { .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 },
563 { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 15 }
564 }
565 }, {
566 .mfr_id = MANUFACTURER_ST,
567 .dev_id = M29W800T,
568 .name = "ST M29W800T",
569 .size = 0x00100000,
570 .numeraseregions = 4,
571 .regions = {
572 { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 15 },
573 { .offset = 0x0F0000, .erasesize = 0x08000, .numblocks = 1 },
574 { .offset = 0x0F8000, .erasesize = 0x02000, .numblocks = 2 },
575 { .offset = 0x0FC000, .erasesize = 0x04000, .numblocks = 1 }
576 }
577 }, {
578 .mfr_id = MANUFACTURER_ST,
579 .dev_id = M29W160DT,
580 .name = "ST M29W160DT",
581 .size = 0x00200000,
582 .numeraseregions = 4,
583 .regions = {
584 { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 },
585 { .offset = 0x1F0000, .erasesize = 0x08000, .numblocks = 1 },
586 { .offset = 0x1F8000, .erasesize = 0x02000, .numblocks = 2 },
587 { .offset = 0x1FC000, .erasesize = 0x04000, .numblocks = 1 }
588 }
589 }, {
590 .mfr_id = MANUFACTURER_ST,
591 .dev_id = M29W160DB,
592 .name = "ST M29W160DB",
593 .size = 0x00200000,
594 .numeraseregions = 4,
595 .regions = {
596 { .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 },
597 { .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 },
598 { .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 },
599 { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 31 }
600 }
601 }, {
602 .mfr_id = MANUFACTURER_AMD,
603 .dev_id = AM29BDS323D,
604 .name = "AMD AM29BDS323D",
605 .size = 0x00400000,
606 .numeraseregions = 3,
607 .regions = {
608 { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 48 },
609 { .offset = 0x300000, .erasesize = 0x10000, .numblocks = 15 },
610 { .offset = 0x3f0000, .erasesize = 0x02000, .numblocks = 8 },
611 }
612 }, {
613 .mfr_id = MANUFACTURER_ATMEL,
614 .dev_id = AT49xV16x,
615 .name = "Atmel AT49xV16x",
616 .size = 0x00200000,
617 .numeraseregions = 2,
618 .regions = {
619 { .offset = 0x000000, .erasesize = 0x02000, .numblocks = 8 },
620 { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 31 }
621 }
622 }, {
623 .mfr_id = MANUFACTURER_ATMEL,
624 .dev_id = AT49xV16xT,
625 .name = "Atmel AT49xV16xT",
626 .size = 0x00200000,
627 .numeraseregions = 2,
628 .regions = {
629 { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 },
630 { .offset = 0x1F0000, .erasesize = 0x02000, .numblocks = 8 }
631 }
632 }
633 };
634
635 struct mtd_info *mtd;
636 struct flchip chips[MAX_AMD_CHIPS];
637 int table_pos[MAX_AMD_CHIPS];
638 struct amd_flash_private temp;
639 struct amd_flash_private *private;
640 u_long size;
641 unsigned long base;
642 int i;
643 int reg_idx;
644 int offset;
645
646 mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
647 if (!mtd) {
648 printk(KERN_WARNING
649 "%s: kmalloc failed for info structure\n", map->name);
650 return NULL;
651 }
652 mtd->priv = map;
653
654 memset(&temp, 0, sizeof(temp));
655
656 printk("%s: Probing for AMD compatible flash...\n", map->name);
657
658 if ((table_pos[0] = probe_new_chip(mtd, 0, NULL, &temp, table,
659 ARRAY_SIZE(table)))
660 == -1) {
661 printk(KERN_WARNING
662 "%s: Found no AMD compatible device at location zero\n",
663 map->name);
664 kfree(mtd);
665
666 return NULL;
667 }
668
669 chips[0].start = 0;
670 chips[0].state = FL_READY;
671 chips[0].mutex = &chips[0]._spinlock;
672 temp.numchips = 1;
673 for (size = mtd->size; size > 1; size >>= 1) {
674 temp.chipshift++;
675 }
676 switch (temp.interleave) {
677 case 2:
678 temp.chipshift += 1;
679 break;
680 case 4:
681 temp.chipshift += 2;
682 break;
683 }
684
685 /* Find out if there are any more chips in the map. */
686 for (base = (1 << temp.chipshift);
687 base < map->size;
688 base += (1 << temp.chipshift)) {
689 int numchips = temp.numchips;
690 table_pos[numchips] = probe_new_chip(mtd, base, chips,
691 &temp, table, ARRAY_SIZE(table));
692 }
693
694 mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info) *
695 mtd->numeraseregions, GFP_KERNEL);
696 if (!mtd->eraseregions) {
697 printk(KERN_WARNING "%s: Failed to allocate "
698 "memory for MTD erase region info\n", map->name);
699 kfree(mtd);
700 map->fldrv_priv = NULL;
701 return NULL;
702 }
703
704 reg_idx = 0;
705 offset = 0;
706 for (i = 0; i < temp.numchips; i++) {
707 int dev_size;
708 int j;
709
710 dev_size = 0;
711 for (j = 0; j < table[table_pos[i]].numeraseregions; j++) {
712 mtd->eraseregions[reg_idx].offset = offset +
713 (table[table_pos[i]].regions[j].offset *
714 temp.interleave);
715 mtd->eraseregions[reg_idx].erasesize =
716 table[table_pos[i]].regions[j].erasesize *
717 temp.interleave;
718 mtd->eraseregions[reg_idx].numblocks =
719 table[table_pos[i]].regions[j].numblocks;
720 if (mtd->erasesize <
721 mtd->eraseregions[reg_idx].erasesize) {
722 mtd->erasesize =
723 mtd->eraseregions[reg_idx].erasesize;
724 }
725 dev_size += mtd->eraseregions[reg_idx].erasesize *
726 mtd->eraseregions[reg_idx].numblocks;
727 reg_idx++;
728 }
729 offset += dev_size;
730 }
731 mtd->type = MTD_NORFLASH;
732 mtd->writesize = 1;
733 mtd->flags = MTD_CAP_NORFLASH;
734 mtd->name = map->name;
735 mtd->erase = amd_flash_erase;
736 mtd->read = amd_flash_read;
737 mtd->write = amd_flash_write;
738 mtd->sync = amd_flash_sync;
739 mtd->suspend = amd_flash_suspend;
740 mtd->resume = amd_flash_resume;
741 mtd->lock = amd_flash_lock;
742 mtd->unlock = amd_flash_unlock;
743
744 private = kmalloc(sizeof(*private) + (sizeof(struct flchip) *
745 temp.numchips), GFP_KERNEL);
746 if (!private) {
747 printk(KERN_WARNING
748 "%s: kmalloc failed for private structure\n", map->name);
749 kfree(mtd);
750 map->fldrv_priv = NULL;
751 return NULL;
752 }
753 memcpy(private, &temp, sizeof(temp));
754 memcpy(private->chips, chips,
755 sizeof(struct flchip) * private->numchips);
756 for (i = 0; i < private->numchips; i++) {
757 init_waitqueue_head(&private->chips[i].wq);
758 spin_lock_init(&private->chips[i]._spinlock);
759 }
760
761 map->fldrv_priv = private;
762
763 map->fldrv = &amd_flash_chipdrv;
764
765 __module_get(THIS_MODULE);
766 return mtd;
767}
768
769
770
771static inline int read_one_chip(struct map_info *map, struct flchip *chip,
772 loff_t adr, size_t len, u_char *buf)
773{
774 DECLARE_WAITQUEUE(wait, current);
775 unsigned long timeo = jiffies + HZ;
776
777retry:
778 spin_lock_bh(chip->mutex);
779
780 if (chip->state != FL_READY){
781 printk(KERN_INFO "%s: waiting for chip to read, state = %d\n",
782 map->name, chip->state);
783 set_current_state(TASK_UNINTERRUPTIBLE);
784 add_wait_queue(&chip->wq, &wait);
785
786 spin_unlock_bh(chip->mutex);
787
788 schedule();
789 remove_wait_queue(&chip->wq, &wait);
790
791 if(signal_pending(current)) {
792 return -EINTR;
793 }
794
795 timeo = jiffies + HZ;
796
797 goto retry;
798 }
799
800 adr += chip->start;
801
802 chip->state = FL_READY;
803
804 map_copy_from(map, buf, adr, len);
805
806 wake_up(&chip->wq);
807 spin_unlock_bh(chip->mutex);
808
809 return 0;
810}
811
812
813
814static int amd_flash_read(struct mtd_info *mtd, loff_t from, size_t len,
815 size_t *retlen, u_char *buf)
816{
817 struct map_info *map = mtd->priv;
818 struct amd_flash_private *private = map->fldrv_priv;
819 unsigned long ofs;
820 int chipnum;
821 int ret = 0;
822
823 if ((from + len) > mtd->size) {
824 printk(KERN_WARNING "%s: read request past end of device "
825 "(0x%lx)\n", map->name, (unsigned long)from + len);
826
827 return -EINVAL;
828 }
829
 830 /* Offset within the first chip at which the first read should start. */
831 chipnum = (from >> private->chipshift);
832 ofs = from - (chipnum << private->chipshift);
833
834 *retlen = 0;
835
836 while (len) {
837 unsigned long this_len;
838
839 if (chipnum >= private->numchips) {
840 break;
841 }
842
843 if ((len + ofs - 1) >> private->chipshift) {
844 this_len = (1 << private->chipshift) - ofs;
845 } else {
846 this_len = len;
847 }
848
849 ret = read_one_chip(map, &private->chips[chipnum], ofs,
850 this_len, buf);
851 if (ret) {
852 break;
853 }
854
855 *retlen += this_len;
856 len -= this_len;
857 buf += this_len;
858
859 ofs = 0;
860 chipnum++;
861 }
862
863 return ret;
864}
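
/*
 * Editor's note: a minimal standalone sketch (not part of the original
 * driver) of the address split used by amd_flash_read() above -- a linear
 * MTD offset is divided into a chip index and an offset within that chip
 * via chipshift.  All names below are illustrative only.
 */
#include <stdio.h>

static void split_offset(unsigned long from, int chipshift)
{
	int chipnum = from >> chipshift;	/* which chip in the array */
	unsigned long ofs = from - ((unsigned long)chipnum << chipshift);

	printf("from=0x%lx -> chip %d, offset 0x%lx\n", from, chipnum, ofs);
}

int main(void)
{
	/* e.g. two 2 MiB chips (chipshift = 21): 0x280000 lands in chip 1 */
	split_offset(0x280000UL, 21);
	return 0;
}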
865
866
867
868static int write_one_word(struct map_info *map, struct flchip *chip,
869 unsigned long adr, __u32 datum)
870{
871 unsigned long timeo = jiffies + HZ;
872 struct amd_flash_private *private = map->fldrv_priv;
873 DECLARE_WAITQUEUE(wait, current);
874 int ret = 0;
875 int times_left;
876
877retry:
878 spin_lock_bh(chip->mutex);
879
880 if (chip->state != FL_READY){
881 printk("%s: waiting for chip to write, state = %d\n",
882 map->name, chip->state);
883 set_current_state(TASK_UNINTERRUPTIBLE);
884 add_wait_queue(&chip->wq, &wait);
885
886 spin_unlock_bh(chip->mutex);
887
888 schedule();
889 remove_wait_queue(&chip->wq, &wait);
890 printk(KERN_INFO "%s: woke up to write\n", map->name);
891 if(signal_pending(current))
892 return -EINTR;
893
894 timeo = jiffies + HZ;
895
896 goto retry;
897 }
898
899 chip->state = FL_WRITING;
900
901 adr += chip->start;
902 ENABLE_VPP(map);
903 send_cmd(map, chip->start, CMD_PROGRAM_UNLOCK_DATA);
904 wide_write(map, datum, adr);
905
906 times_left = 500000;
907 while (times_left-- && flash_is_busy(map, adr, private->interleave)) {
908 if (need_resched()) {
909 spin_unlock_bh(chip->mutex);
910 schedule();
911 spin_lock_bh(chip->mutex);
912 }
913 }
914
915	if (times_left < 0) {
916 printk(KERN_WARNING "%s: write to 0x%lx timed out!\n",
917 map->name, adr);
918 ret = -EIO;
919 } else {
920 __u32 verify;
921 if ((verify = wide_read(map, adr)) != datum) {
922 printk(KERN_WARNING "%s: write to 0x%lx failed. "
923 "datum = %x, verify = %x\n",
924 map->name, adr, datum, verify);
925 ret = -EIO;
926 }
927 }
928
929 DISABLE_VPP(map);
930 chip->state = FL_READY;
931 wake_up(&chip->wq);
932 spin_unlock_bh(chip->mutex);
933
934 return ret;
935}
936
937
938
939static int amd_flash_write(struct mtd_info *mtd, loff_t to , size_t len,
940 size_t *retlen, const u_char *buf)
941{
942 struct map_info *map = mtd->priv;
943 struct amd_flash_private *private = map->fldrv_priv;
944 int ret = 0;
945 int chipnum;
946 unsigned long ofs;
947 unsigned long chipstart;
948
949 *retlen = 0;
950 if (!len) {
951 return 0;
952 }
953
954 chipnum = to >> private->chipshift;
955 ofs = to - (chipnum << private->chipshift);
956 chipstart = private->chips[chipnum].start;
957
958 /* If it's not bus-aligned, do the first byte write. */
959 if (ofs & (map->buswidth - 1)) {
960 unsigned long bus_ofs = ofs & ~(map->buswidth - 1);
961 int i = ofs - bus_ofs;
962 int n = 0;
963 u_char tmp_buf[4];
964 __u32 datum;
965
966 map_copy_from(map, tmp_buf,
967 bus_ofs + private->chips[chipnum].start,
968 map->buswidth);
969 while (len && i < map->buswidth)
970 tmp_buf[i++] = buf[n++], len--;
971
972 if (map->buswidth == 2) {
973 datum = *(__u16*)tmp_buf;
974 } else if (map->buswidth == 4) {
975 datum = *(__u32*)tmp_buf;
976 } else {
977 return -EINVAL; /* should never happen, but be safe */
978 }
979
980 ret = write_one_word(map, &private->chips[chipnum], bus_ofs,
981 datum);
982 if (ret) {
983 return ret;
984 }
985
986 ofs += n;
987 buf += n;
988 (*retlen) += n;
989
990 if (ofs >> private->chipshift) {
991 chipnum++;
992 ofs = 0;
993 if (chipnum == private->numchips) {
994 return 0;
995 }
996 }
997 }
998
999 /* We are now aligned, write as much as possible. */
1000 while(len >= map->buswidth) {
1001 __u32 datum;
1002
1003 if (map->buswidth == 1) {
1004 datum = *(__u8*)buf;
1005 } else if (map->buswidth == 2) {
1006 datum = *(__u16*)buf;
1007 } else if (map->buswidth == 4) {
1008 datum = *(__u32*)buf;
1009 } else {
1010 return -EINVAL;
1011 }
1012
1013 ret = write_one_word(map, &private->chips[chipnum], ofs, datum);
1014
1015 if (ret) {
1016 return ret;
1017 }
1018
1019 ofs += map->buswidth;
1020 buf += map->buswidth;
1021 (*retlen) += map->buswidth;
1022 len -= map->buswidth;
1023
1024 if (ofs >> private->chipshift) {
1025 chipnum++;
1026 ofs = 0;
1027 if (chipnum == private->numchips) {
1028 return 0;
1029 }
1030 chipstart = private->chips[chipnum].start;
1031 }
1032 }
1033
1034 if (len & (map->buswidth - 1)) {
1035 int i = 0, n = 0;
1036		u_char tmp_buf[4];
1037 __u32 datum;
1038
1039 map_copy_from(map, tmp_buf,
1040 ofs + private->chips[chipnum].start,
1041 map->buswidth);
1042 while (len--) {
1043 tmp_buf[i++] = buf[n++];
1044 }
1045
1046 if (map->buswidth == 2) {
1047 datum = *(__u16*)tmp_buf;
1048 } else if (map->buswidth == 4) {
1049 datum = *(__u32*)tmp_buf;
1050 } else {
1051 return -EINVAL; /* should never happen, but be safe */
1052 }
1053
1054 ret = write_one_word(map, &private->chips[chipnum], ofs, datum);
1055
1056 if (ret) {
1057 return ret;
1058 }
1059
1060 (*retlen) += n;
1061 }
1062
1063 return 0;
1064}
1065
1066
1067
1068static inline int erase_one_block(struct map_info *map, struct flchip *chip,
1069 unsigned long adr, u_long size)
1070{
1071 unsigned long timeo = jiffies + HZ;
1072 struct amd_flash_private *private = map->fldrv_priv;
1073 DECLARE_WAITQUEUE(wait, current);
1074
1075retry:
1076 spin_lock_bh(chip->mutex);
1077
1078 if (chip->state != FL_READY){
1079 set_current_state(TASK_UNINTERRUPTIBLE);
1080 add_wait_queue(&chip->wq, &wait);
1081
1082 spin_unlock_bh(chip->mutex);
1083
1084 schedule();
1085 remove_wait_queue(&chip->wq, &wait);
1086
1087 if (signal_pending(current)) {
1088 return -EINTR;
1089 }
1090
1091 timeo = jiffies + HZ;
1092
1093 goto retry;
1094 }
1095
1096 chip->state = FL_ERASING;
1097
1098 adr += chip->start;
1099 ENABLE_VPP(map);
1100 send_cmd(map, chip->start, CMD_SECTOR_ERASE_UNLOCK_DATA);
1101 send_cmd_to_addr(map, chip->start, CMD_SECTOR_ERASE_UNLOCK_DATA_2, adr);
1102
1103 timeo = jiffies + (HZ * 20);
1104
1105 spin_unlock_bh(chip->mutex);
1106 msleep(1000);
1107 spin_lock_bh(chip->mutex);
1108
1109 while (flash_is_busy(map, adr, private->interleave)) {
1110
1111 if (chip->state != FL_ERASING) {
1112 /* Someone's suspended the erase. Sleep */
1113 set_current_state(TASK_UNINTERRUPTIBLE);
1114 add_wait_queue(&chip->wq, &wait);
1115
1116 spin_unlock_bh(chip->mutex);
1117 printk(KERN_INFO "%s: erase suspended. Sleeping\n",
1118 map->name);
1119 schedule();
1120 remove_wait_queue(&chip->wq, &wait);
1121
1122 if (signal_pending(current)) {
1123 return -EINTR;
1124 }
1125
1126 timeo = jiffies + (HZ*2); /* FIXME */
1127 spin_lock_bh(chip->mutex);
1128 continue;
1129 }
1130
1131 /* OK Still waiting */
1132 if (time_after(jiffies, timeo)) {
1133 chip->state = FL_READY;
1134 spin_unlock_bh(chip->mutex);
1135 printk(KERN_WARNING "%s: waiting for erase to complete "
1136 "timed out.\n", map->name);
1137 DISABLE_VPP(map);
1138
1139 return -EIO;
1140 }
1141
1142 /* Latency issues. Drop the lock, wait a while and retry */
1143 spin_unlock_bh(chip->mutex);
1144
1145 if (need_resched())
1146 schedule();
1147 else
1148 udelay(1);
1149
1150 spin_lock_bh(chip->mutex);
1151 }
1152
1153 /* Verify every single word */
1154 {
155		unsigned long address;
1156 int error = 0;
1157 __u8 verify;
1158
1159 for (address = adr; address < (adr + size); address++) {
1160 if ((verify = map_read8(map, address)) != 0xFF) {
1161 error = 1;
1162 break;
1163 }
1164 }
1165 if (error) {
1166 chip->state = FL_READY;
1167 spin_unlock_bh(chip->mutex);
1168 printk(KERN_WARNING
1169 "%s: verify error at 0x%x, size %ld.\n",
1170 map->name, address, size);
1171 DISABLE_VPP(map);
1172
1173 return -EIO;
1174 }
1175 }
1176
1177 DISABLE_VPP(map);
1178 chip->state = FL_READY;
1179 wake_up(&chip->wq);
1180 spin_unlock_bh(chip->mutex);
1181
1182 return 0;
1183}
1184
1185
1186
1187static int amd_flash_erase(struct mtd_info *mtd, struct erase_info *instr)
1188{
1189 struct map_info *map = mtd->priv;
1190 struct amd_flash_private *private = map->fldrv_priv;
1191 unsigned long adr, len;
1192 int chipnum;
1193 int ret = 0;
1194 int i;
1195 int first;
1196 struct mtd_erase_region_info *regions = mtd->eraseregions;
1197
1198 if (instr->addr > mtd->size) {
1199 return -EINVAL;
1200 }
1201
1202 if ((instr->len + instr->addr) > mtd->size) {
1203 return -EINVAL;
1204 }
1205
1206 /* Check that both start and end of the requested erase are
1207 * aligned with the erasesize at the appropriate addresses.
1208 */
1209
1210 i = 0;
1211
712	/* Skip all erase regions which end before the start of the
713	   requested erase. To save on the calculations, we skip to the
714	   first erase region which starts after the start of the
715	   requested erase, and then go back one (see the sketch after
716	   this function). */
1217
1218 while ((i < mtd->numeraseregions) &&
1219 (instr->addr >= regions[i].offset)) {
1220 i++;
1221 }
1222 i--;
1223
1224 /* OK, now i is pointing at the erase region in which this
1225 * erase request starts. Check the start of the requested
1226 * erase range is aligned with the erase size which is in
1227 * effect here.
1228 */
1229
1230 if (instr->addr & (regions[i].erasesize-1)) {
1231 return -EINVAL;
1232 }
1233
1234 /* Remember the erase region we start on. */
1235
1236 first = i;
1237
1238 /* Next, check that the end of the requested erase is aligned
1239 * with the erase region at that address.
1240 */
1241
1242 while ((i < mtd->numeraseregions) &&
1243 ((instr->addr + instr->len) >= regions[i].offset)) {
1244 i++;
1245 }
1246
1247 /* As before, drop back one to point at the region in which
1248 * the address actually falls.
1249 */
1250
1251 i--;
1252
1253 if ((instr->addr + instr->len) & (regions[i].erasesize-1)) {
1254 return -EINVAL;
1255 }
1256
1257 chipnum = instr->addr >> private->chipshift;
1258 adr = instr->addr - (chipnum << private->chipshift);
1259 len = instr->len;
1260
1261 i = first;
1262
1263 while (len) {
1264 ret = erase_one_block(map, &private->chips[chipnum], adr,
1265 regions[i].erasesize);
1266
1267 if (ret) {
1268 return ret;
1269 }
1270
1271 adr += regions[i].erasesize;
1272 len -= regions[i].erasesize;
1273
1274 if ((adr % (1 << private->chipshift)) ==
1275 ((regions[i].offset + (regions[i].erasesize *
1276 regions[i].numblocks))
1277 % (1 << private->chipshift))) {
1278 i++;
1279 }
1280
1281 if (adr >> private->chipshift) {
1282 adr = 0;
1283 chipnum++;
1284 if (chipnum >= private->numchips) {
1285 break;
1286 }
1287 }
1288 }
1289
1290 instr->state = MTD_ERASE_DONE;
1291 mtd_erase_callback(instr);
1292
1293 return 0;
1294}
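
/*
 * Editor's note: an illustrative sketch (not from the original driver) of
 * the erase-region lookup in amd_flash_erase() above.  Scan forward past
 * every region whose offset is <= addr, then step back one, so the result
 * indexes the region containing addr; the caller then checks alignment
 * against that region's erasesize.  'struct region' is a stand-in for
 * struct mtd_erase_region_info.
 */
struct region {
	unsigned long offset;
	unsigned long erasesize;
};

static int region_containing(const struct region *r, int nregions,
			     unsigned long addr)
{
	int i = 0;

	while (i < nregions && addr >= r[i].offset)
		i++;
	return i - 1;		/* index of the region holding addr */
}

int main(void)
{
	const struct region r[] = { { 0x000000, 0x10000 }, { 0x1f0000, 0x2000 } };

	return region_containing(r, 2, 0x1f4000) == 1 ? 0 : 1;
}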
1295
1296
1297
1298static void amd_flash_sync(struct mtd_info *mtd)
1299{
1300 struct map_info *map = mtd->priv;
1301 struct amd_flash_private *private = map->fldrv_priv;
1302 int i;
1303 struct flchip *chip;
1304 int ret = 0;
1305 DECLARE_WAITQUEUE(wait, current);
1306
1307 for (i = 0; !ret && (i < private->numchips); i++) {
1308 chip = &private->chips[i];
1309
1310 retry:
1311 spin_lock_bh(chip->mutex);
1312
1313 switch(chip->state) {
1314 case FL_READY:
1315 case FL_STATUS:
1316 case FL_CFI_QUERY:
1317 case FL_JEDEC_QUERY:
1318 chip->oldstate = chip->state;
1319 chip->state = FL_SYNCING;
1320 /* No need to wake_up() on this state change -
1321 * as the whole point is that nobody can do anything
1322 * with the chip now anyway.
1323 */
1324 case FL_SYNCING:
1325 spin_unlock_bh(chip->mutex);
1326 break;
1327
1328 default:
1329 /* Not an idle state */
1330 add_wait_queue(&chip->wq, &wait);
1331
1332 spin_unlock_bh(chip->mutex);
1333
1334 schedule();
1335
1336 remove_wait_queue(&chip->wq, &wait);
1337
1338 goto retry;
1339 }
1340 }
1341
1342 /* Unlock the chips again */
1343 for (i--; i >= 0; i--) {
1344 chip = &private->chips[i];
1345
1346 spin_lock_bh(chip->mutex);
1347
1348 if (chip->state == FL_SYNCING) {
1349 chip->state = chip->oldstate;
1350 wake_up(&chip->wq);
1351 }
1352 spin_unlock_bh(chip->mutex);
1353 }
1354}
1355
1356
1357
1358static int amd_flash_suspend(struct mtd_info *mtd)
1359{
1360	printk(KERN_WARNING "amd_flash_suspend(): not implemented!\n");
1361 return -EINVAL;
1362}
1363
1364
1365
1366static void amd_flash_resume(struct mtd_info *mtd)
1367{
1368	printk(KERN_WARNING "amd_flash_resume(): not implemented!\n");
1369}
1370
1371
1372
1373static void amd_flash_destroy(struct mtd_info *mtd)
1374{
1375 struct map_info *map = mtd->priv;
1376 struct amd_flash_private *private = map->fldrv_priv;
1377 kfree(private);
1378}
1379
1380int __init amd_flash_init(void)
1381{
1382 register_mtd_chip_driver(&amd_flash_chipdrv);
1383 return 0;
1384}
1385
1386void __exit amd_flash_exit(void)
1387{
1388 unregister_mtd_chip_driver(&amd_flash_chipdrv);
1389}
1390
1391module_init(amd_flash_init);
1392module_exit(amd_flash_exit);
1393
1394MODULE_LICENSE("GPL");
1395MODULE_AUTHOR("Jonas Holmberg <jonas.holmberg@axis.com>");
1396MODULE_DESCRIPTION("Old MTD chip driver for AMD flash chips");
diff --git a/drivers/mtd/chips/jedec.c b/drivers/mtd/chips/jedec.c
deleted file mode 100644
index 14e57b2bf842..000000000000
--- a/drivers/mtd/chips/jedec.c
+++ /dev/null
@@ -1,935 +0,0 @@
1
2/* JEDEC Flash Interface.
3 * This is an older type of interface for self programming flash. It is
4 * commonly used in older AMD chips and is obsolete compared with CFI.
5 * It is called JEDEC because the JEDEC association distributes the ID codes
6 * for the chips.
7 *
8 * See the AMD flash databook for information on how to operate the interface.
9 *
10 * This code does not support anything wider than 8 bit flash chips; I am
11 * not going to guess how to send commands to them, and I expect they will
12 * all speak CFI.
13 *
14 * $Id: jedec.c,v 1.22 2005/01/05 18:05:11 dwmw2 Exp $
15 */
16
17#include <linux/init.h>
18#include <linux/module.h>
19#include <linux/kernel.h>
20#include <linux/slab.h>
21#include <linux/mtd/jedec.h>
22#include <linux/mtd/map.h>
23#include <linux/mtd/mtd.h>
24#include <linux/mtd/compatmac.h>
25
26static struct mtd_info *jedec_probe(struct map_info *);
27static int jedec_probe8(struct map_info *map,unsigned long base,
28 struct jedec_private *priv);
29static int jedec_probe16(struct map_info *map,unsigned long base,
30 struct jedec_private *priv);
31static int jedec_probe32(struct map_info *map,unsigned long base,
32 struct jedec_private *priv);
33static void jedec_flash_chip_scan(struct jedec_private *priv,unsigned long start,
34 unsigned long len);
35static int flash_erase(struct mtd_info *mtd, struct erase_info *instr);
36static int flash_write(struct mtd_info *mtd, loff_t start, size_t len,
37 size_t *retlen, const u_char *buf);
38
39static unsigned long my_bank_size;
40
41/* Listing of parts and sizes. We need this table to learn the sector
42 size of the chip and the total length */
43static const struct JEDECTable JEDEC_table[] = {
44 {
45 .jedec = 0x013D,
46 .name = "AMD Am29F017D",
47 .size = 2*1024*1024,
48 .sectorsize = 64*1024,
49 .capabilities = MTD_CAP_NORFLASH
50 },
51 {
52 .jedec = 0x01AD,
53 .name = "AMD Am29F016",
54 .size = 2*1024*1024,
55 .sectorsize = 64*1024,
56 .capabilities = MTD_CAP_NORFLASH
57 },
58 {
59 .jedec = 0x01D5,
60 .name = "AMD Am29F080",
61 .size = 1*1024*1024,
62 .sectorsize = 64*1024,
63 .capabilities = MTD_CAP_NORFLASH
64 },
65 {
66 .jedec = 0x01A4,
67 .name = "AMD Am29F040",
68 .size = 512*1024,
69 .sectorsize = 64*1024,
70 .capabilities = MTD_CAP_NORFLASH
71 },
72 {
73 .jedec = 0x20E3,
74 .name = "AMD Am29W040B",
75 .size = 512*1024,
76 .sectorsize = 64*1024,
77 .capabilities = MTD_CAP_NORFLASH
78 },
79 {
80 .jedec = 0xC2AD,
81 .name = "Macronix MX29F016",
82 .size = 2*1024*1024,
83 .sectorsize = 64*1024,
84 .capabilities = MTD_CAP_NORFLASH
85 },
86 { .jedec = 0x0 }
87};
88
89static const struct JEDECTable *jedec_idtoinf(__u8 mfr,__u8 id);
90static void jedec_sync(struct mtd_info *mtd) {}
91static int jedec_read(struct mtd_info *mtd, loff_t from, size_t len,
92 size_t *retlen, u_char *buf);
93static int jedec_read_banked(struct mtd_info *mtd, loff_t from, size_t len,
94 size_t *retlen, u_char *buf);
95
96static struct mtd_info *jedec_probe(struct map_info *map);
97
98
99
100static struct mtd_chip_driver jedec_chipdrv = {
101 .probe = jedec_probe,
102 .name = "jedec",
103 .module = THIS_MODULE
104};
105
106/* Probe entry point */
107
108static struct mtd_info *jedec_probe(struct map_info *map)
109{
110 struct mtd_info *MTD;
111 struct jedec_private *priv;
112 unsigned long Base;
113 unsigned long SectorSize;
114 unsigned count;
115 unsigned I,Uniq;
116 char Part[200];
117	/* priv is carved from the kzalloc()ed MTD below, so it starts zeroed. */
118
119 MTD = kzalloc(sizeof(struct mtd_info) + sizeof(struct jedec_private), GFP_KERNEL);
120 if (!MTD)
121 return NULL;
122
123 priv = (struct jedec_private *)&MTD[1];
124
125 my_bank_size = map->size;
126
127 if (map->size/my_bank_size > MAX_JEDEC_CHIPS)
128 {
129 printk("mtd: Increase MAX_JEDEC_CHIPS, too many banks.\n");
130 kfree(MTD);
131 return NULL;
132 }
133
134 for (Base = 0; Base < map->size; Base += my_bank_size)
135 {
136 // Perhaps zero could designate all tests?
137 if (map->buswidth == 0)
138 map->buswidth = 1;
139
140 if (map->buswidth == 1){
141 if (jedec_probe8(map,Base,priv) == 0) {
142 printk("did recognize jedec chip\n");
143 kfree(MTD);
144 return NULL;
145 }
146 }
147 if (map->buswidth == 2)
148 jedec_probe16(map,Base,priv);
149 if (map->buswidth == 4)
150 jedec_probe32(map,Base,priv);
151 }
152
153 // Get the biggest sector size
154 SectorSize = 0;
155   for (I = 0; I < MAX_JEDEC_CHIPS && priv->chips[I].jedec != 0; I++)
156 {
157 // printk("priv->chips[%d].jedec is %x\n",I,priv->chips[I].jedec);
158 // printk("priv->chips[%d].sectorsize is %lx\n",I,priv->chips[I].sectorsize);
159 if (priv->chips[I].sectorsize > SectorSize)
160 SectorSize = priv->chips[I].sectorsize;
161 }
162
163 // Quickly ensure that the other sector sizes are factors of the largest
164   for (I = 0; I < MAX_JEDEC_CHIPS && priv->chips[I].jedec != 0; I++)
165 {
166 if ((SectorSize/priv->chips[I].sectorsize)*priv->chips[I].sectorsize != SectorSize)
167 {
168 printk("mtd: Failed. Device has incompatible mixed sector sizes\n");
169 kfree(MTD);
170 return NULL;
171 }
172 }
173
174 /* Generate a part name that includes the number of different chips and
175 other configuration information */
176 count = 1;
177 strlcpy(Part,map->name,sizeof(Part)-10);
178 strcat(Part," ");
179 Uniq = 0;
180   for (I = 0; I < MAX_JEDEC_CHIPS && priv->chips[I].jedec != 0; I++)
181 {
182 const struct JEDECTable *JEDEC;
183
184      if (I + 1 < MAX_JEDEC_CHIPS && priv->chips[I+1].jedec == priv->chips[I].jedec)
185 {
186 count++;
187 continue;
188 }
189
190 // Locate the chip in the jedec table
191 JEDEC = jedec_idtoinf(priv->chips[I].jedec >> 8,priv->chips[I].jedec);
192 if (JEDEC == 0)
193 {
194 printk("mtd: Internal Error, JEDEC not set\n");
195 kfree(MTD);
196 return NULL;
197 }
198
199 if (Uniq != 0)
200 strcat(Part,",");
201 Uniq++;
202
203 if (count != 1)
204 sprintf(Part+strlen(Part),"%x*[%s]",count,JEDEC->name);
205 else
206 sprintf(Part+strlen(Part),"%s",JEDEC->name);
207 if (strlen(Part) > sizeof(Part)*2/3)
208 break;
209 count = 1;
210 }
211
212 /* Determine if the chips are organized in a linear fashion, or if there
213 are empty banks. Note, the last bank does not count here, only the
214      first banks are important. Holes on non-bank boundaries cannot exist
215 due to the way the detection algorithm works. */
216 if (priv->size < my_bank_size)
217 my_bank_size = priv->size;
218 priv->is_banked = 0;
219 //printk("priv->size is %x, my_bank_size is %x\n",priv->size,my_bank_size);
220 //printk("priv->bank_fill[0] is %x\n",priv->bank_fill[0]);
221 if (!priv->size) {
222 printk("priv->size is zero\n");
223 kfree(MTD);
224 return NULL;
225 }
226 if (priv->size/my_bank_size) {
227 if (priv->size/my_bank_size == 1) {
228 priv->size = my_bank_size;
229 }
230 else {
231 for (I = 0; I != priv->size/my_bank_size - 1; I++)
232 {
233 if (priv->bank_fill[I] != my_bank_size)
234 priv->is_banked = 1;
235
236	   /* Even this could be eliminated, but new de-optimized read/write
237	      functions would have to be written */
238 printk("priv->bank_fill[%d] is %lx, priv->bank_fill[0] is %lx\n",I,priv->bank_fill[I],priv->bank_fill[0]);
239 if (priv->bank_fill[I] != priv->bank_fill[0])
240 {
241 printk("mtd: Failed. Cannot handle unsymmetric banking\n");
242 kfree(MTD);
243 return NULL;
244 }
245 }
246 }
247 }
248 if (priv->is_banked == 1)
249 strcat(Part,", banked");
250
251 // printk("Part: '%s'\n",Part);
252
253 memset(MTD,0,sizeof(*MTD));
254 // strlcpy(MTD->name,Part,sizeof(MTD->name));
255 MTD->name = map->name;
256 MTD->type = MTD_NORFLASH;
257 MTD->flags = MTD_CAP_NORFLASH;
258 MTD->writesize = 1;
259 MTD->erasesize = SectorSize*(map->buswidth);
260 // printk("MTD->erasesize is %x\n",(unsigned int)MTD->erasesize);
261 MTD->size = priv->size;
262 // printk("MTD->size is %x\n",(unsigned int)MTD->size);
263 //MTD->module = THIS_MODULE; // ? Maybe this should be the low level module?
264 MTD->erase = flash_erase;
265 if (priv->is_banked == 1)
266 MTD->read = jedec_read_banked;
267 else
268 MTD->read = jedec_read;
269 MTD->write = flash_write;
270 MTD->sync = jedec_sync;
271 MTD->priv = map;
272 map->fldrv_priv = priv;
273 map->fldrv = &jedec_chipdrv;
274 __module_get(THIS_MODULE);
275 return MTD;
276}
277
278/* Helper for the JEDEC function, JEDEC numbers all have odd parity */
279static int checkparity(u_char C)
280{
281 u_char parity = 0;
282 while (C != 0)
283 {
284 parity ^= C & 1;
285 C >>= 1;
286 }
287
288 return parity == 1;
289}
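
/*
 * Editor's note: a quick standalone check (illustrative, not from the
 * original source) that classic JEDEC ID bytes really carry odd parity,
 * which is exactly what checkparity() above tests.
 */
#include <assert.h>

static int odd_parity(unsigned char c)
{
	int parity = 0;

	while (c) {		/* same loop as checkparity() above */
		parity ^= c & 1;
		c >>= 1;
	}
	return parity == 1;
}

int main(void)
{
	assert(odd_parity(0x01));	/* AMD manufacturer code */
	assert(odd_parity(0xAD));	/* Am29F016 device code */
	assert(!odd_parity(0x03));	/* even parity: rejected by the probe */
	return 0;
}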
290
291
292/* Take an array of JEDEC numbers that represent interleaved flash chips
293 and process them. Check to make sure they are good JEDEC numbers, look
294 them up and then add them to the chip list */
295static int handle_jedecs(struct map_info *map,__u8 *Mfg,__u8 *Id,unsigned Count,
296 unsigned long base,struct jedec_private *priv)
297{
298 unsigned I,J;
299 unsigned long Size;
300 unsigned long SectorSize;
301 const struct JEDECTable *JEDEC;
302
303 // Test #2 JEDEC numbers exhibit odd parity
304 for (I = 0; I != Count; I++)
305 {
306 if (checkparity(Mfg[I]) == 0 || checkparity(Id[I]) == 0)
307 return 0;
308 }
309
310 // Finally, just make sure all the chip sizes are the same
311 JEDEC = jedec_idtoinf(Mfg[0],Id[0]);
312
313 if (JEDEC == 0)
314 {
315 printk("mtd: Found JEDEC flash chip, but do not have a table entry for %x:%x\n",Mfg[0],Mfg[1]);
316 return 0;
317 }
318
319 Size = JEDEC->size;
320 SectorSize = JEDEC->sectorsize;
321 for (I = 0; I != Count; I++)
322 {
323      JEDEC = jedec_idtoinf(Mfg[I],Id[I]);
324 if (JEDEC == 0)
325 {
326 printk("mtd: Found JEDEC flash chip, but do not have a table entry for %x:%x\n",Mfg[0],Mfg[1]);
327 return 0;
328 }
329
330 if (Size != JEDEC->size || SectorSize != JEDEC->sectorsize)
331 {
332 printk("mtd: Failed. Interleved flash does not have matching characteristics\n");
333 return 0;
334 }
335 }
336
337 // Load the Chips
338 for (I = 0; I != MAX_JEDEC_CHIPS; I++)
339 {
340 if (priv->chips[I].jedec == 0)
341 break;
342 }
343
344 if (I + Count > MAX_JEDEC_CHIPS)
345 {
346 printk("mtd: Device has too many chips. Increase MAX_JEDEC_CHIPS\n");
347 return 0;
348 }
349
350 // Add them to the table
351 for (J = 0; J != Count; J++)
352 {
353 unsigned long Bank;
354
355 JEDEC = jedec_idtoinf(Mfg[J],Id[J]);
356 priv->chips[I].jedec = (Mfg[J] << 8) | Id[J];
357 priv->chips[I].size = JEDEC->size;
358 priv->chips[I].sectorsize = JEDEC->sectorsize;
359 priv->chips[I].base = base + J;
360 priv->chips[I].datashift = J*8;
361 priv->chips[I].capabilities = JEDEC->capabilities;
362 priv->chips[I].offset = priv->size + J;
363
364      // compute addrshift = log2(Count), i.e. the interleave shift
365 priv->chips[I].addrshift = 0;
366 for (Bank = Count; Bank != 1; Bank >>= 1, priv->chips[I].addrshift++);
367
368 // Determine how filled this bank is.
369 Bank = base & (~(my_bank_size-1));
370 if (priv->bank_fill[Bank/my_bank_size] < base +
371 (JEDEC->size << priv->chips[I].addrshift) - Bank)
372 priv->bank_fill[Bank/my_bank_size] = base + (JEDEC->size << priv->chips[I].addrshift) - Bank;
373 I++;
374 }
375
376 priv->size += priv->chips[I-1].size*Count;
377
378 return priv->chips[I-1].size;
379}
380
381/* Lookup the chip information from the JEDEC ID table. */
382static const struct JEDECTable *jedec_idtoinf(__u8 mfr,__u8 id)
383{
384 __u16 Id = (mfr << 8) | id;
385 unsigned long I = 0;
386 for (I = 0; JEDEC_table[I].jedec != 0; I++)
387 if (JEDEC_table[I].jedec == Id)
388 return JEDEC_table + I;
389 return NULL;
390}
391
392// Look for flash using an 8 bit bus interface
393static int jedec_probe8(struct map_info *map,unsigned long base,
394 struct jedec_private *priv)
395{
396 #define flread(x) map_read8(map,base+x)
397 #define flwrite(v,x) map_write8(map,v,base+x)
398
399 const unsigned long AutoSel1 = 0xAA;
400 const unsigned long AutoSel2 = 0x55;
401 const unsigned long AutoSel3 = 0x90;
402 const unsigned long Reset = 0xF0;
403 __u32 OldVal;
404 __u8 Mfg[1];
405 __u8 Id[1];
406 unsigned I;
407 unsigned long Size;
408
409 // Wait for any write/erase operation to settle
410   OldVal = flread(0);
411   for (I = 0; OldVal != flread(0) && I < 10000; I++)
412      OldVal = flread(0);
413
414 // Reset the chip
415 flwrite(Reset,0x555);
416
417 // Send the sequence
418 flwrite(AutoSel1,0x555);
419 flwrite(AutoSel2,0x2AA);
420 flwrite(AutoSel3,0x555);
421
422 // Get the JEDEC numbers
423 Mfg[0] = flread(0);
424 Id[0] = flread(1);
425 // printk("Mfg is %x, Id is %x\n",Mfg[0],Id[0]);
426
427 Size = handle_jedecs(map,Mfg,Id,1,base,priv);
428 // printk("handle_jedecs Size is %x\n",(unsigned int)Size);
429 if (Size == 0)
430 {
431 flwrite(Reset,0x555);
432 return 0;
433 }
434
435
436 // Reset.
437 flwrite(Reset,0x555);
438
439 return 1;
440
441 #undef flread
442 #undef flwrite
443}
444
445// Look for flash using a 16 bit bus interface (ie 2 8-bit chips)
446static int jedec_probe16(struct map_info *map,unsigned long base,
447 struct jedec_private *priv)
448{
449 return 0;
450}
451
452// Look for flash using a 32 bit bus interface (ie 4 8-bit chips)
453static int jedec_probe32(struct map_info *map,unsigned long base,
454 struct jedec_private *priv)
455{
456 #define flread(x) map_read32(map,base+((x)<<2))
457 #define flwrite(v,x) map_write32(map,v,base+((x)<<2))
458
459 const unsigned long AutoSel1 = 0xAAAAAAAA;
460 const unsigned long AutoSel2 = 0x55555555;
461 const unsigned long AutoSel3 = 0x90909090;
462 const unsigned long Reset = 0xF0F0F0F0;
463 __u32 OldVal;
464 __u8 Mfg[4];
465 __u8 Id[4];
466 unsigned I;
467 unsigned long Size;
468
469 // Wait for any write/erase operation to settle
470   OldVal = flread(0);
471   for (I = 0; OldVal != flread(0) && I < 10000; I++)
472      OldVal = flread(0);
473
474 // Reset the chip
475 flwrite(Reset,0x555);
476
477 // Send the sequence
478 flwrite(AutoSel1,0x555);
479 flwrite(AutoSel2,0x2AA);
480 flwrite(AutoSel3,0x555);
481
482 // Test #1, JEDEC numbers are readable from 0x??00/0x??01
483 if (flread(0) != flread(0x100) ||
484 flread(1) != flread(0x101))
485 {
486 flwrite(Reset,0x555);
487 return 0;
488 }
489
490 // Split up the JEDEC numbers
491 OldVal = flread(0);
492 for (I = 0; I != 4; I++)
493 Mfg[I] = (OldVal >> (I*8));
494 OldVal = flread(1);
495 for (I = 0; I != 4; I++)
496 Id[I] = (OldVal >> (I*8));
497
498 Size = handle_jedecs(map,Mfg,Id,4,base,priv);
499 if (Size == 0)
500 {
501 flwrite(Reset,0x555);
502 return 0;
503 }
504
505 /* Check if there is address wrap around within a single bank, if this
506 returns JEDEC numbers then we assume that it is wrap around. Notice
507 we call this routine with the JEDEC return still enabled, if two or
508 more flashes have a truncated address space the probe test will still
509 work */
510 if (base + (Size<<2)+0x555 < map->size &&
511 base + (Size<<2)+0x555 < (base & (~(my_bank_size-1))) + my_bank_size)
512 {
513 if (flread(base+Size) != flread(base+Size + 0x100) ||
514 flread(base+Size + 1) != flread(base+Size + 0x101))
515 {
516 jedec_probe32(map,base+Size,priv);
517 }
518 }
519
520 // Reset.
521 flwrite(0xF0F0F0F0,0x555);
522
523 return 1;
524
525 #undef flread
526 #undef flwrite
527}
528
529/* Linear read. */
530static int jedec_read(struct mtd_info *mtd, loff_t from, size_t len,
531 size_t *retlen, u_char *buf)
532{
533 struct map_info *map = mtd->priv;
534
535 map_copy_from(map, buf, from, len);
536 *retlen = len;
537 return 0;
538}
539
540/* Banked read. Take special care to jump past the holes in the bank
541   mapping. This version assumes symmetry in the holes. */
542static int jedec_read_banked(struct mtd_info *mtd, loff_t from, size_t len,
543 size_t *retlen, u_char *buf)
544{
545 struct map_info *map = mtd->priv;
546 struct jedec_private *priv = map->fldrv_priv;
547
548 *retlen = 0;
549 while (len > 0)
550 {
551 // Determine what bank and offset into that bank the first byte is
552 unsigned long bank = from & (~(priv->bank_fill[0]-1));
553 unsigned long offset = from & (priv->bank_fill[0]-1);
554 unsigned long get = len;
555 if (priv->bank_fill[0] - offset < len)
556 get = priv->bank_fill[0] - offset;
557
558 bank /= priv->bank_fill[0];
559 map_copy_from(map,buf + *retlen,bank*my_bank_size + offset,get);
560
561 len -= get;
562 *retlen += get;
563 from += get;
564 }
565 return 0;
566}
567
568/* Pass the flags value that the flash returned before it re-entered read
569 mode. */
570static void jedec_flash_failed(unsigned char code)
571{
572 /* Bit 5 being high indicates that there was an internal device
573 failure, erasure time limits exceeded or something */
574 if ((code & (1 << 5)) != 0)
575 {
576 printk("mtd: Internal Flash failure\n");
577 return;
578 }
579 printk("mtd: Programming didn't take\n");
580}
581
582/* This uses the erasure function described in the AMD Flash Handbook;
583   it will work only for flashes with a fixed sector size. Flashes with
584   a selection of sector sizes (i.e. the AMD Am29F800B) will need a
585   different routine. This routine tries to parallelize erasing multiple
586   chips/sectors where possible */
587static int flash_erase(struct mtd_info *mtd, struct erase_info *instr)
588{
589 // Does IO to the currently selected chip
590 #define flread(x) map_read8(map,chip->base+((x)<<chip->addrshift))
591 #define flwrite(v,x) map_write8(map,v,chip->base+((x)<<chip->addrshift))
592
593 unsigned long Time = 0;
594 unsigned long NoTime = 0;
595 unsigned long start = instr->addr, len = instr->len;
596 unsigned int I;
597 struct map_info *map = mtd->priv;
598 struct jedec_private *priv = map->fldrv_priv;
599
600 // Verify the arguments..
601 if (start + len > mtd->size ||
602 (start % mtd->erasesize) != 0 ||
603 (len % mtd->erasesize) != 0 ||
604 (len/mtd->erasesize) == 0)
605 return -EINVAL;
606
607 jedec_flash_chip_scan(priv,start,len);
608
609 // Start the erase sequence on each chip
610   for (I = 0; I < MAX_JEDEC_CHIPS && priv->chips[I].jedec != 0; I++)
611 {
612 unsigned long off;
613 struct jedec_flash_chip *chip = priv->chips + I;
614
615 if (chip->length == 0)
616 continue;
617
618 if (chip->start + chip->length > chip->size)
619 {
620 printk("DIE\n");
621 return -EIO;
622 }
623
624 flwrite(0xF0,chip->start + 0x555);
625 flwrite(0xAA,chip->start + 0x555);
626 flwrite(0x55,chip->start + 0x2AA);
627 flwrite(0x80,chip->start + 0x555);
628 flwrite(0xAA,chip->start + 0x555);
629 flwrite(0x55,chip->start + 0x2AA);
630
631 /* Once we start selecting the erase sectors the delay between each
632 command must not exceed 50us or it will immediately start erasing
633 and ignore the other sectors */
634 for (off = 0; off < len; off += chip->sectorsize)
635 {
636 // Check to make sure we didn't timeout
637 flwrite(0x30,chip->start + off);
638 if (off == 0)
639 continue;
640 if ((flread(chip->start + off) & (1 << 3)) != 0)
641 {
642 printk("mtd: Ack! We timed out the erase timer!\n");
643 return -EIO;
644 }
645 }
646 }
647
648 /* We could split this into a timer routine and return early, performing
649      background erasure... Maybe later if the need warrants */
650
651   /* Poll the flash for erasure completion; specs say this can take as long
652      as 480 seconds to do all the sectors (for a 2 meg flash).
653      Erasure time is dependent on chip age, temperature and wear. */
654
655 /* This being a generic routine assumes a 32 bit bus. It does read32s
656      and bundles interleaved chips into the same grouping. This will work
657 for all bus widths */
658 Time = 0;
659 NoTime = 0;
660   for (I = 0; I < MAX_JEDEC_CHIPS && priv->chips[I].jedec != 0; I++)
661 {
662 struct jedec_flash_chip *chip = priv->chips + I;
663 unsigned long off = 0;
664 unsigned todo[4] = {0,0,0,0};
665 unsigned todo_left = 0;
666 unsigned J;
667
668 if (chip->length == 0)
669 continue;
670
671      /* Find all chips in this data line; realistically this is all
672	 or nothing up to the interleave count */
673      for (J = 0; J < MAX_JEDEC_CHIPS && priv->chips[J].jedec != 0; J++)
674 {
675 if ((priv->chips[J].base & (~((1<<chip->addrshift)-1))) ==
676 (chip->base & (~((1<<chip->addrshift)-1))))
677 {
678 todo_left++;
679 todo[priv->chips[J].base & ((1<<chip->addrshift)-1)] = 1;
680 }
681 }
682
683 /* printk("todo: %x %x %x %x\n",(short)todo[0],(short)todo[1],
684 (short)todo[2],(short)todo[3]);
685 */
686 while (1)
687 {
688 __u32 Last[4];
689 unsigned long Count = 0;
690
691	 /* During erase bit 7 is held low and bit 6 toggles; we watch this.
692	    Should it stop toggling or go high then the erase is complete, or
693	    this is not really flash ;> (see the sketch after this function) */
694 switch (map->buswidth) {
695 case 1:
696 Last[0] = map_read8(map,(chip->base >> chip->addrshift) + chip->start + off);
697 Last[1] = map_read8(map,(chip->base >> chip->addrshift) + chip->start + off);
698 Last[2] = map_read8(map,(chip->base >> chip->addrshift) + chip->start + off);
699 break;
700 case 2:
701 Last[0] = map_read16(map,(chip->base >> chip->addrshift) + chip->start + off);
702 Last[1] = map_read16(map,(chip->base >> chip->addrshift) + chip->start + off);
703 Last[2] = map_read16(map,(chip->base >> chip->addrshift) + chip->start + off);
704 break;
705	    case 4:
706 Last[0] = map_read32(map,(chip->base >> chip->addrshift) + chip->start + off);
707 Last[1] = map_read32(map,(chip->base >> chip->addrshift) + chip->start + off);
708 Last[2] = map_read32(map,(chip->base >> chip->addrshift) + chip->start + off);
709 break;
710 }
711 Count = 3;
712 while (todo_left != 0)
713 {
714 for (J = 0; J != 4; J++)
715 {
716 __u8 Byte1 = (Last[(Count-1)%4] >> (J*8)) & 0xFF;
717 __u8 Byte2 = (Last[(Count-2)%4] >> (J*8)) & 0xFF;
718 __u8 Byte3 = (Last[(Count-3)%4] >> (J*8)) & 0xFF;
719 if (todo[J] == 0)
720 continue;
721
722 if ((Byte1 & (1 << 7)) == 0 && Byte1 != Byte2)
723 {
724// printk("Check %x %x %x\n",(short)J,(short)Byte1,(short)Byte2);
725 continue;
726 }
727
728 if (Byte1 == Byte2)
729 {
730 jedec_flash_failed(Byte3);
731 return -EIO;
732 }
733
734 todo[J] = 0;
735 todo_left--;
736 }
737
738/* if (NoTime == 0)
739 Time += HZ/10 - schedule_timeout(HZ/10);*/
740 NoTime = 0;
741
742 switch (map->buswidth) {
743 case 1:
744 Last[Count % 4] = map_read8(map,(chip->base >> chip->addrshift) + chip->start + off);
745 break;
746 case 2:
747 Last[Count % 4] = map_read16(map,(chip->base >> chip->addrshift) + chip->start + off);
748 break;
749 case 4:
750 Last[Count % 4] = map_read32(map,(chip->base >> chip->addrshift) + chip->start + off);
751 break;
752 }
753 Count++;
754
755/* // Count time, max of 15s per sector (according to AMD)
756 if (Time > 15*len/mtd->erasesize*HZ)
757 {
758 printk("mtd: Flash Erase Timed out\n");
759 return -EIO;
760 } */
761 }
762
763 // Skip to the next chip if we used chip erase
764 if (chip->length == chip->size)
765 off = chip->size;
766 else
767 off += chip->sectorsize;
768
769 if (off >= chip->length)
770 break;
771 NoTime = 1;
772 }
773
774      for (J = 0; J < MAX_JEDEC_CHIPS && priv->chips[J].jedec != 0; J++)
775 {
776 if ((priv->chips[J].base & (~((1<<chip->addrshift)-1))) ==
777 (chip->base & (~((1<<chip->addrshift)-1))))
778 priv->chips[J].length = 0;
779 }
780 }
781
782 //printk("done\n");
783 instr->state = MTD_ERASE_DONE;
784 mtd_erase_callback(instr);
785 return 0;
786
787 #undef flread
788 #undef flwrite
789}
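
/*
 * Editor's note: an illustrative sketch (not from the original driver) of
 * the DQ6 "toggle bit" polling used by flash_erase() above.  While an
 * embedded erase or program runs, bit 6 of the status flips on every read;
 * two consecutive equal reads mean the operation has ended.  read_status
 * is a hypothetical accessor standing in for map_read8().
 */
static int wait_toggle_done(unsigned char (*read_status)(void), long max_polls)
{
	unsigned char a = read_status();
	unsigned char b = read_status();

	while (a != b && --max_polls > 0) {	/* DQ6 still toggling */
		a = b;
		b = read_status();
	}
	return (a == b) ? 0 : -1;		/* 0 = settled, -1 = timed out */
}

/* tiny self-test with a fake device that toggles DQ6 a few times */
static int fake_polls;
static unsigned char fake_status(void)
{
	return (fake_polls++ < 3) ? (fake_polls & 1) << 6 : 0x80;
}

int main(void)
{
	return wait_toggle_done(fake_status, 1000) == 0 ? 0 : 1;
}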
790
791/* This is the simple flash writing function. It writes to every byte, in
792 sequence. It takes care of how to properly address the flash if
793   the flash is interleaved. It can only be used if all the chips in the
794 array are identical!*/
795static int flash_write(struct mtd_info *mtd, loff_t start, size_t len,
796 size_t *retlen, const u_char *buf)
797{
798 /* Does IO to the currently selected chip. It takes the bank addressing
799 base (which is divisible by the chip size) adds the necessary lower bits
800 of addrshift (interleave index) and then adds the control register index. */
801 #define flread(x) map_read8(map,base+(off&((1<<chip->addrshift)-1))+((x)<<chip->addrshift))
802 #define flwrite(v,x) map_write8(map,v,base+(off&((1<<chip->addrshift)-1))+((x)<<chip->addrshift))
803
804 struct map_info *map = mtd->priv;
805 struct jedec_private *priv = map->fldrv_priv;
806 unsigned long base;
807 unsigned long off;
808 size_t save_len = len;
809
810 if (start + len > mtd->size)
811 return -EIO;
812
813 //printk("Here");
814
815 //printk("flash_write: start is %x, len is %x\n",start,(unsigned long)len);
816 while (len != 0)
817 {
818 struct jedec_flash_chip *chip = priv->chips;
819 unsigned long bank;
820 unsigned long boffset;
821
822 // Compute the base of the flash.
823 off = ((unsigned long)start) % (chip->size << chip->addrshift);
824 base = start - off;
825
826 // Perform banked addressing translation.
827 bank = base & (~(priv->bank_fill[0]-1));
828 boffset = base & (priv->bank_fill[0]-1);
829 bank = (bank/priv->bank_fill[0])*my_bank_size;
830 base = bank + boffset;
831
832 // printk("Flasing %X %X %X\n",base,chip->size,len);
833 // printk("off is %x, compare with %x\n",off,chip->size << chip->addrshift);
834
835 // Loop over this page
836 for (; off != (chip->size << chip->addrshift) && len != 0; start++, len--, off++,buf++)
837 {
838 unsigned char oldbyte = map_read8(map,base+off);
839 unsigned char Last[4];
840 unsigned long Count = 0;
841
842 if (oldbyte == *buf) {
843 // printk("oldbyte and *buf is %x,len is %x\n",oldbyte,len);
844 continue;
845 }
846 if (((~oldbyte) & *buf) != 0)
847 printk("mtd: warn: Trying to set a 0 to a 1\n");
848
849 // Write
850 flwrite(0xAA,0x555);
851 flwrite(0x55,0x2AA);
852 flwrite(0xA0,0x555);
853 map_write8(map,*buf,base + off);
854 Last[0] = map_read8(map,base + off);
855 Last[1] = map_read8(map,base + off);
856 Last[2] = map_read8(map,base + off);
857
858 /* Wait for the flash to finish the operation. We store the last 4
859 status bytes that have been retrieved so we can determine why
860 it failed. The toggle bits keep toggling when there is a
861 failure */
862 for (Count = 3; Last[(Count - 1) % 4] != Last[(Count - 2) % 4] &&
863 Count < 10000; Count++)
864 Last[Count % 4] = map_read8(map,base + off);
865 if (Last[(Count - 1) % 4] != *buf)
866 {
867 jedec_flash_failed(Last[(Count - 3) % 4]);
868 return -EIO;
869 }
870 }
871 }
872 *retlen = save_len;
873 return 0;
874}
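
/*
 * Editor's note: illustrative only.  NOR programming can clear bits
 * (1 -> 0) but never set them, which is what the "Trying to set a 0 to
 * a 1" warning in flash_write() above guards against; after a program
 * pulse a cell holds the AND of its old and new data.
 */
#include <assert.h>

int main(void)
{
	unsigned char oldbyte = 0xF0, newbyte = 0x0F;

	assert(((~oldbyte) & newbyte) != 0);	/* the driver's warning test */
	assert((oldbyte & newbyte) != newbyte);	/* the write would not stick */
	return 0;
}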
875
876/* This is used to enhance the speed of the erase routine;
877   when things are being done to multiple chips it is possible to
878   parallelize the operations, and full-memory erases of multi-chip
879   memories particularly benefit */
880static void jedec_flash_chip_scan(struct jedec_private *priv,unsigned long start,
881 unsigned long len)
882{
883 unsigned int I;
884
885 // Zero the records
886   for (I = 0; I < MAX_JEDEC_CHIPS && priv->chips[I].jedec != 0; I++)
887 priv->chips[I].start = priv->chips[I].length = 0;
888
889 // Intersect the region with each chip
890   for (I = 0; I < MAX_JEDEC_CHIPS && priv->chips[I].jedec != 0; I++)
891 {
892 struct jedec_flash_chip *chip = priv->chips + I;
893 unsigned long ByteStart;
894 unsigned long ChipEndByte = chip->offset + (chip->size << chip->addrshift);
895
896 // End is before this chip or the start is after it
897 if (start+len < chip->offset ||
898 ChipEndByte - (1 << chip->addrshift) < start)
899 continue;
900
901 if (start < chip->offset)
902 {
903 ByteStart = chip->offset;
904 chip->start = 0;
905 }
906 else
907 {
908 chip->start = (start - chip->offset + (1 << chip->addrshift)-1) >> chip->addrshift;
909 ByteStart = start;
910 }
911
912 if (start + len >= ChipEndByte)
913 chip->length = (ChipEndByte - ByteStart) >> chip->addrshift;
914 else
915 chip->length = (start + len - ByteStart + (1 << chip->addrshift)-1) >> chip->addrshift;
916 }
917}
918
919int __init jedec_init(void)
920{
921 register_mtd_chip_driver(&jedec_chipdrv);
922 return 0;
923}
924
925static void __exit jedec_exit(void)
926{
927 unregister_mtd_chip_driver(&jedec_chipdrv);
928}
929
930module_init(jedec_init);
931module_exit(jedec_exit);
932
933MODULE_LICENSE("GPL");
934MODULE_AUTHOR("Jason Gunthorpe <jgg@deltatee.com> et al.");
935MODULE_DESCRIPTION("Old MTD chip driver for JEDEC-compliant flash chips");
diff --git a/drivers/mtd/chips/sharp.c b/drivers/mtd/chips/sharp.c
deleted file mode 100644
index c9cd3d21ccfa..000000000000
--- a/drivers/mtd/chips/sharp.c
+++ /dev/null
@@ -1,601 +0,0 @@
1/*
2 * MTD chip driver for pre-CFI Sharp flash chips
3 *
4 * Copyright 2000,2001 David A. Schleef <ds@schleef.org>
5 * 2000,2001 Lineo, Inc.
6 *
7 * $Id: sharp.c,v 1.17 2005/11/29 14:28:28 gleixner Exp $
8 *
9 * Devices supported:
10 * LH28F016SCT Symmetrical block flash memory, 2Mx8
11 * LH28F008SCT Symmetrical block flash memory, 1Mx8
12 *
13 * Documentation:
14 * http://www.sharpmeg.com/datasheets/memic/flashcmp/
15 * http://www.sharpmeg.com/datasheets/memic/flashcmp/01symf/16m/016sctl9.pdf
16 * 016sctl9.pdf
17 *
18 * Limitations:
19 * This driver only supports 4x1 arrangement of chips.
20 * Not tested on anything but PowerPC.
21 */
22
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/types.h>
26#include <linux/sched.h>
27#include <linux/errno.h>
28#include <linux/interrupt.h>
29#include <linux/mtd/map.h>
30#include <linux/mtd/mtd.h>
31#include <linux/mtd/cfi.h>
32#include <linux/delay.h>
33#include <linux/init.h>
34#include <linux/slab.h>
35
36#define CMD_RESET 0xffffffff
37#define CMD_READ_ID 0x90909090
38#define CMD_READ_STATUS 0x70707070
39#define CMD_CLEAR_STATUS 0x50505050
40#define CMD_BLOCK_ERASE_1 0x20202020
41#define CMD_BLOCK_ERASE_2 0xd0d0d0d0
42#define CMD_BYTE_WRITE 0x40404040
43#define CMD_SUSPEND 0xb0b0b0b0
44#define CMD_RESUME 0xd0d0d0d0
45#define CMD_SET_BLOCK_LOCK_1 0x60606060
46#define CMD_SET_BLOCK_LOCK_2 0x01010101
47#define CMD_SET_MASTER_LOCK_1 0x60606060
48#define CMD_SET_MASTER_LOCK_2 0xf1f1f1f1
49#define CMD_CLEAR_BLOCK_LOCKS_1 0x60606060
50#define CMD_CLEAR_BLOCK_LOCKS_2 0xd0d0d0d0
51
52#define SR_READY 0x80808080 // 1 = ready
53#define SR_ERASE_SUSPEND 0x40404040 // 1 = block erase suspended
54#define SR_ERROR_ERASE 0x20202020 // 1 = error in block erase or clear lock bits
55#define SR_ERROR_WRITE 0x10101010 // 1 = error in byte write or set lock bit
56#define SR_VPP 0x08080808 // 1 = Vpp is low
57#define SR_WRITE_SUSPEND 0x04040404 // 1 = byte write suspended
58#define SR_PROTECT 0x02020202 // 1 = lock bit set
59#define SR_RESERVED 0x01010101
60
61#define SR_ERRORS (SR_ERROR_ERASE|SR_ERROR_WRITE|SR_VPP|SR_PROTECT)
62
63/* Configuration options */
64
65#undef AUTOUNLOCK /* automatically unlocks blocks before erasing */
66
67static struct mtd_info *sharp_probe(struct map_info *);
68
69static int sharp_probe_map(struct map_info *map,struct mtd_info *mtd);
70
71static int sharp_read(struct mtd_info *mtd, loff_t from, size_t len,
72 size_t *retlen, u_char *buf);
73static int sharp_write(struct mtd_info *mtd, loff_t from, size_t len,
74 size_t *retlen, const u_char *buf);
75static int sharp_erase(struct mtd_info *mtd, struct erase_info *instr);
76static void sharp_sync(struct mtd_info *mtd);
77static int sharp_suspend(struct mtd_info *mtd);
78static void sharp_resume(struct mtd_info *mtd);
79static void sharp_destroy(struct mtd_info *mtd);
80
81static int sharp_write_oneword(struct map_info *map, struct flchip *chip,
82 unsigned long adr, __u32 datum);
83static int sharp_erase_oneblock(struct map_info *map, struct flchip *chip,
84 unsigned long adr);
85#ifdef AUTOUNLOCK
86static void sharp_unlock_oneblock(struct map_info *map, struct flchip *chip,
87 unsigned long adr);
88#endif
89
90
91struct sharp_info{
92 struct flchip *chip;
93 int bogus;
94 int chipshift;
95 int numchips;
96 struct flchip chips[1];
97};
98
99static void sharp_destroy(struct mtd_info *mtd);
100
101static struct mtd_chip_driver sharp_chipdrv = {
102 .probe = sharp_probe,
103 .destroy = sharp_destroy,
104 .name = "sharp",
105 .module = THIS_MODULE
106};
107
108
109static struct mtd_info *sharp_probe(struct map_info *map)
110{
111 struct mtd_info *mtd = NULL;
112 struct sharp_info *sharp = NULL;
113 int width;
114
115 mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
116 if(!mtd)
117 return NULL;
118
119 sharp = kzalloc(sizeof(*sharp), GFP_KERNEL);
120 if(!sharp) {
121 kfree(mtd);
122 return NULL;
123 }
124
125 width = sharp_probe_map(map,mtd);
126 if(!width){
127 kfree(mtd);
128 kfree(sharp);
129 return NULL;
130 }
131
132 mtd->priv = map;
133 mtd->type = MTD_NORFLASH;
134 mtd->erase = sharp_erase;
135 mtd->read = sharp_read;
136 mtd->write = sharp_write;
137 mtd->sync = sharp_sync;
138 mtd->suspend = sharp_suspend;
139 mtd->resume = sharp_resume;
140 mtd->flags = MTD_CAP_NORFLASH;
141 mtd->writesize = 1;
142 mtd->name = map->name;
143
144 sharp->chipshift = 23;
145 sharp->numchips = 1;
146 sharp->chips[0].start = 0;
147 sharp->chips[0].state = FL_READY;
148 sharp->chips[0].mutex = &sharp->chips[0]._spinlock;
149 sharp->chips[0].word_write_time = 0;
150 init_waitqueue_head(&sharp->chips[0].wq);
151 spin_lock_init(&sharp->chips[0]._spinlock);
152
153 map->fldrv = &sharp_chipdrv;
154 map->fldrv_priv = sharp;
155
156 __module_get(THIS_MODULE);
157 return mtd;
158}
159
160static inline void sharp_send_cmd(struct map_info *map, unsigned long cmd, unsigned long adr)
161{
162 map_word map_cmd;
163 map_cmd.x[0] = cmd;
164 map_write(map, map_cmd, adr);
165}
166
167static int sharp_probe_map(struct map_info *map,struct mtd_info *mtd)
168{
169 map_word tmp, read0, read4;
170 unsigned long base = 0;
171 int width = 4;
172
173 tmp = map_read(map, base+0);
174
175 sharp_send_cmd(map, CMD_READ_ID, base+0);
176
177 read0 = map_read(map, base+0);
178 read4 = map_read(map, base+4);
179 if(read0.x[0] == 0x89898989){
180 printk("Looks like sharp flash\n");
181 switch(read4.x[0]){
182 case 0xaaaaaaaa:
183 case 0xa0a0a0a0:
184 /* aa - LH28F016SCT-L95 2Mx8, 32 64k blocks*/
185 /* a0 - LH28F016SCT-Z4 2Mx8, 32 64k blocks*/
186 mtd->erasesize = 0x10000 * width;
187 mtd->size = 0x200000 * width;
188 return width;
189 case 0xa6a6a6a6:
190 /* a6 - LH28F008SCT-L12 1Mx8, 16 64k blocks*/
191 /* a6 - LH28F008SCR-L85 1Mx8, 16 64k blocks*/
192 mtd->erasesize = 0x10000 * width;
193 mtd->size = 0x100000 * width;
194 return width;
195#if 0
196 case 0x00000000: /* unknown */
197 /* XX - LH28F004SCT 512kx8, 8 64k blocks*/
198 mtd->erasesize = 0x10000 * width;
199 mtd->size = 0x80000 * width;
200 return width;
201#endif
202 default:
203 printk("Sort-of looks like sharp flash, 0x%08lx 0x%08lx\n",
204 read0.x[0], read4.x[0]);
205 }
206 }else if((map_read(map, base+0).x[0] == CMD_READ_ID)){
207 /* RAM, probably */
208 printk("Looks like RAM\n");
209 map_write(map, tmp, base+0);
210 }else{
211 printk("Doesn't look like sharp flash, 0x%08lx 0x%08lx\n",
212 read0.x[0], read4.x[0]);
213 }
214
215 return 0;
216}
217
218/* This function returns with the chip->mutex lock held. */
219static int sharp_wait(struct map_info *map, struct flchip *chip)
220{
221 int i;
222 map_word status;
223 unsigned long timeo = jiffies + HZ;
224 DECLARE_WAITQUEUE(wait, current);
225 int adr = 0;
226
227retry:
228 spin_lock_bh(chip->mutex);
229
230 switch(chip->state){
231 case FL_READY:
232 sharp_send_cmd(map, CMD_READ_STATUS, adr);
233		chip->state = FL_STATUS;	/* fall through */
234 case FL_STATUS:
235 for(i=0;i<100;i++){
236 status = map_read(map, adr);
237 if((status.x[0] & SR_READY)==SR_READY)
238 break;
239 udelay(1);
240 }
241 break;
242 default:
243 printk("Waiting for chip\n");
244
245 set_current_state(TASK_INTERRUPTIBLE);
246 add_wait_queue(&chip->wq, &wait);
247
248 spin_unlock_bh(chip->mutex);
249
250 schedule();
251 remove_wait_queue(&chip->wq, &wait);
252
253 if(signal_pending(current))
254 return -EINTR;
255
256 timeo = jiffies + HZ;
257
258 goto retry;
259 }
260
261 sharp_send_cmd(map, CMD_RESET, adr);
262
263 chip->state = FL_READY;
264
265 return 0;
266}
267
268static void sharp_release(struct flchip *chip)
269{
270 wake_up(&chip->wq);
271 spin_unlock_bh(chip->mutex);
272}
273
274static int sharp_read(struct mtd_info *mtd, loff_t from, size_t len,
275 size_t *retlen, u_char *buf)
276{
277 struct map_info *map = mtd->priv;
278 struct sharp_info *sharp = map->fldrv_priv;
279 int chipnum;
280 int ret = 0;
281 int ofs = 0;
282
283 chipnum = (from >> sharp->chipshift);
284 ofs = from & ((1 << sharp->chipshift)-1);
285
286 *retlen = 0;
287
288 while(len){
289 unsigned long thislen;
290
291 if(chipnum>=sharp->numchips)
292 break;
293
294 thislen = len;
295 if(ofs+thislen >= (1<<sharp->chipshift))
296 thislen = (1<<sharp->chipshift) - ofs;
297
298 ret = sharp_wait(map,&sharp->chips[chipnum]);
299 if(ret<0)
300 break;
301
302 map_copy_from(map,buf,ofs,thislen);
303
304 sharp_release(&sharp->chips[chipnum]);
305
306 *retlen += thislen;
307 len -= thislen;
308 buf += thislen;
309
310 ofs = 0;
311 chipnum++;
312 }
313 return ret;
314}
315
316static int sharp_write(struct mtd_info *mtd, loff_t to, size_t len,
317 size_t *retlen, const u_char *buf)
318{
319 struct map_info *map = mtd->priv;
320 struct sharp_info *sharp = map->fldrv_priv;
321 int ret = 0;
322 int i,j;
323 int chipnum;
324 unsigned long ofs;
325 union { u32 l; unsigned char uc[4]; } tbuf;
326
327 *retlen = 0;
328
329 while(len){
330 tbuf.l = 0xffffffff;
331 chipnum = to >> sharp->chipshift;
332 ofs = to & ((1<<sharp->chipshift)-1);
333
334 j=0;
335 for(i=ofs&3;i<4 && len;i++){
336 tbuf.uc[i] = *buf;
337 buf++;
338 to++;
339 len--;
340 j++;
341 }
342		ret = sharp_write_oneword(map, &sharp->chips[chipnum], ofs&~3, tbuf.l);
343 if(ret<0)
344 return ret;
345 (*retlen)+=j;
346 }
347
348 return 0;
349}
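
/*
 * Editor's note: a standalone sketch (not from the original driver) of the
 * word-assembly trick in sharp_write() above.  The 32-bit buffer is
 * pre-filled with 0xFF because programming 0xFF over NOR cells leaves them
 * unchanged, so a partial word can be written without a read-modify-write.
 * pack_partial() is a hypothetical helper; it assumes first + n <= 4.
 */
#include <assert.h>
#include <stdint.h>
#include <string.h>

static uint32_t pack_partial(const unsigned char *src, int first, int n)
{
	union { uint32_t l; unsigned char uc[4]; } t;

	t.l = 0xffffffff;		/* 0xFF lanes are no-ops on NOR */
	memcpy(&t.uc[first], src, n);	/* place bytes at their byte lane */
	return t.l;
}

int main(void)
{
	const unsigned char src[2] = { 0xAB, 0xCD };
	uint32_t w = pack_partial(src, 1, 2);	/* mirrors the tbuf loop above */
	unsigned char out[4];

	memcpy(out, &w, 4);
	assert(out[0] == 0xFF && out[1] == 0xAB && out[2] == 0xCD && out[3] == 0xFF);
	return 0;
}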
350
351static int sharp_write_oneword(struct map_info *map, struct flchip *chip,
352 unsigned long adr, __u32 datum)
353{
354 int ret;
355 int timeo;
356 int try;
357 int i;
358 map_word data, status;
359
360 status.x[0] = 0;
361 ret = sharp_wait(map,chip);
362
363 for(try=0;try<10;try++){
364 sharp_send_cmd(map, CMD_BYTE_WRITE, adr);
365 /* cpu_to_le32 -> hack to fix the writel be->le conversion */
366 data.x[0] = cpu_to_le32(datum);
367 map_write(map, data, adr);
368
369 chip->state = FL_WRITING;
370
371 timeo = jiffies + (HZ/2);
372
373 sharp_send_cmd(map, CMD_READ_STATUS, adr);
374 for(i=0;i<100;i++){
375 status = map_read(map, adr);
376 if((status.x[0] & SR_READY) == SR_READY)
377 break;
378 }
379 if(i==100){
380 printk("sharp: timed out writing\n");
381 }
382
383 if(!(status.x[0] & SR_ERRORS))
384 break;
385
386 printk("sharp: error writing byte at addr=%08lx status=%08lx\n", adr, status.x[0]);
387
388 sharp_send_cmd(map, CMD_CLEAR_STATUS, adr);
389 }
390 sharp_send_cmd(map, CMD_RESET, adr);
391 chip->state = FL_READY;
392
393 wake_up(&chip->wq);
394 spin_unlock_bh(chip->mutex);
395
396 return 0;
397}
398
399static int sharp_erase(struct mtd_info *mtd, struct erase_info *instr)
400{
401 struct map_info *map = mtd->priv;
402 struct sharp_info *sharp = map->fldrv_priv;
403 unsigned long adr,len;
404 int chipnum, ret=0;
405
406//printk("sharp_erase()\n");
407 if(instr->addr & (mtd->erasesize - 1))
408 return -EINVAL;
409 if(instr->len & (mtd->erasesize - 1))
410 return -EINVAL;
411 if(instr->len + instr->addr > mtd->size)
412 return -EINVAL;
413
414 chipnum = instr->addr >> sharp->chipshift;
415 adr = instr->addr & ((1<<sharp->chipshift)-1);
416 len = instr->len;
417
418 while(len){
419 ret = sharp_erase_oneblock(map, &sharp->chips[chipnum], adr);
420 if(ret)return ret;
421
422 adr += mtd->erasesize;
423 len -= mtd->erasesize;
424 if(adr >> sharp->chipshift){
425 adr = 0;
426 chipnum++;
427 if(chipnum>=sharp->numchips)
428 break;
429 }
430 }
431
432 instr->state = MTD_ERASE_DONE;
433 mtd_erase_callback(instr);
434
435 return 0;
436}
437
438static int sharp_do_wait_for_ready(struct map_info *map, struct flchip *chip,
439 unsigned long adr)
440{
441 int ret;
442 unsigned long timeo;
443 map_word status;
444 DECLARE_WAITQUEUE(wait, current);
445
446 sharp_send_cmd(map, CMD_READ_STATUS, adr);
447 status = map_read(map, adr);
448
449 timeo = jiffies + HZ;
450
451 while(time_before(jiffies, timeo)){
452 sharp_send_cmd(map, CMD_READ_STATUS, adr);
453 status = map_read(map, adr);
454 if((status.x[0] & SR_READY)==SR_READY){
455 ret = 0;
456 goto out;
457 }
458 set_current_state(TASK_INTERRUPTIBLE);
459 add_wait_queue(&chip->wq, &wait);
460
461 //spin_unlock_bh(chip->mutex);
462
463 schedule_timeout(1);
464 schedule();
465 remove_wait_queue(&chip->wq, &wait);
466
467 //spin_lock_bh(chip->mutex);
468
469 if (signal_pending(current)){
470 ret = -EINTR;
471 goto out;
472 }
473
474 }
475 ret = -ETIME;
476out:
477 return ret;
478}
479
480static int sharp_erase_oneblock(struct map_info *map, struct flchip *chip,
481 unsigned long adr)
482{
483 int ret;
484 //int timeo;
485 map_word status;
486 //int i;
487
488//printk("sharp_erase_oneblock()\n");
489
490#ifdef AUTOUNLOCK
491 /* This seems like a good place to do an unlock */
492 sharp_unlock_oneblock(map,chip,adr);
493#endif
494
495 sharp_send_cmd(map, CMD_BLOCK_ERASE_1, adr);
496 sharp_send_cmd(map, CMD_BLOCK_ERASE_2, adr);
497
498 chip->state = FL_ERASING;
499
500 ret = sharp_do_wait_for_ready(map,chip,adr);
501 if(ret<0)return ret;
502
503 sharp_send_cmd(map, CMD_READ_STATUS, adr);
504 status = map_read(map, adr);
505
506 if(!(status.x[0] & SR_ERRORS)){
507 sharp_send_cmd(map, CMD_RESET, adr);
508 chip->state = FL_READY;
509 //spin_unlock_bh(chip->mutex);
510 return 0;
511 }
512
513 printk("sharp: error erasing block at addr=%08lx status=%08lx\n", adr, status.x[0]);
514 sharp_send_cmd(map, CMD_CLEAR_STATUS, adr);
515
516 //spin_unlock_bh(chip->mutex);
517
518 return -EIO;
519}
520
521#ifdef AUTOUNLOCK
522static void sharp_unlock_oneblock(struct map_info *map, struct flchip *chip,
523 unsigned long adr)
524{
525 int i;
526 map_word status;
527
528 sharp_send_cmd(map, CMD_CLEAR_BLOCK_LOCKS_1, adr);
529 sharp_send_cmd(map, CMD_CLEAR_BLOCK_LOCKS_2, adr);
530
531 udelay(100);
532
533 status = map_read(map, adr);
534 printk("status=%08lx\n", status.x[0]);
535
536 for(i=0;i<1000;i++){
537 //sharp_send_cmd(map, CMD_READ_STATUS, adr);
538 status = map_read(map, adr);
539 if((status.x[0] & SR_READY) == SR_READY)
540 break;
541 udelay(100);
542 }
543 if(i==1000){
544 printk("sharp: timed out unlocking block\n");
545 }
546
547 if(!(status.x[0] & SR_ERRORS)){
548 sharp_send_cmd(map, CMD_RESET, adr);
549 chip->state = FL_READY;
550 return;
551 }
552
553 printk("sharp: error unlocking block at addr=%08lx status=%08lx\n", adr, status.x[0]);
554 sharp_send_cmd(map, CMD_CLEAR_STATUS, adr);
555}
556#endif
557
558static void sharp_sync(struct mtd_info *mtd)
559{
560 //printk("sharp_sync()\n");
561}
562
563static int sharp_suspend(struct mtd_info *mtd)
564{
565 printk("sharp_suspend()\n");
566 return -EINVAL;
567}
568
569static void sharp_resume(struct mtd_info *mtd)
570{
571 printk("sharp_resume()\n");
572
573}
574
575static void sharp_destroy(struct mtd_info *mtd)
576{
577 printk("sharp_destroy()\n");
578
579}
580
581static int __init sharp_probe_init(void)
582{
583 printk("MTD Sharp chip driver <ds@lineo.com>\n");
584
585 register_mtd_chip_driver(&sharp_chipdrv);
586
587 return 0;
588}
589
590static void __exit sharp_probe_exit(void)
591{
592 unregister_mtd_chip_driver(&sharp_chipdrv);
593}
594
595module_init(sharp_probe_init);
596module_exit(sharp_probe_exit);
597
598
599MODULE_LICENSE("GPL");
600MODULE_AUTHOR("David Schleef <ds@schleef.org>");
601MODULE_DESCRIPTION("Old MTD chip driver for pre-CFI Sharp flash chips");
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index fc4cc8ba9e29..be4b9948c762 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -373,7 +373,7 @@ static inline void kill_final_newline(char *str)
373 373
374#ifndef MODULE 374#ifndef MODULE
375static int block2mtd_init_called = 0; 375static int block2mtd_init_called = 0;
376static __initdata char block2mtd_paramline[80 + 12]; /* 80 for device, 12 for erase size */ 376static char block2mtd_paramline[80 + 12]; /* 80 for device, 12 for erase size */
377#endif 377#endif
378 378
379 379
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index d990d8141ef5..b665e4ac2208 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -60,7 +60,7 @@ config MTD_PHYSMAP_BANKWIDTH
 	  (i.e., run-time calling physmap_configure()).
 
 config MTD_PHYSMAP_OF
-	tristate "Flash device in physical memory map based on OF descirption"
+	tristate "Flash device in physical memory map based on OF description"
 	depends on PPC_OF && (MTD_CFI || MTD_JEDECPROBE || MTD_ROM)
 	help
 	  This provides a 'mapping' driver which allows the NOR Flash and
@@ -358,22 +358,6 @@ config MTD_CFI_FLAGADM
 	  Mapping for the Flaga digital module. If you don't have one, ignore
 	  this setting.
 
-config MTD_BEECH
-	tristate "CFI Flash device mapped on IBM 405LP Beech"
-	depends on MTD_CFI && BEECH
-	help
-	  This enables access routines for the flash chips on the IBM
-	  405LP Beech board. If you have one of these boards and would like
-	  to use the flash chips on it, say 'Y'.
-
-config MTD_ARCTIC
-	tristate "CFI Flash device mapped on IBM 405LP Arctic"
-	depends on MTD_CFI && ARCTIC2
-	help
-	  This enables access routines for the flash chips on the IBM 405LP
-	  Arctic board. If you have one of these boards and would like to
-	  use the flash chips on it, say 'Y'.
-
 config MTD_WALNUT
 	tristate "Flash device mapped on IBM 405GP Walnut"
 	depends on MTD_JEDECPROBE && WALNUT
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index de036c5e6139..3acbb5d01ca4 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -58,8 +58,6 @@ obj-$(CONFIG_MTD_NETtel) += nettel.o
 obj-$(CONFIG_MTD_SCB2_FLASH)	+= scb2_flash.o
 obj-$(CONFIG_MTD_EBONY)		+= ebony.o
 obj-$(CONFIG_MTD_OCOTEA)	+= ocotea.o
-obj-$(CONFIG_MTD_BEECH)		+= beech-mtd.o
-obj-$(CONFIG_MTD_ARCTIC)	+= arctic-mtd.o
 obj-$(CONFIG_MTD_WALNUT)	+= walnut.o
 obj-$(CONFIG_MTD_H720X)		+= h720x-flash.o
 obj-$(CONFIG_MTD_SBC8240)	+= sbc8240.o
diff --git a/drivers/mtd/maps/arctic-mtd.c b/drivers/mtd/maps/arctic-mtd.c
deleted file mode 100644
index 2cc902436275..000000000000
--- a/drivers/mtd/maps/arctic-mtd.c
+++ /dev/null
@@ -1,145 +0,0 @@
1/*
2 * $Id: arctic-mtd.c,v 1.14 2005/11/07 11:14:26 gleixner Exp $
3 *
4 * drivers/mtd/maps/arctic-mtd.c MTD mappings and partition tables for
5 * IBM 405LP Arctic boards.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * Copyright (C) 2002, International Business Machines Corporation
22 * All Rights Reserved.
23 *
24 * Bishop Brock
25 * IBM Research, Austin Center for Low-Power Computing
26 * bcbrock@us.ibm.com
27 * March 2002
28 *
29 * modified for Arctic by,
30 * David Gibson
31 * IBM OzLabs, Canberra, Australia
32 * <arctic@gibson.dropbear.id.au>
33 */
34
35#include <linux/kernel.h>
36#include <linux/module.h>
37#include <linux/types.h>
38#include <linux/init.h>
39
40#include <linux/mtd/mtd.h>
41#include <linux/mtd/map.h>
42#include <linux/mtd/partitions.h>
43
44#include <asm/io.h>
45#include <asm/ibm4xx.h>
46
47/*
48 * 0 : 0xFE00 0000 - 0xFEFF FFFF : Filesystem 1 (16MiB)
49 * 1 : 0xFF00 0000 - 0xFF4F FFFF : kernel (5.12MiB)
50 * 2 : 0xFF50 0000 - 0xFFF5 FFFF : Filesystem 2 (10.624MiB) (if non-XIP)
51 * 3 : 0xFFF6 0000 - 0xFFFF FFFF : PIBS Firmware (640KiB)
52 */
53
54#define FFS1_SIZE 0x01000000 /* 16MiB */
55#define KERNEL_SIZE 0x00500000 /* 5.12MiB */
56#define FFS2_SIZE 0x00a60000 /* 10.624MiB */
57#define FIRMWARE_SIZE 0x000a0000 /* 640KiB */
58
59
60#define NAME "Arctic Linux Flash"
61#define PADDR SUBZERO_BOOTFLASH_PADDR
62#define BUSWIDTH 2
63#define SIZE SUBZERO_BOOTFLASH_SIZE
64#define PARTITIONS 4
65
66/* Flash memories on these boards are memory resources, accessed big-endian. */
67
68{
69 /* do nothing for now */
70}
71
72static struct map_info arctic_mtd_map = {
73 .name = NAME,
74 .size = SIZE,
75 .bankwidth = BUSWIDTH,
76 .phys = PADDR,
77};
78
79static struct mtd_info *arctic_mtd;
80
81static struct mtd_partition arctic_partitions[PARTITIONS] = {
82 { .name = "Filesystem",
83 .size = FFS1_SIZE,
84 .offset = 0,},
85 { .name = "Kernel",
86 .size = KERNEL_SIZE,
87 .offset = FFS1_SIZE,},
88 { .name = "Filesystem",
89 .size = FFS2_SIZE,
90 .offset = FFS1_SIZE + KERNEL_SIZE,},
91 { .name = "Firmware",
92 .size = FIRMWARE_SIZE,
93 .offset = SUBZERO_BOOTFLASH_SIZE - FIRMWARE_SIZE,},
94};
95
96static int __init
97init_arctic_mtd(void)
98{
99 int err;
100
101 printk("%s: 0x%08x at 0x%08x\n", NAME, SIZE, PADDR);
102
103 arctic_mtd_map.virt = ioremap(PADDR, SIZE);
104
105 if (!arctic_mtd_map.virt) {
106 printk("%s: failed to ioremap 0x%x\n", NAME, PADDR);
107 return -EIO;
108 }
109 simple_map_init(&arctic_mtd_map);
110
111 printk("%s: probing %d-bit flash bus\n", NAME, BUSWIDTH * 8);
112 arctic_mtd = do_map_probe("cfi_probe", &arctic_mtd_map);
113
114 if (!arctic_mtd) {
115 iounmap(arctic_mtd_map.virt);
116 return -ENXIO;
117 }
118
119 arctic_mtd->owner = THIS_MODULE;
120
121 err = add_mtd_partitions(arctic_mtd, arctic_partitions, PARTITIONS);
122 if (err) {
123 printk("%s: add_mtd_partitions failed\n", NAME);
124 iounmap(arctic_mtd_map.virt);
125 }
126
127 return err;
128}
129
130static void __exit
131cleanup_arctic_mtd(void)
132{
133 if (arctic_mtd) {
134 del_mtd_partitions(arctic_mtd);
135 map_destroy(arctic_mtd);
136 iounmap((void *) arctic_mtd_map.virt);
137 }
138}
139
140module_init(init_arctic_mtd);
141module_exit(cleanup_arctic_mtd);
142
143MODULE_LICENSE("GPL");
144MODULE_AUTHOR("David Gibson <arctic@gibson.dropbear.id.au>");
145MODULE_DESCRIPTION("MTD map and partitions for IBM 405LP Arctic boards");
diff --git a/drivers/mtd/maps/beech-mtd.c b/drivers/mtd/maps/beech-mtd.c
deleted file mode 100644
index d76d5981b863..000000000000
--- a/drivers/mtd/maps/beech-mtd.c
+++ /dev/null
@@ -1,122 +0,0 @@
1/*
2 * $Id: beech-mtd.c,v 1.11 2005/11/07 11:14:26 gleixner Exp $
3 *
4 * drivers/mtd/maps/beech-mtd.c MTD mappings and partition tables for
5 * IBM 405LP Beech boards.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * Copyright (C) 2002, International Business Machines Corporation
22 * All Rights Reserved.
23 *
24 * Bishop Brock
25 * IBM Research, Austin Center for Low-Power Computing
26 * bcbrock@us.ibm.com
27 * March 2002
28 *
29 */
30
31#include <linux/kernel.h>
32#include <linux/module.h>
33#include <linux/types.h>
34#include <linux/init.h>
35
36#include <linux/mtd/mtd.h>
37#include <linux/mtd/map.h>
38#include <linux/mtd/partitions.h>
39
40#include <asm/io.h>
41#include <asm/ibm4xx.h>
42
43#define NAME "Beech Linux Flash"
44#define PADDR BEECH_BIGFLASH_PADDR
45#define SIZE BEECH_BIGFLASH_SIZE
46#define BUSWIDTH 1
47
48/* Flash memories on these boards are memory resources, accessed big-endian. */
49
50
51static struct map_info beech_mtd_map = {
52 .name = NAME,
53 .size = SIZE,
54 .bankwidth = BUSWIDTH,
55 .phys = PADDR
56};
57
58static struct mtd_info *beech_mtd;
59
60static struct mtd_partition beech_partitions[2] = {
61 {
62 .name = "Linux Kernel",
63 .size = BEECH_KERNEL_SIZE,
64 .offset = BEECH_KERNEL_OFFSET
65 }, {
66 .name = "Free Area",
67 .size = BEECH_FREE_AREA_SIZE,
68 .offset = BEECH_FREE_AREA_OFFSET
69 }
70};
71
72static int __init
73init_beech_mtd(void)
74{
75 int err;
76
77 printk("%s: 0x%08x at 0x%08x\n", NAME, SIZE, PADDR);
78
79 beech_mtd_map.virt = ioremap(PADDR, SIZE);
80
81 if (!beech_mtd_map.virt) {
82 printk("%s: failed to ioremap 0x%x\n", NAME, PADDR);
83 return -EIO;
84 }
85
86 simple_map_init(&beech_mtd_map);
87
88 printk("%s: probing %d-bit flash bus\n", NAME, BUSWIDTH * 8);
89 beech_mtd = do_map_probe("cfi_probe", &beech_mtd_map);
90
91 if (!beech_mtd) {
92 iounmap(beech_mtd_map.virt);
93 return -ENXIO;
94 }
95
96 beech_mtd->owner = THIS_MODULE;
97
98 err = add_mtd_partitions(beech_mtd, beech_partitions, 2);
99 if (err) {
100 printk("%s: add_mtd_partitions failed\n", NAME);
101 iounmap(beech_mtd_map.virt);
102 }
103
104 return err;
105}
106
107static void __exit
108cleanup_beech_mtd(void)
109{
110 if (beech_mtd) {
111 del_mtd_partitions(beech_mtd);
112 map_destroy(beech_mtd);
113 iounmap((void *) beech_mtd_map.virt);
114 }
115}
116
117module_init(init_beech_mtd);
118module_exit(cleanup_beech_mtd);
119
120MODULE_LICENSE("GPL");
121MODULE_AUTHOR("Bishop Brock <bcbrock@us.ibm.com>");
122MODULE_DESCRIPTION("MTD map and partitions for IBM 405LP Beech boards");
diff --git a/drivers/mtd/maps/nettel.c b/drivers/mtd/maps/nettel.c
index 9f53c655af3a..7b96cd02f82b 100644
--- a/drivers/mtd/maps/nettel.c
+++ b/drivers/mtd/maps/nettel.c
@@ -358,7 +358,7 @@ int __init nettel_init(void)
 	/* Turn other PAR off so the first probe doesn't find it */
 	*intel1par = 0;
 
-	/* Probe for the the size of the first Intel flash */
+	/* Probe for the size of the first Intel flash */
 	nettel_intel_map.size = maxsize;
 	nettel_intel_map.phys = intel0addr;
 	nettel_intel_map.virt = ioremap_nocache(intel0addr, maxsize);
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index 72107dc06d67..bbb42c35b69b 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -186,7 +186,7 @@ static int __devinit of_physmap_probe(struct of_device *dev, const struct of_dev
 	else {
 		if (strcmp(of_probe, "ROM"))
 			dev_dbg(&dev->dev, "map_probe: don't know probe type "
-				"'%s', mapping as rom\n");
+				"'%s', mapping as rom\n", of_probe);
 		info->mtd = do_map_probe("mtd_rom", &info->map);
 	}
 	if (info->mtd == NULL) {
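For orientation, the probe-type string compared above comes from the OF description this driver binds against. A hypothetical device node, assuming the property names and the "direct-mapped" compatible string used by this generation of physmap_of (the address, size, and bank width are invented values):

	flash@ff000000 {
		device_type = "rom";
		compatible = "direct-mapped";
		probe-type = "CFI";		/* or "JEDEC", "ROM" */
		reg = <0xff000000 0x01000000>;
		bank-width = <2>;
	};

An unrecognized probe-type now at least names itself in the debug message before the driver falls back to the mtd_rom probe.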
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 1af989023c66..9c6236852942 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -347,7 +347,6 @@ int add_mtd_partitions(struct mtd_info *master,
 		slave->mtd.subpage_sft = master->subpage_sft;
 
 		slave->mtd.name = parts[i].name;
-		slave->mtd.bank_size = master->bank_size;
 		slave->mtd.owner = master->owner;
 
 		slave->mtd.read = part_read;
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index d05873b8c155..f1d60b6f048e 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -232,11 +232,13 @@ config MTD_NAND_BASLER_EXCITE
 	  will be named "excite_nandflash.ko".
 
 config MTD_NAND_CAFE
 	tristate "NAND support for OLPC CAFÉ chip"
 	depends on PCI
+	select REED_SOLOMON
+	select REED_SOLOMON_DEC16
 	help
 	  Use NAND flash attached to the CAFÉ chip designed for the $100
 	  laptop.
 
 config MTD_NAND_CS553X
 	tristate "NAND support for CS5535/CS5536 (AMD Geode companion chip)"
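The two new select lines pull in lib/reed_solomon, which takes over from the hand-rolled GF(4096) decoder deleted below in cafe_ecc.c. A minimal sketch of that library interface, under assumed code parameters (12-bit symbols with 8 parity symbols, matching the up-to-four-symbol correction the removed code performs; the 0x1895 field polynomial is an assumption, not the driver's verified configuration):

	#include <linux/errno.h>
	#include <linux/types.h>
	#include <linux/rslib.h>

	static struct rs_control *cafe_rs;	/* hypothetical name */

	static int example_rs_init(void)
	{
		/* GF(2^12): 12-bit symbols; 8 roots correct up to 4
		 * symbol errors per codeword. */
		cafe_rs = init_rs(12, 0x1895, 0, 1, 8);
		return cafe_rs ? 0 : -ENOMEM;
	}

	static int example_rs_correct(uint16_t *data, int len, uint16_t *par)
	{
		/* decode_rs16() repairs errors in place and returns the
		 * number of corrected symbols, or a negative value when
		 * the codeword is uncorrectable. */
		return decode_rs16(cafe_rs, data, par, len,
				   NULL, 0, NULL, 0, NULL);
	}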
@@ -270,4 +272,13 @@ config MTD_NAND_NANDSIM
 	  The simulator may simulate various NAND flash chips for the
 	  MTD nand layer.
 
+config MTD_NAND_PLATFORM
+	tristate "Support for generic platform NAND driver"
+	depends on MTD_NAND
+	help
+	  This implements a generic NAND driver for on-SOC platform
+	  devices. You will need to provide platform-specific functions
+	  via platform_data.
+
+
 endif # MTD_NAND
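Since the help text above only says that platform-specific functions arrive via platform_data, here is a board-file sketch of what that can look like. It assumes the platform_nand_data layout and the "gen_nand" device name used by the plat_nand driver this option builds; the base address, latch decode, and partition sizes are invented:

	#include <linux/kernel.h>
	#include <linux/io.h>
	#include <linux/mtd/nand.h>
	#include <linux/mtd/partitions.h>
	#include <linux/platform_device.h>

	#define BOARD_NAND_BASE	0x40000000	/* assumption: memory-mapped chip */

	/* Platform-specific command/address latch control (decode invented). */
	static void board_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
					unsigned int ctrl)
	{
		struct nand_chip *chip = mtd->priv;

		if (cmd == NAND_CMD_NONE)
			return;
		if (ctrl & NAND_CLE)
			writeb(cmd, chip->IO_ADDR_W + 0x10);	/* CLE on A4 */
		else
			writeb(cmd, chip->IO_ADDR_W + 0x20);	/* ALE on A5 */
	}

	static struct mtd_partition board_nand_parts[] = {
		{ .name = "kernel", .offset = 0, .size = 4 * 1024 * 1024 },
		{ .name = "rootfs", .offset = MTDPART_OFS_APPEND,
		  .size = MTDPART_SIZ_FULL },
	};

	static struct platform_nand_data board_nand_data = {
		.chip = {
			.nr_chips	= 1,
			.chip_delay	= 20,	/* us */
			.partitions	= board_nand_parts,
			.nr_partitions	= ARRAY_SIZE(board_nand_parts),
		},
		.ctrl = {
			.cmd_ctrl	= board_nand_cmd_ctrl,
		},
	};

	static struct resource board_nand_resource = {
		.start	= BOARD_NAND_BASE,
		.end	= BOARD_NAND_BASE + 0xfff,
		.flags	= IORESOURCE_MEM,
	};

	static struct platform_device board_nand_device = {
		.name		= "gen_nand",	/* matched by plat_nand */
		.id		= -1,
		.dev		= { .platform_data = &board_nand_data },
		.num_resources	= 1,
		.resource	= &board_nand_resource,
	};

Board init code would then hand this to platform_device_register(&board_nand_device).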
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 6872031a3fb2..edba1db14bfa 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -26,6 +26,6 @@ obj-$(CONFIG_MTD_NAND_NDFC) += ndfc.o
 obj-$(CONFIG_MTD_NAND_AT91)		+= at91_nand.o
 obj-$(CONFIG_MTD_NAND_CM_X270)		+= cmx270_nand.o
 obj-$(CONFIG_MTD_NAND_BASLER_EXCITE)	+= excite_nandflash.o
+obj-$(CONFIG_MTD_NAND_PLATFORM)		+= plat_nand.o
 
 nand-objs := nand_base.o nand_bbt.o
-cafe_nand-objs := cafe.o cafe_ecc.o
diff --git a/drivers/mtd/nand/at91_nand.c b/drivers/mtd/nand/at91_nand.c
index 14b80cc90a7b..512e999177f7 100644
--- a/drivers/mtd/nand/at91_nand.c
+++ b/drivers/mtd/nand/at91_nand.c
@@ -82,6 +82,10 @@ static void at91_nand_disable(struct at91_nand_host *host)
 		at91_set_gpio_value(host->board->enable_pin, 1);
 }
 
+#ifdef CONFIG_MTD_PARTITIONS
+const char *part_probes[] = { "cmdlinepart", NULL };
+#endif
+
 /*
  * Probe for the NAND device.
  */
@@ -151,6 +155,12 @@ static int __init at91_nand_probe(struct platform_device *pdev)
 #ifdef CONFIG_MTD_PARTITIONS
 	if (host->board->partition_info)
 		partitions = host->board->partition_info(mtd->size, &num_partitions);
+#ifdef CONFIG_MTD_CMDLINE_PARTS
+	else {
+		mtd->name = "at91_nand";
+		num_partitions = parse_mtd_partitions(mtd, part_probes, &partitions, 0);
+	}
+#endif
 
 	if ((!partitions) || (num_partitions == 0)) {
 		printk(KERN_ERR "at91_nand: No parititions defined, or unsupported device.\n");
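With this fallback in place, a board that supplies no partition_info can describe its layout on the kernel command line instead; cmdlinepart matches the mtd-id against the "at91_nand" name set above. A hypothetical example (partition sizes invented; syntax per drivers/mtd/cmdlinepart.c, where "-" means use the remaining space):

	mtdparts=at91_nand:128k(bootstrap),256k(uboot),2m(kernel),-(rootfs)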
diff --git a/drivers/mtd/nand/cafe_ecc.c b/drivers/mtd/nand/cafe_ecc.c
deleted file mode 100644
index ea5c8491d2c5..000000000000
--- a/drivers/mtd/nand/cafe_ecc.c
+++ /dev/null
@@ -1,1381 +0,0 @@
1/* Error correction for CAFÉ NAND controller
2 *
3 * © 2006 Marvell, Inc.
4 * Author: Tom Chiou
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation; either version 2 of the License, or (at your option)
9 * any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc., 59
18 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/errno.h>
24
25static unsigned short gf4096_mul(unsigned short, unsigned short);
26static unsigned short gf64_mul(unsigned short, unsigned short);
27static unsigned short gf4096_inv(unsigned short);
28static unsigned short err_pos(unsigned short);
29static void find_4bit_err_coefs(unsigned short, unsigned short, unsigned short,
30 unsigned short, unsigned short, unsigned short,
31 unsigned short, unsigned short, unsigned short *);
32static void zero_4x5_col3(unsigned short[4][5]);
33static void zero_4x5_col2(unsigned short[4][5]);
34static void zero_4x5_col1(unsigned short[4][5]);
35static void swap_4x5_rows(unsigned short[4][5], int, int, int);
36static void swap_2x3_rows(unsigned short m[2][3]);
37static void solve_4x5(unsigned short m[4][5], unsigned short *, int *);
38static void sort_coefs(int *, unsigned short *, int);
39static void find_4bit_err_pats(unsigned short, unsigned short, unsigned short,
40 unsigned short, unsigned short, unsigned short,
41 unsigned short, unsigned short, unsigned short *);
42static void find_3bit_err_coefs(unsigned short, unsigned short, unsigned short,
43 unsigned short, unsigned short, unsigned short,
44 unsigned short *);
45static void zero_3x4_col2(unsigned short[3][4]);
46static void zero_3x4_col1(unsigned short[3][4]);
47static void swap_3x4_rows(unsigned short[3][4], int, int, int);
48static void solve_3x4(unsigned short[3][4], unsigned short *, int *);
49static void find_3bit_err_pats(unsigned short, unsigned short, unsigned short,
50 unsigned short, unsigned short, unsigned short,
51 unsigned short *);
52
53static void find_2bit_err_pats(unsigned short, unsigned short, unsigned short,
54 unsigned short, unsigned short *);
55static void find_2x2_soln(unsigned short, unsigned short, unsigned short,
56 unsigned short, unsigned short, unsigned short,
57 unsigned short *);
58static void solve_2x3(unsigned short[2][3], unsigned short *);
59static int chk_no_err_only(unsigned short *, unsigned short *);
60static int chk_1_err_only(unsigned short *, unsigned short *);
61static int chk_2_err_only(unsigned short *, unsigned short *);
62static int chk_3_err_only(unsigned short *, unsigned short *);
63static int chk_4_err_only(unsigned short *, unsigned short *);
64
65static unsigned short gf64_mul(unsigned short a, unsigned short b)
66{
67 unsigned short tmp1, tmp2, tmp3, tmp4, tmp5;
68 unsigned short c_bit0, c_bit1, c_bit2, c_bit3, c_bit4, c_bit5, c;
69
70 tmp1 = ((a) ^ (a >> 5));
71 tmp2 = ((a >> 4) ^ (a >> 5));
72 tmp3 = ((a >> 3) ^ (a >> 4));
73 tmp4 = ((a >> 2) ^ (a >> 3));
74 tmp5 = ((a >> 1) ^ (a >> 2));
75
76 c_bit0 = ((a & b) ^ ((a >> 5) & (b >> 1)) ^ ((a >> 4) & (b >> 2)) ^
77 ((a >> 3) & (b >> 3)) ^ ((a >> 2) & (b >> 4)) ^ ((a >> 1) & (b >> 5))) & 0x1;
78
79 c_bit1 = (((a >> 1) & b) ^ (tmp1 & (b >> 1)) ^ (tmp2 & (b >> 2)) ^
80 (tmp3 & (b >> 3)) ^ (tmp4 & (b >> 4)) ^ (tmp5 & (b >> 5))) & 0x1;
81
82 c_bit2 = (((a >> 2) & b) ^ ((a >> 1) & (b >> 1)) ^ (tmp1 & (b >> 2)) ^
83 (tmp2 & (b >> 3)) ^ (tmp3 & (b >> 4)) ^ (tmp4 & (b >> 5))) & 0x1;
84
85 c_bit3 = (((a >> 3) & b) ^ ((a >> 2) & (b >> 1)) ^ ((a >> 1) & (b >> 2)) ^
86 (tmp1 & (b >> 3)) ^ (tmp2 & (b >> 4)) ^ (tmp3 & (b >> 5))) & 0x1;
87
88 c_bit4 = (((a >> 4) & b) ^ ((a >> 3) & (b >> 1)) ^ ((a >> 2) & (b >> 2)) ^
89 ((a >> 1) & (b >> 3)) ^ (tmp1 & (b >> 4)) ^ (tmp2 & (b >> 5))) & 0x1;
90
91 c_bit5 = (((a >> 5) & b) ^ ((a >> 4) & (b >> 1)) ^ ((a >> 3) & (b >> 2)) ^
92 ((a >> 2) & (b >> 3)) ^ ((a >> 1) & (b >> 4)) ^ (tmp1 & (b >> 5))) & 0x1;
93
94 c = c_bit0 | (c_bit1 << 1) | (c_bit2 << 2) | (c_bit3 << 3) | (c_bit4 << 4) | (c_bit5 << 5);
95
96 return c;
97}
98
99static unsigned short gf4096_mul(unsigned short a, unsigned short b)
100{
101 unsigned short ah, al, bh, bl, alxah, blxbh, ablh, albl, ahbh, ahbhB, c;
102
103 ah = (a >> 6) & 0x3f;
104 al = a & 0x3f;
105 bh = (b >> 6) & 0x3f;
106 bl = b & 0x3f;
107 alxah = al ^ ah;
108 blxbh = bl ^ bh;
109
110 ablh = gf64_mul(alxah, blxbh);
111 albl = gf64_mul(al, bl);
112 ahbh = gf64_mul(ah, bh);
113
114 ahbhB = ((ahbh & 0x1) << 5) |
115 ((ahbh & 0x20) >> 1) |
116 ((ahbh & 0x10) >> 1) | ((ahbh & 0x8) >> 1) | ((ahbh & 0x4) >> 1) | (((ahbh >> 1) ^ ahbh) & 0x1);
117
118 c = ((ablh ^ albl) << 6) | (ahbhB ^ albl);
119 return c;
120}
121
122static void find_2bit_err_pats(unsigned short s0, unsigned short s1, unsigned short r0, unsigned short r1, unsigned short *pats)
123{
124 find_2x2_soln(0x1, 0x1, r0, r1, s0, s1, pats);
125}
126
127static void find_3bit_err_coefs(unsigned short s0, unsigned short s1,
128 unsigned short s2, unsigned short s3, unsigned short s4, unsigned short s5, unsigned short *coefs)
129{
130 unsigned short m[3][4];
131 int row_order[3];
132
133 row_order[0] = 0;
134 row_order[1] = 1;
135 row_order[2] = 2;
136 m[0][0] = s2;
137 m[0][1] = s1;
138 m[0][2] = s0;
139 m[0][3] = s3;
140 m[1][0] = s3;
141 m[1][1] = s2;
142 m[1][2] = s1;
143 m[1][3] = s4;
144 m[2][0] = s4;
145 m[2][1] = s3;
146 m[2][2] = s2;
147 m[2][3] = s5;
148
149 if (m[0][2] != 0x0) {
150 zero_3x4_col2(m);
151 } else if (m[1][2] != 0x0) {
152 swap_3x4_rows(m, 0, 1, 4);
153 zero_3x4_col2(m);
154 } else if (m[2][2] != 0x0) {
155 swap_3x4_rows(m, 0, 2, 4);
156 zero_3x4_col2(m);
157 } else {
158 printk(KERN_ERR "Error: find_3bit_err_coefs, s0,s1,s2 all zeros!\n");
159 }
160
161 if (m[1][1] != 0x0) {
162 zero_3x4_col1(m);
163 } else if (m[2][1] != 0x0) {
164 swap_3x4_rows(m, 1, 2, 4);
165 zero_3x4_col1(m);
166 } else {
167 printk(KERN_ERR "Error: find_3bit_err_coefs, cannot resolve col 1!\n");
168 }
169
170 /* solve coefs */
171 solve_3x4(m, coefs, row_order);
172}
173
174static void zero_3x4_col2(unsigned short m[3][4])
175{
176 unsigned short minv1, minv2;
177
178 minv1 = gf4096_mul(m[1][2], gf4096_inv(m[0][2]));
179 minv2 = gf4096_mul(m[2][2], gf4096_inv(m[0][2]));
180 m[1][0] = m[1][0] ^ gf4096_mul(m[0][0], minv1);
181 m[1][1] = m[1][1] ^ gf4096_mul(m[0][1], minv1);
182 m[1][3] = m[1][3] ^ gf4096_mul(m[0][3], minv1);
183 m[2][0] = m[2][0] ^ gf4096_mul(m[0][0], minv2);
184 m[2][1] = m[2][1] ^ gf4096_mul(m[0][1], minv2);
185 m[2][3] = m[2][3] ^ gf4096_mul(m[0][3], minv2);
186}
187
188static void zero_3x4_col1(unsigned short m[3][4])
189{
190 unsigned short minv;
191 minv = gf4096_mul(m[2][1], gf4096_inv(m[1][1]));
192 m[2][0] = m[2][0] ^ gf4096_mul(m[1][0], minv);
193 m[2][3] = m[2][3] ^ gf4096_mul(m[1][3], minv);
194}
195
196static void swap_3x4_rows(unsigned short m[3][4], int i, int j, int col_width)
197{
198 unsigned short tmp0;
199 int cnt;
200 for (cnt = 0; cnt < col_width; cnt++) {
201 tmp0 = m[i][cnt];
202 m[i][cnt] = m[j][cnt];
203 m[j][cnt] = tmp0;
204 }
205}
206
207static void solve_3x4(unsigned short m[3][4], unsigned short *coefs, int *row_order)
208{
209 unsigned short tmp[3];
210 tmp[0] = gf4096_mul(m[2][3], gf4096_inv(m[2][0]));
211 tmp[1] = gf4096_mul((gf4096_mul(tmp[0], m[1][0]) ^ m[1][3]), gf4096_inv(m[1][1]));
212 tmp[2] = gf4096_mul((gf4096_mul(tmp[0], m[0][0]) ^ gf4096_mul(tmp[1], m[0][1]) ^ m[0][3]), gf4096_inv(m[0][2]));
213 sort_coefs(row_order, tmp, 3);
214 coefs[0] = tmp[0];
215 coefs[1] = tmp[1];
216 coefs[2] = tmp[2];
217}
218
219static void find_3bit_err_pats(unsigned short s0, unsigned short s1,
220 unsigned short s2, unsigned short r0,
221 unsigned short r1, unsigned short r2,
222 unsigned short *pats)
223{
224 find_2x2_soln(r0 ^ r2, r1 ^ r2,
225 gf4096_mul(r0, r0 ^ r2), gf4096_mul(r1, r1 ^ r2),
226 gf4096_mul(s0, r2) ^ s1, gf4096_mul(s1, r2) ^ s2, pats);
227 pats[2] = s0 ^ pats[0] ^ pats[1];
228}
229
230static void find_4bit_err_coefs(unsigned short s0, unsigned short s1,
231 unsigned short s2, unsigned short s3,
232 unsigned short s4, unsigned short s5,
233 unsigned short s6, unsigned short s7,
234 unsigned short *coefs)
235{
236 unsigned short m[4][5];
237 int row_order[4];
238
239 row_order[0] = 0;
240 row_order[1] = 1;
241 row_order[2] = 2;
242 row_order[3] = 3;
243
244 m[0][0] = s3;
245 m[0][1] = s2;
246 m[0][2] = s1;
247 m[0][3] = s0;
248 m[0][4] = s4;
249 m[1][0] = s4;
250 m[1][1] = s3;
251 m[1][2] = s2;
252 m[1][3] = s1;
253 m[1][4] = s5;
254 m[2][0] = s5;
255 m[2][1] = s4;
256 m[2][2] = s3;
257 m[2][3] = s2;
258 m[2][4] = s6;
259 m[3][0] = s6;
260 m[3][1] = s5;
261 m[3][2] = s4;
262 m[3][3] = s3;
263 m[3][4] = s7;
264
265 if (m[0][3] != 0x0) {
266 zero_4x5_col3(m);
267 } else if (m[1][3] != 0x0) {
268 swap_4x5_rows(m, 0, 1, 5);
269 zero_4x5_col3(m);
270 } else if (m[2][3] != 0x0) {
271 swap_4x5_rows(m, 0, 2, 5);
272 zero_4x5_col3(m);
273 } else if (m[3][3] != 0x0) {
274 swap_4x5_rows(m, 0, 3, 5);
275 zero_4x5_col3(m);
276 } else {
277 printk(KERN_ERR "Error: find_4bit_err_coefs, s0,s1,s2,s3 all zeros!\n");
278 }
279
280 if (m[1][2] != 0x0) {
281 zero_4x5_col2(m);
282 } else if (m[2][2] != 0x0) {
283 swap_4x5_rows(m, 1, 2, 5);
284 zero_4x5_col2(m);
285 } else if (m[3][2] != 0x0) {
286 swap_4x5_rows(m, 1, 3, 5);
287 zero_4x5_col2(m);
288 } else {
289 printk(KERN_ERR "Error: find_4bit_err_coefs, cannot resolve col 2!\n");
290 }
291
292 if (m[2][1] != 0x0) {
293 zero_4x5_col1(m);
294 } else if (m[3][1] != 0x0) {
295 swap_4x5_rows(m, 2, 3, 5);
296 zero_4x5_col1(m);
297 } else {
298 printk(KERN_ERR "Error: find_4bit_err_coefs, cannot resolve col 1!\n");
299 }
300
301 solve_4x5(m, coefs, row_order);
302}
303
304static void zero_4x5_col3(unsigned short m[4][5])
305{
306 unsigned short minv1, minv2, minv3;
307
308 minv1 = gf4096_mul(m[1][3], gf4096_inv(m[0][3]));
309 minv2 = gf4096_mul(m[2][3], gf4096_inv(m[0][3]));
310 minv3 = gf4096_mul(m[3][3], gf4096_inv(m[0][3]));
311
312 m[1][0] = m[1][0] ^ gf4096_mul(m[0][0], minv1);
313 m[1][1] = m[1][1] ^ gf4096_mul(m[0][1], minv1);
314 m[1][2] = m[1][2] ^ gf4096_mul(m[0][2], minv1);
315 m[1][4] = m[1][4] ^ gf4096_mul(m[0][4], minv1);
316 m[2][0] = m[2][0] ^ gf4096_mul(m[0][0], minv2);
317 m[2][1] = m[2][1] ^ gf4096_mul(m[0][1], minv2);
318 m[2][2] = m[2][2] ^ gf4096_mul(m[0][2], minv2);
319 m[2][4] = m[2][4] ^ gf4096_mul(m[0][4], minv2);
320 m[3][0] = m[3][0] ^ gf4096_mul(m[0][0], minv3);
321 m[3][1] = m[3][1] ^ gf4096_mul(m[0][1], minv3);
322 m[3][2] = m[3][2] ^ gf4096_mul(m[0][2], minv3);
323 m[3][4] = m[3][4] ^ gf4096_mul(m[0][4], minv3);
324}
325
326static void zero_4x5_col2(unsigned short m[4][5])
327{
328 unsigned short minv2, minv3;
329
330 minv2 = gf4096_mul(m[2][2], gf4096_inv(m[1][2]));
331 minv3 = gf4096_mul(m[3][2], gf4096_inv(m[1][2]));
332
333 m[2][0] = m[2][0] ^ gf4096_mul(m[1][0], minv2);
334 m[2][1] = m[2][1] ^ gf4096_mul(m[1][1], minv2);
335 m[2][4] = m[2][4] ^ gf4096_mul(m[1][4], minv2);
336 m[3][0] = m[3][0] ^ gf4096_mul(m[1][0], minv3);
337 m[3][1] = m[3][1] ^ gf4096_mul(m[1][1], minv3);
338 m[3][4] = m[3][4] ^ gf4096_mul(m[1][4], minv3);
339}
340
341static void zero_4x5_col1(unsigned short m[4][5])
342{
343 unsigned short minv;
344
345 minv = gf4096_mul(m[3][1], gf4096_inv(m[2][1]));
346
347 m[3][0] = m[3][0] ^ gf4096_mul(m[2][0], minv);
348 m[3][4] = m[3][4] ^ gf4096_mul(m[2][4], minv);
349}
350
351static void swap_4x5_rows(unsigned short m[4][5], int i, int j, int col_width)
352{
353 unsigned short tmp0;
354 int cnt;
355
356 for (cnt = 0; cnt < col_width; cnt++) {
357 tmp0 = m[i][cnt];
358 m[i][cnt] = m[j][cnt];
359 m[j][cnt] = tmp0;
360 }
361}
362
363static void solve_4x5(unsigned short m[4][5], unsigned short *coefs, int *row_order)
364{
365 unsigned short tmp[4];
366
367 tmp[0] = gf4096_mul(m[3][4], gf4096_inv(m[3][0]));
368 tmp[1] = gf4096_mul((gf4096_mul(tmp[0], m[2][0]) ^ m[2][4]), gf4096_inv(m[2][1]));
369 tmp[2] = gf4096_mul((gf4096_mul(tmp[0], m[1][0]) ^ gf4096_mul(tmp[1], m[1][1]) ^ m[1][4]), gf4096_inv(m[1][2]));
370 tmp[3] = gf4096_mul((gf4096_mul(tmp[0], m[0][0]) ^
371 gf4096_mul(tmp[1], m[0][1]) ^ gf4096_mul(tmp[2], m[0][2]) ^ m[0][4]), gf4096_inv(m[0][3]));
372 sort_coefs(row_order, tmp, 4);
373 coefs[0] = tmp[0];
374 coefs[1] = tmp[1];
375 coefs[2] = tmp[2];
376 coefs[3] = tmp[3];
377}
378
379static void sort_coefs(int *order, unsigned short *soln, int len)
380{
381 int cnt, start_cnt, least_ord, least_cnt;
382 unsigned short tmp0;
383 for (start_cnt = 0; start_cnt < len; start_cnt++) {
384 for (cnt = start_cnt; cnt < len; cnt++) {
385 if (cnt == start_cnt) {
386 least_ord = order[cnt];
387 least_cnt = start_cnt;
388 } else {
389 if (least_ord > order[cnt]) {
390 least_ord = order[cnt];
391 least_cnt = cnt;
392 }
393 }
394 }
395 if (least_cnt != start_cnt) {
396 tmp0 = order[least_cnt];
397 order[least_cnt] = order[start_cnt];
398 order[start_cnt] = tmp0;
399 tmp0 = soln[least_cnt];
400 soln[least_cnt] = soln[start_cnt];
401 soln[start_cnt] = tmp0;
402 }
403 }
404}
405
406static void find_4bit_err_pats(unsigned short s0, unsigned short s1,
407 unsigned short s2, unsigned short s3,
408 unsigned short z1, unsigned short z2,
409 unsigned short z3, unsigned short z4,
410 unsigned short *pats)
411{
412 unsigned short z4_z1, z3z4_z3z3, z4_z2, s0z4_s1, z1z4_z1z1,
413 z4_z3, z2z4_z2z2, s1z4_s2, z3z3z4_z3z3z3, z1z1z4_z1z1z1, z2z2z4_z2z2z2, s2z4_s3;
414 unsigned short tmp0, tmp1, tmp2, tmp3;
415
416 z4_z1 = z4 ^ z1;
417 z3z4_z3z3 = gf4096_mul(z3, z4) ^ gf4096_mul(z3, z3);
418 z4_z2 = z4 ^ z2;
419 s0z4_s1 = gf4096_mul(s0, z4) ^ s1;
420 z1z4_z1z1 = gf4096_mul(z1, z4) ^ gf4096_mul(z1, z1);
421 z4_z3 = z4 ^ z3;
422 z2z4_z2z2 = gf4096_mul(z2, z4) ^ gf4096_mul(z2, z2);
423 s1z4_s2 = gf4096_mul(s1, z4) ^ s2;
424 z3z3z4_z3z3z3 = gf4096_mul(gf4096_mul(z3, z3), z4) ^ gf4096_mul(gf4096_mul(z3, z3), z3);
425 z1z1z4_z1z1z1 = gf4096_mul(gf4096_mul(z1, z1), z4) ^ gf4096_mul(gf4096_mul(z1, z1), z1);
426 z2z2z4_z2z2z2 = gf4096_mul(gf4096_mul(z2, z2), z4) ^ gf4096_mul(gf4096_mul(z2, z2), z2);
427 s2z4_s3 = gf4096_mul(s2, z4) ^ s3;
428
429 //find err pat 0,1
430 find_2x2_soln(gf4096_mul(z4_z1, z3z4_z3z3) ^
431 gf4096_mul(z1z4_z1z1, z4_z3), gf4096_mul(z4_z2,
432 z3z4_z3z3) ^
433 gf4096_mul(z2z4_z2z2, z4_z3), gf4096_mul(z1z4_z1z1,
434 z3z3z4_z3z3z3) ^
435 gf4096_mul(z1z1z4_z1z1z1, z3z4_z3z3),
436 gf4096_mul(z2z4_z2z2,
437 z3z3z4_z3z3z3) ^ gf4096_mul(z2z2z4_z2z2z2,
438 z3z4_z3z3),
439 gf4096_mul(s0z4_s1, z3z4_z3z3) ^ gf4096_mul(s1z4_s2,
440 z4_z3),
441 gf4096_mul(s1z4_s2, z3z3z4_z3z3z3) ^ gf4096_mul(s2z4_s3, z3z4_z3z3), pats);
442 tmp0 = pats[0];
443 tmp1 = pats[1];
444 tmp2 = pats[0] ^ pats[1] ^ s0;
445 tmp3 = gf4096_mul(pats[0], z1) ^ gf4096_mul(pats[1], z2) ^ s1;
446
447 //find err pat 2,3
448 find_2x2_soln(0x1, 0x1, z3, z4, tmp2, tmp3, pats);
449 pats[2] = pats[0];
450 pats[3] = pats[1];
451 pats[0] = tmp0;
452 pats[1] = tmp1;
453}
454
455static void find_2x2_soln(unsigned short c00, unsigned short c01,
456 unsigned short c10, unsigned short c11,
457 unsigned short lval0, unsigned short lval1,
458 unsigned short *soln)
459{
460 unsigned short m[2][3];
461 m[0][0] = c00;
462 m[0][1] = c01;
463 m[0][2] = lval0;
464 m[1][0] = c10;
465 m[1][1] = c11;
466 m[1][2] = lval1;
467
468 if (m[0][1] != 0x0) {
469 /* */
470 } else if (m[1][1] != 0x0) {
471 swap_2x3_rows(m);
472 } else {
473 printk(KERN_ERR "Warning: find_2bit_err_coefs, s0,s1 all zeros!\n");
474 }
475
476 solve_2x3(m, soln);
477}
478
479static void swap_2x3_rows(unsigned short m[2][3])
480{
481 unsigned short tmp0;
482 int cnt;
483
484 for (cnt = 0; cnt < 3; cnt++) {
485 tmp0 = m[0][cnt];
486 m[0][cnt] = m[1][cnt];
487 m[1][cnt] = tmp0;
488 }
489}
490
491static void solve_2x3(unsigned short m[2][3], unsigned short *coefs)
492{
493 unsigned short minv;
494
495 minv = gf4096_mul(m[1][1], gf4096_inv(m[0][1]));
496 m[1][0] = m[1][0] ^ gf4096_mul(m[0][0], minv);
497 m[1][2] = m[1][2] ^ gf4096_mul(m[0][2], minv);
498 coefs[0] = gf4096_mul(m[1][2], gf4096_inv(m[1][0]));
499 coefs[1] = gf4096_mul((gf4096_mul(coefs[0], m[0][0]) ^ m[0][2]), gf4096_inv(m[0][1]));
500}
501
502static unsigned char gf64_inv[64] = {
503 0, 1, 33, 62, 49, 43, 31, 44, 57, 37, 52, 28, 46, 40, 22, 25,
504 61, 54, 51, 39, 26, 35, 14, 24, 23, 15, 20, 34, 11, 53, 45, 6,
505 63, 2, 27, 21, 56, 9, 50, 19, 13, 47, 48, 5, 7, 30, 12, 41,
506 42, 4, 38, 18, 10, 29, 17, 60, 36, 8, 59, 58, 55, 16, 3, 32
507};
508
509static unsigned short gf4096_inv(unsigned short din)
510{
511 unsigned short alahxal, ah2B, deno, inv, bl, bh;
512 unsigned short ah, al, ahxal;
513 unsigned short dout;
514
515 ah = (din >> 6) & 0x3f;
516 al = din & 0x3f;
517 ahxal = ah ^ al;
518 ah2B = (((ah ^ (ah >> 3)) & 0x1) << 5) |
519 ((ah >> 1) & 0x10) |
520 ((((ah >> 5) ^ (ah >> 2)) & 0x1) << 3) |
521 ((ah >> 2) & 0x4) | ((((ah >> 4) ^ (ah >> 1)) & 0x1) << 1) | (ah & 0x1);
522 alahxal = gf64_mul(ahxal, al);
523 deno = alahxal ^ ah2B;
524 inv = gf64_inv[deno];
525 bl = gf64_mul(inv, ahxal);
526 bh = gf64_mul(inv, ah);
527 dout = ((bh & 0x3f) << 6) | (bl & 0x3f);
528 return (((bh & 0x3f) << 6) | (bl & 0x3f));
529}
530
531static unsigned short err_pos_lut[4096] = {
532 0xfff, 0x000, 0x451, 0xfff, 0xfff, 0x3cf, 0xfff, 0x041,
533 0xfff, 0xfff, 0xfff, 0xfff, 0x28a, 0xfff, 0x492, 0xfff,
534 0x145, 0xfff, 0xfff, 0x514, 0xfff, 0x082, 0xfff, 0xfff,
535 0xfff, 0x249, 0x38e, 0x410, 0xfff, 0x104, 0x208, 0x1c7,
536 0xfff, 0xfff, 0xfff, 0xfff, 0x2cb, 0xfff, 0xfff, 0xfff,
537 0x0c3, 0x34d, 0x4d3, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
538 0xfff, 0xfff, 0xfff, 0x186, 0xfff, 0xfff, 0xfff, 0xfff,
539 0xfff, 0x30c, 0x555, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
540 0xfff, 0xfff, 0xfff, 0x166, 0xfff, 0xfff, 0xfff, 0xfff,
541 0x385, 0x14e, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x4e1,
542 0xfff, 0xfff, 0xfff, 0xfff, 0x538, 0xfff, 0x16d, 0xfff,
543 0xfff, 0xfff, 0x45b, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
544 0xfff, 0xfff, 0xfff, 0x29c, 0x2cc, 0x30b, 0x2b3, 0xfff,
545 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x0b3, 0xfff, 0x2f7,
546 0xfff, 0x32b, 0xfff, 0xfff, 0xfff, 0xfff, 0x0a7, 0xfff,
547 0xfff, 0x2da, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
548 0xfff, 0x07e, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
549 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x11c, 0xfff, 0xfff,
550 0xfff, 0xfff, 0xfff, 0x22f, 0xfff, 0x1f4, 0xfff, 0xfff,
551 0x2b0, 0x504, 0xfff, 0x114, 0xfff, 0xfff, 0xfff, 0x21d,
552 0xfff, 0xfff, 0xfff, 0xfff, 0x00d, 0x3c4, 0x340, 0x10f,
553 0xfff, 0xfff, 0x266, 0x02e, 0xfff, 0xfff, 0xfff, 0x4f8,
554 0x337, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
555 0xfff, 0xfff, 0xfff, 0x07b, 0x168, 0xfff, 0xfff, 0x0fe,
556 0xfff, 0xfff, 0x51a, 0xfff, 0x458, 0xfff, 0x36d, 0xfff,
557 0xfff, 0xfff, 0xfff, 0x073, 0x37d, 0x415, 0x550, 0xfff,
558 0xfff, 0xfff, 0x23b, 0x4b4, 0xfff, 0xfff, 0xfff, 0x1a1,
559 0xfff, 0xfff, 0x3aa, 0xfff, 0x117, 0x04d, 0x341, 0xfff,
560 0xfff, 0xfff, 0xfff, 0x518, 0x03e, 0x0f2, 0xfff, 0xfff,
561 0xfff, 0xfff, 0xfff, 0x363, 0xfff, 0x0b9, 0xfff, 0xfff,
562 0x241, 0xfff, 0xfff, 0x049, 0xfff, 0xfff, 0xfff, 0xfff,
563 0x15f, 0x52d, 0xfff, 0xfff, 0xfff, 0x29e, 0xfff, 0xfff,
564 0xfff, 0xfff, 0x4cf, 0x0fc, 0xfff, 0x36f, 0x3d3, 0xfff,
565 0x228, 0xfff, 0xfff, 0x45e, 0xfff, 0xfff, 0xfff, 0xfff,
566 0x238, 0xfff, 0xfff, 0xfff, 0xfff, 0x47f, 0xfff, 0xfff,
567 0x43a, 0x265, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x3e8,
568 0xfff, 0xfff, 0x01a, 0xfff, 0xfff, 0xfff, 0xfff, 0x21e,
569 0x1fc, 0x40b, 0xfff, 0xfff, 0xfff, 0x2d0, 0x159, 0xfff,
570 0xfff, 0x313, 0xfff, 0xfff, 0x05c, 0x4cc, 0xfff, 0xfff,
571 0x0f6, 0x3d5, 0xfff, 0xfff, 0xfff, 0x54f, 0xfff, 0xfff,
572 0xfff, 0x172, 0x1e4, 0x07c, 0xfff, 0xfff, 0xfff, 0xfff,
573 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x53c, 0x1ad, 0x535,
574 0x19b, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
575 0xfff, 0xfff, 0x092, 0xfff, 0x2be, 0xfff, 0xfff, 0x482,
576 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x0e6, 0xfff, 0xfff,
577 0xfff, 0xfff, 0xfff, 0x476, 0xfff, 0x51d, 0xfff, 0xfff,
578 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
579 0xfff, 0xfff, 0x342, 0x2b5, 0x22e, 0x09a, 0xfff, 0x08d,
580 0x44f, 0x3ed, 0xfff, 0xfff, 0xfff, 0xfff, 0x3d1, 0xfff,
581 0xfff, 0x543, 0xfff, 0x48f, 0xfff, 0x3d2, 0xfff, 0x0d5,
582 0x113, 0x0ec, 0x427, 0xfff, 0xfff, 0xfff, 0x4c4, 0xfff,
583 0xfff, 0x50a, 0xfff, 0x144, 0xfff, 0x105, 0x39f, 0x294,
584 0x164, 0xfff, 0x31a, 0xfff, 0xfff, 0x49a, 0xfff, 0x130,
585 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
586 0x1be, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
587 0xfff, 0xfff, 0x49e, 0x371, 0xfff, 0xfff, 0xfff, 0xfff,
588 0xfff, 0xfff, 0xfff, 0xfff, 0x0e8, 0x49c, 0x0f4, 0xfff,
589 0x338, 0x1a7, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
590 0xfff, 0x36c, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
591 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
592 0xfff, 0x1ae, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
593 0xfff, 0x31b, 0xfff, 0xfff, 0x2dd, 0x522, 0xfff, 0xfff,
594 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x2f4,
595 0x3c6, 0x30d, 0xfff, 0xfff, 0xfff, 0xfff, 0x34c, 0x18f,
596 0x30a, 0xfff, 0x01f, 0x079, 0xfff, 0xfff, 0x54d, 0x46b,
597 0x28c, 0x37f, 0xfff, 0xfff, 0xfff, 0xfff, 0x355, 0xfff,
598 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x14f, 0xfff, 0xfff,
599 0xfff, 0xfff, 0xfff, 0x359, 0x3fe, 0x3c5, 0xfff, 0xfff,
600 0xfff, 0xfff, 0x423, 0xfff, 0xfff, 0x34a, 0x22c, 0xfff,
601 0x25a, 0xfff, 0xfff, 0x4ad, 0xfff, 0x28d, 0xfff, 0xfff,
602 0xfff, 0xfff, 0xfff, 0x547, 0xfff, 0xfff, 0xfff, 0xfff,
603 0x2e2, 0xfff, 0xfff, 0x1d5, 0xfff, 0x2a8, 0xfff, 0xfff,
604 0x03f, 0xfff, 0xfff, 0xfff, 0xfff, 0x3eb, 0x0fa, 0xfff,
605 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x55b, 0xfff,
606 0x08e, 0xfff, 0x3ae, 0xfff, 0x3a4, 0xfff, 0x282, 0x158,
607 0xfff, 0x382, 0xfff, 0xfff, 0x499, 0xfff, 0xfff, 0x08a,
608 0xfff, 0xfff, 0xfff, 0x456, 0x3be, 0xfff, 0x1e2, 0xfff,
609 0xfff, 0xfff, 0xfff, 0xfff, 0x559, 0xfff, 0x1a0, 0xfff,
610 0xfff, 0x0b4, 0xfff, 0xfff, 0xfff, 0x2df, 0xfff, 0xfff,
611 0xfff, 0x07f, 0x4f5, 0xfff, 0xfff, 0x27c, 0x133, 0x017,
612 0xfff, 0x3fd, 0xfff, 0xfff, 0xfff, 0x44d, 0x4cd, 0x17a,
613 0x0d7, 0x537, 0xfff, 0xfff, 0x353, 0xfff, 0xfff, 0x351,
614 0x366, 0xfff, 0x44a, 0xfff, 0x1a6, 0xfff, 0xfff, 0xfff,
615 0x291, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x1e3,
616 0xfff, 0xfff, 0xfff, 0xfff, 0x389, 0xfff, 0x07a, 0xfff,
617 0x1b6, 0x2ed, 0xfff, 0xfff, 0xfff, 0xfff, 0x24e, 0x074,
618 0xfff, 0xfff, 0x3dc, 0xfff, 0x4e3, 0xfff, 0xfff, 0xfff,
619 0xfff, 0x4eb, 0xfff, 0xfff, 0x3b8, 0x4de, 0xfff, 0x19c,
620 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x262,
621 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x076, 0x4e8, 0x3da,
622 0xfff, 0x531, 0xfff, 0xfff, 0x14a, 0xfff, 0x0a2, 0x433,
623 0x3df, 0x1e9, 0xfff, 0xfff, 0xfff, 0xfff, 0x3e7, 0x285,
624 0x2d8, 0xfff, 0xfff, 0xfff, 0x349, 0x18d, 0x098, 0xfff,
625 0x0df, 0x4bf, 0xfff, 0xfff, 0x0b2, 0xfff, 0x346, 0x24d,
626 0xfff, 0xfff, 0xfff, 0x24f, 0x4fa, 0x2f9, 0xfff, 0xfff,
627 0x3c9, 0xfff, 0x2b4, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
628 0xfff, 0x056, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
629 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
630 0xfff, 0x179, 0xfff, 0x0e9, 0x3f0, 0x33d, 0xfff, 0xfff,
631 0xfff, 0xfff, 0xfff, 0x1fd, 0xfff, 0xfff, 0x526, 0xfff,
632 0xfff, 0xfff, 0x53d, 0xfff, 0xfff, 0xfff, 0x170, 0x331,
633 0xfff, 0x068, 0xfff, 0xfff, 0xfff, 0x3f7, 0xfff, 0x3d8,
634 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
635 0xfff, 0x09f, 0x556, 0xfff, 0xfff, 0x02d, 0xfff, 0xfff,
636 0x553, 0xfff, 0xfff, 0xfff, 0x1f0, 0xfff, 0xfff, 0x4d6,
637 0x41e, 0xfff, 0xfff, 0xfff, 0xfff, 0x4d5, 0xfff, 0xfff,
638 0xfff, 0xfff, 0xfff, 0x248, 0xfff, 0xfff, 0xfff, 0x0a3,
639 0xfff, 0x217, 0xfff, 0xfff, 0xfff, 0x4f1, 0x209, 0xfff,
640 0xfff, 0x475, 0x234, 0x52b, 0x398, 0xfff, 0x08b, 0xfff,
641 0xfff, 0xfff, 0xfff, 0x2c2, 0xfff, 0xfff, 0xfff, 0xfff,
642 0xfff, 0xfff, 0x268, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
643 0xfff, 0x4a3, 0xfff, 0x0aa, 0xfff, 0x1d9, 0xfff, 0xfff,
644 0xfff, 0xfff, 0x155, 0xfff, 0xfff, 0xfff, 0xfff, 0x0bf,
645 0x539, 0xfff, 0xfff, 0x2f1, 0x545, 0xfff, 0xfff, 0xfff,
646 0xfff, 0xfff, 0xfff, 0x2a7, 0x06f, 0xfff, 0x378, 0xfff,
647 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x25e, 0xfff,
648 0xfff, 0xfff, 0xfff, 0x15d, 0x02a, 0xfff, 0xfff, 0x0bc,
649 0x235, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
650 0x150, 0xfff, 0x1a9, 0xfff, 0xfff, 0xfff, 0xfff, 0x381,
651 0xfff, 0x04e, 0x270, 0x13f, 0xfff, 0xfff, 0x405, 0xfff,
652 0x3cd, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
653 0xfff, 0x2ef, 0xfff, 0x06a, 0xfff, 0xfff, 0xfff, 0x34f,
654 0x212, 0xfff, 0xfff, 0x0e2, 0xfff, 0x083, 0x298, 0xfff,
655 0xfff, 0xfff, 0x0c2, 0xfff, 0xfff, 0x52e, 0xfff, 0x488,
656 0xfff, 0xfff, 0xfff, 0x36b, 0xfff, 0xfff, 0xfff, 0x442,
657 0x091, 0xfff, 0x41c, 0xfff, 0xfff, 0x3a5, 0xfff, 0x4e6,
658 0xfff, 0xfff, 0x40d, 0x31d, 0xfff, 0xfff, 0xfff, 0x4c1,
659 0x053, 0xfff, 0x418, 0x13c, 0xfff, 0x350, 0xfff, 0x0ae,
660 0xfff, 0xfff, 0x41f, 0xfff, 0x470, 0xfff, 0x4ca, 0xfff,
661 0xfff, 0xfff, 0x02b, 0x450, 0xfff, 0x1f8, 0xfff, 0xfff,
662 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x293, 0xfff,
663 0xfff, 0xfff, 0xfff, 0x411, 0xfff, 0xfff, 0xfff, 0xfff,
664 0xfff, 0xfff, 0xfff, 0xfff, 0x0b8, 0xfff, 0xfff, 0xfff,
665 0x3e1, 0xfff, 0xfff, 0xfff, 0xfff, 0x43c, 0xfff, 0x2b2,
666 0x2ab, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x1ec,
667 0xfff, 0xfff, 0xfff, 0x3f8, 0x034, 0xfff, 0xfff, 0xfff,
668 0xfff, 0xfff, 0xfff, 0x11a, 0xfff, 0x541, 0x45c, 0x134,
669 0x1cc, 0xfff, 0xfff, 0xfff, 0x469, 0xfff, 0xfff, 0x44b,
670 0x161, 0xfff, 0xfff, 0xfff, 0x055, 0xfff, 0xfff, 0xfff,
671 0xfff, 0x307, 0xfff, 0xfff, 0xfff, 0xfff, 0x2d1, 0xfff,
672 0xfff, 0xfff, 0x124, 0x37b, 0x26b, 0x336, 0xfff, 0xfff,
673 0x2e4, 0x3cb, 0xfff, 0xfff, 0x0f8, 0x3c8, 0xfff, 0xfff,
674 0xfff, 0x461, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x4b5,
675 0x2cf, 0xfff, 0xfff, 0xfff, 0x20f, 0xfff, 0x35a, 0xfff,
676 0x490, 0xfff, 0x185, 0xfff, 0xfff, 0xfff, 0xfff, 0x42e,
677 0xfff, 0xfff, 0xfff, 0xfff, 0x54b, 0xfff, 0xfff, 0xfff,
678 0x146, 0xfff, 0x412, 0xfff, 0xfff, 0xfff, 0x1ff, 0xfff,
679 0xfff, 0x3e0, 0xfff, 0xfff, 0xfff, 0xfff, 0x2d5, 0xfff,
680 0x4df, 0x505, 0xfff, 0x413, 0xfff, 0x1a5, 0xfff, 0x3b2,
681 0xfff, 0xfff, 0xfff, 0x35b, 0xfff, 0x116, 0xfff, 0xfff,
682 0x171, 0x4d0, 0xfff, 0x154, 0x12d, 0xfff, 0xfff, 0xfff,
683 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x468, 0x4db, 0xfff,
684 0xfff, 0x1df, 0xfff, 0xfff, 0xfff, 0xfff, 0x05a, 0xfff,
685 0x0f1, 0x403, 0xfff, 0x22b, 0x2e0, 0xfff, 0xfff, 0xfff,
686 0x2b7, 0x373, 0xfff, 0xfff, 0xfff, 0xfff, 0x13e, 0xfff,
687 0xfff, 0xfff, 0x0d0, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
688 0x329, 0x1d2, 0x3fa, 0x047, 0xfff, 0x2f2, 0xfff, 0xfff,
689 0x141, 0x0ac, 0x1d7, 0xfff, 0x07d, 0xfff, 0xfff, 0xfff,
690 0x1c1, 0xfff, 0x487, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
691 0xfff, 0xfff, 0xfff, 0x045, 0xfff, 0xfff, 0xfff, 0xfff,
692 0x288, 0x0cd, 0xfff, 0xfff, 0xfff, 0xfff, 0x226, 0x1d8,
693 0xfff, 0x153, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x4cb,
694 0x528, 0xfff, 0xfff, 0xfff, 0x20a, 0x343, 0x3a1, 0xfff,
695 0xfff, 0xfff, 0x2d7, 0x2d3, 0x1aa, 0x4c5, 0xfff, 0xfff,
696 0xfff, 0x42b, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
697 0xfff, 0xfff, 0xfff, 0xfff, 0x3e9, 0xfff, 0x20b, 0x260,
698 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x37c, 0x2fd,
699 0xfff, 0xfff, 0x2c8, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
700 0xfff, 0x31e, 0xfff, 0x335, 0xfff, 0xfff, 0xfff, 0xfff,
701 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
702 0xfff, 0xfff, 0x135, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
703 0xfff, 0xfff, 0x35c, 0x4dd, 0x129, 0xfff, 0xfff, 0xfff,
704 0xfff, 0xfff, 0x1ef, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
705 0xfff, 0x34e, 0xfff, 0xfff, 0xfff, 0xfff, 0x407, 0xfff,
706 0xfff, 0xfff, 0xfff, 0xfff, 0x3ad, 0xfff, 0xfff, 0xfff,
707 0x379, 0xfff, 0xfff, 0x1d0, 0x38d, 0xfff, 0xfff, 0x1e8,
708 0x184, 0x3c1, 0x1c4, 0xfff, 0x1f9, 0xfff, 0xfff, 0x424,
709 0xfff, 0xfff, 0xfff, 0xfff, 0x1d3, 0x0d4, 0xfff, 0x4e9,
710 0xfff, 0xfff, 0xfff, 0x530, 0x107, 0xfff, 0x106, 0x04f,
711 0xfff, 0xfff, 0x4c7, 0x503, 0xfff, 0xfff, 0xfff, 0xfff,
712 0xfff, 0x15c, 0xfff, 0x23f, 0xfff, 0xfff, 0xfff, 0xfff,
713 0xfff, 0xfff, 0xfff, 0xfff, 0x4f3, 0xfff, 0xfff, 0x3c7,
714 0xfff, 0x278, 0xfff, 0xfff, 0x0a6, 0xfff, 0xfff, 0xfff,
715 0x122, 0x1cf, 0xfff, 0x327, 0xfff, 0x2e5, 0xfff, 0x29d,
716 0xfff, 0xfff, 0x3f1, 0xfff, 0xfff, 0x48d, 0xfff, 0xfff,
717 0xfff, 0xfff, 0x054, 0xfff, 0xfff, 0xfff, 0xfff, 0x178,
718 0x27e, 0x4e0, 0x352, 0x02f, 0x09c, 0xfff, 0x2a0, 0xfff,
719 0xfff, 0x46a, 0x457, 0xfff, 0xfff, 0x501, 0xfff, 0x2ba,
720 0xfff, 0xfff, 0xfff, 0x54e, 0x2e7, 0xfff, 0xfff, 0xfff,
721 0xfff, 0xfff, 0x551, 0xfff, 0xfff, 0x1db, 0x2aa, 0xfff,
722 0xfff, 0x4bc, 0xfff, 0xfff, 0x395, 0xfff, 0x0de, 0xfff,
723 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x455, 0xfff, 0x17e,
724 0xfff, 0x221, 0x4a7, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
725 0x388, 0xfff, 0xfff, 0xfff, 0x308, 0xfff, 0xfff, 0xfff,
726 0x20e, 0x4b9, 0xfff, 0x273, 0x20c, 0x09e, 0xfff, 0x057,
727 0xfff, 0xfff, 0xfff, 0xfff, 0x3f2, 0xfff, 0x1a8, 0x3a6,
728 0x14c, 0xfff, 0xfff, 0x071, 0xfff, 0xfff, 0x53a, 0xfff,
729 0xfff, 0xfff, 0xfff, 0x109, 0xfff, 0xfff, 0x399, 0xfff,
730 0x061, 0x4f0, 0x39e, 0x244, 0xfff, 0x035, 0xfff, 0xfff,
731 0x305, 0x47e, 0x297, 0xfff, 0xfff, 0x2b8, 0xfff, 0xfff,
732 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x1bc, 0xfff, 0x2fc,
733 0xfff, 0xfff, 0x554, 0xfff, 0xfff, 0xfff, 0xfff, 0x3b6,
734 0xfff, 0xfff, 0xfff, 0x515, 0x397, 0xfff, 0xfff, 0x12f,
735 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x4e5,
736 0xfff, 0x4fc, 0xfff, 0xfff, 0x05e, 0xfff, 0xfff, 0xfff,
737 0xfff, 0xfff, 0x0a8, 0x3af, 0x015, 0xfff, 0xfff, 0xfff,
738 0xfff, 0x138, 0xfff, 0xfff, 0xfff, 0x540, 0xfff, 0xfff,
739 0xfff, 0x027, 0x523, 0x2f0, 0xfff, 0xfff, 0xfff, 0xfff,
740 0xfff, 0xfff, 0x16c, 0xfff, 0x27d, 0xfff, 0xfff, 0xfff,
741 0xfff, 0x04c, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x4dc,
742 0xfff, 0xfff, 0x059, 0x301, 0xfff, 0xfff, 0xfff, 0xfff,
743 0xfff, 0xfff, 0xfff, 0x1a3, 0xfff, 0x15a, 0xfff, 0xfff,
744 0x0a5, 0xfff, 0x435, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
745 0xfff, 0x051, 0xfff, 0xfff, 0x131, 0xfff, 0x4f4, 0xfff,
746 0xfff, 0xfff, 0xfff, 0x441, 0xfff, 0x4fb, 0xfff, 0x03b,
747 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x1ed, 0x274,
748 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x0d3, 0x55e, 0x1b3,
749 0xfff, 0x0bd, 0xfff, 0xfff, 0xfff, 0xfff, 0x225, 0xfff,
750 0xfff, 0xfff, 0xfff, 0xfff, 0x4b7, 0xfff, 0xfff, 0x2ff,
751 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x4c3, 0xfff,
752 0x383, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x2f6,
753 0xfff, 0xfff, 0x1ee, 0xfff, 0x03d, 0xfff, 0xfff, 0xfff,
754 0xfff, 0xfff, 0x26f, 0x1dc, 0xfff, 0x0db, 0xfff, 0xfff,
755 0xfff, 0xfff, 0xfff, 0x0ce, 0xfff, 0xfff, 0x127, 0x03a,
756 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x311, 0xfff,
757 0xfff, 0x13d, 0x09d, 0x47b, 0x2a6, 0x50d, 0x510, 0x19a,
758 0xfff, 0x354, 0x414, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
759 0xfff, 0xfff, 0x44c, 0x3b0, 0xfff, 0x23d, 0x429, 0xfff,
760 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
761 0x4c0, 0x416, 0xfff, 0x05b, 0xfff, 0xfff, 0x137, 0xfff,
762 0x25f, 0x49f, 0xfff, 0x279, 0x013, 0xfff, 0xfff, 0xfff,
763 0x269, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
764 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x3d0, 0xfff, 0xfff,
765 0xfff, 0xfff, 0xfff, 0xfff, 0x077, 0xfff, 0xfff, 0x3fb,
766 0xfff, 0xfff, 0xfff, 0xfff, 0x271, 0x3a0, 0xfff, 0xfff,
767 0x40f, 0xfff, 0xfff, 0x3de, 0xfff, 0xfff, 0xfff, 0xfff,
768 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x1ab, 0x26a,
769 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x489, 0xfff, 0xfff,
770 0x252, 0xfff, 0xfff, 0xfff, 0xfff, 0x1b7, 0x42f, 0xfff,
771 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x3b7,
772 0xfff, 0x2bb, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
773 0xfff, 0xfff, 0xfff, 0x0f7, 0x01d, 0xfff, 0x067, 0xfff,
774 0xfff, 0xfff, 0xfff, 0x4e2, 0xfff, 0xfff, 0x4bb, 0xfff,
775 0xfff, 0xfff, 0x17b, 0xfff, 0x0ee, 0xfff, 0xfff, 0xfff,
776 0xfff, 0xfff, 0x36e, 0xfff, 0xfff, 0xfff, 0x533, 0xfff,
777 0xfff, 0xfff, 0x4d4, 0x356, 0xfff, 0xfff, 0x375, 0xfff,
778 0xfff, 0xfff, 0xfff, 0x4a4, 0x513, 0xfff, 0xfff, 0xfff,
779 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x4ff, 0xfff, 0x2af,
780 0xfff, 0xfff, 0x026, 0xfff, 0x0ad, 0xfff, 0xfff, 0xfff,
781 0xfff, 0x26e, 0xfff, 0xfff, 0xfff, 0xfff, 0x493, 0xfff,
782 0x463, 0x4d2, 0x4be, 0xfff, 0xfff, 0xfff, 0xfff, 0x4f2,
783 0x0b6, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
784 0xfff, 0x32d, 0x315, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
785 0xfff, 0x13a, 0x4a1, 0xfff, 0x27a, 0xfff, 0xfff, 0xfff,
786 0x47a, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
787 0x334, 0xfff, 0xfff, 0xfff, 0xfff, 0x54c, 0xfff, 0xfff,
788 0xfff, 0x0c9, 0x007, 0xfff, 0xfff, 0x12e, 0xfff, 0x0ff,
789 0xfff, 0xfff, 0x3f5, 0x509, 0xfff, 0xfff, 0xfff, 0xfff,
790 0x1c3, 0x2ad, 0xfff, 0xfff, 0x47c, 0x261, 0xfff, 0xfff,
791 0xfff, 0xfff, 0xfff, 0x152, 0xfff, 0xfff, 0xfff, 0x339,
792 0xfff, 0x243, 0x1c0, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
793 0x063, 0xfff, 0xfff, 0x254, 0xfff, 0xfff, 0x173, 0xfff,
794 0x0c7, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
795 0xfff, 0x362, 0x259, 0x485, 0x374, 0x0dc, 0x3ab, 0xfff,
796 0x1c5, 0x534, 0x544, 0xfff, 0xfff, 0x508, 0xfff, 0x402,
797 0x408, 0xfff, 0x0e7, 0xfff, 0xfff, 0x00a, 0x205, 0xfff,
798 0xfff, 0x2b9, 0xfff, 0xfff, 0xfff, 0x465, 0xfff, 0xfff,
799 0xfff, 0xfff, 0xfff, 0xfff, 0x23a, 0xfff, 0xfff, 0xfff,
800 0xfff, 0x147, 0x19d, 0x115, 0x214, 0xfff, 0x090, 0x368,
801 0xfff, 0x210, 0xfff, 0xfff, 0x280, 0x52a, 0x163, 0x148,
802 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x326, 0xfff, 0xfff,
803 0xfff, 0xfff, 0xfff, 0x2de, 0xfff, 0xfff, 0xfff, 0xfff,
804 0x206, 0x2c1, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
805 0x189, 0xfff, 0xfff, 0xfff, 0xfff, 0x367, 0xfff, 0x1a4,
806 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x443, 0xfff, 0x27b,
807 0xfff, 0xfff, 0x251, 0x549, 0xfff, 0xfff, 0xfff, 0xfff,
808 0xfff, 0xfff, 0x188, 0x04b, 0xfff, 0xfff, 0xfff, 0x31f,
809 0x4a6, 0xfff, 0x246, 0x1de, 0x156, 0xfff, 0xfff, 0xfff,
810 0x3a9, 0xfff, 0xfff, 0xfff, 0x2fa, 0xfff, 0x128, 0x0d1,
811 0x449, 0x255, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
812 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
813 0xfff, 0xfff, 0xfff, 0xfff, 0x258, 0xfff, 0xfff, 0xfff,
814 0x532, 0xfff, 0xfff, 0xfff, 0x303, 0x517, 0xfff, 0xfff,
815 0x2a9, 0x24a, 0xfff, 0xfff, 0x231, 0xfff, 0xfff, 0xfff,
816 0xfff, 0xfff, 0x4b6, 0x516, 0xfff, 0xfff, 0x0e4, 0x0eb,
817 0xfff, 0x4e4, 0xfff, 0x275, 0xfff, 0xfff, 0x031, 0xfff,
818 0xfff, 0xfff, 0xfff, 0xfff, 0x025, 0x21a, 0xfff, 0x0cc,
819 0x45f, 0x3d9, 0x289, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
820 0xfff, 0xfff, 0x23e, 0xfff, 0xfff, 0xfff, 0x438, 0x097,
821 0x419, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
822 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
823 0xfff, 0xfff, 0x0a9, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
824 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
825 0x37e, 0x0e0, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x431,
826 0x372, 0xfff, 0xfff, 0xfff, 0x1ba, 0x06e, 0xfff, 0x1b1,
827 0xfff, 0xfff, 0x12a, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
828 0xfff, 0xfff, 0x193, 0xfff, 0xfff, 0xfff, 0xfff, 0x10a,
829 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x048, 0x1b4,
830 0xfff, 0xfff, 0xfff, 0xfff, 0x295, 0x140, 0x108, 0xfff,
831 0xfff, 0xfff, 0xfff, 0x16f, 0xfff, 0x0a4, 0x37a, 0xfff,
832 0x29a, 0xfff, 0x284, 0xfff, 0xfff, 0xfff, 0xfff, 0x4c6,
833 0x2a2, 0x3a3, 0xfff, 0x201, 0xfff, 0xfff, 0xfff, 0x4bd,
834 0x005, 0x54a, 0x3b5, 0x204, 0x2ee, 0x11d, 0x436, 0xfff,
835 0xfff, 0xfff, 0xfff, 0xfff, 0x3ec, 0xfff, 0xfff, 0xfff,
836 0xfff, 0xfff, 0xfff, 0xfff, 0x11f, 0x498, 0x21c, 0xfff,
837 0xfff, 0xfff, 0x3d6, 0xfff, 0x4ab, 0xfff, 0x432, 0x2eb,
838 0x542, 0x4fd, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
839 0xfff, 0xfff, 0xfff, 0x4ce, 0xfff, 0xfff, 0x2fb, 0xfff,
840 0xfff, 0x2e1, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
841 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x1b9, 0x037, 0x0dd,
842 0xfff, 0xfff, 0xfff, 0x2bf, 0x521, 0x496, 0x095, 0xfff,
843 0xfff, 0x328, 0x070, 0x1bf, 0xfff, 0x393, 0xfff, 0xfff,
844 0x102, 0xfff, 0xfff, 0x21b, 0xfff, 0x142, 0x263, 0x519,
845 0xfff, 0x2a5, 0x177, 0xfff, 0x14d, 0x471, 0x4ae, 0xfff,
846 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
847 0x1f6, 0xfff, 0x481, 0xfff, 0xfff, 0xfff, 0x151, 0xfff,
848 0xfff, 0xfff, 0x085, 0x33f, 0xfff, 0xfff, 0xfff, 0x084,
849 0xfff, 0xfff, 0xfff, 0x345, 0x3a2, 0xfff, 0xfff, 0x0a0,
850 0x0da, 0x024, 0xfff, 0xfff, 0xfff, 0x1bd, 0xfff, 0x55c,
851 0x467, 0x445, 0xfff, 0xfff, 0xfff, 0x052, 0xfff, 0xfff,
852 0xfff, 0xfff, 0x51e, 0xfff, 0xfff, 0x39d, 0xfff, 0x35f,
853 0xfff, 0x376, 0x3ee, 0xfff, 0xfff, 0xfff, 0xfff, 0x448,
854 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x16a,
855 0xfff, 0x036, 0x38f, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
856 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x211,
857 0xfff, 0xfff, 0xfff, 0x230, 0xfff, 0xfff, 0x3ba, 0xfff,
858 0xfff, 0xfff, 0x3ce, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
859 0xfff, 0xfff, 0xfff, 0x229, 0xfff, 0x176, 0xfff, 0xfff,
860 0xfff, 0xfff, 0xfff, 0x00b, 0xfff, 0x162, 0x018, 0xfff,
861 0xfff, 0x233, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
862 0x400, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
863 0xfff, 0xfff, 0xfff, 0x12b, 0xfff, 0xfff, 0xfff, 0xfff,
864 0xfff, 0x3f4, 0xfff, 0x0f0, 0xfff, 0x1ac, 0xfff, 0xfff,
865 0x119, 0xfff, 0x2c0, 0xfff, 0xfff, 0xfff, 0x49b, 0xfff,
866 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x23c, 0xfff,
867 0x4b3, 0x010, 0x064, 0xfff, 0xfff, 0x4ba, 0xfff, 0xfff,
868 0xfff, 0xfff, 0xfff, 0x3c2, 0xfff, 0xfff, 0xfff, 0xfff,
869 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x006, 0x196, 0xfff,
870 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x100, 0x191, 0xfff,
871 0x1ea, 0x29f, 0xfff, 0xfff, 0xfff, 0x276, 0xfff, 0xfff,
872 0x2b1, 0x3b9, 0xfff, 0x03c, 0xfff, 0xfff, 0xfff, 0x180,
873 0xfff, 0x08f, 0xfff, 0xfff, 0x19e, 0x019, 0xfff, 0x0b0,
874 0x0fd, 0x332, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
875 0xfff, 0x06b, 0x2e8, 0xfff, 0x446, 0xfff, 0xfff, 0x004,
876 0x247, 0x197, 0xfff, 0x112, 0x169, 0x292, 0xfff, 0x302,
877 0xfff, 0xfff, 0x33b, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
878 0xfff, 0xfff, 0xfff, 0x287, 0x21f, 0xfff, 0x3ea, 0xfff,
879 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x4e7, 0xfff, 0xfff,
880 0xfff, 0xfff, 0xfff, 0x3a8, 0xfff, 0xfff, 0x2bc, 0xfff,
881 0x484, 0x296, 0xfff, 0x1c9, 0x08c, 0x1e5, 0x48a, 0xfff,
882 0x360, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
883 0x1ca, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
884 0xfff, 0xfff, 0xfff, 0x10d, 0xfff, 0xfff, 0xfff, 0xfff,
885 0xfff, 0xfff, 0x066, 0x2ea, 0x28b, 0x25b, 0xfff, 0x072,
886 0xfff, 0xfff, 0xfff, 0xfff, 0x2b6, 0xfff, 0xfff, 0x272,
887 0xfff, 0xfff, 0x525, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
888 0x2ca, 0xfff, 0xfff, 0xfff, 0x299, 0xfff, 0xfff, 0xfff,
889 0x558, 0x41a, 0xfff, 0x4f7, 0x557, 0xfff, 0x4a0, 0x344,
890 0x12c, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x125,
891 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
892 0x40e, 0xfff, 0xfff, 0x502, 0xfff, 0x103, 0x3e6, 0xfff,
893 0x527, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
894 0xfff, 0xfff, 0xfff, 0x45d, 0xfff, 0xfff, 0xfff, 0xfff,
895 0x44e, 0xfff, 0xfff, 0xfff, 0xfff, 0x0d2, 0x4c9, 0x35e,
896 0x459, 0x2d9, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x17d,
897 0x0c4, 0xfff, 0xfff, 0xfff, 0x3ac, 0x390, 0x094, 0xfff,
898 0x483, 0x0ab, 0xfff, 0x253, 0xfff, 0x391, 0xfff, 0xfff,
899 0xfff, 0xfff, 0x123, 0x0ef, 0xfff, 0xfff, 0xfff, 0x330,
900 0x38c, 0xfff, 0xfff, 0x2ae, 0xfff, 0xfff, 0xfff, 0x042,
901 0x012, 0x06d, 0xfff, 0xfff, 0xfff, 0x32a, 0x3db, 0x364,
902 0x2dc, 0xfff, 0x30f, 0x3d7, 0x4a5, 0x050, 0xfff, 0xfff,
903 0x029, 0xfff, 0xfff, 0xfff, 0xfff, 0x1d1, 0xfff, 0xfff,
904 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x480, 0xfff,
905 0x4ed, 0x081, 0x0a1, 0xfff, 0xfff, 0xfff, 0x30e, 0x52f,
906 0x257, 0xfff, 0xfff, 0x447, 0xfff, 0xfff, 0xfff, 0xfff,
907 0xfff, 0xfff, 0xfff, 0x401, 0x3cc, 0xfff, 0xfff, 0x0fb,
908 0x2c9, 0x42a, 0x314, 0x33e, 0x3bd, 0x318, 0xfff, 0x10e,
909 0x2a1, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x24c,
910 0x506, 0xfff, 0x267, 0xfff, 0xfff, 0x219, 0xfff, 0x1eb,
911 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
912 0x309, 0x3e2, 0x46c, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
913 0x384, 0xfff, 0xfff, 0xfff, 0xfff, 0x50c, 0xfff, 0x24b,
914 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x038,
915 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x194,
916 0x143, 0x3e3, 0xfff, 0xfff, 0xfff, 0x4c2, 0xfff, 0xfff,
917 0x0e1, 0x25c, 0xfff, 0x237, 0xfff, 0x1fe, 0xfff, 0xfff,
918 0xfff, 0x065, 0x2a4, 0xfff, 0x386, 0x55a, 0x11b, 0xfff,
919 0xfff, 0x192, 0xfff, 0x183, 0x00e, 0xfff, 0xfff, 0xfff,
920 0xfff, 0xfff, 0xfff, 0x4b2, 0x18e, 0xfff, 0xfff, 0xfff,
921 0xfff, 0x486, 0x4ef, 0x0c6, 0x380, 0xfff, 0x4a8, 0xfff,
922 0x0c5, 0xfff, 0xfff, 0xfff, 0xfff, 0x093, 0x1b8, 0xfff,
923 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x2e6,
924 0xfff, 0x0f3, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
925 0x28e, 0xfff, 0x53b, 0x420, 0x22a, 0x33a, 0xfff, 0x387,
926 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x2a3, 0xfff, 0xfff,
927 0xfff, 0x428, 0x500, 0xfff, 0xfff, 0x120, 0x2c6, 0x290,
928 0x2f5, 0x0e3, 0xfff, 0x0b7, 0xfff, 0x319, 0x474, 0xfff,
929 0xfff, 0xfff, 0x529, 0x014, 0xfff, 0x41b, 0x40a, 0x18b,
930 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x0d9,
931 0xfff, 0x38a, 0xfff, 0xfff, 0xfff, 0xfff, 0x1ce, 0xfff,
932 0xfff, 0xfff, 0xfff, 0xfff, 0x3b1, 0xfff, 0xfff, 0x05d,
933 0x2c4, 0xfff, 0xfff, 0x4af, 0xfff, 0x030, 0xfff, 0xfff,
934 0x203, 0xfff, 0x277, 0x256, 0xfff, 0xfff, 0xfff, 0x4f9,
935 0xfff, 0x2c7, 0xfff, 0x466, 0x016, 0x1cd, 0xfff, 0x167,
936 0xfff, 0xfff, 0x0c8, 0xfff, 0x43d, 0xfff, 0xfff, 0x020,
937 0xfff, 0xfff, 0x232, 0x1cb, 0x1e0, 0xfff, 0xfff, 0x347,
938 0xfff, 0x478, 0xfff, 0x365, 0xfff, 0xfff, 0xfff, 0xfff,
939 0x358, 0xfff, 0x10b, 0xfff, 0x35d, 0xfff, 0xfff, 0xfff,
940 0xfff, 0xfff, 0x452, 0x22d, 0xfff, 0xfff, 0x47d, 0xfff,
941 0x2f3, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x460, 0xfff,
942 0xfff, 0xfff, 0x50b, 0xfff, 0xfff, 0xfff, 0x2ec, 0xfff,
943 0xfff, 0xfff, 0xfff, 0xfff, 0x4b1, 0x422, 0xfff, 0xfff,
944 0xfff, 0x2d4, 0xfff, 0x239, 0xfff, 0xfff, 0xfff, 0x439,
945 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
946 0xfff, 0x491, 0x075, 0xfff, 0xfff, 0xfff, 0x06c, 0xfff,
947 0xfff, 0x0f9, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
948 0xfff, 0x139, 0xfff, 0x4f6, 0xfff, 0xfff, 0x409, 0xfff,
949 0xfff, 0x15b, 0xfff, 0xfff, 0x348, 0xfff, 0xfff, 0xfff,
950 0xfff, 0x4a2, 0x49d, 0xfff, 0x033, 0x175, 0xfff, 0x039,
951 0xfff, 0x312, 0x40c, 0xfff, 0xfff, 0x325, 0xfff, 0xfff,
952 0xfff, 0xfff, 0xfff, 0xfff, 0x4aa, 0xfff, 0xfff, 0xfff,
953 0xfff, 0xfff, 0xfff, 0x165, 0x3bc, 0x48c, 0x310, 0x096,
954 0xfff, 0xfff, 0x250, 0x1a2, 0xfff, 0xfff, 0xfff, 0xfff,
955 0x20d, 0x2ac, 0xfff, 0xfff, 0x39b, 0xfff, 0x377, 0xfff,
956 0x512, 0x495, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
957 0xfff, 0xfff, 0xfff, 0xfff, 0x357, 0x4ea, 0xfff, 0xfff,
958 0xfff, 0xfff, 0x198, 0xfff, 0xfff, 0xfff, 0x434, 0x04a,
959 0xfff, 0xfff, 0xfff, 0xfff, 0x062, 0xfff, 0x1d6, 0x1c8,
960 0xfff, 0x1f3, 0x281, 0xfff, 0x462, 0xfff, 0xfff, 0xfff,
961 0x4b0, 0xfff, 0x207, 0xfff, 0xfff, 0xfff, 0xfff, 0x3dd,
962 0xfff, 0xfff, 0x55d, 0xfff, 0x552, 0x494, 0x1af, 0xfff,
963 0xfff, 0xfff, 0xfff, 0xfff, 0x227, 0xfff, 0xfff, 0x069,
964 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x43e,
965 0x0b5, 0xfff, 0x524, 0x2d2, 0xfff, 0xfff, 0xfff, 0x28f,
966 0xfff, 0x01b, 0x50e, 0xfff, 0xfff, 0x1bb, 0xfff, 0xfff,
967 0x41d, 0xfff, 0x32e, 0x48e, 0xfff, 0x1f7, 0x224, 0xfff,
968 0xfff, 0xfff, 0xfff, 0xfff, 0x394, 0xfff, 0xfff, 0xfff,
969 0xfff, 0x52c, 0xfff, 0xfff, 0xfff, 0x392, 0xfff, 0x1e7,
970 0xfff, 0xfff, 0x3f9, 0x3a7, 0xfff, 0x51f, 0xfff, 0x0bb,
971 0x118, 0x3ca, 0xfff, 0x1dd, 0xfff, 0x48b, 0xfff, 0xfff,
972 0xfff, 0xfff, 0x50f, 0xfff, 0x0d6, 0xfff, 0x1fa, 0xfff,
973 0x11e, 0xfff, 0xfff, 0xfff, 0xfff, 0x4d7, 0xfff, 0x078,
974 0x008, 0xfff, 0x25d, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
975 0x032, 0x33c, 0xfff, 0x4d9, 0x160, 0xfff, 0xfff, 0x300,
976 0x0b1, 0xfff, 0x322, 0xfff, 0x4ec, 0xfff, 0xfff, 0x200,
977 0x00c, 0x369, 0x473, 0xfff, 0xfff, 0x32c, 0xfff, 0xfff,
978 0xfff, 0xfff, 0xfff, 0xfff, 0x53e, 0x3d4, 0x417, 0xfff,
979 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
980 0x34b, 0x001, 0x39a, 0x02c, 0xfff, 0xfff, 0x2ce, 0x00f,
981 0xfff, 0x0ba, 0xfff, 0xfff, 0xfff, 0xfff, 0x060, 0xfff,
982 0x406, 0xfff, 0xfff, 0xfff, 0x4ee, 0x4ac, 0xfff, 0x43f,
983 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x29b, 0xfff, 0xfff,
984 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x216,
985 0x190, 0xfff, 0x396, 0x464, 0xfff, 0xfff, 0x323, 0xfff,
986 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x2e9, 0xfff, 0x26d,
987 0x2cd, 0x040, 0xfff, 0xfff, 0xfff, 0xfff, 0x38b, 0x3c0,
988 0xfff, 0xfff, 0xfff, 0x1f2, 0xfff, 0x0ea, 0xfff, 0xfff,
989 0x472, 0xfff, 0x1fb, 0xfff, 0xfff, 0x0af, 0x27f, 0xfff,
990 0xfff, 0xfff, 0x479, 0x023, 0xfff, 0x0d8, 0x3b3, 0xfff,
991 0xfff, 0xfff, 0x121, 0xfff, 0xfff, 0x3bf, 0xfff, 0xfff,
992 0x16b, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
993 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
994 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
995 0x45a, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
996 0xfff, 0x0be, 0xfff, 0xfff, 0xfff, 0x111, 0xfff, 0x220,
997 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
998 0xfff, 0xfff, 0x09b, 0x218, 0xfff, 0x022, 0x202, 0xfff,
999 0x4c8, 0xfff, 0x0ed, 0xfff, 0xfff, 0x182, 0xfff, 0xfff,
1000 0xfff, 0x17f, 0x213, 0xfff, 0x321, 0x36a, 0xfff, 0x086,
1001 0xfff, 0xfff, 0xfff, 0x43b, 0x088, 0xfff, 0xfff, 0xfff,
1002 0xfff, 0x26c, 0xfff, 0x2f8, 0x3b4, 0xfff, 0xfff, 0xfff,
1003 0x132, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x333, 0x444,
1004 0x0c1, 0x4d8, 0x46d, 0x264, 0xfff, 0xfff, 0xfff, 0xfff,
1005 0x426, 0xfff, 0xfff, 0xfff, 0xfff, 0x2fe, 0xfff, 0xfff,
1006 0xfff, 0xfff, 0x011, 0xfff, 0x05f, 0xfff, 0xfff, 0xfff,
1007 0xfff, 0x10c, 0x101, 0xfff, 0xfff, 0xfff, 0xfff, 0x110,
1008 0xfff, 0x044, 0x304, 0x361, 0x404, 0xfff, 0x51b, 0x099,
1009 0xfff, 0x440, 0xfff, 0xfff, 0xfff, 0x222, 0xfff, 0xfff,
1010 0xfff, 0xfff, 0x1b5, 0xfff, 0x136, 0x430, 0xfff, 0x1da,
1011 0xfff, 0xfff, 0xfff, 0x043, 0xfff, 0x17c, 0xfff, 0xfff,
1012 0xfff, 0x01c, 0xfff, 0xfff, 0xfff, 0x425, 0x236, 0xfff,
1013 0x317, 0xfff, 0xfff, 0x437, 0x3fc, 0xfff, 0x1f1, 0xfff,
1014 0x324, 0xfff, 0xfff, 0x0ca, 0x306, 0xfff, 0x548, 0xfff,
1015 0x46e, 0xfff, 0xfff, 0xfff, 0x4b8, 0x1c2, 0x286, 0xfff,
1016 0xfff, 0x087, 0x18a, 0x19f, 0xfff, 0xfff, 0xfff, 0xfff,
1017 0x18c, 0xfff, 0x215, 0xfff, 0xfff, 0xfff, 0xfff, 0x283,
1018 0xfff, 0xfff, 0xfff, 0x126, 0xfff, 0xfff, 0x370, 0xfff,
1019 0x53f, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0x31c, 0xfff,
1020 0x4d1, 0xfff, 0xfff, 0xfff, 0x021, 0xfff, 0x157, 0xfff,
1021 0xfff, 0x028, 0x16e, 0xfff, 0x421, 0xfff, 0x1c6, 0xfff,
1022 0xfff, 0x511, 0xfff, 0xfff, 0x39c, 0x46f, 0x1b2, 0xfff,
1023 0xfff, 0x316, 0xfff, 0xfff, 0x009, 0xfff, 0xfff, 0x195,
1024 0xfff, 0x240, 0x546, 0xfff, 0xfff, 0x520, 0xfff, 0xfff,
1025 0xfff, 0xfff, 0xfff, 0xfff, 0x454, 0xfff, 0xfff, 0xfff,
1026 0x3f3, 0xfff, 0xfff, 0x187, 0xfff, 0x4a9, 0xfff, 0xfff,
1027 0xfff, 0xfff, 0xfff, 0xfff, 0x51c, 0x453, 0x1e6, 0xfff,
1028 0xfff, 0xfff, 0x1b0, 0xfff, 0x477, 0xfff, 0xfff, 0xfff,
1029 0x4fe, 0xfff, 0x32f, 0xfff, 0xfff, 0x15e, 0x1d4, 0xfff,
1030 0x0e5, 0xfff, 0xfff, 0xfff, 0x242, 0x14b, 0x046, 0xfff,
1031 0x3f6, 0x3bb, 0x3e4, 0xfff, 0xfff, 0x2e3, 0xfff, 0x245,
1032 0xfff, 0x149, 0xfff, 0xfff, 0xfff, 0x2db, 0xfff, 0xfff,
1033 0x181, 0xfff, 0x089, 0x2c5, 0xfff, 0x1f5, 0xfff, 0x2d6,
1034 0x507, 0xfff, 0x42d, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
1035 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
1036 0x080, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff, 0xfff,
1037 0xfff, 0xfff, 0xfff, 0xfff, 0x3c3, 0x320, 0xfff, 0x1e1,
1038 0xfff, 0x0f5, 0x13b, 0xfff, 0xfff, 0xfff, 0x003, 0x4da,
1039 0xfff, 0xfff, 0xfff, 0x42c, 0xfff, 0xfff, 0x0cb, 0xfff,
1040 0x536, 0x2c3, 0xfff, 0xfff, 0xfff, 0xfff, 0x199, 0xfff,
1041 0xfff, 0x0c0, 0xfff, 0x01e, 0x497, 0xfff, 0xfff, 0x3e5,
1042 0xfff, 0xfff, 0xfff, 0x0cf, 0xfff, 0x2bd, 0xfff, 0x223,
1043 0xfff, 0x3ff, 0xfff, 0x058, 0x174, 0x3ef, 0xfff, 0x002
1044};
1045
1046static unsigned short err_pos(unsigned short din)
1047{
1048 BUG_ON(din >= ARRAY_SIZE(err_pos_lut));
1049 return err_pos_lut[din];
1050}
1051static int chk_no_err_only(unsigned short *chk_syndrome_list, unsigned short *err_info)
1052{
1053 if ((chk_syndrome_list[0] | chk_syndrome_list[1] |
1054 chk_syndrome_list[2] | chk_syndrome_list[3] |
1055 chk_syndrome_list[4] | chk_syndrome_list[5] |
1056 chk_syndrome_list[6] | chk_syndrome_list[7]) != 0x0) {
1057 return -EINVAL;
1058 } else {
1059 err_info[0] = 0x0;
1060 return 0;
1061 }
1062}
1063static int chk_1_err_only(unsigned short *chk_syndrome_list, unsigned short *err_info)
1064{
1065 unsigned short tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
1066 tmp0 = gf4096_mul(chk_syndrome_list[1], gf4096_inv(chk_syndrome_list[0]));
1067 tmp1 = gf4096_mul(chk_syndrome_list[2], gf4096_inv(chk_syndrome_list[1]));
1068 tmp2 = gf4096_mul(chk_syndrome_list[3], gf4096_inv(chk_syndrome_list[2]));
1069 tmp3 = gf4096_mul(chk_syndrome_list[4], gf4096_inv(chk_syndrome_list[3]));
1070 tmp4 = gf4096_mul(chk_syndrome_list[5], gf4096_inv(chk_syndrome_list[4]));
1071 tmp5 = gf4096_mul(chk_syndrome_list[6], gf4096_inv(chk_syndrome_list[5]));
1072 tmp6 = gf4096_mul(chk_syndrome_list[7], gf4096_inv(chk_syndrome_list[6]));
1073 if ((tmp0 == tmp1) & (tmp1 == tmp2) & (tmp2 == tmp3) & (tmp3 == tmp4) & (tmp4 == tmp5) & (tmp5 == tmp6)) {
1074 err_info[0] = 0x1; // encode 1-symbol error as 0x1
1075 err_info[1] = err_pos(tmp0);
1076 err_info[1] = (unsigned short)(0x55e - err_info[1]);
1077 err_info[5] = chk_syndrome_list[0];
1078 return 0;
1079 } else
1080 return -EINVAL;
1081}
1082static int chk_2_err_only(unsigned short *chk_syndrome_list, unsigned short *err_info)
1083{
1084 unsigned short tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
1085 unsigned short coefs[4];
1086 unsigned short err_pats[4];
1087 int found_num_root = 0;
1088 unsigned short bit2_root0, bit2_root1;
1089 unsigned short bit2_root0_inv, bit2_root1_inv;
1090 unsigned short err_loc_eqn, test_root;
1091 unsigned short bit2_loc0, bit2_loc1;
1092 unsigned short bit2_pat0, bit2_pat1;
1093
1094 find_2x2_soln(chk_syndrome_list[1],
1095 chk_syndrome_list[0],
1096 chk_syndrome_list[2], chk_syndrome_list[1], chk_syndrome_list[2], chk_syndrome_list[3], coefs);
1097 for (test_root = 0x1; test_root < 0xfff; test_root++) {
1098 err_loc_eqn =
1099 gf4096_mul(coefs[1], gf4096_mul(test_root, test_root)) ^ gf4096_mul(coefs[0], test_root) ^ 0x1;
1100 if (err_loc_eqn == 0x0) {
1101 if (found_num_root == 0) {
1102 bit2_root0 = test_root;
1103 found_num_root = 1;
1104 } else if (found_num_root == 1) {
1105 bit2_root1 = test_root;
1106 found_num_root = 2;
1107 break;
1108 }
1109 }
1110 }
1111 if (found_num_root != 2)
1112 return -EINVAL;
1113 else {
1114 bit2_root0_inv = gf4096_inv(bit2_root0);
1115 bit2_root1_inv = gf4096_inv(bit2_root1);
1116 find_2bit_err_pats(chk_syndrome_list[0],
1117 chk_syndrome_list[1], bit2_root0_inv, bit2_root1_inv, err_pats);
1118 bit2_pat0 = err_pats[0];
1119 bit2_pat1 = err_pats[1];
1120 //for(x+1)
1121 tmp0 = gf4096_mul(gf4096_mul(bit2_root0_inv, bit2_root0_inv), gf4096_mul(bit2_root0_inv, bit2_root0_inv)); //rinv0^4
1122 tmp1 = gf4096_mul(bit2_root0_inv, tmp0); //rinv0^5
1123 tmp2 = gf4096_mul(bit2_root0_inv, tmp1); //rinv0^6
1124 tmp3 = gf4096_mul(bit2_root0_inv, tmp2); //rinv0^7
1125 tmp4 = gf4096_mul(gf4096_mul(bit2_root1_inv, bit2_root1_inv), gf4096_mul(bit2_root1_inv, bit2_root1_inv)); //rinv1^4
1126 tmp5 = gf4096_mul(bit2_root1_inv, tmp4); //rinv1^5
1127 tmp6 = gf4096_mul(bit2_root1_inv, tmp5); //rinv1^6
1128 tmp7 = gf4096_mul(bit2_root1_inv, tmp6); //rinv1^7
1129 //check if only 2-bit error
1130 if ((chk_syndrome_list[4] ==
1131 (gf4096_mul(bit2_pat0, tmp0) ^
1132 gf4096_mul(bit2_pat1,
1133 tmp4))) & (chk_syndrome_list[5] ==
1134 (gf4096_mul(bit2_pat0, tmp1) ^
1135 gf4096_mul(bit2_pat1,
1136 tmp5))) &
1137 (chk_syndrome_list[6] ==
1138 (gf4096_mul(bit2_pat0, tmp2) ^
1139 gf4096_mul(bit2_pat1,
1140 tmp6))) & (chk_syndrome_list[7] ==
1141 (gf4096_mul(bit2_pat0, tmp3) ^ gf4096_mul(bit2_pat1, tmp7)))) {
1142 if ((err_pos(bit2_root0_inv) == 0xfff) | (err_pos(bit2_root1_inv) == 0xfff)) {
1143 return -EINVAL;
1144 } else {
1145 bit2_loc0 = 0x55e - err_pos(bit2_root0_inv);
1146 bit2_loc1 = 0x55e - err_pos(bit2_root1_inv);
1147 err_info[0] = 0x2; // encode 2-symbol error as 0x2
1148 err_info[1] = bit2_loc0;
1149 err_info[2] = bit2_loc1;
1150 err_info[5] = bit2_pat0;
1151 err_info[6] = bit2_pat1;
1152 return 0;
1153 }
1154 } else
1155 return -EINVAL;
1156 }
1157}
1158static int chk_3_err_only(unsigned short *chk_syndrome_list, unsigned short *err_info)
1159{
1160 unsigned short tmp0, tmp1, tmp2, tmp3, tmp4, tmp5;
1161 unsigned short coefs[4];
1162 unsigned short err_pats[4];
1163 int found_num_root = 0;
1164 unsigned short bit3_root0, bit3_root1, bit3_root2;
1165 unsigned short bit3_root0_inv, bit3_root1_inv, bit3_root2_inv;
1166 unsigned short err_loc_eqn, test_root;
1167
1168 find_3bit_err_coefs(chk_syndrome_list[0], chk_syndrome_list[1],
1169 chk_syndrome_list[2], chk_syndrome_list[3],
1170 chk_syndrome_list[4], chk_syndrome_list[5], coefs);
1171
1172 for (test_root = 0x1; test_root < 0xfff; test_root++) {
1173 err_loc_eqn = gf4096_mul(coefs[2],
1174 gf4096_mul(gf4096_mul(test_root, test_root),
1175 test_root)) ^ gf4096_mul(coefs[1], gf4096_mul(test_root, test_root))
1176 ^ gf4096_mul(coefs[0], test_root) ^ 0x1;
1177
1178 if (err_loc_eqn == 0x0) {
1179 if (found_num_root == 0) {
1180 bit3_root0 = test_root;
1181 found_num_root = 1;
1182 } else if (found_num_root == 1) {
1183 bit3_root1 = test_root;
1184 found_num_root = 2;
1185 } else if (found_num_root == 2) {
1186 bit3_root2 = test_root;
1187 found_num_root = 3;
1188 break;
1189 }
1190 }
1191 }
1192 if (found_num_root != 3)
1193 return -EINVAL;
1194 else {
1195 bit3_root0_inv = gf4096_inv(bit3_root0);
1196 bit3_root1_inv = gf4096_inv(bit3_root1);
1197 bit3_root2_inv = gf4096_inv(bit3_root2);
1198
1199 find_3bit_err_pats(chk_syndrome_list[0], chk_syndrome_list[1],
1200 chk_syndrome_list[2], bit3_root0_inv,
1201 bit3_root1_inv, bit3_root2_inv, err_pats);
1202
1203 //check if only 3-bit error
1204 tmp0 = gf4096_mul(bit3_root0_inv, bit3_root0_inv);
1205 tmp0 = gf4096_mul(tmp0, tmp0);
1206 tmp0 = gf4096_mul(tmp0, bit3_root0_inv);
1207 tmp0 = gf4096_mul(tmp0, bit3_root0_inv); //rinv0^6
1208 tmp1 = gf4096_mul(tmp0, bit3_root0_inv); //rinv0^7
1209 tmp2 = gf4096_mul(bit3_root1_inv, bit3_root1_inv);
1210 tmp2 = gf4096_mul(tmp2, tmp2);
1211 tmp2 = gf4096_mul(tmp2, bit3_root1_inv);
1212 tmp2 = gf4096_mul(tmp2, bit3_root1_inv); //rinv1^6
1213 tmp3 = gf4096_mul(tmp2, bit3_root1_inv); //rinv1^7
1214 tmp4 = gf4096_mul(bit3_root2_inv, bit3_root2_inv);
1215 tmp4 = gf4096_mul(tmp4, tmp4);
1216 tmp4 = gf4096_mul(tmp4, bit3_root2_inv);
1217 tmp4 = gf4096_mul(tmp4, bit3_root2_inv); //rinv2^6
1218 tmp5 = gf4096_mul(tmp4, bit3_root2_inv); //rinv2^7
1219
1220 //check if only 3 errors
1221 if ((chk_syndrome_list[6] == (gf4096_mul(err_pats[0], tmp0) ^
1222 gf4096_mul(err_pats[1], tmp2) ^
1223 gf4096_mul(err_pats[2], tmp4))) &
1224 (chk_syndrome_list[7] == (gf4096_mul(err_pats[0], tmp1) ^
1225 gf4096_mul(err_pats[1], tmp3) ^ gf4096_mul(err_pats[2], tmp5)))) {
1226 if ((err_pos(bit3_root0_inv) == 0xfff) |
1227 (err_pos(bit3_root1_inv) == 0xfff) | (err_pos(bit3_root2_inv) == 0xfff)) {
1228 return -EINVAL;
1229 } else {
1230 err_info[0] = 0x3;
1231 err_info[1] = (0x55e - err_pos(bit3_root0_inv));
1232 err_info[2] = (0x55e - err_pos(bit3_root1_inv));
1233 err_info[3] = (0x55e - err_pos(bit3_root2_inv));
1234 err_info[5] = err_pats[0];
1235 err_info[6] = err_pats[1];
1236 err_info[7] = err_pats[2];
1237 return 0;
1238 }
1239 } else
1240 return -EINVAL;
1241 }
1242}
1243static int chk_4_err_only(unsigned short *chk_syndrome_list, unsigned short *err_info)
1244{
1245 unsigned short coefs[4];
1246 unsigned short err_pats[4];
1247 int found_num_root = 0;
1248 unsigned short bit4_root0, bit4_root1, bit4_root2, bit4_root3;
1249 unsigned short bit4_root0_inv, bit4_root1_inv, bit4_root2_inv, bit4_root3_inv;
1250 unsigned short err_loc_eqn, test_root;
1251
1252 find_4bit_err_coefs(chk_syndrome_list[0],
1253 chk_syndrome_list[1],
1254 chk_syndrome_list[2],
1255 chk_syndrome_list[3],
1256 chk_syndrome_list[4],
1257 chk_syndrome_list[5], chk_syndrome_list[6], chk_syndrome_list[7], coefs);
1258
1259 for (test_root = 0x1; test_root < 0xfff; test_root++) {
1260 err_loc_eqn =
1261 gf4096_mul(coefs[3],
1262 gf4096_mul(gf4096_mul
1263 (gf4096_mul(test_root, test_root),
1264 test_root),
1265 test_root)) ^ gf4096_mul(coefs[2],
1266 gf4096_mul
1267 (gf4096_mul(test_root, test_root), test_root))
1268 ^ gf4096_mul(coefs[1], gf4096_mul(test_root, test_root)) ^ gf4096_mul(coefs[0], test_root)
1269 ^ 0x1;
1270 if (err_loc_eqn == 0x0) {
1271 if (found_num_root == 0) {
1272 bit4_root0 = test_root;
1273 found_num_root = 1;
1274 } else if (found_num_root == 1) {
1275 bit4_root1 = test_root;
1276 found_num_root = 2;
1277 } else if (found_num_root == 2) {
1278 bit4_root2 = test_root;
1279 found_num_root = 3;
1280 } else {
1281 found_num_root = 4;
1282 bit4_root3 = test_root;
1283 break;
1284 }
1285 }
1286 }
1287 if (found_num_root != 4) {
1288 return -EINVAL;
1289 } else {
1290 bit4_root0_inv = gf4096_inv(bit4_root0);
1291 bit4_root1_inv = gf4096_inv(bit4_root1);
1292 bit4_root2_inv = gf4096_inv(bit4_root2);
1293 bit4_root3_inv = gf4096_inv(bit4_root3);
1294 find_4bit_err_pats(chk_syndrome_list[0],
1295 chk_syndrome_list[1],
1296 chk_syndrome_list[2],
1297 chk_syndrome_list[3],
1298 bit4_root0_inv, bit4_root1_inv, bit4_root2_inv, bit4_root3_inv, err_pats);
1299 err_info[0] = 0x4;
1300 err_info[1] = (0x55e - err_pos(bit4_root0_inv));
1301 err_info[2] = (0x55e - err_pos(bit4_root1_inv));
1302 err_info[3] = (0x55e - err_pos(bit4_root2_inv));
1303 err_info[4] = (0x55e - err_pos(bit4_root3_inv));
1304 err_info[5] = err_pats[0];
1305 err_info[6] = err_pats[1];
1306 err_info[7] = err_pats[2];
1307 err_info[8] = err_pats[3];
1308 return 0;
1309 }
1310}
1311
1312void correct_12bit_symbol(unsigned char *buf, unsigned short sym,
1313 unsigned short val)
1314{
1315 if (unlikely(sym > 1366)) {
1316 printk(KERN_ERR "Error: symbol %d out of range; cannot correct\n", sym);
1317 } else if (sym == 0) {
1318 buf[0] ^= val;
1319 } else if (sym & 1) {
1320 buf[1+(3*(sym-1))/2] ^= (val >> 4);
1321 buf[2+(3*(sym-1))/2] ^= ((val & 0xf) << 4);
1322 } else {
1323 buf[2+(3*(sym-2))/2] ^= (val >> 8);
1324 buf[3+(3*(sym-2))/2] ^= (val & 0xff);
1325 }
1326}
1327
1328static int debugecc = 0;
1329module_param(debugecc, int, 0644);
1330
1331int cafe_correct_ecc(unsigned char *buf,
1332 unsigned short *chk_syndrome_list)
1333{
1334 unsigned short err_info[9];
1335 int i;
1336
1337 if (debugecc) {
1338 printk(KERN_WARNING "cafe_correct_ecc invoked. Syndromes %x %x %x %x %x %x %x %x\n",
1339 chk_syndrome_list[0], chk_syndrome_list[1],
1340 chk_syndrome_list[2], chk_syndrome_list[3],
1341 chk_syndrome_list[4], chk_syndrome_list[5],
1342 chk_syndrome_list[6], chk_syndrome_list[7]);
1343 for (i=0; i < 2048; i+=16) {
1344 printk(KERN_WARNING "D %04x: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
1345 i,
1346 buf[i], buf[i+1], buf[i+2], buf[i+3],
1347 buf[i+4], buf[i+5], buf[i+6], buf[i+7],
1348 buf[i+8], buf[i+9], buf[i+10], buf[i+11],
1349 buf[i+12], buf[i+13], buf[i+14], buf[i+15]);
1350 }
1351 for ( ; i < 2112; i+=16) {
1352 printk(KERN_WARNING "O %02x: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
1353 i - 2048,
1354 buf[i], buf[i+1], buf[i+2], buf[i+3],
1355 buf[i+4], buf[i+5], buf[i+6], buf[i+7],
1356 buf[i+8], buf[i+9], buf[i+10], buf[i+11],
1357 buf[i+12], buf[i+13], buf[i+14], buf[i+15]);
1358 }
1359 }
1360
1361
1362
1363 if (chk_no_err_only(chk_syndrome_list, err_info) &&
1364 chk_1_err_only(chk_syndrome_list, err_info) &&
1365 chk_2_err_only(chk_syndrome_list, err_info) &&
1366 chk_3_err_only(chk_syndrome_list, err_info) &&
1367 chk_4_err_only(chk_syndrome_list, err_info)) {
1368 return -EIO;
1369 }
1370
1371 for (i=0; i < err_info[0]; i++) {
1372 if (debugecc)
1373 printk(KERN_WARNING "Correct symbol %d with 0x%03x\n",
1374 err_info[1+i], err_info[5+i]);
1375
1376 correct_12bit_symbol(buf, err_info[1+i], err_info[5+i]);
1377 }
1378
1379 return err_info[0];
1380}
1381
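A minimal user-space sketch of the layout that correct_12bit_symbol() above operates on (the byte offsets below are derived from the XOR arithmetic in that function, not from any datasheet): symbol 0 occupies only the low byte of buf[0], and from then on each pair of 12-bit symbols packs into three bytes, so symbols 2k-1 and 2k share byte 3k-1. The same decode chain relies on the identity behind chk_1_err_only(): for a single error of pattern Y at location p, the syndromes are S_k = Y * r^k for a constant field element r, so every ratio S_{k+1}/S_k is equal, which is exactly what the seven tmp comparisons test.

#include <stdio.h>

/* Return the two byte offsets touched by 12-bit symbol 'sym', using the
 * same index arithmetic as correct_12bit_symbol() above. */
static void symbol_bytes(unsigned short sym, int ofs[2])
{
	if (sym == 0) {
		ofs[0] = ofs[1] = 0;		/* low 8 bits of buf[0] only */
	} else if (sym & 1) {
		ofs[0] = 1 + (3 * (sym - 1)) / 2;	/* high 8 bits */
		ofs[1] = ofs[0] + 1;		/* low 4 bits, upper nibble */
	} else {
		ofs[0] = 2 + (3 * (sym - 2)) / 2;	/* high 4 bits, lower nibble */
		ofs[1] = ofs[0] + 1;		/* low 8 bits */
	}
}

int main(void)
{
	int ofs[2];
	unsigned short sym;

	for (sym = 0; sym < 6; sym++) {
		symbol_bytes(sym, ofs);
		printf("symbol %u -> bytes %d..%d\n", sym, ofs[0], ofs[1]);
	}
	return 0;
}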
diff --git a/drivers/mtd/nand/cafe.c b/drivers/mtd/nand/cafe_nand.c
index c328a7514510..cff969d05d4a 100644
--- a/drivers/mtd/nand/cafe.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -11,6 +11,7 @@
 #undef DEBUG
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/nand.h>
+#include <linux/rslib.h>
 #include <linux/pci.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
@@ -46,13 +47,14 @@
 #define CAFE_GLOBAL_IRQ_MASK	0x300c
 #define CAFE_NAND_RESET		0x3034
 
-int cafe_correct_ecc(unsigned char *buf,
-		     unsigned short *chk_syndrome_list);
+/* Missing from the datasheet: bit 19 of CTRL1 sets CE0 vs. CE1 */
+#define CTRL1_CHIPSELECT (1<<19)
 
 struct cafe_priv {
 	struct nand_chip nand;
 	struct pci_dev *pdev;
 	void __iomem *mmio;
+	struct rs_control *rs;
 	uint32_t ctl1;
 	uint32_t ctl2;
 	int datalen;
@@ -195,8 +197,8 @@ static void cafe_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
 
 	cafe->data_pos = cafe->datalen = 0;
 
-	/* Set command valid bit */
-	ctl1 = 0x80000000 | command;
+	/* Set command valid bit, mask in the chip select bit */
+	ctl1 = 0x80000000 | command | (cafe->ctl1 & CTRL1_CHIPSELECT);
 
 	/* Set RD or WR bits as appropriate */
 	if (command == NAND_CMD_READID || command == NAND_CMD_STATUS) {
@@ -309,8 +311,16 @@ static void cafe_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
 
 static void cafe_select_chip(struct mtd_info *mtd, int chipnr)
 {
-	//struct cafe_priv *cafe = mtd->priv;
-	// cafe_dev_dbg(&cafe->pdev->dev, "select_chip %d\n", chipnr);
+	struct cafe_priv *cafe = mtd->priv;
+
+	cafe_dev_dbg(&cafe->pdev->dev, "select_chip %d\n", chipnr);
+
+	/* Mask the appropriate bit into the stored value of ctl1
+	   which will be used by cafe_nand_cmdfunc() */
+	if (chipnr)
+		cafe->ctl1 |= CTRL1_CHIPSELECT;
+	else
+		cafe->ctl1 &= ~CTRL1_CHIPSELECT;
 }
 
 static int cafe_nand_interrupt(int irq, void *id)
@@ -374,28 +384,66 @@ static int cafe_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
 	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
 
 	if (checkecc && cafe_readl(cafe, NAND_ECC_RESULT) & (1<<18)) {
-		unsigned short syn[8];
-		int i;
+		unsigned short syn[8], pat[4];
+		int pos[4];
+		u8 *oob = chip->oob_poi;
+		int i, n;
 
 		for (i=0; i<8; i+=2) {
 			uint32_t tmp = cafe_readl(cafe, NAND_ECC_SYN01 + (i*2));
-			syn[i] = tmp & 0xfff;
-			syn[i+1] = (tmp >> 16) & 0xfff;
+			syn[i] = cafe->rs->index_of[tmp & 0xfff];
+			syn[i+1] = cafe->rs->index_of[(tmp >> 16) & 0xfff];
+		}
+
+		n = decode_rs16(cafe->rs, NULL, NULL, 1367, syn, 0, pos, 0,
+				pat);
+
+		for (i = 0; i < n; i++) {
+			int p = pos[i];
+
+			/* The 12-bit symbols are mapped to bytes here */
+
+			if (p > 1374) {
+				/* out of range */
+				n = -1374;
+			} else if (p == 0) {
+				/* high four bits do not correspond to data */
+				if (pat[i] > 0xff)
+					n = -2048;
+				else
+					buf[0] ^= pat[i];
+			} else if (p == 1365) {
+				buf[2047] ^= pat[i] >> 4;
+				oob[0] ^= pat[i] << 4;
+			} else if (p > 1365) {
+				if ((p & 1) == 1) {
+					oob[3*p/2 - 2048] ^= pat[i] >> 4;
+					oob[3*p/2 - 2047] ^= pat[i] << 4;
+				} else {
+					oob[3*p/2 - 2049] ^= pat[i] >> 8;
+					oob[3*p/2 - 2048] ^= pat[i];
+				}
+			} else if ((p & 1) == 1) {
+				buf[3*p/2] ^= pat[i] >> 4;
+				buf[3*p/2 + 1] ^= pat[i] << 4;
+			} else {
+				buf[3*p/2 - 1] ^= pat[i] >> 8;
+				buf[3*p/2] ^= pat[i];
+			}
 		}
 
-		if ((i = cafe_correct_ecc(buf, syn)) < 0) {
+		if (n < 0) {
 			dev_dbg(&cafe->pdev->dev, "Failed to correct ECC at %08x\n",
 				cafe_readl(cafe, NAND_ADDR2) * 2048);
-			for (i=0; i< 0x5c; i+=4)
+			for (i = 0; i < 0x5c; i += 4)
 				printk("Register %x: %08x\n", i, readl(cafe->mmio + i));
 			mtd->ecc_stats.failed++;
 		} else {
-			dev_dbg(&cafe->pdev->dev, "Corrected %d symbol errors\n", i);
-			mtd->ecc_stats.corrected += i;
+			dev_dbg(&cafe->pdev->dev, "Corrected %d symbol errors\n", n);
+			mtd->ecc_stats.corrected += n;
 		}
 	}
 
-
 	return 0;
 }
 
@@ -416,7 +464,7 @@ static uint8_t cafe_mirror_pattern_512[] = { 0xBC };
 
 static struct nand_bbt_descr cafe_bbt_main_descr_2048 = {
 	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
-		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+		| NAND_BBT_2BIT | NAND_BBT_VERSION,
 	.offs = 14,
 	.len = 4,
 	.veroffs = 18,
@@ -426,7 +474,7 @@ static struct nand_bbt_descr cafe_bbt_main_descr_2048 = {
 
 static struct nand_bbt_descr cafe_bbt_mirror_descr_2048 = {
 	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
-		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+		| NAND_BBT_2BIT | NAND_BBT_VERSION,
 	.offs = 14,
 	.len = 4,
 	.veroffs = 18,
@@ -442,7 +490,7 @@ static struct nand_ecclayout cafe_oobinfo_512 = {
 
 static struct nand_bbt_descr cafe_bbt_main_descr_512 = {
 	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
-		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+		| NAND_BBT_2BIT | NAND_BBT_VERSION,
 	.offs = 14,
 	.len = 1,
 	.veroffs = 15,
@@ -452,7 +500,7 @@ static struct nand_bbt_descr cafe_bbt_main_descr_512 = {
 
 static struct nand_bbt_descr cafe_bbt_mirror_descr_512 = {
 	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
-		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+		| NAND_BBT_2BIT | NAND_BBT_VERSION,
 	.offs = 14,
 	.len = 1,
 	.veroffs = 15,
@@ -525,6 +573,48 @@ static int cafe_nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
 	return 0;
 }
 
+/* F_2[X]/(X**6+X+1) */
+static unsigned short __devinit gf64_mul(u8 a, u8 b)
+{
+	u8 c;
+	unsigned int i;
+
+	c = 0;
+	for (i = 0; i < 6; i++) {
+		if (a & 1)
+			c ^= b;
+		a >>= 1;
+		b <<= 1;
+		if ((b & 0x40) != 0)
+			b ^= 0x43;
+	}
+
+	return c;
+}
+
+/* F_64[X]/(X**2+X+A**-1) with A the generator of F_64[X] */
+static u16 __devinit gf4096_mul(u16 a, u16 b)
+{
+	u8 ah, al, bh, bl, ch, cl;
+
+	ah = a >> 6;
+	al = a & 0x3f;
+	bh = b >> 6;
+	bl = b & 0x3f;
+
+	ch = gf64_mul(ah ^ al, bh ^ bl) ^ gf64_mul(al, bl);
+	cl = gf64_mul(gf64_mul(ah, bh), 0x21) ^ gf64_mul(al, bl);
+
+	return (ch << 6) ^ cl;
+}
+
+static int __devinit cafe_mul(int x)
+{
+	if (x == 0)
+		return 1;
+	return gf4096_mul(x, 0xe01);
+}
+
 static int __devinit cafe_nand_probe(struct pci_dev *pdev,
 				     const struct pci_device_id *ent)
 {
@@ -564,6 +654,12 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
 	}
 	cafe->nand.buffers = (void *)cafe->dmabuf + 2112;
 
+	cafe->rs = init_rs_non_canonical(12, &cafe_mul, 0, 1, 8);
+	if (!cafe->rs) {
+		err = -ENOMEM;
+		goto out_ior;
+	}
+
 	cafe->nand.cmdfunc = cafe_nand_cmdfunc;
 	cafe->nand.dev_ready = cafe_device_ready;
 	cafe->nand.read_byte = cafe_read_byte;
@@ -646,7 +742,7 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
 		  cafe_readl(cafe, GLOBAL_CTRL), cafe_readl(cafe, GLOBAL_IRQ_MASK));
 
 	/* Scan to find existence of the device */
-	if (nand_scan_ident(mtd, 1)) {
+	if (nand_scan_ident(mtd, 2)) {
 		err = -ENXIO;
 		goto out_irq;
 	}
@@ -713,6 +809,7 @@ static void __devexit cafe_nand_remove(struct pci_dev *pdev)
 	cafe_writel(cafe, ~1 & cafe_readl(cafe, GLOBAL_IRQ_MASK), GLOBAL_IRQ_MASK);
 	free_irq(pdev->irq, mtd);
 	nand_release(mtd);
+	free_rs(cafe->rs);
 	pci_iounmap(pdev, cafe->mmio);
 	dma_free_coherent(&cafe->pdev->dev, 2112, cafe->dmabuf, cafe->dmaaddr);
 	kfree(mtd);
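The Reed-Solomon rework above hinges on gf64_mul()/gf4096_mul() really defining fields, and on 0xe01 (the constant cafe_mul() multiplies by) being a generator, since init_rs_non_canonical() builds its log/antilog tables by walking the field through that callback. A standalone user-space sanity check, assuming only the two routines quoted from the patch (restated here with stdint types):

#include <stdio.h>
#include <stdint.h>

/* User-space copy of gf64_mul() from the patch: GF(64) = F_2[x]/(x^6+x+1) */
static uint16_t gf64_mul(uint8_t a, uint8_t b)
{
	uint8_t c = 0;
	int i;

	for (i = 0; i < 6; i++) {
		if (a & 1)
			c ^= b;
		a >>= 1;
		b <<= 1;
		if (b & 0x40)
			b ^= 0x43;	/* reduce mod x^6 + x + 1 */
	}
	return c;
}

/* User-space copy of gf4096_mul(): GF(4096) = GF(64)[X]/(X^2+X+A^-1) */
static uint16_t gf4096_mul(uint16_t a, uint16_t b)
{
	uint8_t ah = a >> 6, al = a & 0x3f, bh = b >> 6, bl = b & 0x3f;
	uint8_t ch, cl;

	ch = gf64_mul(ah ^ al, bh ^ bl) ^ gf64_mul(al, bl);
	cl = gf64_mul(gf64_mul(ah, bh), 0x21) ^ gf64_mul(al, bl);
	return (ch << 6) ^ cl;
}

int main(void)
{
	uint16_t x = 0xe01;	/* the element cafe_mul() multiplies by */
	unsigned int order = 1;

	/* 0x21 should be the inverse of the GF(64) generator A = 0x02:
	 * x * (x^5 + 1) = x^6 + x = (x + 1) + x = 1 mod x^6 + x + 1 */
	printf("0x02 * 0x21 = %u (expect 1)\n", gf64_mul(0x02, 0x21));

	/* Multiplicative order of 0xe01; 4095 means it generates GF(4096)* */
	while (x != 1) {
		x = gf4096_mul(x, 0xe01);
		order++;
	}
	printf("order(0xe01) = %u (expect 4095 if primitive)\n", order);
	return 0;
}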
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 04de315e4937..7e68203fe1ba 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -303,28 +303,27 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
 	struct nand_chip *chip = mtd->priv;
 	u16 bad;
 
+	page = (int)(ofs >> chip->page_shift) & chip->pagemask;
+
 	if (getchip) {
-		page = (int)(ofs >> chip->page_shift);
 		chipnr = (int)(ofs >> chip->chip_shift);
 
 		nand_get_device(chip, mtd, FL_READING);
 
 		/* Select the NAND device */
 		chip->select_chip(mtd, chipnr);
-	} else
-		page = (int)(ofs >> chip->page_shift);
+	}
 
 	if (chip->options & NAND_BUSWIDTH_16) {
 		chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos & 0xFE,
-			      page & chip->pagemask);
+			      page);
 		bad = cpu_to_le16(chip->read_word(mtd));
 		if (chip->badblockpos & 0x1)
 			bad >>= 8;
 		if ((bad & 0xFF) != 0xff)
 			res = 1;
 	} else {
-		chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos,
-			      page & chip->pagemask);
+		chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos, page);
 		if (chip->read_byte(mtd) != 0xff)
 			res = 1;
 	}
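The nand_block_bad() change above computes the page index once, already masked with chip->pagemask, instead of masking at every cmdfunc call. A small sketch with assumed geometry (2KiB pages, 128MiB chips; the numbers are illustrative, not from the patch) shows what the shift/mask pair extracts from a flash offset:

#include <stdio.h>

int main(void)
{
	/* Assumed example geometry: page_shift = 11 (2KiB pages),
	 * chip_shift = 27 (128MiB chips), so pagemask covers the
	 * 64K pages within one chip. */
	const int page_shift = 11, chip_shift = 27;
	const int pagemask = (1 << (chip_shift - page_shift)) - 1;
	unsigned long long ofs = 0x0812a800ULL;	/* arbitrary offset */

	int page = (int)(ofs >> page_shift) & pagemask;	/* page within chip */
	int chipnr = (int)(ofs >> chip_shift);		/* which chip */

	printf("ofs 0x%llx -> chip %d, page 0x%x\n", ofs, chipnr, (unsigned)page);
	return 0;
}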
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
new file mode 100644
index 000000000000..cd725fc5e813
--- /dev/null
+++ b/drivers/mtd/nand/plat_nand.c
@@ -0,0 +1,150 @@
+/*
+ * Generic NAND driver
+ *
+ * Author: Vitaly Wool <vitalywool@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+
+struct plat_nand_data {
+	struct nand_chip	chip;
+	struct mtd_info		mtd;
+	void __iomem		*io_base;
+#ifdef CONFIG_MTD_PARTITIONS
+	int			nr_parts;
+	struct mtd_partition	*parts;
+#endif
+};
+
+/*
+ * Probe for the NAND device.
+ */
+static int __init plat_nand_probe(struct platform_device *pdev)
+{
+	struct platform_nand_data *pdata = pdev->dev.platform_data;
+	struct plat_nand_data *data;
+	int res = 0;
+
+	/* Allocate memory for the device structure (and zero it) */
+	data = kzalloc(sizeof(struct plat_nand_data), GFP_KERNEL);
+	if (!data) {
+		dev_err(&pdev->dev, "failed to allocate device structure.\n");
+		return -ENOMEM;
+	}
+
+	data->io_base = ioremap(pdev->resource[0].start,
+				pdev->resource[0].end - pdev->resource[0].start + 1);
+	if (data->io_base == NULL) {
+		dev_err(&pdev->dev, "ioremap failed\n");
+		kfree(data);
+		return -EIO;
+	}
+
+	data->chip.priv = data;
+	data->mtd.priv = &data->chip;
+	data->mtd.owner = THIS_MODULE;
+
+	data->chip.IO_ADDR_R = data->io_base;
+	data->chip.IO_ADDR_W = data->io_base;
+	data->chip.cmd_ctrl = pdata->ctrl.cmd_ctrl;
+	data->chip.dev_ready = pdata->ctrl.dev_ready;
+	data->chip.select_chip = pdata->ctrl.select_chip;
+	data->chip.chip_delay = pdata->chip.chip_delay;
+	data->chip.options |= pdata->chip.options;
+
+	data->chip.ecc.hwctl = pdata->ctrl.hwcontrol;
+	data->chip.ecc.layout = pdata->chip.ecclayout;
+	data->chip.ecc.mode = NAND_ECC_SOFT;
+
+	platform_set_drvdata(pdev, data);
+
+	/* Scan to find existence of the device */
+	if (nand_scan(&data->mtd, 1)) {
+		res = -ENXIO;
+		goto out;
+	}
+
+#ifdef CONFIG_MTD_PARTITIONS
+	if (pdata->chip.part_probe_types) {
+		res = parse_mtd_partitions(&data->mtd,
+					pdata->chip.part_probe_types,
+					&data->parts, 0);
+		if (res > 0) {
+			add_mtd_partitions(&data->mtd, data->parts, res);
+			return 0;
+		}
+	}
+	if (pdata->chip.partitions) {
+		data->parts = pdata->chip.partitions;
+		res = add_mtd_partitions(&data->mtd, data->parts,
+			pdata->chip.nr_partitions);
+	} else
+#endif
+	res = add_mtd_device(&data->mtd);
+
+	if (!res)
+		return res;
+
+	nand_release(&data->mtd);
+out:
+	platform_set_drvdata(pdev, NULL);
+	iounmap(data->io_base);
+	kfree(data);
+	return res;
+}
+
+/*
+ * Remove a NAND device.
+ */
+static int __devexit plat_nand_remove(struct platform_device *pdev)
+{
+	struct plat_nand_data *data = platform_get_drvdata(pdev);
+	struct platform_nand_data *pdata = pdev->dev.platform_data;
+
+	nand_release(&data->mtd);
+#ifdef CONFIG_MTD_PARTITIONS
+	if (data->parts && data->parts != pdata->chip.partitions)
+		kfree(data->parts);
+#endif
+	iounmap(data->io_base);
+	kfree(data);
+
+	return 0;
+}
+
+static struct platform_driver plat_nand_driver = {
+	.probe		= plat_nand_probe,
+	.remove		= plat_nand_remove,
+	.driver		= {
+		.name	= "gen_nand",
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init plat_nand_init(void)
+{
+	return platform_driver_register(&plat_nand_driver);
+}
+
+static void __exit plat_nand_exit(void)
+{
+	platform_driver_unregister(&plat_nand_driver);
+}
+
+module_init(plat_nand_init);
+module_exit(plat_nand_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Vitaly Wool");
+MODULE_DESCRIPTION("Simple generic NAND driver");
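For reference, a board file binds to this driver by registering a "gen_nand" platform device carrying a struct platform_nand_data. The field names below come straight from the probe routine above; the command-control hook, addresses, and partition layout are invented for illustration only:

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>

static struct mtd_partition board_nand_parts[] = {
	{ .name = "boot",   .offset = 0, .size = 0x100000 },
	{ .name = "rootfs", .offset = MTDPART_OFS_APPEND,
	  .size = MTDPART_SIZ_FULL },
};

/* Drive CLE/ALE through address lines; the 0x04/0x08 offsets are made up. */
static void board_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
				unsigned int ctrl)
{
	struct nand_chip *chip = mtd->priv;

	if (cmd == NAND_CMD_NONE)
		return;
	if (ctrl & NAND_CLE)
		writeb(cmd, chip->IO_ADDR_W + 0x04);
	else
		writeb(cmd, chip->IO_ADDR_W + 0x08);
}

static struct platform_nand_data board_nand_data = {
	.chip = {
		.chip_delay	= 25,
		.partitions	= board_nand_parts,
		.nr_partitions	= ARRAY_SIZE(board_nand_parts),
	},
	.ctrl = {
		.cmd_ctrl	= board_nand_cmd_ctrl,
	},
};

static struct resource board_nand_resource = {
	.start	= 0x40000000,	/* invented chip-select window */
	.end	= 0x40000fff,
	.flags	= IORESOURCE_MEM,
};

static struct platform_device board_nand_device = {
	.name		= "gen_nand",	/* matches plat_nand_driver above */
	.id		= -1,
	.num_resources	= 1,
	.resource	= &board_nand_resource,
	.dev		= { .platform_data = &board_nand_data },
};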
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index 000794c6caf5..0537fac8de74 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -2192,7 +2192,7 @@ static int onenand_check_maf(int manuf)
  * @param mtd		MTD device structure
  *
  * OneNAND detection method:
- *  Compare the the values from command with ones from register
+ *  Compare the values from command with ones from register
  */
 static int onenand_probe(struct mtd_info *mtd)
 {
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c
index 9588da3a30e7..127f60841b10 100644
--- a/drivers/net/3c509.c
+++ b/drivers/net/3c509.c
@@ -95,8 +95,7 @@ static int max_interrupt_work = 10;
 #include <asm/io.h>
 #include <asm/irq.h>
 
-static char versionA[] __initdata = DRV_NAME ".c:" DRV_VERSION " " DRV_RELDATE " becker@scyld.com\n";
-static char versionB[] __initdata = "http://www.scyld.com/network/3c509.html\n";
+static char version[] __initdata = DRV_NAME ".c:" DRV_VERSION " " DRV_RELDATE " becker@scyld.com\n";
 
 #if defined(CONFIG_PM) && (defined(CONFIG_MCA) || defined(CONFIG_EISA))
 #define EL3_SUSPEND
@@ -360,7 +359,7 @@ static int __init el3_common_init(struct net_device *dev)
 	printk(", IRQ %d.\n", dev->irq);
 
 	if (el3_debug > 0)
-		printk(KERN_INFO "%s" KERN_INFO "%s", versionA, versionB);
+		printk(KERN_INFO "%s", version);
 	return 0;
 
 }
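The printk fix in this hunk (repeated below for atp.c and epic100.c) is worth spelling out: KERN_INFO is merely the prefix string "<6>", and printk() only honors such a prefix at the start of the format, so the old two-string form emitted a literal "<6>" into the middle of the log line. A sketch of the expansion:

/* KERN_INFO is defined as the string "<6>" */
printk(KERN_INFO "%s" KERN_INFO "%s", versionA, versionB);
	/* expands to printk("<6>%s<6>%s", ...): the second "<6>" is
	 * printed literally between the two strings */
printk(KERN_INFO "%s", version);
	/* expands to printk("<6>%s", ...): one correctly tagged line */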
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 80924f76dee8..f26ca331615e 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -103,7 +103,7 @@ static int vortex_debug = 1;
 
 
 static char version[] __devinitdata =
-DRV_NAME ": Donald Becker and others. www.scyld.com/network/vortex.html\n";
+DRV_NAME ": Donald Becker and others.\n";
 
 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
 MODULE_DESCRIPTION("3Com 3c59x/3c9xx ethernet driver ");
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index b86ccd2ecd5b..fa489b10c38c 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2493,12 +2493,28 @@ config PASEMI_MAC
 	  This driver supports the on-chip 1/10Gbit Ethernet controller on
 	  PA Semi's PWRficient line of chips.
 
+config MLX4_CORE
+	tristate
+	depends on PCI
+	default n
+
+config MLX4_DEBUG
+	bool "Verbose debugging output" if (MLX4_CORE && EMBEDDED)
+	default y
+	---help---
+	  This option causes debugging code to be compiled into the
+	  mlx4_core driver.  The output can be turned on via the
+	  debug_level module parameter (which can also be set after
+	  the driver is loaded through sysfs).
+
 endmenu
 
 source "drivers/net/tokenring/Kconfig"
 
 source "drivers/net/wireless/Kconfig"
 
+source "drivers/net/usb/Kconfig"
+
 source "drivers/net/pcmcia/Kconfig"
 
 source "drivers/net/wan/Kconfig"
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 59c0459a037c..a77affa4f6e6 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -197,6 +197,7 @@ obj-$(CONFIG_SMC911X) += smc911x.o
 obj-$(CONFIG_DM9000) += dm9000.o
 obj-$(CONFIG_FEC_8XX) += fec_8xx/
 obj-$(CONFIG_PASEMI_MAC) += pasemi_mac.o
+obj-$(CONFIG_MLX4_CORE) += mlx4/
 
 obj-$(CONFIG_MACB) += macb.o
 
@@ -206,6 +207,14 @@ obj-$(CONFIG_TR) += tokenring/
 obj-$(CONFIG_WAN) += wan/
 obj-$(CONFIG_ARCNET) += arcnet/
 obj-$(CONFIG_NET_PCMCIA) += pcmcia/
+
+obj-$(CONFIG_USB_CATC) += usb/
+obj-$(CONFIG_USB_KAWETH) += usb/
+obj-$(CONFIG_USB_PEGASUS) += usb/
+obj-$(CONFIG_USB_RTL8150) += usb/
+obj-$(CONFIG_USB_USBNET) += usb/
+obj-$(CONFIG_USB_ZD1201) += usb/
+
 obj-y += wireless/
 obj-$(CONFIG_NET_TULIP) += tulip/
 obj-$(CONFIG_HAMRADIO) += hamradio/
diff --git a/drivers/net/atl1/atl1_main.c b/drivers/net/atl1/atl1_main.c
index d28f88bbdd5f..78cf00ff3d38 100644
--- a/drivers/net/atl1/atl1_main.c
+++ b/drivers/net/atl1/atl1_main.c
@@ -2038,6 +2038,15 @@ static int atl1_close(struct net_device *netdev)
 	return 0;
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void atl1_poll_controller(struct net_device *netdev)
+{
+	disable_irq(netdev->irq);
+	atl1_intr(netdev->irq, netdev);
+	enable_irq(netdev->irq);
+}
+#endif
+
 /*
  * If TPD Buffer size equal to 0, PCIE DMAR_TO_INT
  * will assert. We do soft reset <0x1400=1> according
@@ -2190,6 +2199,9 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
 	netdev->do_ioctl = &atl1_ioctl;
 	netdev->tx_timeout = &atl1_tx_timeout;
 	netdev->watchdog_timeo = 5 * HZ;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	netdev->poll_controller = atl1_poll_controller;
+#endif
 	netdev->vlan_rx_register = atl1_vlan_rx_register;
 	netdev->vlan_rx_add_vid = atl1_vlan_rx_add_vid;
 	netdev->vlan_rx_kill_vid = atl1_vlan_rx_kill_vid;
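The atl1_poll_controller() added above exists only for netpoll clients (netconsole, kgdb over ethernet), which must drive the NIC from contexts where normal interrupt delivery cannot be relied on. A generic sketch of the pattern for any single-vector NIC, with a hypothetical driver name:

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Hypothetical foo driver: mask the IRQ line so the real handler
 * cannot race, run the ISR synchronously to drain the rings, then
 * re-enable the line.  This is the stock shape for devices with a
 * single interrupt source. */
static void foo_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	foo_intr(dev->irq, dev);	/* the driver's normal ISR */
	enable_irq(dev->irq);
}
#endif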
diff --git a/drivers/net/atp.c b/drivers/net/atp.c
index 18aba838c1ff..82d78ff8399b 100644
--- a/drivers/net/atp.c
+++ b/drivers/net/atp.c
@@ -31,10 +31,8 @@
 
 */
 
-static const char versionA[] =
+static const char version[] =
 "atp.c:v1.09=ac 2002/10/01 Donald Becker <becker@scyld.com>\n";
-static const char versionB[] =
-" http://www.scyld.com/network/atp.html\n";
 
 /* The user-configurable values.
    These may be modified when a driver module is loaded.*/
@@ -324,7 +322,7 @@ static int __init atp_probe1(long ioaddr)
 
 #ifndef MODULE
 	if (net_debug)
-		printk(KERN_INFO "%s" KERN_INFO "%s", versionA, versionB);
+		printk(KERN_INFO "%s", version);
 #endif
 
 	printk(KERN_NOTICE "%s: Pocket adapter found at %#3lx, IRQ %d, SAPROM "
@@ -926,7 +924,7 @@ static void set_rx_mode_8012(struct net_device *dev)
 
 static int __init atp_init_module(void) {
 	if (debug)					/* Emit version even if no cards detected. */
-		printk(KERN_INFO "%s" KERN_INFO "%s", versionA, versionB);
+		printk(KERN_INFO "%s", version);
 	return atp_init();
 }
 
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 724bce51f936..223517dcbcfd 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3461,7 +3461,7 @@ void bond_unregister_arp(struct bonding *bond)
 /*---------------------------- Hashing Policies -----------------------------*/
 
 /*
- * Hash for the the output device based upon layer 3 and layer 4 data. If
+ * Hash for the output device based upon layer 3 and layer 4 data. If
  * the packet is a frag or not TCP or UDP, just use layer 3 data.  If it is
  * altogether not IP, mimic bond_xmit_hash_policy_l2()
  */
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 3a03a74c0609..637ae8f68791 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -1214,7 +1214,7 @@ e1000_remove(struct pci_dev *pdev)
 	int i;
 #endif
 
-	flush_scheduled_work();
+	cancel_work_sync(&adapter->reset_task);
 
 	e1000_release_manageability(adapter);
 
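The e1000 change above swaps a broad flush for a targeted cancel: flush_scheduled_work() waits for everything on the shared keventd workqueue and can deadlock if the caller holds a lock that some unrelated queued work also needs, while cancel_work_sync() cancels and waits for just the one item. A minimal usage sketch, assuming the reset_task field and handler that e1000's probe path wires up:

	/* at init time */
	INIT_WORK(&adapter->reset_task, e1000_reset_task);

	/* whenever a reset is needed */
	schedule_work(&adapter->reset_task);

	/* at teardown: cancel if queued, and wait if already running */
	cancel_work_sync(&adapter->reset_task);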
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c
index 39654e1e2bed..47680237f783 100644
--- a/drivers/net/eepro.c
+++ b/drivers/net/eepro.c
@@ -1126,7 +1126,7 @@ static void eepro_tx_timeout (struct net_device *dev)
 	printk (KERN_ERR "%s: transmit timed out, %s?\n", dev->name,
 		"network cable problem");
 	/* This is not a duplicate. One message for the console,
-	   one for the the log file  */
+	   one for the log file  */
 	printk (KERN_DEBUG "%s: transmit timed out, %s?\n", dev->name,
 		"network cable problem");
 	eepro_complete_selreset(ioaddr);
diff --git a/drivers/net/eepro100.c b/drivers/net/eepro100.c
index 6c267c38df97..9800341956a2 100644
--- a/drivers/net/eepro100.c
+++ b/drivers/net/eepro100.c
@@ -28,7 +28,7 @@
 */
 
 static const char * const version =
-"eepro100.c:v1.09j-t 9/29/99 Donald Becker http://www.scyld.com/network/eepro100.html\n"
+"eepro100.c:v1.09j-t 9/29/99 Donald Becker\n"
 "eepro100.c: $Revision: 1.36 $ 2000/11/17 Modified by Andrey V. Savochkin <saw@saw.sw.com.sg> and others\n";
 
 /* A few user-configurable values that apply to all boards.
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index 4e3f14c9c717..5e517946f46a 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -93,8 +93,6 @@ static int rx_copybreak;
 static char version[] __devinitdata =
 DRV_NAME ".c:v1.11 1/7/2001 Written by Donald Becker <becker@scyld.com>\n";
 static char version2[] __devinitdata =
-" http://www.scyld.com/network/epic100.html\n";
-static char version3[] __devinitdata =
 " (unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";
 
 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
@@ -323,8 +321,8 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
 #ifndef MODULE
 	static int printed_version;
 	if (!printed_version++)
-		printk (KERN_INFO "%s" KERN_INFO "%s" KERN_INFO "%s",
-			version, version2, version3);
+		printk (KERN_INFO "%s" KERN_INFO "%s",
+			version, version2);
 #endif
 
 	card_idx++;
@@ -1596,8 +1594,8 @@ static int __init epic_init (void)
 {
 /* when a module, this is printed whether or not devices are found in probe */
 #ifdef MODULE
-	printk (KERN_INFO "%s" KERN_INFO "%s" KERN_INFO "%s",
-		version, version2, version3);
+	printk (KERN_INFO "%s" KERN_INFO "%s",
+		version, version2);
 #endif
 
 	return pci_register_driver(&epic_driver);
diff --git a/drivers/net/hamradio/Kconfig b/drivers/net/hamradio/Kconfig
index 6e90619b3b41..36d2c7d4f4d0 100644
--- a/drivers/net/hamradio/Kconfig
+++ b/drivers/net/hamradio/Kconfig
@@ -140,7 +140,7 @@ config BAYCOM_SER_HDX
 	  modems that connect to a serial interface. The driver supports the
 	  ser12 design in half-duplex mode. This is the old driver.  It is
 	  still provided in case your serial interface chip does not work with
-	  the full-duplex driver. This driver is depreciated. To configure
+	  the full-duplex driver. This driver is deprecated. To configure
 	  the driver, use the sethdlc utility available in the standard ax25
 	  utilities package. For information on the modems, see
 	  <http://www.baycom.de/> and
diff --git a/drivers/net/irda/donauboe.h b/drivers/net/irda/donauboe.h
index 2ab173d9a0e4..1e67720f1066 100644
--- a/drivers/net/irda/donauboe.h
+++ b/drivers/net/irda/donauboe.h
@@ -113,7 +113,7 @@
 /*  RxOver   overflow in Recv FIFO                                          */
 /*  SipRcv   received serial gap  (or other condition you set)              */
 /*  Interrupts are enabled by writing a one to the IER register             */
-/*  Interrupts are cleared by writting a one to the ISR register            */
+/*  Interrupts are cleared by writing a one to the ISR register             */
 /*                                                                          */
 /* 6. The remaining registers: 0x6 and 0x3 appear to be                     */
 /*    reserved parts of 16 or 32 bit registersthe remainder                 */
diff --git a/drivers/net/ixgb/ixgb_ee.c b/drivers/net/ixgb/ixgb_ee.c
index f15aebde7b90..52c99d01d568 100644
--- a/drivers/net/ixgb/ixgb_ee.c
+++ b/drivers/net/ixgb/ixgb_ee.c
@@ -315,7 +315,7 @@ ixgb_wait_eeprom_command(struct ixgb_hw *hw)
  * hw - Struct containing variables accessed by shared code
  *
  * Reads the first 64 16 bit words of the EEPROM and sums the values read.
- * If the the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is
+ * If the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is
  * valid.
  *
  * Returns:
diff --git a/drivers/net/meth.h b/drivers/net/meth.h
index 84960dae2a22..ea3b8fc86d1e 100644
--- a/drivers/net/meth.h
+++ b/drivers/net/meth.h
@@ -126,7 +126,7 @@ typedef struct rx_packet {
 	/* Note: when loopback is set this bit becomes collision control.  Setting this bit will */
 	/*       cause a collision to be reported. */
 
-	/* Bits 5 and 6 are used to determine the the Destination address filter mode */
+	/* Bits 5 and 6 are used to determine the Destination address filter mode */
 #define METH_ACCEPT_MY 0	/* 00: Accept PHY address only */
 #define METH_ACCEPT_MCAST 0x20	/* 01: Accept physical, broadcast, and multicast filter matches only */
 #define METH_ACCEPT_AMCAST 0x40	/* 10: Accept physical, broadcast, and all multicast packets */
diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile
new file mode 100644
index 000000000000..0952a6528f58
--- /dev/null
+++ b/drivers/net/mlx4/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_MLX4_CORE) += mlx4_core.o
+
+mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
+	       mr.o pd.o profile.o qp.o reset.o srq.o
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
new file mode 100644
index 000000000000..9ffdb9d29da9
--- /dev/null
+++ b/drivers/net/mlx4/alloc.c
@@ -0,0 +1,179 @@
1/*
2 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/errno.h>
34#include <linux/slab.h>
35#include <linux/bitmap.h>
36
37#include "mlx4.h"
38
39u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap)
40{
41 u32 obj;
42
43 spin_lock(&bitmap->lock);
44
45 obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last);
46 if (obj >= bitmap->max) {
47 bitmap->top = (bitmap->top + bitmap->max) & bitmap->mask;
48 obj = find_first_zero_bit(bitmap->table, bitmap->max);
49 }
50
51 if (obj < bitmap->max) {
52 set_bit(obj, bitmap->table);
53 obj |= bitmap->top;
54 bitmap->last = obj + 1;
55 } else
56 obj = -1;
57
58 spin_unlock(&bitmap->lock);
59
60 return obj;
61}
62
63void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj)
64{
65 obj &= bitmap->max - 1;
66
67 spin_lock(&bitmap->lock);
68 clear_bit(obj, bitmap->table);
69 bitmap->last = min(bitmap->last, obj);
70 bitmap->top = (bitmap->top + bitmap->max) & bitmap->mask;
71 spin_unlock(&bitmap->lock);
72}
73
74int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, u32 reserved)
75{
76 int i;
77
78 /* num must be a power of 2 */
79 if (num != roundup_pow_of_two(num))
80 return -EINVAL;
81
82 bitmap->last = 0;
83 bitmap->top = 0;
84 bitmap->max = num;
85 bitmap->mask = mask;
86 spin_lock_init(&bitmap->lock);
87 bitmap->table = kzalloc(BITS_TO_LONGS(num) * sizeof (long), GFP_KERNEL);
88 if (!bitmap->table)
89 return -ENOMEM;
90
91 for (i = 0; i < reserved; ++i)
92 set_bit(i, bitmap->table);
93
94 return 0;
95}
96
97void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap)
98{
99 kfree(bitmap->table);
100}
101
102/*
103 * Handling for queue buffers -- we allocate a bunch of memory and
104 * register it in a memory region at HCA virtual address 0. If the
105 * requested size is > max_direct, we split the allocation into
106 * multiple pages, so we don't require too much contiguous memory.
107 */
108
109int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
110 struct mlx4_buf *buf)
111{
112 dma_addr_t t;
113
114 if (size <= max_direct) {
115 buf->nbufs = 1;
116 buf->npages = 1;
117 buf->page_shift = get_order(size) + PAGE_SHIFT;
118 buf->u.direct.buf = dma_alloc_coherent(&dev->pdev->dev,
119 size, &t, GFP_KERNEL);
120 if (!buf->u.direct.buf)
121 return -ENOMEM;
122
123 buf->u.direct.map = t;
124
125 while (t & ((1 << buf->page_shift) - 1)) {
126 --buf->page_shift;
127 buf->npages *= 2;
128 }
129
130 memset(buf->u.direct.buf, 0, size);
131 } else {
132 int i;
133
134 buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE;
135 buf->npages = buf->nbufs;
136 buf->page_shift = PAGE_SHIFT;
137 buf->u.page_list = kzalloc(buf->nbufs * sizeof *buf->u.page_list,
138 GFP_KERNEL);
139 if (!buf->u.page_list)
140 return -ENOMEM;
141
142 for (i = 0; i < buf->nbufs; ++i) {
143 buf->u.page_list[i].buf =
144 dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
145 &t, GFP_KERNEL);
146 if (!buf->u.page_list[i].buf)
147 goto err_free;
148
149 buf->u.page_list[i].map = t;
150
151 memset(buf->u.page_list[i].buf, 0, PAGE_SIZE);
152 }
153 }
154
155 return 0;
156
157err_free:
158 mlx4_buf_free(dev, size, buf);
159
160 return -ENOMEM;
161}
162EXPORT_SYMBOL_GPL(mlx4_buf_alloc);
163
164void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
165{
166 int i;
167
168 if (buf->nbufs == 1)
169 dma_free_coherent(&dev->pdev->dev, size, buf->u.direct.buf,
170 buf->u.direct.map);
171 else {
172 for (i = 0; i < buf->nbufs; ++i)
173 dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
174 buf->u.page_list[i].buf,
175 buf->u.page_list[i].map);
176 kfree(buf->u.page_list);
177 }
178}
179EXPORT_SYMBOL_GPL(mlx4_buf_free);
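/*
 * Caller-side sketch (hypothetical helper, not in this patch): code
 * consuming an mlx4_buf must handle both layouts -- one contiguous
 * allocation when nbufs == 1, or a list of single pages otherwise.
 */
static inline void *example_buf_offset(struct mlx4_buf *buf, int offset)
{
	if (buf->nbufs == 1)
		return buf->u.direct.buf + offset;

	return buf->u.page_list[offset >> PAGE_SHIFT].buf +
		(offset & (PAGE_SIZE - 1));
}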
diff --git a/drivers/net/mlx4/catas.c b/drivers/net/mlx4/catas.c
new file mode 100644
index 000000000000..1bb088aeaf71
--- /dev/null
+++ b/drivers/net/mlx4/catas.c
@@ -0,0 +1,70 @@
1/*
2 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include "mlx4.h"
34
35void mlx4_handle_catas_err(struct mlx4_dev *dev)
36{
37 struct mlx4_priv *priv = mlx4_priv(dev);
38
39 int i;
40
41 mlx4_err(dev, "Catastrophic error detected:\n");
42 for (i = 0; i < priv->fw.catas_size; ++i)
43 mlx4_err(dev, " buf[%02x]: %08x\n",
44 i, swab32(readl(priv->catas_err.map + i)));
45
46 mlx4_dispatch_event(dev, MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR, 0, 0);
47}
48
49void mlx4_map_catas_buf(struct mlx4_dev *dev)
50{
51 struct mlx4_priv *priv = mlx4_priv(dev);
52 unsigned long addr;
53
54 addr = pci_resource_start(dev->pdev, priv->fw.catas_bar) +
55 priv->fw.catas_offset;
56
57 priv->catas_err.map = ioremap(addr, priv->fw.catas_size * 4);
58 if (!priv->catas_err.map)
59 mlx4_warn(dev, "Failed to map catastrophic error buffer at 0x%lx\n",
60 addr);
61
62}
63
64void mlx4_unmap_catas_buf(struct mlx4_dev *dev)
65{
66 struct mlx4_priv *priv = mlx4_priv(dev);
67
68 if (priv->catas_err.map)
69 iounmap(priv->catas_err.map);
70}
diff --git a/drivers/net/mlx4/cmd.c b/drivers/net/mlx4/cmd.c
new file mode 100644
index 000000000000..c1f81a993f5d
--- /dev/null
+++ b/drivers/net/mlx4/cmd.c
@@ -0,0 +1,429 @@
1/*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <linux/sched.h>
36#include <linux/pci.h>
37#include <linux/errno.h>
38
39#include <linux/mlx4/cmd.h>
40
41#include <asm/io.h>
42
43#include "mlx4.h"
44
45#define CMD_POLL_TOKEN 0xffff
46
47enum {
48 /* command completed successfully: */
49 CMD_STAT_OK = 0x00,
50 /* Internal error (such as a bus error) occurred while processing command: */
51 CMD_STAT_INTERNAL_ERR = 0x01,
52 /* Operation/command not supported or opcode modifier not supported: */
53 CMD_STAT_BAD_OP = 0x02,
54 /* Parameter not supported or parameter out of range: */
55 CMD_STAT_BAD_PARAM = 0x03,
56 /* System not enabled or bad system state: */
57 CMD_STAT_BAD_SYS_STATE = 0x04,
 58	/* Attempt to access reserved or unallocated resource: */
59 CMD_STAT_BAD_RESOURCE = 0x05,
60 /* Requested resource is currently executing a command, or is otherwise busy: */
61 CMD_STAT_RESOURCE_BUSY = 0x06,
62 /* Required capability exceeds device limits: */
63 CMD_STAT_EXCEED_LIM = 0x08,
64 /* Resource is not in the appropriate state or ownership: */
65 CMD_STAT_BAD_RES_STATE = 0x09,
66 /* Index out of range: */
67 CMD_STAT_BAD_INDEX = 0x0a,
68 /* FW image corrupted: */
69 CMD_STAT_BAD_NVMEM = 0x0b,
70 /* Attempt to modify a QP/EE which is not in the presumed state: */
71 CMD_STAT_BAD_QP_STATE = 0x10,
72 /* Bad segment parameters (Address/Size): */
73 CMD_STAT_BAD_SEG_PARAM = 0x20,
 74	/* Memory Region has Memory Windows bound to it: */
75 CMD_STAT_REG_BOUND = 0x21,
76 /* HCA local attached memory not present: */
77 CMD_STAT_LAM_NOT_PRE = 0x22,
78 /* Bad management packet (silently discarded): */
79 CMD_STAT_BAD_PKT = 0x30,
80 /* More outstanding CQEs in CQ than new CQ size: */
81 CMD_STAT_BAD_SIZE = 0x40
82};
83
84enum {
85 HCR_IN_PARAM_OFFSET = 0x00,
86 HCR_IN_MODIFIER_OFFSET = 0x08,
87 HCR_OUT_PARAM_OFFSET = 0x0c,
88 HCR_TOKEN_OFFSET = 0x14,
89 HCR_STATUS_OFFSET = 0x18,
90
91 HCR_OPMOD_SHIFT = 12,
92 HCR_T_BIT = 21,
93 HCR_E_BIT = 22,
94 HCR_GO_BIT = 23
95};
96
97enum {
98 GO_BIT_TIMEOUT = 10000
99};
100
101struct mlx4_cmd_context {
102 struct completion done;
103 int result;
104 int next;
105 u64 out_param;
106 u16 token;
107};
108
109static int mlx4_status_to_errno(u8 status) {
110 static const int trans_table[] = {
111 [CMD_STAT_INTERNAL_ERR] = -EIO,
112 [CMD_STAT_BAD_OP] = -EPERM,
113 [CMD_STAT_BAD_PARAM] = -EINVAL,
114 [CMD_STAT_BAD_SYS_STATE] = -ENXIO,
115 [CMD_STAT_BAD_RESOURCE] = -EBADF,
116 [CMD_STAT_RESOURCE_BUSY] = -EBUSY,
117 [CMD_STAT_EXCEED_LIM] = -ENOMEM,
118 [CMD_STAT_BAD_RES_STATE] = -EBADF,
119 [CMD_STAT_BAD_INDEX] = -EBADF,
120 [CMD_STAT_BAD_NVMEM] = -EFAULT,
121 [CMD_STAT_BAD_QP_STATE] = -EINVAL,
122 [CMD_STAT_BAD_SEG_PARAM] = -EFAULT,
123 [CMD_STAT_REG_BOUND] = -EBUSY,
124 [CMD_STAT_LAM_NOT_PRE] = -EAGAIN,
125 [CMD_STAT_BAD_PKT] = -EINVAL,
126 [CMD_STAT_BAD_SIZE] = -ENOMEM,
127 };
128
129 if (status >= ARRAY_SIZE(trans_table) ||
130 (status != CMD_STAT_OK && trans_table[status] == 0))
131 return -EIO;
132
133 return trans_table[status];
134}
135
136static int cmd_pending(struct mlx4_dev *dev)
137{
138 u32 status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);
139
140 return (status & swab32(1 << HCR_GO_BIT)) ||
141 (mlx4_priv(dev)->cmd.toggle ==
142 !!(status & swab32(1 << HCR_T_BIT)));
143}
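/*
 * Editorial note on the test above: the HCR is a big-endian register
 * block read with readl(), so the two single-bit masks are byte-swapped
 * once with swab32() instead of swapping the register image.  The HCR
 * counts as busy while the GO bit is still set, or while the toggle bit
 * already matches the value software would use for its next post.
 */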
144
145static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
146 u32 in_modifier, u8 op_modifier, u16 op, u16 token,
147 int event)
148{
149 struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
150 u32 __iomem *hcr = cmd->hcr;
151 int ret = -EAGAIN;
152 unsigned long end;
153
154 mutex_lock(&cmd->hcr_mutex);
155
156 end = jiffies;
157 if (event)
158 end += HZ * 10;
159
160 while (cmd_pending(dev)) {
161 if (time_after_eq(jiffies, end))
162 goto out;
163 cond_resched();
164 }
165
166 /*
167 * We use writel (instead of something like memcpy_toio)
168 * because writes of less than 32 bits to the HCR don't work
169 * (and some architectures such as ia64 implement memcpy_toio
170 * in terms of writeb).
171 */
172 __raw_writel((__force u32) cpu_to_be32(in_param >> 32), hcr + 0);
173 __raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful), hcr + 1);
174 __raw_writel((__force u32) cpu_to_be32(in_modifier), hcr + 2);
175 __raw_writel((__force u32) cpu_to_be32(out_param >> 32), hcr + 3);
176 __raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), hcr + 4);
177 __raw_writel((__force u32) cpu_to_be32(token << 16), hcr + 5);
178
179 /* __raw_writel may not order writes. */
180 wmb();
181
182 __raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT) |
183 (cmd->toggle << HCR_T_BIT) |
184 (event ? (1 << HCR_E_BIT) : 0) |
185 (op_modifier << HCR_OPMOD_SHIFT) |
186 op), hcr + 6);
187 cmd->toggle = cmd->toggle ^ 1;
188
189 ret = 0;
190
191out:
192 mutex_unlock(&cmd->hcr_mutex);
193 return ret;
194}
195
196static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
197 int out_is_imm, u32 in_modifier, u8 op_modifier,
198 u16 op, unsigned long timeout)
199{
200 struct mlx4_priv *priv = mlx4_priv(dev);
201 void __iomem *hcr = priv->cmd.hcr;
202 int err = 0;
203 unsigned long end;
204
205 down(&priv->cmd.poll_sem);
206
207 err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
208 in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
209 if (err)
210 goto out;
211
212 end = msecs_to_jiffies(timeout) + jiffies;
213 while (cmd_pending(dev) && time_before(jiffies, end))
214 cond_resched();
215
216 if (cmd_pending(dev)) {
217 err = -ETIMEDOUT;
218 goto out;
219 }
220
221 if (out_is_imm)
222 *out_param =
223 (u64) be32_to_cpu((__force __be32)
224 __raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
225 (u64) be32_to_cpu((__force __be32)
226 __raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4));
227
228 err = mlx4_status_to_errno(be32_to_cpu((__force __be32)
229 __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24);
230
231out:
232 up(&priv->cmd.poll_sem);
233 return err;
234}
235
236void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param)
237{
238 struct mlx4_priv *priv = mlx4_priv(dev);
239 struct mlx4_cmd_context *context =
240 &priv->cmd.context[token & priv->cmd.token_mask];
241
242 /* previously timed out command completing at long last */
243 if (token != context->token)
244 return;
245
246 context->result = mlx4_status_to_errno(status);
247 context->out_param = out_param;
248
249 context->token += priv->cmd.token_mask + 1;
250
251 complete(&context->done);
252}
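/*
 * Token arithmetic sketch (illustrative values): with max_cmds = 16,
 * token_mask is 15, so the context in slot i hands out tokens
 * i, i + 16, i + 32, ...  The low bits select the slot; bumping the
 * token by token_mask + 1 on completion changes the "generation", so
 * a late completion of a timed-out command no longer matches
 * context->token and is dropped by the check above.
 */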
253
254static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
255 int out_is_imm, u32 in_modifier, u8 op_modifier,
256 u16 op, unsigned long timeout)
257{
258 struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
259 struct mlx4_cmd_context *context;
260 int err = 0;
261
262 down(&cmd->event_sem);
263
264 spin_lock(&cmd->context_lock);
265 BUG_ON(cmd->free_head < 0);
266 context = &cmd->context[cmd->free_head];
267 cmd->free_head = context->next;
268 spin_unlock(&cmd->context_lock);
269
270 init_completion(&context->done);
271
272 mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
273 in_modifier, op_modifier, op, context->token, 1);
274
275 if (!wait_for_completion_timeout(&context->done, msecs_to_jiffies(timeout))) {
276 err = -EBUSY;
277 goto out;
278 }
279
280 err = context->result;
281 if (err)
282 goto out;
283
284 if (out_is_imm)
285 *out_param = context->out_param;
286
287out:
288 spin_lock(&cmd->context_lock);
289 context->next = cmd->free_head;
290 cmd->free_head = context - cmd->context;
291 spin_unlock(&cmd->context_lock);
292
293 up(&cmd->event_sem);
294 return err;
295}
296
297int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
298 int out_is_imm, u32 in_modifier, u8 op_modifier,
299 u16 op, unsigned long timeout)
300{
301 if (mlx4_priv(dev)->cmd.use_events)
302 return mlx4_cmd_wait(dev, in_param, out_param, out_is_imm,
303 in_modifier, op_modifier, op, timeout);
304 else
305 return mlx4_cmd_poll(dev, in_param, out_param, out_is_imm,
306 in_modifier, op_modifier, op, timeout);
307}
308EXPORT_SYMBOL_GPL(__mlx4_cmd);
309
310int mlx4_cmd_init(struct mlx4_dev *dev)
311{
312 struct mlx4_priv *priv = mlx4_priv(dev);
313
314 mutex_init(&priv->cmd.hcr_mutex);
315 sema_init(&priv->cmd.poll_sem, 1);
316 priv->cmd.use_events = 0;
317 priv->cmd.toggle = 1;
318
319 priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_HCR_BASE,
320 MLX4_HCR_SIZE);
321 if (!priv->cmd.hcr) {
 322		mlx4_err(dev, "Couldn't map command register.\n");
323 return -ENOMEM;
324 }
325
326 priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev,
327 MLX4_MAILBOX_SIZE,
328 MLX4_MAILBOX_SIZE, 0);
329 if (!priv->cmd.pool) {
330 iounmap(priv->cmd.hcr);
331 return -ENOMEM;
332 }
333
334 return 0;
335}
336
337void mlx4_cmd_cleanup(struct mlx4_dev *dev)
338{
339 struct mlx4_priv *priv = mlx4_priv(dev);
340
341 pci_pool_destroy(priv->cmd.pool);
342 iounmap(priv->cmd.hcr);
343}
344
345/*
346 * Switch to using events to issue FW commands (can only be called
347 * after event queue for command events has been initialized).
348 */
349int mlx4_cmd_use_events(struct mlx4_dev *dev)
350{
351 struct mlx4_priv *priv = mlx4_priv(dev);
352 int i;
353
354 priv->cmd.context = kmalloc(priv->cmd.max_cmds *
355 sizeof (struct mlx4_cmd_context),
356 GFP_KERNEL);
357 if (!priv->cmd.context)
358 return -ENOMEM;
359
360 for (i = 0; i < priv->cmd.max_cmds; ++i) {
361 priv->cmd.context[i].token = i;
362 priv->cmd.context[i].next = i + 1;
363 }
364
365 priv->cmd.context[priv->cmd.max_cmds - 1].next = -1;
366 priv->cmd.free_head = 0;
367
368 sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds);
369 spin_lock_init(&priv->cmd.context_lock);
370
371 for (priv->cmd.token_mask = 1;
372 priv->cmd.token_mask < priv->cmd.max_cmds;
373 priv->cmd.token_mask <<= 1)
374 ; /* nothing */
375 --priv->cmd.token_mask;
376
377 priv->cmd.use_events = 1;
378
379 down(&priv->cmd.poll_sem);
380
381 return 0;
382}
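/*
 * Editorial note: the token_mask loop above is a hand-rolled
 * equivalent of
 *
 *	priv->cmd.token_mask = roundup_pow_of_two(priv->cmd.max_cmds) - 1;
 *
 * i.e. the smallest all-ones mask covering max_cmds slots.
 */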
383
384/*
385 * Switch back to polling (used when shutting down the device)
386 */
387void mlx4_cmd_use_polling(struct mlx4_dev *dev)
388{
389 struct mlx4_priv *priv = mlx4_priv(dev);
390 int i;
391
392 priv->cmd.use_events = 0;
393
394 for (i = 0; i < priv->cmd.max_cmds; ++i)
395 down(&priv->cmd.event_sem);
396
397 kfree(priv->cmd.context);
398
399 up(&priv->cmd.poll_sem);
400}
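/*
 * Draining note (editorial): the down() loop above acquires every
 * event_sem slot, and each slot is held for the lifetime of one
 * event-mode command, so the loop cannot finish until all in-flight
 * commands have completed.  Only then is it safe to kfree() the
 * context array and re-enable the polling semaphore.
 */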
401
402struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
403{
404 struct mlx4_cmd_mailbox *mailbox;
405
406 mailbox = kmalloc(sizeof *mailbox, GFP_KERNEL);
407 if (!mailbox)
408 return ERR_PTR(-ENOMEM);
409
410 mailbox->buf = pci_pool_alloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL,
411 &mailbox->dma);
412 if (!mailbox->buf) {
413 kfree(mailbox);
414 return ERR_PTR(-ENOMEM);
415 }
416
417 return mailbox;
418}
419EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);
420
421void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox)
422{
423 if (!mailbox)
424 return;
425
426 pci_pool_free(mlx4_priv(dev)->cmd.pool, mailbox->buf, mailbox->dma);
427 kfree(mailbox);
428}
429EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox);
diff --git a/drivers/net/mlx4/cq.c b/drivers/net/mlx4/cq.c
new file mode 100644
index 000000000000..437d78ad0912
--- /dev/null
+++ b/drivers/net/mlx4/cq.c
@@ -0,0 +1,254 @@
1/*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
4 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
5 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
6 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
7 *
8 * This software is available to you under a choice of one of two
9 * licenses. You may choose to be licensed under the terms of the GNU
10 * General Public License (GPL) Version 2, available from the file
11 * COPYING in the main directory of this source tree, or the
12 * OpenIB.org BSD license below:
13 *
14 * Redistribution and use in source and binary forms, with or
15 * without modification, are permitted provided that the following
16 * conditions are met:
17 *
18 * - Redistributions of source code must retain the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer.
21 *
22 * - Redistributions in binary form must reproduce the above
23 * copyright notice, this list of conditions and the following
24 * disclaimer in the documentation and/or other materials
25 * provided with the distribution.
26 *
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
28 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
29 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
30 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
31 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
32 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
33 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 * SOFTWARE.
35 */
36
37#include <linux/init.h>
38#include <linux/hardirq.h>
39
40#include <linux/mlx4/cmd.h>
41
42#include "mlx4.h"
43#include "icm.h"
44
45struct mlx4_cq_context {
46 __be32 flags;
47 u16 reserved1[3];
48 __be16 page_offset;
49 __be32 logsize_usrpage;
50 u8 reserved2;
51 u8 cq_period;
52 u8 reserved3;
53 u8 cq_max_count;
54 u8 reserved4[3];
55 u8 comp_eqn;
56 u8 log_page_size;
57 u8 reserved5[2];
58 u8 mtt_base_addr_h;
59 __be32 mtt_base_addr_l;
60 __be32 last_notified_index;
61 __be32 solicit_producer_index;
62 __be32 consumer_index;
63 __be32 producer_index;
64 u8 reserved6[2];
65 __be64 db_rec_addr;
66};
67
68#define MLX4_CQ_STATUS_OK ( 0 << 28)
69#define MLX4_CQ_STATUS_OVERFLOW ( 9 << 28)
70#define MLX4_CQ_STATUS_WRITE_FAIL (10 << 28)
71#define MLX4_CQ_FLAG_CC ( 1 << 18)
72#define MLX4_CQ_FLAG_OI ( 1 << 17)
73#define MLX4_CQ_STATE_ARMED ( 9 << 8)
74#define MLX4_CQ_STATE_ARMED_SOL ( 6 << 8)
75#define MLX4_EQ_STATE_FIRED (10 << 8)
76
77void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
78{
79 struct mlx4_cq *cq;
80
81 cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
82 cqn & (dev->caps.num_cqs - 1));
83 if (!cq) {
84 mlx4_warn(dev, "Completion event for bogus CQ %08x\n", cqn);
85 return;
86 }
87
88 ++cq->arm_sn;
89
90 cq->comp(cq);
91}
92
93void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
94{
95 struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
96 struct mlx4_cq *cq;
97
98 spin_lock(&cq_table->lock);
99
100 cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1));
101 if (cq)
102 atomic_inc(&cq->refcount);
103
104 spin_unlock(&cq_table->lock);
105
106 if (!cq) {
107 mlx4_warn(dev, "Async event for bogus CQ %08x\n", cqn);
108 return;
109 }
110
111 cq->event(cq, event_type);
112
113 if (atomic_dec_and_test(&cq->refcount))
114 complete(&cq->free);
115}
116
117static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
118 int cq_num)
119{
120 return mlx4_cmd(dev, mailbox->dma, cq_num, 0, MLX4_CMD_SW2HW_CQ,
121 MLX4_CMD_TIME_CLASS_A);
122}
123
124static int mlx4_HW2SW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
125 int cq_num)
126{
127 return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, cq_num,
128 mailbox ? 0 : 1, MLX4_CMD_HW2SW_CQ,
129 MLX4_CMD_TIME_CLASS_A);
130}
131
132int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
133 struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq)
134{
135 struct mlx4_priv *priv = mlx4_priv(dev);
136 struct mlx4_cq_table *cq_table = &priv->cq_table;
137 struct mlx4_cmd_mailbox *mailbox;
138 struct mlx4_cq_context *cq_context;
139 u64 mtt_addr;
140 int err;
141
142 cq->cqn = mlx4_bitmap_alloc(&cq_table->bitmap);
143 if (cq->cqn == -1)
144 return -ENOMEM;
145
146 err = mlx4_table_get(dev, &cq_table->table, cq->cqn);
147 if (err)
148 goto err_out;
149
150 err = mlx4_table_get(dev, &cq_table->cmpt_table, cq->cqn);
151 if (err)
152 goto err_put;
153
154 spin_lock_irq(&cq_table->lock);
155 err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
156 spin_unlock_irq(&cq_table->lock);
157 if (err)
158 goto err_cmpt_put;
159
160 mailbox = mlx4_alloc_cmd_mailbox(dev);
161 if (IS_ERR(mailbox)) {
162 err = PTR_ERR(mailbox);
163 goto err_radix;
164 }
165
166 cq_context = mailbox->buf;
167 memset(cq_context, 0, sizeof *cq_context);
168
169 cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index);
170 cq_context->comp_eqn = priv->eq_table.eq[MLX4_EQ_COMP].eqn;
171 cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
172
173 mtt_addr = mlx4_mtt_addr(dev, mtt);
174 cq_context->mtt_base_addr_h = mtt_addr >> 32;
175 cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
176 cq_context->db_rec_addr = cpu_to_be64(db_rec);
177
178 err = mlx4_SW2HW_CQ(dev, mailbox, cq->cqn);
179 mlx4_free_cmd_mailbox(dev, mailbox);
180 if (err)
181 goto err_radix;
182
183 cq->cons_index = 0;
184 cq->arm_sn = 1;
185 cq->uar = uar;
186 atomic_set(&cq->refcount, 1);
187 init_completion(&cq->free);
188
189 return 0;
190
191err_radix:
192 spin_lock_irq(&cq_table->lock);
193 radix_tree_delete(&cq_table->tree, cq->cqn);
194 spin_unlock_irq(&cq_table->lock);
195
196err_cmpt_put:
197 mlx4_table_put(dev, &cq_table->cmpt_table, cq->cqn);
198
199err_put:
200 mlx4_table_put(dev, &cq_table->table, cq->cqn);
201
202err_out:
203 mlx4_bitmap_free(&cq_table->bitmap, cq->cqn);
204
205 return err;
206}
207EXPORT_SYMBOL_GPL(mlx4_cq_alloc);
208
209void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
210{
211 struct mlx4_priv *priv = mlx4_priv(dev);
212 struct mlx4_cq_table *cq_table = &priv->cq_table;
213 int err;
214
215 err = mlx4_HW2SW_CQ(dev, NULL, cq->cqn);
216 if (err)
217 mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);
218
219 synchronize_irq(priv->eq_table.eq[MLX4_EQ_COMP].irq);
220
221 spin_lock_irq(&cq_table->lock);
222 radix_tree_delete(&cq_table->tree, cq->cqn);
223 spin_unlock_irq(&cq_table->lock);
224
225 if (atomic_dec_and_test(&cq->refcount))
226 complete(&cq->free);
227 wait_for_completion(&cq->free);
228
229 mlx4_table_put(dev, &cq_table->table, cq->cqn);
230 mlx4_bitmap_free(&cq_table->bitmap, cq->cqn);
231}
232EXPORT_SYMBOL_GPL(mlx4_cq_free);
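/*
 * Minimal sketch of the refcount-plus-completion teardown pattern used
 * by mlx4_cq_free() above (hypothetical object, not mlx4 API): the
 * initial reference stands for the object's existence, and destroy
 * waits until any concurrent event handlers have dropped theirs.
 */
struct example_obj {
	atomic_t		refcount;
	struct completion	free;
};

static void example_put(struct example_obj *obj)
{
	if (atomic_dec_and_test(&obj->refcount))
		complete(&obj->free);
}

static void example_destroy(struct example_obj *obj)
{
	example_put(obj);			/* drop the initial reference */
	wait_for_completion(&obj->free);	/* wait for in-flight users */
}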
233
234int __devinit mlx4_init_cq_table(struct mlx4_dev *dev)
235{
236 struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
237 int err;
238
239 spin_lock_init(&cq_table->lock);
240 INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
241
242 err = mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs,
243 dev->caps.num_cqs - 1, dev->caps.reserved_cqs);
244 if (err)
245 return err;
246
247 return 0;
248}
249
250void mlx4_cleanup_cq_table(struct mlx4_dev *dev)
251{
252 /* Nothing to do to clean up radix_tree */
253 mlx4_bitmap_cleanup(&mlx4_priv(dev)->cq_table.bitmap);
254}
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
new file mode 100644
index 000000000000..acf1c801a1b8
--- /dev/null
+++ b/drivers/net/mlx4/eq.c
@@ -0,0 +1,696 @@
1/*
2 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
3 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <linux/init.h>
35#include <linux/interrupt.h>
36
37#include <linux/mlx4/cmd.h>
38
39#include "mlx4.h"
40#include "fw.h"
41
42enum {
43 MLX4_NUM_ASYNC_EQE = 0x100,
44 MLX4_NUM_SPARE_EQE = 0x80,
45 MLX4_EQ_ENTRY_SIZE = 0x20
46};
47
48/*
49 * Must be packed because start is 64 bits but only aligned to 32 bits.
50 */
51struct mlx4_eq_context {
52 __be32 flags;
53 u16 reserved1[3];
54 __be16 page_offset;
55 u8 log_eq_size;
56 u8 reserved2[4];
57 u8 eq_period;
58 u8 reserved3;
59 u8 eq_max_count;
60 u8 reserved4[3];
61 u8 intr;
62 u8 log_page_size;
63 u8 reserved5[2];
64 u8 mtt_base_addr_h;
65 __be32 mtt_base_addr_l;
66 u32 reserved6[2];
67 __be32 consumer_index;
68 __be32 producer_index;
69 u32 reserved7[4];
70};
71
72#define MLX4_EQ_STATUS_OK ( 0 << 28)
73#define MLX4_EQ_STATUS_WRITE_FAIL (10 << 28)
74#define MLX4_EQ_OWNER_SW ( 0 << 24)
75#define MLX4_EQ_OWNER_HW ( 1 << 24)
76#define MLX4_EQ_FLAG_EC ( 1 << 18)
77#define MLX4_EQ_FLAG_OI ( 1 << 17)
78#define MLX4_EQ_STATE_ARMED ( 9 << 8)
79#define MLX4_EQ_STATE_FIRED (10 << 8)
80#define MLX4_EQ_STATE_ALWAYS_ARMED (11 << 8)
81
82#define MLX4_ASYNC_EVENT_MASK ((1ull << MLX4_EVENT_TYPE_PATH_MIG) | \
83 (1ull << MLX4_EVENT_TYPE_COMM_EST) | \
84 (1ull << MLX4_EVENT_TYPE_SQ_DRAINED) | \
85 (1ull << MLX4_EVENT_TYPE_CQ_ERROR) | \
86 (1ull << MLX4_EVENT_TYPE_WQ_CATAS_ERROR) | \
87 (1ull << MLX4_EVENT_TYPE_EEC_CATAS_ERROR) | \
88 (1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED) | \
89 (1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
90 (1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR) | \
91 (1ull << MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR) | \
92 (1ull << MLX4_EVENT_TYPE_PORT_CHANGE) | \
93 (1ull << MLX4_EVENT_TYPE_ECC_DETECT) | \
94 (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR) | \
95 (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE) | \
96 (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT) | \
97 (1ull << MLX4_EVENT_TYPE_CMD))
98#define MLX4_CATAS_EVENT_MASK (1ull << MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR)
99
100struct mlx4_eqe {
101 u8 reserved1;
102 u8 type;
103 u8 reserved2;
104 u8 subtype;
105 union {
106 u32 raw[6];
107 struct {
108 __be32 cqn;
109 } __attribute__((packed)) comp;
110 struct {
111 u16 reserved1;
112 __be16 token;
113 u32 reserved2;
114 u8 reserved3[3];
115 u8 status;
116 __be64 out_param;
117 } __attribute__((packed)) cmd;
118 struct {
119 __be32 qpn;
120 } __attribute__((packed)) qp;
121 struct {
122 __be32 srqn;
123 } __attribute__((packed)) srq;
124 struct {
125 __be32 cqn;
126 u32 reserved1;
127 u8 reserved2[3];
128 u8 syndrome;
129 } __attribute__((packed)) cq_err;
130 struct {
131 u32 reserved1[2];
132 __be32 port;
133 } __attribute__((packed)) port_change;
134 } event;
135 u8 reserved3[3];
136 u8 owner;
137} __attribute__((packed));
138
139static void eq_set_ci(struct mlx4_eq *eq, int req_not)
140{
141 __raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) |
142 req_not << 31),
143 eq->doorbell);
144 /* We still want ordering, just not swabbing, so add a barrier */
145 mb();
146}
147
148static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry)
149{
150 unsigned long off = (entry & (eq->nent - 1)) * MLX4_EQ_ENTRY_SIZE;
151 return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
152}
153
154static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq)
155{
156 struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index);
157 return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
158}
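/*
 * Ownership parity note (editorial): software and hardware each flip a
 * phase bit once per pass around the EQ.  The EQE's high 'owner' bit
 * carries the hardware phase, and (cons_index & nent) carries the
 * software phase, so next_eqe_sw() hands an entry to software only
 * when the two agree; otherwise it reports the queue as empty.
 */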
159
160static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
161{
162 struct mlx4_eqe *eqe;
163 int cqn;
164 int eqes_found = 0;
165 int set_ci = 0;
166
167 while ((eqe = next_eqe_sw(eq))) {
168 /*
169 * Make sure we read EQ entry contents after we've
170 * checked the ownership bit.
171 */
172 rmb();
173
174 switch (eqe->type) {
175 case MLX4_EVENT_TYPE_COMP:
176 cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;
177 mlx4_cq_completion(dev, cqn);
178 break;
179
180 case MLX4_EVENT_TYPE_PATH_MIG:
181 case MLX4_EVENT_TYPE_COMM_EST:
182 case MLX4_EVENT_TYPE_SQ_DRAINED:
183 case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
184 case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
185 case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
186 case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
187 case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
188 mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
189 eqe->type);
190 break;
191
192 case MLX4_EVENT_TYPE_SRQ_LIMIT:
193 case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
194 mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff,
195 eqe->type);
196 break;
197
198 case MLX4_EVENT_TYPE_CMD:
199 mlx4_cmd_event(dev,
200 be16_to_cpu(eqe->event.cmd.token),
201 eqe->event.cmd.status,
202 be64_to_cpu(eqe->event.cmd.out_param));
203 break;
204
205 case MLX4_EVENT_TYPE_PORT_CHANGE:
206 mlx4_dispatch_event(dev, eqe->type, eqe->subtype,
207 be32_to_cpu(eqe->event.port_change.port) >> 28);
208 break;
209
210 case MLX4_EVENT_TYPE_CQ_ERROR:
211 mlx4_warn(dev, "CQ %s on CQN %06x\n",
212 eqe->event.cq_err.syndrome == 1 ?
213 "overrun" : "access violation",
214 be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
215 mlx4_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn),
216 eqe->type);
217 break;
218
219 case MLX4_EVENT_TYPE_EQ_OVERFLOW:
220 mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
221 break;
222
223 case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
224 case MLX4_EVENT_TYPE_ECC_DETECT:
225 default:
226 mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u\n",
227 eqe->type, eqe->subtype, eq->eqn, eq->cons_index);
228 break;
 229		}
230
231 ++eq->cons_index;
232 eqes_found = 1;
233 ++set_ci;
234
235 /*
236 * The HCA will think the queue has overflowed if we
237 * don't tell it we've been processing events. We
238 * create our EQs with MLX4_NUM_SPARE_EQE extra
239 * entries, so we must update our consumer index at
240 * least that often.
241 */
242 if (unlikely(set_ci >= MLX4_NUM_SPARE_EQE)) {
243 /*
 244			 * An extra consumer-index update here is fine:
 245			 * this is a rare case, not the fast path.
246 */
247 eq_set_ci(eq, 0);
248 set_ci = 0;
249 }
250 }
251
252 eq_set_ci(eq, 1);
253
254 return eqes_found;
255}
256
257static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr)
258{
259 struct mlx4_dev *dev = dev_ptr;
260 struct mlx4_priv *priv = mlx4_priv(dev);
261 int work = 0;
262 int i;
263
264 writel(priv->eq_table.clr_mask, priv->eq_table.clr_int);
265
266 for (i = 0; i < MLX4_EQ_CATAS; ++i)
267 work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]);
268
269 return IRQ_RETVAL(work);
270}
271
272static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr)
273{
274 struct mlx4_eq *eq = eq_ptr;
275 struct mlx4_dev *dev = eq->dev;
276
277 mlx4_eq_int(dev, eq);
278
279 /* MSI-X vectors always belong to us */
280 return IRQ_HANDLED;
281}
282
283static irqreturn_t mlx4_catas_interrupt(int irq, void *dev_ptr)
284{
285 mlx4_handle_catas_err(dev_ptr);
286
287 /* MSI-X vectors always belong to us */
288 return IRQ_HANDLED;
289}
290
291static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,
292 int eq_num)
293{
294 return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num,
295 0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B);
296}
297
298static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
299 int eq_num)
300{
301 return mlx4_cmd(dev, mailbox->dma, eq_num, 0, MLX4_CMD_SW2HW_EQ,
302 MLX4_CMD_TIME_CLASS_A);
303}
304
305static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
306 int eq_num)
307{
308 return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num, 0, MLX4_CMD_HW2SW_EQ,
309 MLX4_CMD_TIME_CLASS_A);
310}
311
312static void __devinit __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev,
313 struct mlx4_eq *eq)
314{
315 struct mlx4_priv *priv = mlx4_priv(dev);
316 int index;
317
318 index = eq->eqn / 4 - dev->caps.reserved_eqs / 4;
319
320 if (!priv->eq_table.uar_map[index]) {
321 priv->eq_table.uar_map[index] =
322 ioremap(pci_resource_start(dev->pdev, 2) +
323 ((eq->eqn / 4) << PAGE_SHIFT),
324 PAGE_SIZE);
325 if (!priv->eq_table.uar_map[index]) {
326 mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n",
327 eq->eqn);
328 return NULL;
329 }
330 }
331
332 return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4);
333}
334
335static int __devinit mlx4_create_eq(struct mlx4_dev *dev, int nent,
336 u8 intr, struct mlx4_eq *eq)
337{
338 struct mlx4_priv *priv = mlx4_priv(dev);
339 struct mlx4_cmd_mailbox *mailbox;
340 struct mlx4_eq_context *eq_context;
341 int npages;
342 u64 *dma_list = NULL;
343 dma_addr_t t;
344 u64 mtt_addr;
345 int err = -ENOMEM;
346 int i;
347
348 eq->dev = dev;
349 eq->nent = roundup_pow_of_two(max(nent, 2));
350 npages = PAGE_ALIGN(eq->nent * MLX4_EQ_ENTRY_SIZE) / PAGE_SIZE;
351
352 eq->page_list = kmalloc(npages * sizeof *eq->page_list,
353 GFP_KERNEL);
354 if (!eq->page_list)
355 goto err_out;
356
357 for (i = 0; i < npages; ++i)
358 eq->page_list[i].buf = NULL;
359
360 dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
361 if (!dma_list)
362 goto err_out_free;
363
364 mailbox = mlx4_alloc_cmd_mailbox(dev);
365 if (IS_ERR(mailbox))
366 goto err_out_free;
367 eq_context = mailbox->buf;
368
369 for (i = 0; i < npages; ++i) {
370 eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
371 PAGE_SIZE, &t, GFP_KERNEL);
372 if (!eq->page_list[i].buf)
373 goto err_out_free_pages;
374
375 dma_list[i] = t;
376 eq->page_list[i].map = t;
377
378 memset(eq->page_list[i].buf, 0, PAGE_SIZE);
379 }
380
381 eq->eqn = mlx4_bitmap_alloc(&priv->eq_table.bitmap);
382 if (eq->eqn == -1)
383 goto err_out_free_pages;
384
385 eq->doorbell = mlx4_get_eq_uar(dev, eq);
386 if (!eq->doorbell) {
387 err = -ENOMEM;
388 goto err_out_free_eq;
389 }
390
391 err = mlx4_mtt_init(dev, npages, PAGE_SHIFT, &eq->mtt);
392 if (err)
393 goto err_out_free_eq;
394
395 err = mlx4_write_mtt(dev, &eq->mtt, 0, npages, dma_list);
396 if (err)
397 goto err_out_free_mtt;
398
399 memset(eq_context, 0, sizeof *eq_context);
400 eq_context->flags = cpu_to_be32(MLX4_EQ_STATUS_OK |
401 MLX4_EQ_STATE_ARMED);
402 eq_context->log_eq_size = ilog2(eq->nent);
403 eq_context->intr = intr;
404 eq_context->log_page_size = PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT;
405
406 mtt_addr = mlx4_mtt_addr(dev, &eq->mtt);
407 eq_context->mtt_base_addr_h = mtt_addr >> 32;
408 eq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
409
410 err = mlx4_SW2HW_EQ(dev, mailbox, eq->eqn);
411 if (err) {
412 mlx4_warn(dev, "SW2HW_EQ failed (%d)\n", err);
413 goto err_out_free_mtt;
414 }
415
416 kfree(dma_list);
417 mlx4_free_cmd_mailbox(dev, mailbox);
418
419 eq->cons_index = 0;
420
421 return err;
422
423err_out_free_mtt:
424 mlx4_mtt_cleanup(dev, &eq->mtt);
425
426err_out_free_eq:
427 mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);
428
429err_out_free_pages:
430 for (i = 0; i < npages; ++i)
431 if (eq->page_list[i].buf)
432 dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
433 eq->page_list[i].buf,
434 eq->page_list[i].map);
435
436 mlx4_free_cmd_mailbox(dev, mailbox);
437
438err_out_free:
439 kfree(eq->page_list);
440 kfree(dma_list);
441
442err_out:
443 return err;
444}
445
446static void mlx4_free_eq(struct mlx4_dev *dev,
447 struct mlx4_eq *eq)
448{
449 struct mlx4_priv *priv = mlx4_priv(dev);
450 struct mlx4_cmd_mailbox *mailbox;
451 int err;
452 int npages = PAGE_ALIGN(MLX4_EQ_ENTRY_SIZE * eq->nent) / PAGE_SIZE;
453 int i;
454
455 mailbox = mlx4_alloc_cmd_mailbox(dev);
456 if (IS_ERR(mailbox))
457 return;
458
459 err = mlx4_HW2SW_EQ(dev, mailbox, eq->eqn);
460 if (err)
461 mlx4_warn(dev, "HW2SW_EQ failed (%d)\n", err);
462
463 if (0) {
464 mlx4_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn);
465 for (i = 0; i < sizeof (struct mlx4_eq_context) / 4; ++i) {
466 if (i % 4 == 0)
467 printk("[%02x] ", i * 4);
468 printk(" %08x", be32_to_cpup(mailbox->buf + i * 4));
469 if ((i + 1) % 4 == 0)
470 printk("\n");
471 }
472 }
473
474 mlx4_mtt_cleanup(dev, &eq->mtt);
475 for (i = 0; i < npages; ++i)
 476		dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
 477				  eq->page_list[i].buf,
 478				  eq->page_list[i].map);
479
480 kfree(eq->page_list);
481 mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);
482 mlx4_free_cmd_mailbox(dev, mailbox);
483}
484
485static void mlx4_free_irqs(struct mlx4_dev *dev)
486{
487 struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table;
488 int i;
489
490 if (eq_table->have_irq)
491 free_irq(dev->pdev->irq, dev);
492 for (i = 0; i < MLX4_NUM_EQ; ++i)
493 if (eq_table->eq[i].have_irq)
494 free_irq(eq_table->eq[i].irq, eq_table->eq + i);
495}
496
497static int __devinit mlx4_map_clr_int(struct mlx4_dev *dev)
498{
499 struct mlx4_priv *priv = mlx4_priv(dev);
500
501 priv->clr_base = ioremap(pci_resource_start(dev->pdev, priv->fw.clr_int_bar) +
502 priv->fw.clr_int_base, MLX4_CLR_INT_SIZE);
503 if (!priv->clr_base) {
504 mlx4_err(dev, "Couldn't map interrupt clear register, aborting.\n");
505 return -ENOMEM;
506 }
507
508 return 0;
509}
510
511static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
512{
513 struct mlx4_priv *priv = mlx4_priv(dev);
514
515 iounmap(priv->clr_base);
516}
517
518int __devinit mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt)
519{
520 struct mlx4_priv *priv = mlx4_priv(dev);
521 int ret;
522
523 /*
524 * We assume that mapping one page is enough for the whole EQ
525 * context table. This is fine with all current HCAs, because
526 * we only use 32 EQs and each EQ uses 64 bytes of context
 527	 * memory, or 2 KB total.
528 */
529 priv->eq_table.icm_virt = icm_virt;
530 priv->eq_table.icm_page = alloc_page(GFP_HIGHUSER);
531 if (!priv->eq_table.icm_page)
532 return -ENOMEM;
533 priv->eq_table.icm_dma = pci_map_page(dev->pdev, priv->eq_table.icm_page, 0,
534 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
535 if (pci_dma_mapping_error(priv->eq_table.icm_dma)) {
536 __free_page(priv->eq_table.icm_page);
537 return -ENOMEM;
538 }
539
540 ret = mlx4_MAP_ICM_page(dev, priv->eq_table.icm_dma, icm_virt);
541 if (ret) {
542 pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE,
543 PCI_DMA_BIDIRECTIONAL);
544 __free_page(priv->eq_table.icm_page);
545 }
546
547 return ret;
548}
549
550void mlx4_unmap_eq_icm(struct mlx4_dev *dev)
551{
552 struct mlx4_priv *priv = mlx4_priv(dev);
553
554 mlx4_UNMAP_ICM(dev, priv->eq_table.icm_virt, 1);
555 pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE,
556 PCI_DMA_BIDIRECTIONAL);
557 __free_page(priv->eq_table.icm_page);
558}
559
560int __devinit mlx4_init_eq_table(struct mlx4_dev *dev)
561{
562 struct mlx4_priv *priv = mlx4_priv(dev);
563 int err;
564 int i;
565
566 err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs,
567 dev->caps.num_eqs - 1, dev->caps.reserved_eqs);
568 if (err)
569 return err;
570
571 for (i = 0; i < ARRAY_SIZE(priv->eq_table.uar_map); ++i)
572 priv->eq_table.uar_map[i] = NULL;
573
574 err = mlx4_map_clr_int(dev);
575 if (err)
576 goto err_out_free;
577
578 priv->eq_table.clr_mask =
579 swab32(1 << (priv->eq_table.inta_pin & 31));
580 priv->eq_table.clr_int = priv->clr_base +
581 (priv->eq_table.inta_pin < 32 ? 4 : 0);
582
583 err = mlx4_create_eq(dev, dev->caps.num_cqs + MLX4_NUM_SPARE_EQE,
584 (dev->flags & MLX4_FLAG_MSI_X) ? MLX4_EQ_COMP : 0,
585 &priv->eq_table.eq[MLX4_EQ_COMP]);
586 if (err)
587 goto err_out_unmap;
588
589 err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
590 (dev->flags & MLX4_FLAG_MSI_X) ? MLX4_EQ_ASYNC : 0,
591 &priv->eq_table.eq[MLX4_EQ_ASYNC]);
592 if (err)
593 goto err_out_comp;
594
595 if (dev->flags & MLX4_FLAG_MSI_X) {
596 static const char *eq_name[] = {
597 [MLX4_EQ_COMP] = DRV_NAME " (comp)",
598 [MLX4_EQ_ASYNC] = DRV_NAME " (async)",
599 [MLX4_EQ_CATAS] = DRV_NAME " (catas)"
600 };
601
602 err = mlx4_create_eq(dev, 1, MLX4_EQ_CATAS,
603 &priv->eq_table.eq[MLX4_EQ_CATAS]);
604 if (err)
605 goto err_out_async;
606
607 for (i = 0; i < MLX4_EQ_CATAS; ++i) {
608 err = request_irq(priv->eq_table.eq[i].irq,
609 mlx4_msi_x_interrupt,
610 0, eq_name[i], priv->eq_table.eq + i);
611 if (err)
612 goto err_out_catas;
613
614 priv->eq_table.eq[i].have_irq = 1;
615 }
616
617 err = request_irq(priv->eq_table.eq[MLX4_EQ_CATAS].irq,
618 mlx4_catas_interrupt, 0,
619 eq_name[MLX4_EQ_CATAS], dev);
620 if (err)
621 goto err_out_catas;
622
623 priv->eq_table.eq[MLX4_EQ_CATAS].have_irq = 1;
624 } else {
625 err = request_irq(dev->pdev->irq, mlx4_interrupt,
 626				  IRQF_SHARED, DRV_NAME, dev);
627 if (err)
628 goto err_out_async;
629
630 priv->eq_table.have_irq = 1;
631 }
632
633 err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
634 priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
635 if (err)
636 mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
637 priv->eq_table.eq[MLX4_EQ_ASYNC].eqn, err);
638
639 for (i = 0; i < MLX4_EQ_CATAS; ++i)
640 eq_set_ci(&priv->eq_table.eq[i], 1);
641
642 if (dev->flags & MLX4_FLAG_MSI_X) {
643 err = mlx4_MAP_EQ(dev, MLX4_CATAS_EVENT_MASK, 0,
644 priv->eq_table.eq[MLX4_EQ_CATAS].eqn);
645 if (err)
646 mlx4_warn(dev, "MAP_EQ for catas EQ %d failed (%d)\n",
647 priv->eq_table.eq[MLX4_EQ_CATAS].eqn, err);
648 }
649
650 return 0;
651
652err_out_catas:
653 mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_CATAS]);
654
655err_out_async:
656 mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_ASYNC]);
657
658err_out_comp:
659 mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_COMP]);
660
661err_out_unmap:
662 mlx4_unmap_clr_int(dev);
663 mlx4_free_irqs(dev);
664
665err_out_free:
666 mlx4_bitmap_cleanup(&priv->eq_table.bitmap);
667 return err;
668}
669
670void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
671{
672 struct mlx4_priv *priv = mlx4_priv(dev);
673 int i;
674
675 if (dev->flags & MLX4_FLAG_MSI_X)
676 mlx4_MAP_EQ(dev, MLX4_CATAS_EVENT_MASK, 1,
677 priv->eq_table.eq[MLX4_EQ_CATAS].eqn);
678
679 mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 1,
680 priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
681
682 mlx4_free_irqs(dev);
683
684 for (i = 0; i < MLX4_EQ_CATAS; ++i)
685 mlx4_free_eq(dev, &priv->eq_table.eq[i]);
686 if (dev->flags & MLX4_FLAG_MSI_X)
687 mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_CATAS]);
688
689 mlx4_unmap_clr_int(dev);
690
691 for (i = 0; i < ARRAY_SIZE(priv->eq_table.uar_map); ++i)
692 if (priv->eq_table.uar_map[i])
693 iounmap(priv->eq_table.uar_map[i]);
694
695 mlx4_bitmap_cleanup(&priv->eq_table.bitmap);
696}
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
new file mode 100644
index 000000000000..c42717313663
--- /dev/null
+++ b/drivers/net/mlx4/fw.c
@@ -0,0 +1,775 @@
1/*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <linux/mlx4/cmd.h>
36
37#include "fw.h"
38#include "icm.h"
39
40extern void __buggy_use_of_MLX4_GET(void);
41extern void __buggy_use_of_MLX4_PUT(void);
42
43#define MLX4_GET(dest, source, offset) \
44 do { \
45 void *__p = (char *) (source) + (offset); \
46 switch (sizeof (dest)) { \
47 case 1: (dest) = *(u8 *) __p; break; \
48 case 2: (dest) = be16_to_cpup(__p); break; \
49 case 4: (dest) = be32_to_cpup(__p); break; \
50 case 8: (dest) = be64_to_cpup(__p); break; \
51 default: __buggy_use_of_MLX4_GET(); \
52 } \
53 } while (0)
54
55#define MLX4_PUT(dest, source, offset) \
56 do { \
57 void *__d = ((char *) (dest) + (offset)); \
58 switch (sizeof(source)) { \
59 case 1: *(u8 *) __d = (source); break; \
60 case 2: *(__be16 *) __d = cpu_to_be16(source); break; \
61 case 4: *(__be32 *) __d = cpu_to_be32(source); break; \
62 case 8: *(__be64 *) __d = cpu_to_be64(source); break; \
63 default: __buggy_use_of_MLX4_PUT(); \
64 } \
65 } while (0)
66
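/*
 * Usage sketch for the accessors above (illustrative): reading a
 * one-byte capability field from a big-endian firmware outbox.
 * Because sizeof(dest) is evaluated at compile time, only one case
 * survives; an unsupported size leaves a call to the deliberately
 * undefined __buggy_use_of_MLX4_GET() in the object file, turning the
 * mistake into a link-time error.
 *
 *	u8 field;
 *
 *	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET);
 *	max_qps = 1 << (field & 0x1f);
 */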
67static void dump_dev_cap_flags(struct mlx4_dev *dev, u32 flags)
68{
69 static const char *fname[] = {
70 [ 0] = "RC transport",
71 [ 1] = "UC transport",
72 [ 2] = "UD transport",
73 [ 3] = "SRC transport",
74 [ 4] = "reliable multicast",
75 [ 5] = "FCoIB support",
76 [ 6] = "SRQ support",
77 [ 7] = "IPoIB checksum offload",
78 [ 8] = "P_Key violation counter",
79 [ 9] = "Q_Key violation counter",
80 [10] = "VMM",
81 [16] = "MW support",
82 [17] = "APM support",
83 [18] = "Atomic ops support",
84 [19] = "Raw multicast support",
85 [20] = "Address vector port checking support",
86 [21] = "UD multicast support",
87 [24] = "Demand paging support",
88 [25] = "Router support"
89 };
90 int i;
91
92 mlx4_dbg(dev, "DEV_CAP flags:\n");
93 for (i = 0; i < 32; ++i)
94 if (fname[i] && (flags & (1 << i)))
95 mlx4_dbg(dev, " %s\n", fname[i]);
96}
97
98int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
99{
100 struct mlx4_cmd_mailbox *mailbox;
101 u32 *outbox;
102 u8 field;
103 u16 size;
104 u16 stat_rate;
105 int err;
106
107#define QUERY_DEV_CAP_OUT_SIZE 0x100
108#define QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET 0x10
109#define QUERY_DEV_CAP_MAX_QP_SZ_OFFSET 0x11
110#define QUERY_DEV_CAP_RSVD_QP_OFFSET 0x12
111#define QUERY_DEV_CAP_MAX_QP_OFFSET 0x13
112#define QUERY_DEV_CAP_RSVD_SRQ_OFFSET 0x14
113#define QUERY_DEV_CAP_MAX_SRQ_OFFSET 0x15
114#define QUERY_DEV_CAP_RSVD_EEC_OFFSET 0x16
115#define QUERY_DEV_CAP_MAX_EEC_OFFSET 0x17
116#define QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET 0x19
117#define QUERY_DEV_CAP_RSVD_CQ_OFFSET 0x1a
118#define QUERY_DEV_CAP_MAX_CQ_OFFSET 0x1b
119#define QUERY_DEV_CAP_MAX_MPT_OFFSET 0x1d
120#define QUERY_DEV_CAP_RSVD_EQ_OFFSET 0x1e
121#define QUERY_DEV_CAP_MAX_EQ_OFFSET 0x1f
122#define QUERY_DEV_CAP_RSVD_MTT_OFFSET 0x20
123#define QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET 0x21
124#define QUERY_DEV_CAP_RSVD_MRW_OFFSET 0x22
125#define QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET 0x23
126#define QUERY_DEV_CAP_MAX_AV_OFFSET 0x27
127#define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET 0x29
128#define QUERY_DEV_CAP_MAX_RES_QP_OFFSET 0x2b
129#define QUERY_DEV_CAP_MAX_RDMA_OFFSET 0x2f
130#define QUERY_DEV_CAP_RSZ_SRQ_OFFSET 0x33
131#define QUERY_DEV_CAP_ACK_DELAY_OFFSET 0x35
132#define QUERY_DEV_CAP_MTU_WIDTH_OFFSET 0x36
133#define QUERY_DEV_CAP_VL_PORT_OFFSET 0x37
134#define QUERY_DEV_CAP_MAX_GID_OFFSET 0x3b
135#define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET 0x3c
136#define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f
137#define QUERY_DEV_CAP_FLAGS_OFFSET 0x44
138#define QUERY_DEV_CAP_RSVD_UAR_OFFSET 0x48
139#define QUERY_DEV_CAP_UAR_SZ_OFFSET 0x49
140#define QUERY_DEV_CAP_PAGE_SZ_OFFSET 0x4b
141#define QUERY_DEV_CAP_BF_OFFSET 0x4c
142#define QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET 0x4d
143#define QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET 0x4e
144#define QUERY_DEV_CAP_LOG_MAX_BF_PAGES_OFFSET 0x4f
145#define QUERY_DEV_CAP_MAX_SG_SQ_OFFSET 0x51
146#define QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET 0x52
147#define QUERY_DEV_CAP_MAX_SG_RQ_OFFSET 0x55
148#define QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET 0x56
149#define QUERY_DEV_CAP_MAX_QP_MCG_OFFSET 0x61
150#define QUERY_DEV_CAP_RSVD_MCG_OFFSET 0x62
151#define QUERY_DEV_CAP_MAX_MCG_OFFSET 0x63
152#define QUERY_DEV_CAP_RSVD_PD_OFFSET 0x64
153#define QUERY_DEV_CAP_MAX_PD_OFFSET 0x65
154#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80
155#define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET 0x82
156#define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET 0x84
157#define QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET 0x86
158#define QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET 0x88
159#define QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET 0x8a
160#define QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET 0x8c
161#define QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET 0x8e
162#define QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET 0x90
163#define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET 0x92
164#define QUERY_DEV_CAP_BMME_FLAGS_OFFSET 0x97
165#define QUERY_DEV_CAP_RSVD_LKEY_OFFSET 0x98
166#define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0
167
168 mailbox = mlx4_alloc_cmd_mailbox(dev);
169 if (IS_ERR(mailbox))
170 return PTR_ERR(mailbox);
171 outbox = mailbox->buf;
172
173 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
174 MLX4_CMD_TIME_CLASS_A);
175
176 if (err)
177 goto out;
178
179 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_QP_OFFSET);
180 dev_cap->reserved_qps = 1 << (field & 0xf);
181 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET);
182 dev_cap->max_qps = 1 << (field & 0x1f);
183 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_SRQ_OFFSET);
184 dev_cap->reserved_srqs = 1 << (field >> 4);
185 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_OFFSET);
186 dev_cap->max_srqs = 1 << (field & 0x1f);
187 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET);
188 dev_cap->max_cq_sz = 1 << field;
189 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_CQ_OFFSET);
190 dev_cap->reserved_cqs = 1 << (field & 0xf);
191 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_OFFSET);
192 dev_cap->max_cqs = 1 << (field & 0x1f);
193 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MPT_OFFSET);
194 dev_cap->max_mpts = 1 << (field & 0x3f);
195 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_EQ_OFFSET);
196 dev_cap->reserved_eqs = 1 << (field & 0xf);
197 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_EQ_OFFSET);
198 dev_cap->max_eqs = 1 << (field & 0x7);
199 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MTT_OFFSET);
200 dev_cap->reserved_mtts = 1 << (field >> 4);
201 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET);
202 dev_cap->max_mrw_sz = 1 << field;
203 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MRW_OFFSET);
204 dev_cap->reserved_mrws = 1 << (field & 0xf);
205 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET);
206 dev_cap->max_mtt_seg = 1 << (field & 0x3f);
207 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_REQ_QP_OFFSET);
208 dev_cap->max_requester_per_qp = 1 << (field & 0x3f);
209 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RES_QP_OFFSET);
210 dev_cap->max_responder_per_qp = 1 << (field & 0x3f);
211 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RDMA_OFFSET);
212 dev_cap->max_rdma_global = 1 << (field & 0x3f);
213 MLX4_GET(field, outbox, QUERY_DEV_CAP_ACK_DELAY_OFFSET);
214 dev_cap->local_ca_ack_delay = field & 0x1f;
215 MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET);
216 dev_cap->max_mtu = field >> 4;
217 dev_cap->max_port_width = field & 0xf;
218 MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
219 dev_cap->max_vl = field >> 4;
220 dev_cap->num_ports = field & 0xf;
221 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET);
222 dev_cap->max_gids = 1 << (field & 0xf);
223 MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
224 dev_cap->stat_rate_support = stat_rate;
225 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PKEY_OFFSET);
226 dev_cap->max_pkeys = 1 << (field & 0xf);
227 MLX4_GET(dev_cap->flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
228 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
229 dev_cap->reserved_uars = field >> 4;
230 MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET);
231 dev_cap->uar_size = 1 << ((field & 0x3f) + 20);
232 MLX4_GET(field, outbox, QUERY_DEV_CAP_PAGE_SZ_OFFSET);
233 dev_cap->min_page_sz = 1 << field;
234
235 MLX4_GET(field, outbox, QUERY_DEV_CAP_BF_OFFSET);
236 if (field & 0x80) {
237 MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET);
238 dev_cap->bf_reg_size = 1 << (field & 0x1f);
239 MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET);
240 dev_cap->bf_regs_per_page = 1 << (field & 0x3f);
241 mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
242 dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
243 } else {
244 dev_cap->bf_reg_size = 0;
245 mlx4_dbg(dev, "BlueFlame not available\n");
246 }
247
248 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_SQ_OFFSET);
249 dev_cap->max_sq_sg = field;
250 MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET);
251 dev_cap->max_sq_desc_sz = size;
252
253 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_MCG_OFFSET);
254 dev_cap->max_qp_per_mcg = 1 << field;
255 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MCG_OFFSET);
256 dev_cap->reserved_mgms = field & 0xf;
257 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MCG_OFFSET);
258 dev_cap->max_mcgs = 1 << field;
259 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_PD_OFFSET);
260 dev_cap->reserved_pds = field >> 4;
261 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PD_OFFSET);
262 dev_cap->max_pds = 1 << (field & 0x3f);
263
264 MLX4_GET(size, outbox, QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET);
265 dev_cap->rdmarc_entry_sz = size;
266 MLX4_GET(size, outbox, QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET);
267 dev_cap->qpc_entry_sz = size;
268 MLX4_GET(size, outbox, QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET);
269 dev_cap->aux_entry_sz = size;
270 MLX4_GET(size, outbox, QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET);
271 dev_cap->altc_entry_sz = size;
272 MLX4_GET(size, outbox, QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET);
273 dev_cap->eqc_entry_sz = size;
274 MLX4_GET(size, outbox, QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET);
275 dev_cap->cqc_entry_sz = size;
276 MLX4_GET(size, outbox, QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET);
277 dev_cap->srq_entry_sz = size;
278 MLX4_GET(size, outbox, QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET);
279 dev_cap->cmpt_entry_sz = size;
280 MLX4_GET(size, outbox, QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET);
281 dev_cap->mtt_entry_sz = size;
282 MLX4_GET(size, outbox, QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET);
283 dev_cap->dmpt_entry_sz = size;
284
285 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET);
286 dev_cap->max_srq_sz = 1 << field;
287 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_SZ_OFFSET);
288 dev_cap->max_qp_sz = 1 << field;
289 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSZ_SRQ_OFFSET);
290 dev_cap->resize_srq = field & 1;
291 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_RQ_OFFSET);
292 dev_cap->max_rq_sg = field;
293 MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET);
294 dev_cap->max_rq_desc_sz = size;
295
296 MLX4_GET(dev_cap->bmme_flags, outbox,
297 QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
298 MLX4_GET(dev_cap->reserved_lkey, outbox,
299 QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
300 MLX4_GET(dev_cap->max_icm_sz, outbox,
301 QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
302
303 if (dev_cap->bmme_flags & 1)
304 mlx4_dbg(dev, "Base MM extensions: yes "
305 "(flags %d, rsvd L_Key %08x)\n",
306 dev_cap->bmme_flags, dev_cap->reserved_lkey);
307 else
308 mlx4_dbg(dev, "Base MM extensions: no\n");
309
310 /*
311 * Each UAR has 4 EQ doorbells; so if a UAR is reserved, then
312 * we can't use any EQs whose doorbell falls on that page,
313 * even if the EQ itself isn't reserved.
314 */
315 dev_cap->reserved_eqs = max(dev_cap->reserved_uars * 4,
316 dev_cap->reserved_eqs);
317
318 mlx4_dbg(dev, "Max ICM size %lld MB\n",
319 (unsigned long long) dev_cap->max_icm_sz >> 20);
320 mlx4_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n",
321 dev_cap->max_qps, dev_cap->reserved_qps, dev_cap->qpc_entry_sz);
322 mlx4_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n",
323 dev_cap->max_srqs, dev_cap->reserved_srqs, dev_cap->srq_entry_sz);
324 mlx4_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n",
325 dev_cap->max_cqs, dev_cap->reserved_cqs, dev_cap->cqc_entry_sz);
326 mlx4_dbg(dev, "Max EQs: %d, reserved EQs: %d, entry size: %d\n",
327 dev_cap->max_eqs, dev_cap->reserved_eqs, dev_cap->eqc_entry_sz);
328 mlx4_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n",
329 dev_cap->reserved_mrws, dev_cap->reserved_mtts);
330 mlx4_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n",
331 dev_cap->max_pds, dev_cap->reserved_pds, dev_cap->reserved_uars);
332 mlx4_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n",
333 dev_cap->max_qp_per_mcg, dev_cap->reserved_mgms);
334 mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
335 dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz);
336 mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n",
337 dev_cap->local_ca_ack_delay, 128 << dev_cap->max_mtu,
338 dev_cap->max_port_width);
339 mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n",
340 dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg);
341 mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n",
342 dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg);
343
344 dump_dev_cap_flags(dev, dev_cap->flags);
345
346out:
347 mlx4_free_cmd_mailbox(dev, mailbox);
348 return err;
349}
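
The MLX4_GET()/MLX4_PUT() helpers used throughout these query and init routines copy one field between the big-endian command mailbox and a host-order variable, dispatching on the field's size. The driver's real macros are defined earlier in fw.c; the sketch below only illustrates the idea and is not a verbatim copy:

/* Sketch of a size-dispatching big-endian mailbox read; the real
 * MLX4_GET macro in fw.c is equivalent in spirit. */
#define SKETCH_GET(dest, source, offset)                          \
	do {                                                      \
		void *__p = (char *) (source) + (offset);         \
		switch (sizeof (dest)) {                          \
		case 1: (dest) = *(u8 *) __p;       break;        \
		case 2: (dest) = be16_to_cpup(__p); break;        \
		case 4: (dest) = be32_to_cpup(__p); break;        \
		case 8: (dest) = be64_to_cpup(__p); break;        \
		}                                                 \
	} while (0)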
350
351int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
352{
353 struct mlx4_cmd_mailbox *mailbox;
354 struct mlx4_icm_iter iter;
355 __be64 *pages;
356 int lg;
357 int nent = 0;
358 int i;
359 int err = 0;
360 int ts = 0, tc = 0;
361
362 mailbox = mlx4_alloc_cmd_mailbox(dev);
363 if (IS_ERR(mailbox))
364 return PTR_ERR(mailbox);
365 memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);
366 pages = mailbox->buf;
367
368 for (mlx4_icm_first(icm, &iter);
369 !mlx4_icm_last(&iter);
370 mlx4_icm_next(&iter)) {
371 /*
372 * We have to pass pages that are aligned to their
373 * size, so find the least significant 1 in the
374 * address or size and use that as our log2 size.
375 */
376 lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1;
377 if (lg < MLX4_ICM_PAGE_SHIFT) {
378 mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx).\n",
379 MLX4_ICM_PAGE_SIZE,
380 (unsigned long long) mlx4_icm_addr(&iter),
381 mlx4_icm_size(&iter));
382 err = -EINVAL;
383 goto out;
384 }
385
386 for (i = 0; i < mlx4_icm_size(&iter) >> lg; ++i) {
387 if (virt != -1) {
388 pages[nent * 2] = cpu_to_be64(virt);
389 virt += 1 << lg;
390 }
391
392 pages[nent * 2 + 1] =
393 cpu_to_be64((mlx4_icm_addr(&iter) + (i << lg)) |
394 (lg - MLX4_ICM_PAGE_SHIFT));
395 ts += 1 << (lg - 10);
396 ++tc;
397
398 if (++nent == MLX4_MAILBOX_SIZE / 16) {
399 err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
400 MLX4_CMD_TIME_CLASS_B);
401 if (err)
402 goto out;
403 nent = 0;
404 }
405 }
406 }
407
408 if (nent)
409 err = mlx4_cmd(dev, mailbox->dma, nent, 0, op, MLX4_CMD_TIME_CLASS_B);
410 if (err)
411 goto out;
412
413 switch (op) {
414 case MLX4_CMD_MAP_FA:
415 mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW.\n", tc, ts);
416 break;
417 case MLX4_CMD_MAP_ICM_AUX:
418 mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux.\n", tc, ts);
419 break;
420 case MLX4_CMD_MAP_ICM:
421 mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM.\n",
422 tc, ts, (unsigned long long) virt - (ts << 10));
423 break;
424 }
425
426out:
427 mlx4_free_cmd_mailbox(dev, mailbox);
428 return err;
429}
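
The "lg = ffs(addr | size) - 1" trick above picks the largest power of two that divides both the chunk's DMA address and its length, i.e. the largest page size the firmware can be given for that chunk. A standalone illustration with hypothetical values:

/* Hypothetical chunk: address 0x40000, length 0x30000.
 * ffs() returns the 1-based index of the least significant set bit,
 * so ffs(0x40000 | 0x30000) - 1 == ffs(0x70000) - 1 == 17 - 1 == 16,
 * and the chunk can be passed to firmware as 64 KB (1 << 16) pages:
 * three of them, each 64 KB aligned. */
unsigned long addr = 0x40000, size = 0x30000;
int lg = ffs(addr | size) - 1;	/* lg == 16 */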
430
431int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm)
432{
433 return mlx4_map_cmd(dev, MLX4_CMD_MAP_FA, icm, -1);
434}
435
436int mlx4_UNMAP_FA(struct mlx4_dev *dev)
437{
438 return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA, MLX4_CMD_TIME_CLASS_B);
439}
440
441
442int mlx4_RUN_FW(struct mlx4_dev *dev)
443{
444 return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW, MLX4_CMD_TIME_CLASS_A);
445}
446
447int mlx4_QUERY_FW(struct mlx4_dev *dev)
448{
449 struct mlx4_fw *fw = &mlx4_priv(dev)->fw;
450 struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
451 struct mlx4_cmd_mailbox *mailbox;
452 u32 *outbox;
453 int err = 0;
454 u64 fw_ver;
455 u8 lg;
456
457#define QUERY_FW_OUT_SIZE 0x100
458#define QUERY_FW_VER_OFFSET 0x00
459#define QUERY_FW_MAX_CMD_OFFSET 0x0f
460#define QUERY_FW_ERR_START_OFFSET 0x30
461#define QUERY_FW_ERR_SIZE_OFFSET 0x38
462#define QUERY_FW_ERR_BAR_OFFSET 0x3c
463
464#define QUERY_FW_SIZE_OFFSET 0x00
465#define QUERY_FW_CLR_INT_BASE_OFFSET 0x20
466#define QUERY_FW_CLR_INT_BAR_OFFSET 0x28
467
468 mailbox = mlx4_alloc_cmd_mailbox(dev);
469 if (IS_ERR(mailbox))
470 return PTR_ERR(mailbox);
471 outbox = mailbox->buf;
472
473 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
474 MLX4_CMD_TIME_CLASS_A);
475 if (err)
476 goto out;
477
478 MLX4_GET(fw_ver, outbox, QUERY_FW_VER_OFFSET);
479 /*
480 * FW subminor version is at more significant bits than minor
481 * version, so swap here.
482 */
483 dev->caps.fw_ver = (fw_ver & 0xffff00000000ull) |
484 ((fw_ver & 0xffff0000ull) >> 16) |
485 ((fw_ver & 0x0000ffffull) << 16);
486
487 MLX4_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET);
488 cmd->max_cmds = 1 << lg;
489
490 mlx4_dbg(dev, "FW version %d.%d.%03d, max commands %d\n",
491 (int) (dev->caps.fw_ver >> 32),
492 (int) (dev->caps.fw_ver >> 16) & 0xffff,
493 (int) dev->caps.fw_ver & 0xffff,
494 cmd->max_cmds);
495
496 MLX4_GET(fw->catas_offset, outbox, QUERY_FW_ERR_START_OFFSET);
497 MLX4_GET(fw->catas_size, outbox, QUERY_FW_ERR_SIZE_OFFSET);
498 MLX4_GET(fw->catas_bar, outbox, QUERY_FW_ERR_BAR_OFFSET);
499 fw->catas_bar = (fw->catas_bar >> 6) * 2;
500
501 mlx4_dbg(dev, "Catastrophic error buffer at 0x%llx, size 0x%x, BAR %d\n",
502 (unsigned long long) fw->catas_offset, fw->catas_size, fw->catas_bar);
503
504 MLX4_GET(fw->fw_pages, outbox, QUERY_FW_SIZE_OFFSET);
505 MLX4_GET(fw->clr_int_base, outbox, QUERY_FW_CLR_INT_BASE_OFFSET);
506 MLX4_GET(fw->clr_int_bar, outbox, QUERY_FW_CLR_INT_BAR_OFFSET);
507 fw->clr_int_bar = (fw->clr_int_bar >> 6) * 2;
508
509 mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2);
510
511 /*
512 * Round up number of system pages needed in case
513 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
514 */
515 fw->fw_pages =
516 ALIGN(fw->fw_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
517 (PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);
518
519 mlx4_dbg(dev, "Clear int @ %llx, BAR %d\n",
520 (unsigned long long) fw->clr_int_base, fw->clr_int_bar);
521
522out:
523 mlx4_free_cmd_mailbox(dev, mailbox);
524 return err;
525}
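
The version fix-up above moves the 16-bit subminor field below the minor field so that dev->caps.fw_ver compares and prints naturally as major.minor.subminor. Worked through with a hypothetical raw value:

/* Raw QUERY_FW word with major 2, subminor 5, minor 1
 * (raw layout per the comment above: [47:32]=major,
 *  [31:16]=subminor, [15:0]=minor). */
u64 raw = 0x000200050001ull;
u64 ver = (raw & 0xffff00000000ull) |
	  ((raw & 0xffff0000ull) >> 16) |
	  ((raw & 0x0000ffffull) << 16);	/* 0x000200010005 */
/* The debug print above then shows "FW version 2.1.005". */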
526
527static void get_board_id(void *vsd, char *board_id)
528{
529 int i;
530
531#define VSD_OFFSET_SIG1 0x00
532#define VSD_OFFSET_SIG2 0xde
533#define VSD_OFFSET_MLX_BOARD_ID 0xd0
534#define VSD_OFFSET_TS_BOARD_ID 0x20
535
536#define VSD_SIGNATURE_TOPSPIN 0x5ad
537
538 memset(board_id, 0, MLX4_BOARD_ID_LEN);
539
540 if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN &&
541 be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) {
542 strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MLX4_BOARD_ID_LEN);
543 } else {
544 /*
545 * The board ID is a string but the firmware byte
546 * swaps each 4-byte word before passing it back to
547 * us. Therefore we need to swab it before printing.
548 */
549 for (i = 0; i < 4; ++i)
550 ((u32 *) board_id)[i] =
551 swab32(*(u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4));
552 }
553}
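
Because the firmware byte-swaps each 32-bit word of the VSD, a board ID word stored as "MT_0" comes back as "0_TM"; swab32() reverses the bytes within each word and so restores the string regardless of host endianness. A small hypothetical illustration:

/* Bytes as handed back by firmware for one word of the board ID. */
char raw[4] = { '0', '_', 'T', 'M' };
u32 fixed = swab32(*(u32 *) raw);	/* bytes now 'M' 'T' '_' '0' */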
554
555int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter)
556{
557 struct mlx4_cmd_mailbox *mailbox;
558 u32 *outbox;
559 int err;
560
561#define QUERY_ADAPTER_OUT_SIZE 0x100
562#define QUERY_ADAPTER_VENDOR_ID_OFFSET 0x00
563#define QUERY_ADAPTER_DEVICE_ID_OFFSET 0x04
564#define QUERY_ADAPTER_REVISION_ID_OFFSET 0x08
565#define QUERY_ADAPTER_INTA_PIN_OFFSET 0x10
566#define QUERY_ADAPTER_VSD_OFFSET 0x20
567
568 mailbox = mlx4_alloc_cmd_mailbox(dev);
569 if (IS_ERR(mailbox))
570 return PTR_ERR(mailbox);
571 outbox = mailbox->buf;
572
573 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_ADAPTER,
574 MLX4_CMD_TIME_CLASS_A);
575 if (err)
576 goto out;
577
578 MLX4_GET(adapter->vendor_id, outbox, QUERY_ADAPTER_VENDOR_ID_OFFSET);
579 MLX4_GET(adapter->device_id, outbox, QUERY_ADAPTER_DEVICE_ID_OFFSET);
580 MLX4_GET(adapter->revision_id, outbox, QUERY_ADAPTER_REVISION_ID_OFFSET);
581 MLX4_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET);
582
583 get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4,
584 adapter->board_id);
585
586out:
587 mlx4_free_cmd_mailbox(dev, mailbox);
588 return err;
589}
590
591int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
592{
593 struct mlx4_cmd_mailbox *mailbox;
594 __be32 *inbox;
595 int err;
596
597#define INIT_HCA_IN_SIZE 0x200
598#define INIT_HCA_VERSION_OFFSET 0x000
599#define INIT_HCA_VERSION 2
600#define INIT_HCA_FLAGS_OFFSET 0x014
601#define INIT_HCA_QPC_OFFSET 0x020
602#define INIT_HCA_QPC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x10)
603#define INIT_HCA_LOG_QP_OFFSET (INIT_HCA_QPC_OFFSET + 0x17)
604#define INIT_HCA_SRQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x28)
605#define INIT_HCA_LOG_SRQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x2f)
606#define INIT_HCA_CQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x30)
607#define INIT_HCA_LOG_CQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x37)
608#define INIT_HCA_ALTC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x40)
609#define INIT_HCA_AUXC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x50)
610#define INIT_HCA_EQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x60)
611#define INIT_HCA_LOG_EQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x67)
612#define INIT_HCA_RDMARC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x70)
613#define INIT_HCA_LOG_RD_OFFSET (INIT_HCA_QPC_OFFSET + 0x77)
614#define INIT_HCA_MCAST_OFFSET 0x0c0
615#define INIT_HCA_MC_BASE_OFFSET (INIT_HCA_MCAST_OFFSET + 0x00)
616#define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12)
617#define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x16)
618#define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b)
619#define INIT_HCA_TPT_OFFSET 0x0f0
620#define INIT_HCA_DMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x00)
621#define INIT_HCA_LOG_MPT_SZ_OFFSET (INIT_HCA_TPT_OFFSET + 0x0b)
622#define INIT_HCA_MTT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x10)
623#define INIT_HCA_CMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x18)
624#define INIT_HCA_UAR_OFFSET 0x120
625#define INIT_HCA_LOG_UAR_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x0a)
626#define INIT_HCA_UAR_PAGE_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x0b)
627
628 mailbox = mlx4_alloc_cmd_mailbox(dev);
629 if (IS_ERR(mailbox))
630 return PTR_ERR(mailbox);
631 inbox = mailbox->buf;
632
633 memset(inbox, 0, INIT_HCA_IN_SIZE);
634
635 *((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION;
636
637#if defined(__LITTLE_ENDIAN)
638 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1);
639#elif defined(__BIG_ENDIAN)
640 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 1);
641#else
642#error Host endianness not defined
643#endif
644 /* Check port for UD address vector: */
645 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1);
646
647 /* QPC/EEC/CQC/EQC/RDMARC attributes */
648
649 MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET);
650 MLX4_PUT(inbox, param->log_num_qps, INIT_HCA_LOG_QP_OFFSET);
651 MLX4_PUT(inbox, param->srqc_base, INIT_HCA_SRQC_BASE_OFFSET);
652 MLX4_PUT(inbox, param->log_num_srqs, INIT_HCA_LOG_SRQ_OFFSET);
653 MLX4_PUT(inbox, param->cqc_base, INIT_HCA_CQC_BASE_OFFSET);
654 MLX4_PUT(inbox, param->log_num_cqs, INIT_HCA_LOG_CQ_OFFSET);
655 MLX4_PUT(inbox, param->altc_base, INIT_HCA_ALTC_BASE_OFFSET);
656 MLX4_PUT(inbox, param->auxc_base, INIT_HCA_AUXC_BASE_OFFSET);
657 MLX4_PUT(inbox, param->eqc_base, INIT_HCA_EQC_BASE_OFFSET);
658 MLX4_PUT(inbox, param->log_num_eqs, INIT_HCA_LOG_EQ_OFFSET);
659 MLX4_PUT(inbox, param->rdmarc_base, INIT_HCA_RDMARC_BASE_OFFSET);
660 MLX4_PUT(inbox, param->log_rd_per_qp, INIT_HCA_LOG_RD_OFFSET);
661
662 /* multicast attributes */
663
664 MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET);
665 MLX4_PUT(inbox, param->log_mc_entry_sz, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
666 MLX4_PUT(inbox, param->log_mc_hash_sz, INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
667 MLX4_PUT(inbox, param->log_mc_table_sz, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
668
669 /* TPT attributes */
670
671 MLX4_PUT(inbox, param->dmpt_base, INIT_HCA_DMPT_BASE_OFFSET);
672 MLX4_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET);
673 MLX4_PUT(inbox, param->mtt_base, INIT_HCA_MTT_BASE_OFFSET);
674 MLX4_PUT(inbox, param->cmpt_base, INIT_HCA_CMPT_BASE_OFFSET);
675
676 /* UAR attributes */
677
678 MLX4_PUT(inbox, (u8) (PAGE_SHIFT - 12), INIT_HCA_UAR_PAGE_SZ_OFFSET);
679 MLX4_PUT(inbox, param->log_uar_sz, INIT_HCA_LOG_UAR_SZ_OFFSET);
680
681 err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 1000);
682
683 if (err)
684 mlx4_err(dev, "INIT_HCA returns %d\n", err);
685
686 mlx4_free_cmd_mailbox(dev, mailbox);
687 return err;
688}
689
690int mlx4_INIT_PORT(struct mlx4_dev *dev, struct mlx4_init_port_param *param, int port)
691{
692 struct mlx4_cmd_mailbox *mailbox;
693 u32 *inbox;
694 int err;
695 u32 flags;
696
697#define INIT_PORT_IN_SIZE 256
698#define INIT_PORT_FLAGS_OFFSET 0x00
699#define INIT_PORT_FLAG_SIG (1 << 18)
700#define INIT_PORT_FLAG_NG (1 << 17)
701#define INIT_PORT_FLAG_G0 (1 << 16)
702#define INIT_PORT_VL_SHIFT 4
703#define INIT_PORT_PORT_WIDTH_SHIFT 8
704#define INIT_PORT_MTU_OFFSET 0x04
705#define INIT_PORT_MAX_GID_OFFSET 0x06
706#define INIT_PORT_MAX_PKEY_OFFSET 0x0a
707#define INIT_PORT_GUID0_OFFSET 0x10
708#define INIT_PORT_NODE_GUID_OFFSET 0x18
709#define INIT_PORT_SI_GUID_OFFSET 0x20
710
711 mailbox = mlx4_alloc_cmd_mailbox(dev);
712 if (IS_ERR(mailbox))
713 return PTR_ERR(mailbox);
714 inbox = mailbox->buf;
715
716 memset(inbox, 0, INIT_PORT_IN_SIZE);
717
718 flags = 0;
719 flags |= param->set_guid0 ? INIT_PORT_FLAG_G0 : 0;
720 flags |= param->set_node_guid ? INIT_PORT_FLAG_NG : 0;
721 flags |= param->set_si_guid ? INIT_PORT_FLAG_SIG : 0;
722 flags |= (param->vl_cap & 0xf) << INIT_PORT_VL_SHIFT;
723 flags |= (param->port_width_cap & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT;
724 MLX4_PUT(inbox, flags, INIT_PORT_FLAGS_OFFSET);
725
726 MLX4_PUT(inbox, param->mtu, INIT_PORT_MTU_OFFSET);
727 MLX4_PUT(inbox, param->max_gid, INIT_PORT_MAX_GID_OFFSET);
728 MLX4_PUT(inbox, param->max_pkey, INIT_PORT_MAX_PKEY_OFFSET);
729 MLX4_PUT(inbox, param->guid0, INIT_PORT_GUID0_OFFSET);
730 MLX4_PUT(inbox, param->node_guid, INIT_PORT_NODE_GUID_OFFSET);
731 MLX4_PUT(inbox, param->si_guid, INIT_PORT_SI_GUID_OFFSET);
732
733 err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT,
734 MLX4_CMD_TIME_CLASS_A);
735
736 mlx4_free_cmd_mailbox(dev, mailbox);
737
738 return err;
739}
740EXPORT_SYMBOL_GPL(mlx4_INIT_PORT);
741
742int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port)
743{
744 return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000);
745}
746EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT);
747
748int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic)
749{
750 return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000);
751}
752
753int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
754{
755 int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0,
756 MLX4_CMD_SET_ICM_SIZE,
757 MLX4_CMD_TIME_CLASS_A);
758 if (ret)
759 return ret;
760
761 /*
762 * Round up number of system pages needed in case
763 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
764 */
765 *aux_pages = ALIGN(*aux_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
766 (PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);
767
768 return 0;
769}
770
771int mlx4_NOP(struct mlx4_dev *dev)
772{
773 /* Input modifier of 0x1f means "finish as soon as possible." */
774 return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100);
775}
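
Both mlx4_QUERY_FW() and mlx4_SET_ICM_SIZE() above round a count of 4 KB ICM pages up to whole system pages with the same ALIGN-then-shift idiom. Worked through assuming a hypothetical host with 16 KB system pages (PAGE_SHIFT == 14); on 4 KB-page hosts the ratio is 1 and the expression is a no-op:

/* 10 ICM pages (40 KB) on 16 KB system pages:
 * PAGE_SIZE / MLX4_ICM_PAGE_SIZE == 4,
 * PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT == 2,
 * ALIGN(10, 4) == 12, and 12 >> 2 == 3 system pages (48 KB). */
u64 pages = 10;
pages = ALIGN(pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
	(PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);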
diff --git a/drivers/net/mlx4/fw.h b/drivers/net/mlx4/fw.h
new file mode 100644
index 000000000000..2616fa53d4d0
--- /dev/null
+++ b/drivers/net/mlx4/fw.h
@@ -0,0 +1,167 @@
1/*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2006, 2007 Cisco Systems. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#ifndef MLX4_FW_H
36#define MLX4_FW_H
37
38#include "mlx4.h"
39#include "icm.h"
40
41struct mlx4_dev_cap {
42 int max_srq_sz;
43 int max_qp_sz;
44 int reserved_qps;
45 int max_qps;
46 int reserved_srqs;
47 int max_srqs;
48 int max_cq_sz;
49 int reserved_cqs;
50 int max_cqs;
51 int max_mpts;
52 int reserved_eqs;
53 int max_eqs;
54 int reserved_mtts;
55 int max_mrw_sz;
56 int reserved_mrws;
57 int max_mtt_seg;
58 int max_requester_per_qp;
59 int max_responder_per_qp;
60 int max_rdma_global;
61 int local_ca_ack_delay;
62 int max_mtu;
63 int max_port_width;
64 int max_vl;
65 int num_ports;
66 int max_gids;
67 u16 stat_rate_support;
68 int max_pkeys;
69 u32 flags;
70 int reserved_uars;
71 int uar_size;
72 int min_page_sz;
73 int bf_reg_size;
74 int bf_regs_per_page;
75 int max_sq_sg;
76 int max_sq_desc_sz;
77 int max_rq_sg;
78 int max_rq_desc_sz;
79 int max_qp_per_mcg;
80 int reserved_mgms;
81 int max_mcgs;
82 int reserved_pds;
83 int max_pds;
84 int qpc_entry_sz;
85 int rdmarc_entry_sz;
86 int altc_entry_sz;
87 int aux_entry_sz;
88 int srq_entry_sz;
89 int cqc_entry_sz;
90 int eqc_entry_sz;
91 int dmpt_entry_sz;
92 int cmpt_entry_sz;
93 int mtt_entry_sz;
94 int resize_srq;
95 u8 bmme_flags;
96 u32 reserved_lkey;
97 u64 max_icm_sz;
98};
99
100struct mlx4_adapter {
101 u32 vendor_id;
102 u32 device_id;
103 u32 revision_id;
104 char board_id[MLX4_BOARD_ID_LEN];
105 u8 inta_pin;
106};
107
108struct mlx4_init_hca_param {
109 u64 qpc_base;
110 u64 rdmarc_base;
111 u64 auxc_base;
112 u64 altc_base;
113 u64 srqc_base;
114 u64 cqc_base;
115 u64 eqc_base;
116 u64 mc_base;
117 u64 dmpt_base;
118 u64 cmpt_base;
119 u64 mtt_base;
120 u16 log_mc_entry_sz;
121 u16 log_mc_hash_sz;
122 u8 log_num_qps;
123 u8 log_num_srqs;
124 u8 log_num_cqs;
125 u8 log_num_eqs;
126 u8 log_rd_per_qp;
127 u8 log_mc_table_sz;
128 u8 log_mpt_sz;
129 u8 log_uar_sz;
130};
131
132struct mlx4_init_ib_param {
133 int port_width;
134 int vl_cap;
135 int mtu_cap;
136 u16 gid_cap;
137 u16 pkey_cap;
138 int set_guid0;
139 u64 guid0;
140 int set_node_guid;
141 u64 node_guid;
142 int set_si_guid;
143 u64 si_guid;
144};
145
146struct mlx4_set_ib_param {
147 int set_si_guid;
148 int reset_qkey_viol;
149 u64 si_guid;
150 u32 cap_mask;
151};
152
153int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap);
154int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm);
155int mlx4_UNMAP_FA(struct mlx4_dev *dev);
156int mlx4_RUN_FW(struct mlx4_dev *dev);
157int mlx4_QUERY_FW(struct mlx4_dev *dev);
158int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter);
159int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param);
160int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic);
161int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt);
162int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages);
163int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm);
164int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev);
165int mlx4_NOP(struct mlx4_dev *dev);
166
167#endif /* MLX4_FW_H */
diff --git a/drivers/net/mlx4/icm.c b/drivers/net/mlx4/icm.c
new file mode 100644
index 000000000000..e96feaed6ed4
--- /dev/null
+++ b/drivers/net/mlx4/icm.c
@@ -0,0 +1,379 @@
1/*
2 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
3 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <linux/init.h>
35#include <linux/errno.h>
36
37#include <linux/mlx4/cmd.h>
38
39#include "mlx4.h"
40#include "icm.h"
41#include "fw.h"
42
43/*
44 * We allocate in as big chunks as we can, up to a maximum of 256 KB
45 * per chunk.
46 */
47enum {
48 MLX4_ICM_ALLOC_SIZE = 1 << 18,
49 MLX4_TABLE_CHUNK_SIZE = 1 << 18
50};
51
52void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm)
53{
54 struct mlx4_icm_chunk *chunk, *tmp;
55 int i;
56
57 list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
58 if (chunk->nsg > 0)
59 pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
60 PCI_DMA_BIDIRECTIONAL);
61
62 for (i = 0; i < chunk->npages; ++i)
63 __free_pages(chunk->mem[i].page,
64 get_order(chunk->mem[i].length));
65
66 kfree(chunk);
67 }
68
69 kfree(icm);
70}
71
72struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
73 gfp_t gfp_mask)
74{
75 struct mlx4_icm *icm;
76 struct mlx4_icm_chunk *chunk = NULL;
77 int cur_order;
78
79 icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
80 if (!icm)
81 return icm;
82
83 icm->refcount = 0;
84 INIT_LIST_HEAD(&icm->chunk_list);
85
86 cur_order = get_order(MLX4_ICM_ALLOC_SIZE);
87
88 while (npages > 0) {
89 if (!chunk) {
90 chunk = kmalloc(sizeof *chunk,
91 gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
92 if (!chunk)
93 goto fail;
94
95 chunk->npages = 0;
96 chunk->nsg = 0;
97 list_add_tail(&chunk->list, &icm->chunk_list);
98 }
99
100 while (1 << cur_order > npages)
101 --cur_order;
102
103 chunk->mem[chunk->npages].page = alloc_pages(gfp_mask, cur_order);
104 if (chunk->mem[chunk->npages].page) {
105 chunk->mem[chunk->npages].length = PAGE_SIZE << cur_order;
106 chunk->mem[chunk->npages].offset = 0;
107
108 if (++chunk->npages == MLX4_ICM_CHUNK_LEN) {
109 chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
110 chunk->npages,
111 PCI_DMA_BIDIRECTIONAL);
112
113 if (chunk->nsg <= 0)
114 goto fail;
115
116 chunk = NULL;
117 }
118
119 npages -= 1 << cur_order;
120 } else {
121 --cur_order;
122 if (cur_order < 0)
123 goto fail;
124 }
125 }
126
127 if (chunk) {
128 chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
129 chunk->npages,
130 PCI_DMA_BIDIRECTIONAL);
131
132 if (chunk->nsg <= 0)
133 goto fail;
134 }
135
136 return icm;
137
138fail:
139 mlx4_free_icm(dev, icm);
140 return NULL;
141}
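
A note on the sizing logic above: the allocator starts at get_order(MLX4_ICM_ALLOC_SIZE) (order 6 with 4 KB pages, i.e. 256 KB blocks) and only ever lowers the order, either because the remaining request is smaller or because a high-order alloc_pages() failed. Tracing a hypothetical request:

/* npages == 70, 4 KB pages, cur_order starts at 6:
 *   1 << 6 == 64 <= 70 -> allocate a 64-page block, npages = 6
 *   order drops 6 -> 2 (while 1 << order > 6)
 *                      -> allocate a  4-page block, npages = 2
 *   order drops 2 -> 1 -> allocate a  2-page block, npages = 0
 * Any allocation failure lowers the order instead, down to
 * order 0 before the whole ICM allocation gives up. */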
142
143static int mlx4_MAP_ICM(struct mlx4_dev *dev, struct mlx4_icm *icm, u64 virt)
144{
145 return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM, icm, virt);
146}
147
148int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count)
149{
150 return mlx4_cmd(dev, virt, page_count, 0, MLX4_CMD_UNMAP_ICM,
151 MLX4_CMD_TIME_CLASS_B);
152}
153
154int mlx4_MAP_ICM_page(struct mlx4_dev *dev, u64 dma_addr, u64 virt)
155{
156 struct mlx4_cmd_mailbox *mailbox;
157 __be64 *inbox;
158 int err;
159
160 mailbox = mlx4_alloc_cmd_mailbox(dev);
161 if (IS_ERR(mailbox))
162 return PTR_ERR(mailbox);
163 inbox = mailbox->buf;
164
165 inbox[0] = cpu_to_be64(virt);
166 inbox[1] = cpu_to_be64(dma_addr);
167
168 err = mlx4_cmd(dev, mailbox->dma, 1, 0, MLX4_CMD_MAP_ICM,
169 MLX4_CMD_TIME_CLASS_B);
170
171 mlx4_free_cmd_mailbox(dev, mailbox);
172
173 if (!err)
174 mlx4_dbg(dev, "Mapped page at %llx to %llx for ICM.\n",
175 (unsigned long long) dma_addr, (unsigned long long) virt);
176
177 return err;
178}
179
180int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm)
181{
182 return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM_AUX, icm, -1);
183}
184
185int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev)
186{
187 return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_ICM_AUX, MLX4_CMD_TIME_CLASS_B);
188}
189
190int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
191{
192 int i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size);
193 int ret = 0;
194
195 mutex_lock(&table->mutex);
196
197 if (table->icm[i]) {
198 ++table->icm[i]->refcount;
199 goto out;
200 }
201
202 table->icm[i] = mlx4_alloc_icm(dev, MLX4_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
203 (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
204 __GFP_NOWARN);
205 if (!table->icm[i]) {
206 ret = -ENOMEM;
207 goto out;
208 }
209
210 if (mlx4_MAP_ICM(dev, table->icm[i], table->virt +
211 (u64) i * MLX4_TABLE_CHUNK_SIZE)) {
212 mlx4_free_icm(dev, table->icm[i]);
213 table->icm[i] = NULL;
214 ret = -ENOMEM;
215 goto out;
216 }
217
218 ++table->icm[i]->refcount;
219
220out:
221 mutex_unlock(&table->mutex);
222 return ret;
223}
224
225void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
226{
227 int i;
228
229 i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size);
230
231 mutex_lock(&table->mutex);
232
233 if (--table->icm[i]->refcount == 0) {
234 mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE,
235 MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
236 mlx4_free_icm(dev, table->icm[i]);
237 table->icm[i] = NULL;
238 }
239
240 mutex_unlock(&table->mutex);
241}
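
Callers pair these as get/use/put around an object index; for lowmem tables, mlx4_table_find() (below) returns a CPU pointer to the object while the reference is held. A hedged usage sketch with hypothetical names, error handling elided:

/* 'table' is some struct mlx4_icm_table, 'qpn' an object index. */
int err = mlx4_table_get(dev, &table, qpn);	/* maps the chunk if needed */
if (!err) {
	void *ctx = mlx4_table_find(&table, qpn); /* NULL for highmem tables */
	/* ... touch ctx ... */
	mlx4_table_put(dev, &table, qpn);	/* unmaps on last reference */
}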
242
243void *mlx4_table_find(struct mlx4_icm_table *table, int obj)
244{
245 int idx, offset, i;
246 struct mlx4_icm_chunk *chunk;
247 struct mlx4_icm *icm;
248 struct page *page = NULL;
249
250 if (!table->lowmem)
251 return NULL;
252
253 mutex_lock(&table->mutex);
254
255 idx = obj & (table->num_obj - 1);
256 icm = table->icm[idx / (MLX4_TABLE_CHUNK_SIZE / table->obj_size)];
257 offset = idx % (MLX4_TABLE_CHUNK_SIZE / table->obj_size);
258
259 if (!icm)
260 goto out;
261
262 list_for_each_entry(chunk, &icm->chunk_list, list) {
263 for (i = 0; i < chunk->npages; ++i) {
264 if (chunk->mem[i].length > offset) {
265 page = chunk->mem[i].page;
266 goto out;
267 }
268 offset -= chunk->mem[i].length;
269 }
270 }
271
272out:
273 mutex_unlock(&table->mutex);
274 return page ? lowmem_page_address(page) + offset : NULL;
275}
276
277int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
278 int start, int end)
279{
280 int inc = MLX4_TABLE_CHUNK_SIZE / table->obj_size;
281 int i, err;
282
283 for (i = start; i <= end; i += inc) {
284 err = mlx4_table_get(dev, table, i);
285 if (err)
286 goto fail;
287 }
288
289 return 0;
290
291fail:
292 while (i > start) {
293 i -= inc;
294 mlx4_table_put(dev, table, i);
295 }
296
297 return err;
298}
299
300void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
301 int start, int end)
302{
303 int i;
304
305 for (i = start; i <= end; i += MLX4_TABLE_CHUNK_SIZE / table->obj_size)
306 mlx4_table_put(dev, table, i);
307}
308
309int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
310 u64 virt, int obj_size, int nobj, int reserved,
311 int use_lowmem)
312{
313 int obj_per_chunk;
314 int num_icm;
315 unsigned chunk_size;
316 int i;
317
318 obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size;
319 num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk;
320
321 table->icm = kcalloc(num_icm, sizeof *table->icm, GFP_KERNEL);
322 if (!table->icm)
323 return -ENOMEM;
324 table->virt = virt;
325 table->num_icm = num_icm;
326 table->num_obj = nobj;
327 table->obj_size = obj_size;
328 table->lowmem = use_lowmem;
329 mutex_init(&table->mutex);
330
331 for (i = 0; i * MLX4_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) {
332 chunk_size = MLX4_TABLE_CHUNK_SIZE;
333 if ((i + 1) * MLX4_TABLE_CHUNK_SIZE > nobj * obj_size)
334 chunk_size = PAGE_ALIGN(nobj * obj_size - i * MLX4_TABLE_CHUNK_SIZE);
335
336 table->icm[i] = mlx4_alloc_icm(dev, chunk_size >> PAGE_SHIFT,
337 (use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
338 __GFP_NOWARN);
339 if (!table->icm[i])
340 goto err;
341 if (mlx4_MAP_ICM(dev, table->icm[i], virt + i * MLX4_TABLE_CHUNK_SIZE)) {
342 mlx4_free_icm(dev, table->icm[i]);
343 table->icm[i] = NULL;
344 goto err;
345 }
346
347 /*
348 * Add a reference to this ICM chunk so that it never
349 * gets freed (since it contains reserved firmware objects).
350 */
351 ++table->icm[i]->refcount;
352 }
353
354 return 0;
355
356err:
357 for (i = 0; i < num_icm; ++i)
358 if (table->icm[i]) {
359 mlx4_UNMAP_ICM(dev, virt + i * MLX4_TABLE_CHUNK_SIZE,
360 MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
361 mlx4_free_icm(dev, table->icm[i]);
362 }
363
364 return -ENOMEM;
365}
366
367void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table)
368{
369 int i;
370
371 for (i = 0; i < table->num_icm; ++i)
372 if (table->icm[i]) {
373 mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE,
374 MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
375 mlx4_free_icm(dev, table->icm[i]);
376 }
377
378 kfree(table->icm);
379}
diff --git a/drivers/net/mlx4/icm.h b/drivers/net/mlx4/icm.h
new file mode 100644
index 000000000000..bea223d879a5
--- /dev/null
+++ b/drivers/net/mlx4/icm.h
@@ -0,0 +1,135 @@
1/*
2 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
3 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#ifndef MLX4_ICM_H
35#define MLX4_ICM_H
36
37#include <linux/list.h>
38#include <linux/pci.h>
39#include <linux/mutex.h>
40
41#define MLX4_ICM_CHUNK_LEN \
42 ((256 - sizeof (struct list_head) - 2 * sizeof (int)) / \
43 (sizeof (struct scatterlist)))
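
MLX4_ICM_CHUNK_LEN sizes the scatterlist array so that a struct mlx4_icm_chunk (list head, two ints, and the array) fits in roughly 256 bytes. The exact count depends on the architecture's struct layouts; with hypothetical 64-bit sizes it works out as:

/* Hypothetical 64-bit layout:
 *   sizeof(struct list_head)   == 16
 *   2 * sizeof(int)            ==  8
 *   sizeof(struct scatterlist) == 32
 * MLX4_ICM_CHUNK_LEN == (256 - 16 - 8) / 32 == 7 entries per chunk. */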
44
45enum {
46 MLX4_ICM_PAGE_SHIFT = 12,
47 MLX4_ICM_PAGE_SIZE = 1 << MLX4_ICM_PAGE_SHIFT,
48};
49
50struct mlx4_icm_chunk {
51 struct list_head list;
52 int npages;
53 int nsg;
54 struct scatterlist mem[MLX4_ICM_CHUNK_LEN];
55};
56
57struct mlx4_icm {
58 struct list_head chunk_list;
59 int refcount;
60};
61
62struct mlx4_icm_iter {
63 struct mlx4_icm *icm;
64 struct mlx4_icm_chunk *chunk;
65 int page_idx;
66};
67
68struct mlx4_dev;
69
70struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, gfp_t gfp_mask);
71void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm);
72
73int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj);
74void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj);
75int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
76 int start, int end);
77void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
78 int start, int end);
79int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
80 u64 virt, int obj_size, int nobj, int reserved,
81 int use_lowmem);
82void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table);
85void *mlx4_table_find(struct mlx4_icm_table *table, int obj);
90
91static inline void mlx4_icm_first(struct mlx4_icm *icm,
92 struct mlx4_icm_iter *iter)
93{
94 iter->icm = icm;
95 iter->chunk = list_empty(&icm->chunk_list) ?
96 NULL : list_entry(icm->chunk_list.next,
97 struct mlx4_icm_chunk, list);
98 iter->page_idx = 0;
99}
100
101static inline int mlx4_icm_last(struct mlx4_icm_iter *iter)
102{
103 return !iter->chunk;
104}
105
106static inline void mlx4_icm_next(struct mlx4_icm_iter *iter)
107{
108 if (++iter->page_idx >= iter->chunk->nsg) {
109 if (iter->chunk->list.next == &iter->icm->chunk_list) {
110 iter->chunk = NULL;
111 return;
112 }
113
114 iter->chunk = list_entry(iter->chunk->list.next,
115 struct mlx4_icm_chunk, list);
116 iter->page_idx = 0;
117 }
118}
119
120static inline dma_addr_t mlx4_icm_addr(struct mlx4_icm_iter *iter)
121{
122 return sg_dma_address(&iter->chunk->mem[iter->page_idx]);
123}
124
125static inline unsigned long mlx4_icm_size(struct mlx4_icm_iter *iter)
126{
127 return sg_dma_len(&iter->chunk->mem[iter->page_idx]);
128}
129
130int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count);
131int mlx4_MAP_ICM_page(struct mlx4_dev *dev, u64 dma_addr, u64 virt);
132int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm);
133int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev);
134
135#endif /* MLX4_ICM_H */
diff --git a/drivers/net/mlx4/intf.c b/drivers/net/mlx4/intf.c
new file mode 100644
index 000000000000..65854f9e9c76
--- /dev/null
+++ b/drivers/net/mlx4/intf.c
@@ -0,0 +1,165 @@
1/*
2 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/mlx4/driver.h>
34
35#include "mlx4.h"
36
37struct mlx4_device_context {
38 struct list_head list;
39 struct mlx4_interface *intf;
40 void *context;
41};
42
43static LIST_HEAD(intf_list);
44static LIST_HEAD(dev_list);
45static DEFINE_MUTEX(intf_mutex);
46
47static void mlx4_add_device(struct mlx4_interface *intf, struct mlx4_priv *priv)
48{
49 struct mlx4_device_context *dev_ctx;
50
51 dev_ctx = kmalloc(sizeof *dev_ctx, GFP_KERNEL);
52 if (!dev_ctx)
53 return;
54
55 dev_ctx->intf = intf;
56 dev_ctx->context = intf->add(&priv->dev);
57
58 if (dev_ctx->context) {
59 spin_lock_irq(&priv->ctx_lock);
60 list_add_tail(&dev_ctx->list, &priv->ctx_list);
61 spin_unlock_irq(&priv->ctx_lock);
62 } else
63 kfree(dev_ctx);
64}
65
66static void mlx4_remove_device(struct mlx4_interface *intf, struct mlx4_priv *priv)
67{
68 struct mlx4_device_context *dev_ctx;
69
70 list_for_each_entry(dev_ctx, &priv->ctx_list, list)
71 if (dev_ctx->intf == intf) {
72 spin_lock_irq(&priv->ctx_lock);
73 list_del(&dev_ctx->list);
74 spin_unlock_irq(&priv->ctx_lock);
75
76 intf->remove(&priv->dev, dev_ctx->context);
77 kfree(dev_ctx);
78 return;
79 }
80}
81
82int mlx4_register_interface(struct mlx4_interface *intf)
83{
84 struct mlx4_priv *priv;
85
86 if (!intf->add || !intf->remove)
87 return -EINVAL;
88
89 mutex_lock(&intf_mutex);
90
91 list_add_tail(&intf->list, &intf_list);
92 list_for_each_entry(priv, &dev_list, dev_list)
93 mlx4_add_device(intf, priv);
94
95 mutex_unlock(&intf_mutex);
96
97 return 0;
98}
99EXPORT_SYMBOL_GPL(mlx4_register_interface);
100
101void mlx4_unregister_interface(struct mlx4_interface *intf)
102{
103 struct mlx4_priv *priv;
104
105 mutex_lock(&intf_mutex);
106
107 list_for_each_entry(priv, &dev_list, dev_list)
108 mlx4_remove_device(intf, priv);
109
110 list_del(&intf->list);
111
112 mutex_unlock(&intf_mutex);
113}
114EXPORT_SYMBOL_GPL(mlx4_unregister_interface);
115
116void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_event type,
117 int subtype, int port)
118{
119 struct mlx4_priv *priv = mlx4_priv(dev);
120 struct mlx4_device_context *dev_ctx;
121 unsigned long flags;
122
123 spin_lock_irqsave(&priv->ctx_lock, flags);
124
125 list_for_each_entry(dev_ctx, &priv->ctx_list, list)
126 if (dev_ctx->intf->event)
127 dev_ctx->intf->event(dev, dev_ctx->context, type,
128 subtype, port);
129
130 spin_unlock_irqrestore(&priv->ctx_lock, flags);
131}
132
133int mlx4_register_device(struct mlx4_dev *dev)
134{
135 struct mlx4_priv *priv = mlx4_priv(dev);
136 struct mlx4_interface *intf;
137
138 INIT_LIST_HEAD(&priv->ctx_list);
139 spin_lock_init(&priv->ctx_lock);
140
141 mutex_lock(&intf_mutex);
142
143 list_add_tail(&priv->dev_list, &dev_list);
144 list_for_each_entry(intf, &intf_list, list)
145 mlx4_add_device(intf, priv);
146
147 mutex_unlock(&intf_mutex);
148
149 return 0;
150}
151
152void mlx4_unregister_device(struct mlx4_dev *dev)
153{
154 struct mlx4_priv *priv = mlx4_priv(dev);
155 struct mlx4_interface *intf;
156
157 mutex_lock(&intf_mutex);
158
159 list_for_each_entry(intf, &intf_list, list)
160 mlx4_remove_device(intf, priv);
161
162 list_del(&priv->dev_list);
163
164 mutex_unlock(&intf_mutex);
165}
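
Client drivers (e.g. an InfiniBand protocol driver) consume this by registering a struct mlx4_interface whose add() callback returns a per-device context that is later handed back to remove() and event(). The field names below match the usage above; the full struct definition lives in <linux/mlx4/driver.h>, so treat this as a minimal hedged sketch, not the canonical client:

static void *demo_add(struct mlx4_dev *dev)
{
	/* Allocate and return per-device state, or NULL to decline
	 * this device (mlx4_add_device() then frees its context). */
	return kzalloc(16, GFP_KERNEL);
}

static void demo_remove(struct mlx4_dev *dev, void *context)
{
	kfree(context);
}

static struct mlx4_interface demo_intf = {
	.add	= demo_add,
	.remove	= demo_remove,
	/* .event is optional; mlx4_dispatch_event() skips NULL handlers */
};

/* Module init/exit would call:
 *	mlx4_register_interface(&demo_intf);
 *	mlx4_unregister_interface(&demo_intf);
 */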
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
new file mode 100644
index 000000000000..4debb024eaf9
--- /dev/null
+++ b/drivers/net/mlx4/main.c
@@ -0,0 +1,936 @@
1/*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
4 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
5 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
15 * conditions are met:
16 *
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer.
20 *
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 */
35
36#include <linux/module.h>
37#include <linux/init.h>
38#include <linux/errno.h>
39#include <linux/pci.h>
40#include <linux/dma-mapping.h>
41
42#include <linux/mlx4/device.h>
43#include <linux/mlx4/doorbell.h>
44
45#include "mlx4.h"
46#include "fw.h"
47#include "icm.h"
48
49MODULE_AUTHOR("Roland Dreier");
50MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
51MODULE_LICENSE("Dual BSD/GPL");
52MODULE_VERSION(DRV_VERSION);
53
54#ifdef CONFIG_MLX4_DEBUG
55
56int mlx4_debug_level = 0;
57module_param_named(debug_level, mlx4_debug_level, int, 0644);
58MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
59
60#endif /* CONFIG_MLX4_DEBUG */
61
62#ifdef CONFIG_PCI_MSI
63
64static int msi_x;
65module_param(msi_x, int, 0444);
66MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");
67
68#else /* CONFIG_PCI_MSI */
69
70#define msi_x (0)
71
72#endif /* CONFIG_PCI_MSI */
73
74static const char mlx4_version[] __devinitdata =
75 DRV_NAME ": Mellanox ConnectX core driver v"
76 DRV_VERSION " (" DRV_RELDATE ")\n";
77
78static struct mlx4_profile default_profile = {
79 .num_qp = 1 << 16,
80 .num_srq = 1 << 16,
81 .rdmarc_per_qp = 4,
82 .num_cq = 1 << 16,
83 .num_mcg = 1 << 13,
84 .num_mpt = 1 << 17,
85 .num_mtt = 1 << 20,
86};
87
88static int __devinit mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
89{
90 int err;
91
92 err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
93 if (err) {
94 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
95 return err;
96 }
97
98 if (dev_cap->min_page_sz > PAGE_SIZE) {
99 mlx4_err(dev, "HCA minimum page size of %d bigger than "
100 "kernel PAGE_SIZE of %ld, aborting.\n",
101 dev_cap->min_page_sz, PAGE_SIZE);
102 return -ENODEV;
103 }
104 if (dev_cap->num_ports > MLX4_MAX_PORTS) {
105 mlx4_err(dev, "HCA has %d ports, but we only support %d, "
106 "aborting.\n",
107 dev_cap->num_ports, MLX4_MAX_PORTS);
108 return -ENODEV;
109 }
110
111 if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) {
112 mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than "
113 "PCI resource 2 size of 0x%llx, aborting.\n",
114 dev_cap->uar_size,
115 (unsigned long long) pci_resource_len(dev->pdev, 2));
116 return -ENODEV;
117 }
118
119 dev->caps.num_ports = dev_cap->num_ports;
120 dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE;
121 dev->caps.vl_cap = dev_cap->max_vl;
122 dev->caps.mtu_cap = dev_cap->max_mtu;
123 dev->caps.gid_table_len = dev_cap->max_gids;
124 dev->caps.pkey_table_len = dev_cap->max_pkeys;
125 dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
126 dev->caps.bf_reg_size = dev_cap->bf_reg_size;
127 dev->caps.bf_regs_per_page = dev_cap->bf_regs_per_page;
128 dev->caps.max_sq_sg = dev_cap->max_sq_sg;
129 dev->caps.max_rq_sg = dev_cap->max_rq_sg;
130 dev->caps.max_wqes = dev_cap->max_qp_sz;
131 dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp;
132 dev->caps.reserved_qps = dev_cap->reserved_qps;
133 dev->caps.max_srq_wqes = dev_cap->max_srq_sz;
134 dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1;
135 dev->caps.reserved_srqs = dev_cap->reserved_srqs;
136 dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz;
137 dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz;
138 dev->caps.num_qp_per_mgm = MLX4_QP_PER_MGM;
139 /*
140 * Subtract 1 from the limit because we need to allocate a
141 * spare CQE so the HCA HW can tell the difference between an
142 * empty CQ and a full CQ.
143 */
144 dev->caps.max_cqes = dev_cap->max_cq_sz - 1;
145 dev->caps.reserved_cqs = dev_cap->reserved_cqs;
146 dev->caps.reserved_eqs = dev_cap->reserved_eqs;
147 dev->caps.reserved_mtts = dev_cap->reserved_mtts;
148 dev->caps.reserved_mrws = dev_cap->reserved_mrws;
149 dev->caps.reserved_uars = dev_cap->reserved_uars;
150 dev->caps.reserved_pds = dev_cap->reserved_pds;
151 dev->caps.port_width_cap = dev_cap->max_port_width;
152 dev->caps.mtt_entry_sz = MLX4_MTT_ENTRY_PER_SEG * dev_cap->mtt_entry_sz;
153 dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1);
154 dev->caps.flags = dev_cap->flags;
155 dev->caps.stat_rate_support = dev_cap->stat_rate_support;
156
157 return 0;
158}
159
160static int __devinit mlx4_load_fw(struct mlx4_dev *dev)
161{
162 struct mlx4_priv *priv = mlx4_priv(dev);
163 int err;
164
165 priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
166 GFP_HIGHUSER | __GFP_NOWARN);
167 if (!priv->fw.fw_icm) {
168 mlx4_err(dev, "Couldn't allocate FW area, aborting.\n");
169 return -ENOMEM;
170 }
171
172 err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
173 if (err) {
174 mlx4_err(dev, "MAP_FA command failed, aborting.\n");
175 goto err_free;
176 }
177
178 err = mlx4_RUN_FW(dev);
179 if (err) {
180 mlx4_err(dev, "RUN_FW command failed, aborting.\n");
181 goto err_unmap_fa;
182 }
183
184 return 0;
185
186err_unmap_fa:
187 mlx4_UNMAP_FA(dev);
188
189err_free:
190 mlx4_free_icm(dev, priv->fw.fw_icm);
191 return err;
192}
193
194static int __devinit mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
195 int cmpt_entry_sz)
196{
197 struct mlx4_priv *priv = mlx4_priv(dev);
198 int err;
199
200 err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
201 cmpt_base +
202 ((u64) (MLX4_CMPT_TYPE_QP *
203 cmpt_entry_sz) << MLX4_CMPT_SHIFT),
204 cmpt_entry_sz, dev->caps.num_qps,
205 dev->caps.reserved_qps, 0);
206 if (err)
207 goto err;
208
209 err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table,
210 cmpt_base +
211 ((u64) (MLX4_CMPT_TYPE_SRQ *
212 cmpt_entry_sz) << MLX4_CMPT_SHIFT),
213 cmpt_entry_sz, dev->caps.num_srqs,
214 dev->caps.reserved_srqs, 0);
215 if (err)
216 goto err_qp;
217
218 err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table,
219 cmpt_base +
220 ((u64) (MLX4_CMPT_TYPE_CQ *
221 cmpt_entry_sz) << MLX4_CMPT_SHIFT),
222 cmpt_entry_sz, dev->caps.num_cqs,
223 dev->caps.reserved_cqs, 0);
224 if (err)
225 goto err_srq;
226
227 err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
228 cmpt_base +
229 ((u64) (MLX4_CMPT_TYPE_EQ *
230 cmpt_entry_sz) << MLX4_CMPT_SHIFT),
231 cmpt_entry_sz,
232 roundup_pow_of_two(MLX4_NUM_EQ +
233 dev->caps.reserved_eqs),
234 MLX4_NUM_EQ + dev->caps.reserved_eqs, 0);
235 if (err)
236 goto err_cq;
237
238 return 0;
239
240err_cq:
241 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
242
243err_srq:
244 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
245
246err_qp:
247 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
248
249err:
250 return err;
251}
252
253static int __devinit mlx4_init_icm(struct mlx4_dev *dev,
254 struct mlx4_dev_cap *dev_cap,
255 struct mlx4_init_hca_param *init_hca,
256 u64 icm_size)
257{
258 struct mlx4_priv *priv = mlx4_priv(dev);
259 u64 aux_pages;
260 int err;
261
262 err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
263 if (err) {
264 mlx4_err(dev, "SET_ICM_SIZE command failed, aborting.\n");
265 return err;
266 }
267
268 mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory.\n",
269 (unsigned long long) icm_size >> 10,
270 (unsigned long long) aux_pages << 2);
271
272 priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
273 GFP_HIGHUSER | __GFP_NOWARN);
274 if (!priv->fw.aux_icm) {
275 mlx4_err(dev, "Couldn't allocate aux memory, aborting.\n");
276 return -ENOMEM;
277 }
278
279 err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
280 if (err) {
281 mlx4_err(dev, "MAP_ICM_AUX command failed, aborting.\n");
282 goto err_free_aux;
283 }
284
285 err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
286 if (err) {
287 mlx4_err(dev, "Failed to map cMPT context memory, aborting.\n");
288 goto err_unmap_aux;
289 }
290
291 err = mlx4_map_eq_icm(dev, init_hca->eqc_base);
292 if (err) {
293 mlx4_err(dev, "Failed to map EQ context memory, aborting.\n");
294 goto err_unmap_cmpt;
295 }
296
297 err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
298 init_hca->mtt_base,
299 dev->caps.mtt_entry_sz,
300 dev->caps.num_mtt_segs,
301 dev->caps.reserved_mtts, 1);
302 if (err) {
303 mlx4_err(dev, "Failed to map MTT context memory, aborting.\n");
304 goto err_unmap_eq;
305 }
306
307 err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table,
308 init_hca->dmpt_base,
309 dev_cap->dmpt_entry_sz,
310 dev->caps.num_mpts,
311 dev->caps.reserved_mrws, 1);
312 if (err) {
313 mlx4_err(dev, "Failed to map dMPT context memory, aborting.\n");
314 goto err_unmap_mtt;
315 }
316
317 err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table,
318 init_hca->qpc_base,
319 dev_cap->qpc_entry_sz,
320 dev->caps.num_qps,
321 dev->caps.reserved_qps, 0);
322 if (err) {
323 mlx4_err(dev, "Failed to map QP context memory, aborting.\n");
324 goto err_unmap_dmpt;
325 }
326
327 err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table,
328 init_hca->auxc_base,
329 dev_cap->aux_entry_sz,
330 dev->caps.num_qps,
331 dev->caps.reserved_qps, 0);
332 if (err) {
333 mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n");
334 goto err_unmap_qp;
335 }
336
337 err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table,
338 init_hca->altc_base,
339 dev_cap->altc_entry_sz,
340 dev->caps.num_qps,
341 dev->caps.reserved_qps, 0);
342 if (err) {
343 mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n");
344 goto err_unmap_auxc;
345 }
346
347 err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table,
348 init_hca->rdmarc_base,
349 dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
350 dev->caps.num_qps,
351 dev->caps.reserved_qps, 0);
352 if (err) {
353 mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
354 goto err_unmap_altc;
355 }
356
357 err = mlx4_init_icm_table(dev, &priv->cq_table.table,
358 init_hca->cqc_base,
359 dev_cap->cqc_entry_sz,
360 dev->caps.num_cqs,
361 dev->caps.reserved_cqs, 0);
362 if (err) {
363 mlx4_err(dev, "Failed to map CQ context memory, aborting.\n");
364 goto err_unmap_rdmarc;
365 }
366
367 err = mlx4_init_icm_table(dev, &priv->srq_table.table,
368 init_hca->srqc_base,
369 dev_cap->srq_entry_sz,
370 dev->caps.num_srqs,
371 dev->caps.reserved_srqs, 0);
372 if (err) {
373 mlx4_err(dev, "Failed to map SRQ context memory, aborting.\n");
374 goto err_unmap_cq;
375 }
376
377 /*
378 * It's not strictly required, but for simplicity just map the
379 * whole multicast group table now. The table isn't very big
380 * and it's a lot easier than trying to track ref counts.
381 */
382 err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
383 init_hca->mc_base, MLX4_MGM_ENTRY_SIZE,
384 dev->caps.num_mgms + dev->caps.num_amgms,
385 dev->caps.num_mgms + dev->caps.num_amgms,
386 0);
387 if (err) {
388 mlx4_err(dev, "Failed to map MCG context memory, aborting.\n");
389 goto err_unmap_srq;
390 }
391
392 return 0;
393
394err_unmap_srq:
395 mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
396
397err_unmap_cq:
398 mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
399
400err_unmap_rdmarc:
401 mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
402
403err_unmap_altc:
404 mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
405
406err_unmap_auxc:
407 mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
408
409err_unmap_qp:
410 mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
411
412err_unmap_dmpt:
413 mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
414
415err_unmap_mtt:
416 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
417
418err_unmap_eq:
419 mlx4_unmap_eq_icm(dev);
420
421err_unmap_cmpt:
422 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
423 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
424 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
425 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
426
427err_unmap_aux:
428 mlx4_UNMAP_ICM_AUX(dev);
429
430err_free_aux:
431 mlx4_free_icm(dev, priv->fw.aux_icm);
432
433 return err;
434}
435
436static void mlx4_free_icms(struct mlx4_dev *dev)
437{
438 struct mlx4_priv *priv = mlx4_priv(dev);
439
440 mlx4_cleanup_icm_table(dev, &priv->mcg_table.table);
441 mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
442 mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
443 mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
444 mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
445 mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
446 mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
447 mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
448 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
449 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
450 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
451 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
452 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
453 mlx4_unmap_eq_icm(dev);
454
455 mlx4_UNMAP_ICM_AUX(dev);
456 mlx4_free_icm(dev, priv->fw.aux_icm);
457}
458
459static void mlx4_close_hca(struct mlx4_dev *dev)
460{
461 mlx4_CLOSE_HCA(dev, 0);
462 mlx4_free_icms(dev);
463 mlx4_UNMAP_FA(dev);
464 mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm);
465}
466
467static int __devinit mlx4_init_hca(struct mlx4_dev *dev)
468{
469 struct mlx4_priv *priv = mlx4_priv(dev);
470 struct mlx4_adapter adapter;
471 struct mlx4_dev_cap dev_cap;
472 struct mlx4_profile profile;
473 struct mlx4_init_hca_param init_hca;
474 u64 icm_size;
475 int err;
476
477 err = mlx4_QUERY_FW(dev);
478 if (err) {
479 mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
480 return err;
481 }
482
483 err = mlx4_load_fw(dev);
484 if (err) {
485 mlx4_err(dev, "Failed to start FW, aborting.\n");
486 return err;
487 }
488
489 err = mlx4_dev_cap(dev, &dev_cap);
490 if (err) {
491 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
492 goto err_stop_fw;
493 }
494
495 profile = default_profile;
496
497 icm_size = mlx4_make_profile(dev, &profile, &dev_cap, &init_hca);
498 if ((long long) icm_size < 0) {
499 err = icm_size;
500 goto err_stop_fw;
501 }
502
503 init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
504
505 err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
506 if (err)
507 goto err_stop_fw;
508
509 err = mlx4_INIT_HCA(dev, &init_hca);
510 if (err) {
511 mlx4_err(dev, "INIT_HCA command failed, aborting.\n");
512 goto err_free_icm;
513 }
514
515 err = mlx4_QUERY_ADAPTER(dev, &adapter);
516 if (err) {
517 mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n");
518 goto err_close;
519 }
520
521 priv->eq_table.inta_pin = adapter.inta_pin;
522 priv->rev_id = adapter.revision_id;
523 memcpy(priv->board_id, adapter.board_id, sizeof priv->board_id);
524
525 return 0;
526
527err_close:
528 mlx4_close_hca(dev);
529
530err_free_icm:
531 mlx4_free_icms(dev);
532
533err_stop_fw:
534 mlx4_UNMAP_FA(dev);
535 mlx4_free_icm(dev, priv->fw.fw_icm);
536
537 return err;
538}
539
540static int __devinit mlx4_setup_hca(struct mlx4_dev *dev)
541{
542 struct mlx4_priv *priv = mlx4_priv(dev);
543 int err;
544
545 MLX4_INIT_DOORBELL_LOCK(&priv->doorbell_lock);
546
547 err = mlx4_init_uar_table(dev);
548 if (err) {
549 mlx4_err(dev, "Failed to initialize "
550 "user access region table, aborting.\n");
551 return err;
552 }
553
554 err = mlx4_uar_alloc(dev, &priv->driver_uar);
555 if (err) {
556 mlx4_err(dev, "Failed to allocate driver access region, "
557 "aborting.\n");
558 goto err_uar_table_free;
559 }
560
561 priv->kar = ioremap(priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
562 if (!priv->kar) {
563 mlx4_err(dev, "Couldn't map kernel access region, "
564 "aborting.\n");
565 err = -ENOMEM;
566 goto err_uar_free;
567 }
568
569 err = mlx4_init_pd_table(dev);
570 if (err) {
571 mlx4_err(dev, "Failed to initialize "
572 "protection domain table, aborting.\n");
573 goto err_kar_unmap;
574 }
575
576 err = mlx4_init_mr_table(dev);
577 if (err) {
578 mlx4_err(dev, "Failed to initialize "
579 "memory region table, aborting.\n");
580 goto err_pd_table_free;
581 }
582
583 mlx4_map_catas_buf(dev);
584
585 err = mlx4_init_eq_table(dev);
586 if (err) {
587 mlx4_err(dev, "Failed to initialize "
588 "event queue table, aborting.\n");
589 goto err_catas_buf;
590 }
591
592 err = mlx4_cmd_use_events(dev);
593 if (err) {
594 mlx4_err(dev, "Failed to switch to event-driven "
595 "firmware commands, aborting.\n");
596 goto err_eq_table_free;
597 }
598
599 err = mlx4_NOP(dev);
600 if (err) {
601 mlx4_err(dev, "NOP command failed to generate interrupt "
602 "(IRQ %d), aborting.\n",
603 priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
604 if (dev->flags & MLX4_FLAG_MSI_X)
605 mlx4_err(dev, "Try again with MSI-X disabled.\n");
606 else
607 mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
608
609 goto err_cmd_poll;
610 }
611
612 mlx4_dbg(dev, "NOP command IRQ test passed\n");
613
614 err = mlx4_init_cq_table(dev);
615 if (err) {
616 mlx4_err(dev, "Failed to initialize "
617 "completion queue table, aborting.\n");
618 goto err_cmd_poll;
619 }
620
621 err = mlx4_init_srq_table(dev);
622 if (err) {
623 mlx4_err(dev, "Failed to initialize "
624 "shared receive queue table, aborting.\n");
625 goto err_cq_table_free;
626 }
627
628 err = mlx4_init_qp_table(dev);
629 if (err) {
630 mlx4_err(dev, "Failed to initialize "
631 "queue pair table, aborting.\n");
632 goto err_srq_table_free;
633 }
634
635 err = mlx4_init_mcg_table(dev);
636 if (err) {
637 mlx4_err(dev, "Failed to initialize "
638 "multicast group table, aborting.\n");
639 goto err_qp_table_free;
640 }
641
642 return 0;
643
644err_qp_table_free:
645 mlx4_cleanup_qp_table(dev);
646
647err_srq_table_free:
648 mlx4_cleanup_srq_table(dev);
649
650err_cq_table_free:
651 mlx4_cleanup_cq_table(dev);
652
653err_cmd_poll:
654 mlx4_cmd_use_polling(dev);
655
656err_eq_table_free:
657 mlx4_cleanup_eq_table(dev);
658
659err_catas_buf:
660 mlx4_unmap_catas_buf(dev);
661 mlx4_cleanup_mr_table(dev);
662
663err_pd_table_free:
664 mlx4_cleanup_pd_table(dev);
665
666err_kar_unmap:
667 iounmap(priv->kar);
668
669err_uar_free:
670 mlx4_uar_free(dev, &priv->driver_uar);
671
672err_uar_table_free:
673 mlx4_cleanup_uar_table(dev);
674 return err;
675}
676
677static void __devinit mlx4_enable_msi_x(struct mlx4_dev *dev)
678{
679 struct mlx4_priv *priv = mlx4_priv(dev);
680 struct msix_entry entries[MLX4_NUM_EQ];
681 int err;
682 int i;
683
684 if (msi_x) {
685 for (i = 0; i < MLX4_NUM_EQ; ++i)
686 entries[i].entry = i;
687
688 err = pci_enable_msix(dev->pdev, entries, ARRAY_SIZE(entries));
689 if (err) {
690 if (err > 0)
691 mlx4_info(dev, "Only %d MSI-X vectors available, "
692 "not using MSI-X\n", err);
693 goto no_msi;
694 }
695
696 for (i = 0; i < MLX4_NUM_EQ; ++i)
697 priv->eq_table.eq[i].irq = entries[i].vector;
698
699 dev->flags |= MLX4_FLAG_MSI_X;
700 return;
701 }
702
703no_msi:
704 for (i = 0; i < MLX4_NUM_EQ; ++i)
705 priv->eq_table.eq[i].irq = dev->pdev->irq;
706}
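
/*
 * A note on the fallback above: pci_enable_msix() returns 0 on
 * success, a positive count of available vectors if the request could
 * not be met in full, or a negative errno.  Either non-zero case lands
 * at no_msi, where all MLX4_NUM_EQ (three) event queues end up sharing
 * the single legacy pdev->irq line.
 */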
707
708static int __devinit mlx4_init_one(struct pci_dev *pdev,
709 const struct pci_device_id *id)
710{
711 static int mlx4_version_printed;
712 struct mlx4_priv *priv;
713 struct mlx4_dev *dev;
714 int err;
715
716 if (!mlx4_version_printed) {
717 printk(KERN_INFO "%s", mlx4_version);
718 ++mlx4_version_printed;
719 }
720
721 printk(KERN_INFO PFX "Initializing %s\n",
722 pci_name(pdev));
723
724 err = pci_enable_device(pdev);
725 if (err) {
726 dev_err(&pdev->dev, "Cannot enable PCI device, "
727 "aborting.\n");
728 return err;
729 }
730
731 /*
732 * Check for BARs. We expect 0: 1MB, 2: 8MB, 4: DDR (may not
733 * be present)
734 */
735 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
736 pci_resource_len(pdev, 0) != 1 << 20) {
737 dev_err(&pdev->dev, "Missing DCS, aborting.\n");
738 err = -ENODEV;
739 goto err_disable_pdev;
740 }
741 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
742 dev_err(&pdev->dev, "Missing UAR, aborting.\n");
743 err = -ENODEV;
744 goto err_disable_pdev;
745 }
746
747 err = pci_request_region(pdev, 0, DRV_NAME);
748 if (err) {
749 dev_err(&pdev->dev, "Cannot request control region, aborting.\n");
750 goto err_disable_pdev;
751 }
752
753 err = pci_request_region(pdev, 2, DRV_NAME);
754 if (err) {
755 dev_err(&pdev->dev, "Cannot request UAR region, aborting.\n");
756 goto err_release_bar0;
757 }
758
759 pci_set_master(pdev);
760
761 err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
762 if (err) {
763 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
764 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
765 if (err) {
766 dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
767 goto err_release_bar2;
768 }
769 }
770 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
771 if (err) {
772 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit "
773 "consistent PCI DMA mask.\n");
774 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
775 if (err) {
776 dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, "
777 "aborting.\n");
778 goto err_release_bar2;
779 }
780 }
781
782 priv = kzalloc(sizeof *priv, GFP_KERNEL);
783 if (!priv) {
784 dev_err(&pdev->dev, "Device struct alloc failed, "
785 "aborting.\n");
786 err = -ENOMEM;
787 goto err_release_bar2;
788 }
789
790 dev = &priv->dev;
791 dev->pdev = pdev;
792
793 /*
794 * Now reset the HCA before we touch the PCI capabilities or
795 * attempt a firmware command, since a boot ROM may have left
796 * the HCA in an undefined state.
797 */
798 err = mlx4_reset(dev);
799 if (err) {
800 mlx4_err(dev, "Failed to reset HCA, aborting.\n");
801 goto err_free_dev;
802 }
803
804 mlx4_enable_msi_x(dev);
805
806 if ((err = mlx4_cmd_init(dev))) {
807 mlx4_err(dev, "Failed to init command interface, aborting.\n");
808 goto err_free_dev;
809 }
810
811 err = mlx4_init_hca(dev);
812 if (err)
813 goto err_cmd;
814
815 err = mlx4_setup_hca(dev);
816 if (err)
817 goto err_close;
818
819 err = mlx4_register_device(dev);
820 if (err)
821 goto err_cleanup;
822
823 pci_set_drvdata(pdev, dev);
824
825 return 0;
826
827err_cleanup:
828 mlx4_cleanup_mcg_table(dev);
829 mlx4_cleanup_qp_table(dev);
830 mlx4_cleanup_srq_table(dev);
831 mlx4_cleanup_cq_table(dev);
832 mlx4_cmd_use_polling(dev);
833 mlx4_cleanup_eq_table(dev);
834
835 mlx4_unmap_catas_buf(dev);
836
837 mlx4_cleanup_mr_table(dev);
838 mlx4_cleanup_pd_table(dev);
839 mlx4_cleanup_uar_table(dev);
840
841err_close:
842 mlx4_close_hca(dev);
843
844err_cmd:
845 mlx4_cmd_cleanup(dev);
846
847err_free_dev:
848 if (dev->flags & MLX4_FLAG_MSI_X)
849 pci_disable_msix(pdev);
850
851 kfree(priv);
852
853err_release_bar2:
854 pci_release_region(pdev, 2);
855
856err_release_bar0:
857 pci_release_region(pdev, 0);
858
859err_disable_pdev:
860 pci_disable_device(pdev);
861 pci_set_drvdata(pdev, NULL);
862 return err;
863}
864
865static void __devexit mlx4_remove_one(struct pci_dev *pdev)
866{
867 struct mlx4_dev *dev = pci_get_drvdata(pdev);
868 struct mlx4_priv *priv = mlx4_priv(dev);
869 int p;
870
871 if (dev) {
872 mlx4_unregister_device(dev);
873
874 for (p = 1; p <= dev->caps.num_ports; ++p)
875 mlx4_CLOSE_PORT(dev, p);
876
877 mlx4_cleanup_mcg_table(dev);
878 mlx4_cleanup_qp_table(dev);
879 mlx4_cleanup_srq_table(dev);
880 mlx4_cleanup_cq_table(dev);
881 mlx4_cmd_use_polling(dev);
882 mlx4_cleanup_eq_table(dev);
883
884 mlx4_unmap_catas_buf(dev);
885
886 mlx4_cleanup_mr_table(dev);
887 mlx4_cleanup_pd_table(dev);
888
889 iounmap(priv->kar);
890 mlx4_uar_free(dev, &priv->driver_uar);
891 mlx4_cleanup_uar_table(dev);
892 mlx4_close_hca(dev);
893 mlx4_cmd_cleanup(dev);
894
895 if (dev->flags & MLX4_FLAG_MSI_X)
896 pci_disable_msix(pdev);
897
898 kfree(priv);
899 pci_release_region(pdev, 2);
900 pci_release_region(pdev, 0);
901 pci_disable_device(pdev);
902 pci_set_drvdata(pdev, NULL);
903 }
904}
905
906static struct pci_device_id mlx4_pci_table[] = {
907 { PCI_VDEVICE(MELLANOX, 0x6340) }, /* MT25408 "Hermon" SDR */
908 { PCI_VDEVICE(MELLANOX, 0x634a) }, /* MT25408 "Hermon" DDR */
909 { PCI_VDEVICE(MELLANOX, 0x6354) }, /* MT25408 "Hermon" QDR */
910 { 0, }
911};
912
913MODULE_DEVICE_TABLE(pci, mlx4_pci_table);
914
915static struct pci_driver mlx4_driver = {
916 .name = DRV_NAME,
917 .id_table = mlx4_pci_table,
918 .probe = mlx4_init_one,
919 .remove = __devexit_p(mlx4_remove_one)
920};
921
922static int __init mlx4_init(void)
923{
924 int ret;
925
926 ret = pci_register_driver(&mlx4_driver);
927 return ret < 0 ? ret : 0;
928}
929
930static void __exit mlx4_cleanup(void)
931{
932 pci_unregister_driver(&mlx4_driver);
933}
934
935module_init(mlx4_init);
936module_exit(mlx4_cleanup);
diff --git a/drivers/net/mlx4/mcg.c b/drivers/net/mlx4/mcg.c
new file mode 100644
index 000000000000..672024a0ee71
--- /dev/null
+++ b/drivers/net/mlx4/mcg.c
@@ -0,0 +1,380 @@
1/*
2 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/init.h>
34#include <linux/string.h>
35#include <linux/slab.h>
36
37#include <linux/mlx4/cmd.h>
38
39#include "mlx4.h"
40
41struct mlx4_mgm {
42 __be32 next_gid_index;
43 __be32 members_count;
44 u32 reserved[2];
45 u8 gid[16];
46 __be32 qp[MLX4_QP_PER_MGM];
47};
48
49static const u8 zero_gid[16]; /* automatically initialized to 0 */
50
51static int mlx4_READ_MCG(struct mlx4_dev *dev, int index,
52 struct mlx4_cmd_mailbox *mailbox)
53{
54 return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG,
55 MLX4_CMD_TIME_CLASS_A);
56}
57
58static int mlx4_WRITE_MCG(struct mlx4_dev *dev, int index,
59 struct mlx4_cmd_mailbox *mailbox)
60{
61 return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG,
62 MLX4_CMD_TIME_CLASS_A);
63}
64
65static int mlx4_MGID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
66 u16 *hash)
67{
68 u64 imm;
69 int err;
70
71 err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, 0, MLX4_CMD_MGID_HASH,
72 MLX4_CMD_TIME_CLASS_A);
73
74 if (!err)
75 *hash = imm;
76
77 return err;
78}
79
80/*
81 * Caller must hold MCG table semaphore. gid and mgm parameters must
82 * be properly aligned for command interface.
83 *
84 * Returns 0 unless a firmware command error occurs.
85 *
86 * If GID is found in MGM or MGM is empty, *index = *hash, *prev = -1
87 * and *mgm holds MGM entry.
88 *
89 * If GID is found in AMGM, *index = index in AMGM, *prev = index of
90 * previous entry in hash chain and *mgm holds AMGM entry.
91 *
92 * If no AMGM exists for given gid, *index = -1, *prev = index of last
93 * entry in hash chain and *mgm holds end of hash chain.
94 */
95static int find_mgm(struct mlx4_dev *dev,
96 u8 *gid, struct mlx4_cmd_mailbox *mgm_mailbox,
97 u16 *hash, int *prev, int *index)
98{
99 struct mlx4_cmd_mailbox *mailbox;
100 struct mlx4_mgm *mgm = mgm_mailbox->buf;
101 u8 *mgid;
102 int err;
103
104 mailbox = mlx4_alloc_cmd_mailbox(dev);
105 if (IS_ERR(mailbox))
106 return -ENOMEM;
107 mgid = mailbox->buf;
108
109 memcpy(mgid, gid, 16);
110
111 err = mlx4_MGID_HASH(dev, mailbox, hash);
112 mlx4_free_cmd_mailbox(dev, mailbox);
113 if (err)
114 return err;
115
116 if (0) /* compiled-out debug dump of the computed MGID hash */
117 mlx4_dbg(dev, "Hash for %04x:%04x:%04x:%04x:"
118 "%04x:%04x:%04x:%04x is %04x\n",
119 be16_to_cpu(((__be16 *) gid)[0]),
120 be16_to_cpu(((__be16 *) gid)[1]),
121 be16_to_cpu(((__be16 *) gid)[2]),
122 be16_to_cpu(((__be16 *) gid)[3]),
123 be16_to_cpu(((__be16 *) gid)[4]),
124 be16_to_cpu(((__be16 *) gid)[5]),
125 be16_to_cpu(((__be16 *) gid)[6]),
126 be16_to_cpu(((__be16 *) gid)[7]),
127 *hash);
128
129 *index = *hash;
130 *prev = -1;
131
132 do {
133 err = mlx4_READ_MCG(dev, *index, mgm_mailbox);
134 if (err)
135 return err;
136
137 if (!memcmp(mgm->gid, zero_gid, 16)) {
138 if (*index != *hash) {
139 mlx4_err(dev, "Found zero MGID in AMGM.\n");
140 err = -EINVAL;
141 }
142 return err;
143 }
144
145 if (!memcmp(mgm->gid, gid, 16))
146 return err;
147
148 *prev = *index;
149 *index = be32_to_cpu(mgm->next_gid_index) >> 6;
150 } while (*index);
151
152 *index = -1;
153 return err;
154}
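
/*
 * Worked example of the walk above (values are illustrative only):
 * suppose MGID_HASH returns *hash = 5 for a GID g.  MGM entry 5 is
 * read first; if it holds a different GID whose next_gid_index is
 * (23 << 6), the loop continues at AMGM entry 23.  If entry 23 holds
 * g, the function returns *index = 23 and *prev = 5.  If entry 23
 * ends the chain (next_gid_index == 0) without matching, it returns
 * *index = -1 and *prev = 23, telling the caller where to link a new
 * AMGM entry.
 */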
155
156int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16])
157{
158 struct mlx4_priv *priv = mlx4_priv(dev);
159 struct mlx4_cmd_mailbox *mailbox;
160 struct mlx4_mgm *mgm;
161 u32 members_count;
162 u16 hash;
163 int index, prev;
164 int link = 0;
165 int i;
166 int err;
167
168 mailbox = mlx4_alloc_cmd_mailbox(dev);
169 if (IS_ERR(mailbox))
170 return PTR_ERR(mailbox);
171 mgm = mailbox->buf;
172
173 mutex_lock(&priv->mcg_table.mutex);
174
175 err = find_mgm(dev, gid, mailbox, &hash, &prev, &index);
176 if (err)
177 goto out;
178
179 if (index != -1) {
180 if (!memcmp(mgm->gid, zero_gid, 16))
181 memcpy(mgm->gid, gid, 16);
182 } else {
183 link = 1;
184
185 index = mlx4_bitmap_alloc(&priv->mcg_table.bitmap);
186 if (index == -1) {
187 mlx4_err(dev, "No AMGM entries left\n");
188 err = -ENOMEM;
189 goto out;
190 }
191 index += dev->caps.num_mgms;
192
193 err = mlx4_READ_MCG(dev, index, mailbox);
194 if (err)
195 goto out;
196
197 memset(mgm, 0, sizeof *mgm);
198 memcpy(mgm->gid, gid, 16);
199 }
200
201 members_count = be32_to_cpu(mgm->members_count);
202 if (members_count == MLX4_QP_PER_MGM) {
203 mlx4_err(dev, "MGM at index %x is full.\n", index);
204 err = -ENOMEM;
205 goto out;
206 }
207
208 for (i = 0; i < members_count; ++i)
209 if (mgm->qp[i] == cpu_to_be32(qp->qpn)) {
210 mlx4_dbg(dev, "QP %06x already a member of MGM\n", qp->qpn);
211 err = 0;
212 goto out;
213 }
214
215 mgm->qp[members_count++] = cpu_to_be32(qp->qpn);
216 mgm->members_count = cpu_to_be32(members_count);
217
218 err = mlx4_WRITE_MCG(dev, index, mailbox);
219 if (err)
220 goto out;
221
222 if (!link)
223 goto out;
224
225 err = mlx4_READ_MCG(dev, prev, mailbox);
226 if (err)
227 goto out;
228
229 mgm->next_gid_index = cpu_to_be32(index << 6);
230
231 err = mlx4_WRITE_MCG(dev, prev, mailbox);
232 if (err)
233 goto out;
234
235out:
236 if (err && link && index != -1) {
237 if (index < dev->caps.num_mgms)
238 mlx4_warn(dev, "Got AMGM index %d < %d",
239 index, dev->caps.num_mgms);
240 else
241 mlx4_bitmap_free(&priv->mcg_table.bitmap,
242 index - dev->caps.num_mgms);
243 }
244 mutex_unlock(&priv->mcg_table.mutex);
245
246 mlx4_free_cmd_mailbox(dev, mailbox);
247 return err;
248}
249EXPORT_SYMBOL_GPL(mlx4_multicast_attach);
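
/*
 * The chain link manipulated above keeps the AMGM entry index in the
 * upper bits of next_gid_index, shifted left by 6 with the low 6 bits
 * reserved.  A minimal sketch of accessors for that encoding --
 * hypothetical helpers, not part of this driver:
 */
#if 0
static int mgm_next_index(struct mlx4_mgm *mgm)
{
	return be32_to_cpu(mgm->next_gid_index) >> 6;
}

static void mgm_set_next_index(struct mlx4_mgm *mgm, int index)
{
	mgm->next_gid_index = cpu_to_be32(index << 6);
}
#endif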
250
251int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16])
252{
253 struct mlx4_priv *priv = mlx4_priv(dev);
254 struct mlx4_cmd_mailbox *mailbox;
255 struct mlx4_mgm *mgm;
256 u32 members_count;
257 u16 hash;
258 int prev, index;
259 int i, loc;
260 int err;
261
262 mailbox = mlx4_alloc_cmd_mailbox(dev);
263 if (IS_ERR(mailbox))
264 return PTR_ERR(mailbox);
265 mgm = mailbox->buf;
266
267 mutex_lock(&priv->mcg_table.mutex);
268
269 err = find_mgm(dev, gid, mailbox, &hash, &prev, &index);
270 if (err)
271 goto out;
272
273 if (index == -1) {
274 mlx4_err(dev, "MGID %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x "
275 "not found\n",
276 be16_to_cpu(((__be16 *) gid)[0]),
277 be16_to_cpu(((__be16 *) gid)[1]),
278 be16_to_cpu(((__be16 *) gid)[2]),
279 be16_to_cpu(((__be16 *) gid)[3]),
280 be16_to_cpu(((__be16 *) gid)[4]),
281 be16_to_cpu(((__be16 *) gid)[5]),
282 be16_to_cpu(((__be16 *) gid)[6]),
283 be16_to_cpu(((__be16 *) gid)[7]));
284 err = -EINVAL;
285 goto out;
286 }
287
288 members_count = be32_to_cpu(mgm->members_count);
289 for (loc = -1, i = 0; i < members_count; ++i)
290 if (mgm->qp[i] == cpu_to_be32(qp->qpn))
291 loc = i;
292
293 if (loc == -1) {
294 mlx4_err(dev, "QP %06x not found in MGM\n", qp->qpn);
295 err = -EINVAL;
296 goto out;
297 }
298
299
300 mgm->members_count = cpu_to_be32(--members_count);
301 mgm->qp[loc] = mgm->qp[i - 1];
302 mgm->qp[i - 1] = 0;
303
304 err = mlx4_WRITE_MCG(dev, index, mailbox);
305 if (err)
306 goto out;
307
308 if (i != 1) /* i == old members_count: other members remain */
309 goto out;
310
311 if (prev == -1) {
312 /* Remove entry from MGM */
313 int amgm_index = be32_to_cpu(mgm->next_gid_index) >> 6;
314 if (amgm_index) {
315 err = mlx4_READ_MCG(dev, amgm_index, mailbox);
316 if (err)
317 goto out;
318 } else
319 memset(mgm->gid, 0, 16);
320
321 err = mlx4_WRITE_MCG(dev, index, mailbox);
322 if (err)
323 goto out;
324
325 if (amgm_index) {
326 if (amgm_index < dev->caps.num_mgms)
327 mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d",
328 index, amgm_index, dev->caps.num_mgms);
329 else
330 mlx4_bitmap_free(&priv->mcg_table.bitmap,
331 amgm_index - dev->caps.num_mgms);
332 }
333 } else {
334 /* Remove entry from AMGM */
335 int cur_next_index = be32_to_cpu(mgm->next_gid_index) >> 6;
336 err = mlx4_READ_MCG(dev, prev, mailbox);
337 if (err)
338 goto out;
339
340 mgm->next_gid_index = cpu_to_be32(cur_next_index << 6);
341
342 err = mlx4_WRITE_MCG(dev, prev, mailbox);
343 if (err)
344 goto out;
345
346 if (index < dev->caps.num_mgms)
347 mlx4_warn(dev, "entry %d had next AMGM index %d < %d",
348 prev, index, dev->caps.num_mgms);
349 else
350 mlx4_bitmap_free(&priv->mcg_table.bitmap,
351 index - dev->caps.num_mgms);
352 }
353
354out:
355 mutex_unlock(&priv->mcg_table.mutex);
356
357 mlx4_free_cmd_mailbox(dev, mailbox);
358 return err;
359}
360EXPORT_SYMBOL_GPL(mlx4_multicast_detach);
361
362int __devinit mlx4_init_mcg_table(struct mlx4_dev *dev)
363{
364 struct mlx4_priv *priv = mlx4_priv(dev);
365 int err;
366
367 err = mlx4_bitmap_init(&priv->mcg_table.bitmap,
368 dev->caps.num_amgms, dev->caps.num_amgms - 1, 0);
369 if (err)
370 return err;
371
372 mutex_init(&priv->mcg_table.mutex);
373
374 return 0;
375}
376
377void mlx4_cleanup_mcg_table(struct mlx4_dev *dev)
378{
379 mlx4_bitmap_cleanup(&mlx4_priv(dev)->mcg_table.bitmap);
380}
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
new file mode 100644
index 000000000000..9befbae3d196
--- /dev/null
+++ b/drivers/net/mlx4/mlx4.h
@@ -0,0 +1,348 @@
1/*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
4 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.
5 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
6 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
7 *
8 * This software is available to you under a choice of one of two
9 * licenses. You may choose to be licensed under the terms of the GNU
10 * General Public License (GPL) Version 2, available from the file
11 * COPYING in the main directory of this source tree, or the
12 * OpenIB.org BSD license below:
13 *
14 * Redistribution and use in source and binary forms, with or
15 * without modification, are permitted provided that the following
16 * conditions are met:
17 *
18 * - Redistributions of source code must retain the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer.
21 *
22 * - Redistributions in binary form must reproduce the above
23 * copyright notice, this list of conditions and the following
24 * disclaimer in the documentation and/or other materials
25 * provided with the distribution.
26 *
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
28 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
29 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
30 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
31 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
32 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
33 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 * SOFTWARE.
35 */
36
37#ifndef MLX4_H
38#define MLX4_H
39
40#include <linux/radix-tree.h>
41
42#include <linux/mlx4/device.h>
43#include <linux/mlx4/doorbell.h>
44
45#define DRV_NAME "mlx4_core"
46#define PFX DRV_NAME ": "
47#define DRV_VERSION "0.01"
48#define DRV_RELDATE "May 1, 2007"
49
50enum {
51 MLX4_HCR_BASE = 0x80680,
52 MLX4_HCR_SIZE = 0x0001c,
53 MLX4_CLR_INT_SIZE = 0x00008
54};
55
56enum {
57 MLX4_BOARD_ID_LEN = 64
58};
59
60enum {
61 MLX4_MGM_ENTRY_SIZE = 0x40,
62 MLX4_QP_PER_MGM = 4 * (MLX4_MGM_ENTRY_SIZE / 16 - 2),
63 MLX4_MTT_ENTRY_PER_SEG = 8
64};
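
/*
 * MLX4_QP_PER_MGM above works out as follows: an MGM entry is
 * 0x40 = 64 bytes, i.e. 64 / 16 = 4 sixteen-byte units.  Two units
 * (32 bytes) form the header -- next_gid_index, members_count, two
 * reserved words and the 16-byte GID (see struct mlx4_mgm in mcg.c)
 * -- leaving two units for QP numbers at four per unit:
 * 4 * (4 - 2) = 8.
 */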
65
66enum {
67 MLX4_EQ_ASYNC,
68 MLX4_EQ_COMP,
69 MLX4_EQ_CATAS,
70 MLX4_NUM_EQ
71};
72
73enum {
74 MLX4_NUM_PDS = 1 << 15
75};
76
77enum {
78 MLX4_CMPT_TYPE_QP = 0,
79 MLX4_CMPT_TYPE_SRQ = 1,
80 MLX4_CMPT_TYPE_CQ = 2,
81 MLX4_CMPT_TYPE_EQ = 3,
82 MLX4_CMPT_NUM_TYPE
83};
84
85enum {
86 MLX4_CMPT_SHIFT = 24,
87 MLX4_NUM_CMPTS = MLX4_CMPT_NUM_TYPE << MLX4_CMPT_SHIFT
88};
89
90#ifdef CONFIG_MLX4_DEBUG
91extern int mlx4_debug_level;
92
93#define mlx4_dbg(mdev, format, arg...) \
94 do { \
95 if (mlx4_debug_level) \
96 dev_printk(KERN_DEBUG, &mdev->pdev->dev, format, ## arg); \
97 } while (0)
98
99#else /* CONFIG_MLX4_DEBUG */
100
101#define mlx4_dbg(mdev, format, arg...) do { (void) mdev; } while (0)
102
103#endif /* CONFIG_MLX4_DEBUG */
104
105#define mlx4_err(mdev, format, arg...) \
106 dev_err(&mdev->pdev->dev, format, ## arg)
107#define mlx4_info(mdev, format, arg...) \
108 dev_info(&mdev->pdev->dev, format, ## arg)
109#define mlx4_warn(mdev, format, arg...) \
110 dev_warn(&mdev->pdev->dev, format, ## arg)
111
112struct mlx4_bitmap {
113 u32 last;
114 u32 top;
115 u32 max;
116 u32 mask;
117 spinlock_t lock;
118 unsigned long *table;
119};
120
121struct mlx4_buddy {
122 unsigned long **bits;
123 int max_order;
124 spinlock_t lock;
125};
126
127struct mlx4_icm;
128
129struct mlx4_icm_table {
130 u64 virt;
131 int num_icm;
132 int num_obj;
133 int obj_size;
134 int lowmem;
135 struct mutex mutex;
136 struct mlx4_icm **icm;
137};
138
139struct mlx4_eq {
140 struct mlx4_dev *dev;
141 void __iomem *doorbell;
142 int eqn;
143 u32 cons_index;
144 u16 irq;
145 u16 have_irq;
146 int nent;
147 struct mlx4_buf_list *page_list;
148 struct mlx4_mtt mtt;
149};
150
151struct mlx4_profile {
152 int num_qp;
153 int rdmarc_per_qp;
154 int num_srq;
155 int num_cq;
156 int num_mcg;
157 int num_mpt;
158 int num_mtt;
159};
160
161struct mlx4_fw {
162 u64 clr_int_base;
163 u64 catas_offset;
164 struct mlx4_icm *fw_icm;
165 struct mlx4_icm *aux_icm;
166 u32 catas_size;
167 u16 fw_pages;
168 u8 clr_int_bar;
169 u8 catas_bar;
170};
171
172struct mlx4_cmd {
173 struct pci_pool *pool;
174 void __iomem *hcr;
175 struct mutex hcr_mutex;
176 struct semaphore poll_sem;
177 struct semaphore event_sem;
178 int max_cmds;
179 spinlock_t context_lock;
180 int free_head;
181 struct mlx4_cmd_context *context;
182 u16 token_mask;
183 u8 use_events;
184 u8 toggle;
185};
186
187struct mlx4_uar_table {
188 struct mlx4_bitmap bitmap;
189};
190
191struct mlx4_mr_table {
192 struct mlx4_bitmap mpt_bitmap;
193 struct mlx4_buddy mtt_buddy;
194 u64 mtt_base;
195 u64 mpt_base;
196 struct mlx4_icm_table mtt_table;
197 struct mlx4_icm_table dmpt_table;
198};
199
200struct mlx4_cq_table {
201 struct mlx4_bitmap bitmap;
202 spinlock_t lock;
203 struct radix_tree_root tree;
204 struct mlx4_icm_table table;
205 struct mlx4_icm_table cmpt_table;
206};
207
208struct mlx4_eq_table {
209 struct mlx4_bitmap bitmap;
210 void __iomem *clr_int;
211 void __iomem *uar_map[(MLX4_NUM_EQ + 6) / 4];
212 u32 clr_mask;
213 struct mlx4_eq eq[MLX4_NUM_EQ];
214 u64 icm_virt;
215 struct page *icm_page;
216 dma_addr_t icm_dma;
217 struct mlx4_icm_table cmpt_table;
218 int have_irq;
219 u8 inta_pin;
220};
221
222struct mlx4_srq_table {
223 struct mlx4_bitmap bitmap;
224 spinlock_t lock;
225 struct radix_tree_root tree;
226 struct mlx4_icm_table table;
227 struct mlx4_icm_table cmpt_table;
228};
229
230struct mlx4_qp_table {
231 struct mlx4_bitmap bitmap;
232 u32 rdmarc_base;
233 int rdmarc_shift;
234 spinlock_t lock;
235 struct mlx4_icm_table qp_table;
236 struct mlx4_icm_table auxc_table;
237 struct mlx4_icm_table altc_table;
238 struct mlx4_icm_table rdmarc_table;
239 struct mlx4_icm_table cmpt_table;
240};
241
242struct mlx4_mcg_table {
243 struct mutex mutex;
244 struct mlx4_bitmap bitmap;
245 struct mlx4_icm_table table;
246};
247
248struct mlx4_catas_err {
249 u32 __iomem *map;
250 int size;
251};
252
253struct mlx4_priv {
254 struct mlx4_dev dev;
255
256 struct list_head dev_list;
257 struct list_head ctx_list;
258 spinlock_t ctx_lock;
259
260 struct mlx4_fw fw;
261 struct mlx4_cmd cmd;
262
263 struct mlx4_bitmap pd_bitmap;
264 struct mlx4_uar_table uar_table;
265 struct mlx4_mr_table mr_table;
266 struct mlx4_cq_table cq_table;
267 struct mlx4_eq_table eq_table;
268 struct mlx4_srq_table srq_table;
269 struct mlx4_qp_table qp_table;
270 struct mlx4_mcg_table mcg_table;
271
272 struct mlx4_catas_err catas_err;
273
274 void __iomem *clr_base;
275
276 struct mlx4_uar driver_uar;
277 void __iomem *kar;
278 MLX4_DECLARE_DOORBELL_LOCK(doorbell_lock)
279
280 u32 rev_id;
281 char board_id[MLX4_BOARD_ID_LEN];
282};
283
284static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
285{
286 return container_of(dev, struct mlx4_priv, dev);
287}
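
/*
 * Since dev is the first member of struct mlx4_priv, the
 * container_of() above amounts to a pointer cast, but it remains
 * correct even if the member is ever moved within the structure.
 */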
288
289u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap);
290void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj);
291int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, u32 reserved);
292void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap);
293
294int mlx4_reset(struct mlx4_dev *dev);
295
296int mlx4_init_pd_table(struct mlx4_dev *dev);
297int mlx4_init_uar_table(struct mlx4_dev *dev);
298int mlx4_init_mr_table(struct mlx4_dev *dev);
299int mlx4_init_eq_table(struct mlx4_dev *dev);
300int mlx4_init_cq_table(struct mlx4_dev *dev);
301int mlx4_init_qp_table(struct mlx4_dev *dev);
302int mlx4_init_srq_table(struct mlx4_dev *dev);
303int mlx4_init_mcg_table(struct mlx4_dev *dev);
304
305void mlx4_cleanup_pd_table(struct mlx4_dev *dev);
306void mlx4_cleanup_uar_table(struct mlx4_dev *dev);
307void mlx4_cleanup_mr_table(struct mlx4_dev *dev);
308void mlx4_cleanup_eq_table(struct mlx4_dev *dev);
309void mlx4_cleanup_cq_table(struct mlx4_dev *dev);
310void mlx4_cleanup_qp_table(struct mlx4_dev *dev);
311void mlx4_cleanup_srq_table(struct mlx4_dev *dev);
312void mlx4_cleanup_mcg_table(struct mlx4_dev *dev);
313
314void mlx4_map_catas_buf(struct mlx4_dev *dev);
315void mlx4_unmap_catas_buf(struct mlx4_dev *dev);
316
317int mlx4_register_device(struct mlx4_dev *dev);
318void mlx4_unregister_device(struct mlx4_dev *dev);
319void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_event type,
320 int subtype, int port);
321
322struct mlx4_dev_cap;
323struct mlx4_init_hca_param;
324
325u64 mlx4_make_profile(struct mlx4_dev *dev,
326 struct mlx4_profile *request,
327 struct mlx4_dev_cap *dev_cap,
328 struct mlx4_init_hca_param *init_hca);
329
330int mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt);
331void mlx4_unmap_eq_icm(struct mlx4_dev *dev);
332
333int mlx4_cmd_init(struct mlx4_dev *dev);
334void mlx4_cmd_cleanup(struct mlx4_dev *dev);
335void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param);
336int mlx4_cmd_use_events(struct mlx4_dev *dev);
337void mlx4_cmd_use_polling(struct mlx4_dev *dev);
338
339void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn);
340void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type);
341
342void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type);
343
344void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);
345
346void mlx4_handle_catas_err(struct mlx4_dev *dev);
347
348#endif /* MLX4_H */
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c
new file mode 100644
index 000000000000..b33864dab179
--- /dev/null
+++ b/drivers/net/mlx4/mr.c
@@ -0,0 +1,479 @@
1/*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <linux/init.h>
36#include <linux/errno.h>
37
38#include <linux/mlx4/cmd.h>
39
40#include "mlx4.h"
41#include "icm.h"
42
43/*
44 * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
45 */
46struct mlx4_mpt_entry {
47 __be32 flags;
48 __be32 qpn;
49 __be32 key;
50 __be32 pd;
51 __be64 start;
52 __be64 length;
53 __be32 lkey;
54 __be32 win_cnt;
55 u8 reserved1[3];
56 u8 mtt_rep;
57 __be64 mtt_seg;
58 __be32 mtt_sz;
59 __be32 entity_size;
60 __be32 first_byte_offset;
61} __attribute__((packed));
62
63#define MLX4_MPT_FLAG_SW_OWNS (0xfUL << 28)
64#define MLX4_MPT_FLAG_MIO (1 << 17)
65#define MLX4_MPT_FLAG_BIND_ENABLE (1 << 15)
66#define MLX4_MPT_FLAG_PHYSICAL (1 << 9)
67#define MLX4_MPT_FLAG_REGION (1 << 8)
68
69#define MLX4_MTT_FLAG_PRESENT 1
70
71static u32 mlx4_buddy_alloc(struct mlx4_buddy *buddy, int order)
72{
73 int o;
74 int m;
75 u32 seg;
76
77 spin_lock(&buddy->lock);
78
79 for (o = order; o <= buddy->max_order; ++o) {
80 m = 1 << (buddy->max_order - o);
81 seg = find_first_bit(buddy->bits[o], m);
82 if (seg < m)
83 goto found;
84 }
85
86 spin_unlock(&buddy->lock);
87 return -1;
88
89 found:
90 clear_bit(seg, buddy->bits[o]);
91
92 while (o > order) {
93 --o;
94 seg <<= 1;
95 set_bit(seg ^ 1, buddy->bits[o]);
96 }
97
98 spin_unlock(&buddy->lock);
99
100 seg <<= order;
101
102 return seg;
103}
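
/*
 * Worked example of the search-and-split above: take max_order = 2
 * with only bit 0 of bits[2] set (one free order-2 block).  An
 * order-0 request finds nothing at orders 0 and 1, claims the
 * order-2 block, and splits on the way back down: it sets bit 1 in
 * bits[1] and bit 1 in bits[0], then returns seg = 0.  A second
 * order-0 request is satisfied directly from bit 1 of bits[0] and
 * returns seg = 1.
 */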
104
105static void mlx4_buddy_free(struct mlx4_buddy *buddy, u32 seg, int order)
106{
107 seg >>= order;
108
109 spin_lock(&buddy->lock);
110
111 while (test_bit(seg ^ 1, buddy->bits[order])) {
112 clear_bit(seg ^ 1, buddy->bits[order]);
113 seg >>= 1;
114 ++order;
115 }
116
117 set_bit(seg, buddy->bits[order]);
118
119 spin_unlock(&buddy->lock);
120}
121
122static int __devinit mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
123{
124 int i, s;
125
126 buddy->max_order = max_order;
127 spin_lock_init(&buddy->lock);
128
129 buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *),
130 GFP_KERNEL);
131 if (!buddy->bits)
132 goto err_out;
133
134 for (i = 0; i <= buddy->max_order; ++i) {
135 s = BITS_TO_LONGS(1 << (buddy->max_order - i));
136 buddy->bits[i] = kmalloc(s * sizeof (long), GFP_KERNEL);
137 if (!buddy->bits[i])
138 goto err_out_free;
139 bitmap_zero(buddy->bits[i], 1 << (buddy->max_order - i));
140 }
141
142 set_bit(0, buddy->bits[buddy->max_order]);
143
144 return 0;
145
146err_out_free:
147 for (i = 0; i <= buddy->max_order; ++i)
148 kfree(buddy->bits[i]);
149
150 kfree(buddy->bits);
151
152err_out:
153 return -ENOMEM;
154}
155
156static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy)
157{
158 int i;
159
160 for (i = 0; i <= buddy->max_order; ++i)
161 kfree(buddy->bits[i]);
162
163 kfree(buddy->bits);
164}
165
166static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
167{
168 struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
169 u32 seg;
170
171 seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, order);
172 if (seg == -1)
173 return -1;
174
175 if (mlx4_table_get_range(dev, &mr_table->mtt_table, seg,
176 seg + (1 << order) - 1)) {
177 mlx4_buddy_free(&mr_table->mtt_buddy, seg, order);
178 return -1;
179 }
180
181 return seg;
182}
183
184int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
185 struct mlx4_mtt *mtt)
186{
187 int i;
188
189 if (!npages) {
190 mtt->order = -1;
191 mtt->page_shift = MLX4_ICM_PAGE_SHIFT;
192 return 0;
193 } else
194 mtt->page_shift = page_shift;
195
196 for (mtt->order = 0, i = MLX4_MTT_ENTRY_PER_SEG; i < npages; i <<= 1)
197 ++mtt->order;
198
199 mtt->first_seg = mlx4_alloc_mtt_range(dev, mtt->order);
200 if (mtt->first_seg == -1)
201 return -ENOMEM;
202
203 return 0;
204}
205EXPORT_SYMBOL_GPL(mlx4_mtt_init);
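
/*
 * Order computation in mlx4_mtt_init() above, by example: with
 * MLX4_MTT_ENTRY_PER_SEG = 8, npages = 20 gives order 2, since
 * doubling from one 8-entry segment reaches 32 >= 20 after two
 * steps; 1 << 2 = 4 segments are then reserved from the buddy.
 */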
206
207void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
208{
209 struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
210
211 if (mtt->order < 0)
212 return;
213
214 mlx4_buddy_free(&mr_table->mtt_buddy, mtt->first_seg, mtt->order);
215 mlx4_table_put_range(dev, &mr_table->mtt_table, mtt->first_seg,
216 mtt->first_seg + (1 << mtt->order) - 1);
217}
218EXPORT_SYMBOL_GPL(mlx4_mtt_cleanup);
219
220u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
221{
222 return (u64) mtt->first_seg * dev->caps.mtt_entry_sz;
223}
224EXPORT_SYMBOL_GPL(mlx4_mtt_addr);
225
226static u32 hw_index_to_key(u32 ind)
227{
228 return (ind >> 24) | (ind << 8);
229}
230
231static u32 key_to_hw_index(u32 key)
232{
233 return (key << 24) | (key >> 8);
234}
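
/*
 * These two helpers are inverse 8-bit rotations of a 32-bit word:
 * hw_index_to_key() rotates left by 8 and key_to_hw_index() rotates
 * right by 8, so for an index below 1 << 24 the MPT index lands in
 * bits 31:8 of the key and the low byte is free.  For example,
 * hw_index_to_key(0x000001) == 0x00000100, and feeding that back
 * through key_to_hw_index() returns 0x000001 again.
 */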
235
236static int mlx4_SW2HW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
237 int mpt_index)
238{
239 return mlx4_cmd(dev, mailbox->dma, mpt_index, 0, MLX4_CMD_SW2HW_MPT,
240 MLX4_CMD_TIME_CLASS_B);
241}
242
243static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
244 int mpt_index)
245{
246 return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index,
247 !mailbox, MLX4_CMD_HW2SW_MPT, MLX4_CMD_TIME_CLASS_B);
248}
249
250int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
251 int npages, int page_shift, struct mlx4_mr *mr)
252{
253 struct mlx4_priv *priv = mlx4_priv(dev);
254 u32 index;
255 int err;
256
257 index = mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap);
258 if (index == -1) {
259 err = -ENOMEM;
260 goto err;
261 }
262
263 mr->iova = iova;
264 mr->size = size;
265 mr->pd = pd;
266 mr->access = access;
267 mr->enabled = 0;
268 mr->key = hw_index_to_key(index);
269
270 err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
271 if (err)
272 goto err_index;
273
274 return 0;
275
276err_index:
277 mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index);
278
279err:
280 /* mr is owned by the caller; freeing it here would be a double free */
281 return err;
282}
283EXPORT_SYMBOL_GPL(mlx4_mr_alloc);
284
285void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
286{
287 struct mlx4_priv *priv = mlx4_priv(dev);
288 int err;
289
290 if (mr->enabled) {
291 err = mlx4_HW2SW_MPT(dev, NULL,
292 key_to_hw_index(mr->key) &
293 (dev->caps.num_mpts - 1));
294 if (err)
295 mlx4_warn(dev, "HW2SW_MPT failed (%d)\n", err);
296 }
297
298 mlx4_mtt_cleanup(dev, &mr->mtt);
299 mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, key_to_hw_index(mr->key));
300}
301EXPORT_SYMBOL_GPL(mlx4_mr_free);
302
303int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
304{
305 struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
306 struct mlx4_cmd_mailbox *mailbox;
307 struct mlx4_mpt_entry *mpt_entry;
308 int err;
309
310 err = mlx4_table_get(dev, &mr_table->dmpt_table, key_to_hw_index(mr->key));
311 if (err)
312 return err;
313
314 mailbox = mlx4_alloc_cmd_mailbox(dev);
315 if (IS_ERR(mailbox)) {
316 err = PTR_ERR(mailbox);
317 goto err_table;
318 }
319 mpt_entry = mailbox->buf;
320
321 memset(mpt_entry, 0, sizeof *mpt_entry);
322
323 mpt_entry->flags = cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS |
324 MLX4_MPT_FLAG_MIO |
325 MLX4_MPT_FLAG_REGION |
326 mr->access);
327 if (mr->mtt.order < 0)
328 mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
329
330 mpt_entry->key = cpu_to_be32(key_to_hw_index(mr->key));
331 mpt_entry->pd = cpu_to_be32(mr->pd);
332 mpt_entry->start = cpu_to_be64(mr->iova);
333 mpt_entry->length = cpu_to_be64(mr->size);
334 mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);
335 mpt_entry->mtt_seg = cpu_to_be64(mlx4_mtt_addr(dev, &mr->mtt));
336
337 err = mlx4_SW2HW_MPT(dev, mailbox,
338 key_to_hw_index(mr->key) & (dev->caps.num_mpts - 1));
339 if (err) {
340 mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
341 goto err_cmd;
342 }
343
344 mr->enabled = 1;
345
346 mlx4_free_cmd_mailbox(dev, mailbox);
347
348 return 0;
349
350err_cmd:
351 mlx4_free_cmd_mailbox(dev, mailbox);
352
353err_table:
354 mlx4_table_put(dev, &mr_table->dmpt_table, key_to_hw_index(mr->key));
355 return err;
356}
357EXPORT_SYMBOL_GPL(mlx4_mr_enable);
358
359static int mlx4_WRITE_MTT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
360 int num_mtt)
361{
362 return mlx4_cmd(dev, mailbox->dma, num_mtt, 0, MLX4_CMD_WRITE_MTT,
363 MLX4_CMD_TIME_CLASS_B);
364}
365
366int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
367 int start_index, int npages, u64 *page_list)
368{
369 struct mlx4_cmd_mailbox *mailbox;
370 __be64 *mtt_entry;
371 int i;
372 int err = 0;
373
374 if (mtt->order < 0)
375 return -EINVAL;
376
377 mailbox = mlx4_alloc_cmd_mailbox(dev);
378 if (IS_ERR(mailbox))
379 return PTR_ERR(mailbox);
380
381 mtt_entry = mailbox->buf;
382
383 while (npages > 0) {
384 mtt_entry[0] = cpu_to_be64(mlx4_mtt_addr(dev, mtt) + start_index * 8);
385 mtt_entry[1] = 0;
386
387 for (i = 0; i < npages && i < MLX4_MAILBOX_SIZE / 8 - 2; ++i)
388 mtt_entry[i + 2] = cpu_to_be64(page_list[i] |
389 MLX4_MTT_FLAG_PRESENT);
390
391 /*
392 * If we have an odd number of entries to write, add
393 * one more dummy entry for firmware efficiency.
394 */
395 if (i & 1)
396 mtt_entry[i + 2] = 0;
397
398 err = mlx4_WRITE_MTT(dev, mailbox, (i + 1) & ~1);
399 if (err)
400 goto out;
401
402 npages -= i;
403 start_index += i;
404 page_list += i;
405 }
406
407out:
408 mlx4_free_cmd_mailbox(dev, mailbox);
409
410 return err;
411}
412EXPORT_SYMBOL_GPL(mlx4_write_mtt);
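
/*
 * Example of the chunking above, assuming a 4 KB command mailbox
 * (MLX4_MAILBOX_SIZE = 4096, so 4096 / 8 - 2 = 510 entries fit after
 * the 16-byte header): writing 513 pages takes two WRITE_MTT
 * commands.  The first carries entries 0..509 (i = 510, already
 * even); the second carries the remaining 3 plus one zero dummy
 * entry, so the count passed down, (i + 1) & ~1, is 4.
 */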
413
414int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
415 struct mlx4_buf *buf)
416{
417 u64 *page_list;
418 int err;
419 int i;
420
421 page_list = kmalloc(buf->npages * sizeof *page_list, GFP_KERNEL);
422 if (!page_list)
423 return -ENOMEM;
424
425 for (i = 0; i < buf->npages; ++i)
426 if (buf->nbufs == 1)
427 page_list[i] = buf->u.direct.map + (i << buf->page_shift);
428 else
429 page_list[i] = buf->u.page_list[i].map;
430
431 err = mlx4_write_mtt(dev, mtt, 0, buf->npages, page_list);
432
433 kfree(page_list);
434 return err;
435}
436EXPORT_SYMBOL_GPL(mlx4_buf_write_mtt);
437
438int __devinit mlx4_init_mr_table(struct mlx4_dev *dev)
439{
440 struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
441 int err;
442
443 err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts,
444 ~0, dev->caps.reserved_mrws);
445 if (err)
446 return err;
447
448 err = mlx4_buddy_init(&mr_table->mtt_buddy,
449 ilog2(dev->caps.num_mtt_segs));
450 if (err)
451 goto err_buddy;
452
453 if (dev->caps.reserved_mtts) {
454 if (mlx4_alloc_mtt_range(dev, ilog2(dev->caps.reserved_mtts)) == -1) {
455 mlx4_warn(dev, "MTT table of order %d is too small.\n",
456 mr_table->mtt_buddy.max_order);
457 err = -ENOMEM;
458 goto err_reserve_mtts;
459 }
460 }
461
462 return 0;
463
464err_reserve_mtts:
465 mlx4_buddy_cleanup(&mr_table->mtt_buddy);
466
467err_buddy:
468 mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);
469
470 return err;
471}
472
473void mlx4_cleanup_mr_table(struct mlx4_dev *dev)
474{
475 struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
476
477 mlx4_buddy_cleanup(&mr_table->mtt_buddy);
478 mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);
479}
diff --git a/drivers/net/mlx4/pd.c b/drivers/net/mlx4/pd.c
new file mode 100644
index 000000000000..23dea1ee7750
--- /dev/null
+++ b/drivers/net/mlx4/pd.c
@@ -0,0 +1,102 @@
1/*
2 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <linux/init.h>
35#include <linux/errno.h>
36
37#include <asm/page.h>
38
39#include "mlx4.h"
40#include "icm.h"
41
42int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn)
43{
44 struct mlx4_priv *priv = mlx4_priv(dev);
45
46 *pdn = mlx4_bitmap_alloc(&priv->pd_bitmap);
47 if (*pdn == -1)
48 return -ENOMEM;
49
50 return 0;
51}
52EXPORT_SYMBOL_GPL(mlx4_pd_alloc);
53
54void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn)
55{
56 mlx4_bitmap_free(&mlx4_priv(dev)->pd_bitmap, pdn);
57}
58EXPORT_SYMBOL_GPL(mlx4_pd_free);
59
60int __devinit mlx4_init_pd_table(struct mlx4_dev *dev)
61{
62 struct mlx4_priv *priv = mlx4_priv(dev);
63
64 return mlx4_bitmap_init(&priv->pd_bitmap, dev->caps.num_pds,
65 (1 << 24) - 1, dev->caps.reserved_pds);
66}
67
68void mlx4_cleanup_pd_table(struct mlx4_dev *dev)
69{
70 mlx4_bitmap_cleanup(&mlx4_priv(dev)->pd_bitmap);
71}
72
73
74int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar)
75{
76 uar->index = mlx4_bitmap_alloc(&mlx4_priv(dev)->uar_table.bitmap);
77 if (uar->index == -1)
78 return -ENOMEM;
79
80 uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + uar->index;
81
82 return 0;
83}
84EXPORT_SYMBOL_GPL(mlx4_uar_alloc);
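
/*
 * The pfn computed above is plain BAR carving: UAR pages live in
 * BAR 2, one page per index.  With BAR 2 at 0xd0000000 and 4 KB
 * pages (hypothetical values), UAR index 3 maps to pfn
 * (0xd0000000 >> 12) + 3 = 0xd0003.
 */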
85
86void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar)
87{
88 mlx4_bitmap_free(&mlx4_priv(dev)->uar_table.bitmap, uar->index);
89}
90EXPORT_SYMBOL_GPL(mlx4_uar_free);
91
92int mlx4_init_uar_table(struct mlx4_dev *dev)
93{
94 return mlx4_bitmap_init(&mlx4_priv(dev)->uar_table.bitmap,
95 dev->caps.num_uars, dev->caps.num_uars - 1,
96 max(128, dev->caps.reserved_uars));
97}
98
99void mlx4_cleanup_uar_table(struct mlx4_dev *dev)
100{
101 mlx4_bitmap_cleanup(&mlx4_priv(dev)->uar_table.bitmap);
102}
diff --git a/drivers/net/mlx4/profile.c b/drivers/net/mlx4/profile.c
new file mode 100644
index 000000000000..9ca42b213d54
--- /dev/null
+++ b/drivers/net/mlx4/profile.c
@@ -0,0 +1,238 @@
1/*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <linux/init.h>
36
37#include "mlx4.h"
38#include "fw.h"
39
40enum {
41 MLX4_RES_QP,
42 MLX4_RES_RDMARC,
43 MLX4_RES_ALTC,
44 MLX4_RES_AUXC,
45 MLX4_RES_SRQ,
46 MLX4_RES_CQ,
47 MLX4_RES_EQ,
48 MLX4_RES_DMPT,
49 MLX4_RES_CMPT,
50 MLX4_RES_MTT,
51 MLX4_RES_MCG,
52 MLX4_RES_NUM
53};
54
55static const char *res_name[] = {
56 [MLX4_RES_QP] = "QP",
57 [MLX4_RES_RDMARC] = "RDMARC",
58 [MLX4_RES_ALTC] = "ALTC",
59 [MLX4_RES_AUXC] = "AUXC",
60 [MLX4_RES_SRQ] = "SRQ",
61 [MLX4_RES_CQ] = "CQ",
62 [MLX4_RES_EQ] = "EQ",
63 [MLX4_RES_DMPT] = "DMPT",
64 [MLX4_RES_CMPT] = "CMPT",
65 [MLX4_RES_MTT] = "MTT",
66 [MLX4_RES_MCG] = "MCG",
67};
68
69u64 mlx4_make_profile(struct mlx4_dev *dev,
70 struct mlx4_profile *request,
71 struct mlx4_dev_cap *dev_cap,
72 struct mlx4_init_hca_param *init_hca)
73{
74 struct mlx4_priv *priv = mlx4_priv(dev);
75 struct mlx4_resource {
76 u64 size;
77 u64 start;
78 int type;
79 int num;
80 int log_num;
81 };
82
83 u64 total_size = 0;
84 struct mlx4_resource *profile;
85 struct mlx4_resource tmp;
86 int i, j;
87
88 profile = kzalloc(MLX4_RES_NUM * sizeof *profile, GFP_KERNEL);
89 if (!profile)
90 return -ENOMEM;
91
92 profile[MLX4_RES_QP].size = dev_cap->qpc_entry_sz;
93 profile[MLX4_RES_RDMARC].size = dev_cap->rdmarc_entry_sz;
94 profile[MLX4_RES_ALTC].size = dev_cap->altc_entry_sz;
95 profile[MLX4_RES_AUXC].size = dev_cap->aux_entry_sz;
96 profile[MLX4_RES_SRQ].size = dev_cap->srq_entry_sz;
97 profile[MLX4_RES_CQ].size = dev_cap->cqc_entry_sz;
98 profile[MLX4_RES_EQ].size = dev_cap->eqc_entry_sz;
99 profile[MLX4_RES_DMPT].size = dev_cap->dmpt_entry_sz;
100 profile[MLX4_RES_CMPT].size = dev_cap->cmpt_entry_sz;
101 profile[MLX4_RES_MTT].size = MLX4_MTT_ENTRY_PER_SEG * dev_cap->mtt_entry_sz;
102 profile[MLX4_RES_MCG].size = MLX4_MGM_ENTRY_SIZE;
103
104 profile[MLX4_RES_QP].num = request->num_qp;
105 profile[MLX4_RES_RDMARC].num = request->num_qp * request->rdmarc_per_qp;
106 profile[MLX4_RES_ALTC].num = request->num_qp;
107 profile[MLX4_RES_AUXC].num = request->num_qp;
108 profile[MLX4_RES_SRQ].num = request->num_srq;
109 profile[MLX4_RES_CQ].num = request->num_cq;
110 profile[MLX4_RES_EQ].num = MLX4_NUM_EQ + dev_cap->reserved_eqs;
111 profile[MLX4_RES_DMPT].num = request->num_mpt;
112 profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS;
113 profile[MLX4_RES_MTT].num = request->num_mtt;
114 profile[MLX4_RES_MCG].num = request->num_mcg;
115
116 for (i = 0; i < MLX4_RES_NUM; ++i) {
117 profile[i].type = i;
118 profile[i].num = roundup_pow_of_two(profile[i].num);
119 profile[i].log_num = ilog2(profile[i].num);
120 profile[i].size *= profile[i].num;
121 profile[i].size = max(profile[i].size, (u64) PAGE_SIZE);
122 }
123
124 /*
125 * Sort the resources in decreasing order of size. Since they
126 * all have sizes that are powers of 2, we'll be able to keep
127 * resources aligned to their size and pack them without gaps
128 * using the sorted order.
129 */
130 for (i = MLX4_RES_NUM; i > 0; --i)
131 for (j = 1; j < i; ++j) {
132 if (profile[j].size > profile[j - 1].size) {
133 tmp = profile[j];
134 profile[j] = profile[j - 1];
135 profile[j - 1] = tmp;
136 }
137 }
138
139 for (i = 0; i < MLX4_RES_NUM; ++i) {
140 if (profile[i].size) {
141 profile[i].start = total_size;
142 total_size += profile[i].size;
143 }
144
145 if (total_size > dev_cap->max_icm_sz) {
146 mlx4_err(dev, "Profile requires 0x%llx bytes; "
147 "won't fit in 0x%llx bytes of context memory.\n",
148 (unsigned long long) total_size,
149 (unsigned long long) dev_cap->max_icm_sz);
150 kfree(profile);
151 return -ENOMEM;
152 }
153
154 if (profile[i].size)
155 mlx4_dbg(dev, " profile[%2d] (%6s): 2^%02d entries @ 0x%10llx, "
156 "size 0x%10llx\n",
157 i, res_name[profile[i].type], profile[i].log_num,
158 (unsigned long long) profile[i].start,
159 (unsigned long long) profile[i].size);
160 }
161
162 mlx4_dbg(dev, "HCA context memory: reserving %d KB\n",
163 (int) (total_size >> 10));
164
165 for (i = 0; i < MLX4_RES_NUM; ++i) {
166 switch (profile[i].type) {
167 case MLX4_RES_QP:
168 dev->caps.num_qps = profile[i].num;
169 init_hca->qpc_base = profile[i].start;
170 init_hca->log_num_qps = profile[i].log_num;
171 break;
172 case MLX4_RES_RDMARC:
173 for (priv->qp_table.rdmarc_shift = 0;
174 request->num_qp << priv->qp_table.rdmarc_shift < profile[i].num;
175 ++priv->qp_table.rdmarc_shift)
176 ; /* nothing */
177 dev->caps.max_qp_dest_rdma = 1 << priv->qp_table.rdmarc_shift;
178 priv->qp_table.rdmarc_base = (u32) profile[i].start;
179 init_hca->rdmarc_base = profile[i].start;
180 init_hca->log_rd_per_qp = priv->qp_table.rdmarc_shift;
181 break;
182 case MLX4_RES_ALTC:
183 init_hca->altc_base = profile[i].start;
184 break;
185 case MLX4_RES_AUXC:
186 init_hca->auxc_base = profile[i].start;
187 break;
188 case MLX4_RES_SRQ:
189 dev->caps.num_srqs = profile[i].num;
190 init_hca->srqc_base = profile[i].start;
191 init_hca->log_num_srqs = profile[i].log_num;
192 break;
193 case MLX4_RES_CQ:
194 dev->caps.num_cqs = profile[i].num;
195 init_hca->cqc_base = profile[i].start;
196 init_hca->log_num_cqs = profile[i].log_num;
197 break;
198 case MLX4_RES_EQ:
199 dev->caps.num_eqs = profile[i].num;
200 init_hca->eqc_base = profile[i].start;
201 init_hca->log_num_eqs = profile[i].log_num;
202 break;
203 case MLX4_RES_DMPT:
204 dev->caps.num_mpts = profile[i].num;
205 priv->mr_table.mpt_base = profile[i].start;
206 init_hca->dmpt_base = profile[i].start;
207 init_hca->log_mpt_sz = profile[i].log_num;
208 break;
209 case MLX4_RES_CMPT:
210 init_hca->cmpt_base = profile[i].start;
211 break;
212 case MLX4_RES_MTT:
213 dev->caps.num_mtt_segs = profile[i].num;
214 priv->mr_table.mtt_base = profile[i].start;
215 init_hca->mtt_base = profile[i].start;
216 break;
217 case MLX4_RES_MCG:
218 dev->caps.num_mgms = profile[i].num >> 1;
219 dev->caps.num_amgms = profile[i].num >> 1;
220 init_hca->mc_base = profile[i].start;
221 init_hca->log_mc_entry_sz = ilog2(MLX4_MGM_ENTRY_SIZE);
222 init_hca->log_mc_table_sz = profile[i].log_num;
223 init_hca->log_mc_hash_sz = profile[i].log_num - 1;
224 break;
225 default:
226 break;
227 }
228 }
229
230 /*
231 * PDs don't take any HCA memory, but we assign them as part
232 * of the HCA profile anyway.
233 */
234 dev->caps.num_pds = MLX4_NUM_PDS;
235
236 kfree(profile);
237 return total_size;
238}
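
/*
 * Why the descending sort above keeps the layout packed and aligned,
 * by example: with resources of 0x100000, 0x4000 and 0x1000 bytes
 * (all powers of two after rounding), largest-first placement gives
 * starts 0x0, 0x100000 and 0x104000 -- each start a multiple of its
 * own size, with no padding.  Placing the 0x1000 resource first
 * would leave the 0x100000 one misaligned unless a gap were inserted.
 */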
diff --git a/drivers/net/mlx4/qp.c b/drivers/net/mlx4/qp.c
new file mode 100644
index 000000000000..7f8b7d55b6e1
--- /dev/null
+++ b/drivers/net/mlx4/qp.c
@@ -0,0 +1,280 @@
1/*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
4 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
5 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
15 * conditions are met:
16 *
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer.
20 *
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 */
35
36#include <linux/init.h>
37
38#include <linux/mlx4/cmd.h>
39#include <linux/mlx4/qp.h>
40
41#include "mlx4.h"
42#include "icm.h"
43
44void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
45{
46 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
47 struct mlx4_qp *qp;
48
49 spin_lock(&qp_table->lock);
50
51 qp = __mlx4_qp_lookup(dev, qpn);
52 if (qp)
53 atomic_inc(&qp->refcount);
54
55 spin_unlock(&qp_table->lock);
56
57 if (!qp) {
58 mlx4_warn(dev, "Async event for bogus QP %08x\n", qpn);
59 return;
60 }
61
62 qp->event(qp, event_type);
63
64 if (atomic_dec_and_test(&qp->refcount))
65 complete(&qp->free);
66}
67
68int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
69 enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
70 struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar,
71 int sqd_event, struct mlx4_qp *qp)
72{
73 static const u16 op[MLX4_QP_NUM_STATE][MLX4_QP_NUM_STATE] = {
74 [MLX4_QP_STATE_RST] = {
75 [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP,
76 [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP,
77 [MLX4_QP_STATE_INIT] = MLX4_CMD_RST2INIT_QP,
78 },
79 [MLX4_QP_STATE_INIT] = {
80 [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP,
81 [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP,
82 [MLX4_QP_STATE_INIT] = MLX4_CMD_INIT2INIT_QP,
83 [MLX4_QP_STATE_RTR] = MLX4_CMD_INIT2RTR_QP,
84 },
85 [MLX4_QP_STATE_RTR] = {
86 [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP,
87 [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP,
88 [MLX4_QP_STATE_RTS] = MLX4_CMD_RTR2RTS_QP,
89 },
90 [MLX4_QP_STATE_RTS] = {
91 [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP,
92 [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP,
93 [MLX4_QP_STATE_RTS] = MLX4_CMD_RTS2RTS_QP,
94 [MLX4_QP_STATE_SQD] = MLX4_CMD_RTS2SQD_QP,
95 },
96 [MLX4_QP_STATE_SQD] = {
97 [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP,
98 [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP,
99 [MLX4_QP_STATE_RTS] = MLX4_CMD_SQD2RTS_QP,
100 [MLX4_QP_STATE_SQD] = MLX4_CMD_SQD2SQD_QP,
101 },
102 [MLX4_QP_STATE_SQER] = {
103 [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP,
104 [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP,
105 [MLX4_QP_STATE_RTS] = MLX4_CMD_SQERR2RTS_QP,
106 },
107 [MLX4_QP_STATE_ERR] = {
108 [MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP,
109 [MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP,
110 }
111 };
112
113 struct mlx4_cmd_mailbox *mailbox;
114 int ret = 0;
115
116 if (cur_state < 0 || cur_state >= MLX4_QP_NUM_STATE ||
117 new_state < 0 || new_state >= MLX4_QP_NUM_STATE ||
118 !op[cur_state][new_state])
119 return -EINVAL;
120
121 if (op[cur_state][new_state] == MLX4_CMD_2RST_QP)
122 return mlx4_cmd(dev, 0, qp->qpn, 2,
123 MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A);
124
125 mailbox = mlx4_alloc_cmd_mailbox(dev);
126 if (IS_ERR(mailbox))
127 return PTR_ERR(mailbox);
128
129 if (cur_state == MLX4_QP_STATE_RST && new_state == MLX4_QP_STATE_INIT) {
130 u64 mtt_addr = mlx4_mtt_addr(dev, mtt);
131 context->mtt_base_addr_h = mtt_addr >> 32;
132 context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
133 context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
134 }
135
136 *(__be32 *) mailbox->buf = cpu_to_be32(optpar);
137 memcpy(mailbox->buf + 8, context, sizeof *context);
138
139 ((struct mlx4_qp_context *) (mailbox->buf + 8))->local_qpn =
140 cpu_to_be32(qp->qpn);
141
142 ret = mlx4_cmd(dev, mailbox->dma, qp->qpn | (!!sqd_event << 31),
143 new_state == MLX4_QP_STATE_RST ? 2 : 0,
144 op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C);
145
146 mlx4_free_cmd_mailbox(dev, mailbox);
147 return ret;
148}
149EXPORT_SYMBOL_GPL(mlx4_qp_modify);
150
151int mlx4_qp_alloc(struct mlx4_dev *dev, int sqpn, struct mlx4_qp *qp)
152{
153 struct mlx4_priv *priv = mlx4_priv(dev);
154 struct mlx4_qp_table *qp_table = &priv->qp_table;
155 int err;
156
157 if (sqpn)
158 qp->qpn = sqpn;
159 else {
160 qp->qpn = mlx4_bitmap_alloc(&qp_table->bitmap);
161 if (qp->qpn == -1)
162 return -ENOMEM;
163 }
164
165 err = mlx4_table_get(dev, &qp_table->qp_table, qp->qpn);
166 if (err)
167 goto err_out;
168
169 err = mlx4_table_get(dev, &qp_table->auxc_table, qp->qpn);
170 if (err)
171 goto err_put_qp;
172
173 err = mlx4_table_get(dev, &qp_table->altc_table, qp->qpn);
174 if (err)
175 goto err_put_auxc;
176
177 err = mlx4_table_get(dev, &qp_table->rdmarc_table, qp->qpn);
178 if (err)
179 goto err_put_altc;
180
181 err = mlx4_table_get(dev, &qp_table->cmpt_table, qp->qpn);
182 if (err)
183 goto err_put_rdmarc;
184
185 spin_lock_irq(&qp_table->lock);
186 err = radix_tree_insert(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1), qp);
187 spin_unlock_irq(&qp_table->lock);
188 if (err)
189 goto err_put_cmpt;
190
191 atomic_set(&qp->refcount, 1);
192 init_completion(&qp->free);
193
194 return 0;
195
196err_put_cmpt:
197 mlx4_table_put(dev, &qp_table->cmpt_table, qp->qpn);
198
199err_put_rdmarc:
200 mlx4_table_put(dev, &qp_table->rdmarc_table, qp->qpn);
201
202err_put_altc:
203 mlx4_table_put(dev, &qp_table->altc_table, qp->qpn);
204
205err_put_auxc:
206 mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn);
207
208err_put_qp:
209 mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);
210
211err_out:
212 if (!sqpn)
213 mlx4_bitmap_free(&qp_table->bitmap, qp->qpn);
214
215 return err;
216}
217EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
218
219void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp)
220{
221 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
222 unsigned long flags;
223
224 spin_lock_irqsave(&qp_table->lock, flags);
225 radix_tree_delete(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1));
226 spin_unlock_irqrestore(&qp_table->lock, flags);
227}
228EXPORT_SYMBOL_GPL(mlx4_qp_remove);
229
230void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp)
231{
232 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
233
234 if (atomic_dec_and_test(&qp->refcount))
235 complete(&qp->free);
236 wait_for_completion(&qp->free);
237
238 mlx4_table_put(dev, &qp_table->cmpt_table, qp->qpn);
239 mlx4_table_put(dev, &qp_table->rdmarc_table, qp->qpn);
240 mlx4_table_put(dev, &qp_table->altc_table, qp->qpn);
241 mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn);
242 mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);
243
244 mlx4_bitmap_free(&qp_table->bitmap, qp->qpn);
245}
246EXPORT_SYMBOL_GPL(mlx4_qp_free);
247
248static int mlx4_CONF_SPECIAL_QP(struct mlx4_dev *dev, u32 base_qpn)
249{
250 return mlx4_cmd(dev, 0, base_qpn, 0, MLX4_CMD_CONF_SPECIAL_QP,
251 MLX4_CMD_TIME_CLASS_B);
252}
253
254int __devinit mlx4_init_qp_table(struct mlx4_dev *dev)
255{
256 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
257 int err;
258
259 spin_lock_init(&qp_table->lock);
260 INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC);
261
262 /*
263 * We reserve 2 extra QPs per port for the special QPs. The
264 * block of special QPs must be aligned to a multiple of 8, so
265 * round up.
266 */
267 dev->caps.sqp_start = ALIGN(dev->caps.reserved_qps, 8);
268 err = mlx4_bitmap_init(&qp_table->bitmap, dev->caps.num_qps,
269 (1 << 24) - 1, dev->caps.sqp_start + 8);
270 if (err)
271 return err;
272
273 return mlx4_CONF_SPECIAL_QP(dev, dev->caps.sqp_start);
274}
275
276void mlx4_cleanup_qp_table(struct mlx4_dev *dev)
277{
278 mlx4_CONF_SPECIAL_QP(dev, 0);
279 mlx4_bitmap_cleanup(&mlx4_priv(dev)->qp_table.bitmap);
280}
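The lookup/free pairing in qp.c above is a compact reference-count-plus-completion idiom: mlx4_qp_event() takes a reference under qp_table->lock before invoking the handler, and mlx4_qp_free() drops the initial reference and then waits until every in-flight event handler has dropped its own. A minimal sketch of the same idiom, with hypothetical names (my_obj, my_obj_put, my_obj_destroy) standing in for the mlx4 specifics:

    #include <linux/completion.h>
    #include <asm/atomic.h>

    struct my_obj {
        atomic_t refcount;          /* starts at 1: the owner's reference */
        struct completion free;     /* signalled on the last put */
    };

    static void my_obj_put(struct my_obj *obj)
    {
        if (atomic_dec_and_test(&obj->refcount))
            complete(&obj->free);
    }

    static void my_obj_destroy(struct my_obj *obj)
    {
        /* The object must already be unlinked from all lookup
         * structures (cf. mlx4_qp_remove()), so no new references
         * can appear.  Drop the owner's reference, then wait out
         * any handler still holding one of its own. */
        my_obj_put(obj);
        wait_for_completion(&obj->free);
        /* now safe to release the backing resources */
    }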
diff --git a/drivers/net/mlx4/reset.c b/drivers/net/mlx4/reset.c
new file mode 100644
index 000000000000..51eef8492e93
--- /dev/null
+++ b/drivers/net/mlx4/reset.c
@@ -0,0 +1,181 @@
1/*
2 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/init.h>
34#include <linux/errno.h>
35#include <linux/pci.h>
36#include <linux/delay.h>
37#include <linux/slab.h>
38
39#include "mlx4.h"
40
41int mlx4_reset(struct mlx4_dev *dev)
42{
43 void __iomem *reset;
44 u32 *hca_header = NULL;
45 int pcie_cap;
46 u16 devctl;
47 u16 linkctl;
48 u16 vendor;
49 unsigned long end;
50 u32 sem;
51 int i;
52 int err = 0;
53
54#define MLX4_RESET_BASE 0xf0000
55#define MLX4_RESET_SIZE 0x400
56#define MLX4_SEM_OFFSET 0x3fc
57#define MLX4_RESET_OFFSET 0x10
58#define MLX4_RESET_VALUE swab32(1)
59
60#define MLX4_SEM_TIMEOUT_JIFFIES (10 * HZ)
61#define MLX4_RESET_TIMEOUT_JIFFIES (2 * HZ)
62
63 /*
64 * Reset the chip. This is somewhat ugly because we have to
65 * save off the PCI header before reset and then restore it
66 * after the chip reboots. We skip config space offsets 22
67 * and 23 since those have a special meaning.
68 */
69
70 /* Do we need to save off the full 4K PCI Express header? */
71 hca_header = kmalloc(256, GFP_KERNEL);
72 if (!hca_header) {
73 err = -ENOMEM;
74 mlx4_err(dev, "Couldn't allocate memory to save HCA "
75 "PCI header, aborting.\n");
76 goto out;
77 }
78
79 pcie_cap = pci_find_capability(dev->pdev, PCI_CAP_ID_EXP);
80
81 for (i = 0; i < 64; ++i) {
82 if (i == 22 || i == 23)
83 continue;
84 if (pci_read_config_dword(dev->pdev, i * 4, hca_header + i)) {
85 err = -ENODEV;
86 mlx4_err(dev, "Couldn't save HCA "
87 "PCI header, aborting.\n");
88 goto out;
89 }
90 }
91
92 reset = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_RESET_BASE,
93 MLX4_RESET_SIZE);
94 if (!reset) {
95 err = -ENOMEM;
96 mlx4_err(dev, "Couldn't map HCA reset register, aborting.\n");
97 goto out;
98 }
99
100 /* grab HW semaphore to lock out flash updates */
101 end = jiffies + MLX4_SEM_TIMEOUT_JIFFIES;
102 do {
103 sem = readl(reset + MLX4_SEM_OFFSET);
104 if (!sem)
105 break;
106
107 msleep(1);
108 } while (time_before(jiffies, end));
109
110 if (sem) {
111 mlx4_err(dev, "Failed to obtain HW semaphore, aborting.\n");
112 err = -EAGAIN;
113 iounmap(reset);
114 goto out;
115 }
116
117 /* actually hit reset */
118 writel(MLX4_RESET_VALUE, reset + MLX4_RESET_OFFSET);
119 iounmap(reset);
120
121 end = jiffies + MLX4_RESET_TIMEOUT_JIFFIES;
122 do {
123 if (!pci_read_config_word(dev->pdev, PCI_VENDOR_ID, &vendor) &&
124 vendor != 0xffff)
125 break;
126
127 msleep(1);
128 } while (time_before(jiffies, end));
129
130 if (vendor == 0xffff) {
131 err = -ENODEV;
132 mlx4_err(dev, "PCI device did not come back after reset, "
133 "aborting.\n");
134 goto out;
135 }
136
137 /* Now restore the PCI headers */
138 if (pcie_cap) {
139 devctl = hca_header[(pcie_cap + PCI_EXP_DEVCTL) / 4];
140 if (pci_write_config_word(dev->pdev, pcie_cap + PCI_EXP_DEVCTL,
141 devctl)) {
142 err = -ENODEV;
143 mlx4_err(dev, "Couldn't restore HCA PCI Express "
144 "Device Control register, aborting.\n");
145 goto out;
146 }
147 linkctl = hca_header[(pcie_cap + PCI_EXP_LNKCTL) / 4];
148 if (pci_write_config_word(dev->pdev, pcie_cap + PCI_EXP_LNKCTL,
149 linkctl)) {
150 err = -ENODEV;
151 mlx4_err(dev, "Couldn't restore HCA PCI Express "
152 "Link control register, aborting.\n");
153 goto out;
154 }
155 }
156
157 for (i = 0; i < 16; ++i) {
158 if (i * 4 == PCI_COMMAND)
159 continue;
160
161 if (pci_write_config_dword(dev->pdev, i * 4, hca_header[i])) {
162 err = -ENODEV;
163 mlx4_err(dev, "Couldn't restore HCA reg %x, "
164 "aborting.\n", i);
165 goto out;
166 }
167 }
168
169 if (pci_write_config_dword(dev->pdev, PCI_COMMAND,
170 hca_header[PCI_COMMAND / 4])) {
171 err = -ENODEV;
172 mlx4_err(dev, "Couldn't restore HCA COMMAND, "
173 "aborting.\n");
174 goto out;
175 }
176
177out:
178 kfree(hca_header);
179
180 return err;
181}
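Both timed waits in mlx4_reset() above (grabbing the HW semaphore and probing for the vendor ID after reset) follow the standard jiffies-bounded polling shape. Pulled out as a stand-alone sketch, with device_ready() as a hypothetical predicate:

    #include <linux/jiffies.h>
    #include <linux/delay.h>

    static int wait_until_ready(void)
    {
        unsigned long end = jiffies + 2 * HZ;   /* 2 second budget */

        do {
            if (device_ready())     /* hypothetical readiness check */
                return 0;
            msleep(1);              /* sleep between polls, don't spin */
        } while (time_before(jiffies, end));

        return -EAGAIN;             /* deadline passed */
    }

time_before() handles jiffies wraparound correctly, which is why the loop compares against a precomputed deadline rather than counting iterations.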
diff --git a/drivers/net/mlx4/srq.c b/drivers/net/mlx4/srq.c
new file mode 100644
index 000000000000..2134f83aed87
--- /dev/null
+++ b/drivers/net/mlx4/srq.c
@@ -0,0 +1,227 @@
1/*
2 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/init.h>
34
35#include <linux/mlx4/cmd.h>
36
37#include "mlx4.h"
38#include "icm.h"
39
40struct mlx4_srq_context {
41 __be32 state_logsize_srqn;
42 u8 logstride;
43 u8 reserved1[3];
44 u8 pg_offset;
45 u8 reserved2[3];
46 u32 reserved3;
47 u8 log_page_size;
48 u8 reserved4[2];
49 u8 mtt_base_addr_h;
50 __be32 mtt_base_addr_l;
51 __be32 pd;
52 __be16 limit_watermark;
53 __be16 wqe_cnt;
54 u16 reserved5;
55 __be16 wqe_counter;
56 u32 reserved6;
57 __be64 db_rec_addr;
58};
59
60void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type)
61{
62 struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
63 struct mlx4_srq *srq;
64
65 spin_lock(&srq_table->lock);
66
67 srq = radix_tree_lookup(&srq_table->tree, srqn & (dev->caps.num_srqs - 1));
68 if (srq)
69 atomic_inc(&srq->refcount);
70
71 spin_unlock(&srq_table->lock);
72
73 if (!srq) {
74 mlx4_warn(dev, "Async event for bogus SRQ %08x\n", srqn);
75 return;
76 }
77
78 srq->event(srq, event_type);
79
80 if (atomic_dec_and_test(&srq->refcount))
81 complete(&srq->free);
82}
83
84static int mlx4_SW2HW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
85 int srq_num)
86{
87 return mlx4_cmd(dev, mailbox->dma, srq_num, 0, MLX4_CMD_SW2HW_SRQ,
88 MLX4_CMD_TIME_CLASS_A);
89}
90
91static int mlx4_HW2SW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
92 int srq_num)
93{
94 return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, srq_num,
95 mailbox ? 0 : 1, MLX4_CMD_HW2SW_SRQ,
96 MLX4_CMD_TIME_CLASS_A);
97}
98
99static int mlx4_ARM_SRQ(struct mlx4_dev *dev, int srq_num, int limit_watermark)
100{
101 return mlx4_cmd(dev, limit_watermark, srq_num, 0, MLX4_CMD_ARM_SRQ,
102 MLX4_CMD_TIME_CLASS_B);
103}
104
105int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, struct mlx4_mtt *mtt,
106 u64 db_rec, struct mlx4_srq *srq)
107{
108 struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
109 struct mlx4_cmd_mailbox *mailbox;
110 struct mlx4_srq_context *srq_context;
111 u64 mtt_addr;
112 int err;
113
114 srq->srqn = mlx4_bitmap_alloc(&srq_table->bitmap);
115 if (srq->srqn == -1)
116 return -ENOMEM;
117
118 err = mlx4_table_get(dev, &srq_table->table, srq->srqn);
119 if (err)
120 goto err_out;
121
122 err = mlx4_table_get(dev, &srq_table->cmpt_table, srq->srqn);
123 if (err)
124 goto err_put;
125
126 spin_lock_irq(&srq_table->lock);
127 err = radix_tree_insert(&srq_table->tree, srq->srqn, srq);
128 spin_unlock_irq(&srq_table->lock);
129 if (err)
130 goto err_cmpt_put;
131
132 mailbox = mlx4_alloc_cmd_mailbox(dev);
133 if (IS_ERR(mailbox)) {
134 err = PTR_ERR(mailbox);
135 goto err_radix;
136 }
137
138 srq_context = mailbox->buf;
139 memset(srq_context, 0, sizeof *srq_context);
140
141 srq_context->state_logsize_srqn = cpu_to_be32((ilog2(srq->max) << 24) |
142 srq->srqn);
143 srq_context->logstride = srq->wqe_shift - 4;
144 srq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
145
146 mtt_addr = mlx4_mtt_addr(dev, mtt);
147 srq_context->mtt_base_addr_h = mtt_addr >> 32;
148 srq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
149 srq_context->pd = cpu_to_be32(pdn);
150 srq_context->db_rec_addr = cpu_to_be64(db_rec);
151
152 err = mlx4_SW2HW_SRQ(dev, mailbox, srq->srqn);
153 mlx4_free_cmd_mailbox(dev, mailbox);
154 if (err)
155 goto err_radix;
156
157 atomic_set(&srq->refcount, 1);
158 init_completion(&srq->free);
159
160 return 0;
161
162err_radix:
163 spin_lock_irq(&srq_table->lock);
164 radix_tree_delete(&srq_table->tree, srq->srqn);
165 spin_unlock_irq(&srq_table->lock);
166
167err_cmpt_put:
168 mlx4_table_put(dev, &srq_table->cmpt_table, srq->srqn);
169
170err_put:
171 mlx4_table_put(dev, &srq_table->table, srq->srqn);
172
173err_out:
174 mlx4_bitmap_free(&srq_table->bitmap, srq->srqn);
175
176 return err;
177}
178EXPORT_SYMBOL_GPL(mlx4_srq_alloc);
179
180void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq)
181{
182 struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
183 int err;
184
185 err = mlx4_HW2SW_SRQ(dev, NULL, srq->srqn);
186 if (err)
187 mlx4_warn(dev, "HW2SW_SRQ failed (%d) for SRQN %06x\n", err, srq->srqn);
188
189 spin_lock_irq(&srq_table->lock);
190 radix_tree_delete(&srq_table->tree, srq->srqn);
191 spin_unlock_irq(&srq_table->lock);
192
193 if (atomic_dec_and_test(&srq->refcount))
194 complete(&srq->free);
195 wait_for_completion(&srq->free);
196
197 mlx4_table_put(dev, &srq_table->table, srq->srqn);
198 mlx4_bitmap_free(&srq_table->bitmap, srq->srqn);
199}
200EXPORT_SYMBOL_GPL(mlx4_srq_free);
201
202int mlx4_srq_arm(struct mlx4_dev *dev, struct mlx4_srq *srq, int limit_watermark)
203{
204 return mlx4_ARM_SRQ(dev, srq->srqn, limit_watermark);
205}
206EXPORT_SYMBOL_GPL(mlx4_srq_arm);
207
208int __devinit mlx4_init_srq_table(struct mlx4_dev *dev)
209{
210 struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
211 int err;
212
213 spin_lock_init(&srq_table->lock);
214 INIT_RADIX_TREE(&srq_table->tree, GFP_ATOMIC);
215
216 err = mlx4_bitmap_init(&srq_table->bitmap, dev->caps.num_srqs,
217 dev->caps.num_srqs - 1, dev->caps.reserved_srqs);
218 if (err)
219 return err;
220
221 return 0;
222}
223
224void mlx4_cleanup_srq_table(struct mlx4_dev *dev)
225{
226 mlx4_bitmap_cleanup(&mlx4_priv(dev)->srq_table.bitmap);
227}
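One detail worth noting in srq.c (and in qp.c above): INIT_RADIX_TREE(..., GFP_ATOMIC) is not incidental. radix_tree_insert() allocates interior nodes with the gfp mask recorded at tree initialisation, and every insert in these files happens under spin_lock_irq(), where only atomic allocations are legal. A condensed sketch of the pattern, reusing the names from this file:

    /* Tree initialised with GFP_ATOMIC so inserts are legal under a lock. */
    INIT_RADIX_TREE(&srq_table->tree, GFP_ATOMIC);

    spin_lock_irq(&srq_table->lock);
    err = radix_tree_insert(&srq_table->tree, srq->srqn, srq);
    spin_unlock_irq(&srq_table->lock);

    /* Event context looks the entry up with the number masked down to
     * the table size, presumably because the event can carry extra
     * high bits: */
    spin_lock(&srq_table->lock);
    srq = radix_tree_lookup(&srq_table->tree,
                            srqn & (dev->caps.num_srqs - 1));
    spin_unlock(&srq_table->lock);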
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index 223e0e6264ba..4cf0d3fcb519 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -131,7 +131,6 @@ static const char version[] __devinitdata =
131 KERN_INFO DRV_NAME " dp8381x driver, version " 131 KERN_INFO DRV_NAME " dp8381x driver, version "
132 DRV_VERSION ", " DRV_RELDATE "\n" 132 DRV_VERSION ", " DRV_RELDATE "\n"
133 KERN_INFO " originally by Donald Becker <becker@scyld.com>\n" 133 KERN_INFO " originally by Donald Becker <becker@scyld.com>\n"
134 KERN_INFO " http://www.scyld.com/network/natsemi.html\n"
135 KERN_INFO " 2.4.x kernel port by Jeff Garzik, Tjeerd Mulder\n"; 134 KERN_INFO " 2.4.x kernel port by Jeff Garzik, Tjeerd Mulder\n";
136 135
137MODULE_AUTHOR("Donald Becker <becker@scyld.com>"); 136MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
diff --git a/drivers/net/ne2k-pci.c b/drivers/net/ne2k-pci.c
index 589785d1e762..995c0a5d4066 100644
--- a/drivers/net/ne2k-pci.c
+++ b/drivers/net/ne2k-pci.c
@@ -63,8 +63,7 @@ static int options[MAX_UNITS];
63 63
64/* These identify the driver base version and may not be removed. */ 64/* These identify the driver base version and may not be removed. */
65static char version[] __devinitdata = 65static char version[] __devinitdata =
66KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " D. Becker/P. Gortmaker\n" 66KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " D. Becker/P. Gortmaker\n";
67KERN_INFO " http://www.scyld.com/network/ne2k-pci.html\n";
68 67
69#if defined(__powerpc__) 68#if defined(__powerpc__)
70#define inl_le(addr) le32_to_cpu(inl(addr)) 69#define inl_le(addr) le32_to_cpu(inl(addr))
diff --git a/drivers/net/pcmcia/ibmtr_cs.c b/drivers/net/pcmcia/ibmtr_cs.c
index 1060154ae750..4ecb8ca5a992 100644
--- a/drivers/net/pcmcia/ibmtr_cs.c
+++ b/drivers/net/pcmcia/ibmtr_cs.c
@@ -189,16 +189,20 @@ static void ibmtr_detach(struct pcmcia_device *link)
189{ 189{
190 struct ibmtr_dev_t *info = link->priv; 190 struct ibmtr_dev_t *info = link->priv;
191 struct net_device *dev = info->dev; 191 struct net_device *dev = info->dev;
192 struct tok_info *ti = netdev_priv(dev);
192 193
193 DEBUG(0, "ibmtr_detach(0x%p)\n", link); 194 DEBUG(0, "ibmtr_detach(0x%p)\n", link);
195
196 /*
197 * When the card removal interrupt hits tok_interrupt(),
198 * bail out early, so we don't crash the machine
199 */
200 ti->sram_phys |= 1;
194 201
195 if (link->dev_node) 202 if (link->dev_node)
196 unregister_netdev(dev); 203 unregister_netdev(dev);
197 204
198 { 205 del_timer_sync(&(ti->tr_timer));
199 struct tok_info *ti = netdev_priv(dev);
200 del_timer_sync(&(ti->tr_timer));
201 }
202 206
203 ibmtr_release(link); 207 ibmtr_release(link);
204 208
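The ibmtr_cs change above sets the low bit of ti->sram_phys as a detach sentinel before unregistering the device, so a card-removal interrupt that races with teardown can be recognised and ignored. The matching guard in the interrupt handler would look roughly like this (a sketch of the shape only; the real check lives in tok_interrupt() in the ibmtr driver):

    static irqreturn_t tok_interrupt(int irq, void *dev_id)    /* sketch */
    {
        struct net_device *dev = dev_id;
        struct tok_info *ti = netdev_priv(dev);

        /* Detach in progress: the PCMCIA layer tagged sram_phys, and
         * the card may already be gone, so touch nothing and leave. */
        if (ti->sram_phys & 1)
            return IRQ_NONE;

        /* ... normal interrupt handling ... */
        return IRQ_HANDLED;
    }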
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index f994f129f3d8..c0d3101eb6a0 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -3,6 +3,7 @@
3# 3#
4 4
5menu "PHY device support" 5menu "PHY device support"
6 depends on !S390
6 7
7config PHYLIB 8config PHYLIB
8 tristate "PHY Device support and infrastructure" 9 tristate "PHY Device support and infrastructure"
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index eed433d6056a..f71dab347667 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -662,10 +662,10 @@ int phy_stop_interrupts(struct phy_device *phydev)
662 phy_error(phydev); 662 phy_error(phydev);
663 663
664 /* 664 /*
665 * Finish any pending work; we might have been scheduled 665 * Finish any pending work; we might have been scheduled to be called
666 * to be called from keventd ourselves, though. 666 * from keventd ourselves, but cancel_work_sync() handles that.
667 */ 667 */
668 run_scheduled_work(&phydev->phy_queue); 668 cancel_work_sync(&phydev->phy_queue);
669 669
670 free_irq(phydev->irq, phydev); 670 free_irq(phydev->irq, phydev);
671 671
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index b07da1054add..e0489578945d 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -3594,7 +3594,9 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
3594 skge->duplex = -1; 3594 skge->duplex = -1;
3595 skge->speed = -1; 3595 skge->speed = -1;
3596 skge->advertising = skge_supported_modes(hw); 3596 skge->advertising = skge_supported_modes(hw);
3597 skge->wol = pci_wake_enabled(hw->pdev) ? wol_supported(hw) : 0; 3597
3598 if (pci_wake_enabled(hw->pdev))
3599 skge->wol = wol_supported(hw) & WAKE_MAGIC;
3598 3600
3599 hw->dev[port] = dev; 3601 hw->dev[port] = dev;
3600 3602
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index f51ba31970aa..e1f912d04043 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -110,8 +110,7 @@ static char *media[MAX_UNITS];
110 110
111/* These identify the driver base version and may not be removed. */ 111/* These identify the driver base version and may not be removed. */
112static char version[] = 112static char version[] =
113KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n" 113KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n";
114KERN_INFO " http://www.scyld.com/network/sundance.html\n";
115 114
116MODULE_AUTHOR("Donald Becker <becker@scyld.com>"); 115MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
117MODULE_DESCRIPTION("Sundance Alta Ethernet driver"); 116MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index e5e901ecd808..923b9c725cc3 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -3716,10 +3716,8 @@ static void tg3_reset_task(struct work_struct *work)
3716 unsigned int restart_timer; 3716 unsigned int restart_timer;
3717 3717
3718 tg3_full_lock(tp, 0); 3718 tg3_full_lock(tp, 0);
3719 tp->tg3_flags |= TG3_FLAG_IN_RESET_TASK;
3720 3719
3721 if (!netif_running(tp->dev)) { 3720 if (!netif_running(tp->dev)) {
3722 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3723 tg3_full_unlock(tp); 3721 tg3_full_unlock(tp);
3724 return; 3722 return;
3725 } 3723 }
@@ -3750,8 +3748,6 @@ static void tg3_reset_task(struct work_struct *work)
3750 mod_timer(&tp->timer, jiffies + 1); 3748 mod_timer(&tp->timer, jiffies + 1);
3751 3749
3752out: 3750out:
3753 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3754
3755 tg3_full_unlock(tp); 3751 tg3_full_unlock(tp);
3756} 3752}
3757 3753
@@ -7390,12 +7386,7 @@ static int tg3_close(struct net_device *dev)
7390{ 7386{
7391 struct tg3 *tp = netdev_priv(dev); 7387 struct tg3 *tp = netdev_priv(dev);
7392 7388
7393 /* Calling flush_scheduled_work() may deadlock because 7389 cancel_work_sync(&tp->reset_task);
7394 * linkwatch_event() may be on the workqueue and it will try to get
7395 * the rtnl_lock which we are holding.
7396 */
7397 while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK)
7398 msleep(1);
7399 7390
7400 netif_stop_queue(dev); 7391 netif_stop_queue(dev);
7401 7392
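This tg3 change is the same conversion as the phy.c hunk above: a hand-rolled "work in progress" flag polled with msleep() gives way to cancel_work_sync(), which removes a queued work item and, if the work function is already running, waits for it to return. The before/after shape in isolation:

    /* Before: the work function sets/clears a flag, the closer spins. */
    while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK)
        msleep(1);

    /* After: one call, no flag.  Safe even if the work was never
     * scheduled, and it cannot miss a run that has just started. */
    cancel_work_sync(&tp->reset_task);

This also sidesteps the flush_scheduled_work() deadlock the deleted comment warned about: cancel_work_sync() waits only for this one work item, not for the whole shared workqueue, which may contain linkwatch_event() blocked on the rtnl_lock that tg3_close() already holds.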
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 4d334cf5a243..bd9f4f428e5b 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2228,7 +2228,7 @@ struct tg3 {
2228#define TG3_FLAG_JUMBO_RING_ENABLE 0x00800000 2228#define TG3_FLAG_JUMBO_RING_ENABLE 0x00800000
2229#define TG3_FLAG_10_100_ONLY 0x01000000 2229#define TG3_FLAG_10_100_ONLY 0x01000000
2230#define TG3_FLAG_PAUSE_AUTONEG 0x02000000 2230#define TG3_FLAG_PAUSE_AUTONEG 0x02000000
2231#define TG3_FLAG_IN_RESET_TASK 0x04000000 2231
2232#define TG3_FLAG_40BIT_DMA_BUG 0x08000000 2232#define TG3_FLAG_40BIT_DMA_BUG 0x08000000
2233#define TG3_FLAG_BROKEN_CHECKSUMS 0x10000000 2233#define TG3_FLAG_BROKEN_CHECKSUMS 0x10000000
2234#define TG3_FLAG_SUPPORT_MSI 0x20000000 2234#define TG3_FLAG_SUPPORT_MSI 0x20000000
diff --git a/drivers/net/tulip/interrupt.c b/drivers/net/tulip/interrupt.c
index 9b08afbd1f65..ea896777bcaf 100644
--- a/drivers/net/tulip/interrupt.c
+++ b/drivers/net/tulip/interrupt.c
@@ -269,7 +269,7 @@ done:
269 This would turn on IM for devices that is not contributing 269 This would turn on IM for devices that is not contributing
270 to backlog congestion with unnecessary latency. 270 to backlog congestion with unnecessary latency.
271 271
272 We monitor the the device RX-ring and have: 272 We monitor the device RX-ring and have:
273 273
274 HW Interrupt Mitigation either ON or OFF. 274 HW Interrupt Mitigation either ON or OFF.
275 275
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index fa440706fb4a..38f3b99716b8 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -1021,7 +1021,7 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev)
1021 np->tx_ring[entry].length |= DescEndRing; 1021 np->tx_ring[entry].length |= DescEndRing;
1022 1022
1023 /* Now acquire the irq spinlock. 1023 /* Now acquire the irq spinlock.
1024 * The difficult race is the the ordering between 1024 * The difficult race is the ordering between
1025 * increasing np->cur_tx and setting DescOwned: 1025 * increasing np->cur_tx and setting DescOwned:
1026 * - if np->cur_tx is increased first the interrupt 1026 * - if np->cur_tx is increased first the interrupt
1027 * handler could consider the packet as transmitted 1027 * handler could consider the packet as transmitted
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c
index 985a1810ca59..2470b1ee33c0 100644
--- a/drivers/net/tulip/xircom_cb.c
+++ b/drivers/net/tulip/xircom_cb.c
@@ -1043,7 +1043,7 @@ static int enable_promisc(struct xircom_private *card)
1043 1043
1044 1044
1045/* 1045/*
1046link_status() checks the the links status and will return 0 for no link, 10 for 10mbit link and 100 for.. guess what. 1046link_status() checks the links status and will return 0 for no link, 10 for 10mbit link and 100 for.. guess what.
1047 1047
1048Must be called in locked state with interrupts disabled 1048Must be called in locked state with interrupts disabled
1049*/ 1049*/
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index f2dd7763cd0b..f72573594121 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -639,7 +639,7 @@ typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd,
639 639
640 typhoon_inc_cmd_index(&ring->lastWrite, num_cmd); 640 typhoon_inc_cmd_index(&ring->lastWrite, num_cmd);
641 641
642 /* "I feel a presence... another warrior is on the the mesa." 642 /* "I feel a presence... another warrior is on the mesa."
643 */ 643 */
644 wmb(); 644 wmb();
645 iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY); 645 iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
diff --git a/drivers/usb/net/Kconfig b/drivers/net/usb/Kconfig
index 3de564b23147..3de564b23147 100644
--- a/drivers/usb/net/Kconfig
+++ b/drivers/net/usb/Kconfig
diff --git a/drivers/usb/net/Makefile b/drivers/net/usb/Makefile
index 595a539f8384..595a539f8384 100644
--- a/drivers/usb/net/Makefile
+++ b/drivers/net/usb/Makefile
diff --git a/drivers/usb/net/asix.c b/drivers/net/usb/asix.c
index d5ef97bc4d01..d5ef97bc4d01 100644
--- a/drivers/usb/net/asix.c
+++ b/drivers/net/usb/asix.c
diff --git a/drivers/usb/net/catc.c b/drivers/net/usb/catc.c
index 86e90c59d551..86e90c59d551 100644
--- a/drivers/usb/net/catc.c
+++ b/drivers/net/usb/catc.c
diff --git a/drivers/usb/net/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 5a21f06bf8a5..5a21f06bf8a5 100644
--- a/drivers/usb/net/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
diff --git a/drivers/usb/net/cdc_subset.c b/drivers/net/usb/cdc_subset.c
index bc62b012602b..bc62b012602b 100644
--- a/drivers/usb/net/cdc_subset.c
+++ b/drivers/net/usb/cdc_subset.c
diff --git a/drivers/usb/net/dm9601.c b/drivers/net/usb/dm9601.c
index a67638601477..a67638601477 100644
--- a/drivers/usb/net/dm9601.c
+++ b/drivers/net/usb/dm9601.c
diff --git a/drivers/usb/net/gl620a.c b/drivers/net/usb/gl620a.c
index 031cf5ca4dbb..031cf5ca4dbb 100644
--- a/drivers/usb/net/gl620a.c
+++ b/drivers/net/usb/gl620a.c
diff --git a/drivers/usb/net/kaweth.c b/drivers/net/usb/kaweth.c
index 60d29440f316..60d29440f316 100644
--- a/drivers/usb/net/kaweth.c
+++ b/drivers/net/usb/kaweth.c
diff --git a/drivers/usb/net/kawethfw.h b/drivers/net/usb/kawethfw.h
index cf85fcb0d1a6..cf85fcb0d1a6 100644
--- a/drivers/usb/net/kawethfw.h
+++ b/drivers/net/usb/kawethfw.h
diff --git a/drivers/usb/net/mcs7830.c b/drivers/net/usb/mcs7830.c
index 6240b978fe3d..6240b978fe3d 100644
--- a/drivers/usb/net/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
diff --git a/drivers/usb/net/net1080.c b/drivers/net/usb/net1080.c
index 19bf8dae70c9..19bf8dae70c9 100644
--- a/drivers/usb/net/net1080.c
+++ b/drivers/net/usb/net1080.c
diff --git a/drivers/usb/net/pegasus.c b/drivers/net/usb/pegasus.c
index a05fd97e5bc2..a05fd97e5bc2 100644
--- a/drivers/usb/net/pegasus.c
+++ b/drivers/net/usb/pegasus.c
diff --git a/drivers/usb/net/pegasus.h b/drivers/net/usb/pegasus.h
index c7467823cd1c..c7467823cd1c 100644
--- a/drivers/usb/net/pegasus.h
+++ b/drivers/net/usb/pegasus.h
diff --git a/drivers/usb/net/plusb.c b/drivers/net/usb/plusb.c
index 45300939d185..45300939d185 100644
--- a/drivers/usb/net/plusb.c
+++ b/drivers/net/usb/plusb.c
diff --git a/drivers/usb/net/rndis_host.c b/drivers/net/usb/rndis_host.c
index 980e4aaa97aa..980e4aaa97aa 100644
--- a/drivers/usb/net/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
diff --git a/drivers/usb/net/rtl8150.c b/drivers/net/usb/rtl8150.c
index fa598f0340cf..fa598f0340cf 100644
--- a/drivers/usb/net/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
diff --git a/drivers/usb/net/usbnet.c b/drivers/net/usb/usbnet.c
index f9cd42d058b0..f9cd42d058b0 100644
--- a/drivers/usb/net/usbnet.c
+++ b/drivers/net/usb/usbnet.c
diff --git a/drivers/usb/net/usbnet.h b/drivers/net/usb/usbnet.h
index cbb53e065d6c..82db5a8e528e 100644
--- a/drivers/usb/net/usbnet.h
+++ b/drivers/net/usb/usbnet.h
@@ -129,7 +129,7 @@ extern void usbnet_disconnect(struct usb_interface *);
129 129
130 130
131/* Drivers that reuse some of the standard USB CDC infrastructure 131/* Drivers that reuse some of the standard USB CDC infrastructure
132 * (notably, using multiple interfaces according to the the CDC 132 * (notably, using multiple interfaces according to the CDC
133 * union descriptor) get some helper code. 133 * union descriptor) get some helper code.
134 */ 134 */
135struct cdc_state { 135struct cdc_state {
diff --git a/drivers/usb/net/zaurus.c b/drivers/net/usb/zaurus.c
index 9f98e8ce487a..9f98e8ce487a 100644
--- a/drivers/usb/net/zaurus.c
+++ b/drivers/net/usb/zaurus.c
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index e273347dc606..e3f5bb0fe603 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -3,6 +3,7 @@
3# 3#
4 4
5menu "Wireless LAN" 5menu "Wireless LAN"
6 depends on !S390
6 7
7config WLAN_PRE80211 8config WLAN_PRE80211
8 bool "Wireless LAN (pre-802.11)" 9 bool "Wireless LAN (pre-802.11)"
diff --git a/drivers/net/wireless/airport.c b/drivers/net/wireless/airport.c
index 38fac3bbcd82..7d5b8c2cc614 100644
--- a/drivers/net/wireless/airport.c
+++ b/drivers/net/wireless/airport.c
@@ -149,7 +149,7 @@ static int airport_hard_reset(struct orinoco_private *priv)
149 /* Vitally important. If we don't do this it seems we get an 149 /* Vitally important. If we don't do this it seems we get an
150 * interrupt somewhere during the power cycle, since 150 * interrupt somewhere during the power cycle, since
151 * hw_unavailable is already set it doesn't get ACKed, we get 151 * hw_unavailable is already set it doesn't get ACKed, we get
152 * into an interrupt loop and the the PMU decides to turn us 152 * into an interrupt loop and the PMU decides to turn us
153 * off. */ 153 * off. */
154 disable_irq(dev->irq); 154 disable_irq(dev->irq);
155 155
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx.h b/drivers/net/wireless/bcm43xx/bcm43xx.h
index f8483c179e4c..10e07e865426 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx.h
+++ b/drivers/net/wireless/bcm43xx/bcm43xx.h
@@ -658,12 +658,6 @@ struct bcm43xx_pio {
658 658
659#define BCM43xx_MAX_80211_CORES 2 659#define BCM43xx_MAX_80211_CORES 2
660 660
661#ifdef CONFIG_BCM947XX
662#define core_offset(bcm) (bcm)->current_core_offset
663#else
664#define core_offset(bcm) 0
665#endif
666
667/* Generic information about a core. */ 661/* Generic information about a core. */
668struct bcm43xx_coreinfo { 662struct bcm43xx_coreinfo {
669 u8 available:1, 663 u8 available:1,
@@ -789,10 +783,6 @@ struct bcm43xx_private {
789 783
790 /* The currently active core. */ 784 /* The currently active core. */
791 struct bcm43xx_coreinfo *current_core; 785 struct bcm43xx_coreinfo *current_core;
792#ifdef CONFIG_BCM947XX
793 /** current core memory offset */
794 u32 current_core_offset;
795#endif
796 struct bcm43xx_coreinfo *active_80211_core; 786 struct bcm43xx_coreinfo *active_80211_core;
797 /* coreinfo structs for all possible cores follow. 787 /* coreinfo structs for all possible cores follow.
798 * Note that a core might not exist. 788 * Note that a core might not exist.
@@ -943,25 +933,25 @@ struct bcm43xx_lopair * bcm43xx_get_lopair(struct bcm43xx_phyinfo *phy,
943static inline 933static inline
944u16 bcm43xx_read16(struct bcm43xx_private *bcm, u16 offset) 934u16 bcm43xx_read16(struct bcm43xx_private *bcm, u16 offset)
945{ 935{
946 return ioread16(bcm->mmio_addr + core_offset(bcm) + offset); 936 return ioread16(bcm->mmio_addr + offset);
947} 937}
948 938
949static inline 939static inline
950void bcm43xx_write16(struct bcm43xx_private *bcm, u16 offset, u16 value) 940void bcm43xx_write16(struct bcm43xx_private *bcm, u16 offset, u16 value)
951{ 941{
952 iowrite16(value, bcm->mmio_addr + core_offset(bcm) + offset); 942 iowrite16(value, bcm->mmio_addr + offset);
953} 943}
954 944
955static inline 945static inline
956u32 bcm43xx_read32(struct bcm43xx_private *bcm, u16 offset) 946u32 bcm43xx_read32(struct bcm43xx_private *bcm, u16 offset)
957{ 947{
958 return ioread32(bcm->mmio_addr + core_offset(bcm) + offset); 948 return ioread32(bcm->mmio_addr + offset);
959} 949}
960 950
961static inline 951static inline
962void bcm43xx_write32(struct bcm43xx_private *bcm, u16 offset, u32 value) 952void bcm43xx_write32(struct bcm43xx_private *bcm, u16 offset, u32 value)
963{ 953{
964 iowrite32(value, bcm->mmio_addr + core_offset(bcm) + offset); 954 iowrite32(value, bcm->mmio_addr + offset);
965} 955}
966 956
967static inline 957static inline
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_dma.c b/drivers/net/wireless/bcm43xx/bcm43xx_dma.c
index e3d2e61a31ee..1f7731fcfbd5 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_dma.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_dma.c
@@ -660,10 +660,6 @@ struct bcm43xx_dmaring * bcm43xx_setup_dmaring(struct bcm43xx_private *bcm,
660 ring->routing = BCM43xx_DMA32_CLIENTTRANS; 660 ring->routing = BCM43xx_DMA32_CLIENTTRANS;
661 if (dma64) 661 if (dma64)
662 ring->routing = BCM43xx_DMA64_CLIENTTRANS; 662 ring->routing = BCM43xx_DMA64_CLIENTTRANS;
663#ifdef CONFIG_BCM947XX
664 if (bcm->pci_dev->bus->number == 0)
665 ring->routing = dma64 ? BCM43xx_DMA64_NOTRANS : BCM43xx_DMA32_NOTRANS;
666#endif
667 663
668 ring->bcm = bcm; 664 ring->bcm = bcm;
669 ring->nr_slots = nr_slots; 665 ring->nr_slots = nr_slots;
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
index 5e96bca6730a..ef6b253a92ce 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_main.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
@@ -61,10 +61,6 @@ MODULE_AUTHOR("Stefano Brivio");
61MODULE_AUTHOR("Michael Buesch"); 61MODULE_AUTHOR("Michael Buesch");
62MODULE_LICENSE("GPL"); 62MODULE_LICENSE("GPL");
63 63
64#ifdef CONFIG_BCM947XX
65extern char *nvram_get(char *name);
66#endif
67
68#if defined(CONFIG_BCM43XX_DMA) && defined(CONFIG_BCM43XX_PIO) 64#if defined(CONFIG_BCM43XX_DMA) && defined(CONFIG_BCM43XX_PIO)
69static int modparam_pio; 65static int modparam_pio;
70module_param_named(pio, modparam_pio, int, 0444); 66module_param_named(pio, modparam_pio, int, 0444);
@@ -142,10 +138,6 @@ MODULE_PARM_DESC(fwpostfix, "Postfix for .fw files. Useful for using multiple fi
142 { PCI_VENDOR_ID_BROADCOM, 0x4324, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 138 { PCI_VENDOR_ID_BROADCOM, 0x4324, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
143 /* Broadcom 43XG 802.11b/g */ 139 /* Broadcom 43XG 802.11b/g */
144 { PCI_VENDOR_ID_BROADCOM, 0x4325, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 140 { PCI_VENDOR_ID_BROADCOM, 0x4325, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
145#ifdef CONFIG_BCM947XX
146 /* SB bus on BCM947xx */
147 { PCI_VENDOR_ID_BROADCOM, 0x0800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
148#endif
149 { 0 }, 141 { 0 },
150}; 142};
151MODULE_DEVICE_TABLE(pci, bcm43xx_pci_tbl); 143MODULE_DEVICE_TABLE(pci, bcm43xx_pci_tbl);
@@ -786,9 +778,6 @@ static int bcm43xx_sprom_extract(struct bcm43xx_private *bcm)
786{ 778{
787 u16 value; 779 u16 value;
788 u16 *sprom; 780 u16 *sprom;
789#ifdef CONFIG_BCM947XX
790 char *c;
791#endif
792 781
793 sprom = kzalloc(BCM43xx_SPROM_SIZE * sizeof(u16), 782 sprom = kzalloc(BCM43xx_SPROM_SIZE * sizeof(u16),
794 GFP_KERNEL); 783 GFP_KERNEL);
@@ -796,28 +785,7 @@ static int bcm43xx_sprom_extract(struct bcm43xx_private *bcm)
796 printk(KERN_ERR PFX "sprom_extract OOM\n"); 785 printk(KERN_ERR PFX "sprom_extract OOM\n");
797 return -ENOMEM; 786 return -ENOMEM;
798 } 787 }
799#ifdef CONFIG_BCM947XX
800 sprom[BCM43xx_SPROM_BOARDFLAGS2] = atoi(nvram_get("boardflags2"));
801 sprom[BCM43xx_SPROM_BOARDFLAGS] = atoi(nvram_get("boardflags"));
802
803 if ((c = nvram_get("il0macaddr")) != NULL)
804 e_aton(c, (char *) &(sprom[BCM43xx_SPROM_IL0MACADDR]));
805
806 if ((c = nvram_get("et1macaddr")) != NULL)
807 e_aton(c, (char *) &(sprom[BCM43xx_SPROM_ET1MACADDR]));
808
809 sprom[BCM43xx_SPROM_PA0B0] = atoi(nvram_get("pa0b0"));
810 sprom[BCM43xx_SPROM_PA0B1] = atoi(nvram_get("pa0b1"));
811 sprom[BCM43xx_SPROM_PA0B2] = atoi(nvram_get("pa0b2"));
812
813 sprom[BCM43xx_SPROM_PA1B0] = atoi(nvram_get("pa1b0"));
814 sprom[BCM43xx_SPROM_PA1B1] = atoi(nvram_get("pa1b1"));
815 sprom[BCM43xx_SPROM_PA1B2] = atoi(nvram_get("pa1b2"));
816
817 sprom[BCM43xx_SPROM_BOARDREV] = atoi(nvram_get("boardrev"));
818#else
819 bcm43xx_sprom_read(bcm, sprom); 788 bcm43xx_sprom_read(bcm, sprom);
820#endif
821 789
822 /* boardflags2 */ 790 /* boardflags2 */
823 value = sprom[BCM43xx_SPROM_BOARDFLAGS2]; 791 value = sprom[BCM43xx_SPROM_BOARDFLAGS2];
@@ -1225,12 +1193,6 @@ static int _switch_core(struct bcm43xx_private *bcm, int core)
1225 goto error; 1193 goto error;
1226 udelay(10); 1194 udelay(10);
1227 } 1195 }
1228#ifdef CONFIG_BCM947XX
1229 if (bcm->pci_dev->bus->number == 0)
1230 bcm->current_core_offset = 0x1000 * core;
1231 else
1232 bcm->current_core_offset = 0;
1233#endif
1234 1196
1235 return 0; 1197 return 0;
1236error: 1198error:
@@ -1387,19 +1349,6 @@ void bcm43xx_wireless_core_reset(struct bcm43xx_private *bcm, int connect_phy)
1387 1349
1388 if ((bcm43xx_core_enabled(bcm)) && 1350 if ((bcm43xx_core_enabled(bcm)) &&
1389 !bcm43xx_using_pio(bcm)) { 1351 !bcm43xx_using_pio(bcm)) {
1390//FIXME: Do we _really_ want #ifndef CONFIG_BCM947XX here?
1391#if 0
1392#ifndef CONFIG_BCM947XX
1393 /* reset all used DMA controllers. */
1394 bcm43xx_dmacontroller_tx_reset(bcm, BCM43xx_MMIO_DMA1_BASE);
1395 bcm43xx_dmacontroller_tx_reset(bcm, BCM43xx_MMIO_DMA2_BASE);
1396 bcm43xx_dmacontroller_tx_reset(bcm, BCM43xx_MMIO_DMA3_BASE);
1397 bcm43xx_dmacontroller_tx_reset(bcm, BCM43xx_MMIO_DMA4_BASE);
1398 bcm43xx_dmacontroller_rx_reset(bcm, BCM43xx_MMIO_DMA1_BASE);
1399 if (bcm->current_core->rev < 5)
1400 bcm43xx_dmacontroller_rx_reset(bcm, BCM43xx_MMIO_DMA4_BASE);
1401#endif
1402#endif
1403 } 1352 }
1404 if (bcm43xx_status(bcm) == BCM43xx_STAT_SHUTTINGDOWN) { 1353 if (bcm43xx_status(bcm) == BCM43xx_STAT_SHUTTINGDOWN) {
1405 bcm43xx_write32(bcm, BCM43xx_MMIO_STATUS_BITFIELD, 1354 bcm43xx_write32(bcm, BCM43xx_MMIO_STATUS_BITFIELD,
@@ -2140,32 +2089,11 @@ out:
2140 return err; 2089 return err;
2141} 2090}
2142 2091
2143#ifdef CONFIG_BCM947XX
2144static struct pci_device_id bcm43xx_47xx_ids[] = {
2145 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4324) },
2146 { 0 }
2147};
2148#endif
2149
2150static int bcm43xx_initialize_irq(struct bcm43xx_private *bcm) 2092static int bcm43xx_initialize_irq(struct bcm43xx_private *bcm)
2151{ 2093{
2152 int err; 2094 int err;
2153 2095
2154 bcm->irq = bcm->pci_dev->irq; 2096 bcm->irq = bcm->pci_dev->irq;
2155#ifdef CONFIG_BCM947XX
2156 if (bcm->pci_dev->bus->number == 0) {
2157 struct pci_dev *d;
2158 struct pci_device_id *id;
2159 for (id = bcm43xx_47xx_ids; id->vendor; id++) {
2160 d = pci_get_device(id->vendor, id->device, NULL);
2161 if (d != NULL) {
2162 bcm->irq = d->irq;
2163 pci_dev_put(d);
2164 break;
2165 }
2166 }
2167 }
2168#endif
2169 err = request_irq(bcm->irq, bcm43xx_interrupt_handler, 2097 err = request_irq(bcm->irq, bcm43xx_interrupt_handler,
2170 IRQF_SHARED, KBUILD_MODNAME, bcm); 2098 IRQF_SHARED, KBUILD_MODNAME, bcm);
2171 if (err) 2099 if (err)
@@ -2645,10 +2573,6 @@ static int bcm43xx_probe_cores(struct bcm43xx_private *bcm)
2645 chip_id_16 = 0x4610; 2573 chip_id_16 = 0x4610;
2646 else if ((pci_device >= 0x4710) && (pci_device <= 0x4715)) 2574 else if ((pci_device >= 0x4710) && (pci_device <= 0x4715))
2647 chip_id_16 = 0x4710; 2575 chip_id_16 = 0x4710;
2648#ifdef CONFIG_BCM947XX
2649 else if ((pci_device >= 0x4320) && (pci_device <= 0x4325))
2650 chip_id_16 = 0x4309;
2651#endif
2652 else { 2576 else {
2653 printk(KERN_ERR PFX "Could not determine Chip ID\n"); 2577 printk(KERN_ERR PFX "Could not determine Chip ID\n");
2654 return -ENODEV; 2578 return -ENODEV;
@@ -4144,11 +4068,6 @@ static int __devinit bcm43xx_init_one(struct pci_dev *pdev,
4144 struct bcm43xx_private *bcm; 4068 struct bcm43xx_private *bcm;
4145 int err; 4069 int err;
4146 4070
4147#ifdef CONFIG_BCM947XX
4148 if ((pdev->bus->number == 0) && (pdev->device != 0x0800))
4149 return -ENODEV;
4150#endif
4151
4152#ifdef DEBUG_SINGLE_DEVICE_ONLY 4071#ifdef DEBUG_SINGLE_DEVICE_ONLY
4153 if (strcmp(pci_name(pdev), DEBUG_SINGLE_DEVICE_ONLY)) 4072 if (strcmp(pci_name(pdev), DEBUG_SINGLE_DEVICE_ONLY))
4154 return -ENODEV; 4073 return -ENODEV;
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.h b/drivers/net/wireless/bcm43xx/bcm43xx_main.h
index f76357178e4d..c8f3c532bab5 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_main.h
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.h
@@ -33,25 +33,6 @@
33 33
34#include "bcm43xx.h" 34#include "bcm43xx.h"
35 35
36#ifdef CONFIG_BCM947XX
37#define atoi(str) simple_strtoul(((str != NULL) ? str : ""), NULL, 0)
38
39static inline void e_aton(char *str, char *dest)
40{
41 int i = 0;
42 u16 *d = (u16 *) dest;
43
44 for (;;) {
45 dest[i++] = (char) simple_strtoul(str, NULL, 16);
46 str += 2;
47 if (!*str++ || i == 6)
48 break;
49 }
50 for (i = 0; i < 3; i++)
51 d[i] = cpu_to_be16(d[i]);
52}
53#endif
54
55#define P4D_BYT3S(magic, nr_bytes) u8 __p4dding##magic[nr_bytes] 36#define P4D_BYT3S(magic, nr_bytes) u8 __p4dding##magic[nr_bytes]
56#define P4D_BYTES(line, nr_bytes) P4D_BYT3S(line, nr_bytes) 37#define P4D_BYTES(line, nr_bytes) P4D_BYT3S(line, nr_bytes)
57/* Magic helper macro to pad structures. Ignore those above. It's magic. */ 38/* Magic helper macro to pad structures. Ignore those above. It's magic. */
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index 841b3c136ad9..283be4a70524 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -3054,7 +3054,7 @@ static const iw_handler prism54_handler[] = {
3054 (iw_handler) prism54_set_wap, /* SIOCSIWAP */ 3054 (iw_handler) prism54_set_wap, /* SIOCSIWAP */
3055 (iw_handler) prism54_get_wap, /* SIOCGIWAP */ 3055 (iw_handler) prism54_get_wap, /* SIOCGIWAP */
3056 (iw_handler) NULL, /* -- hole -- */ 3056 (iw_handler) NULL, /* -- hole -- */
3057 (iw_handler) NULL, /* SIOCGIWAPLIST depreciated */ 3057 (iw_handler) NULL, /* SIOCGIWAPLIST deprecated */
3058 (iw_handler) prism54_set_scan, /* SIOCSIWSCAN */ 3058 (iw_handler) prism54_set_scan, /* SIOCSIWSCAN */
3059 (iw_handler) prism54_get_scan, /* SIOCGIWSCAN */ 3059 (iw_handler) prism54_get_scan, /* SIOCGIWSCAN */
3060 (iw_handler) prism54_set_essid, /* SIOCSIWESSID */ 3060 (iw_handler) prism54_set_essid, /* SIOCSIWESSID */
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index a037b11dac9d..084795355b74 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -115,7 +115,7 @@ isl_upload_firmware(islpci_private *priv)
115 ISL38XX_MEMORY_WINDOW_SIZE : fw_len; 115 ISL38XX_MEMORY_WINDOW_SIZE : fw_len;
116 u32 __iomem *dev_fw_ptr = device_base + ISL38XX_DIRECT_MEM_WIN; 116 u32 __iomem *dev_fw_ptr = device_base + ISL38XX_DIRECT_MEM_WIN;
117 117
118 /* set the cards base address for writting the data */ 118 /* set the card's base address for writing the data */
119 isl38xx_w32_flush(device_base, reg, 119 isl38xx_w32_flush(device_base, reg,
120 ISL38XX_DIR_MEM_BASE_REG); 120 ISL38XX_DIR_MEM_BASE_REG);
121 wmb(); /* be paranoid */ 121 wmb(); /* be paranoid */
diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c
index 67b867f837ca..5740d4d4267c 100644
--- a/drivers/net/wireless/wavelan_cs.c
+++ b/drivers/net/wireless/wavelan_cs.c
@@ -176,7 +176,7 @@ psa_write(struct net_device * dev,
176 volatile u_char __iomem *verify = lp->mem + PSA_ADDR + 176 volatile u_char __iomem *verify = lp->mem + PSA_ADDR +
177 (psaoff(0, psa_comp_number) << 1); 177 (psaoff(0, psa_comp_number) << 1);
178 178
179 /* Authorize writting to PSA */ 179 /* Authorize writing to PSA */
180 hacr_write(base, HACR_PWR_STAT | HACR_ROM_WEN); 180 hacr_write(base, HACR_PWR_STAT | HACR_ROM_WEN);
181 181
182 while(n-- > 0) 182 while(n-- > 0)
@@ -1676,7 +1676,7 @@ wv_set_frequency(u_long base, /* i/o port of the card */
1676 fee_write(base, 0x60, 1676 fee_write(base, 0x60,
1677 dac, 2); 1677 dac, 2);
1678 1678
1679 /* We now should verify here that the EEprom writting was ok */ 1679 /* We now should verify here that the EEprom writing was ok */
1680 1680
1681 /* ReRead the first area */ 1681 /* ReRead the first area */
1682 fee_read(base, 0x00, 1682 fee_read(base, 0x00,
diff --git a/drivers/net/wireless/wavelan_cs.p.h b/drivers/net/wireless/wavelan_cs.p.h
index 4d1c4905c749..4b9de0093a7b 100644
--- a/drivers/net/wireless/wavelan_cs.p.h
+++ b/drivers/net/wireless/wavelan_cs.p.h
@@ -120,7 +120,7 @@
120 * the Wavelan itself (NCR -> AT&T -> Lucent). 120 * the Wavelan itself (NCR -> AT&T -> Lucent).
121 * 121 *
122 * All started with Anders Klemets <klemets@paul.rutgers.edu>, 122 * All started with Anders Klemets <klemets@paul.rutgers.edu>,
123 * writting a Wavelan ISA driver for the MACH microkernel. Girish 123 * writing a Wavelan ISA driver for the MACH microkernel. Girish
124 * Welling <welling@paul.rutgers.edu> had also worked on it. 124 * Welling <welling@paul.rutgers.edu> had also worked on it.
125 * Keith Moore modify this for the Pcmcia hardware. 125 * Keith Moore modify this for the Pcmcia hardware.
126 * 126 *
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index e04cffc8adf3..8459549d0cee 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -40,6 +40,7 @@ static struct usb_device_id usb_ids[] = {
40 { USB_DEVICE(0x126f, 0xa006), .driver_info = DEVICE_ZD1211 }, 40 { USB_DEVICE(0x126f, 0xa006), .driver_info = DEVICE_ZD1211 },
41 { USB_DEVICE(0x6891, 0xa727), .driver_info = DEVICE_ZD1211 }, 41 { USB_DEVICE(0x6891, 0xa727), .driver_info = DEVICE_ZD1211 },
42 { USB_DEVICE(0x0df6, 0x9071), .driver_info = DEVICE_ZD1211 }, 42 { USB_DEVICE(0x0df6, 0x9071), .driver_info = DEVICE_ZD1211 },
43 { USB_DEVICE(0x0df6, 0x9075), .driver_info = DEVICE_ZD1211 },
43 { USB_DEVICE(0x157e, 0x300b), .driver_info = DEVICE_ZD1211 }, 44 { USB_DEVICE(0x157e, 0x300b), .driver_info = DEVICE_ZD1211 },
44 { USB_DEVICE(0x079b, 0x004a), .driver_info = DEVICE_ZD1211 }, 45 { USB_DEVICE(0x079b, 0x004a), .driver_info = DEVICE_ZD1211 },
45 { USB_DEVICE(0x1740, 0x2000), .driver_info = DEVICE_ZD1211 }, 46 { USB_DEVICE(0x1740, 0x2000), .driver_info = DEVICE_ZD1211 },
@@ -67,8 +68,11 @@ static struct usb_device_id usb_ids[] = {
67 { USB_DEVICE(0x0586, 0x3410), .driver_info = DEVICE_ZD1211B }, 68 { USB_DEVICE(0x0586, 0x3410), .driver_info = DEVICE_ZD1211B },
68 { USB_DEVICE(0x0baf, 0x0121), .driver_info = DEVICE_ZD1211B }, 69 { USB_DEVICE(0x0baf, 0x0121), .driver_info = DEVICE_ZD1211B },
69 { USB_DEVICE(0x0586, 0x3412), .driver_info = DEVICE_ZD1211B }, 70 { USB_DEVICE(0x0586, 0x3412), .driver_info = DEVICE_ZD1211B },
71 { USB_DEVICE(0x0586, 0x3413), .driver_info = DEVICE_ZD1211B },
72 { USB_DEVICE(0x0053, 0x5301), .driver_info = DEVICE_ZD1211B },
70 /* "Driverless" devices that need ejecting */ 73 /* "Driverless" devices that need ejecting */
71 { USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER }, 74 { USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER },
75 { USB_DEVICE(0x0ace, 0x20ff), .driver_info = DEVICE_INSTALLER },
72 {} 76 {}
73}; 77};
74 78
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index 3f4a7cf9efea..f2a90a7fa2d6 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -109,7 +109,6 @@ static int gx_fix;
109/* These identify the driver base version and may not be removed. */ 109/* These identify the driver base version and may not be removed. */
110static char version[] __devinitdata = 110static char version[] __devinitdata =
111KERN_INFO DRV_NAME ".c:v1.05 1/09/2001 Written by Donald Becker <becker@scyld.com>\n" 111KERN_INFO DRV_NAME ".c:v1.05 1/09/2001 Written by Donald Becker <becker@scyld.com>\n"
112KERN_INFO " http://www.scyld.com/network/yellowfin.html\n"
113KERN_INFO " (unofficial 2.4.x port, " DRV_VERSION ", " DRV_RELDATE ")\n"; 112KERN_INFO " (unofficial 2.4.x port, " DRV_VERSION ", " DRV_RELDATE ")\n";
114 113
115MODULE_AUTHOR("Donald Becker <becker@scyld.com>"); 114MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
diff --git a/drivers/parport/Kconfig b/drivers/parport/Kconfig
index 36c6a1bfe558..f46c69e4ed82 100644
--- a/drivers/parport/Kconfig
+++ b/drivers/parport/Kconfig
@@ -6,6 +6,7 @@
6# 6#
7 7
8menu "Parallel port support" 8menu "Parallel port support"
9 depends on HAS_IOMEM
9 10
10config PARPORT 11config PARPORT
11 tristate "Parallel port support" 12 tristate "Parallel port support"
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 3bb7739d26a5..8e58ea3d95c0 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -119,7 +119,7 @@ static inline int pci_create_newid_file(struct pci_driver *drv)
119 * system is in its list of supported devices. Returns the matching 119 * system is in its list of supported devices. Returns the matching
120 * pci_device_id structure or %NULL if there is no match. 120 * pci_device_id structure or %NULL if there is no match.
121 * 121 *
122 * Depreciated, don't use this as it will not catch any dynamic ids 122 * Deprecated, don't use this as it will not catch any dynamic ids
123 * that a driver might want to check for. 123 * that a driver might want to check for.
124 */ 124 */
125const struct pci_device_id *pci_match_id(const struct pci_device_id *ids, 125const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
diff --git a/drivers/pnp/Kconfig b/drivers/pnp/Kconfig
index c5143201419a..1959cef8e9de 100644
--- a/drivers/pnp/Kconfig
+++ b/drivers/pnp/Kconfig
@@ -3,6 +3,7 @@
3# 3#
4 4
5menu "Plug and Play support" 5menu "Plug and Play support"
6 depends on HAS_IOMEM
6 7
7config PNP 8config PNP
8 bool "Plug and Play support" 9 bool "Plug and Play support"
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 5e439836db2d..1759baad439c 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -3,6 +3,7 @@
3# 3#
4 4
5menu "Real Time Clock" 5menu "Real Time Clock"
6 depends on !S390
6 7
7config RTC_LIB 8config RTC_LIB
8 tristate 9 tristate
@@ -98,7 +99,7 @@ config RTC_INTF_DEV_UIE_EMUL
98 bool "RTC UIE emulation on dev interface" 99 bool "RTC UIE emulation on dev interface"
99 depends on RTC_INTF_DEV 100 depends on RTC_INTF_DEV
100 help 101 help
101 Provides an emulation for RTC_UIE if the underlaying rtc chip 102 Provides an emulation for RTC_UIE if the underlying rtc chip
102 driver does not expose RTC_UIE ioctls. Those requests generate 103 driver does not expose RTC_UIE ioctls. Those requests generate
103 once-per-second update interrupts, used for synchronization. 104 once-per-second update interrupts, used for synchronization.
104 105
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c
index 6abf4811958c..e0f91dfce0f5 100644
--- a/drivers/rtc/rtc-sh.c
+++ b/drivers/rtc/rtc-sh.c
@@ -104,7 +104,7 @@ static irqreturn_t sh_rtc_interrupt(int irq, void *dev_id)
104 104
105 writeb(tmp, rtc->regbase + RCR1); 105 writeb(tmp, rtc->regbase + RCR1);
106 106
107 rtc_update_irq(&rtc->rtc_dev, 1, events); 107 rtc_update_irq(rtc->rtc_dev, 1, events);
108 108
109 spin_unlock(&rtc->lock); 109 spin_unlock(&rtc->lock);
110 110
@@ -139,7 +139,7 @@ static irqreturn_t sh_rtc_alarm(int irq, void *dev_id)
139 139
140 rtc->rearm_aie = 1; 140 rtc->rearm_aie = 1;
141 141
142 rtc_update_irq(&rtc->rtc_dev, 1, events); 142 rtc_update_irq(rtc->rtc_dev, 1, events);
143 } 143 }
144 144
145 spin_unlock(&rtc->lock); 145 spin_unlock(&rtc->lock);
@@ -153,7 +153,7 @@ static irqreturn_t sh_rtc_periodic(int irq, void *dev_id)
153 153
154 spin_lock(&rtc->lock); 154 spin_lock(&rtc->lock);
155 155
156 rtc_update_irq(&rtc->rtc_dev, 1, RTC_PF | RTC_IRQF); 156 rtc_update_irq(rtc->rtc_dev, 1, RTC_PF | RTC_IRQF);
157 157
158 spin_unlock(&rtc->lock); 158 spin_unlock(&rtc->lock);
159 159
@@ -341,7 +341,7 @@ static int sh_rtc_read_time(struct device *dev, struct rtc_time *tm)
341 tm->tm_sec--; 341 tm->tm_sec--;
342#endif 342#endif
343 343
344 dev_dbg(&dev, "%s: tm is secs=%d, mins=%d, hours=%d, " 344 dev_dbg(dev, "%s: tm is secs=%d, mins=%d, hours=%d, "
345 "mday=%d, mon=%d, year=%d, wday=%d\n", 345 "mday=%d, mon=%d, year=%d, wday=%d\n",
346 __FUNCTION__, 346 __FUNCTION__,
347 tm->tm_sec, tm->tm_min, tm->tm_hour, 347 tm->tm_sec, tm->tm_min, tm->tm_hour,
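The four rtc-sh fixes above are all the same address-of slip: rtc->rtc_dev and the dev argument are already pointers, so &rtc->rtc_dev handed rtc_update_irq() a struct rtc_device ** (and dev_dbg() a struct device **). A minimal illustration, assuming the driver's private structure looks roughly like this:

    struct sh_rtc {                     /* sketch of the relevant field */
        struct rtc_device *rtc_dev;     /* already a pointer */
        /* ... */
    };

    rtc_update_irq(&rtc->rtc_dev, 1, events);   /* wrong: passes rtc_device ** */
    rtc_update_irq(rtc->rtc_dev, 1, events);    /* right: passes rtc_device *  */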
diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig
index b250c5354503..e879b212cf43 100644
--- a/drivers/s390/block/Kconfig
+++ b/drivers/s390/block/Kconfig
@@ -1,11 +1,9 @@
1if S390 && BLOCK
2
3comment "S/390 block device drivers" 1comment "S/390 block device drivers"
4 depends on S390 2 depends on S390 && BLOCK
5 3
6config BLK_DEV_XPRAM 4config BLK_DEV_XPRAM
7 tristate "XPRAM disk support" 5 tristate "XPRAM disk support"
8 depends on S390 6 depends on S390 && BLOCK
9 help 7 help
10 Select this option if you want to use your expanded storage on S/390 8 Select this option if you want to use your expanded storage on S/390
11 or zSeries as a disk. This is useful as a _fast_ swap device if you 9 or zSeries as a disk. This is useful as a _fast_ swap device if you
@@ -15,12 +13,13 @@ config BLK_DEV_XPRAM
15 13
16config DCSSBLK 14config DCSSBLK
17 tristate "DCSSBLK support" 15 tristate "DCSSBLK support"
16 depends on S390 && BLOCK
18 help 17 help
19 Support for dcss block device 18 Support for dcss block device
20 19
21config DASD 20config DASD
22 tristate "Support for DASD devices" 21 tristate "Support for DASD devices"
23 depends on CCW 22 depends on CCW && BLOCK
24 help 23 help
25 Enable this option if you want to access DASDs directly utilizing 24 Enable this option if you want to access DASDs directly utilizing
26 S/390s channel subsystem commands. This is necessary for running 25 S/390s channel subsystem commands. This is necessary for running
@@ -62,5 +61,3 @@ config DASD_EER
62 This driver provides a character device interface to the 61 This driver provides a character device interface to the
63 DASD extended error reporting. This is only needed if you want to 62 DASD extended error reporting. This is only needed if you want to
64 use applications written for the EER facility. 63 use applications written for the EER facility.
65
66endif
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 977521013fe8..bfeca57098fa 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -2174,9 +2174,10 @@ dasd_generic_notify(struct ccw_device *cdev, int event)
2174 return ret; 2174 return ret;
2175} 2175}
2176 2176
2177struct dasd_ccw_req * dasd_generic_build_rdc(struct dasd_device *device, 2177static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
2178 void *rdc_buffer, 2178 void *rdc_buffer,
2179 int rdc_buffer_size, char *magic) 2179 int rdc_buffer_size,
2180 char *magic)
2180{ 2181{
2181 struct dasd_ccw_req *cqr; 2182 struct dasd_ccw_req *cqr;
2182 struct ccw1 *ccw; 2183 struct ccw1 *ccw;
@@ -2219,6 +2220,7 @@ int dasd_generic_read_dev_chars(struct dasd_device *device, char *magic,
2219 dasd_sfree_request(cqr, cqr->device); 2220 dasd_sfree_request(cqr, cqr->device);
2220 return ret; 2221 return ret;
2221} 2222}
2223EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);
2222 2224
2223static int __init 2225static int __init
2224dasd_init(void) 2226dasd_init(void)
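
The dasd.c hunk is the usual visibility cleanup: the request builder becomes static because it has no users outside the file, while the real entry point gains an explicit EXPORT_SYMBOL_GPL() so modular disciplines can keep calling it. A minimal sketch of the pattern, with hypothetical names:

#include <linux/errno.h>
#include <linux/module.h>

/* Internal helper: file-local once marked static. */
static int build_rdc_sketch(void *buffer, int size)
{
	return (buffer && size > 0) ? 0 : -EINVAL;
}

/* Exported entry point; GPL-compatible modules may link against it. */
int read_dev_chars_sketch(void *buffer, int size)
{
	return build_rdc_sketch(buffer, size);
}
EXPORT_SYMBOL_GPL(read_dev_chars_sketch);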
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index e810e4a44ed4..eccac1c3b71b 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -50,6 +50,7 @@ struct dasd_diag_private {
50 struct dasd_diag_rw_io iob; 50 struct dasd_diag_rw_io iob;
51 struct dasd_diag_init_io iib; 51 struct dasd_diag_init_io iib;
52 blocknum_t pt_block; 52 blocknum_t pt_block;
53 struct ccw_dev_id dev_id;
53}; 54};
54 55
55struct dasd_diag_req { 56struct dasd_diag_req {
@@ -102,7 +103,7 @@ mdsk_init_io(struct dasd_device *device, unsigned int blocksize,
102 iib = &private->iib; 103 iib = &private->iib;
103 memset(iib, 0, sizeof (struct dasd_diag_init_io)); 104 memset(iib, 0, sizeof (struct dasd_diag_init_io));
104 105
105 iib->dev_nr = _ccw_device_get_device_number(device->cdev); 106 iib->dev_nr = private->dev_id.devno;
106 iib->block_size = blocksize; 107 iib->block_size = blocksize;
107 iib->offset = offset; 108 iib->offset = offset;
108 iib->flaga = DASD_DIAG_FLAGA_DEFAULT; 109 iib->flaga = DASD_DIAG_FLAGA_DEFAULT;
@@ -127,7 +128,7 @@ mdsk_term_io(struct dasd_device * device)
127 private = (struct dasd_diag_private *) device->private; 128 private = (struct dasd_diag_private *) device->private;
128 iib = &private->iib; 129 iib = &private->iib;
129 memset(iib, 0, sizeof (struct dasd_diag_init_io)); 130 memset(iib, 0, sizeof (struct dasd_diag_init_io));
130 iib->dev_nr = _ccw_device_get_device_number(device->cdev); 131 iib->dev_nr = private->dev_id.devno;
131 rc = dia250(iib, TERM_BIO); 132 rc = dia250(iib, TERM_BIO);
132 return rc; 133 return rc;
133} 134}
@@ -166,7 +167,7 @@ dasd_start_diag(struct dasd_ccw_req * cqr)
166 private = (struct dasd_diag_private *) device->private; 167 private = (struct dasd_diag_private *) device->private;
167 dreq = (struct dasd_diag_req *) cqr->data; 168 dreq = (struct dasd_diag_req *) cqr->data;
168 169
169 private->iob.dev_nr = _ccw_device_get_device_number(device->cdev); 170 private->iob.dev_nr = private->dev_id.devno;
170 private->iob.key = 0; 171 private->iob.key = 0;
171 private->iob.flags = DASD_DIAG_RWFLAG_ASYNC; 172 private->iob.flags = DASD_DIAG_RWFLAG_ASYNC;
172 private->iob.block_count = dreq->block_count; 173 private->iob.block_count = dreq->block_count;
@@ -323,11 +324,12 @@ dasd_diag_check_device(struct dasd_device *device)
323 "memory allocation failed for private data"); 324 "memory allocation failed for private data");
324 return -ENOMEM; 325 return -ENOMEM;
325 } 326 }
327 ccw_device_get_id(device->cdev, &private->dev_id);
326 device->private = (void *) private; 328 device->private = (void *) private;
327 } 329 }
328 /* Read Device Characteristics */ 330 /* Read Device Characteristics */
329 rdc_data = (void *) &(private->rdc_data); 331 rdc_data = (void *) &(private->rdc_data);
330 rdc_data->dev_nr = _ccw_device_get_device_number(device->cdev); 332 rdc_data->dev_nr = private->dev_id.devno;
331 rdc_data->rdc_len = sizeof (struct dasd_diag_characteristics); 333 rdc_data->rdc_len = sizeof (struct dasd_diag_characteristics);
332 334
333 rc = diag210((struct diag210 *) rdc_data); 335 rc = diag210((struct diag210 *) rdc_data);
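
dasd_diag.c replaces every _ccw_device_get_device_number() call with a ccw_dev_id cached in the private data: the id is copied once in dasd_diag_check_device() via ccw_device_get_id(), and the I/O paths then read private->dev_id.devno without touching the deprecated accessor again. A sketch of the caching pattern (names hypothetical, header path assumed):

#include <asm/ccwdev.h>		/* struct ccw_dev_id, ccw_device_get_id() */

struct diag_private_sketch {
	struct ccw_dev_id dev_id;	/* filled once at check/probe time */
};

static void diag_cache_id(struct ccw_device *cdev,
			  struct diag_private_sketch *priv)
{
	ccw_device_get_id(cdev, &priv->dev_id);
}

static unsigned int diag_devno(struct diag_private_sketch *priv)
{
	/* Hot path: no accessor call, just the cached copy. */
	return priv->dev_id.devno;
}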
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index c9583fbc2a7d..418b4e63a4fa 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -450,9 +450,9 @@ dasd_eckd_generate_uid(struct dasd_device *device, struct dasd_uid *uid)
450 return 0; 450 return 0;
451} 451}
452 452
453struct dasd_ccw_req * dasd_eckd_build_rcd_lpm(struct dasd_device *device, 453static struct dasd_ccw_req *dasd_eckd_build_rcd_lpm(struct dasd_device *device,
454 void *rcd_buffer, 454 void *rcd_buffer,
455 struct ciw *ciw, __u8 lpm) 455 struct ciw *ciw, __u8 lpm)
456{ 456{
457 struct dasd_ccw_req *cqr; 457 struct dasd_ccw_req *cqr;
458 struct ccw1 *ccw; 458 struct ccw1 *ccw;
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index 758cfb542865..672eb0a3dd0b 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -255,6 +255,7 @@ dasd_ioctl_information(struct dasd_device *device,
255 unsigned long flags; 255 unsigned long flags;
256 int rc; 256 int rc;
257 struct ccw_device *cdev; 257 struct ccw_device *cdev;
258 struct ccw_dev_id dev_id;
258 259
259 if (!device->discipline->fill_info) 260 if (!device->discipline->fill_info)
260 return -EINVAL; 261 return -EINVAL;
@@ -270,8 +271,9 @@ dasd_ioctl_information(struct dasd_device *device,
270 } 271 }
271 272
272 cdev = device->cdev; 273 cdev = device->cdev;
274 ccw_device_get_id(cdev, &dev_id);
273 275
274 dasd_info->devno = _ccw_device_get_device_number(device->cdev); 276 dasd_info->devno = dev_id.devno;
275 dasd_info->schid = _ccw_device_get_subchannel_number(device->cdev); 277 dasd_info->schid = _ccw_device_get_subchannel_number(device->cdev);
276 dasd_info->cu_type = cdev->id.cu_type; 278 dasd_info->cu_type = cdev->id.cu_type;
277 dasd_info->cu_model = cdev->id.cu_model; 279 dasd_info->cu_model = cdev->id.cu_model;
diff --git a/drivers/s390/Kconfig b/drivers/s390/char/Kconfig
index 165af398fdea..66102a184322 100644
--- a/drivers/s390/Kconfig
+++ b/drivers/s390/char/Kconfig
@@ -1,69 +1,9 @@
1config CCW
2 bool
3 default y
4
5source "drivers/block/Kconfig"
6
7source "drivers/md/Kconfig"
8
9
10menu "Character device drivers"
11
12config UNIX98_PTYS
13 bool "Unix98 PTY support"
14 ---help---
15 A pseudo terminal (PTY) is a software device consisting of two
16 halves: a master and a slave. The slave device behaves identical to
17 a physical terminal; the master device is used by a process to
18 read data from and write data to the slave, thereby emulating a
19 terminal. Typical programs for the master side are telnet servers
20 and xterms.
21
22 Linux has traditionally used the BSD-like names /dev/ptyxx for
23 masters and /dev/ttyxx for slaves of pseudo terminals. This scheme
24 has a number of problems. The GNU C library glibc 2.1 and later,
25 however, supports the Unix98 naming standard: in order to acquire a
26 pseudo terminal, a process opens /dev/ptmx; the number of the pseudo
27 terminal is then made available to the process and the pseudo
28 terminal slave can be accessed as /dev/pts/<number>. What was
29 traditionally /dev/ttyp2 will then be /dev/pts/2, for example.
30
31 The entries in /dev/pts/ are created on the fly by a virtual
32 file system; therefore, if you say Y here you should say Y to
33 "/dev/pts file system for Unix98 PTYs" as well.
34
35 If you want to say Y here, you need to have the C library glibc 2.1
36 or later (equal to libc-6.1, check with "ls -l /lib/libc.so.*").
37 Read the instructions in <file:Documentation/Changes> pertaining to
38 pseudo terminals. It's safe to say N.
39
40config UNIX98_PTY_COUNT
41 int "Maximum number of Unix98 PTYs in use (0-2048)"
42 depends on UNIX98_PTYS
43 default "256"
44 help
45 The maximum number of Unix98 PTYs that can be used at any one time.
46 The default is 256, and should be enough for desktop systems. Server
47 machines which support incoming telnet/rlogin/ssh connections and/or
48 serve several X terminals may want to increase this: every incoming
49 connection and every xterm uses up one PTY.
50
51 When not in use, each additional set of 256 PTYs occupy
52 approximately 8 KB of kernel memory on 32-bit architectures.
53
54config HANGCHECK_TIMER
55 tristate "Hangcheck timer"
56 help
57 The hangcheck-timer module detects when the system has gone
58 out to lunch past a certain margin. It can reboot the system
59 or merely print a warning.
60
61source "drivers/char/watchdog/Kconfig"
62
63comment "S/390 character device drivers" 1comment "S/390 character device drivers"
2 depends on S390
64 3
65config TN3270 4config TN3270
66 tristate "Support for locally attached 3270 terminals" 5 tristate "Support for locally attached 3270 terminals"
6 depends on CCW
67 help 7 help
68 Include support for IBM 3270 terminals. 8 Include support for IBM 3270 terminals.
69 9
@@ -88,6 +28,7 @@ config TN3270_CONSOLE
88 28
89config TN3215 29config TN3215
90 bool "Support for 3215 line mode terminal" 30 bool "Support for 3215 line mode terminal"
31 depends on CCW
91 help 32 help
92 Include support for IBM 3215 line-mode terminals. 33 Include support for IBM 3215 line-mode terminals.
93 34
@@ -99,12 +40,19 @@ config TN3215_CONSOLE
99 Linux system console. 40 Linux system console.
100 41
101config CCW_CONSOLE 42config CCW_CONSOLE
102 bool 43 bool
103 depends on TN3215_CONSOLE || TN3270_CONSOLE 44 depends on TN3215_CONSOLE || TN3270_CONSOLE
104 default y 45 default y
105 46
47config SCLP
48 bool "Support for SCLP"
49 depends on S390
50 help
51 Include support for the SCLP interface to the service element.
52
106config SCLP_TTY 53config SCLP_TTY
107 bool "Support for SCLP line mode terminal" 54 bool "Support for SCLP line mode terminal"
55 depends on SCLP
108 help 56 help
109 Include support for IBM SCLP line-mode terminals. 57 Include support for IBM SCLP line-mode terminals.
110 58
@@ -117,6 +65,7 @@ config SCLP_CONSOLE
117 65
118config SCLP_VT220_TTY 66config SCLP_VT220_TTY
119 bool "Support for SCLP VT220-compatible terminal" 67 bool "Support for SCLP VT220-compatible terminal"
68 depends on SCLP
120 help 69 help
121 Include support for an IBM SCLP VT220-compatible terminal. 70 Include support for an IBM SCLP VT220-compatible terminal.
122 71
@@ -129,6 +78,7 @@ config SCLP_VT220_CONSOLE
129 78
130config SCLP_CPI 79config SCLP_CPI
131 tristate "Control-Program Identification" 80 tristate "Control-Program Identification"
81 depends on SCLP
132 help 82 help
133 This option enables the hardware console interface for system 83 This option enables the hardware console interface for system
134 identification. This is commonly used for workload management and 84 identification. This is commonly used for workload management and
@@ -140,6 +90,7 @@ config SCLP_CPI
140 90
141config S390_TAPE 91config S390_TAPE
142 tristate "S/390 tape device support" 92 tristate "S/390 tape device support"
93 depends on CCW
143 help 94 help
144 Select this option if you want to access channel-attached tape 95 Select this option if you want to access channel-attached tape
145 devices on IBM S/390 or zSeries. 96 devices on IBM S/390 or zSeries.
@@ -194,6 +145,7 @@ config VMLOGRDR
194 145
195config VMCP 146config VMCP
196 tristate "Support for the z/VM CP interface (VM only)" 147 tristate "Support for the z/VM CP interface (VM only)"
148 depends on S390
197 help 149 help
198 Select this option if you want to be able to interact with the control 150 Select this option if you want to be able to interact with the control
199 program on z/VM 151 program on z/VM
@@ -207,33 +159,8 @@ config MONREADER
207 159
208config MONWRITER 160config MONWRITER
209 tristate "API for writing z/VM monitor service records" 161 tristate "API for writing z/VM monitor service records"
162 depends on S390
210 default "m" 163 default "m"
211 help 164 help
212 Character device driver for writing z/VM monitor service records 165 Character device driver for writing z/VM monitor service records
213 166
214endmenu
215
216menu "Cryptographic devices"
217
218config ZCRYPT
219 tristate "Support for PCI-attached cryptographic adapters"
220 select ZCRYPT_MONOLITHIC if ZCRYPT="y"
221 default "m"
222 help
223 Select this option if you want to use a PCI-attached cryptographic
224 adapter like:
225 + PCI Cryptographic Accelerator (PCICA)
226 + PCI Cryptographic Coprocessor (PCICC)
227 + PCI-X Cryptographic Coprocessor (PCIXCC)
228 + Crypto Express2 Coprocessor (CEX2C)
229 + Crypto Express2 Accelerator (CEX2A)
230
231config ZCRYPT_MONOLITHIC
232 bool "Monolithic zcrypt module"
233 depends on ZCRYPT="m"
234 help
235 Select this option if you want to have a single module z90crypt.ko
236 that contains all parts of the crypto device driver (ap bus,
237 request router and all the card drivers).
238
239endmenu
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index 8df7b1323c05..67009bfa093e 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -97,7 +97,7 @@ static u8 user_data_sever[16] = {
97 * Create the 8 bytes EBCDIC DCSS segment name from 97 * Create the 8 bytes EBCDIC DCSS segment name from
98 * an ASCII name, incl. padding 98 * an ASCII name, incl. padding
99 */ 99 */
100static inline void dcss_mkname(char *ascii_name, char *ebcdic_name) 100static void dcss_mkname(char *ascii_name, char *ebcdic_name)
101{ 101{
102 int i; 102 int i;
103 103
@@ -191,7 +191,7 @@ static inline u32 mon_rec_end(struct mon_msg *monmsg)
191 return *((u32 *) (mon_mca_start(monmsg) + monmsg->mca_offset + 8)); 191 return *((u32 *) (mon_mca_start(monmsg) + monmsg->mca_offset + 8));
192} 192}
193 193
194static inline int mon_check_mca(struct mon_msg *monmsg) 194static int mon_check_mca(struct mon_msg *monmsg)
195{ 195{
196 if ((mon_rec_end(monmsg) <= mon_rec_start(monmsg)) || 196 if ((mon_rec_end(monmsg) <= mon_rec_start(monmsg)) ||
197 (mon_rec_start(monmsg) < mon_dcss_start) || 197 (mon_rec_start(monmsg) < mon_dcss_start) ||
@@ -209,8 +209,8 @@ static inline int mon_check_mca(struct mon_msg *monmsg)
209 return 0; 209 return 0;
210} 210}
211 211
212static inline int mon_send_reply(struct mon_msg *monmsg, 212static int mon_send_reply(struct mon_msg *monmsg,
213 struct mon_private *monpriv) 213 struct mon_private *monpriv)
214{ 214{
215 int rc; 215 int rc;
216 216
@@ -236,7 +236,7 @@ static inline int mon_send_reply(struct mon_msg *monmsg,
236 return 0; 236 return 0;
237} 237}
238 238
239static inline void mon_free_mem(struct mon_private *monpriv) 239static void mon_free_mem(struct mon_private *monpriv)
240{ 240{
241 int i; 241 int i;
242 242
@@ -246,7 +246,7 @@ static inline void mon_free_mem(struct mon_private *monpriv)
246 kfree(monpriv); 246 kfree(monpriv);
247} 247}
248 248
249static inline struct mon_private *mon_alloc_mem(void) 249static struct mon_private *mon_alloc_mem(void)
250{ 250{
251 int i; 251 int i;
252 struct mon_private *monpriv; 252 struct mon_private *monpriv;
@@ -307,7 +307,7 @@ static inline void mon_next_mca(struct mon_msg *monmsg)
307 monmsg->pos = 0; 307 monmsg->pos = 0;
308} 308}
309 309
310static inline struct mon_msg *mon_next_message(struct mon_private *monpriv) 310static struct mon_msg *mon_next_message(struct mon_private *monpriv)
311{ 311{
312 struct mon_msg *monmsg; 312 struct mon_msg *monmsg;
313 313
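
monreader.c drops the explicit 'inline' from file-local functions: with static linkage the compiler already inlines where it pays off, and the keyword mostly just overrides its heuristics. The before/after shape, sketched with a placeholder body:

/* Before: the hint forces the compiler's hand at every call site. */
static inline int check_range_old(int start, int end)
{
	return end > start;
}

/* After: same static linkage, inlining left to the compiler. */
static int check_range(int start, int end)
{
	return end > start;
}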
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 8facd14adb7c..f6ef90ee3e7d 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -589,9 +589,10 @@ static int
589__raw3270_size_device_vm(struct raw3270 *rp) 589__raw3270_size_device_vm(struct raw3270 *rp)
590{ 590{
591 int rc, model; 591 int rc, model;
592 struct ccw_dev_id dev_id;
592 593
593 raw3270_init_diag210.vrdcdvno = 594 ccw_device_get_id(rp->cdev, &dev_id);
594 _ccw_device_get_device_number(rp->cdev); 595 raw3270_init_diag210.vrdcdvno = dev_id.devno;
595 raw3270_init_diag210.vrdclen = sizeof(struct diag210); 596 raw3270_init_diag210.vrdclen = sizeof(struct diag210);
596 rc = diag210(&raw3270_init_diag210); 597 rc = diag210(&raw3270_init_diag210);
597 if (rc) 598 if (rc)
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index 87ac4a3ad49d..dbb99d1b6f57 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -132,6 +132,9 @@ int sclp_deactivate(void);
132int sclp_reactivate(void); 132int sclp_reactivate(void);
133int sclp_service_call(sclp_cmdw_t command, void *sccb); 133int sclp_service_call(sclp_cmdw_t command, void *sccb);
134 134
135int sclp_sdias_init(void);
136void sclp_sdias_exit(void);
137
135/* useful inlines */ 138/* useful inlines */
136 139
137/* VM uses EBCDIC 037, LPAR+native(SE+HMC) use EBCDIC 500 */ 140/* VM uses EBCDIC 037, LPAR+native(SE+HMC) use EBCDIC 500 */
diff --git a/drivers/s390/char/sclp_rw.c b/drivers/s390/char/sclp_rw.c
index bbd5b8b66f42..d6b06ab81188 100644
--- a/drivers/s390/char/sclp_rw.c
+++ b/drivers/s390/char/sclp_rw.c
@@ -23,7 +23,7 @@
23 23
24/* 24/*
25 * The room for the SCCB (only for writing) is not equal to a pages size 25 * The room for the SCCB (only for writing) is not equal to a pages size
26 * (as it is specified as the maximum size in the the SCLP documentation) 26 * (as it is specified as the maximum size in the SCLP documentation)
27 * because of the additional data structure described above. 27 * because of the additional data structure described above.
28 */ 28 */
29#define MAX_SCCB_ROOM (PAGE_SIZE - sizeof(struct sclp_buffer)) 29#define MAX_SCCB_ROOM (PAGE_SIZE - sizeof(struct sclp_buffer))
diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c
index 52283daddaef..1c064976b32b 100644
--- a/drivers/s390/char/sclp_sdias.c
+++ b/drivers/s390/char/sclp_sdias.c
@@ -66,9 +66,9 @@ static DEFINE_MUTEX(sdias_mutex);
66 66
67static void sdias_callback(struct sclp_req *request, void *data) 67static void sdias_callback(struct sclp_req *request, void *data)
68{ 68{
69 struct sdias_sccb *sccb; 69 struct sdias_sccb *cbsccb;
70 70
71 sccb = (struct sdias_sccb *) request->sccb; 71 cbsccb = (struct sdias_sccb *) request->sccb;
72 sclp_req_done = 1; 72 sclp_req_done = 1;
73 wake_up(&sdias_wq); /* Inform caller, that request is complete */ 73 wake_up(&sdias_wq); /* Inform caller, that request is complete */
74 TRACE("callback done\n"); 74 TRACE("callback done\n");
@@ -229,7 +229,7 @@ out:
229 return rc; 229 return rc;
230} 230}
231 231
232int __init sdias_init(void) 232int __init sclp_sdias_init(void)
233{ 233{
234 int rc; 234 int rc;
235 235
@@ -248,7 +248,7 @@ int __init sdias_init(void)
248 return 0; 248 return 0;
249} 249}
250 250
251void __exit sdias_exit(void) 251void __exit sclp_sdias_exit(void)
252{ 252{
253 debug_unregister(sdias_dbf); 253 debug_unregister(sdias_dbf);
254 sclp_unregister(&sclp_sdias_register); 254 sclp_unregister(&sclp_sdias_register);
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 89d439316a53..66eb0688d523 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -21,6 +21,7 @@
21#include <asm/debug.h> 21#include <asm/debug.h>
22#include <asm/processor.h> 22#include <asm/processor.h>
23#include <asm/irqflags.h> 23#include <asm/irqflags.h>
24#include "sclp.h"
24 25
25#define TRACE(x...) debug_sprintf_event(zcore_dbf, 1, x) 26#define TRACE(x...) debug_sprintf_event(zcore_dbf, 1, x)
26#define MSG(x...) printk( KERN_ALERT x ) 27#define MSG(x...) printk( KERN_ALERT x )
@@ -564,8 +565,6 @@ static void __init zcore_header_init(int arch, struct zcore_header *hdr)
564 get_cpu_id(&hdr->cpu_id); 565 get_cpu_id(&hdr->cpu_id);
565} 566}
566 567
567extern int sdias_init(void);
568
569static int __init zcore_init(void) 568static int __init zcore_init(void)
570{ 569{
571 unsigned char arch; 570 unsigned char arch;
@@ -582,7 +581,7 @@ static int __init zcore_init(void)
582 TRACE("wwpn: %llx\n", (unsigned long long) ipl_info.data.fcp.wwpn); 581 TRACE("wwpn: %llx\n", (unsigned long long) ipl_info.data.fcp.wwpn);
583 TRACE("lun: %llx\n", (unsigned long long) ipl_info.data.fcp.lun); 582 TRACE("lun: %llx\n", (unsigned long long) ipl_info.data.fcp.lun);
584 583
585 rc = sdias_init(); 584 rc = sclp_sdias_init();
586 if (rc) 585 if (rc)
587 goto fail; 586 goto fail;
588 587
@@ -634,12 +633,10 @@ fail:
634 return rc; 633 return rc;
635} 634}
636 635
637extern void sdias_exit(void);
638
639static void __exit zcore_exit(void) 636static void __exit zcore_exit(void)
640{ 637{
641 debug_unregister(zcore_dbf); 638 debug_unregister(zcore_dbf);
642 sdias_exit(); 639 sclp_sdias_exit();
643 diag308(DIAG308_REL_HSA, NULL); 640 diag308(DIAG308_REL_HSA, NULL);
644} 641}
645 642
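
The zcore.c hunks remove the local 'extern' prototypes in favour of the declarations the earlier sclp.h hunk added; with both files including the same header, the compiler checks the renamed, properly sclp_-prefixed signatures at every call site. The shape of the cleanup, sketched across both files:

/* sclp.h: the single, shared prototypes */
int sclp_sdias_init(void);
void sclp_sdias_exit(void);

/* zcore.c: include the header instead of re-declaring by hand */
#include <linux/init.h>
#include "sclp.h"

static int __init zcore_init_sketch(void)
{
	/*
	 * A stale local 'extern int sdias_init(void);' can silently
	 * diverge from the real definition; the shared header cannot.
	 */
	return sclp_sdias_init();
}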
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 27c6d9e55b23..dfca0ef139fd 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -191,8 +191,7 @@ static int css_register_subchannel(struct subchannel *sch)
191 return ret; 191 return ret;
192} 192}
193 193
194int 194static int css_probe_device(struct subchannel_id schid)
195css_probe_device(struct subchannel_id schid)
196{ 195{
197 int ret; 196 int ret;
198 struct subchannel *sch; 197 struct subchannel *sch;
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 71fcfdc42800..ed7977531c3f 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -138,9 +138,7 @@ struct css_driver {
138 * all css_drivers have the css_bus_type 138 * all css_drivers have the css_bus_type
139 */ 139 */
140extern struct bus_type css_bus_type; 140extern struct bus_type css_bus_type;
141extern struct css_driver io_subchannel_driver;
142 141
143extern int css_probe_device(struct subchannel_id);
144extern int css_sch_device_register(struct subchannel *); 142extern int css_sch_device_register(struct subchannel *);
145extern void css_sch_device_unregister(struct subchannel *); 143extern void css_sch_device_unregister(struct subchannel *);
146extern struct subchannel * get_subchannel_by_schid(struct subchannel_id); 144extern struct subchannel * get_subchannel_by_schid(struct subchannel_id);
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index a23ff582db9d..a8b373f69cf0 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -129,7 +129,7 @@ static void io_subchannel_verify(struct device *);
129static void io_subchannel_ioterm(struct device *); 129static void io_subchannel_ioterm(struct device *);
130static void io_subchannel_shutdown(struct subchannel *); 130static void io_subchannel_shutdown(struct subchannel *);
131 131
132struct css_driver io_subchannel_driver = { 132static struct css_driver io_subchannel_driver = {
133 .subchannel_type = SUBCHANNEL_TYPE_IO, 133 .subchannel_type = SUBCHANNEL_TYPE_IO,
134 .drv = { 134 .drv = {
135 .name = "io_subchannel", 135 .name = "io_subchannel",
@@ -546,7 +546,7 @@ static struct attribute_group ccwdev_attr_group = {
546 .attrs = ccwdev_attrs, 546 .attrs = ccwdev_attrs,
547}; 547};
548 548
549struct attribute_group *ccwdev_attr_groups[] = { 549static struct attribute_group *ccwdev_attr_groups[] = {
550 &ccwdev_attr_group, 550 &ccwdev_attr_group,
551 NULL, 551 NULL,
552}; 552};
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 16f59fcb66b1..a5d263fb55ae 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -616,6 +616,17 @@ ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no)
616 return chp_get_chp_desc(chpid); 616 return chp_get_chp_desc(chpid);
617} 617}
618 618
619/**
620 * ccw_device_get_id - obtain a ccw device id
621 * @cdev: device to obtain the id for
622 * @dev_id: where to fill in the values
623 */
624void ccw_device_get_id(struct ccw_device *cdev, struct ccw_dev_id *dev_id)
625{
626 *dev_id = cdev->private->dev_id;
627}
628EXPORT_SYMBOL(ccw_device_get_id);
629
619// FIXME: these have to go: 630// FIXME: these have to go:
620 631
621int 632int
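
ccw_device_get_id() above simply copies the embedded dev_id out of the private structure, giving callers a stable snapshot instead of a reason to poke at ccw_device internals. A hypothetical caller (header path assumed):

#include <linux/kernel.h>
#include <asm/ccwdev.h>

static void print_devno_sketch(struct ccw_device *cdev)
{
	struct ccw_dev_id id;

	ccw_device_get_id(cdev, &id);	/* snapshot of cdev->private->dev_id */
	printk(KERN_INFO "subchannel set %u, device number 0x%04x\n",
	       id.ssid, id.devno);
}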
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c
index f770018fe1d5..e70aeb7a3781 100644
--- a/drivers/s390/cio/qdio.c
+++ b/drivers/s390/cio/qdio.c
@@ -1983,6 +1983,7 @@ qdio_handle_pci(struct qdio_irq *irq_ptr)
1983 if (q->is_input_q&QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT) 1983 if (q->is_input_q&QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT)
1984 qdio_mark_q(q); 1984 qdio_mark_q(q);
1985 else { 1985 else {
1986 qdio_perf_stat_dec(&perf_stats.tl_runs);
1986 __qdio_inbound_processing(q); 1987 __qdio_inbound_processing(q);
1987 } 1988 }
1988 } 1989 }
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index f98fa465df0a..eada69dec4fe 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -3,7 +3,7 @@ menu "S/390 network device drivers"
3 3
4config LCS 4config LCS
5 tristate "Lan Channel Station Interface" 5 tristate "Lan Channel Station Interface"
6 depends on NETDEVICES && (NET_ETHERNET || TR || FDDI) 6 depends on CCW && NETDEVICES && (NET_ETHERNET || TR || FDDI)
7 help 7 help
8 Select this option if you want to use LCS networking on IBM S/390 8 Select this option if you want to use LCS networking on IBM S/390
9 or zSeries. This device driver supports Token Ring (IEEE 802.5), 9 or zSeries. This device driver supports Token Ring (IEEE 802.5),
@@ -13,7 +13,7 @@ config LCS
13 13
14config CTC 14config CTC
15 tristate "CTC device support" 15 tristate "CTC device support"
16 depends on NETDEVICES 16 depends on CCW && NETDEVICES
17 help 17 help
18 Select this option if you want to use channel-to-channel networking 18 Select this option if you want to use channel-to-channel networking
19 on IBM S/390 or zSeries. This device driver supports real CTC 19 on IBM S/390 or zSeries. This device driver supports real CTC
@@ -42,7 +42,7 @@ config SMSGIUCV
42 42
43config CLAW 43config CLAW
44 tristate "CLAW device support" 44 tristate "CLAW device support"
45 depends on NETDEVICES 45 depends on CCW && NETDEVICES
46 help 46 help
47 This driver supports channel attached CLAW devices. 47 This driver supports channel attached CLAW devices.
48 CLAW is Common Link Access for Workstation. Common devices 48 CLAW is Common Link Access for Workstation. Common devices
@@ -52,7 +52,7 @@ config CLAW
52 52
53config QETH 53config QETH
54 tristate "Gigabit Ethernet device support" 54 tristate "Gigabit Ethernet device support"
55 depends on NETDEVICES && IP_MULTICAST && QDIO 55 depends on CCW && NETDEVICES && IP_MULTICAST && QDIO
56 help 56 help
57 This driver supports the IBM S/390 and zSeries OSA Express adapters 57 This driver supports the IBM S/390 and zSeries OSA Express adapters
58 in QDIO mode (all media types), HiperSockets interfaces and VM GuestLAN 58 in QDIO mode (all media types), HiperSockets interfaces and VM GuestLAN
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
index 29d176036e5c..0b96d49dd636 100644
--- a/drivers/s390/net/qeth_main.c
+++ b/drivers/s390/net/qeth_main.c
@@ -2860,7 +2860,7 @@ qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int,
2860 if (!atomic_read(&queue->set_pci_flags_count)){ 2860 if (!atomic_read(&queue->set_pci_flags_count)){
2861 /* 2861 /*
2862 * there's no outstanding PCI any more, so we 2862 * there's no outstanding PCI any more, so we
2863 * have to request a PCI to be sure the the PCI 2863 * have to request a PCI to be sure that the PCI
2864 * will wake at some time in the future then we 2864 * will wake at some time in the future then we
2865 * can flush packed buffers that might still be 2865 * can flush packed buffers that might still be
2866 * hanging around, which can happen if no 2866 * hanging around, which can happen if no
diff --git a/drivers/s390/net/qeth_mpc.c b/drivers/s390/net/qeth_mpc.c
index f54fdfdbf06f..f29a4bc4f6f2 100644
--- a/drivers/s390/net/qeth_mpc.c
+++ b/drivers/s390/net/qeth_mpc.c
@@ -162,7 +162,7 @@ struct ipa_rc_msg {
162 char *msg; 162 char *msg;
163}; 163};
164 164
165struct ipa_rc_msg qeth_ipa_rc_msg[] = { 165static struct ipa_rc_msg qeth_ipa_rc_msg[] = {
166 {IPA_RC_SUCCESS, "success"}, 166 {IPA_RC_SUCCESS, "success"},
167 {IPA_RC_NOTSUPP, "Command not supported"}, 167 {IPA_RC_NOTSUPP, "Command not supported"},
168 {IPA_RC_IP_TABLE_FULL, "Add Addr IP Table Full - ipv6"}, 168 {IPA_RC_IP_TABLE_FULL, "Add Addr IP Table Full - ipv6"},
@@ -226,7 +226,7 @@ struct ipa_cmd_names {
226 char *name; 226 char *name;
227}; 227};
228 228
229struct ipa_cmd_names qeth_ipa_cmd_names[] = { 229static struct ipa_cmd_names qeth_ipa_cmd_names[] = {
230 {IPA_CMD_STARTLAN, "startlan"}, 230 {IPA_CMD_STARTLAN, "startlan"},
231 {IPA_CMD_STOPLAN, "stoplan"}, 231 {IPA_CMD_STOPLAN, "stoplan"},
232 {IPA_CMD_SETVMAC, "setvmac"}, 232 {IPA_CMD_SETVMAC, "setvmac"},
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 324899c96efe..ddff40c4212c 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -607,8 +607,7 @@ zfcp_sg_list_free(struct zfcp_sg_list *sg_list)
607 * @sg_count: elements in array 607 * @sg_count: elements in array
608 * Return: size of entire scatter-gather list 608 * Return: size of entire scatter-gather list
609 */ 609 */
610size_t 610static size_t zfcp_sg_size(struct scatterlist *sg, unsigned int sg_count)
611zfcp_sg_size(struct scatterlist *sg, unsigned int sg_count)
612{ 611{
613 unsigned int i; 612 unsigned int i;
614 struct scatterlist *p; 613 struct scatterlist *p;
@@ -975,8 +974,7 @@ zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
975 mempool_destroy(adapter->pool.data_gid_pn); 974 mempool_destroy(adapter->pool.data_gid_pn);
976} 975}
977 976
978void 977static void zfcp_dummy_release(struct device *dev)
979zfcp_dummy_release(struct device *dev)
980{ 978{
981 return; 979 return;
982} 980}
@@ -1336,7 +1334,7 @@ zfcp_nameserver_enqueue(struct zfcp_adapter *adapter)
1336 1334
1337#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FC 1335#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FC
1338 1336
1339void 1337static void
1340zfcp_fsf_incoming_els_rscn(struct zfcp_adapter *adapter, 1338zfcp_fsf_incoming_els_rscn(struct zfcp_adapter *adapter,
1341 struct fsf_status_read_buffer *status_buffer) 1339 struct fsf_status_read_buffer *status_buffer)
1342{ 1340{
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index d8191d115c14..5f3212440f68 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -478,7 +478,7 @@ static struct debug_view zfcp_hba_dbf_view = {
478 NULL 478 NULL
479}; 479};
480 480
481void 481static void
482_zfcp_san_dbf_event_common_ct(const char *tag, struct zfcp_fsf_req *fsf_req, 482_zfcp_san_dbf_event_common_ct(const char *tag, struct zfcp_fsf_req *fsf_req,
483 u32 s_id, u32 d_id, void *buffer, int buflen) 483 u32 s_id, u32 d_id, void *buffer, int buflen)
484{ 484{
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index cb08ca3cc0f9..bdf5782b8a7a 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -222,7 +222,7 @@ zfcp_qdio_handler_error_check(struct zfcp_adapter *adapter, unsigned int status,
222 * Since we have been using this adapter, it is save to assume 222 * Since we have been using this adapter, it is save to assume
223 * that it is not failed but recoverable. The card seems to 223 * that it is not failed but recoverable. The card seems to
224 * report link-up events by self-initiated queue shutdown. 224 * report link-up events by self-initiated queue shutdown.
225 * That is why we need to clear the the link-down flag 225 * That is why we need to clear the link-down flag
226 * which is set again in case we have missed by a mile. 226 * which is set again in case we have missed by a mile.
227 */ 227 */
228 zfcp_erp_adapter_reopen( 228 zfcp_erp_adapter_reopen(
diff --git a/drivers/sbus/char/bpp.c b/drivers/sbus/char/bpp.c
index 74b999d77bbf..4fab0c23814c 100644
--- a/drivers/sbus/char/bpp.c
+++ b/drivers/sbus/char/bpp.c
@@ -156,7 +156,7 @@ static unsigned short get_pins(unsigned minor)
156#define BPP_ICR 0x18 156#define BPP_ICR 0x18
157#define BPP_SIZE 0x1A 157#define BPP_SIZE 0x1A
158 158
159/* BPP_CSR. Bits of type RW1 are cleared with writting '1'. */ 159/* BPP_CSR. Bits of type RW1 are cleared with writing '1'. */
160#define P_DEV_ID_MASK 0xf0000000 /* R */ 160#define P_DEV_ID_MASK 0xf0000000 /* R */
161#define P_DEV_ID_ZEBRA 0x40000000 161#define P_DEV_ID_ZEBRA 0x40000000
162#define P_DEV_ID_L64854 0xa0000000 /* == NCR 89C100+89C105. Pity. */ 162#define P_DEV_ID_L64854 0xa0000000 /* == NCR 89C100+89C105. Pity. */
diff --git a/drivers/scsi/aic7xxx/aic79xx_pci.c b/drivers/scsi/aic7xxx/aic79xx_pci.c
index 8d72bbae96ad..0bada0028aa0 100644
--- a/drivers/scsi/aic7xxx/aic79xx_pci.c
+++ b/drivers/scsi/aic7xxx/aic79xx_pci.c
@@ -966,7 +966,7 @@ ahd_aic790X_setup(struct ahd_softc *ahd)
966 | AHD_BUSFREEREV_BUG; 966 | AHD_BUSFREEREV_BUG;
967 ahd->bugs |= AHD_LQOOVERRUN_BUG|AHD_EARLY_REQ_BUG; 967 ahd->bugs |= AHD_LQOOVERRUN_BUG|AHD_EARLY_REQ_BUG;
968 968
 969 /* If the user requested the the SLOWCRC bit to be set. */ 969 /* If the user requested that the SLOWCRC bit be set. */
970 if (aic79xx_slowcrc) 970 if (aic79xx_slowcrc)
971 ahd->features |= AHD_AIC79XXB_SLOWCRC; 971 ahd->features |= AHD_AIC79XXB_SLOWCRC;
972 972
diff --git a/drivers/scsi/aic94xx/Makefile b/drivers/scsi/aic94xx/Makefile
index e6b70123940c..e78ce0fa44d2 100644
--- a/drivers/scsi/aic94xx/Makefile
+++ b/drivers/scsi/aic94xx/Makefile
@@ -6,7 +6,7 @@
6# 6#
7# This file is licensed under GPLv2. 7# This file is licensed under GPLv2.
8# 8#
9# This file is part of the the aic94xx driver. 9# This file is part of the aic94xx driver.
10# 10#
11# The aic94xx driver is free software; you can redistribute it and/or 11# The aic94xx driver is free software; you can redistribute it and/or
12# modify it under the terms of the GNU General Public License as 12# modify it under the terms of the GNU General Public License as
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index a965ed3548d5..564ea90ed3a0 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -541,7 +541,7 @@ static struct ParameterData __devinitdata cfg_data[] = {
541 541
542 542
543/* 543/*
544 * Safe settings. If set to zero the the BIOS/default values with 544 * Safe settings. If set to zero the BIOS/default values with
545 * command line overrides will be used. If set to 1 then safe and 545 * command line overrides will be used. If set to 1 then safe and
546 * slow settings will be used. 546 * slow settings will be used.
547 */ 547 */
@@ -617,7 +617,7 @@ static void __devinit fix_settings(void)
617 617
618/* 618/*
619 * Mapping from the eeprom delay index value (index into this array) 619 * Mapping from the eeprom delay index value (index into this array)
620 * to the the number of actual seconds that the delay should be for. 620 * to the number of actual seconds that the delay should be for.
621 */ 621 */
622static char __devinitdata eeprom_index_to_delay_map[] = 622static char __devinitdata eeprom_index_to_delay_map[] =
623 { 1, 3, 5, 10, 16, 30, 60, 120 }; 623 { 1, 3, 5, 10, 16, 30, 60, 120 };
@@ -4136,7 +4136,7 @@ static void __devinit trms1040_write_all(struct NvRamType *eeprom, unsigned long
4136 * @io_port: base I/O address 4136 * @io_port: base I/O address
4137 * @addr: offset into SEEPROM 4137 * @addr: offset into SEEPROM
4138 * 4138 *
4139 * Returns the the byte read. 4139 * Returns the byte read.
4140 **/ 4140 **/
4141static u8 __devinit trms1040_get_data(unsigned long io_port, u8 addr) 4141static u8 __devinit trms1040_get_data(unsigned long io_port, u8 addr)
4142{ 4142{
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index 2b5b8a93bc10..8263f752809d 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -721,19 +721,23 @@ static ide_startstop_t idescsi_do_request (ide_drive_t *drive, struct request *r
721 return ide_stopped; 721 return ide_stopped;
722} 722}
723 723
724#ifdef CONFIG_IDE_PROC_FS
724static void idescsi_add_settings(ide_drive_t *drive) 725static void idescsi_add_settings(ide_drive_t *drive)
725{ 726{
726 idescsi_scsi_t *scsi = drive_to_idescsi(drive); 727 idescsi_scsi_t *scsi = drive_to_idescsi(drive);
727 728
728/* 729/*
729 * drive setting name read/write ioctl ioctl data type min max mul_factor div_factor data pointer set function 730 * drive setting name read/write data type min max mul_factor div_factor data pointer set function
730 */ 731 */
731 ide_add_setting(drive, "bios_cyl", SETTING_RW, -1, -1, TYPE_INT, 0, 1023, 1, 1, &drive->bios_cyl, NULL); 732 ide_add_setting(drive, "bios_cyl", SETTING_RW, TYPE_INT, 0, 1023, 1, 1, &drive->bios_cyl, NULL);
732 ide_add_setting(drive, "bios_head", SETTING_RW, -1, -1, TYPE_BYTE, 0, 255, 1, 1, &drive->bios_head, NULL); 733 ide_add_setting(drive, "bios_head", SETTING_RW, TYPE_BYTE, 0, 255, 1, 1, &drive->bios_head, NULL);
733 ide_add_setting(drive, "bios_sect", SETTING_RW, -1, -1, TYPE_BYTE, 0, 63, 1, 1, &drive->bios_sect, NULL); 734 ide_add_setting(drive, "bios_sect", SETTING_RW, TYPE_BYTE, 0, 63, 1, 1, &drive->bios_sect, NULL);
734 ide_add_setting(drive, "transform", SETTING_RW, -1, -1, TYPE_INT, 0, 3, 1, 1, &scsi->transform, NULL); 735 ide_add_setting(drive, "transform", SETTING_RW, TYPE_INT, 0, 3, 1, 1, &scsi->transform, NULL);
735 ide_add_setting(drive, "log", SETTING_RW, -1, -1, TYPE_INT, 0, 1, 1, 1, &scsi->log, NULL); 736 ide_add_setting(drive, "log", SETTING_RW, TYPE_INT, 0, 1, 1, 1, &scsi->log, NULL);
736} 737}
738#else
739static inline void idescsi_add_settings(ide_drive_t *drive) { ; }
740#endif
737 741
738/* 742/*
739 * Driver initialization. 743 * Driver initialization.
@@ -756,7 +760,7 @@ static void ide_scsi_remove(ide_drive_t *drive)
756 struct ide_scsi_obj *scsi = scsihost_to_idescsi(scsihost); 760 struct ide_scsi_obj *scsi = scsihost_to_idescsi(scsihost);
757 struct gendisk *g = scsi->disk; 761 struct gendisk *g = scsi->disk;
758 762
759 ide_unregister_subdriver(drive, scsi->driver); 763 ide_proc_unregister_driver(drive, scsi->driver);
760 764
761 ide_unregister_region(g); 765 ide_unregister_region(g);
762 766
@@ -770,13 +774,11 @@ static void ide_scsi_remove(ide_drive_t *drive)
770 774
771static int ide_scsi_probe(ide_drive_t *); 775static int ide_scsi_probe(ide_drive_t *);
772 776
773#ifdef CONFIG_PROC_FS 777#ifdef CONFIG_IDE_PROC_FS
774static ide_proc_entry_t idescsi_proc[] = { 778static ide_proc_entry_t idescsi_proc[] = {
775 { "capacity", S_IFREG|S_IRUGO, proc_ide_read_capacity, NULL }, 779 { "capacity", S_IFREG|S_IRUGO, proc_ide_read_capacity, NULL },
776 { NULL, 0, NULL, NULL } 780 { NULL, 0, NULL, NULL }
777}; 781};
778#else
779# define idescsi_proc NULL
780#endif 782#endif
781 783
782static ide_driver_t idescsi_driver = { 784static ide_driver_t idescsi_driver = {
@@ -790,11 +792,13 @@ static ide_driver_t idescsi_driver = {
790 .version = IDESCSI_VERSION, 792 .version = IDESCSI_VERSION,
791 .media = ide_scsi, 793 .media = ide_scsi,
792 .supports_dsc_overlap = 0, 794 .supports_dsc_overlap = 0,
793 .proc = idescsi_proc,
794 .do_request = idescsi_do_request, 795 .do_request = idescsi_do_request,
795 .end_request = idescsi_end_request, 796 .end_request = idescsi_end_request,
796 .error = idescsi_atapi_error, 797 .error = idescsi_atapi_error,
797 .abort = idescsi_atapi_abort, 798 .abort = idescsi_atapi_abort,
799#ifdef CONFIG_IDE_PROC_FS
800 .proc = idescsi_proc,
801#endif
798}; 802};
799 803
800static int idescsi_ide_open(struct inode *inode, struct file *filp) 804static int idescsi_ide_open(struct inode *inode, struct file *filp)
@@ -1153,7 +1157,7 @@ static int ide_scsi_probe(ide_drive_t *drive)
1153 idescsi->host = host; 1157 idescsi->host = host;
1154 idescsi->disk = g; 1158 idescsi->disk = g;
1155 g->private_data = &idescsi->driver; 1159 g->private_data = &idescsi->driver;
1156 ide_register_subdriver(drive, &idescsi_driver); 1160 ide_proc_register_driver(drive, &idescsi_driver);
1157 err = 0; 1161 err = 0;
1158 idescsi_setup(drive, idescsi); 1162 idescsi_setup(drive, idescsi);
1159 g->fops = &idescsi_ops; 1163 g->fops = &idescsi_ops;
@@ -1165,7 +1169,7 @@ static int ide_scsi_probe(ide_drive_t *drive)
1165 } 1169 }
1166 /* fall through on error */ 1170 /* fall through on error */
1167 ide_unregister_region(g); 1171 ide_unregister_region(g);
1168 ide_unregister_subdriver(drive, &idescsi_driver); 1172 ide_proc_unregister_driver(drive, &idescsi_driver);
1169 1173
1170 put_disk(g); 1174 put_disk(g);
1171out_host_put: 1175out_host_put:
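
The CONFIG_IDE_PROC_FS split in ide-scsi.c above follows the standard stub idiom: the real idescsi_add_settings() is compiled only when the option is set, with an empty static inline otherwise, so callers need no #ifdef of their own. Sketched generically (hypothetical names, registration body elided):

#include <linux/ide.h>

#ifdef CONFIG_IDE_PROC_FS
static void add_settings_sketch(ide_drive_t *drive)
{
	/* the real ide_add_setting() registrations go here,
	 * exactly as in the hunk above */
}
#else
static inline void add_settings_sketch(ide_drive_t *drive) { }
#endif

/* Caller stays #ifdef-free; the stub compiles to nothing when =n. */
static void setup_sketch(ide_drive_t *drive)
{
	add_settings_sketch(drive);
}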
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 61fbcdcbb009..1f5a07bf2a75 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -173,7 +173,7 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
173 * @retries: number of times to retry request 173 * @retries: number of times to retry request
174 * @flags: or into request flags; 174 * @flags: or into request flags;
175 * 175 *
176 * returns the req->errors value which is the the scsi_cmnd result 176 * returns the req->errors value which is the scsi_cmnd result
177 * field. 177 * field.
178 **/ 178 **/
179int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, 179int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index e8efe938c4e7..a6f5bfbb777b 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -5,6 +5,7 @@
5# 5#
6 6
7menu "Serial drivers" 7menu "Serial drivers"
8 depends on HAS_IOMEM
8 9
9# 10#
10# The new 8250/16550 serial drivers 11# The new 8250/16550 serial drivers
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 07c587ec71be..7c9d37f651e3 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -6,6 +6,7 @@
6# fully appropriate there, so it'd need some thought to do well. 6# fully appropriate there, so it'd need some thought to do well.
7# 7#
8menu "SPI support" 8menu "SPI support"
9 depends on HAS_IOMEM
9 10
10config SPI 11config SPI
11 bool "SPI support" 12 bool "SPI support"
diff --git a/drivers/spi/atmel_spi.c b/drivers/spi/atmel_spi.c
index 66e7bc985797..1d8a2f6bb8eb 100644
--- a/drivers/spi/atmel_spi.c
+++ b/drivers/spi/atmel_spi.c
@@ -22,10 +22,7 @@
22#include <asm/io.h> 22#include <asm/io.h>
23#include <asm/arch/board.h> 23#include <asm/arch/board.h>
24#include <asm/arch/gpio.h> 24#include <asm/arch/gpio.h>
25
26#ifdef CONFIG_ARCH_AT91
27#include <asm/arch/cpu.h> 25#include <asm/arch/cpu.h>
28#endif
29 26
30#include "atmel_spi.h" 27#include "atmel_spi.h"
31 28
@@ -552,10 +549,8 @@ static int __init atmel_spi_probe(struct platform_device *pdev)
552 goto out_free_buffer; 549 goto out_free_buffer;
553 as->irq = irq; 550 as->irq = irq;
554 as->clk = clk; 551 as->clk = clk;
555#ifdef CONFIG_ARCH_AT91
556 if (!cpu_is_at91rm9200()) 552 if (!cpu_is_at91rm9200())
557 as->new_1 = 1; 553 as->new_1 = 1;
558#endif
559 554
560 ret = request_irq(irq, atmel_spi_interrupt, 0, 555 ret = request_irq(irq, atmel_spi_interrupt, 0,
561 pdev->dev.bus_id, master); 556 pdev->dev.bus_id, master);
diff --git a/drivers/telephony/Kconfig b/drivers/telephony/Kconfig
index 7625b1816baf..dd1d6a53f3c0 100644
--- a/drivers/telephony/Kconfig
+++ b/drivers/telephony/Kconfig
@@ -3,6 +3,7 @@
3# 3#
4 4
5menu "Telephony Support" 5menu "Telephony Support"
6 depends on HAS_IOMEM
6 7
7config PHONE 8config PHONE
8 tristate "Linux telephony support" 9 tristate "Linux telephony support"
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index b847bbc8b0e1..15499b7e33f4 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -3,6 +3,7 @@
3# 3#
4 4
5menu "USB support" 5menu "USB support"
6 depends on HAS_IOMEM
6 7
7# Host-side USB depends on having a host controller 8# Host-side USB depends on having a host controller
8# NOTE: dummy_hcd is always an option, but it's ignored here ... 9# NOTE: dummy_hcd is always an option, but it's ignored here ...
@@ -87,8 +88,6 @@ source "drivers/usb/storage/Kconfig"
87 88
88source "drivers/usb/image/Kconfig" 89source "drivers/usb/image/Kconfig"
89 90
90source "drivers/usb/net/Kconfig"
91
92source "drivers/usb/mon/Kconfig" 91source "drivers/usb/mon/Kconfig"
93 92
94comment "USB port drivers" 93comment "USB port drivers"
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index 0ef090b1b37c..72464b586990 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -23,13 +23,6 @@ obj-$(CONFIG_USB_PRINTER) += class/
23obj-$(CONFIG_USB_STORAGE) += storage/ 23obj-$(CONFIG_USB_STORAGE) += storage/
24obj-$(CONFIG_USB) += storage/ 24obj-$(CONFIG_USB) += storage/
25 25
26obj-$(CONFIG_USB_CATC) += net/
27obj-$(CONFIG_USB_KAWETH) += net/
28obj-$(CONFIG_USB_PEGASUS) += net/
29obj-$(CONFIG_USB_RTL8150) += net/
30obj-$(CONFIG_USB_USBNET) += net/
31obj-$(CONFIG_USB_ZD1201) += net/
32
33obj-$(CONFIG_USB_MDC800) += image/ 26obj-$(CONFIG_USB_MDC800) += image/
34obj-$(CONFIG_USB_MICROTEK) += image/ 27obj-$(CONFIG_USB_MICROTEK) += image/
35 28
diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
index b082d95bbbaa..11e9b15ca45a 100644
--- a/drivers/usb/atm/usbatm.c
+++ b/drivers/usb/atm/usbatm.c
@@ -1033,7 +1033,7 @@ static int usbatm_do_heavy_init(void *arg)
1033 1033
1034static int usbatm_heavy_init(struct usbatm_data *instance) 1034static int usbatm_heavy_init(struct usbatm_data *instance)
1035{ 1035{
1036 int ret = kernel_thread(usbatm_do_heavy_init, instance, CLONE_KERNEL); 1036 int ret = kernel_thread(usbatm_do_heavy_init, instance, CLONE_FS | CLONE_FILES);
1037 1037
1038 if (ret < 0) { 1038 if (ret < 0) {
1039 usb_err(instance, "%s: failed to create kernel_thread (%d)!\n", __func__, ret); 1039 usb_err(instance, "%s: failed to create kernel_thread (%d)!\n", __func__, ret);
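
The usbatm change spells the clone flags out: in this era's <linux/sched.h>, CLONE_KERNEL expanded to CLONE_FS | CLONE_FILES | CLONE_SIGHAND, so the net effect is dropping CLONE_SIGHAND and giving the heavy-init thread its own signal handling. A sketch of the call, assuming that historical definition:

#include <linux/sched.h>

/*
 * Historical: #define CLONE_KERNEL (CLONE_FS | CLONE_FILES | CLONE_SIGHAND).
 * Listing CLONE_FS | CLONE_FILES keeps the shared fs and file-table
 * semantics but lets the new thread own its signal state.
 */
static int spawn_heavy_init_sketch(int (*fn)(void *), void *arg)
{
	return kernel_thread(fn, arg, CLONE_FS | CLONE_FILES);
}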
diff --git a/drivers/usb/misc/auerswald.c b/drivers/usb/misc/auerswald.c
index b5332e679c46..88fb56d5db8f 100644
--- a/drivers/usb/misc/auerswald.c
+++ b/drivers/usb/misc/auerswald.c
@@ -1307,7 +1307,7 @@ static int auerswald_addservice (pauerswald_t cp, pauerscon_t scp)
1307} 1307}
1308 1308
1309 1309
1310/* remove a service from the the device 1310/* remove a service from the device
1311 scp->id must be set! */ 1311 scp->id must be set! */
1312static void auerswald_removeservice (pauerswald_t cp, pauerscon_t scp) 1312static void auerswald_removeservice (pauerswald_t cp, pauerscon_t scp)
1313{ 1313{
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index ba5d1dc03036..3efe67092f15 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -558,7 +558,7 @@ config USB_SERIAL_DEBUG
558 tristate "USB Debugging Device" 558 tristate "USB Debugging Device"
559 depends on USB_SERIAL 559 depends on USB_SERIAL
560 help 560 help
561 Say Y here if you have a USB debugging device used to recieve 561 Say Y here if you have a USB debugging device used to receive
562 debugging data from another machine. The most common of these 562 debugging data from another machine. The most common of these
563 devices is the NetChip TurboCONNECT device. 563 devices is the NetChip TurboCONNECT device.
564 564
diff --git a/drivers/usb/serial/aircable.c b/drivers/usb/serial/aircable.c
index b675735bfbee..fbc8c27d5d99 100644
--- a/drivers/usb/serial/aircable.c
+++ b/drivers/usb/serial/aircable.c
@@ -9,7 +9,7 @@
9 * The device works as an standard CDC device, it has 2 interfaces, the first 9 * The device works as an standard CDC device, it has 2 interfaces, the first
10 * one is for firmware access and the second is the serial one. 10 * one is for firmware access and the second is the serial one.
11 * The protocol is very simply, there are two posibilities reading or writing. 11 * The protocol is very simply, there are two posibilities reading or writing.
12 * When writting the first urb must have a Header that starts with 0x20 0x29 the 12 * When writing the first urb must have a Header that starts with 0x20 0x29 the
13 * next two bytes must say how much data will be sended. 13 * next two bytes must say how much data will be sended.
14 * When reading the process is almost equal except that the header starts with 14 * When reading the process is almost equal except that the header starts with
15 * 0x00 0x20. 15 * 0x00 0x20.
@@ -18,7 +18,7 @@
18 * buffer: The First and Second byte is used for a Header, the Third and Fourth 18 * buffer: The First and Second byte is used for a Header, the Third and Fourth
19 * tells the device the amount of information the package holds. 19 * tells the device the amount of information the package holds.
20 * Packages are 60 bytes long Header Stuff. 20 * Packages are 60 bytes long Header Stuff.
21 * When writting to the device the first two bytes of the header are 0x20 0x29 21 * When writing to the device the first two bytes of the header are 0x20 0x29
22 * When reading the bytes are 0x00 0x20, or 0x00 0x10, there is an strange 22 * When reading the bytes are 0x00 0x20, or 0x00 0x10, there is an strange
23 * situation, when too much data arrives to the device because it sends the data 23 * situation, when too much data arrives to the device because it sends the data
24 * but with out the header. I will use a simply hack to override this situation, 24 * but with out the header. I will use a simply hack to override this situation,
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index 18f74ac76565..4807f960150b 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -2465,7 +2465,7 @@ static int send_cmd_write_uart_register (struct edgeport_port *edge_port, __u8 r
2465 ((edge_serial->is_epic) && 2465 ((edge_serial->is_epic) &&
2466 (!edge_serial->epic_descriptor.Supports.IOSPWriteMCR) && 2466 (!edge_serial->epic_descriptor.Supports.IOSPWriteMCR) &&
2467 (regNum == MCR))) { 2467 (regNum == MCR))) {
2468 dbg("SendCmdWriteUartReg - Not writting to MCR Register"); 2468 dbg("SendCmdWriteUartReg - Not writing to MCR Register");
2469 return 0; 2469 return 0;
2470 } 2470 }
2471 2471
@@ -2473,7 +2473,7 @@ static int send_cmd_write_uart_register (struct edgeport_port *edge_port, __u8 r
2473 ((edge_serial->is_epic) && 2473 ((edge_serial->is_epic) &&
2474 (!edge_serial->epic_descriptor.Supports.IOSPWriteLCR) && 2474 (!edge_serial->epic_descriptor.Supports.IOSPWriteLCR) &&
2475 (regNum == LCR))) { 2475 (regNum == LCR))) {
2476 dbg ("SendCmdWriteUartReg - Not writting to LCR Register"); 2476 dbg ("SendCmdWriteUartReg - Not writing to LCR Register");
2477 return 0; 2477 return 0;
2478 } 2478 }
2479 2479
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 1132ba5ff391..f54438828cb9 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -3,6 +3,7 @@
3# 3#
4 4
5menu "Graphics support" 5menu "Graphics support"
6 depends on HAS_IOMEM
6 7
7source "drivers/video/backlight/Kconfig" 8source "drivers/video/backlight/Kconfig"
8source "drivers/video/display/Kconfig" 9source "drivers/video/display/Kconfig"
@@ -1348,6 +1349,20 @@ config FB_VOODOO1
1348 Please read the <file:Documentation/fb/README-sstfb.txt> for supported 1349 Please read the <file:Documentation/fb/README-sstfb.txt> for supported
1349 options and other important info support. 1350 options and other important info support.
1350 1351
1352config FB_VT8623
1353 tristate "VIA VT8623 support"
1354 depends on FB && PCI
1355 select FB_CFB_FILLRECT
1356 select FB_CFB_COPYAREA
1357 select FB_CFB_IMAGEBLIT
1358 select FB_TILEBLITTING
1359 select FB_SVGALIB
1360 select VGASTATE
1361 select FONT_8x16 if FRAMEBUFFER_CONSOLE
1362 ---help---
1363 Driver for CastleRock integrated graphics core in the
1364 VIA VT8623 [Apollo CLE266] chipset.
1365
1351config FB_CYBLA 1366config FB_CYBLA
1352 tristate "Cyberblade/i1 support" 1367 tristate "Cyberblade/i1 support"
1353 depends on FB && PCI && X86_32 && !64BIT 1368 depends on FB && PCI && X86_32 && !64BIT
@@ -1401,6 +1416,20 @@ config FB_TRIDENT_ACCEL
1401 This will compile the Trident frame buffer device with 1416 This will compile the Trident frame buffer device with
1402 acceleration functions. 1417 acceleration functions.
1403 1418
1419config FB_ARK
1420 tristate "ARK 2000PV support"
1421 depends on FB && PCI
1422 select FB_CFB_FILLRECT
1423 select FB_CFB_COPYAREA
1424 select FB_CFB_IMAGEBLIT
1425 select FB_TILEBLITTING
1426 select FB_SVGALIB
1427 select VGASTATE
1428 select FONT_8x16 if FRAMEBUFFER_CONSOLE
1429 ---help---
1430 Driver for PCI graphics boards with ARK 2000PV chip
1431 and ICS 5342 RAMDAC.
1432
1404config FB_PM3 1433config FB_PM3
1405 tristate "Permedia3 support" 1434 tristate "Permedia3 support"
1406 depends on FB && PCI && BROKEN 1435 depends on FB && PCI && BROKEN
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index a916c204274f..0b70567458fb 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -54,10 +54,12 @@ obj-$(CONFIG_FB_VALKYRIE) += valkyriefb.o
54obj-$(CONFIG_FB_CT65550) += chipsfb.o 54obj-$(CONFIG_FB_CT65550) += chipsfb.o
55obj-$(CONFIG_FB_IMSTT) += imsttfb.o 55obj-$(CONFIG_FB_IMSTT) += imsttfb.o
56obj-$(CONFIG_FB_FM2) += fm2fb.o 56obj-$(CONFIG_FB_FM2) += fm2fb.o
57obj-$(CONFIG_FB_VT8623) += vt8623fb.o
57obj-$(CONFIG_FB_CYBLA) += cyblafb.o 58obj-$(CONFIG_FB_CYBLA) += cyblafb.o
58obj-$(CONFIG_FB_TRIDENT) += tridentfb.o 59obj-$(CONFIG_FB_TRIDENT) += tridentfb.o
59obj-$(CONFIG_FB_LE80578) += vermilion/ 60obj-$(CONFIG_FB_LE80578) += vermilion/
60obj-$(CONFIG_FB_S3) += s3fb.o 61obj-$(CONFIG_FB_S3) += s3fb.o
62obj-$(CONFIG_FB_ARK) += arkfb.o
61obj-$(CONFIG_FB_STI) += stifb.o 63obj-$(CONFIG_FB_STI) += stifb.o
62obj-$(CONFIG_FB_FFB) += ffb.o sbuslib.o 64obj-$(CONFIG_FB_FFB) += ffb.o sbuslib.o
63obj-$(CONFIG_FB_CG6) += cg6.o sbuslib.o 65obj-$(CONFIG_FB_CG6) += cg6.o sbuslib.o
diff --git a/drivers/video/arkfb.c b/drivers/video/arkfb.c
new file mode 100644
index 000000000000..ba6fede5c466
--- /dev/null
+++ b/drivers/video/arkfb.c
@@ -0,0 +1,1200 @@
1/*
2 * linux/drivers/video/arkfb.c -- Frame buffer device driver for ARK 2000PV
3 * with ICS 5342 dac (it is easy to add support for different dacs).
4 *
5 * Copyright (c) 2007 Ondrej Zajicek <santiago@crfreenet.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file COPYING in the main directory of this archive for
9 * more details.
10 *
11 * Code is based on s3fb
12 */
13
14#include <linux/version.h>
15#include <linux/module.h>
16#include <linux/kernel.h>
17#include <linux/errno.h>
18#include <linux/string.h>
19#include <linux/mm.h>
20#include <linux/tty.h>
21#include <linux/slab.h>
22#include <linux/delay.h>
23#include <linux/fb.h>
24#include <linux/svga.h>
25#include <linux/init.h>
26#include <linux/pci.h>
27#include <linux/console.h> /* Why should fb driver call console functions? because acquire_console_sem() */
28#include <video/vga.h>
29
30#ifdef CONFIG_MTRR
31#include <asm/mtrr.h>
32#endif
33
34struct arkfb_info {
35 int mclk_freq;
36 int mtrr_reg;
37
38 struct dac_info *dac;
39 struct vgastate state;
40 struct mutex open_lock;
41 unsigned int ref_count;
42 u32 pseudo_palette[16];
43};
44
45
46/* ------------------------------------------------------------------------- */
47
48
49static const struct svga_fb_format arkfb_formats[] = {
50 { 0, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 0,
51 FB_TYPE_TEXT, FB_AUX_TEXT_SVGA_STEP4, FB_VISUAL_PSEUDOCOLOR, 8, 8},
52 { 4, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 0,
53 FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_PSEUDOCOLOR, 8, 16},
54 { 4, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 1,
55 FB_TYPE_INTERLEAVED_PLANES, 1, FB_VISUAL_PSEUDOCOLOR, 8, 16},
56 { 8, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 0,
57 FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_PSEUDOCOLOR, 8, 8},
58 {16, {10, 5, 0}, {5, 5, 0}, {0, 5, 0}, {0, 0, 0}, 0,
59 FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_TRUECOLOR, 4, 4},
60 {16, {11, 5, 0}, {5, 6, 0}, {0, 5, 0}, {0, 0, 0}, 0,
61 FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_TRUECOLOR, 4, 4},
62 {24, {16, 8, 0}, {8, 8, 0}, {0, 8, 0}, {0, 0, 0}, 0,
63 FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_TRUECOLOR, 8, 8},
64 {32, {16, 8, 0}, {8, 8, 0}, {0, 8, 0}, {0, 0, 0}, 0,
65 FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_TRUECOLOR, 2, 2},
66 SVGA_FORMAT_END
67};
68
69
70/* CRT timing register sets */
71
72static const struct vga_regset ark_h_total_regs[] = {{0x00, 0, 7}, {0x41, 7, 7}, VGA_REGSET_END};
73static const struct vga_regset ark_h_display_regs[] = {{0x01, 0, 7}, {0x41, 6, 6}, VGA_REGSET_END};
74static const struct vga_regset ark_h_blank_start_regs[] = {{0x02, 0, 7}, {0x41, 5, 5}, VGA_REGSET_END};
75static const struct vga_regset ark_h_blank_end_regs[] = {{0x03, 0, 4}, {0x05, 7, 7 }, VGA_REGSET_END};
76static const struct vga_regset ark_h_sync_start_regs[] = {{0x04, 0, 7}, {0x41, 4, 4}, VGA_REGSET_END};
77static const struct vga_regset ark_h_sync_end_regs[] = {{0x05, 0, 4}, VGA_REGSET_END};
78
79static const struct vga_regset ark_v_total_regs[] = {{0x06, 0, 7}, {0x07, 0, 0}, {0x07, 5, 5}, {0x40, 7, 7}, VGA_REGSET_END};
80static const struct vga_regset ark_v_display_regs[] = {{0x12, 0, 7}, {0x07, 1, 1}, {0x07, 6, 6}, {0x40, 6, 6}, VGA_REGSET_END};
81static const struct vga_regset ark_v_blank_start_regs[] = {{0x15, 0, 7}, {0x07, 3, 3}, {0x09, 5, 5}, {0x40, 5, 5}, VGA_REGSET_END};
82// const struct vga_regset ark_v_blank_end_regs[] = {{0x16, 0, 6}, VGA_REGSET_END};
83static const struct vga_regset ark_v_blank_end_regs[] = {{0x16, 0, 7}, VGA_REGSET_END};
84static const struct vga_regset ark_v_sync_start_regs[] = {{0x10, 0, 7}, {0x07, 2, 2}, {0x07, 7, 7}, {0x40, 4, 4}, VGA_REGSET_END};
85static const struct vga_regset ark_v_sync_end_regs[] = {{0x11, 0, 3}, VGA_REGSET_END};
86
87static const struct vga_regset ark_line_compare_regs[] = {{0x18, 0, 7}, {0x07, 4, 4}, {0x09, 6, 6}, VGA_REGSET_END};
88static const struct vga_regset ark_start_address_regs[] = {{0x0d, 0, 7}, {0x0c, 0, 7}, {0x40, 0, 2}, VGA_REGSET_END};
89static const struct vga_regset ark_offset_regs[] = {{0x13, 0, 7}, {0x41, 3, 3}, VGA_REGSET_END};
90
91static const struct svga_timing_regs ark_timing_regs = {
92 ark_h_total_regs, ark_h_display_regs, ark_h_blank_start_regs,
93 ark_h_blank_end_regs, ark_h_sync_start_regs, ark_h_sync_end_regs,
94 ark_v_total_regs, ark_v_display_regs, ark_v_blank_start_regs,
95 ark_v_blank_end_regs, ark_v_sync_start_regs, ark_v_sync_end_regs,
96};
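/* (annotation, not part of the original file: each vga_regset entry above
 * is {CRT register index, first bit, last bit}.  A multi-register helper
 * such as svga_wcrt_multi() is assumed to peel off (last - first + 1) bits
 * of the value per entry, lowest bits first, so a 9-bit horizontal total
 * of 0x123 would land as CR00 <- 0x23 with bit 8 going to CR41 bit 7.) */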
97
98
99/* ------------------------------------------------------------------------- */
100
101
102/* Module parameters */
103
104static char *mode = "640x480-8@60";
105
106#ifdef CONFIG_MTRR
107static int mtrr = 1;
108#endif
109
110MODULE_AUTHOR("(c) 2007 Ondrej Zajicek <santiago@crfreenet.org>");
111MODULE_LICENSE("GPL");
112MODULE_DESCRIPTION("fbdev driver for ARK 2000PV");
113
114module_param(mode, charp, 0444);
115MODULE_PARM_DESC(mode, "Default video mode ('640x480-8@60', etc)");
116
117#ifdef CONFIG_MTRR
118module_param(mtrr, int, 0444);
119MODULE_PARM_DESC(mtrr, "Enable write-combining with MTRR (1=enable, 0=disable, default=1)");
120#endif
121
122static int threshold = 4;
123
124module_param(threshold, int, 0644);
125MODULE_PARM_DESC(threshold, "FIFO threshold");
126
127
128/* ------------------------------------------------------------------------- */
129
130
131static void arkfb_settile(struct fb_info *info, struct fb_tilemap *map)
132{
133 const u8 *font = map->data;
134 u8 __iomem *fb = (u8 __iomem *)info->screen_base;
135 int i, c;
136
137 if ((map->width != 8) || (map->height != 16) ||
138 (map->depth != 1) || (map->length != 256)) {
139 printk(KERN_ERR "fb%d: unsupported font parameters: width %d, "
140 "height %d, depth %d, length %d\n", info->node,
141 map->width, map->height, map->depth, map->length);
142 return;
143 }
144
145 fb += 2;
146 for (c = 0; c < map->length; c++) {
147 for (i = 0; i < map->height; i++) {
148 fb_writeb(font[i], &fb[i * 4]);
149 fb_writeb(font[i], &fb[i * 4 + (128 * 8)]);
150 }
151 fb += 128;
152
153 if ((c % 8) == 7)
154 fb += 128*8;
155
156 font += map->height;
157 }
158}
159
160static struct fb_tile_ops arkfb_tile_ops = {
161 .fb_settile = arkfb_settile,
162 .fb_tilecopy = svga_tilecopy,
163 .fb_tilefill = svga_tilefill,
164 .fb_tileblit = svga_tileblit,
165 .fb_tilecursor = svga_tilecursor,
166 .fb_get_tilemax = svga_get_tilemax,
167};
168
169
170/* ------------------------------------------------------------------------- */
171
172
173/* image data is MSB-first, fb structure is MSB-first too */
174static inline u32 expand_color(u32 c)
175{
176 return ((c & 1) | ((c & 2) << 7) | ((c & 4) << 14) | ((c & 8) << 21)) * 0xFF;
177}
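/* (annotation, not part of the original file: worked example -- c = 5 =
 * 0b0101 sets plane bits 0 and 2, giving 0x00010001, and the final * 0xFF
 * smears each set bit over a whole byte: 0x00FF00FF.  One 32-bit word thus
 * carries one byte per plane for 8 pixels of a 4bpp interleaved line.) */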
178
179/* arkfb_iplan_imageblit silently assumes that almost everything is 8-pixel aligned */
180static void arkfb_iplan_imageblit(struct fb_info *info, const struct fb_image *image)
181{
182 u32 fg = expand_color(image->fg_color);
183 u32 bg = expand_color(image->bg_color);
184 const u8 *src1, *src;
185 u8 __iomem *dst1;
186 u32 __iomem *dst;
187 u32 val;
188 int x, y;
189
190 src1 = image->data;
191 dst1 = info->screen_base + (image->dy * info->fix.line_length)
192 + ((image->dx / 8) * 4);
193
194 for (y = 0; y < image->height; y++) {
195 src = src1;
196 dst = (u32 __iomem *) dst1;
197 for (x = 0; x < image->width; x += 8) {
198 val = *(src++) * 0x01010101;
199 val = (val & fg) | (~val & bg);
200 fb_writel(val, dst++);
201 }
202 src1 += image->width / 8;
203 dst1 += info->fix.line_length;
204 }
205
206}
207
208/* arkfb_iplan_fillrect silently assumes that almost everything is 8-pixel aligned */
209static void arkfb_iplan_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
210{
211 u32 fg = expand_color(rect->color);
212 u8 __iomem *dst1;
213 u32 __iomem *dst;
214 int x, y;
215
216 dst1 = info->screen_base + (rect->dy * info->fix.line_length)
217 + ((rect->dx / 8) * 4);
218
219 for (y = 0; y < rect->height; y++) {
220 dst = (u32 __iomem *) dst1;
221 for (x = 0; x < rect->width; x += 8) {
222 fb_writel(fg, dst++);
223 }
224 dst1 += info->fix.line_length;
225 }
226
227}
228
229
230/* image data is MSB-first, fb structure is high-nibble-in-low-byte-first */
231static inline u32 expand_pixel(u32 c)
232{
233 return (((c & 1) << 24) | ((c & 2) << 27) | ((c & 4) << 14) | ((c & 8) << 17) |
234 ((c & 16) << 4) | ((c & 32) << 7) | ((c & 64) >> 6) | ((c & 128) >> 3)) * 0xF;
235}
236
237/* arkfb_cfb4_imageblit silently assumes that almost everything is 8-pixel aligned */
238static void arkfb_cfb4_imageblit(struct fb_info *info, const struct fb_image *image)
239{
240 u32 fg = image->fg_color * 0x11111111;
241 u32 bg = image->bg_color * 0x11111111;
242 const u8 *src1, *src;
243 u8 __iomem *dst1;
244 u32 __iomem *dst;
245 u32 val;
246 int x, y;
247
248 src1 = image->data;
249 dst1 = info->screen_base + (image->dy * info->fix.line_length)
250 + ((image->dx / 8) * 4);
251
252 for (y = 0; y < image->height; y++) {
253 src = src1;
254 dst = (u32 __iomem *) dst1;
255 for (x = 0; x < image->width; x += 8) {
256 val = expand_pixel(*(src++));
257 val = (val & fg) | (~val & bg);
258 fb_writel(val, dst++);
259 }
260 src1 += image->width / 8;
261 dst1 += info->fix.line_length;
262 }
263
264}
265
266static void arkfb_imageblit(struct fb_info *info, const struct fb_image *image)
267{
268 if ((info->var.bits_per_pixel == 4) && (image->depth == 1)
269 && ((image->width % 8) == 0) && ((image->dx % 8) == 0)) {
270 if (info->fix.type == FB_TYPE_INTERLEAVED_PLANES)
271 arkfb_iplan_imageblit(info, image);
272 else
273 arkfb_cfb4_imageblit(info, image);
274 } else
275 cfb_imageblit(info, image);
276}
277
278static void arkfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
279{
280 if ((info->var.bits_per_pixel == 4)
281 && ((rect->width % 8) == 0) && ((rect->dx % 8) == 0)
282 && (info->fix.type == FB_TYPE_INTERLEAVED_PLANES))
283 arkfb_iplan_fillrect(info, rect);
284 else
285 cfb_fillrect(info, rect);
286}
287
288
289/* ------------------------------------------------------------------------- */
290
291
292enum
293{
294 DAC_PSEUDO8_8,
295 DAC_RGB1555_8,
296 DAC_RGB0565_8,
297 DAC_RGB0888_8,
298 DAC_RGB8888_8,
299 DAC_PSEUDO8_16,
300 DAC_RGB1555_16,
301 DAC_RGB0565_16,
302 DAC_RGB0888_16,
303 DAC_RGB8888_16,
304 DAC_MAX
305};
306
307struct dac_ops {
308 int (*dac_get_mode)(struct dac_info *info);
309 int (*dac_set_mode)(struct dac_info *info, int mode);
310 int (*dac_get_freq)(struct dac_info *info, int channel);
311 int (*dac_set_freq)(struct dac_info *info, int channel, u32 freq);
312 void (*dac_release)(struct dac_info *info);
313};
314
315typedef void (*dac_read_regs_t)(void *data, u8 *code, int count);
316typedef void (*dac_write_regs_t)(void *data, u8 *code, int count);
317
318struct dac_info
319{
320 struct dac_ops *dacops;
321 dac_read_regs_t dac_read_regs;
322 dac_write_regs_t dac_write_regs;
323 void *data;
324};
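/* (annotation, not part of the original file: the 'code' buffer passed to
 * dac_read_regs/dac_write_regs apparently holds 'count' register/value
 * byte pairs -- code[2*i] is the DAC register index, code[2*i+1] the value
 * read or written; see ark_dac_read_regs()/ark_dac_write_regs() below,
 * where bit 2 of the index is routed to a bank-select bit in SR1C.) */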
325
326
327static inline u8 dac_read_reg(struct dac_info *info, u8 reg)
328{
329 u8 code[2] = {reg, 0};
330 info->dac_read_regs(info->data, code, 1);
331 return code[1];
332}
333
334static inline void dac_read_regs(struct dac_info *info, u8 *code, int count)
335{
336 info->dac_read_regs(info->data, code, count);
337}
338
339static inline void dac_write_reg(struct dac_info *info, u8 reg, u8 val)
340{
341 u8 code[2] = {reg, val};
342 info->dac_write_regs(info->data, code, 1);
343}
344
345static inline void dac_write_regs(struct dac_info *info, u8 *code, int count)
346{
347 info->dac_write_regs(info->data, code, count);
348}
349
350static inline int dac_set_mode(struct dac_info *info, int mode)
351{
352 return info->dacops->dac_set_mode(info, mode);
353}
354
355static inline int dac_set_freq(struct dac_info *info, int channel, u32 freq)
356{
357 return info->dacops->dac_set_freq(info, channel, freq);
358}
359
360static inline void dac_release(struct dac_info *info)
361{
362 info->dacops->dac_release(info);
363}
364
365
366/* ------------------------------------------------------------------------- */
367
368
369/* ICS5342 DAC */
370
371struct ics5342_info
372{
373 struct dac_info dac;
374 u8 mode;
375};
376
377#define DAC_PAR(info) ((struct ics5342_info *) info)
378
379/* LSB is set to distinguish unused slots */
380static const u8 ics5342_mode_table[DAC_MAX] = {
381 [DAC_PSEUDO8_8] = 0x01, [DAC_RGB1555_8] = 0x21, [DAC_RGB0565_8] = 0x61,
382 [DAC_RGB0888_8] = 0x41, [DAC_PSEUDO8_16] = 0x11, [DAC_RGB1555_16] = 0x31,
383 [DAC_RGB0565_16] = 0x51, [DAC_RGB0888_16] = 0x91, [DAC_RGB8888_16] = 0x71
384};
385
386static int ics5342_set_mode(struct dac_info *info, int mode)
387{
388 u8 code;
389
390 if (mode >= DAC_MAX)
391 return -EINVAL;
392
393 code = ics5342_mode_table[mode];
394
395 if (! code)
396 return -EINVAL;
397
398 dac_write_reg(info, 6, code & 0xF0);
399 DAC_PAR(info)->mode = mode;
400
401 return 0;
402}
403
404static const struct svga_pll ics5342_pll = {3, 129, 3, 33, 0, 3,
405 60000, 250000, 14318};
406
407/* pd4 - allow only posdivider 4 (r=2) */
408static const struct svga_pll ics5342_pll_pd4 = {3, 129, 3, 33, 2, 2,
409 60000, 335000, 14318};
410
411/* According to the specs, 270 MHz should be the upper bound for the VCO
412 clock, but that is too restrictive in the pd4 case */
413
414static int ics5342_set_freq(struct dac_info *info, int channel, u32 freq)
415{
416 u16 m, n, r;
417
418 /* only postdivider 4 (r=2) is valid in mode DAC_PSEUDO8_16 */
419 int rv = svga_compute_pll((DAC_PAR(info)->mode == DAC_PSEUDO8_16)
420 ? &ics5342_pll_pd4 : &ics5342_pll,
421 freq, &m, &n, &r, 0);
422
423 if (rv < 0) {
424 return -EINVAL;
425 } else {
426 u8 code[6] = {4, 3, 5, m-2, 5, (n-2) | (r << 5)};
427 dac_write_regs(info, code, 3);
428 return 0;
429 }
430}
431
432static void ics5342_release(struct dac_info *info)
433{
434 ics5342_set_mode(info, DAC_PSEUDO8_8);
435 kfree(info);
436}
437
438static struct dac_ops ics5342_ops = {
439 .dac_set_mode = ics5342_set_mode,
440 .dac_set_freq = ics5342_set_freq,
441 .dac_release = ics5342_release
442};
443
444
445static struct dac_info * ics5342_init(dac_read_regs_t drr, dac_write_regs_t dwr, void *data)
446{
447 struct dac_info *info = kzalloc(sizeof(struct ics5342_info), GFP_KERNEL);
448
449 if (! info)
450 return NULL;
451
452 info->dacops = &ics5342_ops;
453 info->dac_read_regs = drr;
454 info->dac_write_regs = dwr;
455 info->data = data;
456 DAC_PAR(info)->mode = DAC_PSEUDO8_8; /* estimation */
457 return info;
458}
459
460
461/* ------------------------------------------------------------------------- */
462
463
464static unsigned short dac_regs[4] = {0x3c8, 0x3c9, 0x3c6, 0x3c7};
465
466static void ark_dac_read_regs(void *data, u8 *code, int count)
467{
468 u8 regval = vga_rseq(NULL, 0x1C);
469
470 while (count != 0)
471 {
472 vga_wseq(NULL, 0x1C, regval | ((code[0] & 4) ? 0x80 : 0)); /* parentheses needed: ?: binds more loosely than | */
473 code[1] = vga_r(NULL, dac_regs[code[0] & 3]);
474 count--;
475 code += 2;
476 }
477
478 vga_wseq(NULL, 0x1C, regval);
479}
480
481static void ark_dac_write_regs(void *data, u8 *code, int count)
482{
483 u8 regval = vga_rseq(NULL, 0x1C);
484
485 while (count != 0)
486 {
487 vga_wseq(NULL, 0x1C, regval | ((code[0] & 4) ? 0x80 : 0));
488 vga_w(NULL, dac_regs[code[0] & 3], code[1]);
489 count--;
490 code += 2;
491 }
492
493 vga_wseq(NULL, 0x1C, regval);
494}
495
496
497static void ark_set_pixclock(struct fb_info *info, u32 pixclock)
498{
499 struct arkfb_info *par = info->par;
500 u8 regval;
501
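 /* (annotation, not part of the original file: fb_var pixclock is a
 * period in picoseconds, so 1000000000 / pixclock below yields the dot
 * clock in kHz -- the unit of the svga_pll frequency limits above;
 * e.g. the 39721 ps VGA pixclock gives roughly 25175 kHz.) */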
502 int rv = dac_set_freq(par->dac, 0, 1000000000 / pixclock);
503 if (rv < 0) {
504 printk(KERN_ERR "fb%d: cannot set requested pixclock, keeping old value\n", info->node);
505 return;
506 }
507
508 /* Set VGA misc register */
509 regval = vga_r(NULL, VGA_MIS_R);
510 vga_w(NULL, VGA_MIS_W, regval | VGA_MIS_ENB_PLL_LOAD);
511}
512
513
514/* Open framebuffer */
515
516static int arkfb_open(struct fb_info *info, int user)
517{
518 struct arkfb_info *par = info->par;
519
520 mutex_lock(&(par->open_lock));
521 if (par->ref_count == 0) {
522 memset(&(par->state), 0, sizeof(struct vgastate));
523 par->state.flags = VGA_SAVE_MODE | VGA_SAVE_FONTS | VGA_SAVE_CMAP;
524 par->state.num_crtc = 0x60;
525 par->state.num_seq = 0x30;
526 save_vga(&(par->state));
527 }
528
529 par->ref_count++;
530 mutex_unlock(&(par->open_lock));
531
532 return 0;
533}
534
535/* Close framebuffer */
536
537static int arkfb_release(struct fb_info *info, int user)
538{
539 struct arkfb_info *par = info->par;
540
541 mutex_lock(&(par->open_lock));
542 if (par->ref_count == 0) {
543 mutex_unlock(&(par->open_lock));
544 return -EINVAL;
545 }
546
547 if (par->ref_count == 1) {
548 restore_vga(&(par->state));
549 dac_set_mode(par->dac, DAC_PSEUDO8_8);
550 }
551
552 par->ref_count--;
553 mutex_unlock(&(par->open_lock));
554
555 return 0;
556}
557
558/* Validate passed in var */
559
560static int arkfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
561{
562 int rv, mem, step;
563
564 /* Find appropriate format */
565 rv = svga_match_format (arkfb_formats, var, NULL);
566 if (rv < 0)
567 {
568 printk(KERN_ERR "fb%d: unsupported mode requested\n", info->node);
569 return rv;
570 }
571
572 /* Do not allow the real resolution to be larger than the virtual one */
573 if (var->xres > var->xres_virtual)
574 var->xres_virtual = var->xres;
575
576 if (var->yres > var->yres_virtual)
577 var->yres_virtual = var->yres;
578
579 /* Round up xres_virtual to have proper alignment of lines */
580 step = arkfb_formats[rv].xresstep - 1;
581 var->xres_virtual = (var->xres_virtual+step) & ~step;
582
583
584 /* Check whether we have enough memory */
585 mem = ((var->bits_per_pixel * var->xres_virtual) >> 3) * var->yres_virtual;
586 if (mem > info->screen_size)
587 {
588 printk(KERN_ERR "fb%d: not enough framebuffer memory (%d kB requested, %d kB available)\n", info->node, mem >> 10, (unsigned int) (info->screen_size >> 10));
589 return -EINVAL;
590 }
591
592 rv = svga_check_timings (&ark_timing_regs, var, info->node);
593 if (rv < 0)
594 {
595 printk(KERN_ERR "fb%d: invalid timings requested\n", info->node);
596 return rv;
597 }
598
599 /* Interlaced mode is broken */
600 if (var->vmode & FB_VMODE_INTERLACED)
601 return -EINVAL;
602
603 return 0;
604}
605
606/* Set video mode from par */
607
608static int arkfb_set_par(struct fb_info *info)
609{
610 struct arkfb_info *par = info->par;
611 u32 value, mode, hmul, hdiv, offset_value, screen_size;
612 u32 bpp = info->var.bits_per_pixel;
613 u8 regval;
614
615 if (bpp != 0) {
616 info->fix.ypanstep = 1;
617 info->fix.line_length = (info->var.xres_virtual * bpp) / 8;
618
619 info->flags &= ~FBINFO_MISC_TILEBLITTING;
620 info->tileops = NULL;
621
622 /* 4bpp supports only 8-pixel-wide tiles; any tile width otherwise */
623 info->pixmap.blit_x = (bpp == 4) ? (1 << (8 - 1)) : (~(u32)0);
624 info->pixmap.blit_y = ~(u32)0;
625
626 offset_value = (info->var.xres_virtual * bpp) / 64;
627 screen_size = info->var.yres_virtual * info->fix.line_length;
628 } else {
629 info->fix.ypanstep = 16;
630 info->fix.line_length = 0;
631
632 info->flags |= FBINFO_MISC_TILEBLITTING;
633 info->tileops = &arkfb_tile_ops;
634
635 /* supports 8x16 tiles only */
636 info->pixmap.blit_x = 1 << (8 - 1);
637 info->pixmap.blit_y = 1 << (16 - 1);
638
639 offset_value = info->var.xres_virtual / 16;
640 screen_size = (info->var.xres_virtual * info->var.yres_virtual) / 64;
641 }
642
643 info->var.xoffset = 0;
644 info->var.yoffset = 0;
645 info->var.activate = FB_ACTIVATE_NOW;
646
647 /* Unlock registers */
648 svga_wcrt_mask(0x11, 0x00, 0x80);
649
650 /* Blank screen and turn off sync */
651 svga_wseq_mask(0x01, 0x20, 0x20);
652 svga_wcrt_mask(0x17, 0x00, 0x80);
653
654 /* Set default values */
655 svga_set_default_gfx_regs();
656 svga_set_default_atc_regs();
657 svga_set_default_seq_regs();
658 svga_set_default_crt_regs();
659 svga_wcrt_multi(ark_line_compare_regs, 0xFFFFFFFF);
660 svga_wcrt_multi(ark_start_address_regs, 0);
661
662 /* ARK specific initialization */
663 svga_wseq_mask(0x10, 0x1F, 0x1F); /* enable linear framebuffer and full memory access */
664 svga_wseq_mask(0x12, 0x03, 0x03); /* 4 MB linear framebuffer size */
665
666 vga_wseq(NULL, 0x13, info->fix.smem_start >> 16);
667 vga_wseq(NULL, 0x14, info->fix.smem_start >> 24);
668 vga_wseq(NULL, 0x15, 0);
669 vga_wseq(NULL, 0x16, 0);
670
671 /* Set the FIFO threshold register */
672 /* It is a fascinating way to store a 5-bit value in an 8-bit register */
673 regval = 0x10 | ((threshold & 0x0E) >> 1) | (threshold & 0x01) << 7 | (threshold & 0x10) << 1;
674 vga_wseq(NULL, 0x18, regval);
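 /* (annotation, not part of the original file: the scatter above puts
 * threshold bit 0 into bit 7, bits 1-3 into bits 0-2 and bit 4 into
 * bit 5 of the register, with bit 4 of regval always set; e.g. the
 * default threshold of 4 (0b00100) yields 0x10 | 0x02 = 0x12.) */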
675
676 /* Set the offset register */
677 pr_debug("fb%d: offset register : %d\n", info->node, offset_value);
678 svga_wcrt_multi(ark_offset_regs, offset_value);
679
680 /* fix for hi-res textmode */
681 svga_wcrt_mask(0x40, 0x08, 0x08);
682
683 if (info->var.vmode & FB_VMODE_DOUBLE)
684 svga_wcrt_mask(0x09, 0x80, 0x80);
685 else
686 svga_wcrt_mask(0x09, 0x00, 0x80);
687
688 if (info->var.vmode & FB_VMODE_INTERLACED)
689 svga_wcrt_mask(0x44, 0x04, 0x04);
690 else
691 svga_wcrt_mask(0x44, 0x00, 0x04);
692
693 hmul = 1;
694 hdiv = 1;
695 mode = svga_match_format(arkfb_formats, &(info->var), &(info->fix));
696
697 /* Set mode-specific register values */
698 switch (mode) {
699 case 0:
700 pr_debug("fb%d: text mode\n", info->node);
701 svga_set_textmode_vga_regs();
702
703 vga_wseq(NULL, 0x11, 0x10); /* basic VGA mode */
704 svga_wcrt_mask(0x46, 0x00, 0x04); /* 8bit pixel path */
705 dac_set_mode(par->dac, DAC_PSEUDO8_8);
706
707 break;
708 case 1:
709 pr_debug("fb%d: 4 bit pseudocolor\n", info->node);
710 vga_wgfx(NULL, VGA_GFX_MODE, 0x40);
711
712 vga_wseq(NULL, 0x11, 0x10); /* basic VGA mode */
713 svga_wcrt_mask(0x46, 0x00, 0x04); /* 8bit pixel path */
714 dac_set_mode(par->dac, DAC_PSEUDO8_8);
715 break;
716 case 2:
717 pr_debug("fb%d: 4 bit pseudocolor, planar\n", info->node);
718
719 vga_wseq(NULL, 0x11, 0x10); /* basic VGA mode */
720 svga_wcrt_mask(0x46, 0x00, 0x04); /* 8bit pixel path */
721 dac_set_mode(par->dac, DAC_PSEUDO8_8);
722 break;
723 case 3:
724 pr_debug("fb%d: 8 bit pseudocolor\n", info->node);
725
726 vga_wseq(NULL, 0x11, 0x16); /* 8bpp accel mode */
727
728 if (info->var.pixclock > 20000) {
729 pr_debug("fb%d: not using multiplex\n", info->node);
730 svga_wcrt_mask(0x46, 0x00, 0x04); /* 8bit pixel path */
731 dac_set_mode(par->dac, DAC_PSEUDO8_8);
732 } else {
733 pr_debug("fb%d: using multiplex\n", info->node);
734 svga_wcrt_mask(0x46, 0x04, 0x04); /* 16bit pixel path */
735 dac_set_mode(par->dac, DAC_PSEUDO8_16);
736 hdiv = 2;
737 }
738 break;
739 case 4:
740 pr_debug("fb%d: 5/5/5 truecolor\n", info->node);
741
742 vga_wseq(NULL, 0x11, 0x1A); /* 16bpp accel mode */
743 svga_wcrt_mask(0x46, 0x04, 0x04); /* 16bit pixel path */
744 dac_set_mode(par->dac, DAC_RGB1555_16);
745 break;
746 case 5:
747 pr_debug("fb%d: 5/6/5 truecolor\n", info->node);
748
749 vga_wseq(NULL, 0x11, 0x1A); /* 16bpp accel mode */
750 svga_wcrt_mask(0x46, 0x04, 0x04); /* 16bit pixel path */
751 dac_set_mode(par->dac, DAC_RGB0565_16);
752 break;
753 case 6:
754 pr_debug("fb%d: 8/8/8 truecolor\n", info->node);
755
756 vga_wseq(NULL, 0x11, 0x16); /* 8bpp accel mode ??? */
757 svga_wcrt_mask(0x46, 0x04, 0x04); /* 16bit pixel path */
758 dac_set_mode(par->dac, DAC_RGB0888_16);
759 hmul = 3;
760 hdiv = 2;
761 break;
762 case 7:
763 pr_debug("fb%d: 8/8/8/8 truecolor\n", info->node);
764
765 vga_wseq(NULL, 0x11, 0x1E); /* 32bpp accel mode */
766 svga_wcrt_mask(0x46, 0x04, 0x04); /* 16bit pixel path */
767 dac_set_mode(par->dac, DAC_RGB8888_16);
768 hmul = 2;
769 break;
770 default:
771 printk(KERN_ERR "fb%d: unsupported mode - bug\n", info->node);
772 return -EINVAL;
773 }
774
775 ark_set_pixclock(info, (hdiv * info->var.pixclock) / hmul);
776 svga_set_timings(&ark_timing_regs, &(info->var), hmul, hdiv,
777 (info->var.vmode & FB_VMODE_DOUBLE) ? 2 : 1,
778 (info->var.vmode & FB_VMODE_INTERLACED) ? 2 : 1,
779 hmul, info->node);
780
781 /* Set interlaced mode start/end register */
782 value = info->var.xres + info->var.left_margin + info->var.right_margin + info->var.hsync_len;
783 value = ((value * hmul / hdiv) / 8) - 5;
784 vga_wcrt(NULL, 0x42, (value + 1) / 2);
785
786 memset_io(info->screen_base, 0x00, screen_size);
787 /* Device and screen back on */
788 svga_wcrt_mask(0x17, 0x80, 0x80);
789 svga_wseq_mask(0x01, 0x00, 0x20);
790
791 return 0;
792}
793
794/* Set a colour register */
795
796static int arkfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
797 u_int transp, struct fb_info *fb)
798{
799 switch (fb->var.bits_per_pixel) {
800 case 0:
801 case 4:
802 if (regno >= 16)
803 return -EINVAL;
804
805 if ((fb->var.bits_per_pixel == 4) &&
806 (fb->var.nonstd == 0)) {
807 outb(0xF0, VGA_PEL_MSK);
808 outb(regno*16, VGA_PEL_IW);
809 } else {
810 outb(0x0F, VGA_PEL_MSK);
811 outb(regno, VGA_PEL_IW);
812 }
813 outb(red >> 10, VGA_PEL_D);
814 outb(green >> 10, VGA_PEL_D);
815 outb(blue >> 10, VGA_PEL_D);
816 break;
817 case 8:
818 if (regno >= 256)
819 return -EINVAL;
820
821 outb(0xFF, VGA_PEL_MSK);
822 outb(regno, VGA_PEL_IW);
823 outb(red >> 10, VGA_PEL_D);
824 outb(green >> 10, VGA_PEL_D);
825 outb(blue >> 10, VGA_PEL_D);
826 break;
827 case 16:
828 if (regno >= 16)
829 return 0;
830
831 if (fb->var.green.length == 5)
832 ((u32*)fb->pseudo_palette)[regno] = ((red & 0xF800) >> 1) |
833 ((green & 0xF800) >> 6) | ((blue & 0xF800) >> 11);
834 else if (fb->var.green.length == 6)
835 ((u32*)fb->pseudo_palette)[regno] = (red & 0xF800) |
836 ((green & 0xFC00) >> 5) | ((blue & 0xF800) >> 11);
837 else
838 return -EINVAL;
839 break;
840 case 24:
841 case 32:
842 if (regno >= 16)
843 return 0;
844
845 ((u32*)fb->pseudo_palette)[regno] = ((red & 0xFF00) << 8) |
846 (green & 0xFF00) | ((blue & 0xFF00) >> 8);
847 break;
848 default:
849 return -EINVAL;
850 }
851
852 return 0;
853}
854
855/* Set the display blanking state */
856
857static int arkfb_blank(int blank_mode, struct fb_info *info)
858{
859 switch (blank_mode) {
860 case FB_BLANK_UNBLANK:
861 pr_debug("fb%d: unblank\n", info->node);
862 svga_wseq_mask(0x01, 0x00, 0x20);
863 svga_wcrt_mask(0x17, 0x80, 0x80);
864 break;
865 case FB_BLANK_NORMAL:
866 pr_debug("fb%d: blank\n", info->node);
867 svga_wseq_mask(0x01, 0x20, 0x20);
868 svga_wcrt_mask(0x17, 0x80, 0x80);
869 break;
870 case FB_BLANK_POWERDOWN:
871 case FB_BLANK_HSYNC_SUSPEND:
872 case FB_BLANK_VSYNC_SUSPEND:
873 pr_debug("fb%d: sync down\n", info->node);
874 svga_wseq_mask(0x01, 0x20, 0x20);
875 svga_wcrt_mask(0x17, 0x00, 0x80);
876 break;
877 }
878 return 0;
879}
880
881
882/* Pan the display */
883
884static int arkfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
885{
886 unsigned int offset;
887
888 /* Calculate the offset */
889 if (var->bits_per_pixel == 0) {
890 offset = (var->yoffset / 16) * (var->xres_virtual / 2) + (var->xoffset / 2);
891 offset = offset >> 2;
892 } else {
893 offset = (var->yoffset * info->fix.line_length) +
894 (var->xoffset * var->bits_per_pixel / 8);
895 offset = offset >> ((var->bits_per_pixel == 4) ? 2 : 3);
896 }
897
898 /* Set the offset */
899 svga_wcrt_multi(ark_start_address_regs, offset);
900
901 return 0;
902}
903
904
905/* ------------------------------------------------------------------------- */
906
907
908/* Frame buffer operations */
909
910static struct fb_ops arkfb_ops = {
911 .owner = THIS_MODULE,
912 .fb_open = arkfb_open,
913 .fb_release = arkfb_release,
914 .fb_check_var = arkfb_check_var,
915 .fb_set_par = arkfb_set_par,
916 .fb_setcolreg = arkfb_setcolreg,
917 .fb_blank = arkfb_blank,
918 .fb_pan_display = arkfb_pan_display,
919 .fb_fillrect = arkfb_fillrect,
920 .fb_copyarea = cfb_copyarea,
921 .fb_imageblit = arkfb_imageblit,
922 .fb_get_caps = svga_get_caps,
923};
924
925
926/* ------------------------------------------------------------------------- */
927
928
929/* PCI probe */
930static int __devinit ark_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
931{
932 struct fb_info *info;
933 struct arkfb_info *par;
934 int rc;
935 u8 regval;
936
937 /* Ignore secondary VGA device because there is no VGA arbitration */
938 if (! svga_primary_device(dev)) {
939 dev_info(&(dev->dev), "ignoring secondary device\n");
940 return -ENODEV;
941 }
942
943 /* Allocate and fill driver data structure */
944 info = framebuffer_alloc(sizeof(struct arkfb_info), NULL);
945 if (! info) {
946 dev_err(&(dev->dev), "cannot allocate memory\n");
947 return -ENOMEM;
948 }
949
950 par = info->par;
951 mutex_init(&par->open_lock);
952
953 info->flags = FBINFO_PARTIAL_PAN_OK | FBINFO_HWACCEL_YPAN;
954 info->fbops = &arkfb_ops;
955
956 /* Prepare PCI device */
957 rc = pci_enable_device(dev);
958 if (rc < 0) {
959 dev_err(&(dev->dev), "cannot enable PCI device\n");
960 goto err_enable_device;
961 }
962
963 rc = pci_request_regions(dev, "arkfb");
964 if (rc < 0) {
965 dev_err(&(dev->dev), "cannot reserve framebuffer region\n");
966 goto err_request_regions;
967 }
968
969 par->dac = ics5342_init(ark_dac_read_regs, ark_dac_write_regs, info);
970 if (! par->dac) {
971 rc = -ENOMEM;
972 dev_err(&(dev->dev), "RAMDAC initialization failed\n");
973 goto err_dac;
974 }
975
976 info->fix.smem_start = pci_resource_start(dev, 0);
977 info->fix.smem_len = pci_resource_len(dev, 0);
978
979 /* Map physical IO memory address into kernel space */
980 info->screen_base = pci_iomap(dev, 0, 0);
981 if (! info->screen_base) {
982 rc = -ENOMEM;
983 dev_err(&(dev->dev), "iomap for framebuffer failed\n");
984 goto err_iomap;
985 }
986
987 /* FIXME get memsize */
988 regval = vga_rseq(NULL, 0x10);
989 info->screen_size = (1 << (regval >> 6)) << 20;
990 info->fix.smem_len = info->screen_size;
991
992 strcpy(info->fix.id, "ARK 2000PV");
993 info->fix.mmio_start = 0;
994 info->fix.mmio_len = 0;
995 info->fix.type = FB_TYPE_PACKED_PIXELS;
996 info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
997 info->fix.ypanstep = 0;
998 info->fix.accel = FB_ACCEL_NONE;
999 info->pseudo_palette = (void*) (par->pseudo_palette);
1000
1001 /* Prepare startup mode */
1002 rc = fb_find_mode(&(info->var), info, mode, NULL, 0, NULL, 8);
1003 if (! ((rc == 1) || (rc == 2))) {
1004 rc = -EINVAL;
1005 dev_err(&(dev->dev), "mode %s not found\n", mode);
1006 goto err_find_mode;
1007 }
1008
1009 rc = fb_alloc_cmap(&info->cmap, 256, 0);
1010 if (rc < 0) {
1011 dev_err(&(dev->dev), "cannot allocate colormap\n");
1012 goto err_alloc_cmap;
1013 }
1014
1015 rc = register_framebuffer(info);
1016 if (rc < 0) {
1017 dev_err(&(dev->dev), "cannot register framebuffer\n");
1018 goto err_reg_fb;
1019 }
1020
1021 printk(KERN_INFO "fb%d: %s on %s, %d MB RAM\n", info->node, info->fix.id,
1022 pci_name(dev), info->fix.smem_len >> 20);
1023
1024 /* Record a reference to the driver data */
1025 pci_set_drvdata(dev, info);
1026
1027#ifdef CONFIG_MTRR
1028 if (mtrr) {
1029 par->mtrr_reg = -1;
1030 par->mtrr_reg = mtrr_add(info->fix.smem_start, info->fix.smem_len, MTRR_TYPE_WRCOMB, 1);
1031 }
1032#endif
1033
1034 return 0;
1035
1036 /* Error handling */
1037err_reg_fb:
1038 fb_dealloc_cmap(&info->cmap);
1039err_alloc_cmap:
1040err_find_mode:
1041 pci_iounmap(dev, info->screen_base);
1042err_iomap:
1043 dac_release(par->dac);
1044err_dac:
1045 pci_release_regions(dev);
1046err_request_regions:
1047/* pci_disable_device(dev); */
1048err_enable_device:
1049 framebuffer_release(info);
1050 return rc;
1051}
1052
1053/* PCI remove */
1054
1055static void __devexit ark_pci_remove(struct pci_dev *dev)
1056{
1057 struct fb_info *info = pci_get_drvdata(dev);
1058
1059 if (info) {
1060 struct arkfb_info *par = info->par;
1061#ifdef CONFIG_MTRR
1062 if (par->mtrr_reg >= 0) {
1063 mtrr_del(par->mtrr_reg, 0, 0);
1064 par->mtrr_reg = -1;
1065 }
1066#endif
1067
1068 dac_release(par->dac);
1069 unregister_framebuffer(info);
1070 fb_dealloc_cmap(&info->cmap);
1071
1072 pci_iounmap(dev, info->screen_base);
1073 pci_release_regions(dev);
1074/* pci_disable_device(dev); */
1075
1076 pci_set_drvdata(dev, NULL);
1077 framebuffer_release(info);
1078 }
1079}
1080
1081
1082#ifdef CONFIG_PM
1083/* PCI suspend */
1084
1085static int ark_pci_suspend (struct pci_dev* dev, pm_message_t state)
1086{
1087 struct fb_info *info = pci_get_drvdata(dev);
1088 struct arkfb_info *par = info->par;
1089
1090 dev_info(&(dev->dev), "suspend\n");
1091
1092 acquire_console_sem();
1093 mutex_lock(&(par->open_lock));
1094
1095 if ((state.event == PM_EVENT_FREEZE) || (par->ref_count == 0)) {
1096 mutex_unlock(&(par->open_lock));
1097 release_console_sem();
1098 return 0;
1099 }
1100
1101 fb_set_suspend(info, 1);
1102
1103 pci_save_state(dev);
1104 pci_disable_device(dev);
1105 pci_set_power_state(dev, pci_choose_state(dev, state));
1106
1107 mutex_unlock(&(par->open_lock));
1108 release_console_sem();
1109
1110 return 0;
1111}
1112
1113
1114/* PCI resume */
1115
1116static int ark_pci_resume (struct pci_dev* dev)
1117{
1118 struct fb_info *info = pci_get_drvdata(dev);
1119 struct arkfb_info *par = info->par;
1120
1121 dev_info(&(dev->dev), "resume\n");
1122
1123 acquire_console_sem();
1124 mutex_lock(&(par->open_lock));
1125
1126 if (par->ref_count == 0) {
1127 mutex_unlock(&(par->open_lock));
1128 release_console_sem();
1129 return 0;
1130 }
1131
1132 pci_set_power_state(dev, PCI_D0);
1133 pci_restore_state(dev);
1134
1135 if (pci_enable_device(dev))
1136 goto fail;
1137
1138 pci_set_master(dev);
1139
1140 arkfb_set_par(info);
1141 fb_set_suspend(info, 0);
1142
1143 mutex_unlock(&(par->open_lock));
1144fail:
1145 release_console_sem();
1146 return 0;
1147}
1148#else
1149#define ark_pci_suspend NULL
1150#define ark_pci_resume NULL
1151#endif /* CONFIG_PM */
1152
1153/* List of boards that we are trying to support */
1154
1155static struct pci_device_id ark_devices[] __devinitdata = {
1156 {PCI_DEVICE(0xEDD8, 0xA099)},
1157 {0, 0, 0, 0, 0, 0, 0}
1158};
1159
1160
1161MODULE_DEVICE_TABLE(pci, ark_devices);
1162
1163static struct pci_driver arkfb_pci_driver = {
1164 .name = "arkfb",
1165 .id_table = ark_devices,
1166 .probe = ark_pci_probe,
1167 .remove = __devexit_p(ark_pci_remove),
1168 .suspend = ark_pci_suspend,
1169 .resume = ark_pci_resume,
1170};
1171
1172/* Cleanup */
1173
1174static void __exit arkfb_cleanup(void)
1175{
1176 pr_debug("arkfb: cleaning up\n");
1177 pci_unregister_driver(&arkfb_pci_driver);
1178}
1179
1180/* Driver Initialisation */
1181
1182static int __init arkfb_init(void)
1183{
1184
1185#ifndef MODULE
1186 char *option = NULL;
1187
1188 if (fb_get_options("arkfb", &option))
1189 return -ENODEV;
1190
1191 if (option && *option)
1192 mode = option;
1193#endif
1194
1195 pr_debug("arkfb: initializing\n");
1196 return pci_register_driver(&arkfb_pci_driver);
1197}
1198
1199module_init(arkfb_init);
1200module_exit(arkfb_cleanup);
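The first-open/last-close bookkeeping in arkfb_open() and arkfb_release() above is a self-contained pattern: snapshot the hardware state for the first user, restore it for the last one, all under a single mutex. A minimal user-space sketch of the same logic (pthreads stand in for the kernel mutex; every name here is illustrative, not kernel API):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t open_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int ref_count;

static void state_save(void)    { puts("save_vga()"); }
static void state_restore(void) { puts("restore_vga()"); }

static void fb_open_sketch(void)
{
        pthread_mutex_lock(&open_lock);
        if (ref_count == 0)             /* first opener snapshots the hardware */
                state_save();
        ref_count++;
        pthread_mutex_unlock(&open_lock);
}

static int fb_release_sketch(void)
{
        pthread_mutex_lock(&open_lock);
        if (ref_count == 0) {           /* unbalanced release, as in arkfb_release() */
                pthread_mutex_unlock(&open_lock);
                return -1;
        }
        if (ref_count == 1)             /* last closer restores the snapshot */
                state_restore();
        ref_count--;
        pthread_mutex_unlock(&open_lock);
        return 0;
}

int main(void)
{
        fb_open_sketch();
        fb_open_sketch();               /* second open: no second save */
        fb_release_sketch();
        fb_release_sketch();            /* last close: restore */
        printf("unbalanced release -> %d\n", fb_release_sketch());
        return 0;
}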
diff --git a/drivers/video/console/softcursor.c b/drivers/video/console/softcursor.c
index f577bd80e020..03cfb7ac5733 100644
--- a/drivers/video/console/softcursor.c
+++ b/drivers/video/console/softcursor.c
@@ -1,5 +1,5 @@
 /*
- * linux/drivers/video/softcursor.c
+ * linux/drivers/video/console/softcursor.c
  *
  * Generic software cursor for frame buffer devices
  *
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index 08d4e11d9121..38c2e2558f5e 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1236,6 +1236,10 @@ fb_mmap(struct file *file, struct vm_area_struct * vma)
 pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
 #elif defined(__arm__) || defined(__sh__) || defined(__m32r__)
 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+#elif defined(__avr32__)
+vma->vm_page_prot = __pgprot((pgprot_val(vma->vm_page_prot)
+ & ~_PAGE_CACHABLE)
+ | (_PAGE_BUFFER | _PAGE_DIRTY));
 #elif defined(__ia64__)
 if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
index 7e760197cf29..1a7d7789d877 100644
--- a/drivers/video/i810/i810_main.c
+++ b/drivers/video/i810/i810_main.c
@@ -1717,7 +1717,7 @@ static int __devinit i810_alloc_agp_mem(struct fb_info *info)
  * @info: pointer to device specific info structure
  *
  * DESCRIPTION:
- * Sets the the user monitor's horizontal and vertical
+ * Sets the user monitor's horizontal and vertical
  * frequency limits
  */
 static void __devinit i810_init_monspecs(struct fb_info *info)
diff --git a/drivers/video/matrox/matroxfb_Ti3026.c b/drivers/video/matrox/matroxfb_Ti3026.c
index a5690a5f29d5..9445cdb759b1 100644
--- a/drivers/video/matrox/matroxfb_Ti3026.c
+++ b/drivers/video/matrox/matroxfb_Ti3026.c
@@ -72,7 +72,7 @@
  * (c) 1998 Gerd Knorr <kraxel@cs.tu-berlin.de>
  *
  * (following author is not in any relation with this code, but his ideas
- * were used when writting this driver)
+ * were used when writing this driver)
  *
  * FreeVBE/AF (Matrox), "Shawn Hargreaves" <shawn@talula.demon.co.uk>
  *
diff --git a/drivers/video/matrox/matroxfb_accel.c b/drivers/video/matrox/matroxfb_accel.c
index a5c825d99466..c57aaadf410c 100644
--- a/drivers/video/matrox/matroxfb_accel.c
+++ b/drivers/video/matrox/matroxfb_accel.c
@@ -70,7 +70,7 @@
  * (c) 1998 Gerd Knorr <kraxel@cs.tu-berlin.de>
  *
  * (following author is not in any relation with this code, but his ideas
- * were used when writting this driver)
+ * were used when writing this driver)
  *
  * FreeVBE/AF (Matrox), "Shawn Hargreaves" <shawn@talula.demon.co.uk>
  *
diff --git a/drivers/video/matrox/matroxfb_base.c b/drivers/video/matrox/matroxfb_base.c
index cb2aa402ddfd..c8559a756b75 100644
--- a/drivers/video/matrox/matroxfb_base.c
+++ b/drivers/video/matrox/matroxfb_base.c
@@ -93,7 +93,7 @@
  * (c) 1998 Gerd Knorr <kraxel@cs.tu-berlin.de>
  *
  * (following author is not in any relation with this code, but his ideas
- * were used when writting this driver)
+ * were used when writing this driver)
  *
  * FreeVBE/AF (Matrox), "Shawn Hargreaves" <shawn@talula.demon.co.uk>
  *
diff --git a/drivers/video/matrox/matroxfb_misc.c b/drivers/video/matrox/matroxfb_misc.c
index 18886b629cb1..5948e54b9ef9 100644
--- a/drivers/video/matrox/matroxfb_misc.c
+++ b/drivers/video/matrox/matroxfb_misc.c
@@ -78,7 +78,7 @@
  * (c) 1998 Gerd Knorr <kraxel@cs.tu-berlin.de>
  *
  * (following author is not in any relation with this code, but his ideas
- * were used when writting this driver)
+ * were used when writing this driver)
  *
  * FreeVBE/AF (Matrox), "Shawn Hargreaves" <shawn@talula.demon.co.uk>
  *
diff --git a/drivers/video/nvidia/nv_hw.c b/drivers/video/nvidia/nv_hw.c
index f297c7b14a41..c627955aa124 100644
--- a/drivers/video/nvidia/nv_hw.c
+++ b/drivers/video/nvidia/nv_hw.c
@@ -149,8 +149,7 @@ static void nvGetClocks(struct nvidia_par *par, unsigned int *MClk,
 pll = NV_RD32(par->PMC, 0x4024);
 M = pll & 0xFF;
 N = (pll >> 8) & 0xFF;
-if (((par->Chipset & 0xfff0) == 0x0290) ||
-    ((par->Chipset & 0xfff0) == 0x0390)) {
+if (((par->Chipset & 0xfff0) == 0x0290) || ((par->Chipset & 0xfff0) == 0x0390) || ((par->Chipset & 0xfff0) == 0x02E0)) {
 MB = 1;
 NB = 1;
 } else {
@@ -963,6 +962,7 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
 
 if (((par->Chipset & 0xfff0) == 0x0090) ||
     ((par->Chipset & 0xfff0) == 0x01D0) ||
+    ((par->Chipset & 0xfff0) == 0x02E0) ||
     ((par->Chipset & 0xfff0) == 0x0290))
 regions = 15;
 for(i = 0; i < regions; i++) {
@@ -1275,6 +1275,7 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
     0x00100000);
 break;
 case 0x0090:
+case 0x02E0:
 case 0x0290:
 NV_WR32(par->PRAMDAC, 0x0608,
 NV_RD32(par->PRAMDAC, 0x0608) |
@@ -1352,6 +1353,7 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
 } else {
 if (((par->Chipset & 0xfff0) == 0x0090) ||
     ((par->Chipset & 0xfff0) == 0x01D0) ||
+    ((par->Chipset & 0xfff0) == 0x02E0) ||
     ((par->Chipset & 0xfff0) == 0x0290)) {
 for (i = 0; i < 60; i++) {
 NV_WR32(par->PGRAPH,
@@ -1403,6 +1405,7 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
 } else {
 if ((par->Chipset & 0xfff0) == 0x0090 ||
     (par->Chipset & 0xfff0) == 0x01D0 ||
+    (par->Chipset & 0xfff0) == 0x02E0 ||
     (par->Chipset & 0xfff0) == 0x0290) {
 NV_WR32(par->PGRAPH, 0x0DF0,
 NV_RD32(par->PFB, 0x0200));
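All five nv_hw.c hunks above repeat one idiom: masking the PCI device ID with 0xfff0 so that a single comparison covers every variant of a GPU family. That is why enabling the 0x02E0 family (GeForce 7300 GT, per the nvidia.c hunk below) is a one-line change per site. A stand-alone sketch of the test, using only chipset values taken from this patch:

#include <stdio.h>

/* mirrors the extended condition in the NVLoadStateExt() hunks */
static int in_extended_family(unsigned int chipset)
{
        unsigned int family = chipset & 0xfff0; /* drop the per-variant nibble */

        return family == 0x0090 || family == 0x01D0 ||
               family == 0x02E0 || family == 0x0290;
}

int main(void)
{
        printf("0x02E1 -> %d\n", in_extended_family(0x02E1)); /* 1: a 7300 GT variant */
        printf("0x0140 -> %d\n", in_extended_family(0x0140)); /* 0: GeForce 6600 */
        return 0;
}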
diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
index 7c36b5fe582e..f85edf084da3 100644
--- a/drivers/video/nvidia/nvidia.c
+++ b/drivers/video/nvidia/nvidia.c
@@ -1243,6 +1243,7 @@ static u32 __devinit nvidia_get_arch(struct fb_info *info)
 case 0x0140: /* GeForce 6600 */
 case 0x0160: /* GeForce 6200 */
 case 0x01D0: /* GeForce 7200, 7300, 7400 */
+case 0x02E0: /* GeForce 7300 GT */
 case 0x0090: /* GeForce 7800 */
 case 0x0210: /* GeForce 6800 */
 case 0x0220: /* GeForce 6200 */
diff --git a/drivers/video/s3fb.c b/drivers/video/s3fb.c
index 756fafb41d78..d11735895a01 100644
--- a/drivers/video/s3fb.c
+++ b/drivers/video/s3fb.c
@@ -796,23 +796,6 @@ static int s3fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
 return 0;
 }
 
-/* Get capabilities of accelerator based on the mode */
-
-static void s3fb_get_caps(struct fb_info *info, struct fb_blit_caps *caps,
- struct fb_var_screeninfo *var)
-{
- if (var->bits_per_pixel == 0) {
- /* can only support 256 8x16 bitmap */
- caps->x = 1 << (8 - 1);
- caps->y = 1 << (16 - 1);
- caps->len = 256;
- } else {
- caps->x = ~(u32)0;
- caps->y = ~(u32)0;
- caps->len = ~(u32)0;
- }
-}
-
 /* ------------------------------------------------------------------------- */
 
 /* Frame buffer operations */
@@ -829,7 +812,7 @@ static struct fb_ops s3fb_ops = {
 .fb_fillrect = s3fb_fillrect,
 .fb_copyarea = cfb_copyarea,
 .fb_imageblit = s3fb_imageblit,
-.fb_get_caps = s3fb_get_caps,
+.fb_get_caps = svga_get_caps,
 };
 
 /* ------------------------------------------------------------------------- */
diff --git a/drivers/video/skeletonfb.c b/drivers/video/skeletonfb.c
index 842b5cd054c6..836a612af977 100644
--- a/drivers/video/skeletonfb.c
+++ b/drivers/video/skeletonfb.c
@@ -14,7 +14,7 @@
  * of it.
  *
  * First the roles of struct fb_info and struct display have changed. Struct
- * display will go away. The way the the new framebuffer console code will
+ * display will go away. The way the new framebuffer console code will
  * work is that it will act to translate data about the tty/console in
  * struct vc_data to data in a device independent way in struct fb_info. Then
  * various functions in struct fb_ops will be called to store the device
diff --git a/drivers/video/svgalib.c b/drivers/video/svgalib.c
index 079cdc911e48..25df928d37d8 100644
--- a/drivers/video/svgalib.c
+++ b/drivers/video/svgalib.c
@@ -347,6 +347,23 @@ int svga_get_tilemax(struct fb_info *info)
 return 256;
 }
 
+/* Get capabilities of accelerator based on the mode */
+
+void svga_get_caps(struct fb_info *info, struct fb_blit_caps *caps,
+ struct fb_var_screeninfo *var)
+{
+ if (var->bits_per_pixel == 0) {
+ /* can only support 256 8x16 bitmap */
+ caps->x = 1 << (8 - 1);
+ caps->y = 1 << (16 - 1);
+ caps->len = 256;
+ } else {
+ caps->x = (var->bits_per_pixel == 4) ? 1 << (8 - 1) : ~(u32)0;
+ caps->y = ~(u32)0;
+ caps->len = ~(u32)0;
+ }
+}
+EXPORT_SYMBOL(svga_get_caps);
 
 /* ------------------------------------------------------------------------- */
 
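The blit-caps bitmasks produced by svga_get_caps() follow a convention this patch relies on but never states: judging from the 1 << (8 - 1) pattern, bit (n - 1) set in caps->x permits blit widths of n pixels. Under that assumption, the masks can be sanity-checked in isolation:

#include <stdio.h>
#include <stdint.h>

/* bit (n - 1) of caps_x set => blit width n supported (assumed convention) */
static int width_allowed(uint32_t caps_x, unsigned int width)
{
        return width >= 1 && width <= 32 && (caps_x & (1u << (width - 1))) != 0;
}

int main(void)
{
        uint32_t text_caps = 1u << (8 - 1);     /* text mode: 8-pixel cells only */
        uint32_t gfx_caps = ~(uint32_t)0;       /* graphics mode: any width */

        printf("text  8 -> %d\n", width_allowed(text_caps, 8));  /* 1 */
        printf("text 16 -> %d\n", width_allowed(text_caps, 16)); /* 0 */
        printf("gfx  16 -> %d\n", width_allowed(gfx_caps, 16));  /* 1 */
        return 0;
}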
diff --git a/drivers/video/vt8623fb.c b/drivers/video/vt8623fb.c
new file mode 100644
index 000000000000..5e9755e464a1
--- /dev/null
+++ b/drivers/video/vt8623fb.c
@@ -0,0 +1,927 @@
1/*
2 * linux/drivers/video/vt8623fb.c - fbdev driver for
3 * integrated graphic core in VIA VT8623 [CLE266] chipset
4 *
5 * Copyright (c) 2006-2007 Ondrej Zajicek <santiago@crfreenet.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file COPYING in the main directory of this archive for
9 * more details.
10 *
11 * Code is based on s3fb, some parts are from David Boucher's viafb
12 * (http://davesdomain.org.uk/viafb/)
13 */
14
15#include <linux/version.h>
16#include <linux/module.h>
17#include <linux/kernel.h>
18#include <linux/errno.h>
19#include <linux/string.h>
20#include <linux/mm.h>
21#include <linux/tty.h>
22#include <linux/slab.h>
23#include <linux/delay.h>
24#include <linux/fb.h>
25#include <linux/svga.h>
26#include <linux/init.h>
27#include <linux/pci.h>
28#include <linux/console.h> /* for acquire_console_sem() used by the suspend/resume hooks */
29#include <video/vga.h>
30
31#ifdef CONFIG_MTRR
32#include <asm/mtrr.h>
33#endif
34
35struct vt8623fb_info {
36 char __iomem *mmio_base;
37 int mtrr_reg;
38 struct vgastate state;
39 struct mutex open_lock;
40 unsigned int ref_count;
41 u32 pseudo_palette[16];
42};
43
44
45
46/* ------------------------------------------------------------------------- */
47
48static const struct svga_fb_format vt8623fb_formats[] = {
49 { 0, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 0,
50 FB_TYPE_TEXT, FB_AUX_TEXT_SVGA_STEP8, FB_VISUAL_PSEUDOCOLOR, 16, 16},
51 { 4, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 0,
52 FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_PSEUDOCOLOR, 16, 16},
53 { 4, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 1,
54 FB_TYPE_INTERLEAVED_PLANES, 1, FB_VISUAL_PSEUDOCOLOR, 16, 16},
55 { 8, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 0,
56 FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_PSEUDOCOLOR, 8, 8},
57/* {16, {10, 5, 0}, {5, 5, 0}, {0, 5, 0}, {0, 0, 0}, 0,
58 FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_TRUECOLOR, 4, 4}, */
59 {16, {11, 5, 0}, {5, 6, 0}, {0, 5, 0}, {0, 0, 0}, 0,
60 FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_TRUECOLOR, 4, 4},
61 {32, {16, 8, 0}, {8, 8, 0}, {0, 8, 0}, {0, 0, 0}, 0,
62 FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_TRUECOLOR, 2, 2},
63 SVGA_FORMAT_END
64};
65
66static const struct svga_pll vt8623_pll = {2, 127, 2, 7, 0, 3,
67 60000, 300000, 14318};
68
69/* CRT timing register sets */
70
71static const struct vga_regset vt8623_h_total_regs[] = {{0x00, 0, 7}, {0x36, 3, 3}, VGA_REGSET_END};
72static const struct vga_regset vt8623_h_display_regs[] = {{0x01, 0, 7}, VGA_REGSET_END};
73static const struct vga_regset vt8623_h_blank_start_regs[] = {{0x02, 0, 7}, VGA_REGSET_END};
74static const struct vga_regset vt8623_h_blank_end_regs[] = {{0x03, 0, 4}, {0x05, 7, 7}, {0x33, 5, 5}, VGA_REGSET_END};
75static const struct vga_regset vt8623_h_sync_start_regs[] = {{0x04, 0, 7}, {0x33, 4, 4}, VGA_REGSET_END};
76static const struct vga_regset vt8623_h_sync_end_regs[] = {{0x05, 0, 4}, VGA_REGSET_END};
77
78static const struct vga_regset vt8623_v_total_regs[] = {{0x06, 0, 7}, {0x07, 0, 0}, {0x07, 5, 5}, {0x35, 0, 0}, VGA_REGSET_END};
79static const struct vga_regset vt8623_v_display_regs[] = {{0x12, 0, 7}, {0x07, 1, 1}, {0x07, 6, 6}, {0x35, 2, 2}, VGA_REGSET_END};
80static const struct vga_regset vt8623_v_blank_start_regs[] = {{0x15, 0, 7}, {0x07, 3, 3}, {0x09, 5, 5}, {0x35, 3, 3}, VGA_REGSET_END};
81static const struct vga_regset vt8623_v_blank_end_regs[] = {{0x16, 0, 7}, VGA_REGSET_END};
82static const struct vga_regset vt8623_v_sync_start_regs[] = {{0x10, 0, 7}, {0x07, 2, 2}, {0x07, 7, 7}, {0x35, 1, 1}, VGA_REGSET_END};
83static const struct vga_regset vt8623_v_sync_end_regs[] = {{0x11, 0, 3}, VGA_REGSET_END};
84
85static const struct vga_regset vt8623_offset_regs[] = {{0x13, 0, 7}, {0x35, 5, 7}, VGA_REGSET_END};
86static const struct vga_regset vt8623_line_compare_regs[] = {{0x18, 0, 7}, {0x07, 4, 4}, {0x09, 6, 6}, {0x33, 0, 2}, {0x35, 4, 4}, VGA_REGSET_END};
87static const struct vga_regset vt8623_fetch_count_regs[] = {{0x1C, 0, 7}, {0x1D, 0, 1}, VGA_REGSET_END};
88static const struct vga_regset vt8623_start_address_regs[] = {{0x0d, 0, 7}, {0x0c, 0, 7}, {0x34, 0, 7}, {0x48, 0, 1}, VGA_REGSET_END};
89
90static const struct svga_timing_regs vt8623_timing_regs = {
91 vt8623_h_total_regs, vt8623_h_display_regs, vt8623_h_blank_start_regs,
92 vt8623_h_blank_end_regs, vt8623_h_sync_start_regs, vt8623_h_sync_end_regs,
93 vt8623_v_total_regs, vt8623_v_display_regs, vt8623_v_blank_start_regs,
94 vt8623_v_blank_end_regs, vt8623_v_sync_start_regs, vt8623_v_sync_end_regs,
95};
96
97
98/* ------------------------------------------------------------------------- */
99
100
101/* Module parameters */
102
103static char *mode = "640x480-8@60";
104
105#ifdef CONFIG_MTRR
106static int mtrr = 1;
107#endif
108
109MODULE_AUTHOR("(c) 2006 Ondrej Zajicek <santiago@crfreenet.org>");
110MODULE_LICENSE("GPL");
111MODULE_DESCRIPTION("fbdev driver for integrated graphics core in VIA VT8623 [CLE266]");
112
113module_param(mode, charp, 0644);
114MODULE_PARM_DESC(mode, "Default video mode ('640x480-8@60', etc)");
115
116#ifdef CONFIG_MTRR
117module_param(mtrr, int, 0444);
118MODULE_PARM_DESC(mtrr, "Enable write-combining with MTRR (1=enable, 0=disable, default=1)");
119#endif
120
121
122/* ------------------------------------------------------------------------- */
123
124
125static struct fb_tile_ops vt8623fb_tile_ops = {
126 .fb_settile = svga_settile,
127 .fb_tilecopy = svga_tilecopy,
128 .fb_tilefill = svga_tilefill,
129 .fb_tileblit = svga_tileblit,
130 .fb_tilecursor = svga_tilecursor,
131 .fb_get_tilemax = svga_get_tilemax,
132};
133
134
135/* ------------------------------------------------------------------------- */
136
137
138/* image data is MSB-first, fb structure is MSB-first too */
139static inline u32 expand_color(u32 c)
140{
141 return ((c & 1) | ((c & 2) << 7) | ((c & 4) << 14) | ((c & 8) << 21)) * 0xFF;
142}
143
144/* vt8623fb_iplan_imageblit silently assumes that almost everything is 8-pixel aligned */
145static void vt8623fb_iplan_imageblit(struct fb_info *info, const struct fb_image *image)
146{
147 u32 fg = expand_color(image->fg_color);
148 u32 bg = expand_color(image->bg_color);
149 const u8 *src1, *src;
150 u8 __iomem *dst1;
151 u32 __iomem *dst;
152 u32 val;
153 int x, y;
154
155 src1 = image->data;
156 dst1 = info->screen_base + (image->dy * info->fix.line_length)
157 + ((image->dx / 8) * 4);
158
159 for (y = 0; y < image->height; y++) {
160 src = src1;
161 dst = (u32 __iomem *) dst1;
162 for (x = 0; x < image->width; x += 8) {
163 val = *(src++) * 0x01010101;
164 val = (val & fg) | (~val & bg);
165 fb_writel(val, dst++);
166 }
167 src1 += image->width / 8;
168 dst1 += info->fix.line_length;
169 }
170}
171
172/* vt8623fb_iplan_fillrect silently assumes that almost everything is 8-pixel aligned */
173static void vt8623fb_iplan_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
174{
175 u32 fg = expand_color(rect->color);
176 u8 __iomem *dst1;
177 u32 __iomem *dst;
178 int x, y;
179
180 dst1 = info->screen_base + (rect->dy * info->fix.line_length)
181 + ((rect->dx / 8) * 4);
182
183 for (y = 0; y < rect->height; y++) {
184 dst = (u32 __iomem *) dst1;
185 for (x = 0; x < rect->width; x += 8) {
186 fb_writel(fg, dst++);
187 }
188 dst1 += info->fix.line_length;
189 }
190}
191
192
193/* image data is MSB-first, fb structure is high-nibble-in-low-byte-first */
194static inline u32 expand_pixel(u32 c)
195{
196 return (((c & 1) << 24) | ((c & 2) << 27) | ((c & 4) << 14) | ((c & 8) << 17) |
197 ((c & 16) << 4) | ((c & 32) << 7) | ((c & 64) >> 6) | ((c & 128) >> 3)) * 0xF;
198}
199
200/* vt8623fb_cfb4_imageblit silently assumes that almost everything is 8-pixel aligned */
201static void vt8623fb_cfb4_imageblit(struct fb_info *info, const struct fb_image *image)
202{
203 u32 fg = image->fg_color * 0x11111111;
204 u32 bg = image->bg_color * 0x11111111;
205 const u8 *src1, *src;
206 u8 __iomem *dst1;
207 u32 __iomem *dst;
208 u32 val;
209 int x, y;
210
211 src1 = image->data;
212 dst1 = info->screen_base + (image->dy * info->fix.line_length)
213 + ((image->dx / 8) * 4);
214
215 for (y = 0; y < image->height; y++) {
216 src = src1;
217 dst = (u32 __iomem *) dst1;
218 for (x = 0; x < image->width; x += 8) {
219 val = expand_pixel(*(src++));
220 val = (val & fg) | (~val & bg);
221 fb_writel(val, dst++);
222 }
223 src1 += image->width / 8;
224 dst1 += info->fix.line_length;
225 }
226}
227
228static void vt8623fb_imageblit(struct fb_info *info, const struct fb_image *image)
229{
230 if ((info->var.bits_per_pixel == 4) && (image->depth == 1)
231 && ((image->width % 8) == 0) && ((image->dx % 8) == 0)) {
232 if (info->fix.type == FB_TYPE_INTERLEAVED_PLANES)
233 vt8623fb_iplan_imageblit(info, image);
234 else
235 vt8623fb_cfb4_imageblit(info, image);
236 } else
237 cfb_imageblit(info, image);
238}
239
240static void vt8623fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
241{
242 if ((info->var.bits_per_pixel == 4)
243 && ((rect->width % 8) == 0) && ((rect->dx % 8) == 0)
244 && (info->fix.type == FB_TYPE_INTERLEAVED_PLANES))
245 vt8623fb_iplan_fillrect(info, rect);
246 else
247 cfb_fillrect(info, rect);
248}
249
250
251/* ------------------------------------------------------------------------- */
252
253
254static void vt8623_set_pixclock(struct fb_info *info, u32 pixclock)
255{
256 u16 m, n, r;
257 u8 regval;
258 int rv;
259
260 rv = svga_compute_pll(&vt8623_pll, 1000000000 / pixclock, &m, &n, &r, info->node);
261 if (rv < 0) {
262 printk(KERN_ERR "fb%d: cannot set requested pixclock, keeping old value\n", info->node);
263 return;
264 }
265
266 /* Set VGA misc register */
267 regval = vga_r(NULL, VGA_MIS_R);
268 vga_w(NULL, VGA_MIS_W, regval | VGA_MIS_ENB_PLL_LOAD);
269
270 /* Set clock registers */
271 vga_wseq(NULL, 0x46, (n | (r << 6)));
272 vga_wseq(NULL, 0x47, m);
273
274 udelay(1000);
275
276 /* PLL reset */
277 svga_wseq_mask(0x40, 0x02, 0x02);
278 svga_wseq_mask(0x40, 0x00, 0x02);
279}
280
281
282static int vt8623fb_open(struct fb_info *info, int user)
283{
284 struct vt8623fb_info *par = info->par;
285
286 mutex_lock(&(par->open_lock));
287 if (par->ref_count == 0) {
288 memset(&(par->state), 0, sizeof(struct vgastate));
289 par->state.flags = VGA_SAVE_MODE | VGA_SAVE_FONTS | VGA_SAVE_CMAP;
290 par->state.num_crtc = 0xA2;
291 par->state.num_seq = 0x50;
292 save_vga(&(par->state));
293 }
294
295 par->ref_count++;
296 mutex_unlock(&(par->open_lock));
297
298 return 0;
299}
300
301static int vt8623fb_release(struct fb_info *info, int user)
302{
303 struct vt8623fb_info *par = info->par;
304
305 mutex_lock(&(par->open_lock));
306 if (par->ref_count == 0) {
307 mutex_unlock(&(par->open_lock));
308 return -EINVAL;
309 }
310
311 if (par->ref_count == 1)
312 restore_vga(&(par->state));
313
314 par->ref_count--;
315 mutex_unlock(&(par->open_lock));
316
317 return 0;
318}
319
320static int vt8623fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
321{
322 int rv, mem, step;
323
324 /* Find appropriate format */
325 rv = svga_match_format (vt8623fb_formats, var, NULL);
326 if (rv < 0)
327 {
328 printk(KERN_ERR "fb%d: unsupported mode requested\n", info->node);
329 return rv;
330 }
331
332 /* Do not allow the real resolution to be larger than the virtual one */
333 if (var->xres > var->xres_virtual)
334 var->xres_virtual = var->xres;
335
336 if (var->yres > var->yres_virtual)
337 var->yres_virtual = var->yres;
338
339 /* Round up xres_virtual to have proper alignment of lines */
340 step = vt8623fb_formats[rv].xresstep - 1;
341 var->xres_virtual = (var->xres_virtual+step) & ~step;
342
343 /* Check whether we have enough memory */
344 mem = ((var->bits_per_pixel * var->xres_virtual) >> 3) * var->yres_virtual;
345 if (mem > info->screen_size)
346 {
347 printk(KERN_ERR "fb%d: not enough framebuffer memory (%d kB requested, %d kB available)\n", info->node, mem >> 10, (unsigned int) (info->screen_size >> 10));
348 return -EINVAL;
349 }
350
351 /* Text mode is limited to 256 kB of memory */
352 if ((var->bits_per_pixel == 0) && (mem > (256*1024)))
353 {
354 printk(KERN_ERR "fb%d: text framebuffer size too large (%d kB requested, 256 kB possible)\n", info->node, mem >> 10);
355 return -EINVAL;
356 }
357
358 rv = svga_check_timings (&vt8623_timing_regs, var, info->node);
359 if (rv < 0)
360 {
361 printk(KERN_ERR "fb%d: invalid timings requested\n", info->node);
362 return rv;
363 }
364
365 /* Interlaced mode not supported */
366 if (var->vmode & FB_VMODE_INTERLACED)
367 return -EINVAL;
368
369 return 0;
370}
371
372
373static int vt8623fb_set_par(struct fb_info *info)
374{
375 u32 mode, offset_value, fetch_value, screen_size;
376 u32 bpp = info->var.bits_per_pixel;
377
378 if (bpp != 0) {
379 info->fix.ypanstep = 1;
380 info->fix.line_length = (info->var.xres_virtual * bpp) / 8;
381
382 info->flags &= ~FBINFO_MISC_TILEBLITTING;
383 info->tileops = NULL;
384
385		/* 4 bpp supports only 8-pixel wide blits, any width otherwise */
386 info->pixmap.blit_x = (bpp == 4) ? (1 << (8 - 1)) : (~(u32)0);
387 info->pixmap.blit_y = ~(u32)0;
388
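		/* Scanline offset is programmed in 8-byte units and the fetch
		   count in 16-byte units plus a small margin (assumed from the
		   divisors used below) */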
389 offset_value = (info->var.xres_virtual * bpp) / 64;
390 fetch_value = ((info->var.xres * bpp) / 128) + 4;
391
392 if (bpp == 4)
393 fetch_value = (info->var.xres / 8) + 8; /* + 0 is OK */
394
395 screen_size = info->var.yres_virtual * info->fix.line_length;
396 } else {
397 info->fix.ypanstep = 16;
398 info->fix.line_length = 0;
399
400 info->flags |= FBINFO_MISC_TILEBLITTING;
401 info->tileops = &vt8623fb_tile_ops;
402
403 /* supports 8x16 tiles only */
404 info->pixmap.blit_x = 1 << (8 - 1);
405 info->pixmap.blit_y = 1 << (16 - 1);
406
407 offset_value = info->var.xres_virtual / 16;
408 fetch_value = (info->var.xres / 8) + 8;
409 screen_size = (info->var.xres_virtual * info->var.yres_virtual) / 64;
410 }
411
412 info->var.xoffset = 0;
413 info->var.yoffset = 0;
414 info->var.activate = FB_ACTIVATE_NOW;
415
416 /* Unlock registers */
417 svga_wseq_mask(0x10, 0x01, 0x01);
418 svga_wcrt_mask(0x11, 0x00, 0x80);
419 svga_wcrt_mask(0x47, 0x00, 0x01);
420
421 /* Device, screen and sync off */
422 svga_wseq_mask(0x01, 0x20, 0x20);
423 svga_wcrt_mask(0x36, 0x30, 0x30);
424 svga_wcrt_mask(0x17, 0x00, 0x80);
425
426 /* Set default values */
427 svga_set_default_gfx_regs();
428 svga_set_default_atc_regs();
429 svga_set_default_seq_regs();
430 svga_set_default_crt_regs();
431 svga_wcrt_multi(vt8623_line_compare_regs, 0xFFFFFFFF);
432 svga_wcrt_multi(vt8623_start_address_regs, 0);
433
434 svga_wcrt_multi(vt8623_offset_regs, offset_value);
435 svga_wseq_multi(vt8623_fetch_count_regs, fetch_value);
436
437 if (info->var.vmode & FB_VMODE_DOUBLE)
438 svga_wcrt_mask(0x09, 0x80, 0x80);
439 else
440 svga_wcrt_mask(0x09, 0x00, 0x80);
441
442 svga_wseq_mask(0x1E, 0xF0, 0xF0); // DI/DVP bus
443 svga_wseq_mask(0x2A, 0x0F, 0x0F); // DI/DVP bus
444	svga_wseq_mask(0x16, 0x08, 0xBF); // FIFO read threshold
445 vga_wseq(NULL, 0x17, 0x1F); // FIFO depth
446 vga_wseq(NULL, 0x18, 0x4E);
447 svga_wseq_mask(0x1A, 0x08, 0x08); // enable MMIO ?
448
449 vga_wcrt(NULL, 0x32, 0x00);
450 vga_wcrt(NULL, 0x34, 0x00);
451 vga_wcrt(NULL, 0x6A, 0x80);
452 vga_wcrt(NULL, 0x6A, 0xC0);
453
454 vga_wgfx(NULL, 0x20, 0x00);
455 vga_wgfx(NULL, 0x21, 0x00);
456 vga_wgfx(NULL, 0x22, 0x00);
457
458 /* Set SR15 according to number of bits per pixel */
459 mode = svga_match_format(vt8623fb_formats, &(info->var), &(info->fix));
460 switch (mode) {
461 case 0:
462 pr_debug("fb%d: text mode\n", info->node);
463 svga_set_textmode_vga_regs();
464 svga_wseq_mask(0x15, 0x00, 0xFE);
465 svga_wcrt_mask(0x11, 0x60, 0x70);
466 break;
467 case 1:
468 pr_debug("fb%d: 4 bit pseudocolor\n", info->node);
469 vga_wgfx(NULL, VGA_GFX_MODE, 0x40);
470 svga_wseq_mask(0x15, 0x20, 0xFE);
471 svga_wcrt_mask(0x11, 0x00, 0x70);
472 break;
473 case 2:
474 pr_debug("fb%d: 4 bit pseudocolor, planar\n", info->node);
475 svga_wseq_mask(0x15, 0x00, 0xFE);
476 svga_wcrt_mask(0x11, 0x00, 0x70);
477 break;
478 case 3:
479 pr_debug("fb%d: 8 bit pseudocolor\n", info->node);
480 svga_wseq_mask(0x15, 0x22, 0xFE);
481 break;
482 case 4:
483 pr_debug("fb%d: 5/6/5 truecolor\n", info->node);
484 svga_wseq_mask(0x15, 0xB6, 0xFE);
485 break;
486 case 5:
487 pr_debug("fb%d: 8/8/8 truecolor\n", info->node);
488 svga_wseq_mask(0x15, 0xAE, 0xFE);
489 break;
490 default:
491 printk(KERN_ERR "vt8623fb: unsupported mode - bug\n");
492 return (-EINVAL);
493 }
494
495 vt8623_set_pixclock(info, info->var.pixclock);
496 svga_set_timings(&vt8623_timing_regs, &(info->var), 1, 1,
497 (info->var.vmode & FB_VMODE_DOUBLE) ? 2 : 1, 1,
498 1, info->node);
499
500 memset_io(info->screen_base, 0x00, screen_size);
501
502 /* Device and screen back on */
503 svga_wcrt_mask(0x17, 0x80, 0x80);
504 svga_wcrt_mask(0x36, 0x00, 0x30);
505 svga_wseq_mask(0x01, 0x00, 0x20);
506
507 return 0;
508}
509
510
511static int vt8623fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
512 u_int transp, struct fb_info *fb)
513{
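	/* The fb layer passes 16-bit color components; the VGA DAC takes only
	   6 bits per component, hence the >> 10 shifts below */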
514 switch (fb->var.bits_per_pixel) {
515 case 0:
516 case 4:
517 if (regno >= 16)
518 return -EINVAL;
519
520 outb(0x0F, VGA_PEL_MSK);
521 outb(regno, VGA_PEL_IW);
522 outb(red >> 10, VGA_PEL_D);
523 outb(green >> 10, VGA_PEL_D);
524 outb(blue >> 10, VGA_PEL_D);
525 break;
526 case 8:
527 if (regno >= 256)
528 return -EINVAL;
529
530 outb(0xFF, VGA_PEL_MSK);
531 outb(regno, VGA_PEL_IW);
532 outb(red >> 10, VGA_PEL_D);
533 outb(green >> 10, VGA_PEL_D);
534 outb(blue >> 10, VGA_PEL_D);
535 break;
536 case 16:
537 if (regno >= 16)
538 return 0;
539
540 if (fb->var.green.length == 5)
541 ((u32*)fb->pseudo_palette)[regno] = ((red & 0xF800) >> 1) |
542 ((green & 0xF800) >> 6) | ((blue & 0xF800) >> 11);
543 else if (fb->var.green.length == 6)
544 ((u32*)fb->pseudo_palette)[regno] = (red & 0xF800) |
545 ((green & 0xFC00) >> 5) | ((blue & 0xF800) >> 11);
546 else
547 return -EINVAL;
548 break;
549 case 24:
550 case 32:
551 if (regno >= 16)
552 return 0;
553
554 /* ((transp & 0xFF00) << 16) */
555 ((u32*)fb->pseudo_palette)[regno] = ((red & 0xFF00) << 8) |
556 (green & 0xFF00) | ((blue & 0xFF00) >> 8);
557 break;
558 default:
559 return -EINVAL;
560 }
561
562 return 0;
563}
564
565
566static int vt8623fb_blank(int blank_mode, struct fb_info *info)
567{
568 switch (blank_mode) {
569 case FB_BLANK_UNBLANK:
570 pr_debug("fb%d: unblank\n", info->node);
571 svga_wcrt_mask(0x36, 0x00, 0x30);
572 svga_wseq_mask(0x01, 0x00, 0x20);
573 break;
574 case FB_BLANK_NORMAL:
575 pr_debug("fb%d: blank\n", info->node);
576 svga_wcrt_mask(0x36, 0x00, 0x30);
577 svga_wseq_mask(0x01, 0x20, 0x20);
578 break;
579 case FB_BLANK_HSYNC_SUSPEND:
580 pr_debug("fb%d: DPMS standby (hsync off)\n", info->node);
581 svga_wcrt_mask(0x36, 0x10, 0x30);
582 svga_wseq_mask(0x01, 0x20, 0x20);
583 break;
584 case FB_BLANK_VSYNC_SUSPEND:
585 pr_debug("fb%d: DPMS suspend (vsync off)\n", info->node);
586 svga_wcrt_mask(0x36, 0x20, 0x30);
587 svga_wseq_mask(0x01, 0x20, 0x20);
588 break;
589 case FB_BLANK_POWERDOWN:
590 pr_debug("fb%d: DPMS off (no sync)\n", info->node);
591 svga_wcrt_mask(0x36, 0x30, 0x30);
592 svga_wseq_mask(0x01, 0x20, 0x20);
593 break;
594 }
595
596 return 0;
597}
598
599
600static int vt8623fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
601{
602 unsigned int offset;
603
604 /* Calculate the offset */
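	/* The start address is in mode-dependent units: 8-pixel character
	   cells in text mode, 16-bit words in packed graphics modes and
	   plane-interleaved addresses at 4 bpp (inferred from the shifts) */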
605 if (var->bits_per_pixel == 0) {
606 offset = (var->yoffset / 16) * var->xres_virtual + var->xoffset;
607 offset = offset >> 3;
608 } else {
609 offset = (var->yoffset * info->fix.line_length) +
610 (var->xoffset * var->bits_per_pixel / 8);
611 offset = offset >> ((var->bits_per_pixel == 4) ? 2 : 1);
612 }
613
614 /* Set the offset */
615 svga_wcrt_multi(vt8623_start_address_regs, offset);
616
617 return 0;
618}
619
620
621/* ------------------------------------------------------------------------- */
622
623
624/* Frame buffer operations */
625
626static struct fb_ops vt8623fb_ops = {
627 .owner = THIS_MODULE,
628 .fb_open = vt8623fb_open,
629 .fb_release = vt8623fb_release,
630 .fb_check_var = vt8623fb_check_var,
631 .fb_set_par = vt8623fb_set_par,
632 .fb_setcolreg = vt8623fb_setcolreg,
633 .fb_blank = vt8623fb_blank,
634 .fb_pan_display = vt8623fb_pan_display,
635 .fb_fillrect = vt8623fb_fillrect,
636 .fb_copyarea = cfb_copyarea,
637 .fb_imageblit = vt8623fb_imageblit,
638 .fb_get_caps = svga_get_caps,
639};
640
641
642/* PCI probe */
643
644static int __devinit vt8623_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
645{
646 struct fb_info *info;
647 struct vt8623fb_info *par;
648 unsigned int memsize1, memsize2;
649 int rc;
650
651 /* Ignore secondary VGA device because there is no VGA arbitration */
652 if (! svga_primary_device(dev)) {
653 dev_info(&(dev->dev), "ignoring secondary device\n");
654 return -ENODEV;
655 }
656
657 /* Allocate and fill driver data structure */
658 info = framebuffer_alloc(sizeof(struct vt8623fb_info), NULL);
659 if (! info) {
660 dev_err(&(dev->dev), "cannot allocate memory\n");
661 return -ENOMEM;
662 }
663
664 par = info->par;
665 mutex_init(&par->open_lock);
666
667 info->flags = FBINFO_PARTIAL_PAN_OK | FBINFO_HWACCEL_YPAN;
668 info->fbops = &vt8623fb_ops;
669
670 /* Prepare PCI device */
671
672 rc = pci_enable_device(dev);
673 if (rc < 0) {
674 dev_err(&(dev->dev), "cannot enable PCI device\n");
675 goto err_enable_device;
676 }
677
678 rc = pci_request_regions(dev, "vt8623fb");
679 if (rc < 0) {
680 dev_err(&(dev->dev), "cannot reserve framebuffer region\n");
681 goto err_request_regions;
682 }
683
684 info->fix.smem_start = pci_resource_start(dev, 0);
685 info->fix.smem_len = pci_resource_len(dev, 0);
686 info->fix.mmio_start = pci_resource_start(dev, 1);
687 info->fix.mmio_len = pci_resource_len(dev, 1);
688
689 /* Map physical IO memory address into kernel space */
690 info->screen_base = pci_iomap(dev, 0, 0);
691 if (! info->screen_base) {
692 rc = -ENOMEM;
693 dev_err(&(dev->dev), "iomap for framebuffer failed\n");
694 goto err_iomap_1;
695 }
696
697 par->mmio_base = pci_iomap(dev, 1, 0);
698 if (! par->mmio_base) {
699 rc = -ENOMEM;
700 dev_err(&(dev->dev), "iomap for MMIO failed\n");
701 goto err_iomap_2;
702 }
703
704	/* Find out how much physical memory the card has */
705 memsize1 = (vga_rseq(NULL, 0x34) + 1) >> 1;
706 memsize2 = vga_rseq(NULL, 0x39) << 2;
707
708 if ((16 <= memsize1) && (memsize1 <= 64) && (memsize1 == memsize2))
709 info->screen_size = memsize1 << 20;
710 else {
711		dev_err(&(dev->dev), "memory size detection failed (%x %x), assuming 16 MB\n", memsize1, memsize2);
712 info->screen_size = 16 << 20;
713 }
714
715 info->fix.smem_len = info->screen_size;
716 strcpy(info->fix.id, "VIA VT8623");
717 info->fix.type = FB_TYPE_PACKED_PIXELS;
718 info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
719 info->fix.ypanstep = 0;
720 info->fix.accel = FB_ACCEL_NONE;
721 info->pseudo_palette = (void*)par->pseudo_palette;
722
723 /* Prepare startup mode */
724
725 rc = fb_find_mode(&(info->var), info, mode, NULL, 0, NULL, 8);
726 if (! ((rc == 1) || (rc == 2))) {
727 rc = -EINVAL;
728 dev_err(&(dev->dev), "mode %s not found\n", mode);
729 goto err_find_mode;
730 }
731
732 rc = fb_alloc_cmap(&info->cmap, 256, 0);
733 if (rc < 0) {
734 dev_err(&(dev->dev), "cannot allocate colormap\n");
735 goto err_alloc_cmap;
736 }
737
738 rc = register_framebuffer(info);
739 if (rc < 0) {
740		dev_err(&(dev->dev), "cannot register framebuffer\n");
741 goto err_reg_fb;
742 }
743
744 printk(KERN_INFO "fb%d: %s on %s, %d MB RAM\n", info->node, info->fix.id,
745 pci_name(dev), info->fix.smem_len >> 20);
746
747 /* Record a reference to the driver data */
748 pci_set_drvdata(dev, info);
749
750#ifdef CONFIG_MTRR
751	/* mark "no MTRR" first so the remove path never deletes a bogus region */
752	par->mtrr_reg = -1;
753	if (mtrr)
754		par->mtrr_reg = mtrr_add(info->fix.smem_start, info->fix.smem_len, MTRR_TYPE_WRCOMB, 1);
755#endif
756
757 return 0;
758
759 /* Error handling */
760err_reg_fb:
761 fb_dealloc_cmap(&info->cmap);
762err_alloc_cmap:
763err_find_mode:
764 pci_iounmap(dev, par->mmio_base);
765err_iomap_2:
766 pci_iounmap(dev, info->screen_base);
767err_iomap_1:
768 pci_release_regions(dev);
769err_request_regions:
770/* pci_disable_device(dev); */
771err_enable_device:
772 framebuffer_release(info);
773 return rc;
774}
775
776/* PCI remove */
777
778static void __devexit vt8623_pci_remove(struct pci_dev *dev)
779{
780	struct fb_info *info = pci_get_drvdata(dev);
781
782	if (info) {
783		struct vt8623fb_info *par = info->par;
784#ifdef CONFIG_MTRR
785 if (par->mtrr_reg >= 0) {
786 mtrr_del(par->mtrr_reg, 0, 0);
787 par->mtrr_reg = -1;
788 }
789#endif
790
791 unregister_framebuffer(info);
792 fb_dealloc_cmap(&info->cmap);
793
794 pci_iounmap(dev, info->screen_base);
795 pci_iounmap(dev, par->mmio_base);
796 pci_release_regions(dev);
797/* pci_disable_device(dev); */
798
799 pci_set_drvdata(dev, NULL);
800 framebuffer_release(info);
801 }
802}
803
804
805#ifdef CONFIG_PM
806/* PCI suspend */
807
808static int vt8623_pci_suspend(struct pci_dev* dev, pm_message_t state)
809{
810 struct fb_info *info = pci_get_drvdata(dev);
811 struct vt8623fb_info *par = info->par;
812
813 dev_info(&(dev->dev), "suspend\n");
814
815 acquire_console_sem();
816 mutex_lock(&(par->open_lock));
817
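	/* During a hibernation snapshot (FREEZE) the hardware must stay
	   untouched, and with no opened instances there is nothing to save,
	   so skip the power-down in both cases */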
818 if ((state.event == PM_EVENT_FREEZE) || (par->ref_count == 0)) {
819 mutex_unlock(&(par->open_lock));
820 release_console_sem();
821 return 0;
822 }
823
824 fb_set_suspend(info, 1);
825
826 pci_save_state(dev);
827 pci_disable_device(dev);
828 pci_set_power_state(dev, pci_choose_state(dev, state));
829
830 mutex_unlock(&(par->open_lock));
831 release_console_sem();
832
833 return 0;
834}
835
836
837/* PCI resume */
838
839static int vt8623_pci_resume(struct pci_dev* dev)
840{
841 struct fb_info *info = pci_get_drvdata(dev);
842 struct vt8623fb_info *par = info->par;
843
844 dev_info(&(dev->dev), "resume\n");
845
846 acquire_console_sem();
847 mutex_lock(&(par->open_lock));
848
849 if (par->ref_count == 0) {
850 mutex_unlock(&(par->open_lock));
851 release_console_sem();
852 return 0;
853 }
854
855 pci_set_power_state(dev, PCI_D0);
856 pci_restore_state(dev);
857
858 if (pci_enable_device(dev))
859 goto fail;
860
861 pci_set_master(dev);
862
863 vt8623fb_set_par(info);
864 fb_set_suspend(info, 0);
865
866fail:
867	mutex_unlock(&(par->open_lock));
868 release_console_sem();
869
870 return 0;
871}
872#else
873#define vt8623_pci_suspend NULL
874#define vt8623_pci_resume NULL
875#endif /* CONFIG_PM */
876
877/* List of boards that we are trying to support */
878
879static struct pci_device_id vt8623_devices[] __devinitdata = {
880 {PCI_DEVICE(PCI_VENDOR_ID_VIA, 0x3122)},
881 {0, 0, 0, 0, 0, 0, 0}
882};
883
884MODULE_DEVICE_TABLE(pci, vt8623_devices);
885
886static struct pci_driver vt8623fb_pci_driver = {
887 .name = "vt8623fb",
888 .id_table = vt8623_devices,
889 .probe = vt8623_pci_probe,
890 .remove = __devexit_p(vt8623_pci_remove),
891 .suspend = vt8623_pci_suspend,
892 .resume = vt8623_pci_resume,
893};
894
895/* Cleanup */
896
897static void __exit vt8623fb_cleanup(void)
898{
899 pr_debug("vt8623fb: cleaning up\n");
900 pci_unregister_driver(&vt8623fb_pci_driver);
901}
902
903/* Driver Initialisation */
904
905static int __init vt8623fb_init(void)
906{
907
908#ifndef MODULE
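	/* Built-in case: take the startup mode from the video= kernel
	   command line via fb_get_options() */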
909 char *option = NULL;
910
911 if (fb_get_options("vt8623fb", &option))
912 return -ENODEV;
913
914 if (option && *option)
915 mode = option;
916#endif
917
918 pr_debug("vt8623fb: initializing\n");
919 return pci_register_driver(&vt8623fb_pci_driver);
920}
921
922/* ------------------------------------------------------------------------- */
923
924/* Modularization */
925
926module_init(vt8623fb_init);
927module_exit(vt8623fb_cleanup);
diff --git a/drivers/w1/Kconfig b/drivers/w1/Kconfig
index c287a9ae4fdd..ca75b3ad3a2e 100644
--- a/drivers/w1/Kconfig
+++ b/drivers/w1/Kconfig
@@ -1,4 +1,5 @@
 menu "Dallas's 1-wire bus"
+	depends on HAS_IOMEM
 
 config W1
 	tristate "Dallas's 1-wire support"